Compare commits

221 Commits

86a01362c0
9789612689
3769601fb6
2528468e20
f1d59a207f
85b2396265
25dc978bb1
13838647ca
133dc77da9
9ce4686bfe
50e04b3f3c
226727f08f
b8021620e2
a0f4f12b62
c351cd720a
7279be7c75
f5fd937fc5
de43442391
3b9cb8d7d8
b9510b3274
30099bf0f0
f2fde86dae
a40e6a214e
3e8b73e65b
8babbdc9b1
e91df69cf2
855ae45c5a
b1959b1719
ecae610207
33a67961c8
fc1fed62d9
0684cd5d8c
a4a87a7efd
b76dc8b5b8
6e2204b152
81bdaacb65
3e3193f03c
3b17c1e13e
8abe459ac6
bebe3d35f3
7c270a5e3b
20a03d5c93
57710c3646
0d481efb7b
92a36a4e78
ef33242c2a
1c983ee2c1
a6d59978a0
e9ddf726aa
c52c9f534e
0d31a01d3a
475d68e0f5
7fd20799d7
bf00132a14
ede5946b70
3cd2c33d36
c494be6411
2d855c94b6
f249e98891
92888e9ed4
3f10a779b4
8f238dd9bd
da4f5d9d77
931f5b2351
b807e987f1
e7fdd6aa0d
9f09bfe681
7390c2629d
3d9860f68a
4fde30ba9d
a06432b6c3
d7470271c7
d41010b895
a63941eec2
5d6f8bab02
de7671e4c4
217f781adc
9440079216
d054ec868d
4d8b82160d
84fdfcab99
cd01611d7b
8b8d794800
9a02be3122
223ae2467e
9e65065080
5111c78eab
394d3c937a
afcd152b97
dad7beaceb
93360af0d7
4ed83378bf
1579f14632
7394e53f30
9bbed55b10
9181faab68
d1c2f86b21
2bcd8f22f2
c790e31ae4
7fe5d0a78d
bf219a564c
871d99ef77
418e9a6113
e25e0903ab
969aee07e6
76587eea64
667fe8c75b
64624c5678
114e4b970e
c9659dfd29
41ee9a4495
e812a089f5
9b8b35910f
f00ec3307b
f6499563c3
16a4aef345
6352153811
557e8bd589
45361d8aa3
81e1b5f5fe
6505eb45bc
17f0581e0d
13b7962aae
aa40f11b81
3bb4c3e74d
7ac50d846c
0f4c03cd63
2d1d053c5d
72ed4166a6
787a6156a2
b05d355040
f253fa9552
a28c276b8d
26da47a09b
23fdcd3b0a
653329dfcb
b0964918d8
d8fda618d0
43881c7732
42bdcebf33
d1e71ecbb0
64be1a45eb
f8bd98ae4d
f2c6e2c3b4
8acfae6901
e5ccd894d1
6287107eae
53330b30fd
9726c1a5f8
f1dd343007
be894938c4
fe0fd3de22
fe12b3a7a6
a1605af9b5
b0937dd61d
ac0dea5dd7
0989a120f1
c7caed88a0
5df02760dd
014dee89d0
da1a8191c5
dd9b24a488
16b5df17ea
99af97ea11
6c66ea5e73
252356cf06
56fb830c30
d19b55649c
fd6230e8f0
9406d3c910
9e1ce9a8ee
b4d2888ce8
b3f30cb6d6
0484d7ad7e
85b829bff9
f1685bd31a
8d10d6e127
f93f739eca
1a6218954a
1f8e0f7e06
6dd718e416
1aab060996
3bc5aa65bb
da1dfea076
e940d15a98
481118615c
4b6e46c427
516ba41f05
1a642b7217
58b5b062b8
f13f5a7d4b
d14696c99c
d0af7d5745
f2abf8df7a
40c7613ecf
1a7d1793d6
9fcc632249
f7395926f2
7bc37641e3
3ac0638d57
051ac5c0f5
5ac46a0969
8b55f67e3e
bb1d75e6c5
459a84ada3
35fe089dd9
9e6d8c309f
afbaf6b367
f1da6691a4
5b1a953960
37e2d574dd
f25f5f8c62
a437298de5
e912b0777b
b3f48a5044
ee9e966296
493a92313f
1578986a0d
7788297a59
23376ae2f0
8231764784
.gitattributes (vendored, new file): 1 change

@@ -0,0 +1 @@
+*.pnm -diff -text
LICENSE: 1 change

@@ -33,6 +33,7 @@ Specifically, the GPL parts of FFmpeg are
 - vf_geq.c
 - vf_histeq.c
 - vf_hqdn3d.c
+- vf_interlace.c
 - vf_kerndeint.c
 - vf_mcdeint.c
 - vf_mp.c
Makefile: 2 changes

@@ -106,7 +106,7 @@ endef

 $(foreach P,$(PROGS),$(eval $(call DOPROG,$(P:$(PROGSSUF)$(EXESUF)=))))

-ffprobe.o cmdutils.o : libavutil/ffversion.h
+ffprobe.o cmdutils.o libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h

 $(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
 	$(CP) $< $@
@@ -1803,7 +1803,7 @@ int read_yesno(void)

 int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
 {
-    int ret;
+    int64_t ret;
     FILE *f = av_fopen_utf8(filename, "rb");

     if (!f) {
configure (vendored): 4 changes

@@ -4287,6 +4287,7 @@ EOF
 fi

 check_ldflags -Wl,--as-needed
+check_ldflags -Wl,-z,noexecstack

 if check_func dlopen; then
     ldl=

@@ -5175,7 +5176,7 @@ cat > $TMPH <<EOF
 #define FFMPEG_CONFIG_H
 #define FFMPEG_CONFIGURATION "$(c_escape $FFMPEG_CONFIGURATION)"
 #define FFMPEG_LICENSE "$(c_escape $license)"
-#define CONFIG_THIS_YEAR 2014
+#define CONFIG_THIS_YEAR 2015
 #define FFMPEG_DATADIR "$(eval c_escape $datadir)"
 #define AVCONV_DATADIR "$(eval c_escape $datadir)"
 #define CC_IDENT "$(c_escape ${cc_ident:-Unknown compiler})"

@@ -5202,6 +5203,7 @@ enabled getenv || echo "#define getenv(x) NULL" >> $TMPH


 mkdir -p doc
+mkdir -p tests
 echo "@c auto-generated by configure" > doc/config.texi

 print_config ARCH_ "$config_files" $ARCH_LIST
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
 # This could be handy for archiving the generated documentation or
 # if some version control system is used.

-PROJECT_NUMBER = 2.2.7
+PROJECT_NUMBER = 2.2.12

 # With the PROJECT_LOGO tag one can specify a logo or icon that is included
 # in the documentation. The maximum height of the logo should not exceed 55
@@ -1405,11 +1405,11 @@ ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
 You can put many streams of the same type in the output:

 @example
-ffmpeg -i test1.avi -i test2.avi -map 0:3 -map 0:2 -map 0:1 -map 0:0 -c copy test12.nut
+ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
 @end example

-The resulting output file @file{test12.avi} will contain first four streams from
-the input file in reverse order.
+The resulting output file @file{test12.nut} will contain the first four streams
+from the input files in reverse order.

 @item
 To force CBR video output:
@@ -491,7 +491,7 @@ aeval=val(ch)/2:c=same
 @item
 Invert phase of the second channel:
 @example
-eval=val(0)|-val(1)
+aeval=val(0)|-val(1)
 @end example
 @end itemize

@@ -8850,7 +8850,7 @@ Default value is "all", which will cycle through the list of all tests.

 For example the following:
 @example
-testsrc=t=dc_luma
+mptestsrc=t=dc_luma
 @end example

 will generate a "dc_luma" test pattern.
ffmpeg.c: 2 changes

@@ -2284,7 +2284,7 @@ static int transcode_init(void)
     AVCodecContext *codec;
     OutputStream *ost;
     InputStream *ist;
-    char error[1024];
+    char error[1024] = {0};
     int want_sdp = 1;

     for (i = 0; i < nb_filtergraphs; i++) {
52
ffmpeg_opt.c
52
ffmpeg_opt.c
@@ -2153,19 +2153,19 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
|
||||
parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
|
||||
parse_option(o, "r", frame_rates[norm], options);
|
||||
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
|
||||
av_dict_set(&o->g->codec_opts, "b:v", "1150000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "maxrate", "1150000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "minrate", "1150000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "bufsize", "327680", AV_DICT_DONT_OVERWRITE); // 40*1024*8;
|
||||
opt_default(NULL, "b:v", "1150000");
|
||||
opt_default(NULL, "maxrate", "1150000");
|
||||
opt_default(NULL, "minrate", "1150000");
|
||||
opt_default(NULL, "bufsize", "327680"); // 40*1024*8;
|
||||
|
||||
av_dict_set(&o->g->codec_opts, "b:a", "224000", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "b:a", "224000");
|
||||
parse_option(o, "ar", "44100", options);
|
||||
parse_option(o, "ac", "2", options);
|
||||
|
||||
av_dict_set(&o->g->format_opts, "packetsize", "2324", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->format_opts, "muxrate", "1411200", AV_DICT_DONT_OVERWRITE); // 2352 * 75 * 8;
|
||||
opt_default(NULL, "packetsize", "2324");
|
||||
opt_default(NULL, "muxrate", "1411200"); // 2352 * 75 * 8;
|
||||
|
||||
/* We have to offset the PTS, so that it is consistent with the SCR.
|
||||
SCR starts at 36000, but the first two packs contain only padding
|
||||
@@ -2182,18 +2182,18 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
|
||||
parse_option(o, "r", frame_rates[norm], options);
|
||||
parse_option(o, "pix_fmt", "yuv420p", options);
|
||||
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
|
||||
av_dict_set(&o->g->codec_opts, "b:v", "2040000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "maxrate", "2516000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "minrate", "0", AV_DICT_DONT_OVERWRITE); // 1145000;
|
||||
av_dict_set(&o->g->codec_opts, "bufsize", "1835008", AV_DICT_DONT_OVERWRITE); // 224*1024*8;
|
||||
av_dict_set(&o->g->codec_opts, "scan_offset", "1", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "b:v", "2040000");
|
||||
opt_default(NULL, "maxrate", "2516000");
|
||||
opt_default(NULL, "minrate", "0"); // 1145000;
|
||||
opt_default(NULL, "bufsize", "1835008"); // 224*1024*8;
|
||||
opt_default(NULL, "scan_offset", "1");
|
||||
|
||||
av_dict_set(&o->g->codec_opts, "b:a", "224000", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "b:a", "224000");
|
||||
parse_option(o, "ar", "44100", options);
|
||||
|
||||
av_dict_set(&o->g->format_opts, "packetsize", "2324", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "packetsize", "2324");
|
||||
|
||||
} else if (!strcmp(arg, "dvd")) {
|
||||
|
||||
@@ -2204,17 +2204,17 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
|
||||
parse_option(o, "r", frame_rates[norm], options);
|
||||
parse_option(o, "pix_fmt", "yuv420p", options);
|
||||
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
|
||||
av_dict_set(&o->g->codec_opts, "b:v", "6000000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "maxrate", "9000000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "minrate", "0", AV_DICT_DONT_OVERWRITE); // 1500000;
|
||||
av_dict_set(&o->g->codec_opts, "bufsize", "1835008", AV_DICT_DONT_OVERWRITE); // 224*1024*8;
|
||||
opt_default(NULL, "b:v", "6000000");
|
||||
opt_default(NULL, "maxrate", "9000000");
|
||||
opt_default(NULL, "minrate", "0"); // 1500000;
|
||||
opt_default(NULL, "bufsize", "1835008"); // 224*1024*8;
|
||||
|
||||
av_dict_set(&o->g->format_opts, "packetsize", "2048", AV_DICT_DONT_OVERWRITE); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
|
||||
av_dict_set(&o->g->format_opts, "muxrate", "10080000", AV_DICT_DONT_OVERWRITE); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
|
||||
opt_default(NULL, "packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
|
||||
opt_default(NULL, "muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
|
||||
|
||||
av_dict_set(&o->g->codec_opts, "b:a", "448000", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "b:a", "448000");
|
||||
parse_option(o, "ar", "48000", options);
|
||||
|
||||
} else if (!strncmp(arg, "dv", 2)) {
|
||||
@@ -2233,6 +2233,10 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
av_dict_copy(&o->g->codec_opts, codec_opts, AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_copy(&o->g->format_opts, format_opts, AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -34,7 +34,7 @@ static int aac_sync(uint64_t state, AACAC3ParseContext *hdr_info,
|
||||
int size;
|
||||
union {
|
||||
uint64_t u64;
|
||||
uint8_t u8[8];
|
||||
uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
} tmp;
|
||||
|
||||
tmp.u64 = av_be2ne64(state);
|
||||
|
@@ -166,7 +166,7 @@ static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
|
||||
int err;
|
||||
union {
|
||||
uint64_t u64;
|
||||
uint8_t u8[8];
|
||||
uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
} tmp = { av_be2ne64(state) };
|
||||
AC3HeaderInfo hdr, *phdr = &hdr;
|
||||
GetBitContext gbc;
|
||||
|
@@ -256,7 +256,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
|
||||
energy_cpl = energy[blk][CPL_CH][bnd];
|
||||
energy_ch = energy[blk][ch][bnd];
|
||||
blk1 = blk+1;
|
||||
while (!s->blocks[blk1].new_cpl_coords[ch] && blk1 < s->num_blocks) {
|
||||
while (blk1 < s->num_blocks && !s->blocks[blk1].new_cpl_coords[ch]) {
|
||||
if (s->blocks[blk1].cpl_in_use) {
|
||||
energy_cpl += energy[blk1][CPL_CH][bnd];
|
||||
energy_ch += energy[blk1][ch][bnd];
|
||||
|
@@ -135,7 +135,7 @@ static int cinepak_decode_vectors (CinepakContext *s, cvid_strip *strip,
|
||||
const uint8_t *eod = (data + size);
|
||||
uint32_t flag, mask;
|
||||
uint8_t *cb0, *cb1, *cb2, *cb3;
|
||||
unsigned int x, y;
|
||||
int x, y;
|
||||
char *ip0, *ip1, *ip2, *ip3;
|
||||
|
||||
flag = 0;
|
||||
|
@@ -1214,8 +1214,8 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
|
||||
|
||||
q->num_subpackets++;
|
||||
s++;
|
||||
if (s > MAX_SUBPACKETS) {
|
||||
avpriv_request_sample(avctx, "subpackets > %d", MAX_SUBPACKETS);
|
||||
if (s > FFMIN(MAX_SUBPACKETS, avctx->block_align)) {
|
||||
avpriv_request_sample(avctx, "subpackets > %d", FFMIN(MAX_SUBPACKETS, avctx->block_align));
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
}
|
||||
|
@@ -2349,6 +2349,10 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||
#else
|
||||
if (s->xch_present && !s->xch_disable) {
|
||||
#endif
|
||||
if (avctx->channel_layout & AV_CH_BACK_CENTER) {
|
||||
avpriv_request_sample(avctx, "XCh with Back center channel");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
avctx->channel_layout |= AV_CH_BACK_CENTER;
|
||||
if (s->lfe) {
|
||||
avctx->channel_layout |= AV_CH_LOW_FREQUENCY;
|
||||
|
@@ -171,6 +171,10 @@ static inline int dirac_get_arith_uint(DiracArith *c, int follow_ctx, int data_c
|
||||
{
|
||||
int ret = 1;
|
||||
while (!dirac_get_arith_bit(c, follow_ctx)) {
|
||||
if (ret >= 0x40000000) {
|
||||
av_log(NULL, AV_LOG_ERROR, "dirac_get_arith_uint overflow\n");
|
||||
return -1;
|
||||
}
|
||||
ret <<= 1;
|
||||
ret += dirac_get_arith_bit(c, data_ctx);
|
||||
follow_ctx = ff_dirac_next_ctx[follow_ctx];
|
||||
|
@@ -610,10 +610,10 @@ static av_always_inline void decode_subband_internal(DiracContext *s, SubBand *b
|
||||
|
||||
top = 0;
|
||||
for (cb_y = 0; cb_y < cb_height; cb_y++) {
|
||||
bottom = (b->height * (cb_y+1)) / cb_height;
|
||||
bottom = (b->height * (cb_y+1LL)) / cb_height;
|
||||
left = 0;
|
||||
for (cb_x = 0; cb_x < cb_width; cb_x++) {
|
||||
right = (b->width * (cb_x+1)) / cb_width;
|
||||
right = (b->width * (cb_x+1LL)) / cb_width;
|
||||
codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
|
||||
left = right;
|
||||
}
|
||||
@@ -1002,8 +1002,8 @@ static int dirac_unpack_idwt_params(DiracContext *s)
|
||||
/* Codeblock parameters (core syntax only) */
|
||||
if (get_bits1(gb)) {
|
||||
for (i = 0; i <= s->wavelet_depth; i++) {
|
||||
CHECKEDREAD(s->codeblock[i].width , tmp < 1, "codeblock width invalid\n")
|
||||
CHECKEDREAD(s->codeblock[i].height, tmp < 1, "codeblock height invalid\n")
|
||||
CHECKEDREAD(s->codeblock[i].width , tmp < 1 || tmp > (s->avctx->width >>s->wavelet_depth-i), "codeblock width invalid\n")
|
||||
CHECKEDREAD(s->codeblock[i].height, tmp < 1 || tmp > (s->avctx->height>>s->wavelet_depth-i), "codeblock height invalid\n")
|
||||
}
|
||||
|
||||
CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n")
|
||||
|
@@ -36,6 +36,7 @@ typedef struct DNXHDContext {
|
||||
GetBitContext gb;
|
||||
int64_t cid; ///< compression id
|
||||
unsigned int width, height;
|
||||
enum AVPixelFormat pix_fmt;
|
||||
unsigned int mb_width, mb_height;
|
||||
uint32_t mb_scan_index[68]; /* max for 1080p */
|
||||
int cur_field; ///< current interlaced field
|
||||
@@ -133,7 +134,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
|
||||
|
||||
ctx->is_444 = 0;
|
||||
if (buf[0x4] == 0x2) {
|
||||
ctx->avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
|
||||
ctx->pix_fmt = AV_PIX_FMT_YUV444P10;
|
||||
ctx->avctx->bits_per_raw_sample = 10;
|
||||
if (ctx->bit_depth != 10) {
|
||||
ff_dsputil_init(&ctx->dsp, ctx->avctx);
|
||||
@@ -142,7 +143,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
|
||||
}
|
||||
ctx->is_444 = 1;
|
||||
} else if (buf[0x21] & 0x40) {
|
||||
ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
|
||||
ctx->pix_fmt = AV_PIX_FMT_YUV422P10;
|
||||
ctx->avctx->bits_per_raw_sample = 10;
|
||||
if (ctx->bit_depth != 10) {
|
||||
ff_dsputil_init(&ctx->dsp, ctx->avctx);
|
||||
@@ -150,7 +151,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
|
||||
ctx->decode_dct_block = dnxhd_decode_dct_block_10;
|
||||
}
|
||||
} else {
|
||||
ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
|
||||
ctx->pix_fmt = AV_PIX_FMT_YUV422P;
|
||||
ctx->avctx->bits_per_raw_sample = 8;
|
||||
if (ctx->bit_depth != 8) {
|
||||
ff_dsputil_init(&ctx->dsp, ctx->avctx);
|
||||
@@ -432,7 +433,13 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
avctx->width, avctx->height, ctx->width, ctx->height);
|
||||
first_field = 1;
|
||||
}
|
||||
if (avctx->pix_fmt != AV_PIX_FMT_NONE && avctx->pix_fmt != ctx->pix_fmt) {
|
||||
av_log(avctx, AV_LOG_WARNING, "pix_fmt changed: %s -> %s\n",
|
||||
av_get_pix_fmt_name(avctx->pix_fmt), av_get_pix_fmt_name(ctx->pix_fmt));
|
||||
first_field = 1;
|
||||
}
|
||||
|
||||
avctx->pix_fmt = ctx->pix_fmt;
|
||||
ret = ff_set_dimensions(avctx, ctx->width, ctx->height);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
@@ -37,7 +37,7 @@ typedef struct DVDSubContext
|
||||
int has_palette;
|
||||
uint8_t colormap[4];
|
||||
uint8_t alpha[256];
|
||||
uint8_t *buf;
|
||||
uint8_t buf[0x10000];
|
||||
int buf_size;
|
||||
#ifdef DEBUG
|
||||
int sub_id;
|
||||
@@ -105,6 +105,12 @@ static int decode_rle(uint8_t *bitmap, int linesize, int w, int h,
|
||||
int x, y, len, color;
|
||||
uint8_t *d;
|
||||
|
||||
if (start >= buf_size)
|
||||
return -1;
|
||||
|
||||
if (w <= 0 || h <= 0)
|
||||
return -1;
|
||||
|
||||
bit_len = (buf_size - start) * 8;
|
||||
init_get_bits(&gb, buf + start, bit_len);
|
||||
|
||||
@@ -356,10 +362,12 @@ static int decode_dvd_subtitles(DVDSubContext *ctx, AVSubtitle *sub_header,
|
||||
sub_header->rects[0] = av_mallocz(sizeof(AVSubtitleRect));
|
||||
sub_header->num_rects = 1;
|
||||
sub_header->rects[0]->pict.data[0] = bitmap;
|
||||
decode_rle(bitmap, w * 2, w, (h + 1) / 2,
|
||||
buf, offset1, buf_size, is_8bit);
|
||||
decode_rle(bitmap + w, w * 2, w, h / 2,
|
||||
buf, offset2, buf_size, is_8bit);
|
||||
if (decode_rle(bitmap, w * 2, w, (h + 1) / 2,
|
||||
buf, offset1, buf_size, is_8bit) < 0)
|
||||
goto fail;
|
||||
if (decode_rle(bitmap + w, w * 2, w, h / 2,
|
||||
buf, offset2, buf_size, is_8bit) < 0)
|
||||
goto fail;
|
||||
sub_header->rects[0]->pict.data[1] = av_mallocz(AVPALETTE_SIZE);
|
||||
if (is_8bit) {
|
||||
if (yuv_palette == 0)
|
||||
@@ -494,15 +502,11 @@ static int append_to_cached_buf(AVCodecContext *avctx,
|
||||
{
|
||||
DVDSubContext *ctx = avctx->priv_data;
|
||||
|
||||
if (ctx->buf_size > 0xffff - buf_size) {
|
||||
if (ctx->buf_size >= sizeof(ctx->buf) - buf_size) {
|
||||
av_log(avctx, AV_LOG_WARNING, "Attempt to reconstruct "
|
||||
"too large SPU packets aborted.\n");
|
||||
av_freep(&ctx->buf);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
ctx->buf = av_realloc(ctx->buf, ctx->buf_size + buf_size);
|
||||
if (!ctx->buf)
|
||||
return AVERROR(ENOMEM);
|
||||
memcpy(ctx->buf + ctx->buf_size, buf, buf_size);
|
||||
ctx->buf_size += buf_size;
|
||||
return 0;
|
||||
@@ -518,7 +522,7 @@ static int dvdsub_decode(AVCodecContext *avctx,
|
||||
AVSubtitle *sub = data;
|
||||
int is_menu;
|
||||
|
||||
if (ctx->buf) {
|
||||
if (ctx->buf_size) {
|
||||
int ret = append_to_cached_buf(avctx, buf, buf_size);
|
||||
if (ret < 0) {
|
||||
*data_size = 0;
|
||||
@@ -556,7 +560,6 @@ static int dvdsub_decode(AVCodecContext *avctx,
|
||||
}
|
||||
#endif
|
||||
|
||||
av_freep(&ctx->buf);
|
||||
ctx->buf_size = 0;
|
||||
*data_size = 1;
|
||||
return buf_size;
|
||||
@@ -638,7 +641,6 @@ static av_cold int dvdsub_init(AVCodecContext *avctx)
|
||||
static av_cold int dvdsub_close(AVCodecContext *avctx)
|
||||
{
|
||||
DVDSubContext *ctx = avctx->priv_data;
|
||||
av_freep(&ctx->buf);
|
||||
ctx->buf_size = 0;
|
||||
return 0;
|
||||
}
|
||||
|
@@ -329,6 +329,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
DxaDecContext * const c = avctx->priv_data;
|
||||
|
||||
if (avctx->width%4 || avctx->height%4) {
|
||||
avpriv_request_sample(avctx, "dimensions are not a multiple of 4");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
c->prev = av_frame_alloc();
|
||||
if (!c->prev)
|
||||
return AVERROR(ENOMEM);
|
||||
|
@@ -622,31 +622,31 @@ static int read_header(FFV1Context *f)
|
||||
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
|
||||
}
|
||||
|
||||
colorspace = get_symbol(c, state, 0); //YUV cs type
|
||||
colorspace = get_symbol(c, state, 0); //YUV cs type
|
||||
bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
|
||||
chroma_planes = get_rac(c, state);
|
||||
chroma_h_shift = get_symbol(c, state, 0);
|
||||
chroma_v_shift = get_symbol(c, state, 0);
|
||||
transparency = get_rac(c, state);
|
||||
chroma_planes = get_rac(c, state);
|
||||
chroma_h_shift = get_symbol(c, state, 0);
|
||||
chroma_v_shift = get_symbol(c, state, 0);
|
||||
transparency = get_rac(c, state);
|
||||
|
||||
if (f->plane_count) {
|
||||
if ( colorspace != f->colorspace
|
||||
|| bits_per_raw_sample != f->avctx->bits_per_raw_sample
|
||||
|| chroma_planes != f->chroma_planes
|
||||
|| chroma_h_shift!= f->chroma_h_shift
|
||||
|| chroma_v_shift!= f->chroma_v_shift
|
||||
|| transparency != f->transparency) {
|
||||
if (colorspace != f->colorspace ||
|
||||
bits_per_raw_sample != f->avctx->bits_per_raw_sample ||
|
||||
chroma_planes != f->chroma_planes ||
|
||||
chroma_h_shift != f->chroma_h_shift ||
|
||||
chroma_v_shift != f->chroma_v_shift ||
|
||||
transparency != f->transparency) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
f->colorspace = colorspace;
|
||||
f->colorspace = colorspace;
|
||||
f->avctx->bits_per_raw_sample = bits_per_raw_sample;
|
||||
f->chroma_planes = chroma_planes;
|
||||
f->chroma_h_shift = chroma_h_shift;
|
||||
f->chroma_v_shift = chroma_v_shift;
|
||||
f->transparency = transparency;
|
||||
f->chroma_planes = chroma_planes;
|
||||
f->chroma_h_shift = chroma_h_shift;
|
||||
f->chroma_v_shift = chroma_v_shift;
|
||||
f->transparency = transparency;
|
||||
|
||||
f->plane_count = 2 + f->transparency;
|
||||
}
|
||||
|
@@ -685,7 +685,7 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
|
||||
handle_error:
|
||||
*poutbuf = NULL;
|
||||
*poutbuf_size = 0;
|
||||
return read_end - buf;
|
||||
return buf_size ? read_end - buf : 0;
|
||||
}
|
||||
|
||||
static av_cold int flac_parse_init(AVCodecParserContext *c)
|
||||
|
@@ -471,10 +471,10 @@ static int decode_frame(FLACContext *s)
|
||||
ret = allocate_buffers(s);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
|
||||
s->got_streaminfo = 1;
|
||||
dump_headers(s->avctx, (FLACStreaminfo *)s);
|
||||
}
|
||||
ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
|
||||
|
||||
// dump_headers(s->avctx, (FLACStreaminfo *)s);
|
||||
|
||||
|
@@ -724,8 +724,10 @@ static int g2m_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
c->tile_width = bytestream2_get_be32(&bc);
|
||||
c->tile_height = bytestream2_get_be32(&bc);
|
||||
if (!c->tile_width || !c->tile_height ||
|
||||
((c->tile_width | c->tile_height) & 0xF)) {
|
||||
if (c->tile_width <= 0 || c->tile_height <= 0 ||
|
||||
((c->tile_width | c->tile_height) & 0xF) ||
|
||||
c->tile_width * 4LL * c->tile_height >= INT_MAX
|
||||
) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Invalid tile dimensions %dx%d\n",
|
||||
c->tile_width, c->tile_height);
|
||||
|
@@ -258,26 +258,21 @@ static int gif_read_image(GifState *s, AVFrame *frame)
|
||||
case 1:
|
||||
y1 += 8;
|
||||
ptr += linesize * 8;
|
||||
if (y1 >= height) {
|
||||
y1 = pass ? 2 : 4;
|
||||
ptr = ptr1 + linesize * y1;
|
||||
pass++;
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
y1 += 4;
|
||||
ptr += linesize * 4;
|
||||
if (y1 >= height) {
|
||||
y1 = 1;
|
||||
ptr = ptr1 + linesize;
|
||||
pass++;
|
||||
}
|
||||
break;
|
||||
case 3:
|
||||
y1 += 2;
|
||||
ptr += linesize * 2;
|
||||
break;
|
||||
}
|
||||
while (y1 >= height) {
|
||||
y1 = 4 >> pass;
|
||||
ptr = ptr1 + linesize * y1;
|
||||
pass++;
|
||||
}
|
||||
} else {
|
||||
ptr += linesize;
|
||||
}
|
||||
|
@@ -319,6 +319,14 @@ static int decode_slice(MpegEncContext *s)
|
||||
}
|
||||
}
|
||||
|
||||
if (s->codec_id == AV_CODEC_ID_H263 &&
|
||||
(s->workaround_bugs & FF_BUG_AUTODETECT) &&
|
||||
get_bits_left(&s->gb) >= 64 &&
|
||||
AV_RB64(s->gb.buffer_end - 8) == 0xCDCDCDCDFC7F0000) {
|
||||
|
||||
s->padding_bug_score += 32;
|
||||
}
|
||||
|
||||
if (s->workaround_bugs & FF_BUG_AUTODETECT) {
|
||||
if (s->padding_bug_score > -2 && !s->data_partitioning)
|
||||
s->workaround_bugs |= FF_BUG_NO_PADDING;
|
||||
|
@@ -601,18 +601,18 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
|
||||
|
||||
if ((h->left_samples_available & 0x8080) != 0x8080) {
|
||||
mode = left[mode];
|
||||
if (is_chroma && (h->left_samples_available & 0x8080)) {
|
||||
// mad cow disease mode, aka MBAFF + constrained_intra_pred
|
||||
mode = ALZHEIMER_DC_L0T_PRED8x8 +
|
||||
(!(h->left_samples_available & 0x8000)) +
|
||||
2 * (mode == DC_128_PRED8x8);
|
||||
}
|
||||
if (mode < 0) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"left block unavailable for requested intra mode at %d %d\n",
|
||||
h->mb_x, h->mb_y);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (is_chroma && (h->left_samples_available & 0x8080)) {
|
||||
// mad cow disease mode, aka MBAFF + constrained_intra_pred
|
||||
mode = ALZHEIMER_DC_L0T_PRED8x8 +
|
||||
(!(h->left_samples_available & 0x8000)) +
|
||||
2 * (mode == DC_128_PRED8x8);
|
||||
}
|
||||
}
|
||||
|
||||
return mode;
|
||||
@@ -634,7 +634,7 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
|
||||
|
||||
#define STARTCODE_TEST \
|
||||
if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
|
||||
if (src[i + 2] != 3) { \
|
||||
if (src[i + 2] != 3 && src[i + 2] != 0) { \
|
||||
/* startcode, so we must be past the end */ \
|
||||
length = i; \
|
||||
} \
|
||||
@@ -707,7 +707,7 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
|
||||
if (src[si + 2] > 3) {
|
||||
dst[di++] = src[si++];
|
||||
dst[di++] = src[si++];
|
||||
} else if (src[si] == 0 && src[si + 1] == 0) {
|
||||
} else if (src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) {
|
||||
if (src[si + 2] == 3) { // escape
|
||||
dst[di++] = 0;
|
||||
dst[di++] = 0;
|
||||
@@ -1233,6 +1233,7 @@ static void free_tables(H264Context *h, int free_rbsp)
|
||||
av_buffer_pool_uninit(&h->ref_index_pool);
|
||||
|
||||
if (free_rbsp && h->DPB) {
|
||||
memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
|
||||
for (i = 0; i < MAX_PICTURE_COUNT; i++)
|
||||
unref_picture(h, &h->DPB[i]);
|
||||
av_freep(&h->DPB);
|
||||
@@ -1821,6 +1822,17 @@ static int decode_update_thread_context(AVCodecContext *dst,
|
||||
h->mb_type_pool = NULL;
|
||||
h->ref_index_pool = NULL;
|
||||
h->motion_val_pool = NULL;
|
||||
h->intra4x4_pred_mode= NULL;
|
||||
h->non_zero_count = NULL;
|
||||
h->slice_table_base = NULL;
|
||||
h->slice_table = NULL;
|
||||
h->cbp_table = NULL;
|
||||
h->chroma_pred_mode_table = NULL;
|
||||
memset(h->mvd_table, 0, sizeof(h->mvd_table));
|
||||
h->direct_table = NULL;
|
||||
h->list_counts = NULL;
|
||||
h->mb2b_xy = NULL;
|
||||
h->mb2br_xy = NULL;
|
||||
for (i = 0; i < 2; i++) {
|
||||
h->rbsp_buffer[i] = NULL;
|
||||
h->rbsp_buffer_size[i] = 0;
|
||||
@@ -2695,6 +2707,16 @@ int ff_pred_weight_table(H264Context *h)
|
||||
h->luma_log2_weight_denom = get_ue_golomb(&h->gb);
|
||||
if (h->sps.chroma_format_idc)
|
||||
h->chroma_log2_weight_denom = get_ue_golomb(&h->gb);
|
||||
|
||||
if (h->luma_log2_weight_denom > 7U) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", h->luma_log2_weight_denom);
|
||||
h->luma_log2_weight_denom = 0;
|
||||
}
|
||||
if (h->chroma_log2_weight_denom > 7U) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", h->chroma_log2_weight_denom);
|
||||
h->chroma_log2_weight_denom = 0;
|
||||
}
|
||||
|
||||
luma_def = 1 << h->luma_log2_weight_denom;
|
||||
chroma_def = 1 << h->chroma_log2_weight_denom;
|
||||
|
||||
@@ -3169,76 +3191,79 @@ static int h264_set_parameter_from_sps(H264Context *h)
|
||||
|
||||
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
|
||||
{
|
||||
enum AVPixelFormat pix_fmts[2];
|
||||
const enum AVPixelFormat *choices = pix_fmts;
|
||||
int i;
|
||||
|
||||
pix_fmts[1] = AV_PIX_FMT_NONE;
|
||||
|
||||
switch (h->sps.bit_depth_luma) {
|
||||
case 9:
|
||||
if (CHROMA444(h)) {
|
||||
if (h->avctx->colorspace == AVCOL_SPC_RGB) {
|
||||
return AV_PIX_FMT_GBRP9;
|
||||
pix_fmts[0] = AV_PIX_FMT_GBRP9;
|
||||
} else
|
||||
return AV_PIX_FMT_YUV444P9;
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV444P9;
|
||||
} else if (CHROMA422(h))
|
||||
return AV_PIX_FMT_YUV422P9;
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV422P9;
|
||||
else
|
||||
return AV_PIX_FMT_YUV420P9;
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV420P9;
|
||||
break;
|
||||
case 10:
|
||||
if (CHROMA444(h)) {
|
||||
if (h->avctx->colorspace == AVCOL_SPC_RGB) {
|
||||
return AV_PIX_FMT_GBRP10;
|
||||
pix_fmts[0] = AV_PIX_FMT_GBRP10;
|
||||
} else
|
||||
return AV_PIX_FMT_YUV444P10;
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV444P10;
|
||||
} else if (CHROMA422(h))
|
||||
return AV_PIX_FMT_YUV422P10;
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV422P10;
|
||||
else
|
||||
return AV_PIX_FMT_YUV420P10;
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV420P10;
|
||||
break;
|
||||
case 12:
|
||||
if (CHROMA444(h)) {
|
||||
if (h->avctx->colorspace == AVCOL_SPC_RGB) {
|
||||
return AV_PIX_FMT_GBRP12;
|
||||
pix_fmts[0] = AV_PIX_FMT_GBRP12;
|
||||
} else
|
||||
return AV_PIX_FMT_YUV444P12;
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV444P12;
|
||||
} else if (CHROMA422(h))
|
||||
return AV_PIX_FMT_YUV422P12;
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV422P12;
|
||||
else
|
||||
return AV_PIX_FMT_YUV420P12;
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV420P12;
|
||||
break;
|
||||
case 14:
|
||||
if (CHROMA444(h)) {
|
||||
if (h->avctx->colorspace == AVCOL_SPC_RGB) {
|
||||
return AV_PIX_FMT_GBRP14;
|
||||
pix_fmts[0] = AV_PIX_FMT_GBRP14;
|
||||
} else
|
||||
return AV_PIX_FMT_YUV444P14;
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV444P14;
|
||||
} else if (CHROMA422(h))
|
||||
return AV_PIX_FMT_YUV422P14;
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV422P14;
|
||||
else
|
||||
return AV_PIX_FMT_YUV420P14;
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV420P14;
|
||||
break;
|
||||
case 8:
|
||||
if (CHROMA444(h)) {
|
||||
if (h->avctx->colorspace == AVCOL_SPC_RGB) {
|
||||
av_log(h->avctx, AV_LOG_DEBUG, "Detected GBR colorspace.\n");
|
||||
return AV_PIX_FMT_GBR24P;
|
||||
} else if (h->avctx->colorspace == AVCOL_SPC_YCGCO) {
|
||||
if (h->avctx->colorspace == AVCOL_SPC_YCGCO)
|
||||
av_log(h->avctx, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
|
||||
}
|
||||
return h->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ444P
|
||||
: AV_PIX_FMT_YUV444P;
|
||||
if (h->avctx->colorspace == AVCOL_SPC_RGB)
|
||||
pix_fmts[0] = AV_PIX_FMT_GBRP;
|
||||
else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
|
||||
pix_fmts[0] = AV_PIX_FMT_YUVJ444P;
|
||||
else
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV444P;
|
||||
} else if (CHROMA422(h)) {
|
||||
return h->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ422P
|
||||
: AV_PIX_FMT_YUV422P;
|
||||
if (h->avctx->color_range == AVCOL_RANGE_JPEG)
|
||||
pix_fmts[0] = AV_PIX_FMT_YUVJ422P;
|
||||
else
|
||||
pix_fmts[0] = AV_PIX_FMT_YUV422P;
|
||||
} else {
|
||||
int i;
|
||||
const enum AVPixelFormat * fmt = h->avctx->codec->pix_fmts ?
|
||||
h->avctx->codec->pix_fmts :
|
||||
h->avctx->color_range == AVCOL_RANGE_JPEG ?
|
||||
h264_hwaccel_pixfmt_list_jpeg_420 :
|
||||
h264_hwaccel_pixfmt_list_420;
|
||||
|
||||
for (i=0; fmt[i] != AV_PIX_FMT_NONE; i++)
|
||||
if (fmt[i] == h->avctx->pix_fmt && !force_callback)
|
||||
return fmt[i];
|
||||
return ff_thread_get_format(h->avctx, fmt);
|
||||
if (h->avctx->codec->pix_fmts)
|
||||
choices = h->avctx->codec->pix_fmts;
|
||||
else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
|
||||
choices = h264_hwaccel_pixfmt_list_jpeg_420;
|
||||
else
|
||||
choices = h264_hwaccel_pixfmt_list_420;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
@@ -3246,6 +3271,11 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
|
||||
"Unsupported bit depth %d\n", h->sps.bit_depth_luma);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
for (i=0; choices[i] != AV_PIX_FMT_NONE; i++)
|
||||
if (choices[i] == h->avctx->pix_fmt && !force_callback)
|
||||
return choices[i];
|
||||
return ff_thread_get_format(h->avctx, choices);
|
||||
}
|
||||
|
||||
/* export coded and cropped frame dimensions to AVCodecContext */
|
||||
|
@@ -37,6 +37,7 @@
|
||||
#include "h264dsp.h"
|
||||
#include "h264pred.h"
|
||||
#include "h264qpel.h"
|
||||
#include "internal.h" // for avpriv_find_start_code()
|
||||
#include "rectangle.h"
|
||||
|
||||
#define MAX_SPS_COUNT 32
|
||||
@@ -285,6 +286,7 @@ typedef struct MMCO {
|
||||
* H264Context
|
||||
*/
|
||||
typedef struct H264Context {
|
||||
AVClass *av_class;
|
||||
AVCodecContext *avctx;
|
||||
VideoDSPContext vdsp;
|
||||
H264DSPContext h264dsp;
|
||||
@@ -1028,6 +1030,33 @@ static av_always_inline int get_dct8x8_allowed(H264Context *h)
|
||||
0x0001000100010001ULL));
|
||||
}
|
||||
|
||||
static inline int find_start_code(const uint8_t *buf, int buf_size,
|
||||
int buf_index, int next_avc)
|
||||
{
|
||||
uint32_t state = -1;
|
||||
|
||||
buf_index = avpriv_find_start_code(buf + buf_index, buf + next_avc + 1, &state) - buf - 1;
|
||||
|
||||
return FFMIN(buf_index, buf_size);
|
||||
}
|
||||
|
||||
static inline int get_avc_nalsize(H264Context *h, const uint8_t *buf,
|
||||
int buf_size, int *buf_index)
|
||||
{
|
||||
int i, nalsize = 0;
|
||||
|
||||
if (*buf_index >= buf_size - h->nal_length_size)
|
||||
return -1;
|
||||
|
||||
for (i = 0; i < h->nal_length_size; i++)
|
||||
nalsize = ((unsigned)nalsize << 8) | buf[(*buf_index)++];
|
||||
if (nalsize <= 0 || nalsize > buf_size - *buf_index) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"AVC: nal size %d\n", nalsize);
|
||||
return -1;
|
||||
}
|
||||
return nalsize;
|
||||
}
|
||||
void ff_h264_draw_horiz_band(H264Context *h, int y, int height);
|
||||
int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc);
|
||||
int ff_pred_weight_table(H264Context *h);
|
||||
|
@@ -202,10 +202,10 @@ static int scan_mmco_reset(AVCodecParserContext *s)
|
||||
*/
|
||||
static inline int parse_nal_units(AVCodecParserContext *s,
|
||||
AVCodecContext *avctx,
|
||||
const uint8_t *buf, int buf_size)
|
||||
const uint8_t * const buf, int buf_size)
|
||||
{
|
||||
H264Context *h = s->priv_data;
|
||||
const uint8_t *buf_end = buf + buf_size;
|
||||
int buf_index, next_avc;
|
||||
unsigned int pps_id;
|
||||
unsigned int slice_type;
|
||||
int state = -1, got_reset = 0;
|
||||
@@ -225,26 +225,26 @@ static inline int parse_nal_units(AVCodecParserContext *s,
|
||||
if (!buf_size)
|
||||
return 0;
|
||||
|
||||
buf_index = 0;
|
||||
next_avc = h->is_avc ? 0 : buf_size;
|
||||
for (;;) {
|
||||
int src_length, dst_length, consumed, nalsize = 0;
|
||||
if (h->is_avc) {
|
||||
int i;
|
||||
if (h->nal_length_size >= buf_end - buf) break;
|
||||
nalsize = 0;
|
||||
for (i = 0; i < h->nal_length_size; i++)
|
||||
nalsize = (nalsize << 8) | *buf++;
|
||||
if (nalsize <= 0 || nalsize > buf_end - buf) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "AVC: nal size %d\n", nalsize);
|
||||
|
||||
if (buf_index >= next_avc) {
|
||||
nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
|
||||
if (nalsize < 0)
|
||||
break;
|
||||
}
|
||||
src_length = nalsize;
|
||||
next_avc = buf_index + nalsize;
|
||||
} else {
|
||||
buf = avpriv_find_start_code(buf, buf_end, &state);
|
||||
if (buf >= buf_end)
|
||||
break;
|
||||
--buf;
|
||||
src_length = buf_end - buf;
|
||||
buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
|
||||
if (buf_index >= buf_size)
|
||||
break;
|
||||
if (buf_index >= next_avc)
|
||||
continue;
|
||||
}
|
||||
src_length = next_avc - buf_index;
|
||||
|
||||
state = buf[buf_index];
|
||||
switch (state & 0x1f) {
|
||||
case NAL_SLICE:
|
||||
case NAL_IDR_SLICE:
|
||||
@@ -261,10 +261,13 @@ static inline int parse_nal_units(AVCodecParserContext *s,
|
||||
}
|
||||
break;
|
||||
}
|
||||
ptr = ff_h264_decode_nal(h, buf, &dst_length, &consumed, src_length);
|
||||
ptr = ff_h264_decode_nal(h, buf + buf_index, &dst_length,
|
||||
&consumed, src_length);
|
||||
if (ptr == NULL || dst_length < 0)
|
||||
break;
|
||||
|
||||
buf_index += consumed;
|
||||
|
||||
init_get_bits(&h->gb, ptr, 8 * dst_length);
|
||||
switch (h->nal_unit_type) {
|
||||
case NAL_SPS:
|
||||
@@ -439,7 +442,6 @@ static inline int parse_nal_units(AVCodecParserContext *s,
|
||||
|
||||
return 0; /* no need to evaluate the rest */
|
||||
}
|
||||
buf += h->is_avc ? nalsize : consumed;
|
||||
}
|
||||
if (q264)
|
||||
return 0;
|
||||
|
@@ -112,7 +112,7 @@ static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
|
||||
if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
|
||||
goto fail;
|
||||
|
||||
s->filter_slice_edges = av_malloc(ctb_count);
|
||||
s->filter_slice_edges = av_mallocz(ctb_count);
|
||||
s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
|
||||
sizeof(*s->tab_slice_address));
|
||||
s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
|
||||
@@ -2568,17 +2568,30 @@ static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
|
||||
|
||||
if (s->nals_allocated < s->nb_nals + 1) {
|
||||
int new_size = s->nals_allocated + 1;
|
||||
HEVCNAL *tmp = av_realloc_array(s->nals, new_size, sizeof(*tmp));
|
||||
void *tmp = av_realloc_array(s->nals, new_size, sizeof(*s->nals));
|
||||
ret = AVERROR(ENOMEM);
|
||||
if (!tmp) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
s->nals = tmp;
|
||||
memset(s->nals + s->nals_allocated, 0,
|
||||
(new_size - s->nals_allocated) * sizeof(*tmp));
|
||||
av_reallocp_array(&s->skipped_bytes_nal, new_size, sizeof(*s->skipped_bytes_nal));
|
||||
av_reallocp_array(&s->skipped_bytes_pos_size_nal, new_size, sizeof(*s->skipped_bytes_pos_size_nal));
|
||||
av_reallocp_array(&s->skipped_bytes_pos_nal, new_size, sizeof(*s->skipped_bytes_pos_nal));
|
||||
(new_size - s->nals_allocated) * sizeof(*s->nals));
|
||||
|
||||
tmp = av_realloc_array(s->skipped_bytes_nal, new_size, sizeof(*s->skipped_bytes_nal));
|
||||
if (!tmp)
|
||||
goto fail;
|
||||
s->skipped_bytes_nal = tmp;
|
||||
|
||||
tmp = av_realloc_array(s->skipped_bytes_pos_size_nal, new_size, sizeof(*s->skipped_bytes_pos_size_nal));
|
||||
if (!tmp)
|
||||
goto fail;
|
||||
s->skipped_bytes_pos_size_nal = tmp;
|
||||
|
||||
tmp = av_realloc_array(s->skipped_bytes_pos_nal, new_size, sizeof(*s->skipped_bytes_pos_nal));
|
||||
if (!tmp)
|
||||
goto fail;
|
||||
s->skipped_bytes_pos_nal = tmp;
|
||||
|
||||
s->skipped_bytes_pos_size_nal[s->nals_allocated] = 1024; // initial buffer size
|
||||
s->skipped_bytes_pos_nal[s->nals_allocated] = av_malloc_array(s->skipped_bytes_pos_size_nal[s->nals_allocated], sizeof(*s->skipped_bytes_pos));
|
||||
s->nals_allocated = new_size;
|
||||
|
@@ -843,6 +843,11 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
|
||||
sps->long_term_ref_pics_present_flag = get_bits1(gb);
|
||||
if (sps->long_term_ref_pics_present_flag) {
|
||||
sps->num_long_term_ref_pics_sps = get_ue_golomb_long(gb);
|
||||
if (sps->num_long_term_ref_pics_sps > 31U) {
|
||||
av_log(0, AV_LOG_ERROR, "num_long_term_ref_pics_sps %d is out of range.\n",
|
||||
sps->num_long_term_ref_pics_sps);
|
||||
goto err;
|
||||
}
|
||||
for (i = 0; i < sps->num_long_term_ref_pics_sps; i++) {
|
||||
sps->lt_ref_pic_poc_lsb_sps[i] = get_bits(gb, sps->log2_max_poc_lsb);
|
||||
sps->used_by_curr_pic_lt_sps_flag[i] = get_bits1(gb);
|
||||
@@ -1067,6 +1072,14 @@ int ff_hevc_decode_nal_pps(HEVCContext *s)
|
||||
if (pps->cu_qp_delta_enabled_flag)
|
||||
pps->diff_cu_qp_delta_depth = get_ue_golomb_long(gb);
|
||||
|
||||
if (pps->diff_cu_qp_delta_depth < 0 ||
|
||||
pps->diff_cu_qp_delta_depth > sps->log2_diff_max_min_coding_block_size) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "diff_cu_qp_delta_depth %d is invalid\n",
|
||||
pps->diff_cu_qp_delta_depth);
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto err;
|
||||
}
|
||||
|
||||
pps->cb_qp_offset = get_se_golomb(gb);
|
||||
if (pps->cb_qp_offset < -12 || pps->cb_qp_offset > 12) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "pps_cb_qp_offset out of range: %d\n",
|
||||
|
@@ -879,14 +879,14 @@ static int imc_decode_block(AVCodecContext *avctx, IMCContext *q, int ch)
|
||||
|
||||
flag = get_bits1(&q->gb);
|
||||
if (stream_format_code & 0x1)
|
||||
imc_decode_level_coefficients_raw(q, chctx->levlCoeffBuf,
|
||||
chctx->flcoeffs1, chctx->flcoeffs2);
|
||||
else if (stream_format_code & 0x1)
|
||||
imc_read_level_coeffs_raw(q, stream_format_code, chctx->levlCoeffBuf);
|
||||
else
|
||||
imc_read_level_coeffs(q, stream_format_code, chctx->levlCoeffBuf);
|
||||
|
||||
if (stream_format_code & 0x4)
|
||||
if (stream_format_code & 0x1)
|
||||
imc_decode_level_coefficients_raw(q, chctx->levlCoeffBuf,
|
||||
chctx->flcoeffs1, chctx->flcoeffs2);
|
||||
else if (stream_format_code & 0x4)
|
||||
imc_decode_level_coefficients(q, chctx->levlCoeffBuf,
|
||||
chctx->flcoeffs1, chctx->flcoeffs2);
|
||||
else
|
||||
|
@@ -94,7 +94,7 @@ typedef struct Indeo3DecodeContext {
|
||||
|
||||
int16_t width, height;
|
||||
uint32_t frame_num; ///< current frame number (zero-based)
|
||||
uint32_t data_size; ///< size of the frame data in bytes
|
||||
int data_size; ///< size of the frame data in bytes
|
||||
uint16_t frame_flags; ///< frame properties
|
||||
uint8_t cb_offset; ///< needed for selecting VQ tables
|
||||
uint8_t buf_sel; ///< active frame buffer: 0 - primary, 1 -secondary
|
||||
@@ -899,7 +899,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
|
||||
GetByteContext gb;
|
||||
const uint8_t *bs_hdr;
|
||||
uint32_t frame_num, word2, check_sum, data_size;
|
||||
uint32_t y_offset, u_offset, v_offset, starts[3], ends[3];
|
||||
int y_offset, u_offset, v_offset;
|
||||
uint32_t starts[3], ends[3];
|
||||
uint16_t height, width;
|
||||
int i, j;
|
||||
|
||||
@@ -981,7 +982,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
|
||||
ctx->y_data_size = ends[0] - starts[0];
|
||||
ctx->v_data_size = ends[1] - starts[1];
|
||||
ctx->u_data_size = ends[2] - starts[2];
|
||||
if (FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
|
||||
if (FFMIN3(y_offset, v_offset, u_offset) < 0 ||
|
||||
FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
|
||||
FFMIN3(y_offset, v_offset, u_offset) < gb.buffer - bs_hdr + 16 ||
|
||||
FFMIN3(ctx->y_data_size, ctx->v_data_size, ctx->u_data_size) <= 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "One of the y/u/v offsets is invalid\n");
|
||||
|
@@ -217,6 +217,11 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s,
|
||||
x += stride;
|
||||
}
|
||||
|
||||
if (x >= w) {
|
||||
av_log(NULL, AV_LOG_ERROR, "run overflow\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/* decode run termination value */
|
||||
Rb = R(last, x);
|
||||
RItype = (FFABS(Ra - Rb) <= state->near) ? 1 : 0;
|
||||
|
@@ -43,6 +43,13 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
JvContext *s = avctx->priv_data;
|
||||
|
||||
if (!avctx->width || !avctx->height ||
|
||||
(avctx->width & 7) || (avctx->height & 7)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid video dimensions: %dx%d\n",
|
||||
avctx->width, avctx->height);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
s->frame = av_frame_alloc();
|
||||
if (!s->frame)
|
||||
return AVERROR(ENOMEM);
|
||||
|
@@ -96,8 +96,7 @@ static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
|
||||
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
|
||||
return ret;
|
||||
|
||||
WebRtcIlbcfix_DecodeImpl((WebRtc_Word16*) frame->data[0],
|
||||
(const WebRtc_UWord16*) buf, &s->decoder, 1);
|
||||
WebRtcIlbcfix_DecodeImpl((int16_t *) frame->data[0], (const uint16_t *) buf, &s->decoder, 1);
|
||||
|
||||
*got_frame_ptr = 1;
|
||||
|
||||
@@ -170,7 +169,7 @@ static int ilbc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
if ((ret = ff_alloc_packet2(avctx, avpkt, 50)) < 0)
|
||||
return ret;
|
||||
|
||||
WebRtcIlbcfix_EncodeImpl((WebRtc_UWord16*) avpkt->data, (const WebRtc_Word16*) frame->data[0], &s->encoder);
|
||||
WebRtcIlbcfix_EncodeImpl((uint16_t *) avpkt->data, (const int16_t *) frame->data[0], &s->encoder);
|
||||
|
||||
avpkt->size = s->encoder.no_of_bytes;
|
||||
*got_packet_ptr = 1;
|
||||
|
@@ -239,9 +239,9 @@ int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
|
||||
|
||||
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
||||
{
|
||||
int len, nb_components, i, width, height, pix_fmt_id, ret;
|
||||
int h_count[MAX_COMPONENTS];
|
||||
int v_count[MAX_COMPONENTS];
|
||||
int len, nb_components, i, width, height, bits, pix_fmt_id, ret;
|
||||
int h_count[MAX_COMPONENTS] = { 0 };
|
||||
int v_count[MAX_COMPONENTS] = { 0 };
|
||||
|
||||
s->cur_scan = 0;
|
||||
s->upscale_h = s->upscale_v = 0;
|
||||
@@ -249,11 +249,11 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
||||
/* XXX: verify len field validity */
|
||||
len = get_bits(&s->gb, 16);
|
||||
s->avctx->bits_per_raw_sample =
|
||||
s->bits = get_bits(&s->gb, 8);
|
||||
bits = get_bits(&s->gb, 8);
|
||||
|
||||
if (s->pegasus_rct)
|
||||
s->bits = 9;
|
||||
if (s->bits == 9 && !s->pegasus_rct)
|
||||
bits = 9;
|
||||
if (bits == 9 && !s->pegasus_rct)
|
||||
s->rct = 1; // FIXME ugly
|
||||
|
||||
if(s->lossless && s->avctx->lowres){
|
||||
@@ -283,7 +283,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
if (s->ls && !(s->bits <= 8 || nb_components == 1)) {
|
||||
if (s->ls && !(bits <= 8 || nb_components == 1)) {
|
||||
avpriv_report_missing_feature(s->avctx,
|
||||
"JPEG-LS that is not <= 8 "
|
||||
"bits/component or 16-bit gray");
|
||||
@@ -292,8 +292,6 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
||||
s->nb_components = nb_components;
|
||||
s->h_max = 1;
|
||||
s->v_max = 1;
|
||||
memset(h_count, 0, sizeof(h_count));
|
||||
memset(v_count, 0, sizeof(v_count));
|
||||
for (i = 0; i < nb_components; i++) {
|
||||
/* component id */
|
||||
s->component_id[i] = get_bits(&s->gb, 8) - 1;
|
||||
@@ -328,12 +326,13 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
||||
|
||||
|
||||
/* if different size, realloc/alloc picture */
|
||||
if ( width != s->width || height != s->height
|
||||
|| memcmp(s->h_count, h_count, sizeof(h_count))
|
||||
|| memcmp(s->v_count, v_count, sizeof(v_count))) {
|
||||
if (width != s->width || height != s->height || bits != s->bits ||
|
||||
memcmp(s->h_count, h_count, sizeof(h_count)) ||
|
||||
memcmp(s->v_count, v_count, sizeof(v_count))) {
|
||||
|
||||
s->width = width;
|
||||
s->height = height;
|
||||
s->bits = bits;
|
||||
memcpy(s->h_count, h_count, sizeof(h_count));
|
||||
memcpy(s->v_count, v_count, sizeof(v_count));
|
||||
s->interlaced = 0;
|
||||
@@ -1551,6 +1550,8 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
|
||||
}
|
||||
|
||||
if (id == AV_RB32("LJIF")) {
|
||||
int rgb = s->rgb;
|
||||
int pegasus_rct = s->pegasus_rct;
|
||||
if (s->avctx->debug & FF_DEBUG_PICT_INFO)
|
||||
av_log(s->avctx, AV_LOG_INFO,
|
||||
"Pegasus lossless jpeg header found\n");
|
||||
@@ -1560,17 +1561,27 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
|
||||
skip_bits(&s->gb, 16); /* unknown always 0? */
|
||||
switch (i=get_bits(&s->gb, 8)) {
|
||||
case 1:
|
||||
s->rgb = 1;
|
||||
s->pegasus_rct = 0;
|
||||
rgb = 1;
|
||||
pegasus_rct = 0;
|
||||
break;
|
||||
case 2:
|
||||
s->rgb = 1;
|
||||
s->pegasus_rct = 1;
|
||||
rgb = 1;
|
||||
pegasus_rct = 1;
|
||||
break;
|
||||
default:
|
||||
av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
|
||||
}
|
||||
|
||||
len -= 9;
|
||||
if (s->got_picture)
|
||||
if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
|
||||
av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
s->rgb = rgb;
|
||||
s->pegasus_rct = pegasus_rct;
|
||||
|
||||
goto out;
|
||||
}
|
||||
if (id == AV_RL32("colr") && len > 0) {
|
||||
|
@@ -61,6 +61,13 @@ static av_cold int mm_decode_init(AVCodecContext *avctx)
|
||||
|
||||
avctx->pix_fmt = AV_PIX_FMT_PAL8;
|
||||
|
||||
if (!avctx->width || !avctx->height ||
|
||||
(avctx->width & 1) || (avctx->height & 1)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid video dimensions: %dx%d\n",
|
||||
avctx->width, avctx->height);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
s->frame = av_frame_alloc();
|
||||
if (!s->frame)
|
||||
return AVERROR(ENOMEM);
|
||||
@@ -111,7 +118,7 @@ static int mm_decode_intra(MmContext * s, int half_horiz, int half_vert)
|
||||
|
||||
if (color) {
|
||||
memset(s->frame->data[0] + y*s->frame->linesize[0] + x, color, run_length);
|
||||
if (half_vert)
|
||||
if (half_vert && y + half_vert < s->avctx->height)
|
||||
memset(s->frame->data[0] + (y+1)*s->frame->linesize[0] + x, color, run_length);
|
||||
}
|
||||
x+= run_length;
|
||||
|
@@ -189,7 +189,13 @@ static av_always_inline int cmp_inline(MpegEncContext *s, const int x, const int
|
||||
int uvdxy; /* no, it might not be used uninitialized */
|
||||
if(dxy){
|
||||
if(qpel){
|
||||
c->qpel_put[size][dxy](c->temp, ref[0] + x + y*stride, stride); //FIXME prototype (add h)
|
||||
if (h << size == 16) {
|
||||
c->qpel_put[size][dxy](c->temp, ref[0] + x + y*stride, stride); //FIXME prototype (add h)
|
||||
} else if (size == 0 && h == 8) {
|
||||
c->qpel_put[1][dxy](c->temp , ref[0] + x + y*stride , stride);
|
||||
c->qpel_put[1][dxy](c->temp + 8, ref[0] + x + y*stride + 8, stride);
|
||||
} else
|
||||
av_assert2(0);
|
||||
if(chroma){
|
||||
int cx= hx/2;
|
||||
int cy= hy/2;
|
||||
|
@@ -1206,6 +1206,16 @@ static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
|
||||
AV_PIX_FMT_NONE
|
||||
};
|
||||
|
||||
static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
|
||||
AV_PIX_FMT_YUV422P,
|
||||
AV_PIX_FMT_NONE
|
||||
};
|
||||
|
||||
static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
|
||||
AV_PIX_FMT_YUV444P,
|
||||
AV_PIX_FMT_NONE
|
||||
};
|
||||
|
||||
static inline int uses_vdpau(AVCodecContext *avctx) {
|
||||
return avctx->pix_fmt == AV_PIX_FMT_VDPAU_MPEG1 || avctx->pix_fmt == AV_PIX_FMT_VDPAU_MPEG2;
|
||||
}
|
||||
@@ -1214,16 +1224,18 @@ static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
|
||||
{
|
||||
Mpeg1Context *s1 = avctx->priv_data;
|
||||
MpegEncContext *s = &s1->mpeg_enc_ctx;
|
||||
const enum AVPixelFormat *pix_fmts;
|
||||
|
||||
if (s->chroma_format < 2)
|
||||
return ff_thread_get_format(avctx,
|
||||
avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
|
||||
pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
|
||||
mpeg1_hwaccel_pixfmt_list_420 :
|
||||
mpeg2_hwaccel_pixfmt_list_420);
|
||||
mpeg2_hwaccel_pixfmt_list_420;
|
||||
else if (s->chroma_format == 2)
|
||||
return AV_PIX_FMT_YUV422P;
|
||||
pix_fmts = mpeg12_pixfmt_list_422;
|
||||
else
|
||||
return AV_PIX_FMT_YUV444P;
|
||||
pix_fmts = mpeg12_pixfmt_list_444;
|
||||
|
||||
return ff_thread_get_format(avctx, pix_fmts);
|
||||
}
|
||||
|
||||
static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx)
|
||||
|
@@ -73,20 +73,21 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
|
||||
if (i > 4)
|
||||
s->header_count = -2;
|
||||
} else {
|
||||
int header_threshold = avctx->codec_id != AV_CODEC_ID_NONE && avctx->codec_id != codec_id;
|
||||
if((state&SAME_HEADER_MASK) != (s->header&SAME_HEADER_MASK) && s->header)
|
||||
s->header_count= -3;
|
||||
s->header= state;
|
||||
s->header_count++;
|
||||
s->frame_size = ret-4;
|
||||
|
||||
if (s->header_count > 0 + (avctx->codec_id != AV_CODEC_ID_NONE && avctx->codec_id != codec_id)) {
|
||||
if (s->header_count > header_threshold) {
|
||||
avctx->sample_rate= sr;
|
||||
avctx->channels = channels;
|
||||
s1->duration = frame_size;
|
||||
avctx->codec_id = codec_id;
|
||||
if (s->no_bitrate || !avctx->bit_rate) {
|
||||
s->no_bitrate = 1;
|
||||
avctx->bit_rate += (bit_rate - avctx->bit_rate) / s->header_count;
|
||||
avctx->bit_rate += (bit_rate - avctx->bit_rate) / (s->header_count - header_threshold);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
@@ -1368,6 +1368,9 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
|
||||
{
|
||||
int i, err = 0;
|
||||
|
||||
if (!s->context_initialized)
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
if (s->slice_context_count > 1) {
|
||||
for (i = 0; i < s->slice_context_count; i++) {
|
||||
free_duplicate_context(s->thread_context[i]);
|
||||
@@ -1397,8 +1400,8 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
|
||||
s->mb_height = (s->height + 15) / 16;
|
||||
|
||||
if ((s->width || s->height) &&
|
||||
av_image_check_size(s->width, s->height, 0, s->avctx))
|
||||
return AVERROR_INVALIDDATA;
|
||||
(err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
|
||||
goto fail;
|
||||
|
||||
if ((err = init_context_frame(s)))
|
||||
goto fail;
|
||||
@@ -1414,7 +1417,7 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
|
||||
}
|
||||
|
||||
for (i = 0; i < nb_slices; i++) {
|
||||
if (init_duplicate_context(s->thread_context[i]) < 0)
|
||||
if ((err = init_duplicate_context(s->thread_context[i])) < 0)
|
||||
goto fail;
|
||||
s->thread_context[i]->start_mb_y =
|
||||
(s->mb_height * (i) + nb_slices / 2) / nb_slices;
|
||||
|
@@ -94,8 +94,8 @@ static const AVOption avcodec_options[] = {
|
||||
{"extradata_size", NULL, OFFSET(extradata_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
|
||||
{"time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, INT_MIN, INT_MAX},
|
||||
{"g", "set the group of picture (GOP) size", OFFSET(gop_size), AV_OPT_TYPE_INT, {.i64 = 12 }, INT_MIN, INT_MAX, V|E},
|
||||
{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|D|E},
|
||||
{"ac", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|D|E},
|
||||
{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
|
||||
{"ac", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
|
||||
{"cutoff", "set cutoff bandwidth", OFFSET(cutoff), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
|
||||
{"frame_size", NULL, OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
|
||||
{"frame_number", NULL, OFFSET(frame_number), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
|
||||
|
@@ -565,6 +565,12 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
case MKTAG('I', 'H', 'D', 'R'):
|
||||
if (length != 13)
|
||||
goto fail;
|
||||
|
||||
if (s->state & PNG_IDAT) {
|
||||
av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
s->width = bytestream2_get_be32(&s->gb);
|
||||
s->height = bytestream2_get_be32(&s->gb);
|
||||
if (av_image_check_size(s->width, s->height, 0, avctx)) {
|
||||
@@ -633,7 +639,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
} else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
|
||||
s->color_type == PNG_COLOR_TYPE_PALETTE) {
|
||||
avctx->pix_fmt = AV_PIX_FMT_PAL8;
|
||||
} else if (s->bit_depth == 1) {
|
||||
} else if (s->bit_depth == 1 && s->bits_per_pixel == 1) {
|
||||
avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
|
||||
} else if (s->bit_depth == 8 &&
|
||||
s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
|
||||
@@ -841,10 +847,11 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
int i, j;
|
||||
uint8_t *pd = p->data[0];
|
||||
uint8_t *pd_last = s->last_picture.f->data[0];
|
||||
int ls = FFMIN(av_image_get_linesize(p->format, s->width, 0), s->width * s->bpp);
|
||||
|
||||
ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
|
||||
for (j = 0; j < s->height; j++) {
|
||||
for (i = 0; i < s->width * s->bpp; i++) {
|
||||
for (i = 0; i < ls; i++) {
|
||||
pd[i] += pd_last[i];
|
||||
}
|
||||
pd += s->image_linesize;
|
||||
|
@@ -210,6 +210,7 @@ typedef struct ProresContext {
|
||||
int bits_per_mb;
|
||||
int force_quant;
|
||||
int alpha_bits;
|
||||
int warn;
|
||||
|
||||
char *vendor;
|
||||
int quant_sel;
|
||||
@@ -566,15 +567,14 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic,
|
||||
get_alpha_data(ctx, src, linesize, xp, yp,
|
||||
pwidth, avctx->height / ctx->pictures_per_frame,
|
||||
ctx->blocks[0], mbs_per_slice, ctx->alpha_bits);
|
||||
sizes[i] = encode_alpha_plane(ctx, pb,
|
||||
mbs_per_slice, ctx->blocks[0],
|
||||
quant);
|
||||
sizes[i] = encode_alpha_plane(ctx, pb, mbs_per_slice,
|
||||
ctx->blocks[0], quant);
|
||||
}
|
||||
total_size += sizes[i];
|
||||
if (put_bits_left(pb) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Serious underevaluation of"
|
||||
"required buffer size");
|
||||
return AVERROR_BUFFER_TOO_SMALL;
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Underestimated required buffer size.\n");
|
||||
return AVERROR_BUG;
|
||||
}
|
||||
}
|
||||
return total_size;
|
||||
@@ -940,6 +940,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
int slice_hdr_size = 2 + 2 * (ctx->num_planes - 1);
|
||||
int frame_size, picture_size, slice_size;
|
||||
int pkt_size, ret;
|
||||
int max_slice_size = (ctx->frame_size_upper_bound - 200) / (ctx->pictures_per_frame * ctx->slices_per_picture + 1);
|
||||
uint8_t frame_flags;
|
||||
|
||||
*avctx->coded_frame = *pic;
|
||||
@@ -1024,8 +1025,42 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
bytestream_put_byte(&buf, slice_hdr_size << 3);
|
||||
slice_hdr = buf;
|
||||
buf += slice_hdr_size - 1;
|
||||
if (pkt_size <= buf - orig_buf + 2 * max_slice_size) {
|
||||
uint8_t *start = pkt->data;
|
||||
// Recompute new size according to max_slice_size
|
||||
// and deduce delta
|
||||
int delta = 200 + (ctx->pictures_per_frame *
|
||||
ctx->slices_per_picture + 1) *
|
||||
max_slice_size - pkt_size;
|
||||
|
||||
delta = FFMAX(delta, 2 * max_slice_size);
|
||||
ctx->frame_size_upper_bound += delta;
|
||||
|
||||
if (!ctx->warn) {
|
||||
avpriv_request_sample(avctx,
|
||||
"Packet too small: is %i,"
|
||||
" needs %i (slice: %i). "
|
||||
"Correct allocation",
|
||||
pkt_size, delta, max_slice_size);
|
||||
ctx->warn = 1;
|
||||
}
|
||||
|
||||
ret = av_grow_packet(pkt, delta);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
pkt_size += delta;
|
||||
// restore pointers
|
||||
orig_buf = pkt->data + (orig_buf - start);
|
||||
buf = pkt->data + (buf - start);
|
||||
picture_size_pos = pkt->data + (picture_size_pos - start);
|
||||
slice_sizes = pkt->data + (slice_sizes - start);
|
||||
slice_hdr = pkt->data + (slice_hdr - start);
|
||||
tmp = pkt->data + (tmp - start);
|
||||
}
|
||||
init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)) * 8);
|
||||
ret = encode_slice(avctx, pic, &pb, sizes, x, y, q, mbs_per_slice);
|
||||
ret = encode_slice(avctx, pic, &pb, sizes, x, y, q,
|
||||
mbs_per_slice);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@@ -1037,6 +1072,8 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
}
|
||||
bytestream_put_be16(&slice_sizes, slice_size);
|
||||
buf += slice_size - slice_hdr_size;
|
||||
if (max_slice_size < slice_size)
|
||||
max_slice_size = slice_size;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1209,16 +1246,22 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
ctx->bits_per_mb = ls * 8;
|
||||
if (ctx->chroma_factor == CFACTOR_Y444)
|
||||
ctx->bits_per_mb += ls * 4;
|
||||
if (ctx->num_planes == 4)
|
||||
ctx->bits_per_mb += ls * 4;
|
||||
}
|
||||
|
||||
ctx->frame_size_upper_bound = ctx->pictures_per_frame *
|
||||
ctx->slices_per_picture *
|
||||
ctx->frame_size_upper_bound = (ctx->pictures_per_frame *
|
||||
ctx->slices_per_picture + 1) *
|
||||
(2 + 2 * ctx->num_planes +
|
||||
(mps * ctx->bits_per_mb) / 8)
|
||||
+ 200;
|
||||
|
||||
if (ctx->alpha_bits) {
|
||||
// The alpha plane is run-coded and might exceed the bit budget.
|
||||
ctx->frame_size_upper_bound += (ctx->pictures_per_frame *
|
||||
ctx->slices_per_picture + 1) *
|
||||
/* num pixels per slice */ (ctx->mbs_per_slice * 256 *
|
||||
/* bits per pixel */ (1 + ctx->alpha_bits + 1) + 7 >> 3);
|
||||
}
|
||||
|
||||
avctx->codec_tag = ctx->profile_info->tag;
|
||||
|
||||
av_log(avctx, AV_LOG_DEBUG,
|
||||
|
@@ -120,12 +120,13 @@ static void av_noinline qpeg_decode_inter(QpegContext *qctx, uint8_t *dst,
|
||||
int filled = 0;
|
||||
int orig_height;
|
||||
|
||||
if(!refdata)
|
||||
refdata= dst;
|
||||
|
||||
/* copy prev frame */
|
||||
for(i = 0; i < height; i++)
|
||||
memcpy(dst + (i * stride), refdata + (i * stride), width);
|
||||
if (refdata) {
|
||||
/* copy prev frame */
|
||||
for (i = 0; i < height; i++)
|
||||
memcpy(dst + (i * stride), refdata + (i * stride), width);
|
||||
} else {
|
||||
refdata = dst;
|
||||
}
|
||||
|
||||
orig_height = height;
|
||||
height--;
|
||||
@@ -163,7 +164,7 @@ static void av_noinline qpeg_decode_inter(QpegContext *qctx, uint8_t *dst,
|
||||
|
||||
/* check motion vector */
|
||||
if ((me_x + filled < 0) || (me_x + me_w + filled > width) ||
|
||||
(height - me_y - me_h < 0) || (height - me_y > orig_height) ||
|
||||
(height - me_y - me_h < 0) || (height - me_y >= orig_height) ||
|
||||
(filled + me_w > width) || (height - me_h < 0))
|
||||
av_log(NULL, AV_LOG_ERROR, "Bogus motion vector (%i,%i), block size %ix%i at %i,%i\n",
|
||||
me_x, me_y, me_w, me_h, filled, height);
|
||||
|
@@ -34,7 +34,6 @@
|
||||
#include "celp_filters.h"
|
||||
#include "ra144.h"
|
||||
|
||||
|
||||
static av_cold int ra144_encode_close(AVCodecContext *avctx)
|
||||
{
|
||||
RA144Context *ractx = avctx->priv_data;
|
||||
|
@@ -136,6 +136,9 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
|
||||
context->frame_size = avpicture_get_size(avctx->pix_fmt, avctx->width,
|
||||
avctx->height);
|
||||
}
|
||||
if (context->frame_size < 0)
|
||||
return context->frame_size;
|
||||
|
||||
|
||||
if ((avctx->extradata_size >= 9 &&
|
||||
!memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
|
||||
|
@@ -70,7 +70,7 @@ typedef struct SmcContext {
|
||||
row_ptr += stride * 4; \
|
||||
} \
|
||||
total_blocks--; \
|
||||
if (total_blocks < 0) \
|
||||
if (total_blocks < !!n_blocks) \
|
||||
{ \
|
||||
av_log(s->avctx, AV_LOG_INFO, "warning: block counter just went negative (this should not happen)\n"); \
|
||||
return; \
|
||||
|
@@ -655,7 +655,10 @@ static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, i
|
||||
if(v){
|
||||
v= 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
|
||||
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3*ff_quant3bA[t&0xFF]]);
|
||||
|
||||
if ((uint16_t)v != v) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
|
||||
v = 1;
|
||||
}
|
||||
xc->x=x;
|
||||
(xc++)->coeff= v;
|
||||
}
|
||||
@@ -665,6 +668,10 @@ static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, i
|
||||
else run= INT_MAX;
|
||||
v= 2*(get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
|
||||
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3]);
|
||||
if ((uint16_t)v != v) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
|
||||
v = 1;
|
||||
}
|
||||
|
||||
xc->x=x;
|
||||
(xc++)->coeff= v;
|
||||
|
@@ -499,7 +499,7 @@ static int svq1_decode_delta_block(AVCodecContext *avctx, HpelDSPContext *hdsp,
|
||||
return result;
|
||||
}
|
||||
|
||||
static void svq1_parse_string(GetBitContext *bitbuf, uint8_t *out)
|
||||
static void svq1_parse_string(GetBitContext *bitbuf, uint8_t out[257])
|
||||
{
|
||||
uint8_t seed;
|
||||
int i;
|
||||
@@ -511,6 +511,7 @@ static void svq1_parse_string(GetBitContext *bitbuf, uint8_t *out)
|
||||
out[i] = get_bits(bitbuf, 8) ^ seed;
|
||||
seed = string_table[out[i] ^ seed];
|
||||
}
|
||||
out[i] = 0;
|
||||
}
|
||||
|
||||
static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)
|
||||
@@ -553,12 +554,12 @@ static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)
|
||||
}
|
||||
|
||||
if ((s->frame_code ^ 0x10) >= 0x50) {
|
||||
uint8_t msg[256];
|
||||
uint8_t msg[257];
|
||||
|
||||
svq1_parse_string(bitbuf, msg);
|
||||
|
||||
av_log(avctx, AV_LOG_INFO,
|
||||
"embedded message:\n%s\n", (char *)msg);
|
||||
"embedded message:\n%s\n", ((char *)msg) + 1);
|
||||
}
|
||||
|
||||
skip_bits(bitbuf, 2);
|
||||
|
@@ -1168,7 +1168,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
|
||||
|
||||
h->cur_pic_ptr = s->cur_pic;
|
||||
av_frame_unref(&h->cur_pic.f);
|
||||
h->cur_pic = *s->cur_pic;
|
||||
memcpy(&h->cur_pic.tf, &s->cur_pic->tf, sizeof(h->cur_pic) - offsetof(Picture, tf));
|
||||
ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
@@ -604,13 +604,13 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
|
||||
s->height = value;
|
||||
break;
|
||||
case TIFF_BPP:
|
||||
s->bppcount = count;
|
||||
if (count > 4) {
|
||||
if (count > 4U) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"This format is not supported (bpp=%d, %d components)\n",
|
||||
s->bpp, count);
|
||||
value, count);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
s->bppcount = count;
|
||||
if (count == 1)
|
||||
s->bpp = value;
|
||||
else {
|
||||
@@ -628,6 +628,13 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
|
||||
s->bpp = -1;
|
||||
}
|
||||
}
|
||||
if (s->bpp > 64U) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"This format is not supported (bpp=%d, %d components)\n",
|
||||
s->bpp, count);
|
||||
s->bpp = 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
break;
|
||||
case TIFF_SAMPLES_PER_PIXEL:
|
||||
if (count != 1) {
|
||||
|
@@ -65,6 +65,9 @@
|
||||
#include "compat/os2threads.h"
|
||||
#endif
|
||||
|
||||
#include "libavutil/ffversion.h"
|
||||
const char av_codec_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
|
||||
|
||||
#if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS
|
||||
static int default_lockmgr_cb(void **arg, enum AVLockOp op)
|
||||
{
|
||||
@@ -281,6 +284,12 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
|
||||
int i;
|
||||
int w_align = 1;
|
||||
int h_align = 1;
|
||||
AVPixFmtDescriptor const *desc = av_pix_fmt_desc_get(s->pix_fmt);
|
||||
|
||||
if (desc) {
|
||||
w_align = 1 << desc->log2_chroma_w;
|
||||
h_align = 1 << desc->log2_chroma_h;
|
||||
}
|
||||
|
||||
switch (s->pix_fmt) {
|
||||
case AV_PIX_FMT_YUV420P:
|
||||
@@ -357,6 +366,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
|
||||
case AV_PIX_FMT_GBRP12BE:
|
||||
case AV_PIX_FMT_GBRP14LE:
|
||||
case AV_PIX_FMT_GBRP14BE:
|
||||
case AV_PIX_FMT_GBRP16LE:
|
||||
case AV_PIX_FMT_GBRP16BE:
|
||||
w_align = 16; //FIXME assume 16 pixel per macroblock
|
||||
h_align = 16 * 2; // interlaced needs 2 macroblocks height
|
||||
break;
|
||||
@@ -386,6 +397,10 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
|
||||
w_align = 4;
|
||||
h_align = 4;
|
||||
}
|
||||
if (s->codec_id == AV_CODEC_ID_JV) {
|
||||
w_align = 8;
|
||||
h_align = 8;
|
||||
}
|
||||
break;
|
||||
case AV_PIX_FMT_BGR24:
|
||||
if ((s->codec_id == AV_CODEC_ID_MSZH) ||
|
||||
@@ -401,8 +416,6 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
|
||||
}
|
||||
break;
|
||||
default:
|
||||
w_align = 1;
|
||||
h_align = 1;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -3472,6 +3485,11 @@ int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf)
|
||||
ret = av_bprint_finalize(buf, &str);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (!av_bprint_is_complete(buf)) {
|
||||
av_free(str);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
avctx->extradata = str;
|
||||
/* Note: the string is NUL terminated (so extradata can be read as a
|
||||
* string), but the ending character is not accounted in the size (in
|
||||
|
@@ -212,6 +212,8 @@ static void restore_median(uint8_t *src, int step, int stride,
|
||||
slice_height = ((((slice + 1) * height) / slices) & cmask) -
|
||||
slice_start;
|
||||
|
||||
if (!slice_height)
|
||||
continue;
|
||||
bsrc = src + slice_start * stride;
|
||||
|
||||
// first line - left neighbour prediction
|
||||
@@ -222,7 +224,7 @@ static void restore_median(uint8_t *src, int step, int stride,
|
||||
A = bsrc[i];
|
||||
}
|
||||
bsrc += stride;
|
||||
if (slice_height == 1)
|
||||
if (slice_height <= 1)
|
||||
continue;
|
||||
// second line - first element has top prediction, the rest uses median
|
||||
C = bsrc[-stride];
|
||||
@@ -267,6 +269,8 @@ static void restore_median_il(uint8_t *src, int step, int stride,
|
||||
slice_height = ((((slice + 1) * height) / slices) & cmask) -
|
||||
slice_start;
|
||||
slice_height >>= 1;
|
||||
if (!slice_height)
|
||||
continue;
|
||||
|
||||
bsrc = src + slice_start * stride;
|
||||
|
||||
@@ -282,7 +286,7 @@ static void restore_median_il(uint8_t *src, int step, int stride,
|
||||
A = bsrc[stride + i];
|
||||
}
|
||||
bsrc += stride2;
|
||||
if (slice_height == 1)
|
||||
if (slice_height <= 1)
|
||||
continue;
|
||||
// second line - first element has top prediction, the rest uses median
|
||||
C = bsrc[-stride2];
|
||||
|
@@ -387,7 +387,7 @@ static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
|
||||
}
|
||||
|
||||
static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
||||
uint8_t *dst, int stride,
|
||||
uint8_t *dst, int stride, int plane_no,
|
||||
int width, int height, PutByteContext *pb)
|
||||
{
|
||||
UtvideoContext *c = avctx->priv_data;
|
||||
@@ -397,6 +397,7 @@ static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
||||
HuffEntry he[256];
|
||||
|
||||
uint32_t offset = 0, slice_len = 0;
|
||||
const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
|
||||
int i, sstart, send = 0;
|
||||
int symbol;
|
||||
int ret;
|
||||
@@ -406,7 +407,7 @@ static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
||||
case PRED_NONE:
|
||||
for (i = 0; i < c->slices; i++) {
|
||||
sstart = send;
|
||||
send = height * (i + 1) / c->slices;
|
||||
send = height * (i + 1) / c->slices & cmask;
|
||||
av_image_copy_plane(dst + sstart * width, width,
|
||||
src + sstart * stride, stride,
|
||||
width, send - sstart);
|
||||
@@ -415,7 +416,7 @@ static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
||||
case PRED_LEFT:
|
||||
for (i = 0; i < c->slices; i++) {
|
||||
sstart = send;
|
||||
send = height * (i + 1) / c->slices;
|
||||
send = height * (i + 1) / c->slices & cmask;
|
||||
left_predict(src + sstart * stride, dst + sstart * width,
|
||||
stride, width, send - sstart);
|
||||
}
|
||||
@@ -423,7 +424,7 @@ static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
||||
case PRED_MEDIAN:
|
||||
for (i = 0; i < c->slices; i++) {
|
||||
sstart = send;
|
||||
send = height * (i + 1) / c->slices;
|
||||
send = height * (i + 1) / c->slices & cmask;
|
||||
median_predict(c, src + sstart * stride, dst + sstart * width,
|
||||
stride, width, send - sstart);
|
||||
}
|
||||
@@ -487,7 +488,7 @@ static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
||||
send = 0;
|
||||
for (i = 0; i < c->slices; i++) {
|
||||
sstart = send;
|
||||
send = height * (i + 1) / c->slices;
|
||||
send = height * (i + 1) / c->slices & cmask;
|
||||
|
||||
/*
|
||||
* Write the huffman codes to a buffer,
|
||||
@@ -569,7 +570,7 @@ static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
case AV_PIX_FMT_RGBA:
|
||||
for (i = 0; i < c->planes; i++) {
|
||||
ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
|
||||
c->slice_buffer[i], c->slice_stride,
|
||||
c->slice_buffer[i], c->slice_stride, i,
|
||||
width, height, &pb);
|
||||
|
||||
if (ret) {
|
||||
@@ -581,7 +582,7 @@ static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
case AV_PIX_FMT_YUV422P:
|
||||
for (i = 0; i < c->planes; i++) {
|
||||
ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
|
||||
pic->linesize[i], width >> !!i, height, &pb);
|
||||
pic->linesize[i], i, width >> !!i, height, &pb);
|
||||
|
||||
if (ret) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
|
||||
@@ -592,7 +593,7 @@ static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
case AV_PIX_FMT_YUV420P:
|
||||
for (i = 0; i < c->planes; i++) {
|
||||
ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
|
||||
pic->linesize[i], width >> !!i, height >> !!i,
|
||||
pic->linesize[i], i, width >> !!i, height >> !!i,
|
||||
&pb);
|
||||
|
||||
if (ret) {
|
||||
|
@@ -352,6 +352,9 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
|
||||
ofs += slen;
|
||||
bytestream2_skip(&gb, len);
|
||||
} else {
|
||||
if (ofs + len > frame_width ||
|
||||
bytestream2_get_bytes_left(&gb) < len)
|
||||
return AVERROR_INVALIDDATA;
|
||||
bytestream2_get_buffer(&gb, &dp[ofs], len);
|
||||
ofs += len;
|
||||
}
|
||||
|
@@ -1314,7 +1314,9 @@ static av_always_inline int setup_classifs(vorbis_context *vc,
|
||||
vorbis_residue *vr,
|
||||
uint8_t *do_not_decode,
|
||||
unsigned ch_used,
|
||||
int partition_count)
|
||||
int partition_count,
|
||||
int ptns_to_read
|
||||
)
|
||||
{
|
||||
int p, j, i;
|
||||
unsigned c_p_c = vc->codebooks[vr->classbook].dimensions;
|
||||
@@ -1336,7 +1338,7 @@ static av_always_inline int setup_classifs(vorbis_context *vc,
|
||||
for (i = partition_count + c_p_c - 1; i >= partition_count; i--) {
|
||||
temp2 = (((uint64_t)temp) * inverse_class) >> 32;
|
||||
|
||||
if (i < vr->ptns_to_read)
|
||||
if (i < ptns_to_read)
|
||||
vr->classifs[p + i] = temp - temp2 * vr->classifications;
|
||||
temp = temp2;
|
||||
}
|
||||
@@ -1344,13 +1346,13 @@ static av_always_inline int setup_classifs(vorbis_context *vc,
|
||||
for (i = partition_count + c_p_c - 1; i >= partition_count; i--) {
|
||||
temp2 = temp / vr->classifications;
|
||||
|
||||
if (i < vr->ptns_to_read)
|
||||
if (i < ptns_to_read)
|
||||
vr->classifs[p + i] = temp - temp2 * vr->classifications;
|
||||
temp = temp2;
|
||||
}
|
||||
}
|
||||
}
|
||||
p += vr->ptns_to_read;
|
||||
p += ptns_to_read;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -1404,7 +1406,7 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
|
||||
for (partition_count = 0; partition_count < ptns_to_read;) { // SPEC error
|
||||
if (!pass) {
|
||||
int ret;
|
||||
if ((ret = setup_classifs(vc, vr, do_not_decode, ch_used, partition_count)) < 0)
|
||||
if ((ret = setup_classifs(vc, vr, do_not_decode, ch_used, partition_count, ptns_to_read)) < 0)
|
||||
return ret;
|
||||
}
|
||||
for (i = 0; (i < c_p_c) && (partition_count < ptns_to_read); ++i) {
|
||||
|
@@ -77,6 +77,8 @@ static int parse(AVCodecParserContext *ctx,
|
||||
idx += a; \
|
||||
if (sz > size) { \
|
||||
s->n_frames = 0; \
|
||||
*out_size = 0; \
|
||||
*out_data = data; \
|
||||
av_log(avctx, AV_LOG_ERROR, \
|
||||
"Superframe packet size too big: %u > %d\n", \
|
||||
sz, size); \
|
||||
|
@@ -253,6 +253,10 @@ static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb,
|
||||
return sign ? ~ret : ret;
|
||||
|
||||
error:
|
||||
ret = get_bits_left(gb);
|
||||
if (ret <= 0) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "Too few bits (%d) left\n", ret);
|
||||
}
|
||||
*last = 1;
|
||||
return 0;
|
||||
}
|
||||
|
@@ -2876,10 +2876,11 @@ static int wavpack_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
if ((ret = ff_alloc_packet2(avctx, avpkt, s->block_samples * avctx->channels * 8)) < 0)
|
||||
buf_size = s->block_samples * avctx->channels * 8
|
||||
+ 200 /* for headers */;
|
||||
if ((ret = ff_alloc_packet2(avctx, avpkt, buf_size)) < 0)
|
||||
return ret;
|
||||
buf = avpkt->data;
|
||||
buf_size = avpkt->size;
|
||||
|
||||
for (s->ch_offset = 0; s->ch_offset < avctx->channels;) {
|
||||
set_samplerate(s);
|
||||
|
@@ -1024,7 +1024,7 @@ static int apply_color_indexing_transform(WebPContext *s)
|
||||
ImageContext *img;
|
||||
ImageContext *pal;
|
||||
int i, x, y;
|
||||
uint8_t *p, *pi;
|
||||
uint8_t *p;
|
||||
|
||||
img = &s->image[IMAGE_ROLE_ARGB];
|
||||
pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];
|
||||
@@ -1062,11 +1062,11 @@ static int apply_color_indexing_transform(WebPContext *s)
|
||||
p = GET_PIXEL(img->frame, x, y);
|
||||
i = p[2];
|
||||
if (i >= pal->frame->width) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "invalid palette index %d\n", i);
|
||||
return AVERROR_INVALIDDATA;
|
||||
AV_WB32(p, 0x00000000);
|
||||
} else {
|
||||
const uint8_t *pi = GET_PIXEL(pal->frame, i, 0);
|
||||
AV_COPY32(p, pi);
|
||||
}
|
||||
pi = GET_PIXEL(pal->frame, i, 0);
|
||||
AV_COPY32(p, pi);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -420,6 +420,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
offset &= ~3;
|
||||
if (offset > s->sfb_offsets[i][band - 1])
|
||||
s->sfb_offsets[i][band++] = offset;
|
||||
|
||||
if (offset >= subframe_len)
|
||||
break;
|
||||
}
|
||||
s->sfb_offsets[i][band - 1] = subframe_len;
|
||||
s->num_sfb[i] = band - 1;
|
||||
|
@@ -60,6 +60,7 @@ void ff_synth_filter_inner_sse2(float *synth_buf_ptr, float synth_buf2[32],
|
||||
const float window[512],
|
||||
float out[32], intptr_t offset, float scale);
|
||||
|
||||
#if HAVE_YASM
|
||||
static void synth_filter_sse2(FFTContext *imdct,
|
||||
float *synth_buf_ptr, int *synth_buf_offset,
|
||||
float synth_buf2[32], const float window[512],
|
||||
@@ -74,12 +75,15 @@ static void synth_filter_sse2(FFTContext *imdct,
|
||||
|
||||
*synth_buf_offset = (*synth_buf_offset - 32) & 511;
|
||||
}
|
||||
#endif
|
||||
|
||||
av_cold void ff_synth_filter_init_x86(SynthFilterContext *s)
|
||||
{
|
||||
#if HAVE_YASM
|
||||
int cpu_flags = av_get_cpu_flags();
|
||||
|
||||
if (EXTERNAL_SSE2(cpu_flags)) {
|
||||
s->synth_filter_float = synth_filter_sse2;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
@@ -61,6 +61,9 @@ cglobal scalarproduct_int16, 3,3,3, v1, v2, order
|
||||
%endif
|
||||
paddd m2, m0
|
||||
movd eax, m2
|
||||
%if mmsize == 8
|
||||
emms
|
||||
%endif
|
||||
RET
|
||||
|
||||
; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
|
||||
|
@@ -20,10 +20,10 @@
|
||||
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
;******************************************************************************
|
||||
|
||||
%if ARCH_X86_64
|
||||
|
||||
%include "libavutil/x86/x86util.asm"
|
||||
|
||||
%if ARCH_X86_64
|
||||
|
||||
SECTION_RODATA
|
||||
|
||||
cextern pb_3
|
||||
|
@@ -34,7 +34,7 @@ OBJS-$(CONFIG_OPENGL_OUTDEV) += opengl_enc.o
|
||||
OBJS-$(CONFIG_OSS_INDEV) += oss_audio.o
|
||||
OBJS-$(CONFIG_OSS_OUTDEV) += oss_audio.o
|
||||
OBJS-$(CONFIG_PULSE_INDEV) += pulse_audio_dec.o \
|
||||
pulse_audio_common.o
|
||||
pulse_audio_common.o timefilter.o
|
||||
OBJS-$(CONFIG_PULSE_OUTDEV) += pulse_audio_enc.o \
|
||||
pulse_audio_common.o
|
||||
OBJS-$(CONFIG_SDL_OUTDEV) += sdl.o
|
||||
|
@@ -20,6 +20,9 @@
|
||||
#include "avdevice.h"
|
||||
#include "config.h"
|
||||
|
||||
#include "libavutil/ffversion.h"
|
||||
const char av_device_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
|
||||
|
||||
unsigned avdevice_version(void)
|
||||
{
|
||||
av_assert0(LIBAVDEVICE_VERSION_MICRO >= 100);
|
||||
@@ -48,9 +51,9 @@ int avdevice_app_to_dev_control_message(struct AVFormatContext *s, enum AVAppToD
|
||||
int avdevice_dev_to_app_control_message(struct AVFormatContext *s, enum AVDevToAppMessageType type,
|
||||
void *data, size_t data_size)
|
||||
{
|
||||
if (!s->control_message_cb)
|
||||
if (!av_format_get_control_message_cb(s))
|
||||
return AVERROR(ENOSYS);
|
||||
return s->control_message_cb(s, type, data, data_size);
|
||||
return av_format_get_control_message_cb(s)(s, type, data, data_size);
|
||||
}
|
||||
|
||||
int avdevice_list_devices(AVFormatContext *s, AVDeviceInfoList **device_list)
|
||||
|
@@ -30,6 +30,7 @@
|
||||
#include <pulse/error.h>
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/internal.h"
|
||||
#include "libavutil/time.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "pulse_audio_common.h"
|
||||
|
||||
@@ -47,6 +48,7 @@ typedef struct PulseData {
|
||||
pa_simple *s;
|
||||
int64_t pts;
|
||||
int64_t frame_duration;
|
||||
int wallclock;
|
||||
} PulseData;
|
||||
|
||||
static av_cold int pulse_read_header(AVFormatContext *s)
|
||||
@@ -131,6 +133,8 @@ static int pulse_read_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
}
|
||||
|
||||
pd->pts = -latency;
|
||||
if (pd->wallclock)
|
||||
pd->pts += av_gettime();
|
||||
}
|
||||
|
||||
pkt->pts = pd->pts;
|
||||
@@ -158,6 +162,7 @@ static const AVOption options[] = {
|
||||
{ "channels", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, D },
|
||||
{ "frame_size", "set number of bytes per frame", OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, D },
|
||||
{ "fragment_size", "set buffering size, affects latency and cpu usage", OFFSET(fragment_size), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, D },
|
||||
{ "wallclock", "set the initial pts using the current time", OFFSET(wallclock), AV_OPT_TYPE_INT, {.i64 = 1}, -1, 1, D },
|
||||
{ NULL },
|
||||
};
|
||||
|
||||
|
@@ -871,9 +871,6 @@ static int v4l2_read_header(AVFormatContext *s1)
|
||||
|
||||
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
|
||||
|
||||
if ((res = v4l2_set_parameters(s1)) < 0)
|
||||
return res;
|
||||
|
||||
if (s->pixel_format) {
|
||||
AVCodec *codec = avcodec_find_decoder_by_name(s->pixel_format);
|
||||
|
||||
@@ -925,6 +922,9 @@ static int v4l2_read_header(AVFormatContext *s1)
|
||||
|
||||
s->frame_format = desired_format;
|
||||
|
||||
if ((res = v4l2_set_parameters(s1)) < 0)
|
||||
return res;
|
||||
|
||||
st->codec->pix_fmt = avpriv_fmt_v4l2ff(desired_format, codec_id);
|
||||
s->frame_size =
|
||||
avpicture_get_size(st->codec->pix_fmt, s->width, s->height);
|
||||
|
@@ -496,6 +496,8 @@ static av_cold int init(AVFilterContext *ctx)
|
||||
snprintf(name, sizeof(name), "input%d", i);
|
||||
pad.type = AVMEDIA_TYPE_AUDIO;
|
||||
pad.name = av_strdup(name);
|
||||
if (!pad.name)
|
||||
return AVERROR(ENOMEM);
|
||||
pad.filter_frame = filter_frame;
|
||||
|
||||
ff_insert_inpad(ctx, i, &pad);
|
||||
|
@@ -214,6 +214,8 @@ static av_cold int join_init(AVFilterContext *ctx)
|
||||
snprintf(name, sizeof(name), "input%d", i);
|
||||
pad.type = AVMEDIA_TYPE_AUDIO;
|
||||
pad.name = av_strdup(name);
|
||||
if (!pad.name)
|
||||
return AVERROR(ENOMEM);
|
||||
pad.filter_frame = filter_frame;
|
||||
|
||||
pad.needs_fifo = 1;
|
||||
|
@@ -41,6 +41,7 @@ typedef struct ResampleContext {
|
||||
AVDictionary *options;
|
||||
|
||||
int64_t next_pts;
|
||||
int64_t next_in_pts;
|
||||
|
||||
/* set by filter_frame() to signal an output frame to request_frame() */
|
||||
int got_output;
|
||||
@@ -153,6 +154,7 @@ static int config_output(AVFilterLink *outlink)
|
||||
|
||||
outlink->time_base = (AVRational){ 1, outlink->sample_rate };
|
||||
s->next_pts = AV_NOPTS_VALUE;
|
||||
s->next_in_pts = AV_NOPTS_VALUE;
|
||||
|
||||
av_get_channel_layout_string(buf1, sizeof(buf1),
|
||||
-1, inlink ->channel_layout);
|
||||
@@ -259,7 +261,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
|
||||
}
|
||||
|
||||
out->sample_rate = outlink->sample_rate;
|
||||
if (in->pts != AV_NOPTS_VALUE) {
|
||||
/* Only convert in->pts if there is a discontinuous jump.
|
||||
This ensures that out->pts tracks the number of samples actually
|
||||
output by the resampler in the absence of such a jump.
|
||||
Otherwise, the rounding in av_rescale_q() and av_rescale()
|
||||
causes off-by-1 errors. */
|
||||
if (in->pts != AV_NOPTS_VALUE && in->pts != s->next_in_pts) {
|
||||
out->pts = av_rescale_q(in->pts, inlink->time_base,
|
||||
outlink->time_base) -
|
||||
av_rescale(delay, outlink->sample_rate,
|
||||
@@ -268,6 +275,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
|
||||
out->pts = s->next_pts;
|
||||
|
||||
s->next_pts = out->pts + out->nb_samples;
|
||||
s->next_in_pts = in->pts + in->nb_samples;
|
||||
|
||||
ret = ff_filter_frame(outlink, out);
|
||||
s->got_output = 1;
|
||||
|
@@ -37,6 +37,9 @@
|
||||
#include "formats.h"
|
||||
#include "internal.h"
|
||||
|
||||
#include "libavutil/ffversion.h"
|
||||
const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
|
||||
|
||||
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame);
|
||||
|
||||
void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
|
||||
|
@@ -24,6 +24,8 @@
|
||||
* video presentation timestamp (PTS) modification filter
|
||||
*/
|
||||
|
||||
#include <inttypes.h>
|
||||
|
||||
#include "libavutil/eval.h"
|
||||
#include "libavutil/internal.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
@@ -175,21 +177,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
|
||||
d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
|
||||
frame->pts = D2TS(d);
|
||||
|
||||
av_log(inlink->dst, AV_LOG_DEBUG,
|
||||
"N:%"PRId64" PTS:%s T:%f POS:%s",
|
||||
(int64_t)setpts->var_values[VAR_N],
|
||||
d2istr(setpts->var_values[VAR_PTS]),
|
||||
setpts->var_values[VAR_T],
|
||||
d2istr(setpts->var_values[VAR_POS]));
|
||||
av_dlog(inlink->dst,
|
||||
"N:%"PRId64" PTS:%s T:%f POS:%s",
|
||||
(int64_t)setpts->var_values[VAR_N],
|
||||
d2istr(setpts->var_values[VAR_PTS]),
|
||||
setpts->var_values[VAR_T],
|
||||
d2istr(setpts->var_values[VAR_POS]));
|
||||
switch (inlink->type) {
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
av_log(inlink->dst, AV_LOG_DEBUG, " INTERLACED:%"PRId64,
|
||||
(int64_t)setpts->var_values[VAR_INTERLACED]);
|
||||
av_dlog(inlink->dst, " INTERLACED:%"PRId64,
|
||||
(int64_t)setpts->var_values[VAR_INTERLACED]);
|
||||
break;
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
av_log(inlink->dst, AV_LOG_DEBUG, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
|
||||
(int64_t)setpts->var_values[VAR_NB_SAMPLES],
|
||||
(int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
|
||||
av_dlog(inlink->dst, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
|
||||
(int64_t)setpts->var_values[VAR_NB_SAMPLES],
|
||||
(int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
|
||||
break;
|
||||
}
|
||||
av_log(inlink->dst, AV_LOG_DEBUG, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));
|
||||
|
@@ -52,6 +52,8 @@ static av_cold int split_init(AVFilterContext *ctx)
|
||||
snprintf(name, sizeof(name), "output%d", i);
|
||||
pad.type = ctx->filter->inputs[0].type;
|
||||
pad.name = av_strdup(name);
|
||||
if (!pad.name)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ff_insert_outpad(ctx, i, &pad);
|
||||
}
|
||||
|
@@ -292,6 +292,8 @@ static av_cold int movie_common_init(AVFilterContext *ctx)
|
||||
snprintf(name, sizeof(name), "out%d", i);
|
||||
pad.type = movie->st[i].st->codec->codec_type;
|
||||
pad.name = av_strdup(name);
|
||||
if (!pad.name)
|
||||
return AVERROR(ENOMEM);
|
||||
pad.config_props = movie_config_output_props;
|
||||
pad.request_frame = movie_request_frame;
|
||||
ff_insert_outpad(ctx, i, &pad);
|
||||
|
@@ -82,6 +82,7 @@ static int config_output(AVFilterLink *outlink)
|
||||
int width = ctx->inputs[LEFT]->w;
|
||||
int height = ctx->inputs[LEFT]->h;
|
||||
AVRational time_base = ctx->inputs[LEFT]->time_base;
|
||||
AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;
|
||||
|
||||
// check size and fps match on the other input
|
||||
if (width != ctx->inputs[RIGHT]->w ||
|
||||
@@ -93,11 +94,18 @@ static int config_output(AVFilterLink *outlink)
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
|
||||
av_log(ctx, AV_LOG_ERROR,
|
||||
"Left and right framerates differ (%d/%d vs %d/%d).\n",
|
||||
"Left and right time bases differ (%d/%d vs %d/%d).\n",
|
||||
time_base.num, time_base.den,
|
||||
ctx->inputs[RIGHT]->time_base.num,
|
||||
ctx->inputs[RIGHT]->time_base.den);
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
|
||||
av_log(ctx, AV_LOG_ERROR,
|
||||
"Left and right framerates differ (%d/%d vs %d/%d).\n",
|
||||
frame_rate.num, frame_rate.den,
|
||||
ctx->inputs[RIGHT]->frame_rate.num,
|
||||
ctx->inputs[RIGHT]->frame_rate.den);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
s->pix_desc = av_pix_fmt_desc_get(outlink->format);
|
||||
@@ -108,6 +116,8 @@ static int config_output(AVFilterLink *outlink)
|
||||
switch (s->format) {
|
||||
case AV_STEREO3D_FRAMESEQUENCE:
|
||||
time_base.den *= 2;
|
||||
frame_rate.num *= 2;
|
||||
|
||||
s->double_pts = AV_NOPTS_VALUE;
|
||||
break;
|
||||
case AV_STEREO3D_COLUMNS:
|
||||
@@ -126,6 +136,7 @@ static int config_output(AVFilterLink *outlink)
|
||||
outlink->w = width;
|
||||
outlink->h = height;
|
||||
outlink->time_base = time_base;
|
||||
outlink->frame_rate= frame_rate;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -220,6 +220,19 @@ static int config_props(AVFilterLink *inlink)
|
||||
|
||||
#define NB_PLANES 4
|
||||
|
||||
static inline int mirror(int x, int w)
|
||||
{
|
||||
if (!w)
|
||||
return 0;
|
||||
|
||||
while ((unsigned)x > (unsigned)w) {
|
||||
x = -x;
|
||||
if (x < 0)
|
||||
x += 2 * w;
|
||||
}
|
||||
return x;
|
||||
}
|
||||
|
||||
static void blur(uint8_t *dst, const int dst_linesize,
|
||||
const uint8_t *src, const int src_linesize,
|
||||
const int w, const int h, FilterParam *fp)
|
||||
@@ -253,8 +266,7 @@ static void blur(uint8_t *dst, const int dst_linesize,
|
||||
for (dy = 0; dy < radius*2 + 1; dy++) {
|
||||
int dx;
|
||||
int iy = y+dy - radius;
|
||||
if (iy < 0) iy = -iy;
|
||||
else if (iy >= h) iy = h+h-iy-1;
|
||||
iy = mirror(iy, h-1);
|
||||
|
||||
for (dx = 0; dx < radius*2 + 1; dx++) {
|
||||
const int ix = x+dx - radius;
|
||||
@@ -265,13 +277,11 @@ static void blur(uint8_t *dst, const int dst_linesize,
|
||||
for (dy = 0; dy < radius*2+1; dy++) {
|
||||
int dx;
|
||||
int iy = y+dy - radius;
|
||||
if (iy < 0) iy = -iy;
|
||||
else if (iy >= h) iy = h+h-iy-1;
|
||||
iy = mirror(iy, h-1);
|
||||
|
||||
for (dx = 0; dx < radius*2 + 1; dx++) {
|
||||
int ix = x+dx - radius;
|
||||
if (ix < 0) ix = -ix;
|
||||
else if (ix >= w) ix = w+w-ix-1;
|
||||
ix = mirror(ix, w-1);
|
||||
UPDATE_FACTOR;
|
||||
}
|
||||
}
|
||||
|
@@ -53,8 +53,10 @@ static int ape_tag_read_field(AVFormatContext *s)
|
||||
av_log(s, AV_LOG_WARNING, "Invalid APE tag key '%s'.\n", key);
|
||||
return -1;
|
||||
}
|
||||
if (size >= UINT_MAX)
|
||||
return -1;
|
||||
if (size > INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
|
||||
av_log(s, AV_LOG_ERROR, "APE tag size too large.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (flags & APE_TAG_FLAG_IS_BINARY) {
|
||||
uint8_t filename[1024];
|
||||
enum AVCodecID id;
|
||||
|
@@ -1046,7 +1046,7 @@ start_sync:
|
||||
goto start_sync;
|
||||
}
|
||||
|
||||
n = avi->dv_demux ? 0 : get_stream_idx(d);
|
||||
n = get_stream_idx(d);
|
||||
|
||||
if (!((i - avi->last_pkt_pos) & 1) &&
|
||||
get_stream_idx(d + 1) < s->nb_streams)
|
||||
@@ -1058,6 +1058,9 @@ start_sync:
|
||||
goto start_sync;
|
||||
}
|
||||
|
||||
if (avi->dv_demux && n != 0)
|
||||
continue;
|
||||
|
||||
// parse ##dc/##wb
|
||||
if (n < s->nb_streams) {
|
||||
AVStream *st;
|
||||
|
@@ -219,6 +219,9 @@ int64_t avio_seek(AVIOContext *s, int64_t offset, int whence)
|
||||
return offset1;
|
||||
offset += offset1;
|
||||
}
|
||||
if (offset < 0)
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
offset1 = offset - pos;
|
||||
if (!s->must_flush && (!s->direct || !s->seek) &&
|
||||
offset1 >= 0 && offset1 <= buffer_size) {
|
||||
|
@@ -127,6 +127,8 @@ static int cdxl_read_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
height = AV_RB16(&cdxl->header[16]);
|
||||
palette_size = AV_RB16(&cdxl->header[20]);
|
||||
audio_size = AV_RB16(&cdxl->header[22]);
|
||||
if (FFALIGN(width, 16) * (uint64_t)height * cdxl->header[19] > INT_MAX)
|
||||
return AVERROR_INVALIDDATA;
|
||||
image_size = FFALIGN(width, 16) * height * cdxl->header[19] / 8;
|
||||
video_size = palette_size + image_size;
|
||||
|
||||
|
@@ -376,7 +376,7 @@ static int amf_parse_object(AVFormatContext *s, AVStream *astream,
|
||||
FLVContext *flv = s->priv_data;
|
||||
AVIOContext *ioc;
|
||||
AMFDataType amf_type;
|
||||
char str_val[256];
|
||||
char str_val[1024];
|
||||
double num_val;
|
||||
|
||||
num_val = 0;
|
||||
@@ -541,13 +541,13 @@ static int flv_read_metabody(AVFormatContext *s, int64_t next_pos)
|
||||
type = avio_r8(ioc);
|
||||
if (type != AMF_DATA_TYPE_STRING ||
|
||||
amf_get_string(ioc, buffer, sizeof(buffer)) < 0)
|
||||
return -1;
|
||||
return 2;
|
||||
|
||||
if (!strcmp(buffer, "onTextData"))
|
||||
return 1;
|
||||
|
||||
if (strcmp(buffer, "onMetaData"))
|
||||
return -1;
|
||||
return 2;
|
||||
|
||||
// find the streams now so that amf_parse_object doesn't need to do
|
||||
// the lookup every time it is called.
|
||||
@@ -614,7 +614,7 @@ static int flv_read_close(AVFormatContext *s)
|
||||
|
||||
static int flv_get_extradata(AVFormatContext *s, AVStream *st, int size)
|
||||
{
|
||||
av_free(st->codec->extradata);
|
||||
av_freep(&st->codec->extradata);
|
||||
if (ff_get_extradata(st->codec, s->pb, size) < 0)
|
||||
return AVERROR(ENOMEM);
|
||||
return 0;
|
||||
@@ -813,7 +813,7 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
stream_type=FLV_STREAM_TYPE_DATA;
|
||||
if (size > 13 + 1 + 4 && dts == 0) { // Header-type metadata stuff
|
||||
meta_pos = avio_tell(s->pb);
|
||||
if (flv_read_metabody(s, next) == 0) {
|
||||
if (flv_read_metabody(s, next) <= 0) {
|
||||
goto skip;
|
||||
}
|
||||
avio_seek(s->pb, meta_pos, SEEK_SET);
|
||||
|
@@ -145,15 +145,15 @@ static void hds_free(AVFormatContext *s)
|
||||
if (os->ctx && os->ctx_inited)
|
||||
av_write_trailer(os->ctx);
|
||||
if (os->ctx && os->ctx->pb)
|
||||
av_free(os->ctx->pb);
|
||||
av_freep(&os->ctx->pb);
|
||||
if (os->ctx)
|
||||
avformat_free_context(os->ctx);
|
||||
av_free(os->metadata);
|
||||
av_freep(&os->metadata);
|
||||
for (j = 0; j < os->nb_extra_packets; j++)
|
||||
av_free(os->extra_packets[j]);
|
||||
av_freep(&os->extra_packets[j]);
|
||||
for (j = 0; j < os->nb_fragments; j++)
|
||||
av_free(os->fragments[j]);
|
||||
av_free(os->fragments);
|
||||
av_freep(&os->fragments[j]);
|
||||
av_freep(&os->fragments);
|
||||
}
|
||||
av_freep(&c->streams);
|
||||
}
|
||||
@@ -509,7 +509,7 @@ static int hds_flush(AVFormatContext *s, OutputStream *os, int final,
|
||||
if (remove > 0) {
|
||||
for (i = 0; i < remove; i++) {
|
||||
unlink(os->fragments[i]->file);
|
||||
av_free(os->fragments[i]);
|
||||
av_freep(&os->fragments[i]);
|
||||
}
|
||||
os->nb_fragments -= remove;
|
||||
memmove(os->fragments, os->fragments + remove,
|
||||
|
@@ -308,9 +308,10 @@ static int hls_write_trailer(struct AVFormatContext *s)
|
||||
|
||||
av_write_trailer(oc);
|
||||
avio_closep(&oc->pb);
|
||||
avformat_free_context(oc);
|
||||
av_free(hls->basename);
|
||||
append_entry(hls, hls->duration);
|
||||
avformat_free_context(oc);
|
||||
hls->avf = NULL;
|
||||
hls_window(s, 1);
|
||||
|
||||
free_entries(hls);
|
||||
|
@@ -167,6 +167,7 @@ typedef struct MOVContext {
|
||||
int64_t next_root_atom; ///< offset of the next root atom
|
||||
int *bitrates; ///< bitrates read before streams creation
|
||||
int bitrates_count;
|
||||
int atom_depth;
|
||||
} MOVContext;
|
||||
|
||||
int ff_mp4_read_descr_len(AVIOContext *pb);
|
||||
|
@@ -33,13 +33,15 @@ static int mpeg4video_probe(AVProbeData *probe_packet)
|
||||
|
||||
for(i=0; i<probe_packet->buf_size; i++){
|
||||
temp_buffer = (temp_buffer<<8) + probe_packet->buf[i];
|
||||
if ((temp_buffer & 0xffffff00) != 0x100)
|
||||
if (temp_buffer & 0xfffffe00)
|
||||
continue;
|
||||
if (temp_buffer < 2)
|
||||
continue;
|
||||
|
||||
if (temp_buffer == VOP_START_CODE) VOP++;
|
||||
else if (temp_buffer == VISUAL_OBJECT_START_CODE) VISO++;
|
||||
else if (temp_buffer < 0x120) VO++;
|
||||
else if (temp_buffer < 0x130) VOL++;
|
||||
else if (temp_buffer >= 0x100 && temp_buffer < 0x120) VO++;
|
||||
else if (temp_buffer >= 0x120 && temp_buffer < 0x130) VOL++;
|
||||
else if ( !(0x1AF < temp_buffer && temp_buffer < 0x1B7)
|
||||
&& !(0x1B9 < temp_buffer && temp_buffer < 0x1C4)) res++;
|
||||
}
|
||||
|
@@ -1051,7 +1051,7 @@ static void ebml_free(EbmlSyntax *syntax, void *data)
|
||||
char *ptr = list->elem;
|
||||
for (j=0; j<list->nb_elem; j++, ptr+=syntax[i].list_elem_size)
|
||||
ebml_free(syntax[i].def.n, ptr);
|
||||
av_free(list->elem);
|
||||
av_freep(&list->elem);
|
||||
} else
|
||||
ebml_free(syntax[i].def.n, data_off);
|
||||
default: break;
|
||||
@@ -1427,13 +1427,17 @@ static void matroska_execute_seekhead(MatroskaDemuxContext *matroska)
|
||||
EbmlList *seekhead_list = &matroska->seekhead;
|
||||
int64_t before_pos = avio_tell(matroska->ctx->pb);
|
||||
int i;
|
||||
int nb_elem;
|
||||
|
||||
// we should not do any seeking in the streaming case
|
||||
if (!matroska->ctx->pb->seekable ||
|
||||
(matroska->ctx->flags & AVFMT_FLAG_IGNIDX))
|
||||
return;
|
||||
|
||||
for (i = 0; i < seekhead_list->nb_elem; i++) {
|
||||
// do not read entries that are added while parsing seekhead entries
|
||||
nb_elem = seekhead_list->nb_elem;
|
||||
|
||||
for (i = 0; i < nb_elem; i++) {
|
||||
MatroskaSeekhead *seekhead = seekhead_list->elem;
|
||||
if (seekhead[i].pos <= before_pos)
|
||||
continue;
|
||||
@@ -1903,7 +1907,8 @@ static int matroska_read_header(AVFormatContext *s)
|
||||
av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
|
||||
1000000000, track->default_duration, 30000);
|
||||
#if FF_API_R_FRAME_RATE
|
||||
if (st->avg_frame_rate.num < st->avg_frame_rate.den * 1000L)
|
||||
if ( st->avg_frame_rate.num < st->avg_frame_rate.den * 1000L
|
||||
&& st->avg_frame_rate.num > st->avg_frame_rate.den * 5L)
|
||||
st->r_frame_rate = st->avg_frame_rate;
|
||||
#endif
|
||||
}
|
||||
@@ -2028,7 +2033,7 @@ static int matroska_deliver_packet(MatroskaDemuxContext *matroska,
|
||||
{
|
||||
if (matroska->num_packets > 0) {
|
||||
memcpy(pkt, matroska->packets[0], sizeof(AVPacket));
|
||||
av_free(matroska->packets[0]);
|
||||
av_freep(&matroska->packets[0]);
|
||||
if (matroska->num_packets > 1) {
|
||||
void *newpackets;
|
||||
memmove(&matroska->packets[0], &matroska->packets[1],
|
||||
@@ -2058,7 +2063,7 @@ static void matroska_clear_queue(MatroskaDemuxContext *matroska)
|
||||
int n;
|
||||
for (n = 0; n < matroska->num_packets; n++) {
|
||||
av_free_packet(matroska->packets[n]);
|
||||
av_free(matroska->packets[n]);
|
||||
av_freep(&matroska->packets[n]);
|
||||
}
|
||||
av_freep(&matroska->packets);
|
||||
matroska->num_packets = 0;
|
||||
@@ -2898,7 +2903,7 @@ static int matroska_read_close(AVFormatContext *s)
|
||||
|
||||
for (n=0; n < matroska->tracks.nb_elem; n++)
|
||||
if (tracks[n].type == MATROSKA_TRACK_TYPE_AUDIO)
|
||||
av_free(tracks[n].audio.buf);
|
||||
av_freep(&tracks[n].audio.buf);
|
||||
ebml_free(matroska_cluster, &matroska->current_cluster);
|
||||
ebml_free(matroska_segment, matroska);
|
||||
|
||||
|
@@ -279,7 +279,11 @@ static int mov_read_covr(MOVContext *c, AVIOContext *pb, int type, int len)
|
||||
static int mov_metadata_raw(MOVContext *c, AVIOContext *pb,
|
||||
unsigned len, const char *key)
|
||||
{
|
||||
char *value = av_malloc(len + 1);
|
||||
char *value;
|
||||
// Check for overflow.
|
||||
if (len >= INT_MAX)
|
||||
return AVERROR(EINVAL);
|
||||
value = av_malloc(len + 1);
|
||||
if (!value)
|
||||
return AVERROR(ENOMEM);
|
||||
avio_read(pb, value, len);
|
||||
@@ -383,7 +387,7 @@ static int mov_read_udta_string(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
|
||||
if (!key)
|
||||
return 0;
|
||||
if (atom.size < 0)
|
||||
if (atom.size < 0 || str_size >= INT_MAX/2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
str_size = FFMIN3(sizeof(str)-1, str_size, atom.size);
|
||||
@@ -1200,10 +1204,12 @@ static int mov_read_stco(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
|
||||
if (!entries)
|
||||
return 0;
|
||||
if (entries >= UINT_MAX/sizeof(int64_t))
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
sc->chunk_offsets = av_malloc(entries * sizeof(int64_t));
|
||||
if (sc->chunk_offsets)
|
||||
av_log(c->fc, AV_LOG_WARNING, "Duplicate STCO atom\n");
|
||||
av_free(sc->chunk_offsets);
|
||||
sc->chunk_count = 0;
|
||||
sc->chunk_offsets = av_malloc_array(entries, sizeof(*sc->chunk_offsets));
|
||||
if (!sc->chunk_offsets)
|
||||
return AVERROR(ENOMEM);
|
||||
sc->chunk_count = entries;
|
||||
@@ -1474,7 +1480,7 @@ static void mov_parse_stsd_audio(MOVContext *c, AVIOContext *pb,
|
||||
|
||||
static void mov_parse_stsd_subtitle(MOVContext *c, AVIOContext *pb,
|
||||
AVStream *st, MOVStreamContext *sc,
|
||||
int size)
|
||||
int64_t size)
|
||||
{
|
||||
// ttxt stsd contains display flags, justification, background
|
||||
// color, fonts, and default styles, so fake an atom to read it
|
||||
@@ -1488,10 +1494,10 @@ static void mov_parse_stsd_subtitle(MOVContext *c, AVIOContext *pb,
|
||||
|
||||
static int mov_parse_stsd_data(MOVContext *c, AVIOContext *pb,
|
||||
AVStream *st, MOVStreamContext *sc,
|
||||
int size)
|
||||
int64_t size)
|
||||
{
|
||||
if (st->codec->codec_tag == MKTAG('t','m','c','d')) {
|
||||
if (ff_get_extradata(st->codec, pb, size) < 0)
|
||||
if ((int)size != size || ff_get_extradata(st->codec, pb, size) < 0)
|
||||
return AVERROR(ENOMEM);
|
||||
if (size > 16) {
|
||||
MOVStreamContext *tmcd_ctx = st->priv_data;
|
||||
@@ -1716,9 +1722,11 @@ static int mov_read_stsc(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
|
||||
if (!entries)
|
||||
return 0;
|
||||
if (entries >= UINT_MAX / sizeof(*sc->stsc_data))
|
||||
return AVERROR_INVALIDDATA;
|
||||
sc->stsc_data = av_malloc(entries * sizeof(*sc->stsc_data));
|
||||
if (sc->stsc_data)
|
||||
av_log(c->fc, AV_LOG_WARNING, "Duplicate STSC atom\n");
|
||||
av_free(sc->stsc_data);
|
||||
sc->stsc_count = 0;
|
||||
sc->stsc_data = av_malloc_array(entries, sizeof(*sc->stsc_data));
|
||||
if (!sc->stsc_data)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
@@ -1750,9 +1758,11 @@ static int mov_read_stps(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
avio_rb32(pb); // version + flags
|
||||
|
||||
entries = avio_rb32(pb);
|
||||
if (entries >= UINT_MAX / sizeof(*sc->stps_data))
|
||||
return AVERROR_INVALIDDATA;
|
||||
sc->stps_data = av_malloc(entries * sizeof(*sc->stps_data));
|
||||
if (sc->stps_data)
|
||||
av_log(c->fc, AV_LOG_WARNING, "Duplicate STPS atom\n");
|
||||
av_free(sc->stps_data);
|
||||
sc->stps_count = 0;
|
||||
sc->stps_data = av_malloc_array(entries, sizeof(*sc->stps_data));
|
||||
if (!sc->stps_data)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
@@ -1794,9 +1804,13 @@ static int mov_read_stss(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
st->need_parsing = AVSTREAM_PARSE_HEADERS;
|
||||
return 0;
|
||||
}
|
||||
if (sc->keyframes)
|
||||
av_log(c->fc, AV_LOG_WARNING, "Duplicated STSS atom\n");
|
||||
if (entries >= UINT_MAX / sizeof(int))
|
||||
return AVERROR_INVALIDDATA;
|
||||
sc->keyframes = av_malloc(entries * sizeof(int));
|
||||
av_freep(&sc->keyframes);
|
||||
sc->keyframe_count = 0;
|
||||
sc->keyframes = av_malloc_array(entries, sizeof(*sc->keyframes));
|
||||
if (!sc->keyframes)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
@@ -1855,9 +1869,13 @@ static int mov_read_stsz(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
|
||||
if (!entries)
|
||||
return 0;
|
||||
if (entries >= UINT_MAX / sizeof(int) || entries >= (UINT_MAX - 4) / field_size)
|
||||
if (entries >= (UINT_MAX - 4) / field_size)
|
||||
return AVERROR_INVALIDDATA;
|
||||
sc->sample_sizes = av_malloc(entries * sizeof(int));
|
||||
if (sc->sample_sizes)
|
||||
av_log(c->fc, AV_LOG_WARNING, "Duplicate STSZ atom\n");
|
||||
av_free(sc->sample_sizes);
|
||||
sc->sample_count = 0;
|
||||
sc->sample_sizes = av_malloc_array(entries, sizeof(*sc->sample_sizes));
|
||||
if (!sc->sample_sizes)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
@@ -1911,11 +1929,11 @@ static int mov_read_stts(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
av_dlog(c->fc, "track[%i].stts.entries = %i\n",
|
||||
c->fc->nb_streams-1, entries);
|
||||
|
||||
if (entries >= UINT_MAX / sizeof(*sc->stts_data))
|
||||
return -1;
|
||||
|
||||
if (sc->stts_data)
|
||||
av_log(c->fc, AV_LOG_WARNING, "Duplicate STTS atom\n");
|
||||
av_free(sc->stts_data);
|
||||
sc->stts_data = av_malloc(entries * sizeof(*sc->stts_data));
|
||||
sc->stts_count = 0;
|
||||
sc->stts_data = av_malloc_array(entries, sizeof(*sc->stts_data));
|
||||
if (!sc->stts_data)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
@@ -2054,9 +2072,11 @@ static int mov_read_sbgp(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
entries = avio_rb32(pb);
|
||||
if (!entries)
|
||||
return 0;
|
||||
if (entries >= UINT_MAX / sizeof(*sc->rap_group))
|
||||
return AVERROR_INVALIDDATA;
|
||||
sc->rap_group = av_malloc(entries * sizeof(*sc->rap_group));
|
||||
if (sc->rap_group)
|
||||
av_log(c->fc, AV_LOG_WARNING, "Duplicate SBGP atom\n");
|
||||
av_free(sc->rap_group);
|
||||
sc->rap_group_count = 0;
|
||||
sc->rap_group = av_malloc_array(entries, sizeof(*sc->rap_group));
|
||||
if (!sc->rap_group)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
@@ -3006,6 +3026,12 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
MOVAtom a;
|
||||
int i;
|
||||
|
||||
if (c->atom_depth > 10) {
|
||||
av_log(c->fc, AV_LOG_ERROR, "Atoms too deeply nested\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
c->atom_depth ++;
|
||||
|
||||
if (atom.size < 0)
|
||||
atom.size = INT64_MAX;
|
||||
while (total_size + 8 <= atom.size && !url_feof(pb)) {
|
||||
@@ -3022,11 +3048,12 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
{
|
||||
av_log(c->fc, AV_LOG_ERROR, "Broken file, trak/mdat not at top-level\n");
|
||||
avio_skip(pb, -8);
|
||||
c->atom_depth --;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
total_size += 8;
|
||||
if (a.size == 1) { /* 64 bit extended size */
|
||||
if (a.size == 1 && total_size + 8 <= atom.size) { /* 64 bit extended size */
|
||||
a.size = avio_rb64(pb) - 8;
|
||||
total_size += 8;
|
||||
}
|
||||
@@ -3058,13 +3085,16 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
int64_t start_pos = avio_tell(pb);
|
||||
int64_t left;
|
||||
int err = parse(c, pb, a);
|
||||
if (err < 0)
|
||||
if (err < 0) {
|
||||
c->atom_depth --;
|
||||
return err;
|
||||
}
|
||||
if (c->found_moov && c->found_mdat &&
|
||||
((!pb->seekable || c->fc->flags & AVFMT_FLAG_IGNIDX) ||
|
||||
start_pos + a.size == avio_size(pb))) {
|
||||
if (!pb->seekable || c->fc->flags & AVFMT_FLAG_IGNIDX)
|
||||
c->next_root_atom = start_pos + a.size;
|
||||
c->atom_depth --;
|
||||
return 0;
|
||||
}
|
||||
left = a.size - avio_tell(pb) + start_pos;
|
||||
@@ -3084,6 +3114,7 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
if (total_size < atom.size && atom.size < 0x7ffff)
|
||||
avio_skip(pb, atom.size - total_size);
|
||||
|
||||
c->atom_depth --;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -2033,7 +2033,8 @@ static int mov_write_mvhd_tag(AVIOContext *pb, MOVMuxContext *mov)
|
||||
}
|
||||
|
||||
version = max_track_len < UINT32_MAX ? 0 : 1;
|
||||
(version == 1) ? avio_wb32(pb, 120) : avio_wb32(pb, 108); /* size */
|
||||
avio_wb32(pb, version == 1 ? 120 : 108); /* size */
|
||||
|
||||
ffio_wfourcc(pb, "mvhd");
|
||||
avio_w8(pb, version);
|
||||
avio_wb24(pb, 0); /* flags */
|
||||
|
@@ -295,6 +295,8 @@ static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,
|
||||
int64_t ret = av_index_search_timestamp(st, timestamp, flags);
|
||||
int i, j;
|
||||
int dir = (flags&AVSEEK_FLAG_BACKWARD) ? -1 : 1;
|
||||
int64_t best_pos;
|
||||
int best_score;
|
||||
|
||||
if (mp3->is_cbr && st->duration > 0 && mp3->header_filesize > s->data_offset) {
|
||||
int64_t filesize = avio_size(s->pb);
|
||||
@@ -318,28 +320,37 @@ static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,
         return -1;
     }
 
-    if (dir < 0)
-        avio_seek(s->pb, FFMAX(ie->pos - 4096, 0), SEEK_SET);
+    avio_seek(s->pb, FFMAX(ie->pos - 4096, 0), SEEK_SET);
     ret = avio_seek(s->pb, ie->pos, SEEK_SET);
     if (ret < 0)
         return ret;
 
 #define MIN_VALID 3
+    best_pos = ie->pos;
+    best_score = 999;
     for(i=0; i<4096; i++) {
-        int64_t pos = ie->pos + i*dir;
+        int64_t pos = ie->pos + (dir > 0 ? i - 1024 : -i);
+        int64_t candidate = -1;
+        int score = 999;
         for(j=0; j<MIN_VALID; j++) {
             ret = check(s, pos);
             if(ret < 0)
                 break;
+            if ((ie->pos - pos)*dir <= 0 && abs(MIN_VALID/2-j) < score) {
+                candidate = pos;
+                score = abs(MIN_VALID/2-j);
+            }
             pos += ret;
         }
-        if(j==MIN_VALID)
-            break;
+        if (best_score > score && j == MIN_VALID) {
+            best_pos = candidate;
+            best_score = score;
+            if(score == 0)
+                break;
+        }
     }
-    if(j!=MIN_VALID)
-        i=0;
 
-    ret = avio_seek(s->pb, ie->pos + i*dir, SEEK_SET);
+    ret = avio_seek(s->pb, best_pos, SEEK_SET);
     if (ret < 0)
         return ret;
     ff_update_cur_dts(s, st, ie->timestamp);

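Editor's note: the rewritten mp3_seek() loop no longer stops at the first offset from which MIN_VALID frame headers parse in a row. It scores every candidate by how close it sits to the middle of its run of valid frames (abs(MIN_VALID/2 - j)), restricts candidates to the requested seek direction, and keeps the best-scoring position, falling back to the index position itself. A simplified, self-contained sketch of that scoring idea over a fake byte map (check_frame() and the forward-only scan are stand-ins, not the demuxer's code):

#include <stdio.h>
#include <stdlib.h>

#define MIN_VALID 3

/* Hypothetical stand-in for frame-header validation: returns the frame
 * size starting at pos, or -1 if pos is not a valid frame start. */
static int check_frame(const int *frame_sizes, int n, long pos)
{
    if (pos < 0 || pos >= n || frame_sizes[pos] <= 0)
        return -1;
    return frame_sizes[pos];
}

/* Pick the candidate with the lowest score; score 0 means the chosen
 * position sits in the middle of a run of MIN_VALID valid frames. */
static long best_seek_pos(const int *frame_sizes, int n, long target)
{
    long best_pos = target;
    int  best_score = 999;

    for (long i = 0; i < n; i++) {
        long pos = target + i;          /* simplified: scan forward only */
        long candidate = -1;
        int  score = 999, j, ret;

        for (j = 0; j < MIN_VALID; j++) {
            ret = check_frame(frame_sizes, n, pos);
            if (ret < 0)
                break;
            if (abs(MIN_VALID / 2 - j) < score) {
                candidate = pos;
                score = abs(MIN_VALID / 2 - j);
            }
            pos += ret;
        }
        if (j == MIN_VALID && score < best_score) {
            best_pos = candidate;
            best_score = score;
            if (score == 0)
                break;
        }
    }
    return best_pos;
}

int main(void)
{
    /* toy "file": each cell holds the frame size starting there, 0 = garbage */
    int sizes[10] = { 0, 0, 2, 0, 2, 0, 2, 0, 2, 0 };
    printf("seek to %ld\n", best_seek_pos(sizes, 10, 1));   /* prints 4 */
    return 0;
}
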
@@ -91,16 +91,20 @@ static int mpegps_probe(AVProbeData *p)
     if(vid+audio > invalid+1)     /* invalid VDR files nd short PES streams */
         score = AVPROBE_SCORE_EXTENSION / 2;
 
-    if(sys>invalid && sys*9 <= pspack*10)
-        return (audio > 12 || vid > 3 || pspack > 2) ? AVPROBE_SCORE_EXTENSION + 2 : AVPROBE_SCORE_EXTENSION / 2; // 1 more than .mpg
-    if(pspack > invalid && (priv1+vid+audio)*10 >= pspack*9)
-        return pspack > 2 ? AVPROBE_SCORE_EXTENSION + 2 : AVPROBE_SCORE_EXTENSION / 2; // 1 more than .mpg
-    if((!!vid ^ !!audio) && (audio > 4 || vid > 1) && !sys && !pspack && p->buf_size>2048 && vid + audio > invalid) /* PES stream */
-        return (audio > 12 || vid > 3 + 2*invalid) ? AVPROBE_SCORE_EXTENSION + 2 : AVPROBE_SCORE_EXTENSION / 2;
+    if (sys > invalid && sys * 9 <= pspack * 10)
+        return (audio > 12 || vid > 3 || pspack > 2) ? AVPROBE_SCORE_EXTENSION + 2
+                                                     : AVPROBE_SCORE_EXTENSION / 2 + 1; // 1 more than .mpg
+    if (pspack > invalid && (priv1 + vid + audio) * 10 >= pspack * 9)
+        return pspack > 2 ? AVPROBE_SCORE_EXTENSION + 2
+                          : AVPROBE_SCORE_EXTENSION / 2; // 1 more than .mpg
+    if ((!!vid ^ !!audio) && (audio > 4 || vid > 1) && !sys &&
+        !pspack && p->buf_size > 2048 && vid + audio > invalid) /* PES stream */
+        return (audio > 12 || vid > 3 + 2 * invalid) ? AVPROBE_SCORE_EXTENSION + 2
+                                                     : AVPROBE_SCORE_EXTENSION / 2;
 
-    //02-Penguin.flac has sys:0 priv1:0 pspack:0 vid:0 audio:1
-    //mp3_misidentified_2.mp3 has sys:0 priv1:0 pspack:0 vid:0 audio:6
-    //Have\ Yourself\ a\ Merry\ Little\ Christmas.mp3 0 0 0 5 0 1 len:21618
+    // 02-Penguin.flac has sys:0 priv1:0 pspack:0 vid:0 audio:1
+    // mp3_misidentified_2.mp3 has sys:0 priv1:0 pspack:0 vid:0 audio:6
+    // Have\ Yourself\ a\ Merry\ Little\ Christmas.mp3 0 0 0 5 0 1 len:21618
     return score;
 }

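Editor's note: apart from reindentation, the only functional change in the mpegps_probe() hunk is the "+ 1" in the first branch's low-confidence return value, which makes it genuinely one point higher than the plain AVPROBE_SCORE_EXTENSION / 2 score, as the inherited "1 more than .mpg" comment always claimed. A quick arithmetic check, assuming AVPROBE_SCORE_EXTENSION is 50 as in libavformat of this era:

#include <stdio.h>

#define AVPROBE_SCORE_EXTENSION 50   /* assumed value, just for the sketch */

int main(void)
{
    printf("half: %d  half + 1: %d  strong match: %d\n",
           AVPROBE_SCORE_EXTENSION / 2,        /* 25 */
           AVPROBE_SCORE_EXTENSION / 2 + 1,    /* 26 */
           AVPROBE_SCORE_EXTENSION + 2);       /* 52 */
    return 0;
}
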
@@ -1882,7 +1882,7 @@ static void sdt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
                 break;
             desc_len = get8(&p, desc_list_end);
             desc_end = p + desc_len;
-            if (desc_end > desc_list_end)
+            if (desc_len < 0 || desc_end > desc_list_end)
                 break;
 
             av_dlog(ts->stream, "tag: 0x%02x len=%d\n",

@@ -92,6 +92,10 @@ typedef struct MpegTSWrite {
 #define DEFAULT_PES_HEADER_FREQ 16
 #define DEFAULT_PES_PAYLOAD_SIZE ((DEFAULT_PES_HEADER_FREQ - 1) * 184 + 170)
 
+/* The section length is 12 bits. The first 2 are set to 0, the remaining
+ * 10 bits should not exceed 1021. */
+#define SECTION_LENGTH 1020
+
 static const AVOption options[] = {
     { "mpegts_transport_stream_id", "Set transport_stream_id field.",
       offsetof(MpegTSWrite, transport_stream_id), AV_OPT_TYPE_INT, {.i64 = 0x0001 }, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},

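Editor's note: the new SECTION_LENGTH constant encodes the comment directly above it: an MPEG-TS PSI section_length field is 12 bits wide, its top two bits are set to 0, and the remaining value must not exceed 1021, so the muxer sizes its section buffers just below that. The arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
    int ten_bit_max = (1 << 10) - 1;   /* 1023: what the usable 10 bits could hold */
    int spec_limit  = 1021;            /* maximum section_length allowed by the spec */
    int buffer_len  = 1020;            /* SECTION_LENGTH chosen in the hunk above    */

    printf("10-bit max %d, spec limit %d, buffer %d\n",
           ten_bit_max, spec_limit, buffer_len);
    return 0;
}
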
@@ -246,7 +250,7 @@ static void mpegts_write_pat(AVFormatContext *s)
 {
     MpegTSWrite *ts = s->priv_data;
     MpegTSService *service;
-    uint8_t data[1012], *q;
+    uint8_t data[SECTION_LENGTH], *q;
     int i;
 
     q = data;

@@ -262,8 +266,8 @@ static void mpegts_write_pat(AVFormatContext *s)
 static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
 {
     MpegTSWrite *ts = s->priv_data;
-    uint8_t data[1012], *q, *desc_length_ptr, *program_info_length_ptr;
-    int val, stream_type, i;
+    uint8_t data[SECTION_LENGTH], *q, *desc_length_ptr, *program_info_length_ptr;
+    int val, stream_type, i, err = 0;
 
     q = data;
     put16(&q, 0xe000 | service->pcr_pid);

@@ -280,7 +284,12 @@ static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
     for(i = 0; i < s->nb_streams; i++) {
         AVStream *st = s->streams[i];
         MpegTSWriteStream *ts_st = st->priv_data;
-        AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL,0);
+        AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
+
+        if (q - data > SECTION_LENGTH - 32) {
+            err = 1;
+            break;
+        }
         switch(st->codec->codec_id) {
         case AV_CODEC_ID_MPEG1VIDEO:
         case AV_CODEC_ID_MPEG2VIDEO:

@@ -316,9 +325,6 @@ static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
             break;
         }
 
-        if (q - data > sizeof(data) - 32)
-            return AVERROR(EINVAL);
-
         *q++ = stream_type;
         put16(&q, 0xe000 | ts_st->pid);
         desc_length_ptr = q;

@@ -350,7 +356,11 @@ static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
             len_ptr = q++;
             *len_ptr = 0;
 
-            for (p = lang->value; next && *len_ptr < 255 / 4 * 4 && q - data < sizeof(data) - 4; p = next + 1) {
+            for (p = lang->value; next && *len_ptr < 255 / 4 * 4; p = next + 1) {
+                if (q - data > SECTION_LENGTH - 4) {
+                    err = 1;
+                    break;
+                }
                 next = strchr(p, ',');
                 if (strlen(p) != 3 && (!next || next != p + 3))
                     continue; /* not a 3-letter code */

@@ -387,7 +397,11 @@ static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
             *q++ = 0x59; /* subtitling_descriptor */
             len_ptr = q++;
 
-            while (strlen(language) >= 3 && (sizeof(data) - (q - data)) >= 8) { /* 8 bytes per DVB subtitle substream data */
+            while (strlen(language) >= 3) {
+                if (sizeof(data) - (q - data) < 8) { /* 8 bytes per DVB subtitle substream data */
+                    err = 1;
+                    break;
+                }
                 *q++ = *language++;
                 *q++ = *language++;
                 *q++ = *language++;

@@ -478,6 +492,13 @@ static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
         desc_length_ptr[0] = val >> 8;
         desc_length_ptr[1] = val;
     }
+
+    if (err)
+        av_log(s, AV_LOG_ERROR,
+               "The PMT section cannot fit stream %d and all following streams.\n"
+               "Try reducing the number of languages in the audio streams "
+               "or the total number of streams.\n", i);
+
     mpegts_write_section1(&service->pmt, PMT_TID, service->sid, ts->tables_version, 0, 0,
                           data, q - data);
     return 0;

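Editor's note: taken together, the mpegts_write_pmt() hunks swap a hard failure (returning AVERROR(EINVAL) mid-section) for a soft one: each write site checks the remaining room in data[], sets err and breaks out, and the function still emits whatever fitted, logging one error at the end. A generic sketch of that bounded-append pattern (names and sizes are illustrative, not the muxer's API):

#include <stdio.h>
#include <string.h>

#define SECTION_LENGTH 1020

/* Append a blob only if it still fits; otherwise report that it did not. */
static int append(unsigned char *buf, unsigned char **q,
                  const void *blob, size_t len)
{
    if ((size_t)(*q - buf) + len > SECTION_LENGTH)
        return 0;                       /* caller sets err and breaks */
    memcpy(*q, blob, len);
    *q += len;
    return 1;
}

int main(void)
{
    unsigned char data[SECTION_LENGTH], *q = data;
    int err = 0;

    for (int i = 0; i < 200; i++)
        if (!append(data, &q, "0123456789", 10)) { err = 1; break; }

    if (err)
        fprintf(stderr, "section full, remaining entries dropped\n");
    printf("wrote %ld bytes\n", (long)(q - data));   /* 1020 */
    return 0;
}
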
@@ -504,7 +525,7 @@ static void mpegts_write_sdt(AVFormatContext *s)
 {
     MpegTSWrite *ts = s->priv_data;
     MpegTSService *service;
-    uint8_t data[1012], *q, *desc_list_len_ptr, *desc_len_ptr;
+    uint8_t data[SECTION_LENGTH], *q, *desc_list_len_ptr, *desc_len_ptr;
     int i, running_status, free_ca_mode, val;
 
     q = data;

@@ -795,7 +795,6 @@ retry:
                                                      10);
         if(side_data == NULL) {
             av_free_packet(pkt);
-            av_free(pkt);
             return AVERROR(ENOMEM);
         }
         AV_WL32(side_data + 4, os->end_trimming);

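Editor's note: the removed av_free(pkt) freed an AVPacket the demuxer does not own (the packet is handed in by the caller), so only av_free_packet(), which releases the payload the function itself attached, stays on the error path. A toy illustration of that ownership rule (Packet and fill_packet() are invented for the sketch):

#include <stdlib.h>

typedef struct Packet { unsigned char *data; int size; } Packet;

/* The callee fills a caller-owned Packet; on failure it must release only
 * what it allocated itself (data), never the Packet it was handed. */
static int fill_packet(Packet *pkt, int fail_side_data)
{
    pkt->data = malloc(64);
    pkt->size = 64;
    if (!pkt->data)
        return -1;
    if (fail_side_data) {              /* mimic the ENOMEM path in the hunk */
        free(pkt->data);               /* fine: we allocated it             */
        pkt->data = NULL;
        pkt->size = 0;
        /* free(pkt) here would be the invalid free the hunk removes */
        return -1;
    }
    return 0;
}

int main(void)
{
    Packet pkt = { 0 };
    if (fill_packet(&pkt, 1) < 0)
        return 0;                      /* error path: nothing left to free */
    free(pkt.data);                    /* success path: caller owns cleanup */
    return 0;
}
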
@@ -208,11 +208,15 @@ int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc)
 void ff_put_bmp_header(AVIOContext *pb, AVCodecContext *enc,
                        const AVCodecTag *tags, int for_asf, int ignore_extradata)
 {
+    int keep_height = enc->extradata_size >= 9 &&
+                      !memcmp(enc->extradata + enc->extradata_size - 9, "BottomUp", 9);
+    int extradata_size = enc->extradata_size - 9*keep_height;
+
     /* size */
-    avio_wl32(pb, 40 + (ignore_extradata ? 0 : enc->extradata_size));
+    avio_wl32(pb, 40 + (ignore_extradata ? 0 :extradata_size));
     avio_wl32(pb, enc->width);
     //We always store RGB TopDown
-    avio_wl32(pb, enc->codec_tag ? enc->height : -enc->height);
+    avio_wl32(pb, enc->codec_tag || keep_height ? enc->height : -enc->height);
     /* planes */
     avio_wl16(pb, 1);
     /* depth */

@@ -226,9 +230,9 @@ void ff_put_bmp_header(AVIOContext *pb, AVCodecContext *enc,
     avio_wl32(pb, 0);
 
     if (!ignore_extradata) {
-        avio_write(pb, enc->extradata, enc->extradata_size);
+        avio_write(pb, enc->extradata, extradata_size);
 
-        if (!for_asf && enc->extradata_size & 1)
+        if (!for_asf && extradata_size & 1)
             avio_w8(pb, 0);
     }
 }

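Editor's note: the two ff_put_bmp_header() hunks handle a private "BottomUp" marker at the tail of the extradata. When it is present, the 9 trailing marker bytes are stripped from what gets written, the declared header size shrinks accordingly, and the height is kept positive so the bitmap is flagged bottom-up instead of getting the usual negated top-down height. A sketch of just the detection and the height decision (the helper name is hypothetical):

#include <stdio.h>
#include <string.h>

/* Mirrors the hunk's test: extradata ending in the 9 bytes "BottomUp\0"
 * is a private marker, so keep the height positive (bottom-up bitmap). */
static int bmp_height_field(const unsigned char *extradata, int extradata_size,
                            int height, int codec_tag)
{
    int keep_height = extradata_size >= 9 &&
                      !memcmp(extradata + extradata_size - 9, "BottomUp", 9);
    return codec_tag || keep_height ? height : -height;
}

int main(void)
{
    const unsigned char ed[] = "xxBottomUp";        /* sizeof includes the NUL */

    printf("%d %d\n",
           bmp_height_field(ed, sizeof(ed), 480, 0),   /* marker found:  480 */
           bmp_height_field(NULL, 0, 480, 0));         /* no extradata: -480 */
    return 0;
}
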
@@ -312,6 +312,9 @@ ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
     int64_t codec_pos;
     int ret;
 
+    if (codec_data_size < 0)
+        return AVERROR_INVALIDDATA;
+
     avpriv_set_pts_info(st, 64, 1, 1000);
     codec_pos = avio_tell(pb);
     v = avio_rb32(pb);

@@ -394,7 +397,11 @@ ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
 skip:
     /* skip codec info */
     size = avio_tell(pb) - codec_pos;
-    avio_skip(pb, codec_data_size - size);
+    if (codec_data_size >= size) {
+        avio_skip(pb, codec_data_size - size);
+    } else {
+        av_log(s, AV_LOG_WARNING, "codec_data_size %u < size %d\n", codec_data_size, size);
+    }
 
     return 0;
 }

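Editor's note: both ff_rm_read_mdpr_codecdata() hunks are defensive checks on a size read from the file: a negative codec_data_size is rejected outright, and the final skip only happens when the declared size is at least what the parser already consumed, otherwise a warning is logged instead of seeking backwards. The same guard in isolation:

#include <stdio.h>

/* Trust a declared payload size only if it is non-negative and at least as
 * large as what was already consumed; otherwise skipping would go backwards. */
static long bytes_to_skip(long declared_size, long consumed)
{
    if (declared_size < 0)
        return -1;                       /* invalid data, caller errors out */
    if (declared_size < consumed) {
        fprintf(stderr, "declared size %ld < consumed %ld\n",
                declared_size, consumed);
        return 0;                        /* warn and skip nothing */
    }
    return declared_size - consumed;
}

int main(void)
{
    printf("%ld\n", bytes_to_skip(100, 40));   /* 60 */
    printf("%ld\n", bytes_to_skip(30, 40));    /* 0, after a warning */
    printf("%ld\n", bytes_to_skip(-5, 0));     /* -1 */
    return 0;
}
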
Some files were not shown because too many files have changed in this diff.