Compare commits
219 Commits
SHA1:
b05d355040 f253fa9552 a28c276b8d 26da47a09b 23fdcd3b0a 653329dfcb b0964918d8 d8fda618d0
43881c7732 42bdcebf33 d1e71ecbb0 64be1a45eb f8bd98ae4d f2c6e2c3b4 8acfae6901 e5ccd894d1
6287107eae 53330b30fd 9726c1a5f8 f1dd343007 be894938c4 fe0fd3de22 fe12b3a7a6 a1605af9b5
b0937dd61d ac0dea5dd7 5df02760dd 014dee89d0 da1a8191c5 dd9b24a488 16b5df17ea 99af97ea11
6c66ea5e73 252356cf06 56fb830c30 d19b55649c fd6230e8f0 9406d3c910 9e1ce9a8ee b4d2888ce8
b3f30cb6d6 0484d7ad7e 85b829bff9 f1685bd31a 8d10d6e127 f93f739eca 1a6218954a 1f8e0f7e06
6dd718e416 1aab060996 3bc5aa65bb da1dfea076 e940d15a98 481118615c 4b6e46c427 516ba41f05
1a642b7217 58b5b062b8 f13f5a7d4b d14696c99c d0af7d5745 f2abf8df7a 40c7613ecf 1a7d1793d6
9fcc632249 f7395926f2 7bc37641e3 3ac0638d57 051ac5c0f5 5ac46a0969 8b55f67e3e bb1d75e6c5
459a84ada3 35fe089dd9 9e6d8c309f afbaf6b367 f1da6691a4 5b1a953960 37e2d574dd f25f5f8c62
a437298de5 e912b0777b b3f48a5044 ee9e966296 493a92313f 1578986a0d 7788297a59 23376ae2f0
8231764784 49fa398858 1ad1723c24 7740b111dd c0ad5f9333 bb7f236c7f 0397d43405 ffc66ac0d6
588e7226ed 0dc5868f14 723512ac71 963514ea1a c11b3010c2 3301b248b0 7b67ce9ade 90a1c5e95c
45a529d805 97cbad3d2c 6419569a9d 67134ad31f d513c6a0ee 18f48e05a2 5bf5a35fb5 6598aaea1a
a5992a274f 1b99667005 1f4d779e87 f543d32455 2cbdbc3670 aa943bd31f bea14966e2 6be5a3c045
119131fcbf c13e38bac7 01c4fe7ee7 ad13a5c8fa ce248bf7ee 8eaefbe1be 81d8bad786 0aee436728
cdaf9fb2a0 cf41ff4889 6120ad315b 2105f046f5 815d3225e3 51dd23c448 a4e1532ee7 f7b147548e
f99b17bd32 21f6b07a97 c9f1456a41 77b789b406 9ed1aa0465 188ce941ec 7fc9c7c35b 07015d9f91
744b406ff3 2273e5ed99 a1f7844a11 3ef8b4322c 9dc112e277 75ff0e8c50 f02221d651 92c4973752
f4e0869560 6135baa85b 3573256037 d396987c30 b20a8ad619 01a550bda2 f6b3dce952 b8e57113ec
407912d178 12bbd819cb f9204ec56a 4ddac7199b 3e78f86891 68fd80ee1c 0edc799626 3cf6135729
160e91de89 9f8e3e6d12 9752ab6b9e 64e069efac e064cce972 b8102ce56d f38c42b913 a770a61e6d
b2111ad4df 7c81afdba5 65b839e43a d47e96090c 0bbd46c690 5f1f1868b5 ebf381168a 0d90143972
00049f193d d832020bd8 95e91aaf33 40dd29653a 52dd1a933e 564c023eba 771564945a 16f7cbef56
4ec1acc6e4 74f6df745a 46c477c2a1 fcbcc561e0 6d899d0206 e0a03d1f9c d07be523f5 e4cdde96b3
252a0ccb80 dad0c9d686 e173834af8 63e3a97815 61796a8999 7d9c059a35 52572ca1b3 220bbc44c2
f378636d90 98f33430a2 6672f672d9 3002e5976d 1e8ff7d21d dae6c19995 989adf5ee5 09d406eec8
d1b62a9a07 29c8fac3f7 8c33d40a7b
Changelog (18 changed lines)
@@ -1,8 +1,24 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version 2.2:
version 2.2.7
- snow: fix null pointer dereference
- iff: fix out of array access
- svq1dec: fix input data corruption
- proresenc_ks: check buffer size


version 2.2.6
- fix infinite loop in dvbsub parser
- fix some interlaced MPEG-2 videos
- fix decoding issues in dv (Ticket2340, 2341)
- fix v4l2 and v4l2enc crashes
- fix theoretical librtmp crash
- fix theoretical eamad crash
- support dimension change in g2meet


version 2.2:
- HNM version 4 demuxer and video decoder
- Live HDS muxer
- setsar/setdar filters now support variables in ratio expressions
LICENSE (1 changed line)
@@ -33,6 +33,7 @@ Specifically, the GPL parts of FFmpeg are
- vf_geq.c
- vf_histeq.c
- vf_hqdn3d.c
- vf_interlace.c
- vf_kerndeint.c
- vf_mcdeint.c
- vf_mp.c
@@ -224,7 +224,7 @@ int opt_opencl_bench(void *optctx, const char *opt, const char *arg)
av_log(NULL, AV_LOG_ERROR, "No OpenCL device detected!\n");
return AVERROR(EINVAL);
}
if (!(devices = av_malloc(sizeof(OpenCLDeviceBenchmark) * nb_devices))) {
if (!(devices = av_malloc_array(nb_devices, sizeof(OpenCLDeviceBenchmark)))) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate buffer\n");
return AVERROR(ENOMEM);
}
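This hunk swaps a hand-written `sizeof(x) * n` allocation for `av_malloc_array()`, which rejects element counts whose total byte size would overflow. A minimal sketch of that pattern in plain C; the helper name `checked_alloc_array` is made up for illustration and is not an FFmpeg API:

```c
#include <stdint.h>
#include <stdlib.h>

/* Overflow-safe array allocation: refuse to allocate when
 * nmemb * size would wrap past SIZE_MAX, mirroring what
 * av_malloc_array() adds on top of av_malloc(). */
static void *checked_alloc_array(size_t nmemb, size_t size)
{
    if (size && nmemb > SIZE_MAX / size)
        return NULL;                 /* product would overflow */
    return malloc(nmemb * size);     /* multiplication is now safe */
}
```

With the original `av_malloc(sizeof(...) * nb_devices)` form, a sufficiently large count can wrap the multiplication and return an undersized buffer.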
configure (1 changed line)
@@ -4287,6 +4287,7 @@ EOF
fi

check_ldflags -Wl,--as-needed
check_ldflags -Wl,-z,noexecstack

if check_func dlopen; then
ldl=
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 2.2.3
PROJECT_NUMBER = 2.2.9

# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -1405,11 +1405,11 @@ ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
You can put many streams of the same type in the output:

@example
ffmpeg -i test1.avi -i test2.avi -map 0:3 -map 0:2 -map 0:1 -map 0:0 -c copy test12.nut
ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
@end example

The resulting output file @file{test12.avi} will contain first four streams from
the input file in reverse order.
The resulting output file @file{test12.nut} will contain the first four streams
from the input files in reverse order.

@item
To force CBR video output:
@@ -491,7 +491,7 @@ aeval=val(ch)/2:c=same
@item
Invert phase of the second channel:
@example
eval=val(0)|-val(1)
aeval=val(0)|-val(1)
@end example
@end itemize

@@ -8850,7 +8850,7 @@ Default value is "all", which will cycle through the list of all tests.

For example the following:
@example
testsrc=t=dc_luma
mptestsrc=t=dc_luma
@end example

will generate a "dc_luma" test pattern.
ffmpeg.c (32 changed lines)
@@ -621,7 +621,8 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
bsfc = bsfc->next;
}

if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
if(
(avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
pkt->dts != AV_NOPTS_VALUE &&
ost->last_mux_dts != AV_NOPTS_VALUE) {
@@ -642,6 +643,16 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
pkt->pts = FFMAX(pkt->pts, max);
pkt->dts = max;
}
}
if (pkt->dts != AV_NOPTS_VALUE &&
pkt->pts != AV_NOPTS_VALUE &&
pkt->dts > pkt->pts) {
av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d\n",
pkt->dts, pkt->pts,
ost->file_index, ost->st->index);
pkt->pts = AV_NOPTS_VALUE;
pkt->dts = AV_NOPTS_VALUE;
}
}
ost->last_mux_dts = pkt->dts;
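The block added above makes write_frame() drop both timestamps when a packet arrives with a decoding timestamp later than its presentation timestamp, so the muxer can regenerate them instead of failing on inconsistent values. A rough standalone sketch of the same check; `sanitize_pkt_ts` is a hypothetical helper, not part of ffmpeg.c:

```c
#include <inttypes.h>
#include <libavcodec/avcodec.h>   /* AVPacket, AV_NOPTS_VALUE */
#include <libavutil/log.h>

/* A decoding timestamp may never come after the presentation timestamp.
 * If a packet violates that, warn and clear both fields. */
static void sanitize_pkt_ts(void *log_ctx, AVPacket *pkt)
{
    if (pkt->dts != AV_NOPTS_VALUE &&
        pkt->pts != AV_NOPTS_VALUE &&
        pkt->dts > pkt->pts) {
        av_log(log_ctx, AV_LOG_WARNING,
               "Invalid DTS %"PRId64" > PTS %"PRId64", dropping timestamps\n",
               pkt->dts, pkt->pts);
        pkt->pts = AV_NOPTS_VALUE;
        pkt->dts = AV_NOPTS_VALUE;
    }
}
```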
@@ -1091,6 +1102,19 @@ static void do_video_stats(OutputStream *ost, int frame_size)
|
||||
}
|
||||
}
|
||||
|
||||
static void finish_output_stream(OutputStream *ost)
|
||||
{
|
||||
OutputFile *of = output_files[ost->file_index];
|
||||
int i;
|
||||
|
||||
ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
|
||||
|
||||
if (of->shortest) {
|
||||
for (i = 0; i < of->ctx->nb_streams; i++)
|
||||
output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get and encode new output from any of the filtergraphs, without causing
|
||||
* activity.
|
||||
@@ -1968,7 +1992,7 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
|
||||
if (avpkt.duration) {
|
||||
duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
} else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {
|
||||
int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
|
||||
int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->st->codec->ticks_per_frame;
|
||||
duration = ((int64_t)AV_TIME_BASE *
|
||||
ist->st->codec->time_base.num * ticks) /
|
||||
ist->st->codec->time_base.den;
|
||||
@@ -2025,7 +2049,7 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
|
||||
} else if (pkt->duration) {
|
||||
ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
} else if(ist->st->codec->time_base.num != 0) {
|
||||
int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
|
||||
int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
|
||||
ist->next_dts += ((int64_t)AV_TIME_BASE *
|
||||
ist->st->codec->time_base.num * ticks) /
|
||||
ist->st->codec->time_base.den;
|
||||
@@ -3147,7 +3171,7 @@ static int process_input(int file_index)
|
||||
|
||||
if (ost->source_index == ifile->ist_index + i &&
|
||||
(ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
|
||||
close_output_stream(ost);
|
||||
finish_output_stream(ost);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -828,6 +828,12 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
av_freep(&ifilter->name);
|
||||
DESCRIBE_FILTER_LINK(ifilter, in, 1);
|
||||
|
||||
if (!ifilter->ist->dec) {
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"No decoder for stream #%d:%d, filtering impossible\n",
|
||||
ifilter->ist->file_index, ifilter->ist->st->index);
|
||||
return AVERROR_DECODER_NOT_FOUND;
|
||||
}
|
||||
switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
|
||||
case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
|
||||
case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
|
||||
|
ffmpeg_opt.c (59 changed lines)
@@ -1783,7 +1783,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
/* pick the "best" stream of each type */
|
||||
|
||||
/* video: highest resolution */
|
||||
if (!o->video_disable && oc->oformat->video_codec != AV_CODEC_ID_NONE) {
|
||||
if (!o->video_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO) != AV_CODEC_ID_NONE) {
|
||||
int area = 0, idx = -1;
|
||||
int qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0);
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
@@ -1805,7 +1805,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
}
|
||||
|
||||
/* audio: most channels */
|
||||
if (!o->audio_disable && oc->oformat->audio_codec != AV_CODEC_ID_NONE) {
|
||||
if (!o->audio_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_AUDIO) != AV_CODEC_ID_NONE) {
|
||||
int channels = 0, idx = -1;
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
ist = input_streams[i];
|
||||
@@ -2119,7 +2119,8 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
for (j = 0; j < nb_input_files; j++) {
|
||||
for (i = 0; i < input_files[j]->nb_streams; i++) {
|
||||
AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
|
||||
if (c->codec_type != AVMEDIA_TYPE_VIDEO)
|
||||
if (c->codec_type != AVMEDIA_TYPE_VIDEO ||
|
||||
!c->time_base.num)
|
||||
continue;
|
||||
fr = c->time_base.den * 1000 / c->time_base.num;
|
||||
if (fr == 25000) {
|
||||
@@ -2152,19 +2153,19 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
|
||||
parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
|
||||
parse_option(o, "r", frame_rates[norm], options);
|
||||
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
|
||||
av_dict_set(&o->g->codec_opts, "b:v", "1150000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "maxrate", "1150000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "minrate", "1150000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "bufsize", "327680", AV_DICT_DONT_OVERWRITE); // 40*1024*8;
|
||||
opt_default(NULL, "b:v", "1150000");
|
||||
opt_default(NULL, "maxrate", "1150000");
|
||||
opt_default(NULL, "minrate", "1150000");
|
||||
opt_default(NULL, "bufsize", "327680"); // 40*1024*8;
|
||||
|
||||
av_dict_set(&o->g->codec_opts, "b:a", "224000", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "b:a", "224000");
|
||||
parse_option(o, "ar", "44100", options);
|
||||
parse_option(o, "ac", "2", options);
|
||||
|
||||
av_dict_set(&o->g->format_opts, "packetsize", "2324", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->format_opts, "muxrate", "1411200", AV_DICT_DONT_OVERWRITE); // 2352 * 75 * 8;
|
||||
opt_default(NULL, "packetsize", "2324");
|
||||
opt_default(NULL, "muxrate", "1411200"); // 2352 * 75 * 8;
|
||||
|
||||
/* We have to offset the PTS, so that it is consistent with the SCR.
|
||||
SCR starts at 36000, but the first two packs contain only padding
|
||||
@@ -2181,18 +2182,18 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
|
||||
parse_option(o, "r", frame_rates[norm], options);
|
||||
parse_option(o, "pix_fmt", "yuv420p", options);
|
||||
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
|
||||
av_dict_set(&o->g->codec_opts, "b:v", "2040000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "maxrate", "2516000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "minrate", "0", AV_DICT_DONT_OVERWRITE); // 1145000;
|
||||
av_dict_set(&o->g->codec_opts, "bufsize", "1835008", AV_DICT_DONT_OVERWRITE); // 224*1024*8;
|
||||
av_dict_set(&o->g->codec_opts, "scan_offset", "1", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "b:v", "2040000");
|
||||
opt_default(NULL, "maxrate", "2516000");
|
||||
opt_default(NULL, "minrate", "0"); // 1145000;
|
||||
opt_default(NULL, "bufsize", "1835008"); // 224*1024*8;
|
||||
opt_default(NULL, "scan_offset", "1");
|
||||
|
||||
av_dict_set(&o->g->codec_opts, "b:a", "224000", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "b:a", "224000");
|
||||
parse_option(o, "ar", "44100", options);
|
||||
|
||||
av_dict_set(&o->g->format_opts, "packetsize", "2324", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "packetsize", "2324");
|
||||
|
||||
} else if (!strcmp(arg, "dvd")) {
|
||||
|
||||
@@ -2203,17 +2204,17 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
|
||||
parse_option(o, "r", frame_rates[norm], options);
|
||||
parse_option(o, "pix_fmt", "yuv420p", options);
|
||||
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
|
||||
av_dict_set(&o->g->codec_opts, "b:v", "6000000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "maxrate", "9000000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "minrate", "0", AV_DICT_DONT_OVERWRITE); // 1500000;
|
||||
av_dict_set(&o->g->codec_opts, "bufsize", "1835008", AV_DICT_DONT_OVERWRITE); // 224*1024*8;
|
||||
opt_default(NULL, "b:v", "6000000");
|
||||
opt_default(NULL, "maxrate", "9000000");
|
||||
opt_default(NULL, "minrate", "0"); // 1500000;
|
||||
opt_default(NULL, "bufsize", "1835008"); // 224*1024*8;
|
||||
|
||||
av_dict_set(&o->g->format_opts, "packetsize", "2048", AV_DICT_DONT_OVERWRITE); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
|
||||
av_dict_set(&o->g->format_opts, "muxrate", "10080000", AV_DICT_DONT_OVERWRITE); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
|
||||
opt_default(NULL, "packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
|
||||
opt_default(NULL, "muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
|
||||
|
||||
av_dict_set(&o->g->codec_opts, "b:a", "448000", AV_DICT_DONT_OVERWRITE);
|
||||
opt_default(NULL, "b:a", "448000");
|
||||
parse_option(o, "ar", "48000", options);
|
||||
|
||||
} else if (!strncmp(arg, "dv", 2)) {
|
||||
@@ -2232,6 +2233,10 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
av_dict_copy(&o->g->codec_opts, codec_opts, AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_copy(&o->g->format_opts, format_opts, AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -2989,6 +2989,8 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
|
||||
AVDictionaryEntry *entry = av_dict_get(stream->metadata, "title", NULL, 0);
|
||||
int i;
|
||||
|
||||
*pbuffer = NULL;
|
||||
|
||||
avc = avformat_alloc_context();
|
||||
if (avc == NULL || !rtp_format) {
|
||||
return -1;
|
||||
@@ -3025,7 +3027,7 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
|
||||
av_free(avc);
|
||||
av_free(avs);
|
||||
|
||||
return strlen(*pbuffer);
|
||||
return *pbuffer ? strlen(*pbuffer) : AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
static void rtsp_cmd_options(HTTPContext *c, const char *url)
|
||||
|
@@ -34,7 +34,7 @@ static int aac_sync(uint64_t state, AACAC3ParseContext *hdr_info,
int size;
union {
uint64_t u64;
uint8_t u8[8];
uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
} tmp;

tmp.u64 = av_be2ne64(state);
@@ -166,7 +166,7 @@ static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
int err;
union {
uint64_t u64;
uint8_t u8[8];
uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
} tmp = { av_be2ne64(state) };
AC3HeaderInfo hdr, *phdr = &hdr;
GetBitContext gbc;
@@ -256,7 +256,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
energy_cpl = energy[blk][CPL_CH][bnd];
energy_ch = energy[blk][ch][bnd];
blk1 = blk+1;
while (!s->blocks[blk1].new_cpl_coords[ch] && blk1 < s->num_blocks) {
while (blk1 < s->num_blocks && !s->blocks[blk1].new_cpl_coords[ch]) {
if (s->blocks[blk1].cpl_in_use) {
energy_cpl += energy[blk1][CPL_CH][bnd];
energy_ch += energy[blk1][ch][bnd];
@@ -549,10 +549,11 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
put_bits(&pb, 7, status->step_index);
|
||||
if (avctx->trellis > 0) {
|
||||
uint8_t buf[64];
|
||||
adpcm_compress_trellis(avctx, &samples_p[ch][1], buf, status,
|
||||
adpcm_compress_trellis(avctx, &samples_p[ch][0], buf, status,
|
||||
64, 1);
|
||||
for (i = 0; i < 64; i++)
|
||||
put_bits(&pb, 4, buf[i ^ 1]);
|
||||
status->prev_sample = status->predictor;
|
||||
} else {
|
||||
for (i = 0; i < 64; i += 2) {
|
||||
int t1, t2;
|
||||
|
@@ -150,6 +150,7 @@ typedef struct AICContext {
|
||||
int16_t *data_ptr[NUM_BANDS];
|
||||
|
||||
DECLARE_ALIGNED(16, int16_t, block)[64];
|
||||
DECLARE_ALIGNED(16, uint8_t, quant_matrix)[64];
|
||||
} AICContext;
|
||||
|
||||
static int aic_decode_header(AICContext *ctx, const uint8_t *src, int size)
|
||||
@@ -285,7 +286,7 @@ static void recombine_block_il(int16_t *dst, const uint8_t *scan,
|
||||
}
|
||||
}
|
||||
|
||||
static void unquant_block(int16_t *block, int q)
|
||||
static void unquant_block(int16_t *block, int q, uint8_t *quant_matrix)
|
||||
{
|
||||
int i;
|
||||
|
||||
@@ -293,7 +294,7 @@ static void unquant_block(int16_t *block, int q)
|
||||
int val = (uint16_t)block[i];
|
||||
int sign = val & 1;
|
||||
|
||||
block[i] = (((val >> 1) ^ -sign) * q * aic_quant_matrix[i] >> 4)
|
||||
block[i] = (((val >> 1) ^ -sign) * q * quant_matrix[i] >> 4)
|
||||
+ sign;
|
||||
}
|
||||
}
|
||||
@@ -334,7 +335,7 @@ static int aic_decode_slice(AICContext *ctx, int mb_x, int mb_y,
|
||||
else
|
||||
recombine_block_il(ctx->block, ctx->scantable.permutated,
|
||||
&base_y, &ext_y, blk);
|
||||
unquant_block(ctx->block, ctx->quant);
|
||||
unquant_block(ctx->block, ctx->quant, ctx->quant_matrix);
|
||||
ctx->dsp.idct(ctx->block);
|
||||
|
||||
if (!ctx->interlaced) {
|
||||
@@ -352,7 +353,7 @@ static int aic_decode_slice(AICContext *ctx, int mb_x, int mb_y,
|
||||
for (blk = 0; blk < 2; blk++) {
|
||||
recombine_block(ctx->block, ctx->scantable.permutated,
|
||||
&base_c, &ext_c);
|
||||
unquant_block(ctx->block, ctx->quant);
|
||||
unquant_block(ctx->block, ctx->quant, ctx->quant_matrix);
|
||||
ctx->dsp.idct(ctx->block);
|
||||
ctx->dsp.put_signed_pixels_clamped(ctx->block, C[blk],
|
||||
ctx->frame->linesize[blk + 1]);
|
||||
@@ -430,6 +431,8 @@ static av_cold int aic_decode_init(AVCodecContext *avctx)
|
||||
for (i = 0; i < 64; i++)
|
||||
scan[i] = i;
|
||||
ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable, scan);
|
||||
for (i = 0; i < 64; i++)
|
||||
ctx->quant_matrix[ctx->dsp.idct_permutation[i]] = aic_quant_matrix[i];
|
||||
|
||||
ctx->mb_width = FFALIGN(avctx->width, 16) >> 4;
|
||||
ctx->mb_height = FFALIGN(avctx->height, 16) >> 4;
|
||||
|
@@ -280,7 +280,7 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
GetBitContext gb;
uint64_t ht_size;
int i, config_offset;
MPEG4AudioConfig m4ac;
MPEG4AudioConfig m4ac = {0};
ALSSpecificConfig *sconf = &ctx->sconf;
AVCodecContext *avctx = ctx->avctx;
uint32_t als_id, header_size, trailer_size;
@@ -108,8 +108,12 @@ av_cold void ff_h264dsp_init_arm(H264DSPContext *c, const int bit_depth,
{
int cpu_flags = av_get_cpu_flags();

if (have_armv6(cpu_flags))
if (have_armv6(cpu_flags) && !(have_vfpv3(cpu_flags) || have_neon(cpu_flags))) {
// This function uses the 'setend' instruction which is deprecated
// on ARMv8. This instruction is serializing on some ARMv7 cores as
// well. Therefore, only use the function on ARMv6.
c->h264_find_start_code_candidate = ff_h264_find_start_code_candidate_armv6;
}
if (have_neon(cpu_flags))
h264dsp_init_neon(c, bit_depth, chroma_format_idc);
}
@@ -507,7 +507,6 @@ int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
dst->convergence_duration = src->convergence_duration;
dst->flags = src->flags;
dst->stream_index = src->stream_index;
dst->side_data_elems = src->side_data_elems;

for (i = 0; i < src->side_data_elems; i++) {
enum AVPacketSideDataType type = src->side_data[i].type;
@@ -214,6 +214,7 @@ typedef struct AVSContext {
|
||||
int luma_scan[4];
|
||||
int qp;
|
||||
int qp_fixed;
|
||||
int pic_qp_fixed;
|
||||
int cbp;
|
||||
ScanTable scantable;
|
||||
|
||||
|
@@ -904,7 +904,7 @@ static inline int decode_slice_header(AVSContext *h, GetBitContext *gb)
|
||||
|
||||
/* mark top macroblocks as unavailable */
|
||||
h->flags &= ~(B_AVAIL | C_AVAIL);
|
||||
if ((h->mby == 0) && (!h->qp_fixed)) {
|
||||
if (!h->pic_qp_fixed) {
|
||||
h->qp_fixed = get_bits1(gb);
|
||||
h->qp = get_bits(gb, 6);
|
||||
}
|
||||
@@ -1027,6 +1027,7 @@ static int decode_pic(AVSContext *h)
|
||||
skip_bits1(&h->gb); //advanced_pred_mode_disable
|
||||
skip_bits1(&h->gb); //top_field_first
|
||||
skip_bits1(&h->gb); //repeat_first_field
|
||||
h->pic_qp_fixed =
|
||||
h->qp_fixed = get_bits1(&h->gb);
|
||||
h->qp = get_bits(&h->gb, 6);
|
||||
if (h->cur.f->pict_type == AV_PICTURE_TYPE_I) {
|
||||
|
@@ -261,7 +261,7 @@ static void cdg_scroll(CDGraphicsContext *cc, uint8_t *data,
|
||||
static int cdg_decode_frame(AVCodecContext *avctx,
|
||||
void *data, int *got_frame, AVPacket *avpkt)
|
||||
{
|
||||
const uint8_t *buf = avpkt->data;
|
||||
GetByteContext gb;
|
||||
int buf_size = avpkt->size;
|
||||
int ret;
|
||||
uint8_t command, inst;
|
||||
@@ -278,6 +278,8 @@ static int cdg_decode_frame(AVCodecContext *avctx,
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
bytestream2_init(&gb, avpkt->data, avpkt->size);
|
||||
|
||||
if ((ret = ff_reget_buffer(avctx, cc->frame)) < 0)
|
||||
return ret;
|
||||
if (!avctx->frame_number) {
|
||||
@@ -285,13 +287,11 @@ static int cdg_decode_frame(AVCodecContext *avctx,
|
||||
memset(cc->frame->data[1], 0, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
command = bytestream_get_byte(&buf);
|
||||
inst = bytestream_get_byte(&buf);
|
||||
command = bytestream2_get_byte(&gb);
|
||||
inst = bytestream2_get_byte(&gb);
|
||||
inst &= CDG_MASK;
|
||||
buf += 2; /// skipping 2 unneeded bytes
|
||||
|
||||
if (buf_size > CDG_HEADER_SIZE)
|
||||
bytestream_get_buffer(&buf, cdg_data, buf_size - CDG_HEADER_SIZE);
|
||||
bytestream2_skip(&gb, 2);
|
||||
bytestream2_get_buffer(&gb, cdg_data, sizeof(cdg_data));
|
||||
|
||||
if ((command & CDG_MASK) == CDG_COMMAND) {
|
||||
switch (inst) {
|
||||
@@ -353,10 +353,9 @@ static int cdg_decode_frame(AVCodecContext *avctx,
|
||||
*got_frame = 1;
|
||||
} else {
|
||||
*got_frame = 0;
|
||||
buf_size = 0;
|
||||
}
|
||||
|
||||
return buf_size;
|
||||
return avpkt->size;
|
||||
}
|
||||
|
||||
static av_cold int cdg_decode_end(AVCodecContext *avctx)
|
||||
|
@@ -135,7 +135,7 @@ static int cinepak_decode_vectors (CinepakContext *s, cvid_strip *strip,
const uint8_t *eod = (data + size);
uint32_t flag, mask;
uint8_t *cb0, *cb1, *cb2, *cb3;
unsigned int x, y;
int x, y;
char *ip0, *ip1, *ip2, *ip3;

flag = 0;
@@ -45,8 +45,11 @@ static int dvdsub_parse(AVCodecParserContext *s,
DVDSubParseContext *pc = s->priv_data;

if (pc->packet_index == 0) {
if (buf_size < 2)
return 0;
if (buf_size < 2 || AV_RB16(buf) && buf_size < 6) {
if (buf_size)
av_log(avctx, AV_LOG_DEBUG, "Parser input %d too small\n", buf_size);
return buf_size;
}
pc->packet_len = AV_RB16(buf);
if (pc->packet_len == 0) /* HD-DVD subpicture packet */
pc->packet_len = AV_RB32(buf+2);
@@ -29,6 +29,7 @@
|
||||
*/
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "bytestream.h"
|
||||
#include "get_bits.h"
|
||||
#include "aandcttab.h"
|
||||
#include "eaidct.h"
|
||||
@@ -237,30 +238,32 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
{
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
const uint8_t *buf_end = buf+buf_size;
|
||||
MadContext *s = avctx->priv_data;
|
||||
AVFrame *frame = data;
|
||||
GetByteContext gb;
|
||||
int width, height;
|
||||
int chunk_type;
|
||||
int inter, ret;
|
||||
|
||||
if (buf_size < 26) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Input buffer too small\n");
|
||||
*got_frame = 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
bytestream2_init(&gb, buf, buf_size);
|
||||
|
||||
chunk_type = AV_RL32(&buf[0]);
|
||||
chunk_type = bytestream2_get_le32(&gb);
|
||||
inter = (chunk_type == MADm_TAG || chunk_type == MADe_TAG);
|
||||
buf += 8;
|
||||
bytestream2_skip(&gb, 10);
|
||||
|
||||
av_reduce(&avctx->time_base.num, &avctx->time_base.den,
|
||||
AV_RL16(&buf[6]), 1000, 1<<30);
|
||||
bytestream2_get_le16(&gb), 1000, 1<<30);
|
||||
|
||||
width = AV_RL16(&buf[8]);
|
||||
height = AV_RL16(&buf[10]);
|
||||
calc_quant_matrix(s, buf[13]);
|
||||
buf += 16;
|
||||
width = bytestream2_get_le16(&gb);
|
||||
height = bytestream2_get_le16(&gb);
|
||||
bytestream2_skip(&gb, 1);
|
||||
calc_quant_matrix(s, bytestream2_get_byte(&gb));
|
||||
bytestream2_skip(&gb, 2);
|
||||
|
||||
if (bytestream2_get_bytes_left(&gb) < 2) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Input data too small\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (width < 16 || height < 16) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Dimensions too small\n");
|
||||
@@ -269,7 +272,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
|
||||
if (avctx->width != width || avctx->height != height) {
|
||||
av_frame_unref(s->last_frame);
|
||||
if((width * height)/2048*7 > buf_end-buf)
|
||||
if((width * height)/2048*7 > bytestream2_get_bytes_left(&gb))
|
||||
return AVERROR_INVALIDDATA;
|
||||
if ((ret = ff_set_dimensions(avctx, width, height)) < 0)
|
||||
return ret;
|
||||
@@ -292,12 +295,13 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
|
||||
av_fast_padded_malloc(&s->bitstream_buf, &s->bitstream_buf_size,
|
||||
buf_end - buf);
|
||||
bytestream2_get_bytes_left(&gb));
|
||||
if (!s->bitstream_buf)
|
||||
return AVERROR(ENOMEM);
|
||||
s->dsp.bswap16_buf(s->bitstream_buf, (const uint16_t*)buf, (buf_end-buf)/2);
|
||||
memset((uint8_t*)s->bitstream_buf + (buf_end-buf), 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
init_get_bits(&s->gb, s->bitstream_buf, 8*(buf_end-buf));
|
||||
s->dsp.bswap16_buf(s->bitstream_buf, (const uint16_t *)(buf + bytestream2_tell(&gb)),
|
||||
bytestream2_get_bytes_left(&gb) / 2);
|
||||
memset((uint8_t*)s->bitstream_buf + bytestream2_get_bytes_left(&gb), 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
init_get_bits(&s->gb, s->bitstream_buf, 8*(bytestream2_get_bytes_left(&gb)));
|
||||
|
||||
for (s->mb_y=0; s->mb_y < (avctx->height+15)/16; s->mb_y++)
|
||||
for (s->mb_x=0; s->mb_x < (avctx->width +15)/16; s->mb_x++)
|
||||
|
@@ -117,6 +117,7 @@ static void fft_ref(FFTComplex *tabr, FFTComplex *tab, int nbits)
|
||||
}
|
||||
}
|
||||
|
||||
#if CONFIG_MDCT
|
||||
static void imdct_ref(FFTSample *out, FFTSample *in, int nbits)
|
||||
{
|
||||
int n = 1<<nbits;
|
||||
@@ -151,8 +152,10 @@ static void mdct_ref(FFTSample *output, FFTSample *input, int nbits)
|
||||
output[k] = REF_SCALE(s, nbits - 1);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_MDCT */
|
||||
|
||||
#if FFT_FLOAT
|
||||
#if CONFIG_DCT
|
||||
static void idct_ref(FFTSample *output, FFTSample *input, int nbits)
|
||||
{
|
||||
int n = 1<<nbits;
|
||||
@@ -185,6 +188,7 @@ static void dct_ref(FFTSample *output, FFTSample *input, int nbits)
|
||||
output[k] = s;
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_DCT */
|
||||
#endif
|
||||
|
||||
|
||||
@@ -310,6 +314,7 @@ int main(int argc, char **argv)
|
||||
tab2 = av_malloc(fft_size * sizeof(FFTSample));
|
||||
|
||||
switch (transform) {
|
||||
#if CONFIG_MDCT
|
||||
case TRANSFORM_MDCT:
|
||||
av_log(NULL, AV_LOG_INFO,"Scale factor is set to %f\n", scale);
|
||||
if (do_inverse)
|
||||
@@ -318,6 +323,7 @@ int main(int argc, char **argv)
|
||||
av_log(NULL, AV_LOG_INFO,"MDCT");
|
||||
ff_mdct_init(m, fft_nbits, do_inverse, scale);
|
||||
break;
|
||||
#endif /* CONFIG_MDCT */
|
||||
case TRANSFORM_FFT:
|
||||
if (do_inverse)
|
||||
av_log(NULL, AV_LOG_INFO,"IFFT");
|
||||
@@ -327,6 +333,7 @@ int main(int argc, char **argv)
|
||||
fft_ref_init(fft_nbits, do_inverse);
|
||||
break;
|
||||
#if FFT_FLOAT
|
||||
# if CONFIG_RDFT
|
||||
case TRANSFORM_RDFT:
|
||||
if (do_inverse)
|
||||
av_log(NULL, AV_LOG_INFO,"IDFT_C2R");
|
||||
@@ -335,6 +342,7 @@ int main(int argc, char **argv)
|
||||
ff_rdft_init(r, fft_nbits, do_inverse ? IDFT_C2R : DFT_R2C);
|
||||
fft_ref_init(fft_nbits, do_inverse);
|
||||
break;
|
||||
# endif /* CONFIG_RDFT */
|
||||
# if CONFIG_DCT
|
||||
case TRANSFORM_DCT:
|
||||
if (do_inverse)
|
||||
@@ -343,7 +351,7 @@ int main(int argc, char **argv)
|
||||
av_log(NULL, AV_LOG_INFO,"DCT_II");
|
||||
ff_dct_init(d, fft_nbits, do_inverse ? DCT_III : DCT_II);
|
||||
break;
|
||||
# endif
|
||||
# endif /* CONFIG_DCT */
|
||||
#endif
|
||||
default:
|
||||
av_log(NULL, AV_LOG_ERROR, "Requested transform not supported\n");
|
||||
@@ -362,6 +370,7 @@ int main(int argc, char **argv)
|
||||
av_log(NULL, AV_LOG_INFO,"Checking...\n");
|
||||
|
||||
switch (transform) {
|
||||
#if CONFIG_MDCT
|
||||
case TRANSFORM_MDCT:
|
||||
if (do_inverse) {
|
||||
imdct_ref((FFTSample *)tab_ref, (FFTSample *)tab1, fft_nbits);
|
||||
@@ -375,6 +384,7 @@ int main(int argc, char **argv)
|
||||
err = check_diff((FFTSample *)tab_ref, tab2, fft_size / 2, scale);
|
||||
}
|
||||
break;
|
||||
#endif /* CONFIG_MDCT */
|
||||
case TRANSFORM_FFT:
|
||||
memcpy(tab, tab1, fft_size * sizeof(FFTComplex));
|
||||
s->fft_permute(s, tab);
|
||||
@@ -384,6 +394,7 @@ int main(int argc, char **argv)
|
||||
err = check_diff((FFTSample *)tab_ref, (FFTSample *)tab, fft_size * 2, 1.0);
|
||||
break;
|
||||
#if FFT_FLOAT
|
||||
#if CONFIG_RDFT
|
||||
case TRANSFORM_RDFT:
|
||||
fft_size_2 = fft_size >> 1;
|
||||
if (do_inverse) {
|
||||
@@ -415,6 +426,8 @@ int main(int argc, char **argv)
|
||||
err = check_diff((float *)tab_ref, (float *)tab2, fft_size, 1.0);
|
||||
}
|
||||
break;
|
||||
#endif /* CONFIG_RDFT */
|
||||
#if CONFIG_DCT
|
||||
case TRANSFORM_DCT:
|
||||
memcpy(tab, tab1, fft_size * sizeof(FFTComplex));
|
||||
d->dct_calc(d, (FFTSample *)tab);
|
||||
@@ -425,6 +438,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
err = check_diff((float *)tab_ref, (float *)tab, fft_size, 1.0);
|
||||
break;
|
||||
#endif /* CONFIG_DCT */
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -476,21 +490,25 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
switch (transform) {
|
||||
#if CONFIG_MDCT
|
||||
case TRANSFORM_MDCT:
|
||||
ff_mdct_end(m);
|
||||
break;
|
||||
#endif /* CONFIG_MDCT */
|
||||
case TRANSFORM_FFT:
|
||||
ff_fft_end(s);
|
||||
break;
|
||||
#if FFT_FLOAT
|
||||
# if CONFIG_RDFT
|
||||
case TRANSFORM_RDFT:
|
||||
ff_rdft_end(r);
|
||||
break;
|
||||
# endif /* CONFIG_RDFT */
|
||||
# if CONFIG_DCT
|
||||
case TRANSFORM_DCT:
|
||||
ff_dct_end(d);
|
||||
break;
|
||||
# endif
|
||||
# endif /* CONFIG_DCT */
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@@ -622,31 +622,31 @@ static int read_header(FFV1Context *f)
|
||||
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
|
||||
}
|
||||
|
||||
colorspace = get_symbol(c, state, 0); //YUV cs type
|
||||
colorspace = get_symbol(c, state, 0); //YUV cs type
|
||||
bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
|
||||
chroma_planes = get_rac(c, state);
|
||||
chroma_h_shift = get_symbol(c, state, 0);
|
||||
chroma_v_shift = get_symbol(c, state, 0);
|
||||
transparency = get_rac(c, state);
|
||||
chroma_planes = get_rac(c, state);
|
||||
chroma_h_shift = get_symbol(c, state, 0);
|
||||
chroma_v_shift = get_symbol(c, state, 0);
|
||||
transparency = get_rac(c, state);
|
||||
|
||||
if (f->plane_count) {
|
||||
if ( colorspace != f->colorspace
|
||||
|| bits_per_raw_sample != f->avctx->bits_per_raw_sample
|
||||
|| chroma_planes != f->chroma_planes
|
||||
|| chroma_h_shift!= f->chroma_h_shift
|
||||
|| chroma_v_shift!= f->chroma_v_shift
|
||||
|| transparency != f->transparency) {
|
||||
if (colorspace != f->colorspace ||
|
||||
bits_per_raw_sample != f->avctx->bits_per_raw_sample ||
|
||||
chroma_planes != f->chroma_planes ||
|
||||
chroma_h_shift != f->chroma_h_shift ||
|
||||
chroma_v_shift != f->chroma_v_shift ||
|
||||
transparency != f->transparency) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
f->colorspace = colorspace;
|
||||
f->colorspace = colorspace;
|
||||
f->avctx->bits_per_raw_sample = bits_per_raw_sample;
|
||||
f->chroma_planes = chroma_planes;
|
||||
f->chroma_h_shift = chroma_h_shift;
|
||||
f->chroma_v_shift = chroma_v_shift;
|
||||
f->transparency = transparency;
|
||||
f->chroma_planes = chroma_planes;
|
||||
f->chroma_h_shift = chroma_h_shift;
|
||||
f->chroma_v_shift = chroma_v_shift;
|
||||
f->transparency = transparency;
|
||||
|
||||
f->plane_count = 2 + f->transparency;
|
||||
}
|
||||
|
@@ -87,6 +87,7 @@ typedef struct G2MContext {
|
||||
|
||||
int compression;
|
||||
int width, height, bpp;
|
||||
int orig_width, orig_height;
|
||||
int tile_width, tile_height;
|
||||
int tiles_x, tiles_y, tile_x, tile_y;
|
||||
|
||||
@@ -700,8 +701,8 @@ static int g2m_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
c->width = bytestream2_get_be32(&bc);
|
||||
c->height = bytestream2_get_be32(&bc);
|
||||
if (c->width < 16 || c->width > avctx->width ||
|
||||
c->height < 16 || c->height > avctx->height) {
|
||||
if (c->width < 16 || c->width > c->orig_width ||
|
||||
c->height < 16 || c->height > c->orig_height) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Invalid frame dimensions %dx%d\n",
|
||||
c->width, c->height);
|
||||
@@ -867,6 +868,10 @@ static av_cold int g2m_decode_init(AVCodecContext *avctx)
|
||||
|
||||
avctx->pix_fmt = AV_PIX_FMT_RGB24;
|
||||
|
||||
// store original sizes and check against those if resize happens
|
||||
c->orig_width = avctx->width;
|
||||
c->orig_height = avctx->height;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -258,26 +258,21 @@ static int gif_read_image(GifState *s, AVFrame *frame)
case 1:
y1 += 8;
ptr += linesize * 8;
if (y1 >= height) {
y1 = pass ? 2 : 4;
ptr = ptr1 + linesize * y1;
pass++;
}
break;
case 2:
y1 += 4;
ptr += linesize * 4;
if (y1 >= height) {
y1 = 1;
ptr = ptr1 + linesize;
pass++;
}
break;
case 3:
y1 += 2;
ptr += linesize * 2;
break;
}
while (y1 >= height) {
y1 = 4 >> pass;
ptr = ptr1 + linesize * y1;
pass++;
}
} else {
ptr += linesize;
}
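The rewritten loop above replaces the per-case pass handling with a single `while (y1 >= height)` step that derives the next start row from `4 >> pass` (4, 2, then 1). For reference, GIF's four interlace passes emit rows starting at offsets 0, 4, 2 and 1 with steps 8, 8, 4 and 2; a small sketch that prints the resulting row order (a hypothetical helper, not the decoder's code):

```c
#include <stdio.h>

/* Print the order in which rows of an interlaced GIF image arrive.
 * Pass 1: rows 0, 8, 16, ...   Pass 2: rows 4, 12, 20, ...
 * Pass 3: rows 2, 6, 10, ...   Pass 4: rows 1, 3, 5, ... */
static void gif_interlace_order(int height)
{
    static const int start[4] = { 0, 4, 2, 1 };
    static const int step[4]  = { 8, 8, 4, 2 };

    for (int pass = 0; pass < 4; pass++)
        for (int y = start[pass]; y < height; y += step[pass])
            printf("row %d (pass %d)\n", y, pass + 1);
}

int main(void)
{
    gif_interlace_order(16);
    return 0;
}
```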
@@ -319,6 +319,14 @@ static int decode_slice(MpegEncContext *s)
}
}

if (s->codec_id == AV_CODEC_ID_H263 &&
(s->workaround_bugs & FF_BUG_AUTODETECT) &&
get_bits_left(&s->gb) >= 64 &&
AV_RB64(s->gb.buffer_end - 8) == 0xCDCDCDCDFC7F0000) {

s->padding_bug_score += 32;
}

if (s->workaround_bugs & FF_BUG_AUTODETECT) {
if (s->padding_bug_score > -2 && !s->data_partitioning)
s->workaround_bugs |= FF_BUG_NO_PADDING;
@@ -601,18 +601,18 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
|
||||
|
||||
if ((h->left_samples_available & 0x8080) != 0x8080) {
|
||||
mode = left[mode];
|
||||
if (is_chroma && (h->left_samples_available & 0x8080)) {
|
||||
// mad cow disease mode, aka MBAFF + constrained_intra_pred
|
||||
mode = ALZHEIMER_DC_L0T_PRED8x8 +
|
||||
(!(h->left_samples_available & 0x8000)) +
|
||||
2 * (mode == DC_128_PRED8x8);
|
||||
}
|
||||
if (mode < 0) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"left block unavailable for requested intra mode at %d %d\n",
|
||||
h->mb_x, h->mb_y);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (is_chroma && (h->left_samples_available & 0x8080)) {
|
||||
// mad cow disease mode, aka MBAFF + constrained_intra_pred
|
||||
mode = ALZHEIMER_DC_L0T_PRED8x8 +
|
||||
(!(h->left_samples_available & 0x8000)) +
|
||||
2 * (mode == DC_128_PRED8x8);
|
||||
}
|
||||
}
|
||||
|
||||
return mode;
|
||||
@@ -634,7 +634,7 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
|
||||
|
||||
#define STARTCODE_TEST \
|
||||
if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
|
||||
if (src[i + 2] != 3) { \
|
||||
if (src[i + 2] != 3 && src[i + 2] != 0) { \
|
||||
/* startcode, so we must be past the end */ \
|
||||
length = i; \
|
||||
} \
|
||||
@@ -707,7 +707,7 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
|
||||
if (src[si + 2] > 3) {
|
||||
dst[di++] = src[si++];
|
||||
dst[di++] = src[si++];
|
||||
} else if (src[si] == 0 && src[si + 1] == 0) {
|
||||
} else if (src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) {
|
||||
if (src[si + 2] == 3) { // escape
|
||||
dst[di++] = 0;
|
||||
dst[di++] = 0;
|
||||
@@ -2142,10 +2142,10 @@ static void decode_postinit(H264Context *h, int setup_finished)
|
||||
stereo->type = AV_STEREO3D_CHECKERBOARD;
|
||||
break;
|
||||
case 1:
|
||||
stereo->type = AV_STEREO3D_LINES;
|
||||
stereo->type = AV_STEREO3D_COLUMNS;
|
||||
break;
|
||||
case 2:
|
||||
stereo->type = AV_STEREO3D_COLUMNS;
|
||||
stereo->type = AV_STEREO3D_LINES;
|
||||
break;
|
||||
case 3:
|
||||
if (h->quincunx_subsampling)
|
||||
@@ -4950,6 +4950,7 @@ again:
|
||||
if(!idr_cleared)
|
||||
idr(h); // FIXME ensure we don't lose some frames if there is reordering
|
||||
idr_cleared = 1;
|
||||
h->has_recovery_point = 1;
|
||||
case NAL_SLICE:
|
||||
init_get_bits(&hx->gb, ptr, bit_length);
|
||||
hx->intra_gb_ptr =
|
||||
|
@@ -677,6 +677,8 @@ typedef struct H264Context {
|
||||
|
||||
int frame_recovered; ///< Initial frame has been completely recovered
|
||||
|
||||
int has_recovery_point;
|
||||
|
||||
int luma_weight_flag[2]; ///< 7.4.3.2 luma_weight_lX_flag
|
||||
int chroma_weight_flag[2]; ///< 7.4.3.2 chroma_weight_lX_flag
|
||||
|
||||
@@ -689,7 +691,7 @@ typedef struct H264Context {
|
||||
|
||||
int16_t slice_row[MAX_SLICES]; ///< to detect when MAX_SLICES is too low
|
||||
|
||||
uint8_t parse_history[4];
|
||||
uint8_t parse_history[6];
|
||||
int parse_history_count;
|
||||
int parse_last_mb;
|
||||
uint8_t *edge_emu_buffer;
|
||||
|
@@ -28,6 +28,7 @@
|
||||
typedef struct H264BSFContext {
|
||||
uint8_t length_size;
|
||||
uint8_t first_idr;
|
||||
uint8_t idr_sps_pps_seen;
|
||||
int extradata_parsed;
|
||||
} H264BSFContext;
|
||||
|
||||
@@ -155,6 +156,7 @@ static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
|
||||
return ret;
|
||||
ctx->length_size = ret;
|
||||
ctx->first_idr = 1;
|
||||
ctx->idr_sps_pps_seen = 0;
|
||||
ctx->extradata_parsed = 1;
|
||||
}
|
||||
|
||||
@@ -174,8 +176,17 @@ static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
|
||||
if (buf + nal_size > buf_end || nal_size < 0)
|
||||
goto fail;
|
||||
|
||||
/* prepend only to the first type 5 NAL unit of an IDR picture */
|
||||
if (ctx->first_idr && (unit_type == 5 || unit_type == 7 || unit_type == 8)) {
|
||||
if (ctx->first_idr && (unit_type == 7 || unit_type == 8))
|
||||
ctx->idr_sps_pps_seen = 1;
|
||||
|
||||
/* if this is a new IDR picture following an IDR picture, reset the idr flag.
|
||||
* Just check first_mb_in_slice to be 0 as this is the simplest solution.
|
||||
* This could be checking idr_pic_id instead, but would complexify the parsing. */
|
||||
if (!ctx->first_idr && unit_type == 5 && (buf[1] & 0x80))
|
||||
ctx->first_idr = 1;
|
||||
|
||||
/* prepend only to the first type 5 NAL unit of an IDR picture, if no sps/pps are already present */
|
||||
if (ctx->first_idr && unit_type == 5 && !ctx->idr_sps_pps_seen) {
|
||||
if ((ret=alloc_and_copy(poutbuf, poutbuf_size,
|
||||
avctx->extradata, avctx->extradata_size,
|
||||
buf, nal_size)) < 0)
|
||||
@@ -185,8 +196,10 @@ static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
|
||||
if ((ret=alloc_and_copy(poutbuf, poutbuf_size,
|
||||
NULL, 0, buf, nal_size)) < 0)
|
||||
goto fail;
|
||||
if (!ctx->first_idr && unit_type == 1)
|
||||
if (!ctx->first_idr && unit_type == 1) {
|
||||
ctx->first_idr = 1;
|
||||
ctx->idr_sps_pps_seen = 0;
|
||||
}
|
||||
}
|
||||
|
||||
buf += nal_size;
|
||||
|
@@ -91,7 +91,7 @@ static int h264_find_frame_end(H264Context *h, const uint8_t *buf,
|
||||
state = 7;
|
||||
} else {
|
||||
h->parse_history[h->parse_history_count++]= buf[i];
|
||||
if (h->parse_history_count>3) {
|
||||
if (h->parse_history_count>5) {
|
||||
unsigned int mb, last_mb= h->parse_last_mb;
|
||||
GetBitContext gb;
|
||||
|
||||
@@ -119,7 +119,7 @@ found:
|
||||
pc->frame_start_found = 0;
|
||||
if (h->is_avc)
|
||||
return next_avc;
|
||||
return i - (state & 5) - 3 * (state > 7);
|
||||
return i - (state & 5) - 5 * (state > 7);
|
||||
}
|
||||
|
||||
static int scan_mmco_reset(AVCodecParserContext *s)
|
||||
|
@@ -771,7 +771,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
if ( err >= 0
&& h->long_ref_count==0
&& (h->short_ref_count<=2 || h->pps.ref_count[0] <= 1 && h->pps.ref_count[1] <= 1 && pps_count == 1)
&& h->pps.ref_count[0]<=2 + (h->picture_structure != PICT_FRAME)
&& h->pps.ref_count[0]<=2 + (h->picture_structure != PICT_FRAME) + (2*!h->has_recovery_point)
&& h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
h->cur_pic_ptr->recovered |= 1;
if(!h->avctx->has_b_frames)
@@ -183,6 +183,8 @@ static int decode_recovery_point(H264Context *h)
if (h->avctx->debug & FF_DEBUG_PICT_INFO)
av_log(h->avctx, AV_LOG_DEBUG, "sei_recovery_frame_cnt: %d\n", h->sei_recovery_frame_cnt);

h->has_recovery_point = 1;

return 0;
}
@@ -102,26 +102,26 @@ static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
|
||||
goto fail;
|
||||
|
||||
s->skip_flag = av_malloc(pic_size_in_ctb);
|
||||
s->tab_ct_depth = av_malloc(sps->min_cb_height * sps->min_cb_width);
|
||||
s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
|
||||
if (!s->skip_flag || !s->tab_ct_depth)
|
||||
goto fail;
|
||||
|
||||
s->cbf_luma = av_malloc(sps->min_tb_width * sps->min_tb_height);
|
||||
s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
|
||||
s->tab_ipm = av_mallocz(min_pu_size);
|
||||
s->is_pcm = av_malloc(min_pu_size);
|
||||
if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
|
||||
goto fail;
|
||||
|
||||
s->filter_slice_edges = av_malloc(ctb_count);
|
||||
s->tab_slice_address = av_malloc(pic_size_in_ctb *
|
||||
s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
|
||||
sizeof(*s->tab_slice_address));
|
||||
s->qp_y_tab = av_malloc(pic_size_in_ctb *
|
||||
s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
|
||||
sizeof(*s->qp_y_tab));
|
||||
if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
|
||||
goto fail;
|
||||
|
||||
s->horizontal_bs = av_mallocz(2 * s->bs_width * (s->bs_height + 1));
|
||||
s->vertical_bs = av_mallocz(2 * s->bs_width * (s->bs_height + 1));
|
||||
s->horizontal_bs = av_mallocz_array(2 * s->bs_width, (s->bs_height + 1));
|
||||
s->vertical_bs = av_mallocz_array(2 * s->bs_width, (s->bs_height + 1));
|
||||
if (!s->horizontal_bs || !s->vertical_bs)
|
||||
goto fail;
|
||||
|
||||
@@ -652,9 +652,9 @@ static int hls_slice_header(HEVCContext *s)
|
||||
av_freep(&sh->entry_point_offset);
|
||||
av_freep(&sh->offset);
|
||||
av_freep(&sh->size);
|
||||
sh->entry_point_offset = av_malloc(sh->num_entry_point_offsets * sizeof(int));
|
||||
sh->offset = av_malloc(sh->num_entry_point_offsets * sizeof(int));
|
||||
sh->size = av_malloc(sh->num_entry_point_offsets * sizeof(int));
|
||||
sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
|
||||
sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
|
||||
sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
|
||||
if (!sh->entry_point_offset || !sh->offset || !sh->size) {
|
||||
sh->num_entry_point_offsets = 0;
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
|
||||
@@ -2059,8 +2059,8 @@ static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int
|
||||
static int hls_slice_data_wpp(HEVCContext *s, const uint8_t *nal, int length)
|
||||
{
|
||||
HEVCLocalContext *lc = s->HEVClc;
|
||||
int *ret = av_malloc((s->sh.num_entry_point_offsets + 1) * sizeof(int));
|
||||
int *arg = av_malloc((s->sh.num_entry_point_offsets + 1) * sizeof(int));
|
||||
int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
|
||||
int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
|
||||
int offset;
|
||||
int startheader, cmpt = 0;
|
||||
int i, j, res = 0;
|
||||
@@ -2818,6 +2818,8 @@ static av_cold int hevc_decode_free(AVCodecContext *avctx)
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(s->pps_list); i++)
|
||||
av_buffer_unref(&s->pps_list[i]);
|
||||
|
||||
av_buffer_unref(&s->current_sps);
|
||||
|
||||
av_freep(&s->sh.entry_point_offset);
|
||||
av_freep(&s->sh.offset);
|
||||
av_freep(&s->sh.size);
|
||||
@@ -2939,6 +2941,13 @@ static int hevc_update_thread_context(AVCodecContext *dst,
|
||||
}
|
||||
}
|
||||
|
||||
av_buffer_unref(&s->current_sps);
|
||||
if (s0->current_sps) {
|
||||
s->current_sps = av_buffer_ref(s0->current_sps);
|
||||
if (!s->current_sps)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
if (s->sps != s0->sps)
|
||||
ret = set_sps(s, s0->sps);
|
||||
|
||||
|
@@ -776,6 +776,8 @@ typedef struct HEVCContext {
AVBufferRef *sps_list[MAX_SPS_COUNT];
AVBufferRef *pps_list[MAX_PPS_COUNT];

AVBufferRef *current_sps;

AVBufferPool *tab_mvf_pool;
AVBufferPool *rpl_tab_pool;
@@ -331,6 +331,9 @@ static void hevc_close(AVCodecParserContext *s)
for (i = 0; i < FF_ARRAY_ELEMS(h->pps_list); i++)
av_buffer_unref(&h->pps_list[i]);

av_buffer_unref(&h->current_sps);
h->sps = NULL;

for (i = 0; i < h->nals_allocated; i++)
av_freep(&h->nals[i].rbsp_buffer);
av_freep(&h->nals);
@@ -956,6 +956,12 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
|
||||
if (s->pps_list[i] && ((HEVCPPS*)s->pps_list[i]->data)->sps_id == sps_id)
|
||||
av_buffer_unref(&s->pps_list[i]);
|
||||
}
|
||||
if (s->sps_list[sps_id] && s->sps == (HEVCSPS*)s->sps_list[sps_id]->data) {
|
||||
av_buffer_unref(&s->current_sps);
|
||||
s->current_sps = av_buffer_ref(s->sps_list[sps_id]);
|
||||
if (!s->current_sps)
|
||||
s->sps = NULL;
|
||||
}
|
||||
av_buffer_unref(&s->sps_list[sps_id]);
|
||||
s->sps_list[sps_id] = sps_buf;
|
||||
}
|
||||
|
@@ -186,7 +186,8 @@ static int generate_joint_tables(HYuvContext *s)
|
||||
}
|
||||
}
|
||||
ff_free_vlc(&s->vlc[4]);
|
||||
if ((ret = init_vlc(&s->vlc[4], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0)) < 0)
|
||||
if ((ret = init_vlc(&s->vlc[4], VLC_BITS, i, len, 1, 1,
|
||||
bits, 2, 2, 0)) < 0)
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
@@ -195,21 +196,20 @@ static int generate_joint_tables(HYuvContext *s)
|
||||
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
|
||||
{
|
||||
GetBitContext gb;
|
||||
int i;
|
||||
int ret;
|
||||
int i, ret;
|
||||
int count = 3;
|
||||
|
||||
init_get_bits(&gb, src, length * 8);
|
||||
if ((ret = init_get_bits(&gb, src, length * 8)) < 0)
|
||||
return ret;
|
||||
|
||||
if (s->version > 2)
|
||||
count = 1 + s->alpha + 2*s->chroma;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
if (read_len_table(s->len[i], &gb, s->vlc_n) < 0)
|
||||
return -1;
|
||||
if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n) < 0) {
return -1;
}
if ((ret = read_len_table(s->len[i], &gb, s->vlc_n)) < 0)
return ret;
if ((ret = ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n)) < 0)
return ret;
ff_free_vlc(&s->vlc[i]);
if ((ret = init_vlc(&s->vlc[i], VLC_BITS, s->vlc_n, s->len[i], 1, 1,
s->bits[i], 4, 4, 0)) < 0)

@@ -225,18 +225,17 @@ static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
static int read_old_huffman_tables(HYuvContext *s)
{
GetBitContext gb;
int i;
int ret;
int i, ret;
init_get_bits(&gb, classic_shift_luma,
classic_shift_luma_table_size * 8);
if (read_len_table(s->len[0], &gb, 256) < 0)
return -1;
if ((ret = read_len_table(s->len[0], &gb, 256)) < 0)
return ret;
init_get_bits(&gb, classic_shift_chroma,
classic_shift_chroma_table_size * 8);
if (read_len_table(s->len[1], &gb, 256) < 0)
return -1;
if ((ret = read_len_table(s->len[1], &gb, 256)) < 0)
return ret;
for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

@@ -264,6 +263,7 @@ static int read_old_huffman_tables(HYuvContext *s)
static av_cold int decode_init(AVCodecContext *avctx)
{
HYuvContext *s = avctx->priv_data;
int ret;
memset(s->vlc, 0, 4 * sizeof(VLC));

@@ -313,10 +313,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
s->context = ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
if ( read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
avctx->extradata_size - 4) < 0)
return AVERROR_INVALIDDATA;
}else{
if ((ret = read_huffman_tables(s, avctx->extradata + 4,
avctx->extradata_size - 4)) < 0)
return ret;
} else {
switch (avctx->bits_per_coded_sample & 7) {
case 1:
s->predictor = LEFT;

@@ -342,8 +342,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
s->context = 0;
if (read_old_huffman_tables(s) < 0)
return AVERROR_INVALIDDATA;
if ((ret = read_old_huffman_tables(s)) < 0)
return ret;
}
if (s->version <= 2) {

@@ -520,13 +520,16 @@ static av_cold int decode_init(AVCodecContext *avctx)
av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
return AVERROR_INVALIDDATA;
}
if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P && avctx->width%4) {
av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 this colorspace and predictor\n");
if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P &&
avctx->width % 4) {
av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 "
"for this combination of colorspace and predictor type.\n");
return AVERROR_INVALIDDATA;
}
if (ff_huffyuv_alloc_temp(s)) {
if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
ff_huffyuv_common_end(s);
return AVERROR(ENOMEM);
return ret;
}
return 0;

@@ -535,23 +538,23 @@ static av_cold int decode_init(AVCodecContext *avctx)
static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
{
HYuvContext *s = avctx->priv_data;
int i;
int i, ret;
if (ff_huffyuv_alloc_temp(s)) {
if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
ff_huffyuv_common_end(s);
return AVERROR(ENOMEM);
return ret;
}
for (i = 0; i < 8; i++)
s->vlc[i].table = NULL;
if (s->version >= 2) {
if (read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
avctx->extradata_size) < 0)
return AVERROR_INVALIDDATA;
if ((ret = read_huffman_tables(s, avctx->extradata + 4,
avctx->extradata_size)) < 0)
return ret;
} else {
if (read_old_huffman_tables(s) < 0)
return AVERROR_INVALIDDATA;
if ((ret = read_old_huffman_tables(s)) < 0)
return ret;
}
return 0;

@@ -798,14 +801,15 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
if (s->context) {
table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
if (table_size < 0)
return AVERROR_INVALIDDATA;
return table_size;
}
if ((unsigned)(buf_size-table_size) >= INT_MAX / 8)
return AVERROR_INVALIDDATA;
init_get_bits(&s->gb, s->bitstream_buffer+table_size,
(buf_size-table_size) * 8);
if ((ret = init_get_bits(&s->gb, s->bitstream_buffer + table_size,
(buf_size - table_size) * 8)) < 0)
return ret;
fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];

@@ -847,9 +847,9 @@ static int decode_frame(AVCodecContext *avctx,
break;
case 4:
bytestream2_init(&gb, buf, buf_size);
if (avctx->codec_tag == MKTAG('R', 'G', 'B', '8'))
if (avctx->codec_tag == MKTAG('R', 'G', 'B', '8') && avctx->pix_fmt == AV_PIX_FMT_RGB32)
decode_rgb8(&gb, s->frame->data[0], avctx->width, avctx->height, s->frame->linesize[0]);
else if (avctx->codec_tag == MKTAG('R', 'G', 'B', 'N'))
else if (avctx->codec_tag == MKTAG('R', 'G', 'B', 'N') && avctx->pix_fmt == AV_PIX_FMT_RGB444)
decode_rgbn(&gb, s->frame->data[0], avctx->width, avctx->height, s->frame->linesize[0]);
else
return unsupported(avctx);
@@ -224,7 +224,7 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
if (!comp->i_data)
return AVERROR(ENOMEM);
}
comp->reslevel = av_calloc(codsty->nreslevels, sizeof(*comp->reslevel));
comp->reslevel = av_mallocz_array(codsty->nreslevels, sizeof(*comp->reslevel));
if (!comp->reslevel)
return AVERROR(ENOMEM);
/* LOOP on resolution levels */

@@ -272,7 +272,7 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
reslevel->log2_prec_height) -
(reslevel->coord[1][0] >> reslevel->log2_prec_height);
reslevel->band = av_calloc(reslevel->nbands, sizeof(*reslevel->band));
reslevel->band = av_mallocz_array(reslevel->nbands, sizeof(*reslevel->band));
if (!reslevel->band)
return AVERROR(ENOMEM);

@@ -368,9 +368,9 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
for (j = 0; j < 2; j++)
band->coord[1][j] = ff_jpeg2000_ceildiv(band->coord[1][j], dy);
band->prec = av_calloc(reslevel->num_precincts_x *
(uint64_t)reslevel->num_precincts_y,
sizeof(*band->prec));
band->prec = av_mallocz_array(reslevel->num_precincts_x *
(uint64_t)reslevel->num_precincts_y,
sizeof(*band->prec));
if (!band->prec)
return AVERROR(ENOMEM);

@@ -504,22 +504,29 @@ void ff_jpeg2000_cleanup(Jpeg2000Component *comp, Jpeg2000CodingStyle *codsty)
for (reslevelno = 0;
comp->reslevel && reslevelno < codsty->nreslevels;
reslevelno++) {
Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno;
Jpeg2000ResLevel *reslevel;
if (!comp->reslevel)
continue;
reslevel = comp->reslevel + reslevelno;
for (bandno = 0; bandno < reslevel->nbands; bandno++) {
if (reslevel->band) {
Jpeg2000Band *band = reslevel->band + bandno;
for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++) {
if (band->prec) {
Jpeg2000Prec *prec = band->prec + precno;
av_freep(&prec->zerobits);
av_freep(&prec->cblkincl);
av_freep(&prec->cblk);
}
}
Jpeg2000Band *band;
av_freep(&band->prec);
if (!reslevel->band)
continue;
band = reslevel->band + bandno;
for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++) {
if (band->prec) {
Jpeg2000Prec *prec = band->prec + precno;
av_freep(&prec->zerobits);
av_freep(&prec->cblkincl);
av_freep(&prec->cblk);
}
}
av_freep(&band->prec);
}
av_freep(&reslevel->band);
}

@@ -217,6 +217,11 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s,
x += stride;
}
if (x >= w) {
av_log(NULL, AV_LOG_ERROR, "run overflow\n");
return;
}
/* decode run termination value */
Rb = R(last, x);
RItype = (FFABS(Ra - Rb) <= state->near) ? 1 : 0;

@@ -96,8 +96,7 @@ static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;
WebRtcIlbcfix_DecodeImpl((WebRtc_Word16*) frame->data[0],
(const WebRtc_UWord16*) buf, &s->decoder, 1);
WebRtcIlbcfix_DecodeImpl((int16_t *) frame->data[0], (const uint16_t *) buf, &s->decoder, 1);
*got_frame_ptr = 1;

@@ -170,7 +169,7 @@ static int ilbc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
if ((ret = ff_alloc_packet2(avctx, avpkt, 50)) < 0)
return ret;
WebRtcIlbcfix_EncodeImpl((WebRtc_UWord16*) avpkt->data, (const WebRtc_Word16*) frame->data[0], &s->encoder);
WebRtcIlbcfix_EncodeImpl((uint16_t *) avpkt->data, (const int16_t *) frame->data[0], &s->encoder);
avpkt->size = s->encoder.no_of_bytes;
*got_packet_ptr = 1;

@@ -183,6 +183,7 @@ static int mp3lame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
MPADecodeHeader hdr;
int len, ret, ch;
int lame_result;
uint32_t h;
if (frame) {
switch (avctx->sample_fmt) {

@@ -238,7 +239,12 @@ static int mp3lame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
determine the frame size. */
if (s->buffer_index < 4)
return 0;
if (avpriv_mpegaudio_decode_header(&hdr, AV_RB32(s->buffer))) {
h = AV_RB32(s->buffer);
if (ff_mpa_check_header(h) < 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid mp3 header at start of buffer\n");
return AVERROR_BUG;
}
if (avpriv_mpegaudio_decode_header(&hdr, h)) {
av_log(avctx, AV_LOG_ERROR, "free format output not supported\n");
return -1;
}
@@ -202,10 +202,10 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
case AV_STEREO3D_CHECKERBOARD:
fpa_type = 0;
break;
case AV_STEREO3D_LINES:
case AV_STEREO3D_COLUMNS:
fpa_type = 1;
break;
case AV_STEREO3D_COLUMNS:
case AV_STEREO3D_LINES:
fpa_type = 2;
break;
case AV_STEREO3D_SIDEBYSIDE:

@@ -239,7 +239,7 @@ int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
{
int len, nb_components, i, width, height, pix_fmt_id, ret;
int len, nb_components, i, width, height, bits, pix_fmt_id, ret;
int h_count[MAX_COMPONENTS];
int v_count[MAX_COMPONENTS];

@@ -249,11 +249,11 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
/* XXX: verify len field validity */
len = get_bits(&s->gb, 16);
s->avctx->bits_per_raw_sample =
s->bits = get_bits(&s->gb, 8);
bits = get_bits(&s->gb, 8);
if (s->pegasus_rct)
s->bits = 9;
if (s->bits == 9 && !s->pegasus_rct)
bits = 9;
if (bits == 9 && !s->pegasus_rct)
s->rct = 1; // FIXME ugly
if(s->lossless && s->avctx->lowres){

@@ -283,7 +283,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
return AVERROR_INVALIDDATA;
}
}
if (s->ls && !(s->bits <= 8 || nb_components == 1)) {
if (s->ls && !(bits <= 8 || nb_components == 1)) {
avpriv_report_missing_feature(s->avctx,
"JPEG-LS that is not <= 8 "
"bits/component or 16-bit gray");

@@ -329,11 +329,13 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
/* if different size, realloc/alloc picture */
if ( width != s->width || height != s->height
|| bits != s->bits
|| memcmp(s->h_count, h_count, sizeof(h_count))
|| memcmp(s->v_count, v_count, sizeof(v_count))) {
s->width = width;
s->height = height;
s->bits = bits;
memcpy(s->h_count, h_count, sizeof(h_count));
memcpy(s->v_count, v_count, sizeof(v_count));
s->interlaced = 0;

@@ -1718,7 +1720,7 @@ static int mjpeg_decode_com(MJpegDecodeContext *s)
parse_avid(s, cbuf, len);
} else if (!strcmp(cbuf, "CS=ITU601"))
s->cs_itu601 = 1;
else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32)) ||
else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
(!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
s->flipped = 1;

@@ -111,7 +111,7 @@ static int mm_decode_intra(MmContext * s, int half_horiz, int half_vert)
if (color) {
memset(s->frame->data[0] + y*s->frame->linesize[0] + x, color, run_length);
if (half_vert)
if (half_vert && y + half_vert < s->avctx->height)
memset(s->frame->data[0] + (y+1)*s->frame->linesize[0] + x, color, run_length);
}
x+= run_length;

@@ -1878,6 +1878,14 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
} else
goto eos;
}
if (s->mb_y >= ((s->height + 15) >> 4) &&
s->progressive_frame &&
!s->progressive_sequence &&
get_bits_left(&s->gb) <= 8 &&
get_bits_left(&s->gb) >= 0 &&
s->mb_skip_run == -1 &&
show_bits(&s->gb, 8) == 0)
goto eos;
ff_init_block_index(s);
}
@@ -24,6 +24,8 @@
* MPEG Audio header decoder.
*/
#include "libavutil/common.h"
#include "avcodec.h"
#include "mpegaudio.h"
#include "mpegaudiodata.h"

@@ -45,6 +47,8 @@ int avpriv_mpegaudio_decode_header(MPADecodeHeader *s, uint32_t header)
s->layer = 4 - ((header >> 17) & 3);
/* extract frequency */
sample_rate_index = (header >> 10) & 3;
if (sample_rate_index >= FF_ARRAY_ELEMS(avpriv_mpa_freq_tab))
sample_rate_index = 0;
sample_rate = avpriv_mpa_freq_tab[sample_rate_index] >> (s->lsf + mpeg25);
sample_rate_index += 3 * (s->lsf + mpeg25);
s->sample_rate_index = sample_rate_index;

@@ -1368,6 +1368,9 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
{
int i, err = 0;
if (!s->context_initialized)
return AVERROR(EINVAL);
if (s->slice_context_count > 1) {
for (i = 0; i < s->slice_context_count; i++) {
free_duplicate_context(s->thread_context[i]);

@@ -1397,8 +1400,8 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
s->mb_height = (s->height + 15) / 16;
if ((s->width || s->height) &&
av_image_check_size(s->width, s->height, 0, s->avctx))
return AVERROR_INVALIDDATA;
(err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
goto fail;
if ((err = init_context_frame(s)))
goto fail;

@@ -1414,7 +1417,7 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
}
for (i = 0; i < nb_slices; i++) {
if (init_duplicate_context(s->thread_context[i]) < 0)
if ((err = init_duplicate_context(s->thread_context[i])) < 0)
goto fail;
s->thread_context[i]->start_mb_y =
(s->mb_height * (i) + nb_slices / 2) / nb_slices;
@@ -214,6 +214,13 @@ static int parse_picture_segment(AVCodecContext *avctx,
/* Decode rle bitmap length, stored size includes width/height data */
rle_bitmap_len = bytestream_get_be24(&buf) - 2*2;
if (buf_size > rle_bitmap_len) {
av_log(avctx, AV_LOG_ERROR,
"Buffer dimension %d larger than the expected RLE data %d\n",
buf_size, rle_bitmap_len);
return AVERROR_INVALIDDATA;
}
/* Get bitmap dimensions from data */
width = bytestream_get_be16(&buf);
height = bytestream_get_be16(&buf);

@@ -224,11 +231,6 @@ static int parse_picture_segment(AVCodecContext *avctx,
return -1;
}
if (buf_size > rle_bitmap_len) {
av_log(avctx, AV_LOG_ERROR, "too much RLE data\n");
return AVERROR_INVALIDDATA;
}
ctx->pictures[picture_id].w = width;
ctx->pictures[picture_id].h = height;

@@ -633,7 +633,7 @@ static int decode_frame(AVCodecContext *avctx,
} else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
s->color_type == PNG_COLOR_TYPE_PALETTE) {
avctx->pix_fmt = AV_PIX_FMT_PAL8;
} else if (s->bit_depth == 1) {
} else if (s->bit_depth == 1 && s->bits_per_pixel == 1) {
avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
} else if (s->bit_depth == 8 &&
s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {

@@ -841,10 +841,11 @@ static int decode_frame(AVCodecContext *avctx,
int i, j;
uint8_t *pd = p->data[0];
uint8_t *pd_last = s->last_picture.f->data[0];
int ls = FFMIN(av_image_get_linesize(p->format, s->width, 0), s->width * s->bpp);
ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
for (j = 0; j < s->height; j++) {
for (i = 0; i < s->width * s->bpp; i++) {
for (i = 0; i < ls; i++) {
pd[i] += pd_last[i];
}
pd += s->image_linesize;
@@ -210,6 +210,7 @@ typedef struct ProresContext {
int bits_per_mb;
int force_quant;
int alpha_bits;
int warn;
char *vendor;
int quant_sel;

@@ -472,7 +473,6 @@ static void put_alpha_run(PutBitContext *pb, int run)
// todo alpha quantisation for high quants
static int encode_alpha_plane(ProresContext *ctx, PutBitContext *pb,
const uint16_t *src, int linesize,
int mbs_per_slice, uint16_t *blocks,
int quant)
{

@@ -567,11 +567,15 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic,
get_alpha_data(ctx, src, linesize, xp, yp,
pwidth, avctx->height / ctx->pictures_per_frame,
ctx->blocks[0], mbs_per_slice, ctx->alpha_bits);
sizes[i] = encode_alpha_plane(ctx, pb, src, linesize,
mbs_per_slice, ctx->blocks[0],
quant);
sizes[i] = encode_alpha_plane(ctx, pb, mbs_per_slice,
ctx->blocks[0], quant);
}
total_size += sizes[i];
if (put_bits_left(pb) < 0) {
av_log(avctx, AV_LOG_ERROR,
"Underestimated required buffer size.\n");
return AVERROR_BUG;
}
}
return total_size;
}

@@ -936,15 +940,16 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
int slice_hdr_size = 2 + 2 * (ctx->num_planes - 1);
int frame_size, picture_size, slice_size;
int pkt_size, ret;
int max_slice_size = (ctx->frame_size_upper_bound - 200) / (ctx->pictures_per_frame * ctx->slices_per_picture + 1);
uint8_t frame_flags;
*avctx->coded_frame = *pic;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
pkt_size = ctx->frame_size_upper_bound + FF_MIN_BUFFER_SIZE;
pkt_size = ctx->frame_size_upper_bound;
if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size + FF_MIN_BUFFER_SIZE)) < 0)
return ret;
orig_buf = pkt->data;

@@ -1020,8 +1025,44 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
bytestream_put_byte(&buf, slice_hdr_size << 3);
slice_hdr = buf;
buf += slice_hdr_size - 1;
if (pkt_size <= buf - orig_buf + 2 * max_slice_size) {
uint8_t *start = pkt->data;
// Recompute new size according to max_slice_size
// and deduce delta
int delta = 200 + (ctx->pictures_per_frame *
ctx->slices_per_picture + 1) *
max_slice_size - pkt_size;
delta = FFMAX(delta, 2 * max_slice_size);
ctx->frame_size_upper_bound += delta;
if (!ctx->warn) {
avpriv_request_sample(avctx,
"Packet too small: is %i,"
" needs %i (slice: %i). "
"Correct allocation",
pkt_size, delta, max_slice_size);
ctx->warn = 1;
}
ret = av_grow_packet(pkt, delta);
if (ret < 0)
return ret;
pkt_size += delta;
// restore pointers
orig_buf = pkt->data + (orig_buf - start);
buf = pkt->data + (buf - start);
picture_size_pos = pkt->data + (picture_size_pos - start);
slice_sizes = pkt->data + (slice_sizes - start);
slice_hdr = pkt->data + (slice_hdr - start);
tmp = pkt->data + (tmp - start);
}
init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)) * 8);
encode_slice(avctx, pic, &pb, sizes, x, y, q, mbs_per_slice);
ret = encode_slice(avctx, pic, &pb, sizes, x, y, q,
mbs_per_slice);
if (ret < 0)
return ret;
bytestream_put_byte(&slice_hdr, q);
slice_size = slice_hdr_size + sizes[ctx->num_planes - 1];

@@ -1031,6 +1072,8 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
}
bytestream_put_be16(&slice_sizes, slice_size);
buf += slice_size - slice_hdr_size;
if (max_slice_size < slice_size)
max_slice_size = slice_size;
}
}

@@ -1203,16 +1246,22 @@ static av_cold int encode_init(AVCodecContext *avctx)
ctx->bits_per_mb = ls * 8;
if (ctx->chroma_factor == CFACTOR_Y444)
ctx->bits_per_mb += ls * 4;
if (ctx->num_planes == 4)
ctx->bits_per_mb += ls * 4;
}
ctx->frame_size_upper_bound = ctx->pictures_per_frame *
ctx->slices_per_picture *
ctx->frame_size_upper_bound = (ctx->pictures_per_frame *
ctx->slices_per_picture + 1) *
(2 + 2 * ctx->num_planes +
(mps * ctx->bits_per_mb) / 8)
+ 200;
if (ctx->alpha_bits) {
// The alpha plane is run-coded and might exceed the bit budget.
ctx->frame_size_upper_bound += (ctx->pictures_per_frame *
ctx->slices_per_picture + 1) *
/* num pixels per slice */ (ctx->mbs_per_slice * 256 *
/* bits per pixel */ (1 + ctx->alpha_bits + 1) + 7 >> 3);
}
avctx->codec_tag = ctx->profile_info->tag;
av_log(avctx, AV_LOG_DEBUG,
@@ -163,7 +163,7 @@ static void av_noinline qpeg_decode_inter(QpegContext *qctx, uint8_t *dst,
/* check motion vector */
if ((me_x + filled < 0) || (me_x + me_w + filled > width) ||
(height - me_y - me_h < 0) || (height - me_y > orig_height) ||
(height - me_y - me_h < 0) || (height - me_y >= orig_height) ||
(filled + me_w > width) || (height - me_h < 0))
av_log(NULL, AV_LOG_ERROR, "Bogus motion vector (%i,%i), block size %ix%i at %i,%i\n",
me_x, me_y, me_w, me_h, filled, height);

@@ -34,7 +34,6 @@
#include "celp_filters.h"
#include "ra144.h"
static av_cold int ra144_encode_close(AVCodecContext *avctx)
{
RA144Context *ractx = avctx->priv_data;

@@ -70,7 +70,7 @@ typedef struct SmcContext {
row_ptr += stride * 4; \
} \
total_blocks--; \
if (total_blocks < 0) \
if (total_blocks < 0 + !!n_blocks) \
{ \
av_log(s->avctx, AV_LOG_INFO, "warning: block counter just went negative (this should not happen)\n"); \
return; \

@@ -689,7 +689,7 @@ av_cold void ff_snow_common_end(SnowContext *s)
for(i=0; i<MAX_REF_FRAMES; i++){
av_freep(&s->ref_mvs[i]);
av_freep(&s->ref_scores[i]);
if(s->last_picture[i]->data[0]) {
if(s->last_picture[i] && s->last_picture[i]->data[0]) {
av_assert0(s->last_picture[i]->data[0] != s->current_picture->data[0]);
}
av_frame_free(&s->last_picture[i]);

@@ -655,7 +655,10 @@ static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, i
if(v){
v= 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3*ff_quant3bA[t&0xFF]]);
if ((uint16_t)v != v) {
av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
v = 1;
}
xc->x=x;
(xc++)->coeff= v;
}

@@ -665,6 +668,10 @@ static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, i
else run= INT_MAX;
v= 2*(get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3]);
if ((uint16_t)v != v) {
av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
v = 1;
}
xc->x=x;
(xc++)->coeff= v;
@@ -60,6 +60,10 @@ typedef struct SVQ1Context {
HpelDSPContext hdsp;
GetBitContext gb;
AVFrame *prev;
uint8_t *pkt_swapped;
int pkt_swapped_allocated;
int width;
int height;
int frame_code;

@@ -624,7 +628,24 @@ static int svq1_decode_frame(AVCodecContext *avctx, void *data,
/* swap some header bytes (why?) */
if (s->frame_code != 0x20) {
uint32_t *src = (uint32_t *)(buf + 4);
uint32_t *src;
if (buf_size < 9 * 4) {
av_log(avctx, AV_LOG_ERROR, "Input packet too small\n");
return AVERROR_INVALIDDATA;
}
av_fast_padded_malloc(&s->pkt_swapped, &s->pkt_swapped_allocated,
buf_size);
if (!s->pkt_swapped)
return AVERROR(ENOMEM);
memcpy(s->pkt_swapped, buf, buf_size);
buf = s->pkt_swapped;
init_get_bits(&s->gb, buf, buf_size * 8);
skip_bits(&s->gb, 22);
src = (uint32_t *)(s->pkt_swapped + 4);
if (buf_size < 36)
return AVERROR_INVALIDDATA;

@@ -796,6 +817,8 @@ static av_cold int svq1_decode_end(AVCodecContext *avctx)
SVQ1Context *s = avctx->priv_data;
av_frame_free(&s->prev);
av_freep(&s->pkt_swapped);
s->pkt_swapped_allocated = 0;
return 0;
}
@@ -604,13 +604,13 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
s->height = value;
break;
case TIFF_BPP:
s->bppcount = count;
if (count > 4) {
if (count > 4U) {
av_log(s->avctx, AV_LOG_ERROR,
"This format is not supported (bpp=%d, %d components)\n",
s->bpp, count);
value, count);
return AVERROR_INVALIDDATA;
}
s->bppcount = count;
if (count == 1)
s->bpp = value;
else {

@@ -628,6 +628,13 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
s->bpp = -1;
}
}
if (s->bpp > 64U) {
av_log(s->avctx, AV_LOG_ERROR,
"This format is not supported (bpp=%d, %d components)\n",
s->bpp, count);
s->bpp = 0;
return AVERROR_INVALIDDATA;
}
break;
case TIFF_SAMPLES_PER_PIXEL:
if (count != 1) {

@@ -357,6 +357,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
case AV_PIX_FMT_GBRP12BE:
case AV_PIX_FMT_GBRP14LE:
case AV_PIX_FMT_GBRP14BE:
case AV_PIX_FMT_GBRP16LE:
case AV_PIX_FMT_GBRP16BE:
w_align = 16; //FIXME assume 16 pixel per macroblock
h_align = 16 * 2; // interlaced needs 2 macroblocks height
break;

@@ -386,6 +388,10 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
w_align = 4;
h_align = 4;
}
if (s->codec_id == AV_CODEC_ID_JV) {
w_align = 8;
h_align = 8;
}
break;
case AV_PIX_FMT_BGR24:
if ((s->codec_id == AV_CODEC_ID_MSZH) ||

@@ -70,7 +70,7 @@ static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
code += 0x80000000u >> (he[i].len - 1);
}
return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 10), last + 1,
return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1,
bits, sizeof(*bits), sizeof(*bits),
codes, sizeof(*codes), sizeof(*codes),
syms, sizeof(*syms), sizeof(*syms), 0);

@@ -6005,7 +6005,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
/* skip B-frames if we don't have reference frames */
if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
goto err;
goto end;
}
if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||

@@ -1314,7 +1314,9 @@ static av_always_inline int setup_classifs(vorbis_context *vc,
vorbis_residue *vr,
uint8_t *do_not_decode,
unsigned ch_used,
int partition_count)
int partition_count,
int ptns_to_read
)
{
int p, j, i;
unsigned c_p_c = vc->codebooks[vr->classbook].dimensions;

@@ -1336,7 +1338,7 @@ static av_always_inline int setup_classifs(vorbis_context *vc,
for (i = partition_count + c_p_c - 1; i >= partition_count; i--) {
temp2 = (((uint64_t)temp) * inverse_class) >> 32;
if (i < vr->ptns_to_read)
if (i < ptns_to_read)
vr->classifs[p + i] = temp - temp2 * vr->classifications;
temp = temp2;
}

@@ -1344,13 +1346,13 @@ static av_always_inline int setup_classifs(vorbis_context *vc,
for (i = partition_count + c_p_c - 1; i >= partition_count; i--) {
temp2 = temp / vr->classifications;
if (i < vr->ptns_to_read)
if (i < ptns_to_read)
vr->classifs[p + i] = temp - temp2 * vr->classifications;
temp = temp2;
}
}
}
p += vr->ptns_to_read;
p += ptns_to_read;
}
return 0;
}

@@ -1404,7 +1406,7 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
for (partition_count = 0; partition_count < ptns_to_read;) { // SPEC error
if (!pass) {
int ret;
if ((ret = setup_classifs(vc, vr, do_not_decode, ch_used, partition_count)) < 0)
if ((ret = setup_classifs(vc, vr, do_not_decode, ch_used, partition_count, ptns_to_read)) < 0)
return ret;
}
for (i = 0; (i < c_p_c) && (partition_count < ptns_to_read); ++i) {
@@ -253,6 +253,10 @@ static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb,
return sign ? ~ret : ret;
error:
ret = get_bits_left(gb);
if (ret <= 0) {
av_log(ctx->avctx, AV_LOG_ERROR, "Too few bits (%d) left\n", ret);
}
*last = 1;
return 0;
}

@@ -638,7 +638,7 @@ static uint32_t log2sample(uint32_t v, int limit, uint32_t *result)
if ((v += v >> 9) < (1 << 8)) {
dbits = nbits_table[v];
result += (dbits << 8) + wp_log2_table[(v << (9 - dbits)) & 0xff];
*result += (dbits << 8) + wp_log2_table[(v << (9 - dbits)) & 0xff];
} else {
if (v < (1L << 16))
dbits = nbits_table[v >> 8] + 8;

@@ -647,7 +647,7 @@ static uint32_t log2sample(uint32_t v, int limit, uint32_t *result)
else
dbits = nbits_table[v >> 24] + 24;
result += dbits = (dbits << 8) + wp_log2_table[(v >> (dbits - 9)) & 0xff];
*result += dbits = (dbits << 8) + wp_log2_table[(v >> (dbits - 9)) & 0xff];
if (limit && dbits >= limit)
return 1;

@@ -2876,10 +2876,11 @@ static int wavpack_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
return AVERROR(ENOMEM);
}
if ((ret = ff_alloc_packet2(avctx, avpkt, s->block_samples * avctx->channels * 8)) < 0)
buf_size = s->block_samples * avctx->channels * 8
+ 200 /* for headers */;
if ((ret = ff_alloc_packet2(avctx, avpkt, buf_size)) < 0)
return ret;
buf = avpkt->data;
buf_size = avpkt->size;
for (s->ch_offset = 0; s->ch_offset < avctx->channels;) {
set_samplerate(s);

@@ -1024,7 +1024,7 @@ static int apply_color_indexing_transform(WebPContext *s)
ImageContext *img;
ImageContext *pal;
int i, x, y;
uint8_t *p, *pi;
uint8_t *p;
img = &s->image[IMAGE_ROLE_ARGB];
pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];

@@ -1062,11 +1062,11 @@ static int apply_color_indexing_transform(WebPContext *s)
p = GET_PIXEL(img->frame, x, y);
i = p[2];
if (i >= pal->frame->width) {
av_log(s->avctx, AV_LOG_ERROR, "invalid palette index %d\n", i);
return AVERROR_INVALIDDATA;
AV_WB32(p, 0x00000000);
} else {
const uint8_t *pi = GET_PIXEL(pal->frame, i, 0);
AV_COPY32(p, pi);
}
pi = GET_PIXEL(pal->frame, i, 0);
AV_COPY32(p, pi);
}
}

@@ -60,6 +60,7 @@ void ff_synth_filter_inner_sse2(float *synth_buf_ptr, float synth_buf2[32],
const float window[512],
float out[32], intptr_t offset, float scale);
#if HAVE_YASM
static void synth_filter_sse2(FFTContext *imdct,
float *synth_buf_ptr, int *synth_buf_offset,
float synth_buf2[32], const float window[512],

@@ -74,12 +75,15 @@ static void synth_filter_sse2(FFTContext *imdct,
*synth_buf_offset = (*synth_buf_offset - 32) & 511;
}
#endif
av_cold void ff_synth_filter_init_x86(SynthFilterContext *s)
{
#if HAVE_YASM
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_SSE2(cpu_flags)) {
s->synth_filter_float = synth_filter_sse2;
}
#endif
}

@@ -61,6 +61,9 @@ cglobal scalarproduct_int16, 3,3,3, v1, v2, order
%endif
paddd m2, m0
movd eax, m2
%if mmsize == 8
emms
%endif
RET
; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)

@@ -817,13 +817,13 @@ cglobal vp9_ipred_vl_4x4, 4, 4, 0, dst, stride, l, a
psrlq m2, m1, 8
LOWPASS 2, 1, 0, 3
pavgb m1, m0
movq [dstq+strideq*0], m1
movq [dstq+strideq*1], m2
movd [dstq+strideq*0], m1
movd [dstq+strideq*1], m2
lea dstq, [dstq+strideq*2]
psrlq m1, 8
psrlq m2, 8
movq [dstq+strideq*0], m1
movq [dstq+strideq*1], m2
movd [dstq+strideq*0], m1
movd [dstq+strideq*1], m2
RET
%macro VL_XMM_FUNCS 1

@@ -20,10 +20,10 @@
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%if ARCH_X86_64
%include "libavutil/x86/x86util.asm"
%if ARCH_X86_64
SECTION_RODATA
cextern pb_3
@@ -30,6 +30,7 @@
#include <pulse/error.h>
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/time.h"
#include "libavutil/opt.h"
#include "pulse_audio_common.h"

@@ -47,6 +48,7 @@ typedef struct PulseData {
pa_simple *s;
int64_t pts;
int64_t frame_duration;
int wallclock;
} PulseData;
static av_cold int pulse_read_header(AVFormatContext *s)

@@ -131,6 +133,8 @@ static int pulse_read_packet(AVFormatContext *s, AVPacket *pkt)
}
pd->pts = -latency;
if (pd->wallclock)
pd->pts += av_gettime();
}
pkt->pts = pd->pts;

@@ -158,6 +162,7 @@ static const AVOption options[] = {
{ "channels", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, D },
{ "frame_size", "set number of bytes per frame", OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, D },
{ "fragment_size", "set buffering size, affects latency and cpu usage", OFFSET(fragment_size), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, D },
{ "wallclock", "set the initial pts using the current time", OFFSET(wallclock), AV_OPT_TYPE_INT, {.i64 = 1}, -1, 1, D },
{ NULL },
};

@@ -22,6 +22,7 @@
#include "avdevice.h"
typedef struct {
AVClass *class;
int fd;
} V4L2Context;

@@ -278,7 +278,13 @@ static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
s->delay_index = dindex;
av_frame_free(&frame);
return out_frame ? ff_filter_frame(ctx->outputs[0], out_frame) : 0;
if (out_frame) {
err = ff_filter_frame(ctx->outputs[0], out_frame);
return err;
}
return 0;
}
static int compand_drain(AVFilterLink *outlink)

@@ -533,7 +539,7 @@ static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
CompandContext *s = ctx->priv;
int ret;
int ret = 0;
ret = ff_request_frame(ctx->inputs[0]);

@@ -412,7 +412,7 @@ static int config_audio_output(AVFilterLink *outlink)
if (ebur128->peak_mode & PEAK_MODE_TRUE_PEAKS) {
int ret;
ebur128->swr_buf = av_malloc(19200 * nb_channels * sizeof(double));
ebur128->swr_buf = av_malloc_array(nb_channels, 19200 * sizeof(double));
ebur128->true_peaks = av_calloc(nb_channels, sizeof(*ebur128->true_peaks));
ebur128->true_peaks_per_frame = av_calloc(nb_channels, sizeof(*ebur128->true_peaks_per_frame));
ebur128->swr_ctx = swr_alloc();

@@ -308,6 +308,7 @@ static void select_frame(AVFilterContext *ctx, AVFrame *frame)
select->var_values[VAR_PTS] = TS2D(frame->pts);
select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
select->var_values[VAR_KEY] = frame->key_frame;
switch (inlink->type) {
case AVMEDIA_TYPE_AUDIO:

@@ -337,21 +338,20 @@ static void select_frame(AVFilterContext *ctx, AVFrame *frame)
select->var_values[VAR_N],
select->var_values[VAR_PTS],
select->var_values[VAR_T],
(int)select->var_values[VAR_KEY]);
frame->key_frame);
switch (inlink->type) {
case AVMEDIA_TYPE_VIDEO:
av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_P ? 'P' :
select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_T ? 'T' :
select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_B ? 'B' : '?',
av_get_picture_type_char(select->var_values[VAR_PICT_TYPE]),
(!frame->interlaced_frame) ? 'P' :
frame->top_field_first ? 'T' : 'B',
av_get_picture_type_char(frame->pict_type),
select->var_values[VAR_SCENE]);
break;
case AVMEDIA_TYPE_AUDIO:
av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%d",
(int)select->var_values[VAR_SAMPLES_N],
(int)select->var_values[VAR_CONSUMED_SAMPLES_N]);
av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
frame->nb_samples,
select->var_values[VAR_CONSUMED_SAMPLES_N]);
break;
}

@@ -24,6 +24,8 @@
* video presentation timestamp (PTS) modification filter
*/
#include <inttypes.h>
#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"

@@ -175,21 +177,21 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
frame->pts = D2TS(d);
av_log(inlink->dst, AV_LOG_DEBUG,
"N:%"PRId64" PTS:%s T:%f POS:%s",
(int64_t)setpts->var_values[VAR_N],
d2istr(setpts->var_values[VAR_PTS]),
setpts->var_values[VAR_T],
d2istr(setpts->var_values[VAR_POS]));
av_dlog(inlink->dst,
"N:%"PRId64" PTS:%s T:%f POS:%s",
(int64_t)setpts->var_values[VAR_N],
d2istr(setpts->var_values[VAR_PTS]),
setpts->var_values[VAR_T],
d2istr(setpts->var_values[VAR_POS]));
switch (inlink->type) {
case AVMEDIA_TYPE_VIDEO:
av_log(inlink->dst, AV_LOG_DEBUG, " INTERLACED:%"PRId64,
(int64_t)setpts->var_values[VAR_INTERLACED]);
av_dlog(inlink->dst, " INTERLACED:%"PRId64,
(int64_t)setpts->var_values[VAR_INTERLACED]);
break;
case AVMEDIA_TYPE_AUDIO:
av_log(inlink->dst, AV_LOG_DEBUG, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
(int64_t)setpts->var_values[VAR_NB_SAMPLES],
(int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
av_dlog(inlink->dst, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
(int64_t)setpts->var_values[VAR_NB_SAMPLES],
(int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
break;
}
av_log(inlink->dst, AV_LOG_DEBUG, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));

@@ -69,7 +69,7 @@ static int query_formats(AVFilterContext *ctx)
#define ABS(a) (((a) ^ ((a) >> 31)) - ((a) >> 31))
static int diff_c(const uint8_t *a, const uint8_t *b, int s)
static int diff_c(const uint8_t *a, const uint8_t *b, ptrdiff_t s)
{
int i, j, diff = 0;

@@ -83,7 +83,7 @@ static int diff_c(const uint8_t *a, const uint8_t *b, int s)
return diff;
}
static int comb_c(const uint8_t *a, const uint8_t *b, int s)
static int comb_c(const uint8_t *a, const uint8_t *b, ptrdiff_t s)
{
int i, j, comb = 0;

@@ -98,7 +98,7 @@ static int comb_c(const uint8_t *a, const uint8_t *b, int s)
return comb;
}
static int var_c(const uint8_t *a, const uint8_t *b, int s)
static int var_c(const uint8_t *a, const uint8_t *b, ptrdiff_t s)
{
int i, j, var = 0;

@@ -531,7 +531,7 @@ static void pullup_release_frame(PullupFrame *f)
static void compute_metric(PullupContext *s, int *dest,
PullupField *fa, int pa, PullupField *fb, int pb,
int (*func)(const uint8_t *, const uint8_t *, int))
int (*func)(const uint8_t *, const uint8_t *, ptrdiff_t))
{
int mp = s->metric_plane;
int xstep = 8;

@@ -61,9 +61,9 @@ typedef struct PullupContext {
PullupBuffer buffers[10];
PullupFrame frame;
int (*diff)(const uint8_t *a, const uint8_t *b, int s);
int (*comb)(const uint8_t *a, const uint8_t *b, int s);
int (*var )(const uint8_t *a, const uint8_t *b, int s);
int (*diff)(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
int (*comb)(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
int (*var )(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
} PullupContext;
void ff_pullup_init_x86(PullupContext *s);

@@ -68,7 +68,7 @@ cglobal pullup_filter_comb, 3, 5, 8, first, second, size
sub secondq, sizeq
.loop:
movq m0, [secondq]
movq m0, [firstq]
movq m1, [secondq]
punpcklbw m0, m7
movq m2, [secondq+sizeq]

@@ -23,9 +23,9 @@
#include "libavutil/x86/cpu.h"
#include "libavfilter/vf_pullup.h"
int ff_pullup_filter_diff_mmx(const uint8_t *a, const uint8_t *b, int s);
int ff_pullup_filter_comb_mmx(const uint8_t *a, const uint8_t *b, int s);
int ff_pullup_filter_var_mmx (const uint8_t *a, const uint8_t *b, int s);
int ff_pullup_filter_diff_mmx(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
int ff_pullup_filter_comb_mmx(const uint8_t *a, const uint8_t *b, ptrdiff_t s);
int ff_pullup_filter_var_mmx (const uint8_t *a, const uint8_t *b, ptrdiff_t s);
av_cold void ff_pullup_init_x86(PullupContext *s)
{
@@ -344,10 +344,16 @@ static int aiff_read_packet(AVFormatContext *s,
return AVERROR_EOF;
/* Now for that packet */
if (st->codec->block_align >= 17) // GSM, QCLP, IMA4
switch (st->codec->codec_id) {
case AV_CODEC_ID_ADPCM_IMA_QT:
case AV_CODEC_ID_GSM:
case AV_CODEC_ID_QDM2:
case AV_CODEC_ID_QCELP:
size = st->codec->block_align;
else
break;
default:
size = (MAX_SIZE / st->codec->block_align) * st->codec->block_align;
}
size = FFMIN(max_size, size);
res = av_get_packet(s->pb, pkt, size);
if (res < 0)

@@ -53,8 +53,10 @@ static int ape_tag_read_field(AVFormatContext *s)
av_log(s, AV_LOG_WARNING, "Invalid APE tag key '%s'.\n", key);
return -1;
}
if (size >= UINT_MAX)
return -1;
if (size > INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
av_log(s, AV_LOG_ERROR, "APE tag size too large.\n");
return AVERROR_INVALIDDATA;
}
if (flags & APE_TAG_FLAG_IS_BINARY) {
uint8_t filename[1024];
enum AVCodecID id;

@@ -370,7 +370,8 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
if (!(asf->hdr.flags & 0x01)) { // if we aren't streaming...
int64_t fsize = avio_size(pb);
if (fsize <= 0 || (int64_t)asf->hdr.file_size <= 0 || FFABS(fsize - (int64_t)asf->hdr.file_size) < 10000)
if (fsize <= 0 || (int64_t)asf->hdr.file_size <= 0 ||
FFABS(fsize - (int64_t)asf->hdr.file_size) / (float)FFMIN(fsize, asf->hdr.file_size) < 0.05)
st->duration = asf->hdr.play_time /
(10000000 / 1000) - start_time;
}

@@ -1005,6 +1005,7 @@ typedef struct AVStream {
AVRational av_stream_get_r_frame_rate(const AVStream *s);
void av_stream_set_r_frame_rate(AVStream *s, AVRational r);
struct AVCodecParserContext *av_stream_get_parser(const AVStream *s);
#define AV_PROGRAM_RUNNING 1

@@ -1685,8 +1685,7 @@ static int avi_read_seek(AVFormatContext *s, int stream_index,
continue;
// av_assert1(st2->codec->block_align);
av_assert0((int64_t)st2->time_base.num * ast2->rate ==
(int64_t)st2->time_base.den * ast2->scale);
av_assert0(fabs(av_q2d(st2->time_base) - ast2->scale / (double)ast2->rate) < av_q2d(st2->time_base) * 0.00000001);
index = av_index_search_timestamp(st2,
av_rescale_q(timestamp,
st->time_base,

@@ -61,7 +61,7 @@ static int cavsvideo_probe(AVProbeData *p)
}
}
if(seq && seq*9<=pic*10)
return AVPROBE_SCORE_EXTENSION;
return AVPROBE_SCORE_EXTENSION+1;
return 0;
}

@@ -72,30 +72,33 @@ static inline uint16_t dv_audio_12to16(uint16_t sample)
return result;
}
/*
* This is the dumbest implementation of all -- it simply looks at
* a fixed offset and if pack isn't there -- fails. We might want
* to have a fallback mechanism for complete search of missing packs.
*/
static const uint8_t *dv_extract_pack(uint8_t *frame, enum dv_pack_type t)
{
int offs;
int c;
switch (t) {
case dv_audio_source:
offs = (80 * 6 + 80 * 16 * 3 + 3);
break;
case dv_audio_control:
offs = (80 * 6 + 80 * 16 * 4 + 3);
break;
case dv_video_control:
offs = (80 * 5 + 48 + 5);
break;
case dv_timecode:
offs = (80*1 + 3 + 3);
break;
default:
return NULL;
for (c = 0; c < 10; c++) {
switch (t) {
case dv_audio_source:
if (c&1) offs = (80 * 6 + 80 * 16 * 0 + 3 + c*12000);
else offs = (80 * 6 + 80 * 16 * 3 + 3 + c*12000);
break;
case dv_audio_control:
if (c&1) offs = (80 * 6 + 80 * 16 * 1 + 3 + c*12000);
else offs = (80 * 6 + 80 * 16 * 4 + 3 + c*12000);
break;
case dv_video_control:
if (c&1) offs = (80 * 3 + 8 + c*12000);
else offs = (80 * 5 + 48 + 5 + c*12000);
break;
case dv_timecode:
offs = (80*1 + 3 + 3);
break;
default:
return NULL;
}
if (frame[offs] == t)
break;
}
return frame[offs] == t ? &frame[offs] : NULL;

@@ -223,6 +223,18 @@ static int flv_write_header(AVFormatContext *s)
avcodec_get_name(enc->codec_id), i);
return AVERROR(EINVAL);
}
if (enc->codec_id == AV_CODEC_ID_MPEG4 ||
enc->codec_id == AV_CODEC_ID_H263) {
int error = enc->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL;
av_log(s, error ? AV_LOG_ERROR : AV_LOG_WARNING,
"Codec %s is not supported in the official FLV specification,\n", avcodec_get_name(enc->codec_id));
if (error) {
av_log(s, AV_LOG_ERROR,
"use vstrict=-1 / -strict -1 to use it anyway.\n");
return AVERROR(EINVAL);
}
}
break;
case AVMEDIA_TYPE_AUDIO:
if (audio_enc) {

@@ -38,6 +38,7 @@ typedef struct LibRTMPContext {
RTMP rtmp;
char *app;
char *playpath;
char *temp_filename;
} LibRTMPContext;
static void rtmp_log(int level, const char *fmt, va_list args)

@@ -62,6 +63,7 @@ static int rtmp_close(URLContext *s)
RTMP *r = &ctx->rtmp;
RTMP_Close(r);
av_freep(&ctx->temp_filename);
return 0;
}

@@ -101,7 +103,7 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
if (ctx->app) len += strlen(ctx->app) + sizeof(" app=");
if (ctx->playpath) len += strlen(ctx->playpath) + sizeof(" playpath=");
if (!(filename = av_malloc(len)))
if (!(ctx->temp_filename = filename = av_malloc(len)))
return AVERROR(ENOMEM);
av_strlcpy(filename, s->filename, len);

@@ -130,10 +132,9 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
}
s->is_streamed = 1;
rc = 0;
return 0;
fail:
if (filename != s->filename)
av_freep(&filename);
av_freep(&ctx->temp_filename);
if (rc)
RTMP_Close(r);

@@ -33,13 +33,15 @@ static int mpeg4video_probe(AVProbeData *probe_packet)
for(i=0; i<probe_packet->buf_size; i++){
temp_buffer = (temp_buffer<<8) + probe_packet->buf[i];
if ((temp_buffer & 0xffffff00) != 0x100)
if (temp_buffer & 0xfffffe00)
continue;
if (temp_buffer < 2)
continue;
if (temp_buffer == VOP_START_CODE) VOP++;
else if (temp_buffer == VISUAL_OBJECT_START_CODE) VISO++;
else if (temp_buffer < 0x120) VO++;
else if (temp_buffer < 0x130) VOL++;
else if (temp_buffer >= 0x100 && temp_buffer < 0x120) VO++;
else if (temp_buffer >= 0x120 && temp_buffer < 0x130) VOL++;
else if ( !(0x1AF < temp_buffer && temp_buffer < 0x1B7)
&& !(0x1B9 < temp_buffer && temp_buffer < 0x1C4)) res++;
}
@@ -124,7 +124,7 @@ static int mp3_write_xing(AVFormatContext *s)
int best_bitrate_error = INT_MAX;
int xing_offset;
int ver = 0;
int bytes_needed, lsf;
int bytes_needed;
const char *vendor = (codec->flags & CODEC_FLAG_BITEXACT) ? "Lavf" : LIBAVFORMAT_IDENT;
if (!s->pb->seekable || !mp3->write_xing)

@@ -161,7 +161,7 @@ static int mp3_write_xing(AVFormatContext *s)
header |= channels << 6;
for (bitrate_idx = 1; bitrate_idx < 15; bitrate_idx++) {
int bit_rate = 1000 * avpriv_mpa_bitrate_tab[lsf][3 - 1][bitrate_idx];
int bit_rate = 1000 * avpriv_mpa_bitrate_tab[ver != 3][3 - 1][bitrate_idx];
int error = FFABS(bit_rate - codec->bit_rate);
if (error < best_bitrate_error) {

@@ -262,19 +262,19 @@ static int mp3_write_audio_packet(AVFormatContext *s, AVPacket *pkt)
if (pkt->data && pkt->size >= 4) {
MPADecodeHeader mpah;
int av_unused base;
uint32_t head = AV_RB32(pkt->data);
uint32_t h;
if (ff_mpa_check_header(head) < 0) {
h = AV_RB32(pkt->data);
if (ff_mpa_check_header(h) == 0) {
avpriv_mpegaudio_decode_header(&mpah, h);
if (!mp3->initial_bitrate)
mp3->initial_bitrate = mpah.bit_rate;
if ((mpah.bit_rate == 0) || (mp3->initial_bitrate != mpah.bit_rate))
mp3->has_variable_bitrate = 1;
} else {
av_log(s, AV_LOG_WARNING, "Audio packet of size %d (starting with %08X...) "
"is invalid, writing it anyway.\n", pkt->size, head);
return ff_raw_write_packet(s, pkt);
"is invalid, writing it anyway.\n", pkt->size, h);
}
avpriv_mpegaudio_decode_header(&mpah, head);
if (!mp3->initial_bitrate)
mp3->initial_bitrate = mpah.bit_rate;
if ((mpah.bit_rate == 0) || (mp3->initial_bitrate != mpah.bit_rate))
mp3->has_variable_bitrate = 1;
#ifdef FILTER_VBR_HEADERS
/* filter out XING and INFO headers. */

@@ -152,7 +152,7 @@ static int mpc_read_packet(AVFormatContext *s, AVPacket *pkt)
}
c->curbits = (curbits + size2) & 0x1F;
if ((ret = av_new_packet(pkt, size)) < 0)
if ((ret = av_new_packet(pkt, size + 4)) < 0)
return ret;
pkt->data[0] = curbits;

@@ -1882,7 +1882,7 @@ static void sdt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
break;
desc_len = get8(&p, desc_list_end);
desc_end = p + desc_len;
if (desc_end > desc_list_end)
if (desc_len < 0 || desc_end > desc_list_end)
break;
av_dlog(ts->stream, "tag: 0x%02x len=%d\n",

@@ -92,6 +92,10 @@ typedef struct MpegTSWrite {
#define DEFAULT_PES_HEADER_FREQ 16
#define DEFAULT_PES_PAYLOAD_SIZE ((DEFAULT_PES_HEADER_FREQ - 1) * 184 + 170)
/* The section length is 12 bits. The first 2 are set to 0, the remaining
* 10 bits should not exceed 1021. */
#define SECTION_LENGTH 1020
static const AVOption options[] = {
{ "mpegts_transport_stream_id", "Set transport_stream_id field.",
offsetof(MpegTSWrite, transport_stream_id), AV_OPT_TYPE_INT, {.i64 = 0x0001 }, 0x0001, 0xffff, AV_OPT_FLAG_ENCODING_PARAM},

@@ -246,7 +250,7 @@ static void mpegts_write_pat(AVFormatContext *s)
{
MpegTSWrite *ts = s->priv_data;
MpegTSService *service;
uint8_t data[1012], *q;
uint8_t data[SECTION_LENGTH], *q;
int i;
q = data;

@@ -262,8 +266,8 @@ static void mpegts_write_pat(AVFormatContext *s)
static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
{
MpegTSWrite *ts = s->priv_data;
uint8_t data[1012], *q, *desc_length_ptr, *program_info_length_ptr;
int val, stream_type, i;
uint8_t data[SECTION_LENGTH], *q, *desc_length_ptr, *program_info_length_ptr;
int val, stream_type, i, err = 0;
q = data;
put16(&q, 0xe000 | service->pcr_pid);

@@ -280,7 +284,12 @@ static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
for(i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
MpegTSWriteStream *ts_st = st->priv_data;
AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL,0);
AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
if (q - data > SECTION_LENGTH - 32) {
err = 1;
break;
}
switch(st->codec->codec_id) {
case AV_CODEC_ID_MPEG1VIDEO:
case AV_CODEC_ID_MPEG2VIDEO:

@@ -316,9 +325,6 @@ static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
break;
}
if (q - data > sizeof(data) - 32)
return AVERROR(EINVAL);
*q++ = stream_type;
put16(&q, 0xe000 | ts_st->pid);
desc_length_ptr = q;

@@ -350,7 +356,11 @@ static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
len_ptr = q++;
*len_ptr = 0;
for (p = lang->value; next && *len_ptr < 255 / 4 * 4 && q - data < sizeof(data) - 4; p = next + 1) {
for (p = lang->value; next && *len_ptr < 255 / 4 * 4; p = next + 1) {
if (q - data > SECTION_LENGTH - 4) {
err = 1;
break;
}
next = strchr(p, ',');
if (strlen(p) != 3 && (!next || next != p + 3))
continue; /* not a 3-letter code */

@@ -387,7 +397,11 @@ static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
*q++ = 0x59; /* subtitling_descriptor */
len_ptr = q++;
while (strlen(language) >= 3 && (sizeof(data) - (q - data)) >= 8) { /* 8 bytes per DVB subtitle substream data */
while (strlen(language) >= 3) {
if (sizeof(data) - (q - data) < 8) { /* 8 bytes per DVB subtitle substream data */
err = 1;
break;
}
*q++ = *language++;
*q++ = *language++;
*q++ = *language++;

@@ -478,6 +492,13 @@ static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
desc_length_ptr[0] = val >> 8;
desc_length_ptr[1] = val;
}
if (err)
av_log(s, AV_LOG_ERROR,
"The PMT section cannot fit stream %d and all following streams.\n"
"Try reducing the number of languages in the audio streams "
"or the total number of streams.\n", i);
mpegts_write_section1(&service->pmt, PMT_TID, service->sid, ts->tables_version, 0, 0,
data, q - data);
return 0;

@@ -504,7 +525,7 @@ static void mpegts_write_sdt(AVFormatContext *s)
{
MpegTSWrite *ts = s->priv_data;
MpegTSService *service;
uint8_t data[1012], *q, *desc_list_len_ptr, *desc_len_ptr;
uint8_t data[SECTION_LENGTH], *q, *desc_list_len_ptr, *desc_len_ptr;
int i, running_status, free_ca_mode, val;
q = data;

@@ -795,7 +795,6 @@ retry:
10);
if(side_data == NULL) {
av_free_packet(pkt);
av_free(pkt);
return AVERROR(ENOMEM);
}
AV_WL32(side_data + 4, os->end_trimming);
Some files were not shown because too many files have changed in this diff.