Compare commits
207 Commits
Commit SHA1 hashes, in the order listed:

```
db27f50e06 8327bef1c9 a620c463f0 63b5cb1fb0 7959b9a0f3 bbfe0f7b08 27a910a857 ffe915b6f5 22558d6f6e
f03888b449 11c0531099 0b033cd3a1 c9a25ff5a0 3ee4a610c2 d071c1f0e1 15601df419 103cf56c62 d4c45f9249
841ce9a838 8a01fb3729 b5dbe93c8b 185e55279c 730826275f afd7fac3f1 3353a00d58 b052ea0f5b c8fb53357d
30e7dae22c a74a0a5c0c 0f77303c57 dbe690b572 6eca20aaec 7ef11e8221 942806cbe4 557e3790ef 2b15ceec62
c3cd7b8a29 0621421ee2 0e9fe8510e 1f59cfe65b a6f808b36a 4a495766d1 cedb96db37 de75b4063d bb70b6673f
a07dfcdd6d 5630d5cdc2 87d0339d67 fac6ae0814 0c51b26729 e0822b147f 1bfd23d2c9 419bd6e303 494d3d14db
6c63eb5909 142896f2d0 dca70c5931 17ff5d3f88 693d0d3ac5 bb2c09310c 66261cfa77 b89b136c00 6ed35a6674
d4c70c8b50 05bc6f8ba6 c2517fb363 0cc15f7c83 b6ff3acafc 8047380514 b152305bb3 a0605792c2 4b8cb3fe51
48bf926bad ab43652c67 48b586ca4e 4e2e997faf 19ccc06d8b 193b949f71 63ed7e09dd 10464ca0eb 4f515913a2
b44506c393 9395a3a96b 25d2a4dde7 bfee1e9072 0db579445f e3275571c7 7b7d12ea04 e26fd791ef ad98b2891c
1ec6a3c768 5c791b1c9c 4a03c31728 c3c8857263 4fbdac00e9 0f4c149730 d887a12145 f2fba07494 1ebd7d2ccf
5f7e48a113 e2e0c5b7f6 279003eb9e 5926bea980 cbfd6b1fa4 fdc8f4e5b4 603f4ecd14 1ac4ae2a32 3998071039
6fb9bfb1a3 693faadd30 9806028fbb 433e15bb87 c3af801c63 01817d508b b5ef1eee45 e2c5f88237 d005e2ecce
57bdb3f3dd 5e34dded10 45ca270ec9 ca2ccd85d7 437f6fb488 f913da3e15 ed9c6529f0 aa40bbb492 8e276fc96a
4a4e30a6d8 1c733a440a 5c3bc127ca be94d15a03 9c57328b81 6952f6f39b 6359be6751 beb55b3981 80aec733ad
77bb6b5bcc f68395f7fc eefb6b654d d18d48def6 8df77c3758 08f56b846c f903147f2d 9a840d5e17 9e43d92d6a
e13e928baa d3bfb66a66 17a6ca7d31 8a20224059 29ee8b72c4 25864cf562 f74206cb40 148d9cd122 bc259185cb
3b6bde3b3d 4f187f0af1 10c2d22ba1 35738e5898 59d98fc050 60bfa9154d 9794727ccd b88de7b31a 11420649d0
dbf5d7e5cd 6badd558ce a1fe3b4150 c5129da726 fc57959fd5 83956309cc ed15be7519 6928193493 a72b7286e6
901e275697 7a6b5d7a86 fdfe94f4b1 b63ec0cb0f 2eb72d5bdc 638c3aca64 76d59f1b34 aa0a8ef50e d6173ae341
3ed27832e7 594b1fa961 7a1262fca3 6d2219e9f9 7c2d152f56 1f58590e1e 64bbbcd7b0 de9d3f22f0 ea5bb5613f
c61ac696e5 6a250c858e 5411040802 ab1ea597bd ee606fd031 2f71aeb301 65259b4d68 8f53d32dfb fcc6568a10
489d066d49 9cb45f6ad2 0f04e2741e 84642ec879 bef4d9bf87 bc4f6ae88e 2678b25099 e322496054 7fa72ff19c
```
Changelog (26 changed lines)

```diff
@@ -1,7 +1,31 @@
 Entries are sorted chronologically from oldest to youngest within each release,
 releases are sorted from youngest to oldest.
 
-version <next>:
+version 2.3.3:
+- h264: fix grayscale only decoding with weighted prediction
+- mjpegdec: support AV_PIX_FMT_YUV420P16 with upscale_h
+- proresenc_ks: fix buffer overflow
+- matroskadec: fix crash
+
+version 2.3.2:
+- snow: fix null pointer dereference
+- huffyucdec: fix overread
+- vc1dec: fix crash
+- iff: fix out of array access
+- matroskaenc: fix assertion failure
+- cdgraphics: fix infinite loop
+- dvdsub_parser: fix infinite loop
+- mpeg12dec: support decoding some broken files
+- v4l2enc: fix crash
+- h264_parser: fix handling huge resolutions
+- h264_mp4toannexb_bsf: multiple bugfixes
+
+version 2.3.1:
+- public AVDCT API/ABI for DCT functions
+- g2meet: allow size changes within original sizes
+- dv: improved error resilience, fixing Ticket2340 and Ticket2341
+
+version 2.3:
 - AC3 fixed-point decoding
 - shuffleplanes filter
 - subfile protocol
```
```diff
@@ -528,8 +528,8 @@ x86 Michael Niedermayer
 Releases
 ========
 
+2.3 Michael Niedermayer
 2.2 Michael Niedermayer
-2.1 Michael Niedermayer
 1.2 Michael Niedermayer
 
 If you want to maintain an older release, please contact us
```
Makefile (2 changed lines)

```diff
@@ -110,7 +110,7 @@ endef
 
 $(foreach P,$(PROGS),$(eval $(call DOPROG,$(P:$(PROGSSUF)$(EXESUF)=))))
 
-ffprobe.o cmdutils.o : libavutil/ffversion.h
+ffprobe.o cmdutils.o libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h
 
 $(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
 	$(CP) $< $@
```
```diff
@@ -7,9 +7,10 @@
 since the release of FFmpeg 2.2.
 
 In this release, there are lots of internal overhauls that make FFmpeg a
-more accessible project for new developers. Many important new features
-like QTKit and AVFoundation input devices are committed. Contributions done
-by Libav such as a new native Opus decoder are also merged.
+more accessible project for new developers. Many important new
+optimizations and features like QTKit and AVFoundation input devices are
+committed. Contributions done by Libav such as a new native Opus decoder
+are also merged.
 
 Because of the increasing difficulty to maintain and lack of maintainers,
 we are very sorry to say that we have removed all Blackfin and SPARC
@@ -17,8 +18,9 @@
 interested in maintaining optimization for these two architecture, feel
 free to contact us and we will restore the code!
 
-Since this release, the traditional Changelog file is upgraded to this
-modern-looking release note. Old changelogs are moved to doc/Changelog.old.
+Oh, and since this release, this modern-looking release note is provided in
+addition to the old-style Changelog file, to make it easier for you to
+focus on the most important features in this release.
 
 Enjoy!
 
@@ -33,9 +35,9 @@
 
 • libavutil 52.92.100
 • libavcodec 55.69.100
-• libavformat 55.47.100
+• libavformat 55.48.100
 • libavdevice 55.13.102
-• libavfilter 4.10.100
+• libavfilter 4.11.100
 • libswscale 2. 6.100
 • libswresample 0.19.100
 • libpostproc 52. 3.100
@@ -103,7 +105,7 @@
 Other interesting new features including hqx video filter, a pixel art
 scaling filter; a fixed-point AC-3 decoder contributed by Imagination
 Technologies; an On2 TrueMotion VP7 video decoder; an HTML5 WebVTT
-subtitle decoder that allows creation of WebVTT from any text-based
+subtitle encoder that allows creation of WebVTT from any text-based
 subtitles; and an 1-bit Direct Stream Digital audio decoder.
 
 ┌────────────────────────────┐
```
```diff
@@ -1857,7 +1857,7 @@ int read_yesno(void)
 
 int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
 {
-    int ret;
+    int64_t ret;
     FILE *f = av_fopen_utf8(filename, "rb");
 
     if (!f) {
```
configure (2 changed lines)

```diff
@@ -4528,6 +4528,7 @@ EOF
 fi
 
 check_ldflags -Wl,--as-needed
+check_ldflags -Wl,-z,noexecstack
 
 if check_func dlopen; then
     ldl=
@@ -5534,6 +5535,7 @@ enabled getenv || echo "#define getenv(x) NULL" >> $TMPH
 
 
 mkdir -p doc
+mkdir -p tests
 echo "@c auto-generated by configure" > doc/config.texi
 
 print_config ARCH_ "$config_files" $ARCH_LIST
```
```diff
@@ -15,13 +15,13 @@ libavutil: 2012-10-22
 
 API changes, most recent first:
 
-2014-07-14 - xxxxxxx - lavf 55.47.100 - avformat.h
+2014-07-14 - 62227a7 - lavf 55.47.100 - avformat.h
   Add av_stream_get_parser()
 
-2014-07-xx - xxxxxxx - lavu 53.18.0 - display.h
+2014-07-09 - c67690f / a54f03b - lavu 52.92.100 / 53.18.0 - display.h
   Add av_display_matrix_flip() to flip the transformation matrix.
 
-2014-07-xx - xxxxxxx - lavc 55.56.0 - dv_profile.h
+2014-07-09 - 1b58f13 / f6ee61f - lavc 55.69.100 / 55.56.0 - dv_profile.h
   Add a public API for DV profile handling.
 
 2014-06-20 - 0dceefc / 9e500ef - lavu 52.90.100 / 53.17.0 - imgutils.h
@@ -35,6 +35,10 @@ API changes, most recent first:
   is now setting AVStream.time_base, instead of AVStream.codec.time_base as was
   done previously. The old method is now deprecated.
 
+2014-06-11 - 67d29da - lavc 55.66.101 - avcodec.h
+  Increase FF_INPUT_BUFFER_PADDING_SIZE to 32 due to some corner cases needing
+  it
+
 2014-06-10 - xxxxxxx - lavf 55.43.100 - avformat.h
   New field int64_t max_analyze_duration2 instead of deprecated
   int max_analyze_duration.
@@ -42,7 +46,7 @@ API changes, most recent first:
 2014-05-30 - 00759d7 - lavu 52.89.100 - opt.h
   Add av_opt_copy()
 
-2014-04-xx - 03bb99a / 0957b27 - lavc 55.66.100 / 55.54.0 - avcodec.h
+2014-06-01 - 03bb99a / 0957b27 - lavc 55.66.100 / 55.54.0 - avcodec.h
   Add AVCodecContext.side_data_only_packets to allow encoders to output packets
   with only side data. This option may become mandatory in the future, so all
   users are recommended to update their code and enable this option.
@@ -52,7 +56,7 @@ API changes, most recent first:
   AVColorTransferCharacteristic, and AVChromaLocation) inside lavu.
   And add AVFrame fields for them.
 
-2014-04-xx - bdb2e80 / b2d4565 - lavr 1.3.0 - avresample.h
+2014-05-29 - bdb2e80 / b2d4565 - lavr 1.3.0 - avresample.h
   Add avresample_max_output_samples
 
 2014-05-24 - d858ee7 / 6d21259 - lavf 55.42.100 / 55.19.0 - avformat.h
@@ -101,10 +105,10 @@ API changes, most recent first:
 2014-05-11 - 14aef38 / 66e6c8a - lavu 52.83.100 / 53.14.0 - pixfmt.h
   Add AV_PIX_FMT_VDA for new-style VDA acceleration.
 
-2014-05-xx - xxxxxxx - lavu 52.82.0 - fifo.h
+2014-05-xx - xxxxxxx - lavu 52.82.100 - fifo.h
   Add av_fifo_freep() function.
 
-2014-05-02 - ba52fb11 - lavu 52.81.0 - opt.h
+2014-05-02 - ba52fb11 - lavu 52.81.100 - opt.h
   Add av_opt_set_dict2() function.
 
 2014-05-01 - e77b985 / a2941c8 - lavc 55.60.103 / 55.50.3 - avcodec.h
```
```diff
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
 # This could be handy for archiving the generated documentation or
 # if some version control system is used.
 
-PROJECT_NUMBER =
+PROJECT_NUMBER = 2.3.6
 
 # With the PROJECT_LOGO tag one can specify a logo or icon that is included
 # in the documentation. The maximum height of the logo should not exceed 55
```
```diff
@@ -116,6 +116,10 @@ static int open_output_file(const char *filename)
                 || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
             /* in this example, we choose transcoding to same codec */
             encoder = avcodec_find_encoder(dec_ctx->codec_id);
+            if (!encoder) {
+                av_log(NULL, AV_LOG_FATAL, "Neccessary encoder not found\n");
+                return AVERROR_INVALIDDATA;
+            }
 
             /* In this example, we transcode to same properties (picture size,
              * sample rate etc.). These properties can be changed for output
```
```diff
@@ -491,7 +491,7 @@ aeval=val(ch)/2:c=same
 @item
 Invert phase of the second channel:
 @example
-eval=val(0)|-val(1)
+aeval=val(0)|-val(1)
 @end example
 @end itemize
 
@@ -9318,7 +9318,7 @@ Default value is "all", which will cycle through the list of all tests.
 
 Some examples:
 @example
-testsrc=t=dc_luma
+mptestsrc=t=dc_luma
 @end example
 
 will generate a "dc_luma" test pattern.
```
```diff
@@ -483,6 +483,21 @@ ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
 
 @end itemize
 
+@section libcdio
+
+Audio-CD input device based on cdio.
+
+To enable this input device during configuration you need libcdio
+installed on your system.
+
+This device allows playing and grabbing from an Audio-CD.
+
+For example to copy with @command{ffmpeg} the entire Audio-CD in /dev/sr0,
+you may run the command:
+@example
+ffmpeg -f libcdio -i /dev/sr0 cd.wav
+@end example
+
 @section libdc1394
 
 IIDC1394 input device, based on libdc1394 and libraw1394.
```
ffmpeg.c (19 changed lines)

```diff
@@ -578,6 +578,14 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
     AVCodecContext *avctx = ost->st->codec;
     int ret;
 
+    if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
+        ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+        if (ost->st->codec->extradata) {
+            memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
+            ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
+        }
+    }
+
     if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
         (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
         pkt->pts = pkt->dts = AV_NOPTS_VALUE;
@@ -1799,18 +1807,10 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
         for (i = 0; i < nb_filtergraphs; i++)
             if (ist_in_filtergraph(filtergraphs[i], ist)) {
                 FilterGraph *fg = filtergraphs[i];
-                int j;
                 if (configure_filtergraph(fg) < 0) {
                     av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                     exit_program(1);
                 }
-                for (j = 0; j < fg->nb_outputs; j++) {
-                    OutputStream *ost = fg->outputs[j]->ost;
-                    if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
-                        !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
-                        av_buffersink_set_frame_size(ost->filter->filter,
-                                                     ost->enc_ctx->frame_size);
-                }
             }
     }
 
@@ -3419,7 +3419,7 @@ static int process_input(int file_index)
     }
 
     /* add the stream-global side data to the first packet */
-    if (ist->nb_packets == 1)
+    if (ist->nb_packets == 1) {
         if (ist->st->nb_side_data)
             av_packet_split_side_data(&pkt);
         for (i = 0; i < ist->st->nb_side_data; i++) {
@@ -3435,6 +3435,7 @@ static int process_input(int file_index)
 
             memcpy(dst_data, src_sd->data, src_sd->size);
         }
+    }
 
     if (pkt.dts != AV_NOPTS_VALUE)
         pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
```
```diff
@@ -919,6 +919,16 @@ int configure_filtergraph(FilterGraph *fg)
     }
 
     fg->reconfiguration = 1;
+
+    for (i = 0; i < fg->nb_outputs; i++) {
+        OutputStream *ost = fg->outputs[i]->ost;
+        if (ost &&
+            ost->enc->type == AVMEDIA_TYPE_AUDIO &&
+            !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
+            av_buffersink_set_frame_size(ost->filter->filter,
+                                         ost->enc_ctx->frame_size);
+    }
+
     return 0;
 }
 
```
```diff
@@ -1828,7 +1828,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
         /* pick the "best" stream of each type */
 
         /* video: highest resolution */
-        if (!o->video_disable && oc->oformat->video_codec != AV_CODEC_ID_NONE) {
+        if (!o->video_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO) != AV_CODEC_ID_NONE) {
             int area = 0, idx = -1;
             int qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0);
             for (i = 0; i < nb_input_streams; i++) {
@@ -1850,7 +1850,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
         }
 
         /* audio: most channels */
-        if (!o->audio_disable && oc->oformat->audio_codec != AV_CODEC_ID_NONE) {
+        if (!o->audio_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_AUDIO) != AV_CODEC_ID_NONE) {
             int channels = 0, idx = -1;
             for (i = 0; i < nb_input_streams; i++) {
                 ist = input_streams[i];
```
```diff
@@ -2977,6 +2977,8 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
     AVDictionaryEntry *entry = av_dict_get(stream->metadata, "title", NULL, 0);
     int i;
 
+    *pbuffer = NULL;
+
     avc = avformat_alloc_context();
     if (avc == NULL || !rtp_format) {
         return -1;
@@ -3013,7 +3015,7 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
     av_free(avc);
     av_free(avs);
 
-    return strlen(*pbuffer);
+    return *pbuffer ? strlen(*pbuffer) : AVERROR(ENOMEM);
 }
 
 static void rtsp_cmd_options(HTTPContext *c, const char *url)
```
```diff
@@ -15,6 +15,7 @@ HEADERS = avcodec.h \
 
 OBJS = allcodecs.o \
        audioconvert.o \
+       avdct.o \
        avpacket.o \
        avpicture.o \
        bitstream.o \
```
```diff
@@ -34,7 +34,7 @@ static int aac_sync(uint64_t state, AACAC3ParseContext *hdr_info,
     int size;
     union {
         uint64_t u64;
-        uint8_t  u8[8];
+        uint8_t  u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
     } tmp;
 
     tmp.u64 = av_be2ne64(state);
```
```diff
@@ -166,7 +166,7 @@ static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
     int err;
     union {
         uint64_t u64;
-        uint8_t  u8[8];
+        uint8_t  u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
     } tmp = { av_be2ne64(state) };
     AC3HeaderInfo hdr, *phdr = &hdr;
     GetBitContext gbc;
```
```diff
@@ -263,7 +263,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
                 energy_cpl = energy[blk][CPL_CH][bnd];
                 energy_ch = energy[blk][ch][bnd];
                 blk1 = blk+1;
-                while (!s->blocks[blk1].new_cpl_coords[ch] && blk1 < s->num_blocks) {
+                while (blk1 < s->num_blocks && !s->blocks[blk1].new_cpl_coords[ch]) {
                     if (s->blocks[blk1].cpl_in_use) {
                         energy_cpl += energy[blk1][CPL_CH][bnd];
                         energy_ch += energy[blk1][ch][bnd];
```
libavcodec/avdct.c (new file, 124 lines)

```c
/*
 * Copyright (c) 2014 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "idctdsp.h"
#include "fdctdsp.h"
#include "avdct.h"

#define OFFSET(x) offsetof(AVDCT,x)
#define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
//these names are too long to be readable
#define V AV_OPT_FLAG_VIDEO_PARAM
#define A AV_OPT_FLAG_AUDIO_PARAM
#define E AV_OPT_FLAG_ENCODING_PARAM
#define D AV_OPT_FLAG_DECODING_PARAM

static const AVOption avdct_options[] = {
{"dct", "DCT algorithm", OFFSET(dct_algo), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, V|E, "dct"},
{"auto", "autoselect a good one (default)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_AUTO }, INT_MIN, INT_MAX, V|E, "dct"},
{"fastint", "fast integer (experimental / for debugging)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_FASTINT }, INT_MIN, INT_MAX, V|E, "dct"},
{"int", "accurate integer", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_INT }, INT_MIN, INT_MAX, V|E, "dct"},
{"mmx", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_MMX }, INT_MIN, INT_MAX, V|E, "dct"},
{"altivec", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_ALTIVEC }, INT_MIN, INT_MAX, V|E, "dct"},
{"faan", "floating point AAN DCT (experimental / for debugging)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DCT_FAAN }, INT_MIN, INT_MAX, V|E, "dct"},

{"idct", "select IDCT implementation", OFFSET(idct_algo), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, V|E|D, "idct"},
{"auto", "autoselect a good one (default)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_AUTO }, INT_MIN, INT_MAX, V|E|D, "idct"},
{"int", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_INT }, INT_MIN, INT_MAX, V|E|D, "idct"},
{"simple", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLE }, INT_MIN, INT_MAX, V|E|D, "idct"},
{"simplemmx", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEMMX }, INT_MIN, INT_MAX, V|E|D, "idct"},
{"arm", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_ARM }, INT_MIN, INT_MAX, V|E|D, "idct"},
{"altivec", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_ALTIVEC }, INT_MIN, INT_MAX, V|E|D, "idct"},
#if FF_API_ARCH_SH4
{"sh4", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SH4 }, INT_MIN, INT_MAX, V|E|D, "idct"},
#endif
{"simplearm", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEARM }, INT_MIN, INT_MAX, V|E|D, "idct"},
{"simplearmv5te", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEARMV5TE }, INT_MIN, INT_MAX, V|E|D, "idct"},
{"simplearmv6", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEARMV6 }, INT_MIN, INT_MAX, V|E|D, "idct"},
{"simpleneon", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLENEON }, INT_MIN, INT_MAX, V|E|D, "idct"},
#if FF_API_ARCH_ALPHA
{"simplealpha", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEALPHA }, INT_MIN, INT_MAX, V|E|D, "idct"},
#endif
{"ipp", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_IPP }, INT_MIN, INT_MAX, V|E|D, "idct"},
{"xvidmmx", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_XVIDMMX }, INT_MIN, INT_MAX, V|E|D, "idct"},
{"faani", "floating point AAN IDCT (experimental / for debugging)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_FAAN }, INT_MIN, INT_MAX, V|D|E, "idct"},
{"simpleauto", "experimental / for debugging", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEAUTO }, INT_MIN, INT_MAX, V|E|D, "idct"},
{NULL},
};

static const AVClass avdct_class = {
    .class_name = "AVDCT",
    .option     = avdct_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const AVClass *avcodec_dct_get_class(void)
{
    return &avdct_class;
}

AVDCT *avcodec_dct_alloc(void)
{
    AVDCT *dsp = av_mallocz(sizeof(AVDCT));

    if (!dsp)
        return NULL;

    dsp->av_class = &avdct_class;
    av_opt_set_defaults(dsp);

    return dsp;
}

int avcodec_dct_init(AVDCT *dsp)
{
    AVCodecContext *avctx = avcodec_alloc_context3(NULL);

    if (!avctx)
        return AVERROR(ENOMEM);

    avctx->idct_algo = dsp->idct_algo;
    avctx->dct_algo  = dsp->dct_algo;

#define COPY(src, name) memcpy(&dsp->name, &src.name, sizeof(dsp->name))

#if CONFIG_IDCTDSP
    {
        IDCTDSPContext idsp;
        ff_idctdsp_init(&idsp, avctx);
        COPY(idsp, idct);
        COPY(idsp, idct_permutation);
    }
#endif

#if CONFIG_FDCTDSP
    {
        FDCTDSPContext fdsp;
        ff_fdctdsp_init(&fdsp, avctx);
        COPY(fdsp, fdct);
    }
#endif

    avcodec_close(avctx);
    av_free(avctx);

    return 0;
}
```
libavcodec/avdct.h (new file, 78 lines)

```c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_AVDCT_H
#define AVCODEC_AVDCT_H

#include "libavutil/opt.h"

/**
 * AVDCT context.
 * @note function pointers can be NULL if the specific features have been
 *       disabled at build time.
 */
typedef struct AVDCT {
    const AVClass *av_class;

    void (*idct)(int16_t *block /* align 16 */);

    /**
     * IDCT input permutation.
     * Several optimized IDCTs need a permutated input (relative to the
     * normal order of the reference IDCT).
     * This permutation must be performed before the idct_put/add.
     * Note, normally this can be merged with the zigzag/alternate scan<br>
     * An example to avoid confusion:
     * - (->decode coeffs -> zigzag reorder -> dequant -> reference IDCT -> ...)
     * - (x -> reference DCT -> reference IDCT -> x)
     * - (x -> reference DCT -> simple_mmx_perm = idct_permutation
     *    -> simple_idct_mmx -> x)
     * - (-> decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant
     *    -> simple_idct_mmx -> ...)
     */
    uint8_t idct_permutation[64];

    void (*fdct)(int16_t *block /* align 16 */);


    /**
     * DCT algorithm.
     * must use AVOptions to set this field.
     */
    int dct_algo;

    /**
     * IDCT algorithm.
     * must use AVOptions to set this field.
     */
    int idct_algo;
} AVDCT;

/**
 * Allocates a AVDCT context.
 * This needs to be initialized with avcodec_dct_init() after optionally
 * configuring it with AVOptions.
 *
 * To free it use av_free()
 */
AVDCT *avcodec_dct_alloc(void);
int avcodec_dct_init(AVDCT *);

const AVClass *avcodec_dct_get_class(void);

#endif /* AVCODEC_AVDCT_H */
```
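avdct.h declares the allocation and initialization flow of the new public DCT context: allocate with avcodec_dct_alloc(), optionally configure the "dct"/"idct" options, then call avcodec_dct_init() and use the resulting function pointers. A minimal usage sketch follows; it is not part of the patch set, and the av_mallocz()-allocated block and the "auto" option value are assumptions about a typical caller rather than anything prescribed by this diff.

```c
#include <libavcodec/avdct.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>

int run_idct_once(void)
{
    /* av_mallocz() returns suitably aligned, zeroed memory for the 8x8 block. */
    int16_t *block = av_mallocz(64 * sizeof(*block));
    AVDCT *dct     = avcodec_dct_alloc();
    int ret        = 0;

    if (!block || !dct) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* Select an IDCT implementation through AVOptions before initializing. */
    av_opt_set(dct, "idct", "auto", 0);

    ret = avcodec_dct_init(dct);
    if (ret < 0)
        goto end;

    /* Function pointers may be NULL if the feature was disabled at build time. */
    if (dct->idct)
        dct->idct(block);

end:
    av_free(dct);
    av_free(block);
    return ret;
}
```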
```diff
@@ -32,6 +32,10 @@
 #include "cabac.h"
 #include "config.h"
 
+#ifndef UNCHECKED_BITSTREAM_READER
+#define UNCHECKED_BITSTREAM_READER !CONFIG_SAFE_BITSTREAM_READER
+#endif
+
 #if ARCH_AARCH64
 #   include "aarch64/cabac.h"
 #endif
```
```diff
@@ -353,10 +353,9 @@ static int cdg_decode_frame(AVCodecContext *avctx,
         *got_frame = 1;
     } else {
         *got_frame = 0;
-        buf_size = 0;
     }
 
-    return buf_size;
+    return avpkt->size;
 }
 
 static av_cold int cdg_decode_end(AVCodecContext *avctx)
```
```diff
@@ -135,7 +135,7 @@ static int cinepak_decode_vectors (CinepakContext *s, cvid_strip *strip,
     const uint8_t *eod = (data + size);
     uint32_t flag, mask;
     uint8_t *cb0, *cb1, *cb2, *cb3;
-    unsigned int x, y;
+    int x, y;
     char *ip0, *ip1, *ip2, *ip3;
 
     flag = 0;
```
```diff
@@ -1215,8 +1215,8 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
 
         q->num_subpackets++;
         s++;
-        if (s > MAX_SUBPACKETS) {
-            avpriv_request_sample(avctx, "subpackets > %d", MAX_SUBPACKETS);
+        if (s > FFMIN(MAX_SUBPACKETS, avctx->block_align)) {
+            avpriv_request_sample(avctx, "subpackets > %d", FFMIN(MAX_SUBPACKETS, avctx->block_align));
             return AVERROR_PATCHWELCOME;
         }
     }
```
```diff
@@ -2359,6 +2359,10 @@ FF_ENABLE_DEPRECATION_WARNINGS
 #else
     if (s->xch_present && !s->xch_disable) {
 #endif
+        if (avctx->channel_layout & AV_CH_BACK_CENTER) {
+            avpriv_request_sample(avctx, "XCh with Back center channel");
+            return AVERROR_INVALIDDATA;
+        }
         avctx->channel_layout |= AV_CH_BACK_CENTER;
         if (s->lfe) {
             avctx->channel_layout |= AV_CH_LOW_FREQUENCY;
```
```diff
@@ -171,6 +171,10 @@ static inline int dirac_get_arith_uint(DiracArith *c, int follow_ctx, int data_c
 {
     int ret = 1;
     while (!dirac_get_arith_bit(c, follow_ctx)) {
+        if (ret >= 0x40000000) {
+            av_log(NULL, AV_LOG_ERROR, "dirac_get_arith_uint overflow\n");
+            return -1;
+        }
         ret <<= 1;
         ret += dirac_get_arith_bit(c, data_ctx);
         follow_ctx = ff_dirac_next_ctx[follow_ctx];
```
```diff
@@ -611,10 +611,10 @@ static av_always_inline void decode_subband_internal(DiracContext *s, SubBand *b
 
     top = 0;
     for (cb_y = 0; cb_y < cb_height; cb_y++) {
-        bottom = (b->height * (cb_y+1)) / cb_height;
+        bottom = (b->height * (cb_y+1LL)) / cb_height;
         left = 0;
         for (cb_x = 0; cb_x < cb_width; cb_x++) {
-            right = (b->width * (cb_x+1)) / cb_width;
+            right = (b->width * (cb_x+1LL)) / cb_width;
             codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
             left = right;
         }
@@ -1003,8 +1003,8 @@ static int dirac_unpack_idwt_params(DiracContext *s)
     /* Codeblock parameters (core syntax only) */
     if (get_bits1(gb)) {
         for (i = 0; i <= s->wavelet_depth; i++) {
-            CHECKEDREAD(s->codeblock[i].width , tmp < 1, "codeblock width invalid\n")
-            CHECKEDREAD(s->codeblock[i].height, tmp < 1, "codeblock height invalid\n")
+            CHECKEDREAD(s->codeblock[i].width , tmp < 1 || tmp > (s->avctx->width >>s->wavelet_depth-i), "codeblock width invalid\n")
+            CHECKEDREAD(s->codeblock[i].height, tmp < 1 || tmp > (s->avctx->height>>s->wavelet_depth-i), "codeblock height invalid\n")
         }
 
         CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n")
```
```diff
@@ -38,6 +38,7 @@ typedef struct DNXHDContext {
     BlockDSPContext bdsp;
     int64_t cid;                        ///< compression id
     unsigned int width, height;
+    enum AVPixelFormat pix_fmt;
     unsigned int mb_width, mb_height;
     uint32_t mb_scan_index[68];         /* max for 1080p */
     int cur_field;                      ///< current interlaced field
@@ -141,7 +142,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
 
     ctx->is_444 = 0;
     if (buf[0x4] == 0x2) {
-        ctx->avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
+        ctx->pix_fmt = AV_PIX_FMT_YUV444P10;
         ctx->avctx->bits_per_raw_sample = 10;
         if (ctx->bit_depth != 10) {
             ff_blockdsp_init(&ctx->bdsp, ctx->avctx);
@@ -151,7 +152,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
         }
         ctx->is_444 = 1;
     } else if (buf[0x21] & 0x40) {
-        ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
+        ctx->pix_fmt = AV_PIX_FMT_YUV422P10;
         ctx->avctx->bits_per_raw_sample = 10;
         if (ctx->bit_depth != 10) {
             ff_blockdsp_init(&ctx->bdsp, ctx->avctx);
@@ -160,7 +161,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
             ctx->decode_dct_block = dnxhd_decode_dct_block_10;
         }
     } else {
-        ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
+        ctx->pix_fmt = AV_PIX_FMT_YUV422P;
         ctx->avctx->bits_per_raw_sample = 8;
         if (ctx->bit_depth != 8) {
             ff_blockdsp_init(&ctx->bdsp, ctx->avctx);
@@ -446,7 +447,13 @@ decode_coding_unit:
                avctx->width, avctx->height, ctx->width, ctx->height);
         first_field = 1;
     }
+    if (avctx->pix_fmt != AV_PIX_FMT_NONE && avctx->pix_fmt != ctx->pix_fmt) {
+        av_log(avctx, AV_LOG_WARNING, "pix_fmt changed: %s -> %s\n",
+               av_get_pix_fmt_name(avctx->pix_fmt), av_get_pix_fmt_name(ctx->pix_fmt));
+        first_field = 1;
+    }
 
+    avctx->pix_fmt = ctx->pix_fmt;
     ret = ff_set_dimensions(avctx, ctx->width, ctx->height);
     if (ret < 0)
         return ret;
```
```diff
@@ -45,8 +45,11 @@ static int dvdsub_parse(AVCodecParserContext *s,
     DVDSubParseContext *pc = s->priv_data;
 
     if (pc->packet_index == 0) {
-        if (buf_size < 2)
-            return 0;
+        if (buf_size < 2 || AV_RB16(buf) && buf_size < 6) {
+            if (buf_size)
+                av_log(avctx, AV_LOG_DEBUG, "Parser input %d too small\n", buf_size);
+            return buf_size;
+        }
         pc->packet_len = AV_RB16(buf);
         if (pc->packet_len == 0) /* HD-DVD subpicture packet */
             pc->packet_len = AV_RB32(buf+2);
```
```diff
@@ -105,6 +105,9 @@ static int decode_rle(uint8_t *bitmap, int linesize, int w, int h,
     int x, y, len, color;
     uint8_t *d;
 
+    if (start >= buf_size)
+        return -1;
+
     bit_len = (buf_size - start) * 8;
     init_get_bits(&gb, buf + start, bit_len);
 
@@ -356,10 +359,12 @@ static int decode_dvd_subtitles(DVDSubContext *ctx, AVSubtitle *sub_header,
         sub_header->rects[0] = av_mallocz(sizeof(AVSubtitleRect));
         sub_header->num_rects = 1;
         sub_header->rects[0]->pict.data[0] = bitmap;
-        decode_rle(bitmap, w * 2, w, (h + 1) / 2,
-                   buf, offset1, buf_size, is_8bit);
-        decode_rle(bitmap + w, w * 2, w, h / 2,
-                   buf, offset2, buf_size, is_8bit);
+        if (decode_rle(bitmap, w * 2, w, (h + 1) / 2,
+                       buf, offset1, buf_size, is_8bit) < 0)
+            goto fail;
+        if (decode_rle(bitmap + w, w * 2, w, h / 2,
+                       buf, offset2, buf_size, is_8bit) < 0)
+            goto fail;
         sub_header->rects[0]->pict.data[1] = av_mallocz(AVPALETTE_SIZE);
         if (is_8bit) {
             if (yuv_palette == 0)
```
```diff
@@ -329,6 +329,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
 {
     DxaDecContext * const c = avctx->priv_data;
 
+    if (avctx->width%4 || avctx->height%4) {
+        avpriv_request_sample(avctx, "dimensions are not a multiple of 4");
+        return AVERROR_INVALIDDATA;
+    }
+
     c->prev = av_frame_alloc();
     if (!c->prev)
         return AVERROR(ENOMEM);
```
```diff
@@ -471,10 +471,10 @@ static int decode_frame(FLACContext *s)
         ret = allocate_buffers(s);
         if (ret < 0)
             return ret;
-        ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
         s->got_streaminfo = 1;
         dump_headers(s->avctx, (FLACStreaminfo *)s);
     }
+    ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
 
 //    dump_headers(s->avctx, (FLACStreaminfo *)s);
 
```
```diff
@@ -90,6 +90,7 @@ typedef struct G2MContext {
 
     int compression;
     int width, height, bpp;
+    int orig_width, orig_height;
     int tile_width, tile_height;
     int tiles_x, tiles_y, tile_x, tile_y;
 
@@ -712,8 +713,8 @@ static int g2m_decode_frame(AVCodecContext *avctx, void *data,
             }
             c->width  = bytestream2_get_be32(&bc);
             c->height = bytestream2_get_be32(&bc);
-            if (c->width < 16 || c->width > avctx->width ||
-                c->height < 16 || c->height > avctx->height) {
+            if (c->width < 16 || c->width > c->orig_width ||
+                c->height < 16 || c->height > c->orig_height) {
                 av_log(avctx, AV_LOG_ERROR,
                        "Invalid frame dimensions %dx%d\n",
                        c->width, c->height);
@@ -735,8 +736,10 @@ static int g2m_decode_frame(AVCodecContext *avctx, void *data,
             }
             c->tile_width  = bytestream2_get_be32(&bc);
             c->tile_height = bytestream2_get_be32(&bc);
-            if (!c->tile_width || !c->tile_height ||
-                ((c->tile_width | c->tile_height) & 0xF)) {
+            if (c->tile_width <= 0 || c->tile_height <= 0 ||
+                ((c->tile_width | c->tile_height) & 0xF) ||
+                c->tile_width * 4LL * c->tile_height >= INT_MAX
+            ) {
                 av_log(avctx, AV_LOG_ERROR,
                        "Invalid tile dimensions %dx%d\n",
                        c->tile_width, c->tile_height);
@@ -882,6 +885,10 @@ static av_cold int g2m_decode_init(AVCodecContext *avctx)
 
     avctx->pix_fmt = AV_PIX_FMT_RGB24;
 
+    // store original sizes and check against those if resize happens
+    c->orig_width  = avctx->width;
+    c->orig_height = avctx->height;
+
     return 0;
 }
 
```
```diff
@@ -113,6 +113,9 @@ typedef struct RL_VLC_ELEM {
  * LAST_SKIP_BITS(name, gb, num)
  *   Like SKIP_BITS, to be used if next call is UPDATE_CACHE or CLOSE_READER.
  *
+ * BITS_LEFT(name, gb)
+ *   Return the number of bits left
+ *
  * For examples see get_bits, show_bits, skip_bits, get_vlc.
  */
 
@@ -179,6 +182,8 @@
     name ## _index = FFMIN(name ## _size_plus8, name ## _index + (num))
 #endif
 
+#define BITS_LEFT(name, gb) ((int)((gb)->size_in_bits - name ## _index))
+
 #define SKIP_BITS(name, gb, num) \
     do { \
         SKIP_CACHE(name, gb, num); \
```
```diff
@@ -258,26 +258,21 @@ static int gif_read_image(GifState *s, AVFrame *frame)
                 case 1:
                     y1 += 8;
                     ptr += linesize * 8;
-                    if (y1 >= height) {
-                        y1 = pass ? 2 : 4;
-                        ptr = ptr1 + linesize * y1;
-                        pass++;
-                    }
                     break;
                 case 2:
                     y1 += 4;
                     ptr += linesize * 4;
-                    if (y1 >= height) {
-                        y1 = 1;
-                        ptr = ptr1 + linesize;
-                        pass++;
-                    }
                     break;
                 case 3:
                     y1 += 2;
                     ptr += linesize * 2;
                     break;
                 }
+                while (y1 >= height) {
+                    y1 = 4 >> pass;
+                    ptr = ptr1 + linesize * y1;
+                    pass++;
+                }
             } else {
                 ptr += linesize;
             }
```
```diff
@@ -336,6 +336,14 @@ static int decode_slice(MpegEncContext *s)
             s->padding_bug_score += 32;
     }
 
+    if (s->codec_id == AV_CODEC_ID_H263 &&
+        (s->workaround_bugs & FF_BUG_AUTODETECT) &&
+        get_bits_left(&s->gb) >= 64 &&
+        AV_RB64(s->gb.buffer_end - 8) == 0xCDCDCDCDFC7F0000) {
+
+        s->padding_bug_score += 32;
+    }
+
     if (s->workaround_bugs & FF_BUG_AUTODETECT) {
         if (s->padding_bug_score > -2 && !s->data_partitioning)
             s->workaround_bugs |= FF_BUG_NO_PADDING;
```
|
@ -215,18 +215,18 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
|
|||||||
|
|
||||||
if ((h->left_samples_available & 0x8080) != 0x8080) {
|
if ((h->left_samples_available & 0x8080) != 0x8080) {
|
||||||
mode = left[mode];
|
mode = left[mode];
|
||||||
if (is_chroma && (h->left_samples_available & 0x8080)) {
|
|
||||||
// mad cow disease mode, aka MBAFF + constrained_intra_pred
|
|
||||||
mode = ALZHEIMER_DC_L0T_PRED8x8 +
|
|
||||||
(!(h->left_samples_available & 0x8000)) +
|
|
||||||
2 * (mode == DC_128_PRED8x8);
|
|
||||||
}
|
|
||||||
if (mode < 0) {
|
if (mode < 0) {
|
||||||
av_log(h->avctx, AV_LOG_ERROR,
|
av_log(h->avctx, AV_LOG_ERROR,
|
||||||
"left block unavailable for requested intra mode at %d %d\n",
|
"left block unavailable for requested intra mode at %d %d\n",
|
||||||
h->mb_x, h->mb_y);
|
h->mb_x, h->mb_y);
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
if (is_chroma && (h->left_samples_available & 0x8080)) {
|
||||||
|
// mad cow disease mode, aka MBAFF + constrained_intra_pred
|
||||||
|
mode = ALZHEIMER_DC_L0T_PRED8x8 +
|
||||||
|
(!(h->left_samples_available & 0x8000)) +
|
||||||
|
2 * (mode == DC_128_PRED8x8);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return mode;
|
return mode;
|
||||||
@ -248,7 +248,7 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
|
|||||||
|
|
||||||
#define STARTCODE_TEST \
|
#define STARTCODE_TEST \
|
||||||
if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
|
if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
|
||||||
if (src[i + 2] != 3) { \
|
if (src[i + 2] != 3 && src[i + 2] != 0) { \
|
||||||
/* startcode, so we must be past the end */ \
|
/* startcode, so we must be past the end */ \
|
||||||
length = i; \
|
length = i; \
|
||||||
} \
|
} \
|
||||||
@ -321,7 +321,7 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
|
|||||||
if (src[si + 2] > 3) {
|
if (src[si + 2] > 3) {
|
||||||
dst[di++] = src[si++];
|
dst[di++] = src[si++];
|
||||||
dst[di++] = src[si++];
|
dst[di++] = src[si++];
|
||||||
} else if (src[si] == 0 && src[si + 1] == 0) {
|
} else if (src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) {
|
||||||
if (src[si + 2] == 3) { // escape
|
if (src[si + 2] == 3) { // escape
|
||||||
dst[di++] = 0;
|
dst[di++] = 0;
|
||||||
dst[di++] = 0;
|
dst[di++] = 0;
|
||||||
@ -392,6 +392,7 @@ void ff_h264_free_tables(H264Context *h, int free_rbsp)
|
|||||||
if (free_rbsp && h->DPB) {
|
if (free_rbsp && h->DPB) {
|
||||||
for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
|
for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
|
||||||
ff_h264_unref_picture(h, &h->DPB[i]);
|
ff_h264_unref_picture(h, &h->DPB[i]);
|
||||||
|
memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
|
||||||
av_freep(&h->DPB);
|
av_freep(&h->DPB);
|
||||||
} else if (h->DPB) {
|
} else if (h->DPB) {
|
||||||
for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
|
for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
|
||||||
@@ -841,10 +842,10 @@ static void decode_postinit(H264Context *h, int setup_finished)
             stereo->type = AV_STEREO3D_CHECKERBOARD;
             break;
         case 1:
-            stereo->type = AV_STEREO3D_LINES;
+            stereo->type = AV_STEREO3D_COLUMNS;
             break;
         case 2:
-            stereo->type = AV_STEREO3D_COLUMNS;
+            stereo->type = AV_STEREO3D_LINES;
             break;
         case 3:
             if (h->quincunx_subsampling)
@@ -990,6 +991,16 @@ int ff_pred_weight_table(H264Context *h)
     h->luma_log2_weight_denom = get_ue_golomb(&h->gb);
     if (h->sps.chroma_format_idc)
         h->chroma_log2_weight_denom = get_ue_golomb(&h->gb);
 
+    if (h->luma_log2_weight_denom > 7U) {
+        av_log(h->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", h->luma_log2_weight_denom);
+        h->luma_log2_weight_denom = 0;
+    }
+    if (h->chroma_log2_weight_denom > 7U) {
+        av_log(h->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", h->chroma_log2_weight_denom);
+        h->chroma_log2_weight_denom = 0;
+    }
+
     luma_def   = 1 << h->luma_log2_weight_denom;
     chroma_def = 1 << h->chroma_log2_weight_denom;
 
@@ -1330,43 +1341,6 @@ int ff_set_ref_count(H264Context *h)
 
 static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
 
-static int find_start_code(const uint8_t *buf, int buf_size,
-                           int buf_index, int next_avc)
-{
-    // start code prefix search
-    for (; buf_index + 3 < next_avc; buf_index++)
-        // This should always succeed in the first iteration.
-        if (buf[buf_index]     == 0 &&
-            buf[buf_index + 1] == 0 &&
-            buf[buf_index + 2] == 1)
-            break;
-
-    buf_index += 3;
-
-    if (buf_index >= buf_size)
-        return buf_size;
-
-    return buf_index;
-}
-
-static int get_avc_nalsize(H264Context *h, const uint8_t *buf,
-                           int buf_size, int *buf_index)
-{
-    int i, nalsize = 0;
-
-    if (*buf_index >= buf_size - h->nal_length_size)
-        return -1;
-
-    for (i = 0; i < h->nal_length_size; i++)
-        nalsize = (nalsize << 8) | buf[(*buf_index)++];
-    if (nalsize <= 0 || nalsize > buf_size - *buf_index) {
-        av_log(h->avctx, AV_LOG_ERROR,
-               "AVC: nal size %d\n", nalsize);
-        return -1;
-    }
-    return nalsize;
-}
-
 static int get_bit_length(H264Context *h, const uint8_t *buf,
                           const uint8_t *ptr, int dst_length,
                           int i, int next_avc)
@@ -37,6 +37,7 @@
 #include "h264dsp.h"
 #include "h264pred.h"
 #include "h264qpel.h"
+#include "internal.h" // for avpriv_find_start_code()
 #include "mpegutils.h"
 #include "parser.h"
 #include "qpeldsp.h"
@@ -337,6 +338,7 @@ typedef struct H264Picture {
  * H264Context
  */
 typedef struct H264Context {
+    AVClass *av_class;
     AVCodecContext *avctx;
     VideoDSPContext vdsp;
     H264DSPContext h264dsp;
@@ -749,7 +751,7 @@ typedef struct H264Context {
 
     int16_t slice_row[MAX_SLICES]; ///< to detect when MAX_SLICES is too low
 
-    uint8_t parse_history[4];
+    uint8_t parse_history[6];
     int parse_history_count;
     int parse_last_mb;
     uint8_t *edge_emu_buffer;
@@ -1092,6 +1094,34 @@ static av_always_inline int get_dct8x8_allowed(H264Context *h)
                                0x0001000100010001ULL));
 }
 
+static inline int find_start_code(const uint8_t *buf, int buf_size,
+                                  int buf_index, int next_avc)
+{
+    uint32_t state = -1;
+
+    buf_index = avpriv_find_start_code(buf + buf_index, buf + next_avc + 1, &state) - buf - 1;
+
+    return FFMIN(buf_index, buf_size);
+}
+
+static inline int get_avc_nalsize(H264Context *h, const uint8_t *buf,
+                                  int buf_size, int *buf_index)
+{
+    int i, nalsize = 0;
+
+    if (*buf_index >= buf_size - h->nal_length_size)
+        return -1;
+
+    for (i = 0; i < h->nal_length_size; i++)
+        nalsize = ((unsigned)nalsize << 8) | buf[(*buf_index)++];
+    if (nalsize <= 0 || nalsize > buf_size - *buf_index) {
+        av_log(h->avctx, AV_LOG_ERROR,
+               "AVC: nal size %d\n", nalsize);
+        return -1;
+    }
+    return nalsize;
+}
+
 int ff_h264_field_end(H264Context *h, int in_setup);
 
 int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src);
@@ -420,10 +420,12 @@ static av_always_inline void mc_part_weighted(H264Context *h, int n, int square,
             int weight1 = 64 - weight0;
             luma_weight_avg(dest_y, tmp_y, h->mb_linesize,
                             height, 5, weight0, weight1, 0);
+            if (!CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
             chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize,
                               chroma_height, 5, weight0, weight1, 0);
             chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize,
                               chroma_height, 5, weight0, weight1, 0);
+            }
         } else {
             luma_weight_avg(dest_y, tmp_y, h->mb_linesize, height,
                             h->luma_log2_weight_denom,
@@ -431,6 +433,7 @@ static av_always_inline void mc_part_weighted(H264Context *h, int n, int square,
                             h->luma_weight[refn1][1][0],
                             h->luma_weight[refn0][0][1] +
                             h->luma_weight[refn1][1][1]);
+            if (!CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
             chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, chroma_height,
                               h->chroma_log2_weight_denom,
                               h->chroma_weight[refn0][0][0][0],
@@ -444,6 +447,7 @@ static av_always_inline void mc_part_weighted(H264Context *h, int n, int square,
                               h->chroma_weight[refn0][0][1][1] +
                               h->chroma_weight[refn1][1][1][1]);
             }
+            }
         } else {
             int list = list1 ? 1 : 0;
             int refn = h->ref_cache[list][scan8[n]];
@@ -456,6 +460,7 @@ static av_always_inline void mc_part_weighted(H264Context *h, int n, int square,
                         h->luma_log2_weight_denom,
                         h->luma_weight[refn][list][0],
                         h->luma_weight[refn][list][1]);
+        if (!CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
         if (h->use_weight_chroma) {
             chroma_weight_op(dest_cb, h->mb_uvlinesize, chroma_height,
                              h->chroma_log2_weight_denom,
@@ -467,6 +472,7 @@ static av_always_inline void mc_part_weighted(H264Context *h, int n, int square,
                              h->chroma_weight[refn][list][1][1]);
             }
         }
+        }
     }
 }
 
 static av_always_inline void prefetch_motion(H264Context *h, int list,
@@ -28,6 +28,7 @@
 typedef struct H264BSFContext {
     uint8_t length_size;
     uint8_t first_idr;
+    uint8_t idr_sps_pps_seen;
     int     extradata_parsed;
 } H264BSFContext;
 
@@ -155,6 +156,7 @@ static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
             return ret;
         ctx->length_size      = ret;
         ctx->first_idr        = 1;
+        ctx->idr_sps_pps_seen = 0;
         ctx->extradata_parsed = 1;
     }
 
@@ -174,8 +176,17 @@ static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
         if (buf + nal_size > buf_end || nal_size < 0)
             goto fail;
 
-        /* prepend only to the first type 5 NAL unit of an IDR picture */
-        if (ctx->first_idr && (unit_type == 5 || unit_type == 7 || unit_type == 8)) {
+        if (ctx->first_idr && (unit_type == 7 || unit_type == 8))
+            ctx->idr_sps_pps_seen = 1;
+
+        /* if this is a new IDR picture following an IDR picture, reset the idr flag.
+         * Just check first_mb_in_slice to be 0 as this is the simplest solution.
+         * This could be checking idr_pic_id instead, but would complexify the parsing. */
+        if (!ctx->first_idr && unit_type == 5 && (buf[1] & 0x80))
+            ctx->first_idr = 1;
+
+        /* prepend only to the first type 5 NAL unit of an IDR picture, if no sps/pps are already present */
+        if (ctx->first_idr && unit_type == 5 && !ctx->idr_sps_pps_seen) {
             if ((ret=alloc_and_copy(poutbuf, poutbuf_size,
                                     avctx->extradata, avctx->extradata_size,
                                     buf, nal_size)) < 0)
@@ -185,8 +196,10 @@ static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
             if ((ret=alloc_and_copy(poutbuf, poutbuf_size,
                                     NULL, 0, buf, nal_size)) < 0)
                 goto fail;
-            if (!ctx->first_idr && unit_type == 1)
+            if (!ctx->first_idr && unit_type == 1) {
                 ctx->first_idr = 1;
+                ctx->idr_sps_pps_seen = 0;
+            }
         }
 
         buf += nal_size;
@@ -92,7 +92,7 @@ static int h264_find_frame_end(H264Context *h, const uint8_t *buf,
                     state = 7;
                 } else {
                     h->parse_history[h->parse_history_count++]= buf[i];
-                    if (h->parse_history_count>3) {
+                    if (h->parse_history_count>5) {
                         unsigned int mb, last_mb= h->parse_last_mb;
                         GetBitContext gb;
 
@@ -120,7 +120,7 @@ found:
     pc->frame_start_found = 0;
     if (h->is_avc)
         return next_avc;
-    return i - (state & 5) - 3 * (state > 7);
+    return i - (state & 5) - 5 * (state > 7);
 }
 
 static int scan_mmco_reset(AVCodecParserContext *s)
@@ -203,10 +203,10 @@ static int scan_mmco_reset(AVCodecParserContext *s)
  */
 static inline int parse_nal_units(AVCodecParserContext *s,
                                   AVCodecContext *avctx,
-                                  const uint8_t *buf, int buf_size)
+                                  const uint8_t * const buf, int buf_size)
 {
     H264Context *h = s->priv_data;
-    const uint8_t *buf_end = buf + buf_size;
+    int buf_index, next_avc;
     unsigned int pps_id;
     unsigned int slice_type;
     int state = -1, got_reset = 0;
@@ -226,26 +226,26 @@ static inline int parse_nal_units(AVCodecParserContext *s,
     if (!buf_size)
         return 0;
 
+    buf_index = 0;
+    next_avc  = h->is_avc ? 0 : buf_size;
     for (;;) {
         int src_length, dst_length, consumed, nalsize = 0;
-        if (h->is_avc) {
-            int i;
-            if (h->nal_length_size >= buf_end - buf) break;
-            nalsize = 0;
-            for (i = 0; i < h->nal_length_size; i++)
-                nalsize = (nalsize << 8) | *buf++;
-            if (nalsize <= 0 || nalsize > buf_end - buf) {
-                av_log(h->avctx, AV_LOG_ERROR, "AVC: nal size %d\n", nalsize);
+        if (buf_index >= next_avc) {
+            nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
+            if (nalsize < 0)
                 break;
-            }
-            src_length = nalsize;
+            next_avc = buf_index + nalsize;
         } else {
-            buf = avpriv_find_start_code(buf, buf_end, &state);
-            if (buf >= buf_end)
+            buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
+            if (buf_index >= buf_size)
                 break;
-            --buf;
-            src_length = buf_end - buf;
+            if (buf_index >= next_avc)
+                continue;
         }
+        src_length = next_avc - buf_index;
+
+        state = buf[buf_index];
         switch (state & 0x1f) {
         case NAL_SLICE:
         case NAL_IDR_SLICE:
@@ -262,10 +262,13 @@ static inline int parse_nal_units(AVCodecParserContext *s,
             }
             break;
         }
-        ptr = ff_h264_decode_nal(h, buf, &dst_length, &consumed, src_length);
+        ptr = ff_h264_decode_nal(h, buf + buf_index, &dst_length,
+                                 &consumed, src_length);
         if (ptr == NULL || dst_length < 0)
             break;
 
+        buf_index += consumed;
+
         init_get_bits(&h->gb, ptr, 8 * dst_length);
         switch (h->nal_unit_type) {
         case NAL_SPS:
@@ -440,7 +443,6 @@ static inline int parse_nal_units(AVCodecParserContext *s,
 
             return 0; /* no need to evaluate the rest */
         }
-        buf += h->is_avc ? nalsize : consumed;
     }
     if (q264)
         return 0;
|
@ -586,6 +586,17 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
|
|||||||
h->mb_type_pool = NULL;
|
h->mb_type_pool = NULL;
|
||||||
h->ref_index_pool = NULL;
|
h->ref_index_pool = NULL;
|
||||||
h->motion_val_pool = NULL;
|
h->motion_val_pool = NULL;
|
||||||
|
h->intra4x4_pred_mode= NULL;
|
||||||
|
h->non_zero_count = NULL;
|
||||||
|
h->slice_table_base = NULL;
|
||||||
|
h->slice_table = NULL;
|
||||||
|
h->cbp_table = NULL;
|
||||||
|
h->chroma_pred_mode_table = NULL;
|
||||||
|
memset(h->mvd_table, 0, sizeof(h->mvd_table));
|
||||||
|
h->direct_table = NULL;
|
||||||
|
h->list_counts = NULL;
|
||||||
|
h->mb2b_xy = NULL;
|
||||||
|
h->mb2br_xy = NULL;
|
||||||
for (i = 0; i < 2; i++) {
|
for (i = 0; i < 2; i++) {
|
||||||
h->rbsp_buffer[i] = NULL;
|
h->rbsp_buffer[i] = NULL;
|
||||||
h->rbsp_buffer_size[i] = 0;
|
h->rbsp_buffer_size[i] = 0;
|
||||||
|
@ -108,7 +108,7 @@ static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
|
|||||||
if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
|
if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
s->filter_slice_edges = av_malloc(ctb_count);
|
s->filter_slice_edges = av_mallocz(ctb_count);
|
||||||
s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
|
s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
|
||||||
sizeof(*s->tab_slice_address));
|
sizeof(*s->tab_slice_address));
|
||||||
s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
|
s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
|
||||||
@@ -978,7 +978,7 @@ static int hls_transform_unit(HEVCContext *s, int x0, int y0,
                     for (i = 0; i < (size * size); i++) {
                         coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
                     }
-                    s->hevcdsp.transform_add[log2_trafo_size-2](dst, coeffs, stride);
+                    s->hevcdsp.transform_add[log2_trafo_size_c-2](dst, coeffs, stride);
                 }
             }
 
@@ -1007,7 +1007,7 @@ static int hls_transform_unit(HEVCContext *s, int x0, int y0,
                     for (i = 0; i < (size * size); i++) {
                         coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
                     }
-                    s->hevcdsp.transform_add[log2_trafo_size-2](dst, coeffs, stride);
+                    s->hevcdsp.transform_add[log2_trafo_size_c-2](dst, coeffs, stride);
                 }
             }
         } else if (blk_idx == 3) {
|
@ -865,6 +865,11 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
|
|||||||
sps->long_term_ref_pics_present_flag = get_bits1(gb);
|
sps->long_term_ref_pics_present_flag = get_bits1(gb);
|
||||||
if (sps->long_term_ref_pics_present_flag) {
|
if (sps->long_term_ref_pics_present_flag) {
|
||||||
sps->num_long_term_ref_pics_sps = get_ue_golomb_long(gb);
|
sps->num_long_term_ref_pics_sps = get_ue_golomb_long(gb);
|
||||||
|
if (sps->num_long_term_ref_pics_sps > 31U) {
|
||||||
|
av_log(0, AV_LOG_ERROR, "num_long_term_ref_pics_sps %d is out of range.\n",
|
||||||
|
sps->num_long_term_ref_pics_sps);
|
||||||
|
goto err;
|
||||||
|
}
|
||||||
for (i = 0; i < sps->num_long_term_ref_pics_sps; i++) {
|
for (i = 0; i < sps->num_long_term_ref_pics_sps; i++) {
|
||||||
sps->lt_ref_pic_poc_lsb_sps[i] = get_bits(gb, sps->log2_max_poc_lsb);
|
sps->lt_ref_pic_poc_lsb_sps[i] = get_bits(gb, sps->log2_max_poc_lsb);
|
||||||
sps->used_by_curr_pic_lt_sps_flag[i] = get_bits1(gb);
|
sps->used_by_curr_pic_lt_sps_flag[i] = get_bits1(gb);
|
||||||
@@ -1164,6 +1169,14 @@ int ff_hevc_decode_nal_pps(HEVCContext *s)
     if (pps->cu_qp_delta_enabled_flag)
         pps->diff_cu_qp_delta_depth = get_ue_golomb_long(gb);
 
+    if (pps->diff_cu_qp_delta_depth < 0 ||
+        pps->diff_cu_qp_delta_depth > sps->log2_diff_max_min_coding_block_size) {
+        av_log(s->avctx, AV_LOG_ERROR, "diff_cu_qp_delta_depth %d is invalid\n",
+               pps->diff_cu_qp_delta_depth);
+        ret = AVERROR_INVALIDDATA;
+        goto err;
+    }
+
     pps->cb_qp_offset = get_se_golomb(gb);
     if (pps->cb_qp_offset < -12 || pps->cb_qp_offset > 12) {
         av_log(s->avctx, AV_LOG_ERROR, "pps_cb_qp_offset out of range: %d\n",
@@ -1289,7 +1302,8 @@ int ff_hevc_decode_nal_pps(HEVCContext *s)
         if (sps->ptl.general_ptl.profile_idc == FF_PROFILE_HEVC_REXT && pps_range_extensions_flag) {
             av_log(s->avctx, AV_LOG_ERROR,
                    "PPS extension flag is partially implemented.\n");
-            pps_range_extensions(s, pps, sps);
+            if ((ret = pps_range_extensions(s, pps, sps)) < 0)
+                goto err;
         }
     }
 
|
@ -625,9 +625,9 @@ static void decode_422_bitstream(HYuvContext *s, int count)
|
|||||||
READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
|
READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
|
||||||
READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
|
READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
|
||||||
}
|
}
|
||||||
for (; i < count && get_bits_left(&s->gb) > 0; i++) {
|
for (; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
|
||||||
READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
|
READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
|
||||||
if (get_bits_left(&s->gb) <= 0) break;
|
if (BITS_LEFT(re, &s->gb) <= 0) break;
|
||||||
READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
|
READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
|
||||||
}
|
}
|
||||||
for (; i < count; i++)
|
for (; i < count; i++)
|
||||||
@ -666,7 +666,7 @@ static void decode_plane_bitstream(HYuvContext *s, int count, int plane)
|
|||||||
if (s->bps <= 8) {
|
if (s->bps <= 8) {
|
||||||
OPEN_READER(re, &s->gb);
|
OPEN_READER(re, &s->gb);
|
||||||
if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
|
if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
|
||||||
for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
|
for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
|
||||||
READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
|
READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@ -678,7 +678,7 @@ static void decode_plane_bitstream(HYuvContext *s, int count, int plane)
|
|||||||
} else if (s->bps <= 14) {
|
} else if (s->bps <= 14) {
|
||||||
OPEN_READER(re, &s->gb);
|
OPEN_READER(re, &s->gb);
|
||||||
if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
|
if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
|
||||||
for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
|
for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
|
||||||
READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
|
READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@ -707,7 +707,7 @@ static void decode_gray_bitstream(HYuvContext *s, int count)
|
|||||||
count/=2;
|
count/=2;
|
||||||
|
|
||||||
if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
|
if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
|
||||||
for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
|
for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
|
||||||
READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
|
READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@ -724,7 +724,7 @@ static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
|
|||||||
int i;
|
int i;
|
||||||
OPEN_READER(re, &s->gb);
|
OPEN_READER(re, &s->gb);
|
||||||
|
|
||||||
for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
|
for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
|
||||||
unsigned int index;
|
unsigned int index;
|
||||||
int code, n;
|
int code, n;
|
||||||
|
|
||||||
|
@@ -677,11 +677,15 @@ static int decode_frame(AVCodecContext *avctx,
     const uint8_t *buf_end = buf + buf_size;
     int y, plane, res;
     GetByteContext gb;
+    const AVPixFmtDescriptor *desc;
 
     if ((res = extract_header(avctx, avpkt)) < 0)
         return res;
     if ((res = ff_reget_buffer(avctx, s->frame)) < 0)
         return res;
 
+    desc = av_pix_fmt_desc_get(avctx->pix_fmt);
+
     if (!s->init && avctx->bits_per_coded_sample <= 8 &&
         avctx->pix_fmt == AV_PIX_FMT_PAL8) {
         if ((res = cmap_read_palette(avctx, (uint32_t *)s->frame->data[1])) < 0)
@@ -721,7 +725,6 @@ static int decode_frame(AVCodecContext *avctx,
             } else
                 return unsupported(avctx);
         } else if (avctx->codec_tag == MKTAG('D', 'E', 'E', 'P')) {
-            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
             int raw_width = avctx->width * (av_get_bits_per_pixel(desc) >> 3);
             int x;
             for (y = 0; y < avctx->height && buf < buf_end; y++) {
@@ -838,7 +841,6 @@ static int decode_frame(AVCodecContext *avctx,
             } else
                 return unsupported(avctx);
         } else if (avctx->codec_tag == MKTAG('D', 'E', 'E', 'P')) { // IFF-DEEP
-            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
             if (av_get_bits_per_pixel(desc) == 32)
                 decode_deep_rle32(s->frame->data[0], buf, buf_size, avctx->width, avctx->height, s->frame->linesize[0]);
             else
@@ -847,16 +849,15 @@ static int decode_frame(AVCodecContext *avctx,
         break;
     case 4:
         bytestream2_init(&gb, buf, buf_size);
-        if (avctx->codec_tag == MKTAG('R', 'G', 'B', '8'))
+        if (avctx->codec_tag == MKTAG('R', 'G', 'B', '8') && avctx->pix_fmt == AV_PIX_FMT_RGB32)
             decode_rgb8(&gb, s->frame->data[0], avctx->width, avctx->height, s->frame->linesize[0]);
-        else if (avctx->codec_tag == MKTAG('R', 'G', 'B', 'N'))
+        else if (avctx->codec_tag == MKTAG('R', 'G', 'B', 'N') && avctx->pix_fmt == AV_PIX_FMT_RGB444)
             decode_rgbn(&gb, s->frame->data[0], avctx->width, avctx->height, s->frame->linesize[0]);
         else
             return unsupported(avctx);
         break;
     case 5:
         if (avctx->codec_tag == MKTAG('D', 'E', 'E', 'P')) {
-            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
             if (av_get_bits_per_pixel(desc) == 32)
                 decode_deep_tvdc32(s->frame->data[0], buf, buf_size, avctx->width, avctx->height, s->frame->linesize[0], s->tvdc);
             else
|
@ -94,7 +94,7 @@ typedef struct Indeo3DecodeContext {
|
|||||||
|
|
||||||
int16_t width, height;
|
int16_t width, height;
|
||||||
uint32_t frame_num; ///< current frame number (zero-based)
|
uint32_t frame_num; ///< current frame number (zero-based)
|
||||||
uint32_t data_size; ///< size of the frame data in bytes
|
int data_size; ///< size of the frame data in bytes
|
||||||
uint16_t frame_flags; ///< frame properties
|
uint16_t frame_flags; ///< frame properties
|
||||||
uint8_t cb_offset; ///< needed for selecting VQ tables
|
uint8_t cb_offset; ///< needed for selecting VQ tables
|
||||||
uint8_t buf_sel; ///< active frame buffer: 0 - primary, 1 -secondary
|
uint8_t buf_sel; ///< active frame buffer: 0 - primary, 1 -secondary
|
||||||
@ -899,7 +899,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
|
|||||||
GetByteContext gb;
|
GetByteContext gb;
|
||||||
const uint8_t *bs_hdr;
|
const uint8_t *bs_hdr;
|
||||||
uint32_t frame_num, word2, check_sum, data_size;
|
uint32_t frame_num, word2, check_sum, data_size;
|
||||||
uint32_t y_offset, u_offset, v_offset, starts[3], ends[3];
|
int y_offset, u_offset, v_offset;
|
||||||
|
uint32_t starts[3], ends[3];
|
||||||
uint16_t height, width;
|
uint16_t height, width;
|
||||||
int i, j;
|
int i, j;
|
||||||
|
|
||||||
@ -981,7 +982,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
|
|||||||
ctx->y_data_size = ends[0] - starts[0];
|
ctx->y_data_size = ends[0] - starts[0];
|
||||||
ctx->v_data_size = ends[1] - starts[1];
|
ctx->v_data_size = ends[1] - starts[1];
|
||||||
ctx->u_data_size = ends[2] - starts[2];
|
ctx->u_data_size = ends[2] - starts[2];
|
||||||
if (FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
|
if (FFMIN3(y_offset, v_offset, u_offset) < 0 ||
|
||||||
|
FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
|
||||||
FFMIN3(y_offset, v_offset, u_offset) < gb.buffer - bs_hdr + 16 ||
|
FFMIN3(y_offset, v_offset, u_offset) < gb.buffer - bs_hdr + 16 ||
|
||||||
FFMIN3(ctx->y_data_size, ctx->v_data_size, ctx->u_data_size) <= 0) {
|
FFMIN3(ctx->y_data_size, ctx->v_data_size, ctx->u_data_size) <= 0) {
|
||||||
av_log(avctx, AV_LOG_ERROR, "One of the y/u/v offsets is invalid\n");
|
av_log(avctx, AV_LOG_ERROR, "One of the y/u/v offsets is invalid\n");
|
||||||
|
@ -269,6 +269,11 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s,
|
|||||||
x += stride;
|
x += stride;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (x >= w) {
|
||||||
|
av_log(NULL, AV_LOG_ERROR, "run overflow\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
/* decode run termination value */
|
/* decode run termination value */
|
||||||
Rb = R(last, x);
|
Rb = R(last, x);
|
||||||
RItype = (FFABS(Ra - Rb) <= state->near) ? 1 : 0;
|
RItype = (FFABS(Ra - Rb) <= state->near) ? 1 : 0;
|
||||||
|
@ -43,6 +43,13 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
|||||||
{
|
{
|
||||||
JvContext *s = avctx->priv_data;
|
JvContext *s = avctx->priv_data;
|
||||||
|
|
||||||
|
if (!avctx->width || !avctx->height ||
|
||||||
|
(avctx->width & 7) || (avctx->height & 7)) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "Invalid video dimensions: %dx%d\n",
|
||||||
|
avctx->width, avctx->height);
|
||||||
|
return AVERROR(EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
s->frame = av_frame_alloc();
|
s->frame = av_frame_alloc();
|
||||||
if (!s->frame)
|
if (!s->frame)
|
||||||
return AVERROR(ENOMEM);
|
return AVERROR(ENOMEM);
|
||||||
|
@ -96,8 +96,7 @@ static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
|
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
WebRtcIlbcfix_DecodeImpl((WebRtc_Word16*) frame->data[0],
|
WebRtcIlbcfix_DecodeImpl((int16_t *) frame->data[0], (const uint16_t *) buf, &s->decoder, 1);
|
||||||
(const WebRtc_UWord16*) buf, &s->decoder, 1);
|
|
||||||
|
|
||||||
*got_frame_ptr = 1;
|
*got_frame_ptr = 1;
|
||||||
|
|
||||||
@ -170,7 +169,7 @@ static int ilbc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
|||||||
if ((ret = ff_alloc_packet2(avctx, avpkt, 50)) < 0)
|
if ((ret = ff_alloc_packet2(avctx, avpkt, 50)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
WebRtcIlbcfix_EncodeImpl((WebRtc_UWord16*) avpkt->data, (const WebRtc_Word16*) frame->data[0], &s->encoder);
|
WebRtcIlbcfix_EncodeImpl((uint16_t *) avpkt->data, (const int16_t *) frame->data[0], &s->encoder);
|
||||||
|
|
||||||
avpkt->size = s->encoder.no_of_bytes;
|
avpkt->size = s->encoder.no_of_bytes;
|
||||||
*got_packet_ptr = 1;
|
*got_packet_ptr = 1;
|
||||||
|
@ -236,10 +236,10 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
|
|||||||
case AV_STEREO3D_CHECKERBOARD:
|
case AV_STEREO3D_CHECKERBOARD:
|
||||||
fpa_type = 0;
|
fpa_type = 0;
|
||||||
break;
|
break;
|
||||||
case AV_STEREO3D_LINES:
|
case AV_STEREO3D_COLUMNS:
|
||||||
fpa_type = 1;
|
fpa_type = 1;
|
||||||
break;
|
break;
|
||||||
case AV_STEREO3D_COLUMNS:
|
case AV_STEREO3D_LINES:
|
||||||
fpa_type = 2;
|
fpa_type = 2;
|
||||||
break;
|
break;
|
||||||
case AV_STEREO3D_SIDEBYSIDE:
|
case AV_STEREO3D_SIDEBYSIDE:
|
||||||
|
@ -244,7 +244,8 @@ int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
|
|||||||
|
|
||||||
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
||||||
{
|
{
|
||||||
int len, nb_components, i, width, height, pix_fmt_id, ret;
|
int len, nb_components, i, width, height, bits, ret;
|
||||||
|
unsigned pix_fmt_id;
|
||||||
int h_count[MAX_COMPONENTS];
|
int h_count[MAX_COMPONENTS];
|
||||||
int v_count[MAX_COMPONENTS];
|
int v_count[MAX_COMPONENTS];
|
||||||
|
|
||||||
@ -254,11 +255,11 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
|||||||
/* XXX: verify len field validity */
|
/* XXX: verify len field validity */
|
||||||
len = get_bits(&s->gb, 16);
|
len = get_bits(&s->gb, 16);
|
||||||
s->avctx->bits_per_raw_sample =
|
s->avctx->bits_per_raw_sample =
|
||||||
s->bits = get_bits(&s->gb, 8);
|
bits = get_bits(&s->gb, 8);
|
||||||
|
|
||||||
if (s->pegasus_rct)
|
if (s->pegasus_rct)
|
||||||
s->bits = 9;
|
bits = 9;
|
||||||
if (s->bits == 9 && !s->pegasus_rct)
|
if (bits == 9 && !s->pegasus_rct)
|
||||||
s->rct = 1; // FIXME ugly
|
s->rct = 1; // FIXME ugly
|
||||||
|
|
||||||
if(s->lossless && s->avctx->lowres){
|
if(s->lossless && s->avctx->lowres){
|
||||||
@ -291,7 +292,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
|||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (s->ls && !(s->bits <= 8 || nb_components == 1)) {
|
if (s->ls && !(bits <= 8 || nb_components == 1)) {
|
||||||
avpriv_report_missing_feature(s->avctx,
|
avpriv_report_missing_feature(s->avctx,
|
||||||
"JPEG-LS that is not <= 8 "
|
"JPEG-LS that is not <= 8 "
|
||||||
"bits/component or 16-bit gray");
|
"bits/component or 16-bit gray");
|
||||||
@ -337,11 +338,13 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
|||||||
|
|
||||||
/* if different size, realloc/alloc picture */
|
/* if different size, realloc/alloc picture */
|
||||||
if ( width != s->width || height != s->height
|
if ( width != s->width || height != s->height
|
||||||
|
|| bits != s->bits
|
||||||
|| memcmp(s->h_count, h_count, sizeof(h_count))
|
|| memcmp(s->h_count, h_count, sizeof(h_count))
|
||||||
|| memcmp(s->v_count, v_count, sizeof(v_count))) {
|
|| memcmp(s->v_count, v_count, sizeof(v_count))) {
|
||||||
|
|
||||||
s->width = width;
|
s->width = width;
|
||||||
s->height = height;
|
s->height = height;
|
||||||
|
s->bits = bits;
|
||||||
memcpy(s->h_count, h_count, sizeof(h_count));
|
memcpy(s->h_count, h_count, sizeof(h_count));
|
||||||
memcpy(s->v_count, v_count, sizeof(v_count));
|
memcpy(s->v_count, v_count, sizeof(v_count));
|
||||||
s->interlaced = 0;
|
s->interlaced = 0;
|
||||||
@ -376,7 +379,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
|||||||
else if (!s->lossless)
|
else if (!s->lossless)
|
||||||
s->rgb = 0;
|
s->rgb = 0;
|
||||||
/* XXX: not complete test ! */
|
/* XXX: not complete test ! */
|
||||||
pix_fmt_id = (s->h_count[0] << 28) | (s->v_count[0] << 24) |
|
pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
|
||||||
(s->h_count[1] << 20) | (s->v_count[1] << 16) |
|
(s->h_count[1] << 20) | (s->v_count[1] << 16) |
|
||||||
(s->h_count[2] << 12) | (s->v_count[2] << 8) |
|
(s->h_count[2] << 12) | (s->v_count[2] << 8) |
|
||||||
(s->h_count[3] << 4) | s->v_count[3];
|
(s->h_count[3] << 4) | s->v_count[3];
|
||||||
@ -511,6 +514,8 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
|||||||
else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
|
else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
|
||||||
s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
|
s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
|
||||||
if (pix_fmt_id == 0x42111100) {
|
if (pix_fmt_id == 0x42111100) {
|
||||||
|
if (s->bits > 8)
|
||||||
|
goto unk_pixfmt;
|
||||||
s->upscale_h = 6;
|
s->upscale_h = 6;
|
||||||
s->chroma_height = s->height / 2;
|
s->chroma_height = s->height / 2;
|
||||||
}
|
}
|
||||||
@@ -1591,6 +1596,8 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
     }
 
     if (id == AV_RB32("LJIF")) {
+        int rgb = s->rgb;
+        int pegasus_rct = s->pegasus_rct;
         if (s->avctx->debug & FF_DEBUG_PICT_INFO)
             av_log(s->avctx, AV_LOG_INFO,
                    "Pegasus lossless jpeg header found\n");
@@ -1600,17 +1607,27 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
         skip_bits(&s->gb, 16); /* unknown always 0? */
         switch (i=get_bits(&s->gb, 8)) {
         case 1:
-            s->rgb = 1;
-            s->pegasus_rct = 0;
+            rgb = 1;
+            pegasus_rct = 0;
             break;
         case 2:
-            s->rgb = 1;
-            s->pegasus_rct = 1;
+            rgb = 1;
+            pegasus_rct = 1;
             break;
         default:
             av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
         }
 
         len -= 9;
+        if (s->got_picture)
+            if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
+                av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
+                goto out;
+            }
+
+        s->rgb = rgb;
+        s->pegasus_rct = pegasus_rct;
+
         goto out;
     }
     if (id == AV_RL32("colr") && len > 0) {
@@ -1894,6 +1911,7 @@ int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     int start_code;
     int i, index;
     int ret = 0;
+    int is16bit;
 
     av_dict_free(&s->exif_metadata);
     av_freep(&s->stereo3d);
@@ -2072,6 +2090,9 @@ fail:
     s->got_picture = 0;
     return ret;
 the_end:
+
+    is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step_minus1;
+
     if (s->upscale_h) {
         int p;
         av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
@@ -2081,6 +2102,7 @@ the_end:
                    avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
                    avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
                    avctx->pix_fmt == AV_PIX_FMT_YUV420P  ||
+                   avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
                    avctx->pix_fmt == AV_PIX_FMT_GBRAP
                    );
         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
@@ -2091,9 +2113,16 @@ the_end:
                 continue;
             if (p==1 || p==2)
                 w >>= hshift;
+            av_assert0(w > 0);
             for (i = 0; i < s->chroma_height; i++) {
-                for (index = w - 1; index; index--)
+                if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
+                else                      line[w - 1] = line[(w - 1) / 2];
+                for (index = w - 2; index > 0; index--) {
+                    if (is16bit)
+                        ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
+                    else
                         line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
+                }
                 line += s->linesize[p];
             }
         }
@@ -2114,11 +2143,11 @@ the_end:
             if (!(s->upscale_v & (1<<p)))
                 continue;
             if (p==1 || p==2)
-                w >>= hshift;
+                w = FF_CEIL_RSHIFT(w, hshift);
             for (i = s->height - 1; i; i--) {
                 uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i / 2 * s->linesize[p]];
                 uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) / 2 * s->linesize[p]];
-                if (src1 == src2) {
+                if (src1 == src2 || i == s->height - 1) {
                     memcpy(dst, src1, w);
                 } else {
                     for (index = 0; index < w; index++)
|
@ -61,6 +61,13 @@ static av_cold int mm_decode_init(AVCodecContext *avctx)
|
|||||||
|
|
||||||
avctx->pix_fmt = AV_PIX_FMT_PAL8;
|
avctx->pix_fmt = AV_PIX_FMT_PAL8;
|
||||||
|
|
||||||
|
if (!avctx->width || !avctx->height ||
|
||||||
|
(avctx->width & 1) || (avctx->height & 1)) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "Invalid video dimensions: %dx%d\n",
|
||||||
|
avctx->width, avctx->height);
|
||||||
|
return AVERROR(EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
s->frame = av_frame_alloc();
|
s->frame = av_frame_alloc();
|
||||||
if (!s->frame)
|
if (!s->frame)
|
||||||
return AVERROR(ENOMEM);
|
return AVERROR(ENOMEM);
|
||||||
@ -111,7 +118,7 @@ static int mm_decode_intra(MmContext * s, int half_horiz, int half_vert)
|
|||||||
|
|
||||||
if (color) {
|
if (color) {
|
||||||
memset(s->frame->data[0] + y*s->frame->linesize[0] + x, color, run_length);
|
memset(s->frame->data[0] + y*s->frame->linesize[0] + x, color, run_length);
|
||||||
if (half_vert)
|
if (half_vert && y + half_vert < s->avctx->height)
|
||||||
memset(s->frame->data[0] + (y+1)*s->frame->linesize[0] + x, color, run_length);
|
memset(s->frame->data[0] + (y+1)*s->frame->linesize[0] + x, color, run_length);
|
||||||
}
|
}
|
||||||
x+= run_length;
|
x+= run_length;
|
||||||
|
@ -190,7 +190,13 @@ static av_always_inline int cmp_inline(MpegEncContext *s, const int x, const int
|
|||||||
int uvdxy; /* no, it might not be used uninitialized */
|
int uvdxy; /* no, it might not be used uninitialized */
|
||||||
if(dxy){
|
if(dxy){
|
||||||
if(qpel){
|
if(qpel){
|
||||||
|
if (h << size == 16) {
|
||||||
c->qpel_put[size][dxy](c->temp, ref[0] + x + y*stride, stride); //FIXME prototype (add h)
|
c->qpel_put[size][dxy](c->temp, ref[0] + x + y*stride, stride); //FIXME prototype (add h)
|
||||||
|
} else if (size == 0 && h == 8) {
|
||||||
|
c->qpel_put[1][dxy](c->temp , ref[0] + x + y*stride , stride);
|
||||||
|
c->qpel_put[1][dxy](c->temp + 8, ref[0] + x + y*stride + 8, stride);
|
||||||
|
} else
|
||||||
|
av_assert2(0);
|
||||||
if(chroma){
|
if(chroma){
|
||||||
int cx= hx/2;
|
int cx= hx/2;
|
||||||
int cy= hy/2;
|
int cy= hy/2;
|
||||||
|
@ -1884,6 +1884,14 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
|
|||||||
} else
|
} else
|
||||||
goto eos;
|
goto eos;
|
||||||
}
|
}
|
||||||
|
if (s->mb_y >= ((s->height + 15) >> 4) &&
|
||||||
|
s->progressive_frame &&
|
||||||
|
!s->progressive_sequence &&
|
||||||
|
get_bits_left(&s->gb) <= 8 &&
|
||||||
|
get_bits_left(&s->gb) >= 0 &&
|
||||||
|
s->mb_skip_run == -1 &&
|
||||||
|
show_bits(&s->gb, 8) == 0)
|
||||||
|
goto eos;
|
||||||
|
|
||||||
ff_init_block_index(s);
|
ff_init_block_index(s);
|
||||||
}
|
}
|
||||||
|
@ -73,20 +73,21 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
|
|||||||
if (i > 4)
|
if (i > 4)
|
||||||
s->header_count = -2;
|
s->header_count = -2;
|
||||||
} else {
|
} else {
|
||||||
|
int header_threshold = avctx->codec_id != AV_CODEC_ID_NONE && avctx->codec_id != codec_id;
|
||||||
if((state&SAME_HEADER_MASK) != (s->header&SAME_HEADER_MASK) && s->header)
|
if((state&SAME_HEADER_MASK) != (s->header&SAME_HEADER_MASK) && s->header)
|
||||||
s->header_count= -3;
|
s->header_count= -3;
|
||||||
s->header= state;
|
s->header= state;
|
||||||
s->header_count++;
|
s->header_count++;
|
||||||
s->frame_size = ret-4;
|
s->frame_size = ret-4;
|
||||||
|
|
||||||
if (s->header_count > 0 + (avctx->codec_id != AV_CODEC_ID_NONE && avctx->codec_id != codec_id)) {
|
if (s->header_count > header_threshold) {
|
||||||
avctx->sample_rate= sr;
|
avctx->sample_rate= sr;
|
||||||
avctx->channels = channels;
|
avctx->channels = channels;
|
||||||
s1->duration = frame_size;
|
s1->duration = frame_size;
|
||||||
avctx->codec_id = codec_id;
|
avctx->codec_id = codec_id;
|
||||||
if (s->no_bitrate || !avctx->bit_rate) {
|
if (s->no_bitrate || !avctx->bit_rate) {
|
||||||
s->no_bitrate = 1;
|
s->no_bitrate = 1;
|
||||||
avctx->bit_rate += (bit_rate - avctx->bit_rate) / s->header_count;
|
avctx->bit_rate += (bit_rate - avctx->bit_rate) / (s->header_count - header_threshold);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
@ -1436,6 +1436,9 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
|
|||||||
{
|
{
|
||||||
int i, err = 0;
|
int i, err = 0;
|
||||||
|
|
||||||
|
if (!s->context_initialized)
|
||||||
|
return AVERROR(EINVAL);
|
||||||
|
|
||||||
if (s->slice_context_count > 1) {
|
if (s->slice_context_count > 1) {
|
||||||
for (i = 0; i < s->slice_context_count; i++) {
|
for (i = 0; i < s->slice_context_count; i++) {
|
||||||
free_duplicate_context(s->thread_context[i]);
|
free_duplicate_context(s->thread_context[i]);
|
||||||
@ -1465,8 +1468,8 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
|
|||||||
s->mb_height = (s->height + 15) / 16;
|
s->mb_height = (s->height + 15) / 16;
|
||||||
|
|
||||||
if ((s->width || s->height) &&
|
if ((s->width || s->height) &&
|
||||||
av_image_check_size(s->width, s->height, 0, s->avctx))
|
(err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
|
||||||
return AVERROR_INVALIDDATA;
|
goto fail;
|
||||||
|
|
||||||
if ((err = init_context_frame(s)))
|
if ((err = init_context_frame(s)))
|
||||||
goto fail;
|
goto fail;
|
||||||
@ -1482,7 +1485,7 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
|
|||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < nb_slices; i++) {
|
for (i = 0; i < nb_slices; i++) {
|
||||||
if (init_duplicate_context(s->thread_context[i]) < 0)
|
if ((err = init_duplicate_context(s->thread_context[i])) < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
s->thread_context[i]->start_mb_y =
|
s->thread_context[i]->start_mb_y =
|
||||||
(s->mb_height * (i) + nb_slices / 2) / nb_slices;
|
(s->mb_height * (i) + nb_slices / 2) / nb_slices;
|
||||||
|
@ -908,6 +908,11 @@ static av_cold int on2avc_decode_init(AVCodecContext *avctx)
|
|||||||
On2AVCContext *c = avctx->priv_data;
|
On2AVCContext *c = avctx->priv_data;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
|
if (avctx->channels > 2U) {
|
||||||
|
avpriv_request_sample(avctx, "Decoding more than 2 channels");
|
||||||
|
return AVERROR_PATCHWELCOME;
|
||||||
|
}
|
||||||
|
|
||||||
c->avctx = avctx;
|
c->avctx = avctx;
|
||||||
avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
|
avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
|
||||||
avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO
|
avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO
|
||||||
|
@ -102,8 +102,8 @@ static const AVOption avcodec_options[] = {
|
|||||||
{"extradata_size", NULL, OFFSET(extradata_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
|
{"extradata_size", NULL, OFFSET(extradata_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
|
||||||
{"time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, INT_MIN, INT_MAX},
|
{"time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, INT_MIN, INT_MAX},
|
||||||
{"g", "set the group of picture (GOP) size", OFFSET(gop_size), AV_OPT_TYPE_INT, {.i64 = 12 }, INT_MIN, INT_MAX, V|E},
|
{"g", "set the group of picture (GOP) size", OFFSET(gop_size), AV_OPT_TYPE_INT, {.i64 = 12 }, INT_MIN, INT_MAX, V|E},
|
||||||
{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|D|E},
|
{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
|
||||||
{"ac", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|D|E},
|
{"ac", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
|
||||||
{"cutoff", "set cutoff bandwidth", OFFSET(cutoff), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
|
{"cutoff", "set cutoff bandwidth", OFFSET(cutoff), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
|
||||||
{"frame_size", NULL, OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
|
{"frame_size", NULL, OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
|
||||||
{"frame_number", NULL, OFFSET(frame_number), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
|
{"frame_number", NULL, OFFSET(frame_number), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
|
||||||
|
@ -499,6 +499,12 @@ static int opus_decode_packet(AVCodecContext *avctx, void *data,
|
|||||||
av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
|
av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
if (coded_samples != s->packet.frame_count * s->packet.frame_duration) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR,
|
||||||
|
"Mismatching coded sample count in substream %d.\n", i);
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
s->silk_samplerate = get_silk_samplerate(s->packet.config);
|
s->silk_samplerate = get_silk_samplerate(s->packet.config);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -576,6 +576,12 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
case MKTAG('I', 'H', 'D', 'R'):
|
case MKTAG('I', 'H', 'D', 'R'):
|
||||||
if (length != 13)
|
if (length != 13)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
|
if (s->state & PNG_IDAT) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n");
|
||||||
|
goto fail;
|
||||||
|
}
|
||||||
|
|
||||||
s->width = bytestream2_get_be32(&s->gb);
|
s->width = bytestream2_get_be32(&s->gb);
|
||||||
s->height = bytestream2_get_be32(&s->gb);
|
s->height = bytestream2_get_be32(&s->gb);
|
||||||
if (av_image_check_size(s->width, s->height, 0, avctx)) {
|
if (av_image_check_size(s->width, s->height, 0, avctx)) {
|
||||||
@ -644,7 +650,7 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
} else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
|
} else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
|
||||||
s->color_type == PNG_COLOR_TYPE_PALETTE) {
|
s->color_type == PNG_COLOR_TYPE_PALETTE) {
|
||||||
avctx->pix_fmt = AV_PIX_FMT_PAL8;
|
avctx->pix_fmt = AV_PIX_FMT_PAL8;
|
||||||
} else if (s->bit_depth == 1) {
|
} else if (s->bit_depth == 1 && s->bits_per_pixel == 1) {
|
||||||
avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
|
avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
|
||||||
} else if (s->bit_depth == 8 &&
|
} else if (s->bit_depth == 8 &&
|
||||||
s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
|
s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
|
||||||
@ -851,10 +857,11 @@ exit_loop:
|
|||||||
int i, j;
|
int i, j;
|
||||||
uint8_t *pd = p->data[0];
|
uint8_t *pd = p->data[0];
|
||||||
uint8_t *pd_last = s->last_picture.f->data[0];
|
uint8_t *pd_last = s->last_picture.f->data[0];
|
||||||
|
int ls = FFMIN(av_image_get_linesize(p->format, s->width, 0), s->width * s->bpp);
|
||||||
|
|
||||||
ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
|
ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
|
||||||
for (j = 0; j < s->height; j++) {
|
for (j = 0; j < s->height; j++) {
|
||||||
for (i = 0; i < s->width * s->bpp; i++)
|
for (i = 0; i < ls; i++)
|
||||||
pd[i] += pd_last[i];
|
pd[i] += pd_last[i];
|
||||||
pd += s->image_linesize;
|
pd += s->image_linesize;
|
||||||
pd_last += s->image_linesize;
|
pd_last += s->image_linesize;
|
||||||
|
@ -471,7 +471,6 @@ static void put_alpha_run(PutBitContext *pb, int run)
|
|||||||
|
|
||||||
// todo alpha quantisation for high quants
|
// todo alpha quantisation for high quants
|
||||||
static int encode_alpha_plane(ProresContext *ctx, PutBitContext *pb,
|
static int encode_alpha_plane(ProresContext *ctx, PutBitContext *pb,
|
||||||
const uint16_t *src, int linesize,
|
|
||||||
int mbs_per_slice, uint16_t *blocks,
|
int mbs_per_slice, uint16_t *blocks,
|
||||||
int quant)
|
int quant)
|
||||||
{
|
{
|
||||||
@ -566,11 +565,16 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic,
|
|||||||
get_alpha_data(ctx, src, linesize, xp, yp,
|
get_alpha_data(ctx, src, linesize, xp, yp,
|
||||||
pwidth, avctx->height / ctx->pictures_per_frame,
|
pwidth, avctx->height / ctx->pictures_per_frame,
|
||||||
ctx->blocks[0], mbs_per_slice, ctx->alpha_bits);
|
ctx->blocks[0], mbs_per_slice, ctx->alpha_bits);
|
||||||
sizes[i] = encode_alpha_plane(ctx, pb, src, linesize,
|
sizes[i] = encode_alpha_plane(ctx, pb,
|
||||||
mbs_per_slice, ctx->blocks[0],
|
mbs_per_slice, ctx->blocks[0],
|
||||||
quant);
|
quant);
|
||||||
}
|
}
|
||||||
total_size += sizes[i];
|
total_size += sizes[i];
|
||||||
|
if (put_bits_left(pb) < 0) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "Serious underevaluation of"
|
||||||
|
"required buffer size");
|
||||||
|
return AVERROR_BUFFER_TOO_SMALL;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return total_size;
|
return total_size;
|
||||||
}
|
}
|
||||||
@ -941,9 +945,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
|
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
|
||||||
avctx->coded_frame->key_frame = 1;
|
avctx->coded_frame->key_frame = 1;
|
||||||
|
|
||||||
pkt_size = ctx->frame_size_upper_bound + FF_MIN_BUFFER_SIZE;
|
pkt_size = ctx->frame_size_upper_bound;
|
||||||
|
|
||||||
if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
|
if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size + FF_MIN_BUFFER_SIZE)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
orig_buf = pkt->data;
|
orig_buf = pkt->data;
|
||||||
@ -1020,7 +1024,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
slice_hdr = buf;
|
slice_hdr = buf;
|
||||||
buf += slice_hdr_size - 1;
|
buf += slice_hdr_size - 1;
|
||||||
init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)) * 8);
|
init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)) * 8);
|
||||||
encode_slice(avctx, pic, &pb, sizes, x, y, q, mbs_per_slice);
|
ret = encode_slice(avctx, pic, &pb, sizes, x, y, q, mbs_per_slice);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
|
||||||
bytestream_put_byte(&slice_hdr, q);
|
bytestream_put_byte(&slice_hdr, q);
|
||||||
slice_size = slice_hdr_size + sizes[ctx->num_planes - 1];
|
slice_size = slice_hdr_size + sizes[ctx->num_planes - 1];
|
||||||
@ -1202,8 +1208,6 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
|||||||
ctx->bits_per_mb = ls * 8;
|
ctx->bits_per_mb = ls * 8;
|
||||||
if (ctx->chroma_factor == CFACTOR_Y444)
|
if (ctx->chroma_factor == CFACTOR_Y444)
|
||||||
ctx->bits_per_mb += ls * 4;
|
ctx->bits_per_mb += ls * 4;
|
||||||
if (ctx->num_planes == 4)
|
|
||||||
ctx->bits_per_mb += ls * 4;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx->frame_size_upper_bound = ctx->pictures_per_frame *
|
ctx->frame_size_upper_bound = ctx->pictures_per_frame *
|
||||||
@ -1212,6 +1216,14 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
|||||||
(mps * ctx->bits_per_mb) / 8)
|
(mps * ctx->bits_per_mb) / 8)
|
||||||
+ 200;
|
+ 200;
|
||||||
|
|
||||||
|
if (ctx->alpha_bits) {
|
||||||
|
// alpha plane is run-coded and might run over bit budget
|
||||||
|
ctx->frame_size_upper_bound += ctx->pictures_per_frame *
|
||||||
|
ctx->slices_per_picture *
|
||||||
|
/* num pixels per slice */ (ctx->mbs_per_slice * 256 *
|
||||||
|
/* bits per pixel */ (1 + ctx->alpha_bits + 1) + 7 >> 3);
|
||||||
|
}
|
||||||
|
|
||||||
avctx->codec_tag = ctx->profile_info->tag;
|
avctx->codec_tag = ctx->profile_info->tag;
|
||||||
|
|
||||||
av_log(avctx, AV_LOG_DEBUG,
|
av_log(avctx, AV_LOG_DEBUG,
|
||||||
|
@ -163,7 +163,7 @@ static void av_noinline qpeg_decode_inter(QpegContext *qctx, uint8_t *dst,
|
|||||||
|
|
||||||
/* check motion vector */
|
/* check motion vector */
|
||||||
if ((me_x + filled < 0) || (me_x + me_w + filled > width) ||
|
if ((me_x + filled < 0) || (me_x + me_w + filled > width) ||
|
||||||
(height - me_y - me_h < 0) || (height - me_y > orig_height) ||
|
(height - me_y - me_h < 0) || (height - me_y >= orig_height) ||
|
||||||
(filled + me_w > width) || (height - me_h < 0))
|
(filled + me_w > width) || (height - me_h < 0))
|
||||||
av_log(NULL, AV_LOG_ERROR, "Bogus motion vector (%i,%i), block size %ix%i at %i,%i\n",
|
av_log(NULL, AV_LOG_ERROR, "Bogus motion vector (%i,%i), block size %ix%i at %i,%i\n",
|
||||||
me_x, me_y, me_w, me_h, filled, height);
|
me_x, me_y, me_w, me_h, filled, height);
|
||||||
|
@ -117,6 +117,9 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
|
|||||||
context->frame_size = avpicture_get_size(avctx->pix_fmt, avctx->width,
|
context->frame_size = avpicture_get_size(avctx->pix_fmt, avctx->width,
|
||||||
avctx->height);
|
avctx->height);
|
||||||
}
|
}
|
||||||
|
if (context->frame_size < 0)
|
||||||
|
return context->frame_size;
|
||||||
|
|
||||||
|
|
||||||
if ((avctx->extradata_size >= 9 &&
|
if ((avctx->extradata_size >= 9 &&
|
||||||
!memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
|
!memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
|
||||||
|
@ -98,7 +98,7 @@ static int expand_rle_row16(SgiState *s, uint16_t *out_buf,
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
/* Check for buffer overflow. */
|
/* Check for buffer overflow. */
|
||||||
if (pixelstride * (count - 1) >= len) {
|
if (out_end - out_buf <= pixelstride * (count - 1)) {
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "Invalid pixel count.\n");
|
av_log(s->avctx, AV_LOG_ERROR, "Invalid pixel count.\n");
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
@ -145,7 +145,7 @@ static int read_rle_sgi(uint8_t *out_buf, SgiState *s)
|
|||||||
for (z = 0; z < s->depth; z++) {
|
for (z = 0; z < s->depth; z++) {
|
||||||
dest_row = out_buf;
|
dest_row = out_buf;
|
||||||
for (y = 0; y < s->height; y++) {
|
for (y = 0; y < s->height; y++) {
|
||||||
linesize = s->width * s->depth * s->bytes_per_channel;
|
linesize = s->width * s->depth;
|
||||||
dest_row -= s->linesize;
|
dest_row -= s->linesize;
|
||||||
start_offset = bytestream2_get_be32(&g_table);
|
start_offset = bytestream2_get_be32(&g_table);
|
||||||
bytestream2_seek(&s->g, start_offset, SEEK_SET);
|
bytestream2_seek(&s->g, start_offset, SEEK_SET);
|
||||||
|
@ -70,7 +70,7 @@ typedef struct SmcContext {
|
|||||||
row_ptr += stride * 4; \
|
row_ptr += stride * 4; \
|
||||||
} \
|
} \
|
||||||
total_blocks--; \
|
total_blocks--; \
|
||||||
if (total_blocks < 0) \
|
if (total_blocks < 0 + !!n_blocks) \
|
||||||
{ \
|
{ \
|
||||||
av_log(s->avctx, AV_LOG_INFO, "warning: block counter just went negative (this should not happen)\n"); \
|
av_log(s->avctx, AV_LOG_INFO, "warning: block counter just went negative (this should not happen)\n"); \
|
||||||
return; \
|
return; \
|
||||||
|
@ -713,7 +713,7 @@ av_cold void ff_snow_common_end(SnowContext *s)
|
|||||||
for(i=0; i<MAX_REF_FRAMES; i++){
|
for(i=0; i<MAX_REF_FRAMES; i++){
|
||||||
av_freep(&s->ref_mvs[i]);
|
av_freep(&s->ref_mvs[i]);
|
||||||
av_freep(&s->ref_scores[i]);
|
av_freep(&s->ref_scores[i]);
|
||||||
if(s->last_picture[i]->data[0]) {
|
if(s->last_picture[i] && s->last_picture[i]->data[0]) {
|
||||||
av_assert0(s->last_picture[i]->data[0] != s->current_picture->data[0]);
|
av_assert0(s->last_picture[i]->data[0] != s->current_picture->data[0]);
|
||||||
}
|
}
|
||||||
av_frame_free(&s->last_picture[i]);
|
av_frame_free(&s->last_picture[i]);
|
||||||
|
@ -659,7 +659,10 @@ static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, i
|
|||||||
if(v){
|
if(v){
|
||||||
v= 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
|
v= 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
|
||||||
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3*ff_quant3bA[t&0xFF]]);
|
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3*ff_quant3bA[t&0xFF]]);
|
||||||
|
if ((uint16_t)v != v) {
|
||||||
|
av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
|
||||||
|
v = 1;
|
||||||
|
}
|
||||||
xc->x=x;
|
xc->x=x;
|
||||||
(xc++)->coeff= v;
|
(xc++)->coeff= v;
|
||||||
}
|
}
|
||||||
@ -669,6 +672,10 @@ static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, i
|
|||||||
else run= INT_MAX;
|
else run= INT_MAX;
|
||||||
v= 2*(get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
|
v= 2*(get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
|
||||||
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3]);
|
v+=get_rac(&s->c, &b->state[0][16 + 1 + 3]);
|
||||||
|
if ((uint16_t)v != v) {
|
||||||
|
av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
|
||||||
|
v = 1;
|
||||||
|
}
|
||||||
|
|
||||||
xc->x=x;
|
xc->x=x;
|
||||||
(xc++)->coeff= v;
|
(xc++)->coeff= v;
|
||||||
|
@ -495,7 +495,7 @@ static int svq1_decode_delta_block(AVCodecContext *avctx, HpelDSPContext *hdsp,
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void svq1_parse_string(GetBitContext *bitbuf, uint8_t *out)
|
static void svq1_parse_string(GetBitContext *bitbuf, uint8_t out[257])
|
||||||
{
|
{
|
||||||
uint8_t seed;
|
uint8_t seed;
|
||||||
int i;
|
int i;
|
||||||
@ -507,6 +507,7 @@ static void svq1_parse_string(GetBitContext *bitbuf, uint8_t *out)
|
|||||||
out[i] = get_bits(bitbuf, 8) ^ seed;
|
out[i] = get_bits(bitbuf, 8) ^ seed;
|
||||||
seed = string_table[out[i] ^ seed];
|
seed = string_table[out[i] ^ seed];
|
||||||
}
|
}
|
||||||
|
out[i] = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)
|
static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)
|
||||||
@ -549,12 +550,12 @@ static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if ((s->frame_code ^ 0x10) >= 0x50) {
|
if ((s->frame_code ^ 0x10) >= 0x50) {
|
||||||
uint8_t msg[256];
|
uint8_t msg[257];
|
||||||
|
|
||||||
svq1_parse_string(bitbuf, msg);
|
svq1_parse_string(bitbuf, msg);
|
||||||
|
|
||||||
av_log(avctx, AV_LOG_INFO,
|
av_log(avctx, AV_LOG_INFO,
|
||||||
"embedded message:\n%s\n", (char *)msg);
|
"embedded message:\n%s\n", ((char *)msg) + 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
skip_bits(bitbuf, 2);
|
skip_bits(bitbuf, 2);
|
||||||
|
@ -1176,7 +1176,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
|
|
||||||
h->cur_pic_ptr = s->cur_pic;
|
h->cur_pic_ptr = s->cur_pic;
|
||||||
av_frame_unref(&h->cur_pic.f);
|
av_frame_unref(&h->cur_pic.f);
|
||||||
h->cur_pic = *s->cur_pic;
|
memcpy(&h->cur_pic.tf, &s->cur_pic->tf, sizeof(h->cur_pic) - offsetof(H264Picture, tf));
|
||||||
ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
|
ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -712,13 +712,13 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
|
|||||||
s->height = value;
|
s->height = value;
|
||||||
break;
|
break;
|
||||||
case TIFF_BPP:
|
case TIFF_BPP:
|
||||||
s->bppcount = count;
|
if (count > 4U) {
|
||||||
if (count > 4) {
|
|
||||||
av_log(s->avctx, AV_LOG_ERROR,
|
av_log(s->avctx, AV_LOG_ERROR,
|
||||||
"This format is not supported (bpp=%d, %d components)\n",
|
"This format is not supported (bpp=%d, %d components)\n",
|
||||||
s->bpp, count);
|
value, count);
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
s->bppcount = count;
|
||||||
if (count == 1)
|
if (count == 1)
|
||||||
s->bpp = value;
|
s->bpp = value;
|
||||||
else {
|
else {
|
||||||
@ -736,6 +736,13 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
|
|||||||
s->bpp = -1;
|
s->bpp = -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (s->bpp > 64U) {
|
||||||
|
av_log(s->avctx, AV_LOG_ERROR,
|
||||||
|
"This format is not supported (bpp=%d, %d components)\n",
|
||||||
|
s->bpp, count);
|
||||||
|
s->bpp = 0;
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
case TIFF_SAMPLES_PER_PIXEL:
|
case TIFF_SAMPLES_PER_PIXEL:
|
||||||
if (count != 1) {
|
if (count != 1) {
|
||||||
|
@ -305,7 +305,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
|
|
||||||
strips = (s->height - 1) / s->rps + 1;
|
strips = (s->height - 1) / s->rps + 1;
|
||||||
|
|
||||||
packet_size = avctx->height * ((avctx->width * s->bpp + 7) >> 3) * 2 +
|
bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
|
||||||
|
s->subsampling[0] * s->subsampling[1] + 7) >> 3;
|
||||||
|
packet_size = avctx->height * bytes_per_row * 2 +
|
||||||
avctx->height * 4 + FF_MIN_BUFFER_SIZE;
|
avctx->height * 4 + FF_MIN_BUFFER_SIZE;
|
||||||
|
|
||||||
if ((ret = ff_alloc_packet2(avctx, pkt, packet_size)) < 0)
|
if ((ret = ff_alloc_packet2(avctx, pkt, packet_size)) < 0)
|
||||||
@ -333,8 +335,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
|
|
||||||
s->subsampling[0] * s->subsampling[1] + 7) >> 3;
|
|
||||||
if (is_yuv) {
|
if (is_yuv) {
|
||||||
av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
|
av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
|
||||||
if (s->yuv_line == NULL) {
|
if (s->yuv_line == NULL) {
|
||||||
|
@ -66,6 +66,9 @@
|
|||||||
#include "compat/os2threads.h"
|
#include "compat/os2threads.h"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#include "libavutil/ffversion.h"
|
||||||
|
const char av_codec_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
|
||||||
|
|
||||||
#if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS
|
#if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS
|
||||||
static int default_lockmgr_cb(void **arg, enum AVLockOp op)
|
static int default_lockmgr_cb(void **arg, enum AVLockOp op)
|
||||||
{
|
{
|
||||||
@ -297,6 +300,12 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
|
|||||||
int i;
|
int i;
|
||||||
int w_align = 1;
|
int w_align = 1;
|
||||||
int h_align = 1;
|
int h_align = 1;
|
||||||
|
AVPixFmtDescriptor const *desc = av_pix_fmt_desc_get(s->pix_fmt);
|
||||||
|
|
||||||
|
if (desc) {
|
||||||
|
w_align = 1 << desc->log2_chroma_w;
|
||||||
|
h_align = 1 << desc->log2_chroma_h;
|
||||||
|
}
|
||||||
|
|
||||||
switch (s->pix_fmt) {
|
switch (s->pix_fmt) {
|
||||||
case AV_PIX_FMT_YUV420P:
|
case AV_PIX_FMT_YUV420P:
|
||||||
@ -374,6 +383,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
|
|||||||
case AV_PIX_FMT_GBRP12BE:
|
case AV_PIX_FMT_GBRP12BE:
|
||||||
case AV_PIX_FMT_GBRP14LE:
|
case AV_PIX_FMT_GBRP14LE:
|
||||||
case AV_PIX_FMT_GBRP14BE:
|
case AV_PIX_FMT_GBRP14BE:
|
||||||
|
case AV_PIX_FMT_GBRP16LE:
|
||||||
|
case AV_PIX_FMT_GBRP16BE:
|
||||||
w_align = 16; //FIXME assume 16 pixel per macroblock
|
w_align = 16; //FIXME assume 16 pixel per macroblock
|
||||||
h_align = 16 * 2; // interlaced needs 2 macroblocks height
|
h_align = 16 * 2; // interlaced needs 2 macroblocks height
|
||||||
break;
|
break;
|
||||||
@ -403,6 +414,10 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
|
|||||||
w_align = 4;
|
w_align = 4;
|
||||||
h_align = 4;
|
h_align = 4;
|
||||||
}
|
}
|
||||||
|
if (s->codec_id == AV_CODEC_ID_JV) {
|
||||||
|
w_align = 8;
|
||||||
|
h_align = 8;
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
case AV_PIX_FMT_BGR24:
|
case AV_PIX_FMT_BGR24:
|
||||||
if ((s->codec_id == AV_CODEC_ID_MSZH) ||
|
if ((s->codec_id == AV_CODEC_ID_MSZH) ||
|
||||||
@ -418,8 +433,6 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
|
|||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
w_align = 1;
|
|
||||||
h_align = 1;
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3591,6 +3604,11 @@ int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf)
|
|||||||
ret = av_bprint_finalize(buf, &str);
|
ret = av_bprint_finalize(buf, &str);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
if (!av_bprint_is_complete(buf)) {
|
||||||
|
av_free(str);
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
|
}
|
||||||
|
|
||||||
avctx->extradata = str;
|
avctx->extradata = str;
|
||||||
/* Note: the string is NUL terminated (so extradata can be read as a
|
/* Note: the string is NUL terminated (so extradata can be read as a
|
||||||
* string), but the ending character is not accounted in the size (in
|
* string), but the ending character is not accounted in the size (in
|
||||||
|
@ -214,6 +214,8 @@ static void restore_median(uint8_t *src, int step, int stride,
|
|||||||
slice_height = ((((slice + 1) * height) / slices) & cmask) -
|
slice_height = ((((slice + 1) * height) / slices) & cmask) -
|
||||||
slice_start;
|
slice_start;
|
||||||
|
|
||||||
|
if (!slice_height)
|
||||||
|
continue;
|
||||||
bsrc = src + slice_start * stride;
|
bsrc = src + slice_start * stride;
|
||||||
|
|
||||||
// first line - left neighbour prediction
|
// first line - left neighbour prediction
|
||||||
@ -224,7 +226,7 @@ static void restore_median(uint8_t *src, int step, int stride,
|
|||||||
A = bsrc[i];
|
A = bsrc[i];
|
||||||
}
|
}
|
||||||
bsrc += stride;
|
bsrc += stride;
|
||||||
if (slice_height == 1)
|
if (slice_height <= 1)
|
||||||
continue;
|
continue;
|
||||||
// second line - first element has top prediction, the rest uses median
|
// second line - first element has top prediction, the rest uses median
|
||||||
C = bsrc[-stride];
|
C = bsrc[-stride];
|
||||||
@ -269,6 +271,8 @@ static void restore_median_il(uint8_t *src, int step, int stride,
|
|||||||
slice_height = ((((slice + 1) * height) / slices) & cmask) -
|
slice_height = ((((slice + 1) * height) / slices) & cmask) -
|
||||||
slice_start;
|
slice_start;
|
||||||
slice_height >>= 1;
|
slice_height >>= 1;
|
||||||
|
if (!slice_height)
|
||||||
|
continue;
|
||||||
|
|
||||||
bsrc = src + slice_start * stride;
|
bsrc = src + slice_start * stride;
|
||||||
|
|
||||||
@ -284,7 +288,7 @@ static void restore_median_il(uint8_t *src, int step, int stride,
|
|||||||
A = bsrc[stride + i];
|
A = bsrc[stride + i];
|
||||||
}
|
}
|
||||||
bsrc += stride2;
|
bsrc += stride2;
|
||||||
if (slice_height == 1)
|
if (slice_height <= 1)
|
||||||
continue;
|
continue;
|
||||||
// second line - first element has top prediction, the rest uses median
|
// second line - first element has top prediction, the rest uses median
|
||||||
C = bsrc[-stride2];
|
C = bsrc[-stride2];
|
||||||
|
@ -389,7 +389,7 @@ static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
||||||
uint8_t *dst, int stride,
|
uint8_t *dst, int stride, int plane_no,
|
||||||
int width, int height, PutByteContext *pb)
|
int width, int height, PutByteContext *pb)
|
||||||
{
|
{
|
||||||
UtvideoContext *c = avctx->priv_data;
|
UtvideoContext *c = avctx->priv_data;
|
||||||
@ -399,6 +399,7 @@ static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
|||||||
HuffEntry he[256];
|
HuffEntry he[256];
|
||||||
|
|
||||||
uint32_t offset = 0, slice_len = 0;
|
uint32_t offset = 0, slice_len = 0;
|
||||||
|
const int cmask = ~(!plane_no && avctx->pix_fmt == AV_PIX_FMT_YUV420P);
|
||||||
int i, sstart, send = 0;
|
int i, sstart, send = 0;
|
||||||
int symbol;
|
int symbol;
|
||||||
int ret;
|
int ret;
|
||||||
@ -408,7 +409,7 @@ static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
|||||||
case PRED_NONE:
|
case PRED_NONE:
|
||||||
for (i = 0; i < c->slices; i++) {
|
for (i = 0; i < c->slices; i++) {
|
||||||
sstart = send;
|
sstart = send;
|
||||||
send = height * (i + 1) / c->slices;
|
send = height * (i + 1) / c->slices & cmask;
|
||||||
av_image_copy_plane(dst + sstart * width, width,
|
av_image_copy_plane(dst + sstart * width, width,
|
||||||
src + sstart * stride, stride,
|
src + sstart * stride, stride,
|
||||||
width, send - sstart);
|
width, send - sstart);
|
||||||
@ -417,7 +418,7 @@ static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
|||||||
case PRED_LEFT:
|
case PRED_LEFT:
|
||||||
for (i = 0; i < c->slices; i++) {
|
for (i = 0; i < c->slices; i++) {
|
||||||
sstart = send;
|
sstart = send;
|
||||||
send = height * (i + 1) / c->slices;
|
send = height * (i + 1) / c->slices & cmask;
|
||||||
left_predict(src + sstart * stride, dst + sstart * width,
|
left_predict(src + sstart * stride, dst + sstart * width,
|
||||||
stride, width, send - sstart);
|
stride, width, send - sstart);
|
||||||
}
|
}
|
||||||
@ -425,7 +426,7 @@ static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
|||||||
case PRED_MEDIAN:
|
case PRED_MEDIAN:
|
||||||
for (i = 0; i < c->slices; i++) {
|
for (i = 0; i < c->slices; i++) {
|
||||||
sstart = send;
|
sstart = send;
|
||||||
send = height * (i + 1) / c->slices;
|
send = height * (i + 1) / c->slices & cmask;
|
||||||
median_predict(c, src + sstart * stride, dst + sstart * width,
|
median_predict(c, src + sstart * stride, dst + sstart * width,
|
||||||
stride, width, send - sstart);
|
stride, width, send - sstart);
|
||||||
}
|
}
|
||||||
@ -489,7 +490,7 @@ static int encode_plane(AVCodecContext *avctx, uint8_t *src,
|
|||||||
send = 0;
|
send = 0;
|
||||||
for (i = 0; i < c->slices; i++) {
|
for (i = 0; i < c->slices; i++) {
|
||||||
sstart = send;
|
sstart = send;
|
||||||
send = height * (i + 1) / c->slices;
|
send = height * (i + 1) / c->slices & cmask;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Write the huffman codes to a buffer,
|
* Write the huffman codes to a buffer,
|
||||||
@ -571,7 +572,7 @@ static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
case AV_PIX_FMT_RGBA:
|
case AV_PIX_FMT_RGBA:
|
||||||
for (i = 0; i < c->planes; i++) {
|
for (i = 0; i < c->planes; i++) {
|
||||||
ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
|
ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
|
||||||
c->slice_buffer[i], c->slice_stride,
|
c->slice_buffer[i], c->slice_stride, i,
|
||||||
width, height, &pb);
|
width, height, &pb);
|
||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
@ -583,7 +584,7 @@ static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
case AV_PIX_FMT_YUV422P:
|
case AV_PIX_FMT_YUV422P:
|
||||||
for (i = 0; i < c->planes; i++) {
|
for (i = 0; i < c->planes; i++) {
|
||||||
ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
|
ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
|
||||||
pic->linesize[i], width >> !!i, height, &pb);
|
pic->linesize[i], i, width >> !!i, height, &pb);
|
||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
|
av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
|
||||||
@ -594,7 +595,7 @@ static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
case AV_PIX_FMT_YUV420P:
|
case AV_PIX_FMT_YUV420P:
|
||||||
for (i = 0; i < c->planes; i++) {
|
for (i = 0; i < c->planes; i++) {
|
||||||
ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
|
ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
|
||||||
pic->linesize[i], width >> !!i, height >> !!i,
|
pic->linesize[i], i, width >> !!i, height >> !!i,
|
||||||
&pb);
|
&pb);
|
||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
|
@ -5484,7 +5484,7 @@ static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
|
|||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (!s->current_picture.f->data[0]) {
|
if (!s->current_picture.f || !s->current_picture.f->data[0]) {
|
||||||
av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
|
av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
@ -5514,7 +5514,7 @@ static void vc1_sprite_flush(AVCodecContext *avctx)
|
|||||||
Since we can't enforce it, clear to black the missing sprite. This is
|
Since we can't enforce it, clear to black the missing sprite. This is
|
||||||
wrong but it looks better than doing nothing. */
|
wrong but it looks better than doing nothing. */
|
||||||
|
|
||||||
if (f->data[0])
|
if (f && f->data[0])
|
||||||
for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
|
for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
|
||||||
for (i = 0; i < v->sprite_height>>!!plane; i++)
|
for (i = 0; i < v->sprite_height>>!!plane; i++)
|
||||||
memset(f->data[plane] + i * f->linesize[plane],
|
memset(f->data[plane] + i * f->linesize[plane],
|
||||||
|
@ -339,6 +339,9 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
|
|||||||
ofs += slen;
|
ofs += slen;
|
||||||
bytestream2_skip(&gb, len);
|
bytestream2_skip(&gb, len);
|
||||||
} else {
|
} else {
|
||||||
|
if (ofs + len > frame_width ||
|
||||||
|
bytestream2_get_bytes_left(&gb) < len)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
bytestream2_get_buffer(&gb, &dp[ofs], len);
|
bytestream2_get_buffer(&gb, &dp[ofs], len);
|
||||||
ofs += len;
|
ofs += len;
|
||||||
}
|
}
|
||||||
|
@ -1314,7 +1314,9 @@ static av_always_inline int setup_classifs(vorbis_context *vc,
|
|||||||
vorbis_residue *vr,
|
vorbis_residue *vr,
|
||||||
uint8_t *do_not_decode,
|
uint8_t *do_not_decode,
|
||||||
unsigned ch_used,
|
unsigned ch_used,
|
||||||
int partition_count)
|
int partition_count,
|
||||||
|
int ptns_to_read
|
||||||
|
)
|
||||||
{
|
{
|
||||||
int p, j, i;
|
int p, j, i;
|
||||||
unsigned c_p_c = vc->codebooks[vr->classbook].dimensions;
|
unsigned c_p_c = vc->codebooks[vr->classbook].dimensions;
|
||||||
@ -1336,7 +1338,7 @@ static av_always_inline int setup_classifs(vorbis_context *vc,
|
|||||||
for (i = partition_count + c_p_c - 1; i >= partition_count; i--) {
|
for (i = partition_count + c_p_c - 1; i >= partition_count; i--) {
|
||||||
temp2 = (((uint64_t)temp) * inverse_class) >> 32;
|
temp2 = (((uint64_t)temp) * inverse_class) >> 32;
|
||||||
|
|
||||||
if (i < vr->ptns_to_read)
|
if (i < ptns_to_read)
|
||||||
vr->classifs[p + i] = temp - temp2 * vr->classifications;
|
vr->classifs[p + i] = temp - temp2 * vr->classifications;
|
||||||
temp = temp2;
|
temp = temp2;
|
||||||
}
|
}
|
||||||
@ -1344,13 +1346,13 @@ static av_always_inline int setup_classifs(vorbis_context *vc,
|
|||||||
for (i = partition_count + c_p_c - 1; i >= partition_count; i--) {
|
for (i = partition_count + c_p_c - 1; i >= partition_count; i--) {
|
||||||
temp2 = temp / vr->classifications;
|
temp2 = temp / vr->classifications;
|
||||||
|
|
||||||
if (i < vr->ptns_to_read)
|
if (i < ptns_to_read)
|
||||||
vr->classifs[p + i] = temp - temp2 * vr->classifications;
|
vr->classifs[p + i] = temp - temp2 * vr->classifications;
|
||||||
temp = temp2;
|
temp = temp2;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
p += vr->ptns_to_read;
|
p += ptns_to_read;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -1404,7 +1406,7 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
|
|||||||
for (partition_count = 0; partition_count < ptns_to_read;) { // SPEC error
|
for (partition_count = 0; partition_count < ptns_to_read;) { // SPEC error
|
||||||
if (!pass) {
|
if (!pass) {
|
||||||
int ret;
|
int ret;
|
||||||
if ((ret = setup_classifs(vc, vr, do_not_decode, ch_used, partition_count)) < 0)
|
if ((ret = setup_classifs(vc, vr, do_not_decode, ch_used, partition_count, ptns_to_read)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
for (i = 0; (i < c_p_c) && (partition_count < ptns_to_read); ++i) {
|
for (i = 0; (i < c_p_c) && (partition_count < ptns_to_read); ++i) {
|
||||||
|
@ -253,6 +253,10 @@ static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb,
|
|||||||
return sign ? ~ret : ret;
|
return sign ? ~ret : ret;
|
||||||
|
|
||||||
error:
|
error:
|
||||||
|
ret = get_bits_left(gb);
|
||||||
|
if (ret <= 0) {
|
||||||
|
av_log(ctx->avctx, AV_LOG_ERROR, "Too few bits (%d) left\n", ret);
|
||||||
|
}
|
||||||
*last = 1;
|
*last = 1;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -638,7 +638,7 @@ static uint32_t log2sample(uint32_t v, int limit, uint32_t *result)
|
|||||||
|
|
||||||
if ((v += v >> 9) < (1 << 8)) {
|
if ((v += v >> 9) < (1 << 8)) {
|
||||||
dbits = nbits_table[v];
|
dbits = nbits_table[v];
|
||||||
result += (dbits << 8) + wp_log2_table[(v << (9 - dbits)) & 0xff];
|
*result += (dbits << 8) + wp_log2_table[(v << (9 - dbits)) & 0xff];
|
||||||
} else {
|
} else {
|
||||||
if (v < (1L << 16))
|
if (v < (1L << 16))
|
||||||
dbits = nbits_table[v >> 8] + 8;
|
dbits = nbits_table[v >> 8] + 8;
|
||||||
@ -647,7 +647,7 @@ static uint32_t log2sample(uint32_t v, int limit, uint32_t *result)
|
|||||||
else
|
else
|
||||||
dbits = nbits_table[v >> 24] + 24;
|
dbits = nbits_table[v >> 24] + 24;
|
||||||
|
|
||||||
result += dbits = (dbits << 8) + wp_log2_table[(v >> (dbits - 9)) & 0xff];
|
*result += dbits = (dbits << 8) + wp_log2_table[(v >> (dbits - 9)) & 0xff];
|
||||||
|
|
||||||
if (limit && dbits >= limit)
|
if (limit && dbits >= limit)
|
||||||
return 1;
|
return 1;
|
||||||
@ -2876,10 +2876,11 @@ static int wavpack_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
|||||||
return AVERROR(ENOMEM);
|
return AVERROR(ENOMEM);
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((ret = ff_alloc_packet2(avctx, avpkt, s->block_samples * avctx->channels * 8)) < 0)
|
buf_size = s->block_samples * avctx->channels * 8
|
||||||
|
+ 200 /* for headers */;
|
||||||
|
if ((ret = ff_alloc_packet2(avctx, avpkt, buf_size)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
buf = avpkt->data;
|
buf = avpkt->data;
|
||||||
buf_size = avpkt->size;
|
|
||||||
|
|
||||||
for (s->ch_offset = 0; s->ch_offset < avctx->channels;) {
|
for (s->ch_offset = 0; s->ch_offset < avctx->channels;) {
|
||||||
set_samplerate(s);
|
set_samplerate(s);
|
||||||
|
@ -1028,7 +1028,7 @@ static int apply_color_indexing_transform(WebPContext *s)
|
|||||||
ImageContext *img;
|
ImageContext *img;
|
||||||
ImageContext *pal;
|
ImageContext *pal;
|
||||||
int i, x, y;
|
int i, x, y;
|
||||||
uint8_t *p, *pi;
|
uint8_t *p;
|
||||||
|
|
||||||
img = &s->image[IMAGE_ROLE_ARGB];
|
img = &s->image[IMAGE_ROLE_ARGB];
|
||||||
pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];
|
pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];
|
||||||
@ -1066,13 +1066,13 @@ static int apply_color_indexing_transform(WebPContext *s)
|
|||||||
p = GET_PIXEL(img->frame, x, y);
|
p = GET_PIXEL(img->frame, x, y);
|
||||||
i = p[2];
|
i = p[2];
|
||||||
if (i >= pal->frame->width) {
|
if (i >= pal->frame->width) {
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "invalid palette index %d\n", i);
|
AV_WB32(p, 0x00000000);
|
||||||
return AVERROR_INVALIDDATA;
|
} else {
|
||||||
}
|
const uint8_t *pi = GET_PIXEL(pal->frame, i, 0);
|
||||||
pi = GET_PIXEL(pal->frame, i, 0);
|
|
||||||
AV_COPY32(p, pi);
|
AV_COPY32(p, pi);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -422,6 +422,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
|||||||
offset &= ~3;
|
offset &= ~3;
|
||||||
if (offset > s->sfb_offsets[i][band - 1])
|
if (offset > s->sfb_offsets[i][band - 1])
|
||||||
s->sfb_offsets[i][band++] = offset;
|
s->sfb_offsets[i][band++] = offset;
|
||||||
|
|
||||||
|
if (offset >= subframe_len)
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
s->sfb_offsets[i][band - 1] = subframe_len;
|
s->sfb_offsets[i][band - 1] = subframe_len;
|
||||||
s->num_sfb[i] = band - 1;
|
s->num_sfb[i] = band - 1;
|
||||||
|
@ -342,7 +342,7 @@ QPEL_TABLE 10, 4, w, sse4
|
|||||||
|
|
||||||
%macro LOOP_END 4
|
%macro LOOP_END 4
|
||||||
lea %1q, [%1q+2*%2q] ; dst += dststride
|
lea %1q, [%1q+2*%2q] ; dst += dststride
|
||||||
lea %3q, [%3q+ %4q] ; src += srcstride
|
add %3q, %4q ; src += srcstride
|
||||||
dec heightd ; cmp height
|
dec heightd ; cmp height
|
||||||
jnz .loop ; height loop
|
jnz .loop ; height loop
|
||||||
%endmacro
|
%endmacro
|
||||||
|
@ -20,10 +20,10 @@
|
|||||||
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||||
;******************************************************************************
|
;******************************************************************************
|
||||||
|
|
||||||
%if ARCH_X86_64
|
|
||||||
|
|
||||||
%include "libavutil/x86/x86util.asm"
|
%include "libavutil/x86/x86util.asm"
|
||||||
|
|
||||||
|
%if ARCH_X86_64
|
||||||
|
|
||||||
SECTION_RODATA
|
SECTION_RODATA
|
||||||
|
|
||||||
cextern pb_3
|
cextern pb_3
|
||||||
|
@ -34,7 +34,7 @@ OBJS-$(CONFIG_OPENGL_OUTDEV) += opengl_enc.o
|
|||||||
OBJS-$(CONFIG_OSS_INDEV) += oss_audio.o
|
OBJS-$(CONFIG_OSS_INDEV) += oss_audio.o
|
||||||
OBJS-$(CONFIG_OSS_OUTDEV) += oss_audio.o
|
OBJS-$(CONFIG_OSS_OUTDEV) += oss_audio.o
|
||||||
OBJS-$(CONFIG_PULSE_INDEV) += pulse_audio_dec.o \
|
OBJS-$(CONFIG_PULSE_INDEV) += pulse_audio_dec.o \
|
||||||
pulse_audio_common.o
|
pulse_audio_common.o timefilter.o
|
||||||
OBJS-$(CONFIG_PULSE_OUTDEV) += pulse_audio_enc.o \
|
OBJS-$(CONFIG_PULSE_OUTDEV) += pulse_audio_enc.o \
|
||||||
pulse_audio_common.o
|
pulse_audio_common.o
|
||||||
OBJS-$(CONFIG_QTKIT_INDEV) += qtkit.o
|
OBJS-$(CONFIG_QTKIT_INDEV) += qtkit.o
|
||||||
|
@ -23,6 +23,9 @@
|
|||||||
#include "avdevice.h"
|
#include "avdevice.h"
|
||||||
#include "config.h"
|
#include "config.h"
|
||||||
|
|
||||||
|
#include "libavutil/ffversion.h"
|
||||||
|
const char av_device_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
|
||||||
|
|
||||||
#define E AV_OPT_FLAG_ENCODING_PARAM
|
#define E AV_OPT_FLAG_ENCODING_PARAM
|
||||||
#define D AV_OPT_FLAG_DECODING_PARAM
|
#define D AV_OPT_FLAG_DECODING_PARAM
|
||||||
#define A AV_OPT_FLAG_AUDIO_PARAM
|
#define A AV_OPT_FLAG_AUDIO_PARAM
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
LIBAVDEVICE_$MAJOR {
|
LIBAVDEVICE_$MAJOR {
|
||||||
global: avdevice_*;
|
global: avdevice_*; av_*;
|
||||||
local: *;
|
local: *;
|
||||||
};
|
};
|
||||||
|
@ -681,7 +681,7 @@ static int pulse_write_frame(AVFormatContext *h, int stream_index,
|
|||||||
AVERROR(EINVAL) : 0;
|
AVERROR(EINVAL) : 0;
|
||||||
|
|
||||||
pkt.data = (*frame)->data[0];
|
pkt.data = (*frame)->data[0];
|
||||||
pkt.size = (*frame)->nb_samples * av_get_bytes_per_sample((*frame)->format) * (*frame)->channels;
|
pkt.size = (*frame)->nb_samples * av_get_bytes_per_sample((*frame)->format) * av_frame_get_channels(*frame);
|
||||||
pkt.dts = (*frame)->pkt_dts;
|
pkt.dts = (*frame)->pkt_dts;
|
||||||
pkt.duration = av_frame_get_pkt_duration(*frame);
|
pkt.duration = av_frame_get_pkt_duration(*frame);
|
||||||
return pulse_write_packet(h, &pkt);
|
return pulse_write_packet(h, &pkt);
|
||||||
|
@ -868,9 +868,6 @@ static int v4l2_read_header(AVFormatContext *s1)
|
|||||||
|
|
||||||
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
|
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
|
||||||
|
|
||||||
if ((res = v4l2_set_parameters(s1)) < 0)
|
|
||||||
return res;
|
|
||||||
|
|
||||||
if (s->pixel_format) {
|
if (s->pixel_format) {
|
||||||
AVCodec *codec = avcodec_find_decoder_by_name(s->pixel_format);
|
AVCodec *codec = avcodec_find_decoder_by_name(s->pixel_format);
|
||||||
|
|
||||||
@ -922,6 +919,9 @@ static int v4l2_read_header(AVFormatContext *s1)
|
|||||||
|
|
||||||
s->frame_format = desired_format;
|
s->frame_format = desired_format;
|
||||||
|
|
||||||
|
if ((res = v4l2_set_parameters(s1)) < 0)
|
||||||
|
return res;
|
||||||
|
|
||||||
st->codec->pix_fmt = avpriv_fmt_v4l2ff(desired_format, codec_id);
|
st->codec->pix_fmt = avpriv_fmt_v4l2ff(desired_format, codec_id);
|
||||||
s->frame_size =
|
s->frame_size =
|
||||||
avpicture_get_size(st->codec->pix_fmt, s->width, s->height);
|
avpicture_get_size(st->codec->pix_fmt, s->width, s->height);
|
||||||
|
@ -22,6 +22,7 @@
|
|||||||
#include "avdevice.h"
|
#include "avdevice.h"
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
AVClass *class;
|
||||||
int fd;
|
int fd;
|
||||||
} V4L2Context;
|
} V4L2Context;
|
||||||
|
|
||||||
|
@ -496,6 +496,8 @@ static av_cold int init(AVFilterContext *ctx)
|
|||||||
snprintf(name, sizeof(name), "input%d", i);
|
snprintf(name, sizeof(name), "input%d", i);
|
||||||
pad.type = AVMEDIA_TYPE_AUDIO;
|
pad.type = AVMEDIA_TYPE_AUDIO;
|
||||||
pad.name = av_strdup(name);
|
pad.name = av_strdup(name);
|
||||||
|
if (!pad.name)
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
pad.filter_frame = filter_frame;
|
pad.filter_frame = filter_frame;
|
||||||
|
|
||||||
ff_insert_inpad(ctx, i, &pad);
|
ff_insert_inpad(ctx, i, &pad);
|
||||||
|
@ -214,6 +214,8 @@ static av_cold int join_init(AVFilterContext *ctx)
|
|||||||
snprintf(name, sizeof(name), "input%d", i);
|
snprintf(name, sizeof(name), "input%d", i);
|
||||||
pad.type = AVMEDIA_TYPE_AUDIO;
|
pad.type = AVMEDIA_TYPE_AUDIO;
|
||||||
pad.name = av_strdup(name);
|
pad.name = av_strdup(name);
|
||||||
|
if (!pad.name)
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
pad.filter_frame = filter_frame;
|
pad.filter_frame = filter_frame;
|
||||||
|
|
||||||
pad.needs_fifo = 1;
|
pad.needs_fifo = 1;
|
||||||
|
@ -37,6 +37,9 @@
|
|||||||
#include "formats.h"
|
#include "formats.h"
|
||||||
#include "internal.h"
|
#include "internal.h"
|
||||||
|
|
||||||
|
#include "libavutil/ffversion.h"
|
||||||
|
const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
|
||||||
|
|
||||||
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame);
|
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame);
|
||||||
|
|
||||||
void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
|
void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
|
||||||
|
@ -52,6 +52,8 @@ static av_cold int split_init(AVFilterContext *ctx)
|
|||||||
snprintf(name, sizeof(name), "output%d", i);
|
snprintf(name, sizeof(name), "output%d", i);
|
||||||
pad.type = ctx->filter->inputs[0].type;
|
pad.type = ctx->filter->inputs[0].type;
|
||||||
pad.name = av_strdup(name);
|
pad.name = av_strdup(name);
|
||||||
|
if (!pad.name)
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
|
|
||||||
ff_insert_outpad(ctx, i, &pad);
|
ff_insert_outpad(ctx, i, &pad);
|
||||||
}
|
}
|
||||||
|
@ -292,6 +292,8 @@ static av_cold int movie_common_init(AVFilterContext *ctx)
|
|||||||
snprintf(name, sizeof(name), "out%d", i);
|
snprintf(name, sizeof(name), "out%d", i);
|
||||||
pad.type = movie->st[i].st->codec->codec_type;
|
pad.type = movie->st[i].st->codec->codec_type;
|
||||||
pad.name = av_strdup(name);
|
pad.name = av_strdup(name);
|
||||||
|
if (!pad.name)
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
pad.config_props = movie_config_output_props;
|
pad.config_props = movie_config_output_props;
|
||||||
pad.request_frame = movie_request_frame;
|
pad.request_frame = movie_request_frame;
|
||||||
ff_insert_outpad(ctx, i, &pad);
|
ff_insert_outpad(ctx, i, &pad);
|
||||||
|
@ -220,6 +220,19 @@ static int config_props(AVFilterLink *inlink)
|
|||||||
|
|
||||||
#define NB_PLANES 4
|
#define NB_PLANES 4
|
||||||
|
|
||||||
|
static inline int mirror(int x, int w)
|
||||||
|
{
|
||||||
|
if (!w)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
while ((unsigned)x > (unsigned)w) {
|
||||||
|
x = -x;
|
||||||
|
if (x < 0)
|
||||||
|
x += 2 * w;
|
||||||
|
}
|
||||||
|
return x;
|
||||||
|
}
|
||||||
|
|
||||||
static void blur(uint8_t *dst, const int dst_linesize,
|
static void blur(uint8_t *dst, const int dst_linesize,
|
||||||
const uint8_t *src, const int src_linesize,
|
const uint8_t *src, const int src_linesize,
|
||||||
const int w, const int h, FilterParam *fp)
|
const int w, const int h, FilterParam *fp)
|
||||||
@ -253,8 +266,7 @@ static void blur(uint8_t *dst, const int dst_linesize,
|
|||||||
for (dy = 0; dy < radius*2 + 1; dy++) {
|
for (dy = 0; dy < radius*2 + 1; dy++) {
|
||||||
int dx;
|
int dx;
|
||||||
int iy = y+dy - radius;
|
int iy = y+dy - radius;
|
||||||
if (iy < 0) iy = -iy;
|
iy = mirror(iy, h-1);
|
||||||
else if (iy >= h) iy = h+h-iy-1;
|
|
||||||
|
|
||||||
for (dx = 0; dx < radius*2 + 1; dx++) {
|
for (dx = 0; dx < radius*2 + 1; dx++) {
|
||||||
const int ix = x+dx - radius;
|
const int ix = x+dx - radius;
|
||||||
@ -265,13 +277,11 @@ static void blur(uint8_t *dst, const int dst_linesize,
|
|||||||
for (dy = 0; dy < radius*2+1; dy++) {
|
for (dy = 0; dy < radius*2+1; dy++) {
|
||||||
int dx;
|
int dx;
|
||||||
int iy = y+dy - radius;
|
int iy = y+dy - radius;
|
||||||
if (iy < 0) iy = -iy;
|
iy = mirror(iy, h-1);
|
||||||
else if (iy >= h) iy = h+h-iy-1;
|
|
||||||
|
|
||||||
for (dx = 0; dx < radius*2 + 1; dx++) {
|
for (dx = 0; dx < radius*2 + 1; dx++) {
|
||||||
int ix = x+dx - radius;
|
int ix = x+dx - radius;
|
||||||
if (ix < 0) ix = -ix;
|
ix = mirror(ix, w-1);
|
||||||
else if (ix >= w) ix = w+w-ix-1;
|
|
||||||
UPDATE_FACTOR;
|
UPDATE_FACTOR;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -233,9 +233,9 @@ static void filter(SPPContext *p, uint8_t *dst, uint8_t *src,
|
|||||||
const int y1 = y + offset[i + count - 1][1];
|
const int y1 = y + offset[i + count - 1][1];
|
||||||
const int index = x1 + y1*linesize;
|
const int index = x1 + y1*linesize;
|
||||||
p->pdsp.get_pixels(block, p->src + index, linesize);
|
p->pdsp.get_pixels(block, p->src + index, linesize);
|
||||||
p->fdsp.fdct(block);
|
p->dct->fdct(block);
|
||||||
p->requantize(block2, block, qp, p->idsp.idct_permutation);
|
p->requantize(block2, block, qp, p->dct->idct_permutation);
|
||||||
p->idsp.idct(block2);
|
p->dct->idct(block2);
|
||||||
add_block(p->temp + index, linesize, block2);
|
add_block(p->temp + index, linesize, block2);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -378,11 +378,11 @@ static av_cold int init(AVFilterContext *ctx)
|
|||||||
SPPContext *spp = ctx->priv;
|
SPPContext *spp = ctx->priv;
|
||||||
|
|
||||||
spp->avctx = avcodec_alloc_context3(NULL);
|
spp->avctx = avcodec_alloc_context3(NULL);
|
||||||
if (!spp->avctx)
|
spp->dct = avcodec_dct_alloc();
|
||||||
|
if (!spp->avctx || !spp->dct)
|
||||||
return AVERROR(ENOMEM);
|
return AVERROR(ENOMEM);
|
||||||
ff_idctdsp_init(&spp->idsp, spp->avctx);
|
|
||||||
ff_fdctdsp_init(&spp->fdsp, spp->avctx);
|
|
||||||
ff_pixblockdsp_init(&spp->pdsp, spp->avctx);
|
ff_pixblockdsp_init(&spp->pdsp, spp->avctx);
|
||||||
|
avcodec_dct_init(spp->dct);
|
||||||
spp->store_slice = store_slice_c;
|
spp->store_slice = store_slice_c;
|
||||||
switch (spp->mode) {
|
switch (spp->mode) {
|
||||||
case MODE_HARD: spp->requantize = hardthresh_c; break;
|
case MODE_HARD: spp->requantize = hardthresh_c; break;
|
||||||
@ -403,6 +403,7 @@ static av_cold void uninit(AVFilterContext *ctx)
|
|||||||
avcodec_close(spp->avctx);
|
avcodec_close(spp->avctx);
|
||||||
av_freep(&spp->avctx);
|
av_freep(&spp->avctx);
|
||||||
}
|
}
|
||||||
|
av_freep(&spp->dct);
|
||||||
av_freep(&spp->non_b_qp_table);
|
av_freep(&spp->non_b_qp_table);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -24,8 +24,7 @@
|
|||||||
|
|
||||||
#include "libavcodec/avcodec.h"
|
#include "libavcodec/avcodec.h"
|
||||||
#include "libavcodec/pixblockdsp.h"
|
#include "libavcodec/pixblockdsp.h"
|
||||||
#include "libavcodec/idctdsp.h"
|
#include "libavcodec/avdct.h"
|
||||||
#include "libavcodec/fdctdsp.h"
|
|
||||||
#include "avfilter.h"
|
#include "avfilter.h"
|
||||||
|
|
||||||
#define MAX_LEVEL 6 /* quality levels */
|
#define MAX_LEVEL 6 /* quality levels */
|
||||||
@ -41,9 +40,8 @@ typedef struct {
|
|||||||
uint8_t *src;
|
uint8_t *src;
|
||||||
int16_t *temp;
|
int16_t *temp;
|
||||||
AVCodecContext *avctx;
|
AVCodecContext *avctx;
|
||||||
IDCTDSPContext idsp;
|
|
||||||
FDCTDSPContext fdsp;
|
|
||||||
PixblockDSPContext pdsp;
|
PixblockDSPContext pdsp;
|
||||||
|
AVDCT *dct;
|
||||||
int8_t *non_b_qp_table;
|
int8_t *non_b_qp_table;
|
||||||
int non_b_qp_alloc_size;
|
int non_b_qp_alloc_size;
|
||||||
int use_bframe_qp;
|
int use_bframe_qp;
|
||||||
|
Some files were not shown because too many files have changed in this diff.