Compare commits
198 Commits
SHA1: a248117f26, bd66456866, 90ee388b28, e3d643cf75, 1b05b0005b, 38ca79b04d, 0dff3171ce, ff29290e26,
ba7cd748c1, 510da4fe2b, eec833b10d, 9b0736c08a, 70a1182a48, 49d597f058, 44ebb2556d, 0a41da3e9d,
afe09e490a, f8c4dbe45e, c997dcd38b, 2a1bebfc83, 23d835f611, d04dc7b5a7, 3197a9c4fa, ade4f3e746,
053c19cd88, 96481c5e18, 9b052bfb86, f844cb9bce, 76c97f1963, 280998b13c, 96cf80609a, 33c9e18b09,
9c713f30e4, 530d10792d, 799000af70, f8d3bb8961, 78889be3fb, c65763a2c6, 6d4d186e9e, 5ebb5a32bd,
a694b2b158, d785f69401, 5025dbc577, 5bfa208e65, d86a5ce03f, 7d4c38d58d, c313f3160a, 7e6625a9af,
f13f6f82c6, 8489c0599f, ee6b868ac8, b6783b8826, e2d529424f, 537c173853, c10582e703, dfeef3a209,
2b6f3be082, 0a57df38f4, 17966ae5bb, 5af2fd317d, 8aedb75156, 1fd86f9a21, 377fabc9e6, 41eda87048,
e6ac11e417, 2cac35086c, af343f5cdd, 391e0fc6c9, caeca53a09, 760929117d, acada70ffb, 4f91c45644,
e4831bb9a6, db5b454c3d, 301761792a, 440e98574b, 604d72aa0d, 03ddc26066, 801eff785a, b59ee5dcf1,
e163d884ef, 56cc629a64, 685321e4bd, 3f1a58db6f, 597d709eb4, dd0c5e0fa9, ad02537746, 3bc9cfe66e,
910c1f2352, 55065315ca, 8081879655, a39c6bf1b8, 884a9b0d29, 4457e6137d, 08d9fd611e, 5fa739e685,
b143844ea0, 10ff052c60, 4ede95e69c, ce8910d861, 3d0c9c9af6, f3f22f183f, bfbff1c748, 7fd7950174,
700fb8c8dd, 9f80712454, fe9cbf582b, 642d758a2d, aa45b90804, 549b8083d6, ec6719f655, 11ecd8574a,
5754176b5b, fb3189ce8b, 8168a7cec9, 562d6fd5b5, dd14723602, 9474c93028, 7e070cf202, 1b48a426a9,
e3e369f696, 6996a2f796, 05f5a2eb62, 4a636a5e43, 44da556815, aa097b4d5f, 8148833193, 3c0f84402b,
601fa56582, c0df6a24ce, 2d63f9b4ef, 4c849c6991, 42c3a3719b, 7a0ff7566b, 10c244cc89, 99008ba366,
a81c1ea2eb, 0892a6340f, d3e2f35f7a, e39fc137ae, a2ae183a38, 80b8dc30dc, 7b91e52eb9, e28814e0e1,
d6e250abfc, 61ece41372, b6c5848a1f, b6ba39f931, 77d43bf42d, 899d95efe1, 8812b5f164, f31170d4e7,
0173a7966b, a60eb6ef12, 8582e6e9a3, 9a5e81235e, c497d71a02, 0054d70f23, b102d5d97d, 858c3158b5,
5e87fa347c, 6a441ee78e, 316589e1db, 35bf5f7966, 89409be50c, a4bf9033c3, 8502b4aef6, 03e404740e,
688da036b1, c761e144f6, b3e5c8de6a, ee6c1670df, 9e4a68a76c, 25594f0018, a85c3fff37, 0f5840b51a,
1285fe5530, 0aefcb6aa8, 64bc5f3bf7, b61e311b0e, ee66a7198e, 50336dc4f1, 269dbc5359, 850298ef25,
628b82294a, 75d8cccf0e, d87997b56f, b15e85d820, 654b24f68a, 2f2fd8c6d1, c5f7c755cf, b581580bd1,
3313f31f01, c71c77e56f, 08c81f7365, 50073e2395, 3fc967f6c7, 26ac878cc2
Doxyfile (2 changes)
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

-PROJECT_NUMBER = 0.8.11
+PROJECT_NUMBER = 0.8.15

# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
@@ -57,7 +57,7 @@ AVFormatContext *avformat_opts;
struct SwsContext *sws_opts;
AVDictionary *format_opts, *video_opts, *audio_opts, *sub_opts;

-static const int this_year = 2011;
+static const int this_year = 2013;

void init_opts(void)
{
configure (4 changes, vendored)
@@ -1057,6 +1057,7 @@ HAVE_LIST="
dlfcn_h
dlopen
dos_paths
+dxva_h
ebp_available
ebx_available
exp2
@@ -2378,7 +2379,7 @@ check_host_cflags -std=c99
check_host_cflags -Wall

case "$arch" in
-alpha|ia64|mips|parisc|sparc)
+alpha|ia64|mips|parisc|ppc|sparc)
spic=$shared
;;
x86)
@@ -2859,6 +2860,7 @@ check_func_headers windows.h MapViewOfFile
check_func_headers windows.h VirtualAlloc

check_header dlfcn.h
+check_header dxva.h
check_header dxva2api.h
check_header libcrystalhd/libcrystalhd_if.h
check_header malloc.h
|
120
doc/APIchanges
120
doc/APIchanges
@@ -66,16 +66,16 @@ API changes, most recent first:
2011-06-10 - c381960 - lavfi 2.15.0 - avfilter_get_audio_buffer_ref_from_arrays
Add avfilter_get_audio_buffer_ref_from_arrays() to avfilter.h.

-2011-06-09 - d9f80ea - lavu 51.8.0 - AVMetadata
+2011-06-09 - f9ecb84 / d9f80ea - lavu 51.8.0 - AVMetadata
Move AVMetadata from lavf to lavu and rename it to
AVDictionary -- new installed header dict.h.
All av_metadata_* functions renamed to av_dict_*.

-2011-06-07 - a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
+2011-06-07 - d552f61 / a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
Add av_get_bytes_per_sample() in libavutil/samplefmt.h.
Deprecate av_get_bits_per_sample_fmt().

-2011-06-xx - b39b062 - lavu 51.8.0 - opt.h
+2011-06-xx - f956924 / b39b062 - lavu 51.8.0 - opt.h
Add av_opt_free convenience function.

2011-06-06 - 95a0242 - lavfi 2.14.0 - AVFilterBufferRefAudioProps
@@ -105,7 +105,7 @@ API changes, most recent first:
Add av_get_pix_fmt_name() in libavutil/pixdesc.h, and deprecate
avcodec_get_pix_fmt_name() in libavcodec/avcodec.h in its favor.

-2011-05-25 - 30315a8 - lavf 53.3.0 - avformat.h
+2011-05-25 - 39e4206 / 30315a8 - lavf 53.3.0 - avformat.h
Add fps_probe_size to AVFormatContext.

2011-05-22 - 5ecdfd0 - lavf 53.2.0 - avformat.h
@@ -121,10 +121,10 @@ API changes, most recent first:
2011-05-14 - 9fdf772 - lavfi 2.6.0 - avcodec.h
Add avfilter_get_video_buffer_ref_from_frame() to libavfilter/avcodec.h.

-2011-05-18 - 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
+2011-05-18 - 75a37b5 / 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
Add request_sample_fmt field to AVCodecContext.

-2011-05-10 - 188dea1 - lavc 53.6.0 - avcodec.h
+2011-05-10 - 59eb12f / 188dea1 - lavc 53.6.0 - avcodec.h
Deprecate AVLPCType and the following fields in
AVCodecContext: lpc_coeff_precision, prediction_order_method,
min_partition_order, max_partition_order, lpc_type, lpc_passes.
@@ -154,81 +154,81 @@ API changes, most recent first:
Add av_dynarray_add function for adding
an element to a dynamic array.

-2011-04-26 - bebe72f - lavu 51.1.0 - avutil.h
+2011-04-26 - d7e5aeb / bebe72f - lavu 51.1.0 - avutil.h
Add AVPictureType enum and av_get_picture_type_char(), deprecate
FF_*_TYPE defines and av_get_pict_type_char() defined in
libavcodec/avcodec.h.

-2011-04-26 - 10d3940 - lavfi 2.3.0 - avfilter.h
+2011-04-26 - d7e5aeb / 10d3940 - lavfi 2.3.0 - avfilter.h
Add pict_type and key_frame fields to AVFilterBufferRefVideo.

-2011-04-26 - 7a11c82 - lavfi 2.2.0 - vsrc_buffer
+2011-04-26 - d7e5aeb / 7a11c82 - lavfi 2.2.0 - vsrc_buffer
Add sample_aspect_ratio fields to vsrc_buffer arguments

-2011-04-21 - 94f7451 - lavc 53.1.0 - avcodec.h
+2011-04-21 - 8772156 / 94f7451 - lavc 53.1.0 - avcodec.h
Add CODEC_CAP_SLICE_THREADS for codecs supporting sliced threading.

2011-04-15 - lavc 52.120.0 - avcodec.h
AVPacket structure got additional members for passing side information:
-4de339e introduce side information for AVPacket
-2d8591c make containers pass palette change in AVPacket
+c407984 / 4de339e introduce side information for AVPacket
+c407984 / 2d8591c make containers pass palette change in AVPacket

2011-04-12 - lavf 52.107.0 - avio.h
Avio cleanup, part II - deprecate the entire URLContext API:
-175389c add avio_check as a replacement for url_exist
-ff1ec0c add avio_pause and avio_seek_time as replacements
+c55780d / 175389c add avio_check as a replacement for url_exist
+9891004 / ff1ec0c add avio_pause and avio_seek_time as replacements
for _av_url_read_fseek/fpause
-cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
+d4d0932 / cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
should be used instead.
-80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
-f87b1b3 rename open flags: URL_* -> AVIO_*
-f8270bb add avio_enum_protocols.
-5593f03 deprecate URLProtocol.
-c486dad deprecate URLContext.
-026e175 deprecate the typedef for URLInterruptCB
-8e76a19 deprecate av_register_protocol2.
-b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
-1305d93 deprecate av_url_read_seek
-fa104e1 deprecate av_url_read_pause
-727c7aa deprecate url_get_filename().
-5958df3 deprecate url_max_packet_size().
-1869ea0 deprecate url_get_file_handle().
-32a97d4 deprecate url_filesize().
-e52a914 deprecate url_close().
-58a48c6 deprecate url_seek().
-925e908 deprecate url_write().
-dce3756 deprecate url_read_complete().
-bc371ac deprecate url_read().
-0589da0 deprecate url_open().
-62eaaea deprecate url_connect.
-5652bb9 deprecate url_alloc.
-333e894 deprecate url_open_protocol
-e230705 deprecate url_poll and URLPollEntry
+c88caa5 / 80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
+c88caa5 / f87b1b3 rename open flags: URL_* -> AVIO_*
+d4d0932 / f8270bb add avio_enum_protocols.
+d4d0932 / 5593f03 deprecate URLProtocol.
+d4d0932 / c486dad deprecate URLContext.
+d4d0932 / 026e175 deprecate the typedef for URLInterruptCB
+c88caa5 / 8e76a19 deprecate av_register_protocol2.
+11d7841 / b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
+11d7841 / 1305d93 deprecate av_url_read_seek
+11d7841 / fa104e1 deprecate av_url_read_pause
+434f248 / 727c7aa deprecate url_get_filename().
+434f248 / 5958df3 deprecate url_max_packet_size().
+434f248 / 1869ea0 deprecate url_get_file_handle().
+434f248 / 32a97d4 deprecate url_filesize().
+434f248 / e52a914 deprecate url_close().
+434f248 / 58a48c6 deprecate url_seek().
+434f248 / 925e908 deprecate url_write().
+434f248 / dce3756 deprecate url_read_complete().
+434f248 / bc371ac deprecate url_read().
+434f248 / 0589da0 deprecate url_open().
+434f248 / 62eaaea deprecate url_connect.
+434f248 / 5652bb9 deprecate url_alloc.
+434f248 / 333e894 deprecate url_open_protocol
+434f248 / e230705 deprecate url_poll and URLPollEntry

2011-04-08 - lavf 52.106.0 - avformat.h
Minor avformat.h cleanup:
-a9bf9d8 deprecate av_guess_image2_codec
-c3675df rename avf_sdp_create->av_sdp_create
+d4d0932 / a9bf9d8 deprecate av_guess_image2_codec
+d4d0932 / c3675df rename avf_sdp_create->av_sdp_create

2011-04-03 - lavf 52.105.0 - avio.h
Large-scale renaming/deprecating of AVIOContext-related functions:
-724f6a0 deprecate url_fdopen
-403ee83 deprecate url_open_dyn_packet_buf
-6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
-b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
-8978fed introduce an AVIOContext.seekable field as a replacement for
+2cae980 / 724f6a0 deprecate url_fdopen
+2cae980 / 403ee83 deprecate url_open_dyn_packet_buf
+2cae980 / 6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
+2cae980 / b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
+2cae980 / 8978fed introduce an AVIOContext.seekable field as a replacement for
AVIOContext.is_streamed and url_is_streamed()
-b64030f deprecate get_checksum()
-4c4427a deprecate init_checksum()
-4ec153b deprecate udp_set_remote_url/get_local_port
-933e90a deprecate av_url_read_fseek/fpause
-8d9769a deprecate url_fileno
-b7f2fdd rename put_flush_packet -> avio_flush
-35f1023 deprecate url_close_buf
-83fddae deprecate url_open_buf
-d9d86e0 rename url_fprintf -> avio_printf
-59f65d9 deprecate url_setbufsize
-3e68b3b deprecate url_ferror
+1caa412 / b64030f deprecate get_checksum()
+1caa412 / 4c4427a deprecate init_checksum()
+2fd41c9 / 4ec153b deprecate udp_set_remote_url/get_local_port
+4fa0e24 / 933e90a deprecate av_url_read_fseek/fpause
+4fa0e24 / 8d9769a deprecate url_fileno
+0fecf26 / b7f2fdd rename put_flush_packet -> avio_flush
+0fecf26 / 35f1023 deprecate url_close_buf
+0fecf26 / 83fddae deprecate url_open_buf
+0fecf26 / d9d86e0 rename url_fprintf -> avio_printf
+0fecf26 / 59f65d9 deprecate url_setbufsize
+6947b0c / 3e68b3b deprecate url_ferror
66e5b1d deprecate url_feof
e8bb2e2 deprecate url_fget_max_packet_size
76aa876 rename url_fsize -> avio_size
@@ -250,7 +250,7 @@ API changes, most recent first:
b3db9ce deprecate get_partial_buffer
8d9ac96 rename av_alloc_put_byte -> avio_alloc_context

-2011-03-25 - 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
+2011-03-25 - 27ef7b1 / 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
Add audio_service_type field to AVCodecContext.

2011-03-17 - e309fdc - lavu 50.40.0 - pixfmt.h
@@ -288,11 +288,11 @@ API changes, most recent first:
2011-02-10 - 12c14cd - lavf 52.99.0 - AVStream.disposition
Add AV_DISPOSITION_HEARING_IMPAIRED and AV_DISPOSITION_VISUAL_IMPAIRED.

-2011-02-09 - 5592734 - lavc 52.112.0 - avcodec_thread_init()
+2011-02-09 - c0b102c - lavc 52.112.0 - avcodec_thread_init()
Deprecate avcodec_thread_init()/avcodec_thread_free() use; instead
set thread_count before calling avcodec_open.

-2011-02-09 - 778b08a - lavc 52.111.0 - threading API
+2011-02-09 - 37b00b4 - lavc 52.111.0 - threading API
Add CODEC_CAP_FRAME_THREADS with new restrictions on get_buffer()/
release_buffer()/draw_horiz_band() callbacks for appropriate codecs.
Add thread_type and active_thread_type fields to AVCodecContext.
|
@@ -299,6 +299,10 @@ prefix is ``ffmpeg2pass''. The complete file name will be
|
||||
@file{PREFIX-N.log}, where N is a number specific to the output
|
||||
stream.
|
||||
|
||||
Note that this option is overwritten by a local option of the same name
|
||||
when using @code{-vcodec libx264}. That option maps to the x264 option stats
|
||||
which has a different syntax.
|
||||
|
||||
@item -newvideo
|
||||
Add a new video stream to the current output stream.
|
||||
|
||||
|
@@ -82,7 +82,7 @@ Follows a BNF description for the filtergraph syntax:
@var{LINKLABEL} ::= "[" @var{NAME} "]"
@var{LINKLABELS} ::= @var{LINKLABEL} [@var{LINKLABELS}]
@var{FILTER_ARGUMENTS} ::= sequence of chars (eventually quoted)
-@var{FILTER} ::= [@var{LINKNAMES}] @var{NAME} ["=" @var{ARGUMENTS}] [@var{LINKNAMES}]
+@var{FILTER} ::= [@var{LINKLABELS}] @var{NAME} ["=" @var{FILTER_ARGUMENTS}] [@var{LINKLABELS}]
@var{FILTERCHAIN} ::= @var{FILTER} [,@var{FILTERCHAIN}]
@var{FILTERGRAPH} ::= @var{FILTERCHAIN} [;@var{FILTERGRAPH}]
@end example
@@ -15,7 +15,7 @@ be properly added to the respective issue.
The subscription URL for the ffmpeg-trac list is:
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
The URL of the webinterface of the tracker is:
-http(s)://ffmpeg.org/trac/ffmpeg
+http(s)://trac.ffmpeg.org

NOTE: issue = (bug report || patch || feature request)
ffmpeg.c (12 changes)
@@ -313,6 +313,7 @@ typedef struct AVOutputStream {
#endif

int sws_flags;
+char *forced_key_frames;
} AVOutputStream;

static AVOutputStream **output_streams_for_file[MAX_FILES] = { NULL };
@@ -2336,6 +2337,9 @@ static int transcode(AVFormatContext **output_files,
"Please consider specifiying a lower framerate, a different muxer or -vsync 2\n");
}

+if (ost->forced_key_frames)
+parse_forced_key_frames(ost->forced_key_frames, ost, codec);
+
#if CONFIG_AVFILTER
if (configure_video_filters(ist, ost)) {
fprintf(stderr, "Error opening filters!\n");
@@ -2857,6 +2861,7 @@ static int transcode(AVFormatContext **output_files,
av_freep(&ost->st->codec->subtitle_header);
av_free(ost->resample_frame.data[0]);
av_free(ost->forced_kf_pts);
+av_free(ost->forced_key_frames);
if (ost->video_resample)
sws_freeContext(ost->img_resample_ctx);
if (ost->resample)
@@ -3655,8 +3660,10 @@ static void new_video_stream(AVFormatContext *oc, int file_idx)
}
}

-if (forced_key_frames)
-parse_forced_key_frames(forced_key_frames, ost, video_enc);
+if (forced_key_frames) {
+ost->forced_key_frames = forced_key_frames;
+forced_key_frames = NULL;
+}
}
if (video_language) {
av_dict_set(&st->metadata, "language", video_language, 0);
@@ -3666,7 +3673,6 @@ static void new_video_stream(AVFormatContext *oc, int file_idx)
/* reset some key parameters */
video_disable = 0;
av_freep(&video_codec_name);
-av_freep(&forced_key_frames);
video_stream_copy = 0;
frame_pix_fmt = PIX_FMT_NONE;
}
@@ -44,7 +44,7 @@ typedef struct EightSvxContext {
/* buffer used to store the whole audio decoded/interleaved chunk,
* which is sent with the first packet */
uint8_t *samples;
-size_t samples_size;
+int64_t samples_size;
int samples_idx;
} EightSvxContext;
@@ -183,6 +183,8 @@ static av_cold int che_configure(AACContext *ac,
enum ChannelPosition che_pos[4][MAX_ELEM_ID],
int type, int id, int *channels)
{
+if (*channels >= MAX_CHANNELS)
+return AVERROR_INVALIDDATA;
if (che_pos[type][id]) {
if (!ac->che[type][id] && !(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
return AVERROR(ENOMEM);
@@ -568,6 +570,11 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
output_scale_factor = 1.0;
}

+if (avctx->channels > MAX_CHANNELS) {
+av_log(avctx, AV_LOG_ERROR, "Too many channels\n");
+return AVERROR_INVALIDDATA;
+}
+
AAC_INIT_VLC_STATIC( 0, 304);
AAC_INIT_VLC_STATIC( 1, 270);
AAC_INIT_VLC_STATIC( 2, 550);
@@ -1694,7 +1701,7 @@ static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
int w, filt, m, i;
int bottom, top, order, start, end, size, inc;
float lpc[TNS_MAX_ORDER];
-float tmp[TNS_MAX_ORDER];
+float tmp[TNS_MAX_ORDER + 1];

for (w = 0; w < ics->num_windows; w++) {
bottom = ics->num_swb;
@@ -1183,14 +1183,15 @@ static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
{
int i, n;
const float *sbr_qmf_window = div ? sbr_qmf_window_ds : sbr_qmf_window_us;
+const int step = 128 >> div;
float *v;
for (i = 0; i < 32; i++) {
-if (*v_off < 128 >> div) {
+if (*v_off < step) {
int saved_samples = (1280 - 128) >> div;
memcpy(&v0[SBR_SYNTHESIS_BUF_SIZE - saved_samples], v0, saved_samples * sizeof(float));
-*v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - (128 >> div);
+*v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - step;
} else {
-*v_off -= 128 >> div;
+*v_off -= step;
}
v = v0 + *v_off;
if (div) {
@@ -778,9 +778,13 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
static av_cold int adpcm_decode_init(AVCodecContext * avctx)
{
ADPCMContext *c = avctx->priv_data;
+unsigned int min_channels = 1;
unsigned int max_channels = 2;

switch(avctx->codec->id) {
+case CODEC_ID_ADPCM_EA:
+min_channels = 2;
+break;
case CODEC_ID_ADPCM_EA_R1:
case CODEC_ID_ADPCM_EA_R2:
case CODEC_ID_ADPCM_EA_R3:
@@ -788,8 +792,10 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
max_channels = 6;
break;
}
-if(avctx->channels > max_channels){
-return -1;
+
+if (avctx->channels < min_channels || avctx->channels > max_channels) {
+av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
+return AVERROR(EINVAL);
}

switch(avctx->codec->id) {
@@ -664,10 +664,9 @@ static av_cold int alac_decode_init(AVCodecContext * avctx)
alac->numchannels = alac->avctx->channels;

/* initialize from the extradata */
-if (alac->avctx->extradata_size != ALAC_EXTRADATA_SIZE) {
-av_log(avctx, AV_LOG_ERROR, "alac: expected %d extradata bytes\n",
-ALAC_EXTRADATA_SIZE);
-return -1;
+if (alac->avctx->extradata_size < ALAC_EXTRADATA_SIZE) {
+av_log(avctx, AV_LOG_ERROR, "alac: extradata is too small\n");
+return AVERROR_INVALIDDATA;
}
if (alac_set_info(alac)) {
av_log(avctx, AV_LOG_ERROR, "alac: set_info failed\n");
@@ -257,7 +257,7 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch)
// generate warm-up samples
residual[0] = samples[0];
for(i=1;i<=lpc.lpc_order;i++)
-residual[i] = samples[i] - samples[i-1];
+residual[i] = sign_extend(samples[i] - samples[i-1], s->write_sample_size);

// perform lpc on remaining samples
for(i = lpc.lpc_order + 1; i < s->avctx->frame_size; i++) {
@@ -551,12 +551,15 @@ static void get_block_sizes(ALSDecContext *ctx, unsigned int *div_blocks,
|
||||
|
||||
/** Read the block data for a constant block
|
||||
*/
|
||||
static void read_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
static int read_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
{
|
||||
ALSSpecificConfig *sconf = &ctx->sconf;
|
||||
AVCodecContext *avctx = ctx->avctx;
|
||||
GetBitContext *gb = &ctx->gb;
|
||||
|
||||
if (bd->block_length <= 0)
|
||||
return -1;
|
||||
|
||||
*bd->raw_samples = 0;
|
||||
*bd->const_block = get_bits1(gb); // 1 = constant value, 0 = zero block (silence)
|
||||
bd->js_blocks = get_bits1(gb);
|
||||
@@ -571,6 +574,8 @@ static void read_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
|
||||
// ensure constant block decoding by reusing this field
|
||||
*bd->const_block = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -650,6 +655,11 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
for (k = 1; k < sub_blocks; k++)
|
||||
s[k] = s[k - 1] + decode_rice(gb, 0);
|
||||
}
|
||||
for (k = 1; k < sub_blocks; k++)
|
||||
if (s[k] > 32) {
|
||||
av_log(avctx, AV_LOG_ERROR, "k invalid for rice code.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (get_bits1(gb))
|
||||
*bd->shift_lsbs = get_bits(gb, 4) + 1;
|
||||
@@ -662,6 +672,11 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
int opt_order_length = av_ceil_log2(av_clip((bd->block_length >> 3) - 1,
|
||||
2, sconf->max_order + 1));
|
||||
*bd->opt_order = get_bits(gb, opt_order_length);
|
||||
if (*bd->opt_order > sconf->max_order) {
|
||||
*bd->opt_order = sconf->max_order;
|
||||
av_log(avctx, AV_LOG_ERROR, "Predictor order too large!\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
} else {
|
||||
*bd->opt_order = sconf->max_order;
|
||||
}
|
||||
@@ -694,6 +709,10 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
int rice_param = parcor_rice_table[sconf->coef_table][k][1];
|
||||
int offset = parcor_rice_table[sconf->coef_table][k][0];
|
||||
quant_cof[k] = decode_rice(gb, rice_param) + offset;
|
||||
if (quant_cof[k] < -64 || quant_cof[k] > 63) {
|
||||
av_log(avctx, AV_LOG_ERROR, "quant_cof %d is out of range\n", quant_cof[k]);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
// read coefficients 20 to 126
|
||||
@@ -726,7 +745,7 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
bd->ltp_gain[0] = decode_rice(gb, 1) << 3;
|
||||
bd->ltp_gain[1] = decode_rice(gb, 2) << 3;
|
||||
|
||||
r = get_unary(gb, 0, 4);
|
||||
r = get_unary(gb, 0, 3);
|
||||
c = get_bits(gb, 2);
|
||||
bd->ltp_gain[2] = ltp_gain_values[r][c];
|
||||
|
||||
@@ -755,7 +774,6 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
int delta[8];
|
||||
unsigned int k [8];
|
||||
unsigned int b = av_clip((av_ceil_log2(bd->block_length) - 3) >> 1, 0, 5);
|
||||
unsigned int i = start;
|
||||
|
||||
// read most significant bits
|
||||
unsigned int high;
|
||||
@@ -766,29 +784,30 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
|
||||
current_res = bd->raw_samples + start;
|
||||
|
||||
for (sb = 0; sb < sub_blocks; sb++, i = 0) {
|
||||
for (sb = 0; sb < sub_blocks; sb++) {
|
||||
unsigned int sb_len = sb_length - (sb ? 0 : start);
|
||||
|
||||
k [sb] = s[sb] > b ? s[sb] - b : 0;
|
||||
delta[sb] = 5 - s[sb] + k[sb];
|
||||
|
||||
ff_bgmc_decode(gb, sb_length, current_res,
|
||||
ff_bgmc_decode(gb, sb_len, current_res,
|
||||
delta[sb], sx[sb], &high, &low, &value, ctx->bgmc_lut, ctx->bgmc_lut_status);
|
||||
|
||||
current_res += sb_length;
|
||||
current_res += sb_len;
|
||||
}
|
||||
|
||||
ff_bgmc_decode_end(gb);
|
||||
|
||||
|
||||
// read least significant bits and tails
|
||||
i = start;
|
||||
current_res = bd->raw_samples + start;
|
||||
|
||||
for (sb = 0; sb < sub_blocks; sb++, i = 0) {
|
||||
for (sb = 0; sb < sub_blocks; sb++, start = 0) {
|
||||
unsigned int cur_tail_code = tail_code[sx[sb]][delta[sb]];
|
||||
unsigned int cur_k = k[sb];
|
||||
unsigned int cur_s = s[sb];
|
||||
|
||||
for (; i < sb_length; i++) {
|
||||
for (; start < sb_length; start++) {
|
||||
int32_t res = *current_res;
|
||||
|
||||
if (res == cur_tail_code) {
|
||||
@@ -956,7 +975,8 @@ static int read_block(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
if (read_var_block_data(ctx, bd))
|
||||
return -1;
|
||||
} else {
|
||||
read_const_block_data(ctx, bd);
|
||||
if (read_const_block_data(ctx, bd) < 0)
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@@ -366,7 +366,7 @@ int ff_ass_split_override_codes(const ASSCodesCallbacks *callbacks, void *priv,
|
||||
char new_line[2];
|
||||
int text_len = 0;
|
||||
|
||||
while (*buf) {
|
||||
while (buf && *buf) {
|
||||
if (text && callbacks->text &&
|
||||
(sscanf(buf, "\\%1[nN]", new_line) == 1 ||
|
||||
!strncmp(buf, "{\\", 2))) {
|
||||
|
@@ -179,8 +179,11 @@ static int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){
|
||||
uint32_t* obuf = (uint32_t*) out;
|
||||
|
||||
off = (intptr_t)inbuffer & 3;
|
||||
buf = (const uint32_t*) (inbuffer - off);
|
||||
c = av_be2ne32((0x537F6103 >> (off*8)) | (0x537F6103 << (32-(off*8))));
|
||||
buf = (const uint32_t *)(inbuffer - off);
|
||||
if (off)
|
||||
c = av_be2ne32((0x537F6103U >> (off * 8)) | (0x537F6103U << (32 - (off * 8))));
|
||||
else
|
||||
c = av_be2ne32(0x537F6103U);
|
||||
bytes += 3 + off;
|
||||
for (i = 0; i < bytes/4; i++)
|
||||
obuf[i] = c ^ buf[i];
|
||||
|
@@ -160,6 +160,7 @@ static av_cold int avs_decode_init(AVCodecContext * avctx)
|
||||
AvsContext *const avs = avctx->priv_data;
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
avcodec_get_frame_defaults(&avs->picture);
|
||||
avcodec_set_dimensions(avctx, 318, 198);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -85,9 +85,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
frame_len_bits = 11;
|
||||
}
|
||||
|
||||
if (avctx->channels > MAX_CHANNELS) {
|
||||
av_log(avctx, AV_LOG_ERROR, "too many channels: %d\n", avctx->channels);
|
||||
return -1;
|
||||
if (avctx->channels < 1 || avctx->channels > MAX_CHANNELS) {
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid number of channels: %d\n", avctx->channels);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (avctx->extradata && avctx->extradata_size > 0)
|
||||
|
@@ -219,9 +219,6 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
if(comp == BMP_RLE4 || comp == BMP_RLE8)
|
||||
memset(p->data[0], 0, avctx->height * p->linesize[0]);
|
||||
|
||||
if(depth == 4 || depth == 8)
|
||||
memset(p->data[1], 0, 1024);
|
||||
|
||||
if(height > 0){
|
||||
ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
|
||||
linesize = -p->linesize[0];
|
||||
@@ -232,6 +229,9 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
|
||||
if(avctx->pix_fmt == PIX_FMT_PAL8){
|
||||
int colors = 1 << depth;
|
||||
|
||||
memset(p->data[1], 0, 1024);
|
||||
|
||||
if(ihsize >= 36){
|
||||
int t;
|
||||
buf = buf0 + 46;
|
||||
|
@@ -26,6 +26,10 @@
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
|
||||
typedef struct {
|
||||
const uint8_t *buffer, *buffer_end;
|
||||
} GetByteContext;
|
||||
|
||||
#define DEF_T(type, name, bytes, read, write) \
|
||||
static av_always_inline type bytestream_get_ ## name(const uint8_t **b){\
|
||||
(*b) += bytes;\
|
||||
@@ -34,6 +38,18 @@ static av_always_inline type bytestream_get_ ## name(const uint8_t **b){\
|
||||
static av_always_inline void bytestream_put_ ##name(uint8_t **b, const type value){\
|
||||
write(*b, value);\
|
||||
(*b) += bytes;\
|
||||
}\
|
||||
static av_always_inline type bytestream2_get_ ## name(GetByteContext *g)\
|
||||
{\
|
||||
if (g->buffer_end - g->buffer < bytes)\
|
||||
return 0;\
|
||||
return bytestream_get_ ## name(&g->buffer);\
|
||||
}\
|
||||
static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g)\
|
||||
{\
|
||||
if (g->buffer_end - g->buffer < bytes)\
|
||||
return 0;\
|
||||
return read(g->buffer);\
|
||||
}
|
||||
|
||||
#define DEF(name, bytes, read, write) \
|
||||
@@ -55,6 +71,34 @@ DEF (byte, 1, AV_RB8 , AV_WB8 )
|
||||
#undef DEF64
|
||||
#undef DEF_T
|
||||
|
||||
static av_always_inline void bytestream2_init(GetByteContext *g,
|
||||
const uint8_t *buf, int buf_size)
|
||||
{
|
||||
g->buffer = buf;
|
||||
g->buffer_end = buf + buf_size;
|
||||
}
|
||||
|
||||
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
|
||||
{
|
||||
return g->buffer_end - g->buffer;
|
||||
}
|
||||
|
||||
static av_always_inline void bytestream2_skip(GetByteContext *g,
|
||||
unsigned int size)
|
||||
{
|
||||
g->buffer += FFMIN(g->buffer_end - g->buffer, size);
|
||||
}
|
||||
|
||||
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
|
||||
uint8_t *dst,
|
||||
unsigned int size)
|
||||
{
|
||||
int size2 = FFMIN(g->buffer_end - g->buffer, size);
|
||||
memcpy(dst, g->buffer, size2);
|
||||
g->buffer += size2;
|
||||
return size2;
|
||||
}
|
||||
|
||||
static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, uint8_t *dst, unsigned int size)
|
||||
{
|
||||
memcpy(dst, *b, size);
|
||||
|
@@ -609,12 +609,21 @@ static int decode_pic(AVSContext *h) {
|
||||
static int decode_seq_header(AVSContext *h) {
|
||||
MpegEncContext *s = &h->s;
|
||||
int frame_rate_code;
|
||||
int width, height;
|
||||
|
||||
h->profile = get_bits(&s->gb,8);
|
||||
h->level = get_bits(&s->gb,8);
|
||||
skip_bits1(&s->gb); //progressive sequence
|
||||
s->width = get_bits(&s->gb,14);
|
||||
s->height = get_bits(&s->gb,14);
|
||||
|
||||
width = get_bits(&s->gb, 14);
|
||||
height = get_bits(&s->gb, 14);
|
||||
if ((s->width || s->height) && (s->width != width || s->height != height)) {
|
||||
av_log_missing_feature(s, "Width/height changing in CAVS is", 0);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
s->width = width;
|
||||
s->height = height;
|
||||
|
||||
skip_bits(&s->gb,2); //chroma format
|
||||
skip_bits(&s->gb,3); //sample_precision
|
||||
h->aspect_ratio = get_bits(&s->gb,4);
|
||||
|
@@ -280,6 +280,10 @@ static int cdg_decode_frame(AVCodecContext *avctx,
|
||||
av_log(avctx, AV_LOG_ERROR, "buffer too small for decoder\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
if (buf_size > CDG_HEADER_SIZE + CDG_DATA_SIZE) {
|
||||
av_log(avctx, AV_LOG_ERROR, "buffer too big for decoder\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
ret = avctx->reget_buffer(avctx, &cc->frame);
|
||||
if (ret) {
|
||||
|
@@ -133,9 +133,8 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
|
||||
out2 -= val * old_out2;
|
||||
out3 -= val * old_out3;
|
||||
|
||||
old_out3 = out[-5];
|
||||
|
||||
for (i = 5; i <= filter_length; i += 2) {
|
||||
old_out3 = out[-i];
|
||||
val = filter_coeffs[i-1];
|
||||
|
||||
out0 -= val * old_out3;
|
||||
@@ -154,7 +153,6 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
|
||||
|
||||
FFSWAP(float, old_out0, old_out2);
|
||||
old_out1 = old_out3;
|
||||
old_out3 = out[-i-2];
|
||||
}
|
||||
|
||||
tmp0 = out0;
|
||||
|
@@ -1086,6 +1086,11 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
|
||||
if (extradata_size >= 8){
|
||||
bytestream_get_be32(&edata_ptr); //Unknown unused
|
||||
q->subpacket[s].js_subband_start = bytestream_get_be16(&edata_ptr);
|
||||
if (q->subpacket[s].js_subband_start >= 51) {
|
||||
av_log(avctx, AV_LOG_ERROR, "js_subband_start %d is too large\n", q->subpacket[s].js_subband_start);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
q->subpacket[s].js_vlc_bits = bytestream_get_be16(&edata_ptr);
|
||||
extradata_size -= 8;
|
||||
}
|
||||
|
@@ -23,6 +23,8 @@
|
||||
#include "avcodec.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "bytestream.h"
|
||||
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavutil/lzo.h" // for av_memcpy_backptr
|
||||
|
||||
typedef struct DfaContext {
|
||||
@@ -35,9 +37,13 @@ typedef struct DfaContext {
|
||||
static av_cold int dfa_decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
DfaContext *s = avctx->priv_data;
|
||||
int ret;
|
||||
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
|
||||
if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
|
||||
return ret;
|
||||
|
||||
s->frame_buf = av_mallocz(avctx->width * avctx->height + AV_LZO_OUTPUT_PADDING);
|
||||
if (!s->frame_buf)
|
||||
return AVERROR(ENOMEM);
|
||||
@@ -153,8 +159,7 @@ static int decode_dds1(uint8_t *frame, int width, int height,
|
||||
bitbuf = bytestream_get_le16(&src);
|
||||
mask = 1;
|
||||
}
|
||||
if (src_end - src < 2 || frame_end - frame < 2)
|
||||
return -1;
|
||||
|
||||
if (bitbuf & mask) {
|
||||
v = bytestream_get_le16(&src);
|
||||
offset = (v & 0x1FFF) << 2;
|
||||
@@ -168,8 +173,13 @@ static int decode_dds1(uint8_t *frame, int width, int height,
|
||||
frame += 2;
|
||||
}
|
||||
} else if (bitbuf & (mask << 1)) {
|
||||
frame += bytestream_get_le16(&src) * 2;
|
||||
v = bytestream_get_le16(&src)*2;
|
||||
if (frame - frame_end < v)
|
||||
return AVERROR_INVALIDDATA;
|
||||
frame += v;
|
||||
} else {
|
||||
if (frame_end - frame < width + 3)
|
||||
return AVERROR_INVALIDDATA;
|
||||
frame[0] = frame[1] =
|
||||
frame[width] = frame[width + 1] = *src++;
|
||||
frame += 2;
|
||||
@@ -231,6 +241,7 @@ static int decode_wdlt(uint8_t *frame, int width, int height,
|
||||
const uint8_t *frame_end = frame + width * height;
|
||||
uint8_t *line_ptr;
|
||||
int count, i, v, lines, segments;
|
||||
int y = 0;
|
||||
|
||||
lines = bytestream_get_le16(&src);
|
||||
if (lines > height || src >= src_end)
|
||||
@@ -239,10 +250,12 @@ static int decode_wdlt(uint8_t *frame, int width, int height,
|
||||
while (lines--) {
|
||||
segments = bytestream_get_le16(&src);
|
||||
while ((segments & 0xC000) == 0xC000) {
|
||||
unsigned skip_lines = -(int16_t)segments;
|
||||
unsigned delta = -((int16_t)segments * width);
|
||||
if (frame_end - frame <= delta)
|
||||
if (frame_end - frame <= delta || y + lines + skip_lines > height)
|
||||
return -1;
|
||||
frame += delta;
|
||||
y += skip_lines;
|
||||
segments = bytestream_get_le16(&src);
|
||||
}
|
||||
if (segments & 0x8000) {
|
||||
@@ -250,7 +263,10 @@ static int decode_wdlt(uint8_t *frame, int width, int height,
|
||||
segments = bytestream_get_le16(&src);
|
||||
}
|
||||
line_ptr = frame;
|
||||
if (frame_end - frame < width)
|
||||
return AVERROR_INVALIDDATA;
|
||||
frame += width;
|
||||
y++;
|
||||
while (segments--) {
|
||||
if (src_end - src < 2)
|
||||
return -1;
|
||||
|
@@ -169,6 +169,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx,
|
||||
int in, out = 0;
|
||||
int predictor[2];
|
||||
int channel_number = 0;
|
||||
int stereo = s->channels - 1;
|
||||
short *output_samples = data;
|
||||
int shift[2];
|
||||
unsigned char byte;
|
||||
@@ -177,6 +178,9 @@ static int dpcm_decode_frame(AVCodecContext *avctx,
|
||||
if (!buf_size)
|
||||
return 0;
|
||||
|
||||
if (stereo && (buf_size & 1))
|
||||
buf_size--;
|
||||
|
||||
// almost every DPCM variant expands one byte of data into two
|
||||
if(*data_size/2 < buf_size)
|
||||
return -1;
|
||||
@@ -295,7 +299,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
|
||||
*data_size = out * sizeof(short);
|
||||
return buf_size;
|
||||
return avpkt->size;
|
||||
}
|
||||
|
||||
#define DPCM_DECODER(id, name, long_name_) \
|
||||
|
@@ -1914,7 +1914,7 @@ void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){
|
||||
|
||||
static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
|
||||
long i;
|
||||
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
|
||||
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
|
||||
long a = *(long*)(src+i);
|
||||
long b = *(long*)(dst+i);
|
||||
*(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
|
||||
@@ -1939,7 +1939,7 @@ static void diff_bytes_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
|
||||
}
|
||||
}else
|
||||
#endif
|
||||
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
|
||||
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
|
||||
long a = *(long*)(src1+i);
|
||||
long b = *(long*)(src2+i);
|
||||
*(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
|
||||
@@ -2836,7 +2836,7 @@ int ff_check_alignment(void){
|
||||
|
||||
av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
|
||||
{
|
||||
int i;
|
||||
int i, j;
|
||||
|
||||
ff_check_alignment();
|
||||
|
||||
@@ -3222,11 +3222,15 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
|
||||
if (ARCH_SH4) dsputil_init_sh4 (c, avctx);
|
||||
if (ARCH_BFIN) dsputil_init_bfin (c, avctx);
|
||||
|
||||
for(i=0; i<64; i++){
|
||||
if(!c->put_2tap_qpel_pixels_tab[0][i])
|
||||
c->put_2tap_qpel_pixels_tab[0][i]= c->put_h264_qpel_pixels_tab[0][i];
|
||||
if(!c->avg_2tap_qpel_pixels_tab[0][i])
|
||||
c->avg_2tap_qpel_pixels_tab[0][i]= c->avg_h264_qpel_pixels_tab[0][i];
|
||||
for (i = 0; i < 4; i++) {
|
||||
for (j = 0; j < 16; j++) {
|
||||
if(!c->put_2tap_qpel_pixels_tab[i][j])
|
||||
c->put_2tap_qpel_pixels_tab[i][j] =
|
||||
c->put_h264_qpel_pixels_tab[i][j];
|
||||
if(!c->avg_2tap_qpel_pixels_tab[i][j])
|
||||
c->avg_2tap_qpel_pixels_tab[i][j] =
|
||||
c->avg_h264_qpel_pixels_tab[i][j];
|
||||
}
|
||||
}
|
||||
|
||||
c->put_rv30_tpel_pixels_tab[0][0] = c->put_h264_qpel_pixels_tab[0][0];
|
||||
|
@@ -25,7 +25,14 @@
|
||||
|
||||
#define _WIN32_WINNT 0x0600
|
||||
#define COBJMACROS
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include "dxva2.h"
|
||||
#if HAVE_DXVA_H
|
||||
#include <dxva.h>
|
||||
#endif
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "mpegvideo.h"
|
||||
|
||||
|
@@ -249,7 +249,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
int chunk_type;
|
||||
int inter;
|
||||
|
||||
if (buf_size < 17) {
|
||||
if (buf_size < 26) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Input buffer too small\n");
|
||||
*data_size = 0;
|
||||
return -1;
|
||||
|
@@ -59,12 +59,15 @@ static av_cold int tqi_decode_init(AVCodecContext *avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void tqi_decode_mb(MpegEncContext *s, DCTELEM (*block)[64])
|
||||
static int tqi_decode_mb(MpegEncContext *s, DCTELEM (*block)[64])
|
||||
{
|
||||
int n;
|
||||
s->dsp.clear_blocks(block[0]);
|
||||
for (n=0; n<6; n++)
|
||||
ff_mpeg1_decode_block_intra(s, block[n], n);
|
||||
if (ff_mpeg1_decode_block_intra(s, block[n], n) < 0)
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void tqi_idct_put(TqiContext *t, DCTELEM (*block)[64])
|
||||
@@ -136,7 +139,8 @@ static int tqi_decode_frame(AVCodecContext *avctx,
|
||||
for (s->mb_y=0; s->mb_y<(avctx->height+15)/16; s->mb_y++)
|
||||
for (s->mb_x=0; s->mb_x<(avctx->width+15)/16; s->mb_x++)
|
||||
{
|
||||
tqi_decode_mb(s, t->block);
|
||||
if (tqi_decode_mb(s, t->block) < 0)
|
||||
break;
|
||||
tqi_idct_put(t, t->block);
|
||||
}
|
||||
|
||||
|
@@ -522,7 +522,7 @@ static av_always_inline int encode_line(FFV1Context *s, int w,
|
||||
int run_mode=0;
|
||||
|
||||
if(s->ac){
|
||||
if(c->bytestream_end - c->bytestream < w*20){
|
||||
if(c->bytestream_end - c->bytestream < w*35){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
|
||||
return -1;
|
||||
}
|
||||
|
@@ -27,7 +27,7 @@ const int ff_flac_sample_rate_table[16] =
|
||||
8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000,
|
||||
0, 0, 0, 0 };
|
||||
|
||||
const int16_t ff_flac_blocksize_table[16] = {
|
||||
const int32_t ff_flac_blocksize_table[16] = {
|
||||
0, 192, 576<<0, 576<<1, 576<<2, 576<<3, 0, 0,
|
||||
256<<0, 256<<1, 256<<2, 256<<3, 256<<4, 256<<5, 256<<6, 256<<7
|
||||
};
|
||||
|
@@ -26,6 +26,6 @@
|
||||
|
||||
extern const int ff_flac_sample_rate_table[16];
|
||||
|
||||
extern const int16_t ff_flac_blocksize_table[16];
|
||||
extern const int32_t ff_flac_blocksize_table[16];
|
||||
|
||||
#endif /* AVCODEC_FLACDATA_H */
|
||||
|
@@ -937,14 +937,16 @@ static int encode_residual_ch(FlacEncodeContext *s, int ch)
|
||||
omethod == ORDER_METHOD_8LEVEL) {
|
||||
int levels = 1 << omethod;
|
||||
uint32_t bits[1 << ORDER_METHOD_8LEVEL];
|
||||
int order;
|
||||
int order = -1;
|
||||
int opt_index = levels-1;
|
||||
opt_order = max_order-1;
|
||||
bits[opt_index] = UINT32_MAX;
|
||||
for (i = levels-1; i >= 0; i--) {
|
||||
int last_order = order;
|
||||
order = min_order + (((max_order-min_order+1) * (i+1)) / levels)-1;
|
||||
if (order < 0)
|
||||
order = 0;
|
||||
order = av_clip(order, min_order - 1, max_order - 1);
|
||||
if (order == last_order)
|
||||
continue;
|
||||
encode_residual_lpc(res, smp, n, order+1, coefs[order], shift[order]);
|
||||
bits[i] = find_subframe_rice_params(s, sub, order+1);
|
||||
if (bits[i] < bits[opt_index]) {
|
||||
|
@@ -98,7 +98,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
|
||||
}
|
||||
}
|
||||
|
||||
int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
|
||||
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
|
||||
{
|
||||
int x, y, wrap, a, c, pred_dc;
|
||||
int16_t *dc_val;
|
||||
@@ -226,7 +226,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
|
||||
}
|
||||
}
|
||||
|
||||
void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
|
||||
void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
|
||||
{
|
||||
int x, y, wrap, a, c, pred_dc, scale, i;
|
||||
int16_t *dc_val, *ac_val, *ac_val1;
|
||||
@@ -313,8 +313,8 @@ void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
|
||||
ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
|
||||
}
|
||||
|
||||
int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
|
||||
int *px, int *py)
|
||||
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
|
||||
int *px, int *py)
|
||||
{
|
||||
int wrap;
|
||||
int16_t *A, *B, *C, (*mot_val)[2];
|
||||
|
@@ -38,16 +38,16 @@
|
||||
extern const AVRational ff_h263_pixel_aspect[16];
|
||||
extern const uint8_t ff_h263_cbpy_tab[16][2];
|
||||
|
||||
extern const uint8_t cbpc_b_tab[4][2];
|
||||
extern const uint8_t ff_cbpc_b_tab[4][2];
|
||||
|
||||
extern const uint8_t mvtab[33][2];
|
||||
extern const uint8_t ff_mvtab[33][2];
|
||||
|
||||
extern const uint8_t ff_h263_intra_MCBPC_code[9];
|
||||
extern const uint8_t ff_h263_intra_MCBPC_bits[9];
|
||||
|
||||
extern const uint8_t ff_h263_inter_MCBPC_code[28];
|
||||
extern const uint8_t ff_h263_inter_MCBPC_bits[28];
|
||||
extern const uint8_t h263_mbtype_b_tab[15][2];
|
||||
extern const uint8_t ff_h263_mbtype_b_tab[15][2];
|
||||
|
||||
extern VLC ff_h263_intra_MCBPC_vlc;
|
||||
extern VLC ff_h263_inter_MCBPC_vlc;
|
||||
@@ -55,41 +55,41 @@ extern VLC ff_h263_cbpy_vlc;
|
||||
|
||||
extern RLTable ff_h263_rl_inter;
|
||||
|
||||
extern RLTable rl_intra_aic;
|
||||
extern RLTable ff_rl_intra_aic;
|
||||
|
||||
extern const uint16_t h263_format[8][2];
|
||||
extern const uint8_t modified_quant_tab[2][32];
|
||||
extern const uint16_t ff_h263_format[8][2];
|
||||
extern const uint8_t ff_modified_quant_tab[2][32];
|
||||
extern uint16_t ff_mba_max[6];
|
||||
extern uint8_t ff_mba_length[7];
|
||||
|
||||
extern uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
|
||||
|
||||
|
||||
int h263_decode_motion(MpegEncContext * s, int pred, int f_code);
|
||||
int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code);
|
||||
av_const int ff_h263_aspect_to_info(AVRational aspect);
|
||||
int ff_h263_decode_init(AVCodecContext *avctx);
|
||||
int ff_h263_decode_frame(AVCodecContext *avctx,
|
||||
void *data, int *data_size,
|
||||
AVPacket *avpkt);
|
||||
int ff_h263_decode_end(AVCodecContext *avctx);
|
||||
void h263_encode_mb(MpegEncContext *s,
|
||||
DCTELEM block[6][64],
|
||||
int motion_x, int motion_y);
|
||||
void h263_encode_picture_header(MpegEncContext *s, int picture_number);
|
||||
void h263_encode_gob_header(MpegEncContext * s, int mb_line);
|
||||
int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
|
||||
int *px, int *py);
|
||||
void h263_encode_init(MpegEncContext *s);
|
||||
void h263_decode_init_vlc(MpegEncContext *s);
|
||||
int h263_decode_picture_header(MpegEncContext *s);
|
||||
void ff_h263_encode_mb(MpegEncContext *s,
|
||||
DCTELEM block[6][64],
|
||||
int motion_x, int motion_y);
|
||||
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number);
|
||||
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line);
|
||||
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
|
||||
int *px, int *py);
|
||||
void ff_h263_encode_init(MpegEncContext *s);
|
||||
void ff_h263_decode_init_vlc(MpegEncContext *s);
|
||||
int ff_h263_decode_picture_header(MpegEncContext *s);
|
||||
int ff_h263_decode_gob_header(MpegEncContext *s);
|
||||
void ff_h263_update_motion_val(MpegEncContext * s);
|
||||
void ff_h263_loop_filter(MpegEncContext * s);
|
||||
int ff_h263_decode_mba(MpegEncContext *s);
|
||||
void ff_h263_encode_mba(MpegEncContext *s);
|
||||
void ff_init_qscale_tab(MpegEncContext *s);
|
||||
int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
|
||||
void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);
|
||||
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
|
||||
void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);
|
||||
|
||||
|
||||
/**
|
||||
@@ -119,7 +119,7 @@ static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code
|
||||
int l, bit_size, code;
|
||||
|
||||
if (val == 0) {
|
||||
return mvtab[0][1];
|
||||
return ff_mvtab[0][1];
|
||||
} else {
|
||||
bit_size = f_code - 1;
|
||||
/* modulo encoding */
|
||||
@@ -128,7 +128,7 @@ static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code
|
||||
val--;
|
||||
code = (val >> bit_size) + 1;
|
||||
|
||||
return mvtab[code][1] + 1 + bit_size;
|
||||
return ff_mvtab[code][1] + 1 + bit_size;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -57,7 +57,7 @@ const uint8_t ff_h263_inter_MCBPC_bits[28] = {
|
||||
11, 13, 13, 13,/* inter4Q*/
|
||||
};
|
||||
|
||||
const uint8_t h263_mbtype_b_tab[15][2] = {
|
||||
const uint8_t ff_h263_mbtype_b_tab[15][2] = {
|
||||
{1, 1},
|
||||
{3, 3},
|
||||
{1, 5},
|
||||
@@ -75,7 +75,7 @@ const uint8_t h263_mbtype_b_tab[15][2] = {
|
||||
{1, 8},
|
||||
};
|
||||
|
||||
const uint8_t cbpc_b_tab[4][2] = {
|
||||
const uint8_t ff_cbpc_b_tab[4][2] = {
|
||||
{0, 1},
|
||||
{2, 2},
|
||||
{7, 3},
|
||||
@@ -88,7 +88,7 @@ const uint8_t ff_h263_cbpy_tab[16][2] =
|
||||
{2,5}, {3,6}, {5,4}, {10,4}, {4,4}, {8,4}, {6,4}, {3,2}
|
||||
};
|
||||
|
||||
const uint8_t mvtab[33][2] =
|
||||
const uint8_t ff_mvtab[33][2] =
|
||||
{
|
||||
{1,1}, {1,2}, {1,3}, {1,4}, {3,6}, {5,7}, {4,7}, {3,7},
|
||||
{11,9}, {10,9}, {9,9}, {17,10}, {16,10}, {15,10}, {14,10}, {13,10},
|
||||
@@ -98,7 +98,7 @@ const uint8_t mvtab[33][2] =
|
||||
};
|
||||
|
||||
/* third non intra table */
|
||||
const uint16_t inter_vlc[103][2] = {
|
||||
const uint16_t ff_inter_vlc[103][2] = {
|
||||
{ 0x2, 2 },{ 0xf, 4 },{ 0x15, 6 },{ 0x17, 7 },
|
||||
{ 0x1f, 8 },{ 0x25, 9 },{ 0x24, 9 },{ 0x21, 10 },
|
||||
{ 0x20, 10 },{ 0x7, 11 },{ 0x6, 11 },{ 0x20, 11 },
|
||||
@@ -127,7 +127,7 @@ const uint16_t inter_vlc[103][2] = {
|
||||
{ 0x5e, 12 },{ 0x5f, 12 },{ 0x3, 7 },
|
||||
};
|
||||
|
||||
const int8_t inter_level[102] = {
|
||||
const int8_t ff_inter_level[102] = {
|
||||
1, 2, 3, 4, 5, 6, 7, 8,
|
||||
9, 10, 11, 12, 1, 2, 3, 4,
|
||||
5, 6, 1, 2, 3, 4, 1, 2,
|
||||
@@ -143,7 +143,7 @@ const int8_t inter_level[102] = {
|
||||
1, 1, 1, 1, 1, 1,
|
||||
};
|
||||
|
||||
const int8_t inter_run[102] = {
|
||||
const int8_t ff_inter_run[102] = {
|
||||
0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 1, 1, 1, 1,
|
||||
1, 1, 2, 2, 2, 2, 3, 3,
|
||||
@@ -162,9 +162,9 @@ const int8_t inter_run[102] = {
|
||||
RLTable ff_h263_rl_inter = {
|
||||
102,
|
||||
58,
|
||||
inter_vlc,
|
||||
inter_run,
|
||||
inter_level,
|
||||
ff_inter_vlc,
|
||||
ff_inter_run,
|
||||
ff_inter_level,
|
||||
};
|
||||
|
||||
static const uint16_t intra_vlc_aic[103][2] = {
|
||||
@@ -228,7 +228,7 @@ static const int8_t intra_level_aic[102] = {
|
||||
1, 1, 1, 1, 1, 1,
|
||||
};
|
||||
|
||||
RLTable rl_intra_aic = {
|
||||
RLTable ff_rl_intra_aic = {
|
||||
102,
|
||||
58,
|
||||
intra_vlc_aic,
|
||||
@@ -236,7 +236,7 @@ RLTable rl_intra_aic = {
|
||||
intra_level_aic,
|
||||
};
|
||||
|
||||
const uint16_t h263_format[8][2] = {
|
||||
const uint16_t ff_h263_format[8][2] = {
|
||||
{ 0, 0 },
|
||||
{ 128, 96 },
|
||||
{ 176, 144 },
|
||||
@@ -250,7 +250,7 @@ const uint8_t ff_aic_dc_scale_table[32]={
|
||||
0, 2, 4, 6, 8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62
|
||||
};
|
||||
|
||||
const uint8_t modified_quant_tab[2][32]={
|
||||
const uint8_t ff_modified_quant_tab[2][32]={
|
||||
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
|
||||
{
|
||||
0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,10,11,12,13,14,15,16,17,18,18,19,20,21,22,23,24,25,26,27,28
|
||||
|
@@ -111,7 +111,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
|
||||
if (MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
|
||||
h263_decode_init_vlc(s);
|
||||
ff_h263_decode_init_vlc(s);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -429,7 +429,7 @@ retry:
|
||||
} else if (CONFIG_FLV_DECODER && s->h263_flv) {
|
||||
ret = ff_flv_decode_picture_header(s);
|
||||
} else {
|
||||
ret = h263_decode_picture_header(s);
|
||||
ret = ff_h263_decode_picture_header(s);
|
||||
}
|
||||
|
||||
if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_size);
|
||||
@@ -438,6 +438,13 @@ retry:
|
||||
if (ret < 0){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
|
||||
return -1;
|
||||
} else if ((s->width != avctx->coded_width ||
|
||||
s->height != avctx->coded_height ||
|
||||
(s->width + 15) >> 4 != s->mb_width ||
|
||||
(s->height + 15) >> 4 != s->mb_height) &&
|
||||
(HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))) {
|
||||
av_log_missing_feature(s->avctx, "Width/height/bit depth/chroma idc changing with threads is", 0);
|
||||
return AVERROR_PATCHWELCOME; // width / height changed during parallelized decoding
|
||||
}
|
||||
|
||||
avctx->has_b_frames= !s->low_delay;
|
||||
|

@@ -2617,12 +2617,18 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
else
s->height= 16*s->mb_height - (4>>CHROMA444)*FFMIN(h->sps.crop_bottom, (8<<CHROMA444)-1);
if (FFALIGN(s->avctx->width, 16) == s->width &&
FFALIGN(s->avctx->height, 16) == s->height) {
s->width = s->avctx->width;
s->height = s->avctx->height;
}
if (s->context_initialized
&& ( s->width != s->avctx->width || s->height != s->avctx->height
|| av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio))) {
if(h != h0) {
if(h != h0 || (HAVE_THREADS && h->s.avctx->active_thread_type & FF_THREAD_FRAME)) {
av_log_missing_feature(s->avctx, "Width/height changing with threads is", 0);
return -1; // width / height changed during parallelized decoding
return AVERROR_PATCHWELCOME; // width / height changed during parallelized decoding
}
free_tables(h, 0);
flush_dpb(s->avctx);

@@ -2895,8 +2901,13 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
if(num_ref_idx_active_override_flag){
h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
if(h->slice_type_nos==AV_PICTURE_TYPE_B)
if (h->ref_count[0] < 1)
return AVERROR_INVALIDDATA;
if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
if (h->ref_count[1] < 1)
return AVERROR_INVALIDDATA;
}
}
if (h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){

@@ -3545,7 +3556,9 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){
return 0;
}else{
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y,
s->mb_x - 1, s->mb_y,
(AC_END|DC_END|MV_END)&part_mask);
return -1;
}

@@ -3707,7 +3720,11 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
break;
}
if(buf_index+3 >= buf_size) break;
if (buf_index + 3 >= buf_size) {
buf_index = buf_size;
break;
}
buf_index+=3;
if(buf_index >= next_avc) continue;

@@ -3833,6 +3850,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
hx->inter_gb_ptr= &hx->inter_gb;
if(hx->redundant_pic_count==0 && hx->intra_gb_ptr && hx->s.data_partitioning
&& s->current_picture_ptr
&& s->context_initialized
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B)

@@ -3848,13 +3866,26 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
init_get_bits(&s->gb, ptr, bit_length);
ff_h264_decode_seq_parameter_set(h);
if (s->flags& CODEC_FLAG_LOW_DELAY ||
(h->sps.bitstream_restriction_flag && !h->sps.num_reorder_frames))
s->low_delay=1;
if (s->flags & CODEC_FLAG_LOW_DELAY ||
(h->sps.bitstream_restriction_flag &&
!h->sps.num_reorder_frames)) {
if (s->avctx->has_b_frames > 1 || h->delayed_pic[0])
av_log(avctx, AV_LOG_WARNING, "Delayed frames seen "
"reenabling low delay requires a codec "
"flush.\n");
else
s->low_delay = 1;
}
if(avctx->has_b_frames < 2)
avctx->has_b_frames= !s->low_delay;
if (h->sps.bit_depth_luma != h->sps.bit_depth_chroma) {
av_log_missing_feature(s->avctx,
"Different bit depth between chroma and luma", 1);
return AVERROR_PATCHWELCOME;
}
if (avctx->bits_per_raw_sample != h->sps.bit_depth_luma) {
if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
avctx->bits_per_raw_sample = h->sps.bit_depth_luma;

@@ -621,7 +621,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
down the code */
if(h->slice_type_nos != AV_PICTURE_TYPE_I){
if(s->mb_skip_run==-1)
s->mb_skip_run= get_ue_golomb(&s->gb);
s->mb_skip_run= get_ue_golomb_long(&s->gb);
if (s->mb_skip_run--) {
if(FRAME_MBAFF && (s->mb_y&1) == 0){

@@ -37,6 +37,9 @@
//#undef NDEBUG
#include <assert.h>
#define MAX_LOG2_MAX_FRAME_NUM (12 + 4)
#define MIN_LOG2_MAX_FRAME_NUM 4
static const AVRational pixel_aspect[17]={
{0, 1},
{1, 1},

@@ -311,7 +314,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
MpegEncContext * const s = &h->s;
int profile_idc, level_idc, constraint_set_flags = 0;
unsigned int sps_id;
int i;
int i, log2_max_frame_num_minus4;
SPS *sps;
profile_idc= get_bits(&s->gb, 8);

@@ -340,14 +343,18 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8));
sps->scaling_matrix_present = 0;
if(sps->profile_idc >= 100){ //high profile
if (sps->profile_idc == 100 || sps->profile_idc == 110 ||
sps->profile_idc == 122 || sps->profile_idc == 244 ||
sps->profile_idc == 44 || sps->profile_idc == 83 ||
sps->profile_idc == 86 || sps->profile_idc == 118 ||
sps->profile_idc == 128 || sps->profile_idc == 144) {
sps->chroma_format_idc= get_ue_golomb_31(&s->gb);
if (sps->chroma_format_idc > 3U) {
av_log(h->s.avctx, AV_LOG_ERROR, "chroma_format_idc %d is illegal\n", sps->chroma_format_idc);
goto fail;
}
if(sps->chroma_format_idc == 3)
} else if(sps->chroma_format_idc == 3) {
sps->residual_color_transform_flag = get_bits1(&s->gb);
}
sps->bit_depth_luma = get_ue_golomb(&s->gb) + 8;
sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8;
if (sps->bit_depth_luma > 12U || sps->bit_depth_chroma > 12U) {

@@ -363,7 +370,16 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
sps->bit_depth_chroma = 8;
}
sps->log2_max_frame_num= get_ue_golomb(&s->gb) + 4;
log2_max_frame_num_minus4 = get_ue_golomb(&s->gb);
if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 ||
log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) {
av_log(h->s.avctx, AV_LOG_ERROR,
"log2_max_frame_num_minus4 out of range (0-12): %d\n",
log2_max_frame_num_minus4);
return AVERROR_INVALIDDATA;
}
sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4;
sps->poc_type= get_ue_golomb_31(&s->gb);
if(sps->poc_type == 0){ //FIXME #define

@@ -490,6 +506,9 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length){
if(pps_id >= MAX_PPS_COUNT) {
av_log(h->s.avctx, AV_LOG_ERROR, "pps_id (%d) out of range\n", pps_id);
return -1;
} else if (h->sps.bit_depth_luma > 10) {
av_log(h->s.avctx, AV_LOG_ERROR, "Unimplemented luma bit depth=%d (max=10)\n", h->sps.bit_depth_luma);
return AVERROR_PATCHWELCOME;
}
pps= av_mallocz(sizeof(PPS));
@@ -28,6 +28,7 @@
|
||||
* huffyuv codec for libavcodec.
|
||||
*/
|
||||
|
||||
#include "libavutil/avassert.h"
|
||||
#include "avcodec.h"
|
||||
#include "get_bits.h"
|
||||
#include "put_bits.h"
|
||||
@@ -283,12 +284,13 @@ static void generate_joint_tables(HYuvContext *s){
|
||||
for(i=y=0; y<256; y++){
|
||||
int len0 = s->len[0][y];
|
||||
int limit = VLC_BITS - len0;
|
||||
if(limit <= 0)
|
||||
if(limit <= 0 || !len0)
|
||||
continue;
|
||||
for(u=0; u<256; u++){
|
||||
int len1 = s->len[p][u];
|
||||
if(len1 > limit)
|
||||
if (len1 > limit || !len1)
|
||||
continue;
|
||||
av_assert0(i < (1 << VLC_BITS));
|
||||
len[i] = len0 + len1;
|
||||
bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
|
||||
symbols[i] = (y<<8) + u;
|
||||
@@ -310,18 +312,19 @@ static void generate_joint_tables(HYuvContext *s){
|
||||
for(i=0, g=-16; g<16; g++){
|
||||
int len0 = s->len[p0][g&255];
|
||||
int limit0 = VLC_BITS - len0;
|
||||
if(limit0 < 2)
|
||||
if (limit0 < 2 || !len0)
|
||||
continue;
|
||||
for(b=-16; b<16; b++){
|
||||
int len1 = s->len[p1][b&255];
|
||||
int limit1 = limit0 - len1;
|
||||
if(limit1 < 1)
|
||||
if (limit1 < 1 || !len1)
|
||||
continue;
|
||||
code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
|
||||
for(r=-16; r<16; r++){
|
||||
int len2 = s->len[2][r&255];
|
||||
if(len2 > limit1)
|
||||
if (len2 > limit1 || !len2)
|
||||
continue;
|
||||
av_assert0(i < (1 << VLC_BITS));
|
||||
len[i] = len0 + len1 + len2;
|
||||
bits[i] = (code << len2) + s->bits[2][r&255];
|
||||
if(s->decorrelate){
|
||||
@@ -345,6 +348,7 @@ static void generate_joint_tables(HYuvContext *s){
|
||||
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
|
||||
GetBitContext gb;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
init_get_bits(&gb, src, length*8);
|
||||
|
||||
@@ -355,7 +359,8 @@ static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
|
||||
return -1;
|
||||
}
|
||||
free_vlc(&s->vlc[i]);
|
||||
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
|
||||
if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
generate_joint_tables(s);
|
||||
@@ -367,6 +372,7 @@ static int read_old_huffman_tables(HYuvContext *s){
|
||||
#if 1
|
||||
GetBitContext gb;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
|
||||
if(read_len_table(s->len[0], &gb)<0)
|
||||
@@ -387,7 +393,8 @@ static int read_old_huffman_tables(HYuvContext *s){
|
||||
|
||||
for(i=0; i<3; i++){
|
||||
free_vlc(&s->vlc[i]);
|
||||
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
|
||||
if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
generate_joint_tables(s);
|
||||
|

@@ -176,7 +176,13 @@ static int extract_header(AVCodecContext *const avctx,
const uint8_t *buf;
unsigned buf_size;
IffContext *s = avctx->priv_data;
int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
int palette_size;
if (avctx->extradata_size < 2) {
av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
return AVERROR_INVALIDDATA;
}
palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
if (avpkt) {
int image_size;

@@ -192,8 +198,6 @@ static int extract_header(AVCodecContext *const avctx,
return AVERROR_INVALIDDATA;
}
} else {
if (avctx->extradata_size < 2)
return AVERROR_INVALIDDATA;
buf = avctx->extradata;
buf_size = bytestream_get_be16(&buf);
if (buf_size <= 1 || palette_size < 0) {

@@ -281,7 +285,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
int err;
if (avctx->bits_per_coded_sample <= 8) {
int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
int palette_size;
if (avctx->extradata_size >= 2)
palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
else
palette_size = 0;
avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
(avctx->extradata_size >= 2 && palette_size) ? PIX_FMT_PAL8 : PIX_FMT_GRAY8;
} else if (avctx->bits_per_coded_sample <= 32) {

@@ -625,7 +625,8 @@ static enum PixelFormat avcodec_find_best_pix_fmt1(int64_t pix_fmt_mask,
/* find exact color match with smallest size */
dst_pix_fmt = PIX_FMT_NONE;
min_dist = 0x7fffffff;
for(i = 0;i < PIX_FMT_NB; i++) {
/* test only the first 64 pixel formats to avoid undefined behaviour */
for (i = 0; i < 64; i++) {
if (pix_fmt_mask & (1ULL << i)) {
loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
if (loss == 0) {
@@ -76,6 +76,8 @@ typedef struct {
|
||||
int is_scalable;
|
||||
uint32_t lock_word;
|
||||
IVIPicConfig pic_conf;
|
||||
|
||||
int gop_invalid;
|
||||
} IVI5DecContext;
|
||||
|
||||
|
||||
@@ -219,6 +221,10 @@ static int decode_gop_header(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
}
|
||||
|
||||
if (band->blk_size == 8) {
|
||||
if(quant_mat >= 5){
|
||||
av_log(avctx, AV_LOG_ERROR, "quant_mat %d too large!\n", quant_mat);
|
||||
return -1;
|
||||
}
|
||||
band->intra_base = &ivi5_base_quant_8x8_intra[quant_mat][0];
|
||||
band->inter_base = &ivi5_base_quant_8x8_inter[quant_mat][0];
|
||||
band->intra_scale = &ivi5_scale_quant_8x8_intra[quant_mat][0];
|
||||
@@ -335,8 +341,12 @@ static int decode_pic_hdr(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
ctx->frame_num = get_bits(&ctx->gb, 8);
|
||||
|
||||
if (ctx->frame_type == FRAMETYPE_INTRA) {
|
||||
if (decode_gop_header(ctx, avctx))
|
||||
return -1;
|
||||
ctx->gop_invalid = 1;
|
||||
if (decode_gop_header(ctx, avctx)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid GOP header, skipping frames.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
ctx->gop_invalid = 0;
|
||||
}
|
||||
|
||||
if (ctx->frame_type != FRAMETYPE_NULL) {
|
||||
@@ -453,6 +463,16 @@ static int decode_mb_info(IVI5DecContext *ctx, IVIBandDesc *band,
|
||||
ref_mb = tile->ref_mbs;
|
||||
offs = tile->ypos * band->pitch + tile->xpos;
|
||||
|
||||
if (!ref_mb &&
|
||||
((band->qdelta_present && band->inherit_qdelta) || band->inherit_mv))
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches parameters %d\n",
|
||||
tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size));
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* scale factor for motion vectors */
|
||||
mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3);
|
||||
mv_x = mv_y = 0;
|
||||
@@ -603,8 +623,10 @@ static int decode_band(IVI5DecContext *ctx, int plane_num,
|
||||
|
||||
tile->is_empty = get_bits1(&ctx->gb);
|
||||
if (tile->is_empty) {
|
||||
ff_ivi_process_empty_tile(avctx, band, tile,
|
||||
result = ff_ivi_process_empty_tile(avctx, band, tile,
|
||||
(ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3));
|
||||
if (result < 0)
|
||||
break;
|
||||
} else {
|
||||
tile->data_size = ff_ivi_dec_tile_data_size(&ctx->gb);
|
||||
|
||||
@@ -751,6 +773,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
|
||||
"Error while decoding picture header: %d\n", result);
|
||||
return -1;
|
||||
}
|
||||
if (ctx->gop_invalid)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (ctx->gop_flags & IVI5_IS_PROTECTED) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Password-protected clip!\n");
|
||||
@@ -780,6 +804,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
|
||||
avctx->release_buffer(avctx, &ctx->frame);
|
||||
|
||||
ctx->frame.reference = 0;
|
||||
avcodec_set_dimensions(avctx, ctx->planes[0].width, ctx->planes[0].height);
|
||||
if (avctx->get_buffer(avctx, &ctx->frame) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return -1;
|
||||
|

@@ -65,8 +65,8 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
s->pb_frame = get_bits1(&s->gb);
if (format < 6) {
s->width = h263_format[format][0];
s->height = h263_format[format][1];
s->width = ff_h263_format[format][0];
s->height = ff_h263_format[format][1];
s->avctx->sample_aspect_ratio.num = 12;
s->avctx->sample_aspect_ratio.den = 11;
} else {

@@ -77,7 +77,7 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
}
if(get_bits(&s->gb, 2))
av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
s->loop_filter = get_bits1(&s->gb);
s->loop_filter = get_bits1(&s->gb) * !s->avctx->lowres;
if(get_bits1(&s->gb))
av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
if(get_bits1(&s->gb))
@@ -100,7 +100,7 @@ static VLC cbpc_b_vlc;
|
||||
/* init vlcs */
|
||||
|
||||
/* XXX: find a better solution to handle static init */
|
||||
void h263_decode_init_vlc(MpegEncContext *s)
|
||||
void ff_h263_decode_init_vlc(MpegEncContext *s)
|
||||
{
|
||||
static int done = 0;
|
||||
|
||||
@@ -117,18 +117,18 @@ void h263_decode_init_vlc(MpegEncContext *s)
|
||||
&ff_h263_cbpy_tab[0][1], 2, 1,
|
||||
&ff_h263_cbpy_tab[0][0], 2, 1, 64);
|
||||
INIT_VLC_STATIC(&mv_vlc, MV_VLC_BITS, 33,
|
||||
&mvtab[0][1], 2, 1,
|
||||
&mvtab[0][0], 2, 1, 538);
|
||||
&ff_mvtab[0][1], 2, 1,
|
||||
&ff_mvtab[0][0], 2, 1, 538);
|
||||
init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
|
||||
init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
|
||||
init_rl(&ff_rl_intra_aic, ff_h263_static_rl_table_store[1]);
|
||||
INIT_VLC_RL(ff_h263_rl_inter, 554);
|
||||
INIT_VLC_RL(rl_intra_aic, 554);
|
||||
INIT_VLC_RL(ff_rl_intra_aic, 554);
|
||||
INIT_VLC_STATIC(&h263_mbtype_b_vlc, H263_MBTYPE_B_VLC_BITS, 15,
|
||||
&h263_mbtype_b_tab[0][1], 2, 1,
|
||||
&h263_mbtype_b_tab[0][0], 2, 1, 80);
|
||||
&ff_h263_mbtype_b_tab[0][1], 2, 1,
|
||||
&ff_h263_mbtype_b_tab[0][0], 2, 1, 80);
|
||||
INIT_VLC_STATIC(&cbpc_b_vlc, CBPC_B_VLC_BITS, 4,
|
||||
&cbpc_b_tab[0][1], 2, 1,
|
||||
&cbpc_b_tab[0][0], 2, 1, 8);
|
||||
&ff_cbpc_b_tab[0][1], 2, 1,
|
||||
&ff_cbpc_b_tab[0][0], 2, 1, 8);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -268,7 +268,7 @@ int ff_h263_resync(MpegEncContext *s){
|
||||
return -1;
|
||||
}
|
||||
|
||||
int h263_decode_motion(MpegEncContext * s, int pred, int f_code)
|
||||
int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code)
|
||||
{
|
||||
int code, val, sign, shift, l;
|
||||
code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
|
||||
@@ -379,16 +379,16 @@ static void preview_obmc(MpegEncContext *s){
|
||||
if ((cbpc & 16) == 0) {
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
/* 16x16 motion prediction */
|
||||
mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
if (s->umvplus)
|
||||
mx = h263p_decode_umotion(s, pred_x);
|
||||
else
|
||||
mx = h263_decode_motion(s, pred_x, 1);
|
||||
mx = ff_h263_decode_motion(s, pred_x, 1);
|
||||
|
||||
if (s->umvplus)
|
||||
my = h263p_decode_umotion(s, pred_y);
|
||||
else
|
||||
my = h263_decode_motion(s, pred_y, 1);
|
||||
my = ff_h263_decode_motion(s, pred_y, 1);
|
||||
|
||||
mot_val[0 ]= mot_val[2 ]=
|
||||
mot_val[0+stride]= mot_val[2+stride]= mx;
|
||||
@@ -397,16 +397,16 @@ static void preview_obmc(MpegEncContext *s){
|
||||
} else {
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
for(i=0;i<4;i++) {
|
||||
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
if (s->umvplus)
|
||||
mx = h263p_decode_umotion(s, pred_x);
|
||||
else
|
||||
mx = h263_decode_motion(s, pred_x, 1);
|
||||
mx = ff_h263_decode_motion(s, pred_x, 1);
|
||||
|
||||
if (s->umvplus)
|
||||
my = h263p_decode_umotion(s, pred_y);
|
||||
else
|
||||
my = h263_decode_motion(s, pred_y, 1);
|
||||
my = ff_h263_decode_motion(s, pred_y, 1);
|
||||
if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
|
||||
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
|
||||
mot_val[0] = mx;
|
||||
@@ -430,7 +430,7 @@ static void h263_decode_dquant(MpegEncContext *s){
|
||||
|
||||
if(s->modified_quant){
|
||||
if(get_bits1(&s->gb))
|
||||
s->qscale= modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
|
||||
s->qscale= ff_modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
|
||||
else
|
||||
s->qscale= get_bits(&s->gb, 5);
|
||||
}else
|
||||
@@ -448,7 +448,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
|
||||
|
||||
scan_table = s->intra_scantable.permutated;
|
||||
if (s->h263_aic && s->mb_intra) {
|
||||
rl = &rl_intra_aic;
|
||||
rl = &ff_rl_intra_aic;
|
||||
i = 0;
|
||||
if (s->ac_pred) {
|
||||
if (s->h263_aic_dir)
|
||||
@@ -537,7 +537,7 @@ retry:
|
||||
if (i >= 64){
|
||||
if(s->alt_inter_vlc && rl == &ff_h263_rl_inter && !s->mb_intra){
|
||||
//Looks like a hack but no, it's the way it is supposed to work ...
|
||||
rl = &rl_intra_aic;
|
||||
rl = &ff_rl_intra_aic;
|
||||
i = 0;
|
||||
s->gb= gb;
|
||||
s->dsp.clear_block(block);
|
||||
@@ -554,7 +554,7 @@ retry:
|
||||
}
|
||||
not_coded:
|
||||
if (s->mb_intra && s->h263_aic) {
|
||||
h263_pred_acdc(s, block, n);
|
||||
ff_h263_pred_acdc(s, block, n);
|
||||
i = 63;
|
||||
}
|
||||
s->block_last_index[n] = i;
|
||||
@@ -653,11 +653,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
/* 16x16 motion prediction */
|
||||
s->mv_type = MV_TYPE_16X16;
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
if (s->umvplus)
|
||||
mx = h263p_decode_umotion(s, pred_x);
|
||||
else
|
||||
mx = h263_decode_motion(s, pred_x, 1);
|
||||
mx = ff_h263_decode_motion(s, pred_x, 1);
|
||||
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
@@ -665,7 +665,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
if (s->umvplus)
|
||||
my = h263p_decode_umotion(s, pred_y);
|
||||
else
|
||||
my = h263_decode_motion(s, pred_y, 1);
|
||||
my = ff_h263_decode_motion(s, pred_y, 1);
|
||||
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
@@ -678,18 +678,18 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
s->mv_type = MV_TYPE_8X8;
|
||||
for(i=0;i<4;i++) {
|
||||
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
if (s->umvplus)
|
||||
mx = h263p_decode_umotion(s, pred_x);
|
||||
else
|
||||
mx = h263_decode_motion(s, pred_x, 1);
|
||||
mx = ff_h263_decode_motion(s, pred_x, 1);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
if (s->umvplus)
|
||||
my = h263p_decode_umotion(s, pred_y);
|
||||
else
|
||||
my = h263_decode_motion(s, pred_y, 1);
|
||||
my = ff_h263_decode_motion(s, pred_y, 1);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
s->mv[0][i][0] = mx;
|
||||
@@ -761,11 +761,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
//FIXME UMV
|
||||
|
||||
if(USES_LIST(mb_type, 0)){
|
||||
int16_t *mot_val= h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
int16_t *mot_val= ff_h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
|
||||
mx = h263_decode_motion(s, mx, 1);
|
||||
my = h263_decode_motion(s, my, 1);
|
||||
mx = ff_h263_decode_motion(s, mx, 1);
|
||||
my = ff_h263_decode_motion(s, my, 1);
|
||||
|
||||
s->mv[0][0][0] = mx;
|
||||
s->mv[0][0][1] = my;
|
||||
@@ -774,11 +774,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
}
|
||||
|
||||
if(USES_LIST(mb_type, 1)){
|
||||
int16_t *mot_val= h263_pred_motion(s, 0, 1, &mx, &my);
|
||||
int16_t *mot_val= ff_h263_pred_motion(s, 0, 1, &mx, &my);
|
||||
s->mv_dir |= MV_DIR_BACKWARD;
|
||||
|
||||
mx = h263_decode_motion(s, mx, 1);
|
||||
my = h263_decode_motion(s, my, 1);
|
||||
mx = ff_h263_decode_motion(s, mx, 1);
|
||||
my = ff_h263_decode_motion(s, my, 1);
|
||||
|
||||
s->mv[1][0][0] = mx;
|
||||
s->mv[1][0][1] = my;
|
||||
@@ -829,8 +829,8 @@ intra:
|
||||
}
|
||||
|
||||
while(pb_mv_count--){
|
||||
h263_decode_motion(s, 0, 1);
|
||||
h263_decode_motion(s, 0, 1);
|
||||
ff_h263_decode_motion(s, 0, 1);
|
||||
ff_h263_decode_motion(s, 0, 1);
|
||||
}
|
||||
|
||||
/* decode each block */
|
||||
@@ -864,7 +864,7 @@ end:
|
||||
}
|
||||
|
||||
/* most is hardcoded. should extend to handle all h263 streams */
|
||||
int h263_decode_picture_header(MpegEncContext *s)
|
||||
int ff_h263_decode_picture_header(MpegEncContext *s)
|
||||
{
|
||||
int format, width, height, i;
|
||||
uint32_t startcode;
|
||||
@@ -916,8 +916,8 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
if (format != 7 && format != 6) {
|
||||
s->h263_plus = 0;
|
||||
/* H.263v1 */
|
||||
width = h263_format[format][0];
|
||||
height = h263_format[format][1];
|
||||
width = ff_h263_format[format][0];
|
||||
height = ff_h263_format[format][1];
|
||||
if (!width)
|
||||
return -1;
|
||||
|
||||
@@ -961,6 +961,8 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
s->h263_aic = get_bits1(&s->gb); /* Advanced Intra Coding (AIC) */
|
||||
s->loop_filter= get_bits1(&s->gb);
|
||||
s->unrestricted_mv = s->umvplus || s->obmc || s->loop_filter;
|
||||
if(s->avctx->lowres)
|
||||
s->loop_filter = 0;
|
||||
|
||||
s->h263_slice_structured= get_bits1(&s->gb);
|
||||
if (get_bits1(&s->gb) != 0) {
|
||||
@@ -1024,8 +1026,8 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
s->avctx->sample_aspect_ratio= ff_h263_pixel_aspect[s->aspect_ratio_info];
|
||||
}
|
||||
} else {
|
||||
width = h263_format[format][0];
|
||||
height = h263_format[format][1];
|
||||
width = ff_h263_format[format][0];
|
||||
height = ff_h263_format[format][1];
|
||||
s->avctx->sample_aspect_ratio= (AVRational){12,11};
|
||||
}
|
||||
if ((width == 0) || (height == 0))
|
||||
|
@@ -102,7 +102,7 @@ av_const int ff_h263_aspect_to_info(AVRational aspect){
|
||||
return FF_ASPECT_EXTENDED;
|
||||
}
|
||||
|
||||
void h263_encode_picture_header(MpegEncContext * s, int picture_number)
|
||||
void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number)
|
||||
{
|
||||
int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
|
||||
int best_clock_code=1;
|
||||
@@ -141,7 +141,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
|
||||
put_bits(&s->pb, 1, 0); /* camera off */
|
||||
put_bits(&s->pb, 1, 0); /* freeze picture release off */
|
||||
|
||||
format = ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format), s->width, s->height);
|
||||
format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height);
|
||||
if (!s->h263_plus) {
|
||||
/* H.263v1 */
|
||||
put_bits(&s->pb, 3, format);
|
||||
@@ -247,7 +247,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
|
||||
/**
|
||||
* Encode a group of blocks header.
|
||||
*/
|
||||
void h263_encode_gob_header(MpegEncContext * s, int mb_line)
|
||||
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line)
|
||||
{
|
||||
put_bits(&s->pb, 17, 1); /* GBSC */
|
||||
|
||||
@@ -333,7 +333,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
|
||||
} else {
|
||||
i = 0;
|
||||
if (s->h263_aic && s->mb_intra)
|
||||
rl = &rl_intra_aic;
|
||||
rl = &ff_rl_intra_aic;
|
||||
|
||||
if(s->alt_inter_vlc && !s->mb_intra){
|
||||
int aic_vlc_bits=0;
|
||||
@@ -353,14 +353,14 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
|
||||
if(level<0) level= -level;
|
||||
|
||||
code = get_rl_index(rl, last, run, level);
|
||||
aic_code = get_rl_index(&rl_intra_aic, last, run, level);
|
||||
aic_code = get_rl_index(&ff_rl_intra_aic, last, run, level);
|
||||
inter_vlc_bits += rl->table_vlc[code][1]+1;
|
||||
aic_vlc_bits += rl_intra_aic.table_vlc[aic_code][1]+1;
|
||||
aic_vlc_bits += ff_rl_intra_aic.table_vlc[aic_code][1]+1;
|
||||
|
||||
if (code == rl->n) {
|
||||
inter_vlc_bits += 1+6+8-1;
|
||||
}
|
||||
if (aic_code == rl_intra_aic.n) {
|
||||
if (aic_code == ff_rl_intra_aic.n) {
|
||||
aic_vlc_bits += 1+6+8-1;
|
||||
wrong_pos += run + 1;
|
||||
}else
|
||||
@@ -370,7 +370,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
|
||||
}
|
||||
i = 0;
|
||||
if(aic_vlc_bits < inter_vlc_bits && wrong_pos > 63)
|
||||
rl = &rl_intra_aic;
|
||||
rl = &ff_rl_intra_aic;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -454,9 +454,9 @@ static void h263p_encode_umotion(MpegEncContext * s, int val)
|
||||
}
|
||||
}
|
||||
|
||||
void h263_encode_mb(MpegEncContext * s,
|
||||
DCTELEM block[6][64],
|
||||
int motion_x, int motion_y)
|
||||
void ff_h263_encode_mb(MpegEncContext * s,
|
||||
DCTELEM block[6][64],
|
||||
int motion_x, int motion_y)
|
||||
{
|
||||
int cbpc, cbpy, i, cbp, pred_x, pred_y;
|
||||
int16_t pred_dc;
|
||||
@@ -500,7 +500,7 @@ void h263_encode_mb(MpegEncContext * s,
|
||||
}
|
||||
|
||||
/* motion vectors: 16x16 mode */
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
|
||||
if (!s->umvplus) {
|
||||
ff_h263_encode_motion_vector(s, motion_x - pred_x,
|
||||
@@ -527,7 +527,7 @@ void h263_encode_mb(MpegEncContext * s,
|
||||
|
||||
for(i=0; i<4; i++){
|
||||
/* motion vectors: 8x8 mode*/
|
||||
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
|
||||
motion_x= s->current_picture.motion_val[0][ s->block_index[i] ][0];
|
||||
motion_y= s->current_picture.motion_val[0][ s->block_index[i] ][1];
|
||||
@@ -561,7 +561,7 @@ void h263_encode_mb(MpegEncContext * s,
|
||||
if(i<4) scale= s->y_dc_scale;
|
||||
else scale= s->c_dc_scale;
|
||||
|
||||
pred_dc = h263_pred_dc(s, i, &dc_ptr[i]);
|
||||
pred_dc = ff_h263_pred_dc(s, i, &dc_ptr[i]);
|
||||
level -= pred_dc;
|
||||
/* Quant */
|
||||
if (level >= 0)
|
||||
@@ -662,7 +662,7 @@ void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
|
||||
if (val == 0) {
|
||||
/* zero vector */
|
||||
code = 0;
|
||||
put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
|
||||
put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
|
||||
} else {
|
||||
bit_size = f_code - 1;
|
||||
range = 1 << bit_size;
|
||||
@@ -677,7 +677,7 @@ void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
|
||||
code = (val >> bit_size) + 1;
|
||||
bits = val & (range - 1);
|
||||
|
||||
put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
|
||||
put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
|
||||
if (bit_size > 0) {
|
||||
put_bits(&s->pb, bit_size, bits);
|
||||
}
|
||||
@@ -693,7 +693,7 @@ static void init_mv_penalty_and_fcode(MpegEncContext *s)
|
||||
for(mv=-MAX_MV; mv<=MAX_MV; mv++){
|
||||
int len;
|
||||
|
||||
if(mv==0) len= mvtab[0][1];
|
||||
if(mv==0) len= ff_mvtab[0][1];
|
||||
else{
|
||||
int val, bit_size, code;
|
||||
|
||||
@@ -705,9 +705,9 @@ static void init_mv_penalty_and_fcode(MpegEncContext *s)
|
||||
val--;
|
||||
code = (val >> bit_size) + 1;
|
||||
if(code<33){
|
||||
len= mvtab[code][1] + 1 + bit_size;
|
||||
len= ff_mvtab[code][1] + 1 + bit_size;
|
||||
}else{
|
||||
len= mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
|
||||
len= ff_mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -769,7 +769,7 @@ static void init_uni_h263_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_t
|
||||
}
|
||||
}
|
||||
|
||||
void h263_encode_init(MpegEncContext *s)
|
||||
void ff_h263_encode_init(MpegEncContext *s)
|
||||
{
|
||||
static int done = 0;
|
||||
|
||||
@@ -777,9 +777,9 @@ void h263_encode_init(MpegEncContext *s)
|
||||
done = 1;
|
||||
|
||||
init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
|
||||
init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
|
||||
init_rl(&ff_rl_intra_aic, ff_h263_static_rl_table_store[1]);
|
||||
|
||||
init_uni_h263_rl_tab(&rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
|
||||
init_uni_h263_rl_tab(&ff_rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
|
||||
init_uni_h263_rl_tab(&ff_h263_rl_inter , NULL, uni_h263_inter_rl_len);
|
||||
|
||||
init_mv_penalty_and_fcode(s);
|
||||
|
@@ -123,6 +123,10 @@ int ff_ivi_dec_huff_desc(GetBitContext *gb, int desc_coded, int which_tab,
|
||||
if (huff_tab->tab_sel == 7) {
|
||||
/* custom huffman table (explicitly encoded) */
|
||||
new_huff.num_rows = get_bits(gb, 4);
|
||||
if (!new_huff.num_rows) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Empty custom Huffman table!\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
for (i = 0; i < new_huff.num_rows; i++)
|
||||
new_huff.xbits[i] = get_bits(gb, 4);
|
||||
@@ -136,9 +140,10 @@ int ff_ivi_dec_huff_desc(GetBitContext *gb, int desc_coded, int which_tab,
|
||||
result = ff_ivi_create_huff_from_desc(&huff_tab->cust_desc,
|
||||
&huff_tab->cust_tab, 0);
|
||||
if (result) {
|
||||
huff_tab->cust_desc.num_rows = 0; // reset faulty description
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Error while initializing custom vlc table!\n");
|
||||
return -1;
|
||||
return result;
|
||||
}
|
||||
}
|
||||
huff_tab->tab = &huff_tab->cust_tab;
|
||||
@@ -207,14 +212,15 @@ int av_cold ff_ivi_init_planes(IVIPlaneDesc *planes, const IVIPicConfig *cfg)
|
||||
band->width = b_width;
|
||||
band->height = b_height;
|
||||
band->pitch = width_aligned;
|
||||
band->bufs[0] = av_malloc(buf_size);
|
||||
band->bufs[1] = av_malloc(buf_size);
|
||||
band->aheight = height_aligned;
|
||||
band->bufs[0] = av_mallocz(buf_size);
|
||||
band->bufs[1] = av_mallocz(buf_size);
|
||||
if (!band->bufs[0] || !band->bufs[1])
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
/* allocate the 3rd band buffer for scalability mode */
|
||||
if (cfg->luma_bands > 1) {
|
||||
band->bufs[2] = av_malloc(buf_size);
|
||||
band->bufs[2] = av_mallocz(buf_size);
|
||||
if (!band->bufs[2])
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
@@ -377,6 +383,21 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
|
||||
mv_x >>= 1;
|
||||
mv_y >>= 1; /* convert halfpel vectors into fullpel ones */
|
||||
}
|
||||
if (mb->type) {
|
||||
int dmv_x, dmv_y, cx, cy;
|
||||
|
||||
dmv_x = mb->mv_x >> band->is_halfpel;
|
||||
dmv_y = mb->mv_y >> band->is_halfpel;
|
||||
cx = mb->mv_x & band->is_halfpel;
|
||||
cy = mb->mv_y & band->is_halfpel;
|
||||
|
||||
if ( mb->xpos + dmv_x < 0
|
||||
|| mb->xpos + dmv_x + band->mb_size + cx > band->pitch
|
||||
|| mb->ypos + dmv_y < 0
|
||||
|| mb->ypos + dmv_y + band->mb_size + cy > band->aheight) {
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (blk = 0; blk < num_blocks; blk++) {
|
||||
@@ -389,6 +410,11 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
|
||||
}
|
||||
|
||||
if (cbp & 1) { /* block coded ? */
|
||||
if (!band->scan) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Scan pattern is not set.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
scan_pos = -1;
|
||||
memset(trvec, 0, num_coeffs*sizeof(trvec[0])); /* zero transform vector */
|
||||
memset(col_flags, 0, sizeof(col_flags)); /* zero column flags */
|
||||
@@ -469,7 +495,7 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
int ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
IVITile *tile, int32_t mv_scale)
|
||||
{
|
||||
int x, y, need_mc, mbn, blk, num_blocks, mv_x, mv_y, mc_type;
|
||||
@@ -480,6 +506,13 @@ void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
void (*mc_no_delta_func)(int16_t *buf, const int16_t *ref_buf, uint32_t pitch,
|
||||
int mc_type);
|
||||
|
||||
if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches "
|
||||
"parameters %d in ivi_process_empty_tile()\n",
|
||||
tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size));
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
offs = tile->ypos * band->pitch + tile->xpos;
|
||||
mb = tile->mbs;
|
||||
ref_mb = tile->ref_mbs;
|
||||
@@ -560,6 +593,8 @@ void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
dst += band->pitch;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
@@ -132,6 +132,7 @@ typedef struct {
|
||||
int band_num; ///< band number
|
||||
int width;
|
||||
int height;
|
||||
int aheight; ///< aligned band height
|
||||
const uint8_t *data_ptr; ///< ptr to the first byte of the band data
|
||||
int data_size; ///< size of the band data
|
||||
int16_t *buf; ///< pointer to the output buffer for this band
|
||||
@@ -324,7 +325,7 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile);
|
||||
* @param[in] tile pointer to the tile descriptor
|
||||
* @param[in] mv_scale scaling factor for motion vectors
|
||||
*/
|
||||
void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
int ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
IVITile *tile, int32_t mv_scale);
|
||||
|
||||
/**
|
||||
|

@@ -28,6 +28,7 @@
#include "avcodec.h"
#include "bytestream.h"
#include "j2k.h"
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#define JP2_SIG_TYPE 0x6A502020

@@ -283,6 +284,10 @@ static int get_cox(J2kDecoderContext *s, J2kCodingStyle *c)
c->log2_cblk_width = bytestream_get_byte(&s->buf) + 2; // cblk width
c->log2_cblk_height = bytestream_get_byte(&s->buf) + 2; // cblk height
if (c->log2_cblk_width > 6 || c->log2_cblk_height > 6) {
return AVERROR_PATCHWELCOME;
}
c->cblk_style = bytestream_get_byte(&s->buf);
if (c->cblk_style != 0){ // cblk style
av_log(s->avctx, AV_LOG_WARNING, "extra cblk styles %X\n", c->cblk_style);

@@ -699,6 +704,9 @@ static int decode_cblk(J2kDecoderContext *s, J2kCodingStyle *codsty, J2kT1Contex
int bpass_csty_symbol = J2K_CBLK_BYPASS & codsty->cblk_style;
int vert_causal_ctx_csty_symbol = J2K_CBLK_VSC & codsty->cblk_style;
av_assert0(width <= J2K_MAX_CBLKW);
av_assert0(height <= J2K_MAX_CBLKH);
for (y = 0; y < height+2; y++)
memset(t1->flags[y], 0, (width+2)*sizeof(int));

@@ -143,6 +143,10 @@ static int decode_frame(AVCodecContext *avctx,
buf += 5;
if (video_size) {
if(video_size < 0) {
av_log(avctx, AV_LOG_ERROR, "video size %d invalid\n", video_size);
return AVERROR_INVALIDDATA;
}
if (avctx->reget_buffer(avctx, &s->frame) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;

@@ -33,6 +33,7 @@
#define KMVC_KEYFRAME 0x80
#define KMVC_PALETTE 0x40
#define KMVC_METHOD 0x0F
#define MAX_PALSIZE 256
/*
* Decoder context

@@ -43,7 +44,7 @@ typedef struct KmvcContext {
int setpal;
int palsize;
uint32_t pal[256];
uint32_t pal[MAX_PALSIZE];
uint8_t *cur, *prev;
uint8_t *frm0, *frm1;
} KmvcContext;

@@ -414,6 +415,10 @@ static av_cold int decode_init(AVCodecContext * avctx)
c->palsize = 127;
} else {
c->palsize = AV_RL16(avctx->extradata + 10);
if (c->palsize >= MAX_PALSIZE) {
av_log(avctx, AV_LOG_ERROR, "KMVC palette too large\n");
return AVERROR_INVALIDDATA;
}
}
if (avctx->extradata_size == 1036) { // palette in extradata

@@ -322,6 +322,11 @@ static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
output_zeros:
if (l->zeros_rem) {
count = FFMIN(l->zeros_rem, width - i);
if (end - dst < count) {
av_log(l->avctx, AV_LOG_ERROR, "Too many zeros remaining.\n");
return AVERROR_INVALIDDATA;
}
memset(dst, 0, count);
l->zeros_rem -= count;
dst += count;
@@ -55,6 +55,11 @@ static av_cold int mp_decode_init(AVCodecContext *avctx)
|
||||
int w4 = (avctx->width + 3) & ~3;
|
||||
int h4 = (avctx->height + 3) & ~3;
|
||||
|
||||
if(avctx->extradata_size < 2){
|
||||
av_log(avctx, AV_LOG_ERROR, "extradata too small\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
motionpixels_tableinit();
|
||||
mp->avctx = avctx;
|
||||
dsputil_init(&mp->dsp, avctx);
|
||||
@@ -191,10 +196,13 @@ static void mp_decode_line(MotionPixelsContext *mp, GetBitContext *gb, int y)
|
||||
p = mp_get_yuv_from_rgb(mp, x - 1, y);
|
||||
} else {
|
||||
p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
|
||||
p.y = av_clip(p.y, 0, 31);
|
||||
if ((x & 3) == 0) {
|
||||
if ((y & 3) == 0) {
|
||||
p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
|
||||
p.v = av_clip(p.v, -32, 31);
|
||||
p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
|
||||
p.u = av_clip(p.u, -32, 31);
|
||||
mp->hpt[((y / 4) * mp->avctx->width + x) / 4] = p;
|
||||
} else {
|
||||
p.v = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].v;
|
||||
@@ -218,9 +226,12 @@ static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb)
|
||||
p = mp_get_yuv_from_rgb(mp, 0, y);
|
||||
} else {
|
||||
p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
|
||||
p.y = av_clip(p.y, 0, 31);
|
||||
if ((y & 3) == 0) {
|
||||
p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
|
||||
p.v = av_clip(p.v, -32, 31);
|
||||
p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
|
||||
p.u = av_clip(p.u, -32, 31);
|
||||
}
|
||||
mp->vpt[y] = p;
|
||||
mp_set_rgb_from_yuv(mp, 0, y, &p);
|
||||
|

@@ -138,7 +138,8 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
c->frames = 1 << (get_bits(&gb, 3) * 2);
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
avctx->channel_layout = (channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
avctx->channels = channels;
if(vlc_initialized) return 0;
av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n");

@@ -1151,6 +1151,7 @@ typedef struct Mpeg1Context {
int save_width, save_height, save_progressive_seq;
AVRational frame_rate_ext; ///< MPEG-2 specific framerate modificator
int sync; ///< Did we reach a sync point like a GOP/SEQ/KEYFrame?
int extradata_decoded;
} Mpeg1Context;
static av_cold int mpeg_decode_init(AVCodecContext *avctx)

@@ -1287,7 +1288,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){
s1->save_width != s->width ||
s1->save_height != s->height ||
s1->save_aspect_info != s->aspect_ratio_info||
s1->save_progressive_seq != s->progressive_sequence ||
(s1->save_progressive_seq != s->progressive_sequence && (s->height&31)) ||
0)
{

@@ -2315,8 +2316,10 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
s->slice_count= 0;
if(avctx->extradata && !avctx->frame_number)
if (avctx->extradata && !s->extradata_decoded) {
decode_chunks(avctx, picture, data_size, avctx->extradata, avctx->extradata_size);
s->extradata_decoded = 1;
}
return decode_chunks(avctx, picture, data_size, buf, buf_size);
}
@@ -651,13 +651,13 @@ try_again:
|
||||
if ((cbpc & 16) == 0) {
|
||||
/* 16x16 motion prediction */
|
||||
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
if(!s->mcsel){
|
||||
mx = h263_decode_motion(s, pred_x, s->f_code);
|
||||
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
my = h263_decode_motion(s, pred_y, s->f_code);
|
||||
my = ff_h263_decode_motion(s, pred_y, s->f_code);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
@@ -675,12 +675,12 @@ try_again:
|
||||
int i;
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
for(i=0;i<4;i++) {
|
||||
int16_t *mot_val= h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mx = h263_decode_motion(s, pred_x, s->f_code);
|
||||
int16_t *mot_val= ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
my = h263_decode_motion(s, pred_y, s->f_code);
|
||||
my = ff_h263_decode_motion(s, pred_y, s->f_code);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
mot_val[0] = mx;
|
||||
@@ -1245,14 +1245,14 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
s->field_select[0][0]= get_bits1(&s->gb);
|
||||
s->field_select[0][1]= get_bits1(&s->gb);
|
||||
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
|
||||
for(i=0; i<2; i++){
|
||||
mx = h263_decode_motion(s, pred_x, s->f_code);
|
||||
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
my = h263_decode_motion(s, pred_y/2, s->f_code);
|
||||
my = ff_h263_decode_motion(s, pred_y/2, s->f_code);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
|
||||
@@ -1263,13 +1263,13 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
/* 16x16 motion prediction */
|
||||
s->mv_type = MV_TYPE_16X16;
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
mx = h263_decode_motion(s, pred_x, s->f_code);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
|
||||
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
my = h263_decode_motion(s, pred_y, s->f_code);
|
||||
my = ff_h263_decode_motion(s, pred_y, s->f_code);
|
||||
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
@@ -1280,12 +1280,12 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
s->mv_type = MV_TYPE_8X8;
|
||||
for(i=0;i<4;i++) {
|
||||
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mx = h263_decode_motion(s, pred_x, s->f_code);
|
||||
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
my = h263_decode_motion(s, pred_y, s->f_code);
|
||||
my = ff_h263_decode_motion(s, pred_y, s->f_code);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
s->mv[0][i][0] = mx;
|
||||
@@ -1381,8 +1381,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
if(USES_LIST(mb_type, 0)){
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
|
||||
mx = h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
|
||||
my = h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
|
||||
mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
|
||||
my = ff_h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
|
||||
s->last_mv[0][1][0]= s->last_mv[0][0][0]= s->mv[0][0][0] = mx;
|
||||
s->last_mv[0][1][1]= s->last_mv[0][0][1]= s->mv[0][0][1] = my;
|
||||
}
|
||||
@@ -1390,8 +1390,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
if(USES_LIST(mb_type, 1)){
|
||||
s->mv_dir |= MV_DIR_BACKWARD;
|
||||
|
||||
mx = h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
|
||||
my = h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
|
||||
mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
|
||||
my = ff_h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
|
||||
s->last_mv[1][1][0]= s->last_mv[1][0][0]= s->mv[1][0][0] = mx;
|
||||
s->last_mv[1][1][1]= s->last_mv[1][0][1]= s->mv[1][0][1] = my;
|
||||
}
|
||||
@@ -1402,8 +1402,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
|
||||
for(i=0; i<2; i++){
|
||||
mx = h263_decode_motion(s, s->last_mv[0][i][0] , s->f_code);
|
||||
my = h263_decode_motion(s, s->last_mv[0][i][1]/2, s->f_code);
|
||||
mx = ff_h263_decode_motion(s, s->last_mv[0][i][0] , s->f_code);
|
||||
my = ff_h263_decode_motion(s, s->last_mv[0][i][1]/2, s->f_code);
|
||||
s->last_mv[0][i][0]= s->mv[0][i][0] = mx;
|
||||
s->last_mv[0][i][1]= (s->mv[0][i][1] = my)*2;
|
||||
}
|
||||
@@ -1413,8 +1413,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
s->mv_dir |= MV_DIR_BACKWARD;
|
||||
|
||||
for(i=0; i<2; i++){
|
||||
mx = h263_decode_motion(s, s->last_mv[1][i][0] , s->b_code);
|
||||
my = h263_decode_motion(s, s->last_mv[1][i][1]/2, s->b_code);
|
||||
mx = ff_h263_decode_motion(s, s->last_mv[1][i][0] , s->b_code);
|
||||
my = ff_h263_decode_motion(s, s->last_mv[1][i][1]/2, s->b_code);
|
||||
s->last_mv[1][i][0]= s->mv[1][i][0] = mx;
|
||||
s->last_mv[1][i][1]= (s->mv[1][i][1] = my)*2;
|
||||
}
|
||||
@@ -1426,8 +1426,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
if(IS_SKIP(mb_type))
|
||||
mx=my=0;
|
||||
else{
|
||||
mx = h263_decode_motion(s, 0, 1);
|
||||
my = h263_decode_motion(s, 0, 1);
|
||||
mx = ff_h263_decode_motion(s, 0, 1);
|
||||
my = ff_h263_decode_motion(s, 0, 1);
|
||||
}
|
||||
|
||||
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
|
||||
|

@@ -727,7 +727,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
}
/* motion vectors: 16x16 mode */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_encode_motion_vector(s, motion_x - pred_x,
motion_y - pred_y, s->f_code);

@@ -751,7 +751,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
}
/* motion vectors: 16x8 interlaced mode */
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
pred_y /=2;
put_bits(&s->pb, 1, s->field_select[0][0]);

@@ -779,7 +779,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
for(i=0; i<4; i++){
/* motion vectors: 8x8 mode*/
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
ff_h263_encode_motion_vector(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x,
s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);

@@ -210,7 +210,7 @@ static void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g){
else
g->long_end = 4; /* 8000 Hz */
g->short_start = 2 + (s->sample_rate_index != 8);
g->short_start = 3;
} else {
g->long_end = 0;
g->short_start = 0;

@@ -725,7 +725,8 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
0, 0, 0,
ref_picture, pix_op, qpix_op,
s->mv[dir][0][0], s->mv[dir][0][1], 16);
}else if(!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) && s->mspel && s->codec_id == CODEC_ID_WMV2){
} else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
s->mspel && s->codec_id == CODEC_ID_WMV2) {
ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
ref_picture, pix_op,
s->mv[dir][0][0], s->mv[dir][0][1], 16);
@@ -582,7 +582,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
|
||||
break;
|
||||
case CODEC_ID_H263:
|
||||
if (!CONFIG_H263_ENCODER) return -1;
|
||||
if (ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format), s->width, s->height) == 8) {
|
||||
if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height) == 8) {
|
||||
av_log(avctx, AV_LOG_ERROR, "The specified picture size of %dx%d is not valid for the H.263 codec.\nValid sizes are 128x96, 176x144, 352x288, 704x576, and 1408x1152. Try H.263+.\n", s->width, s->height);
|
||||
return -1;
|
||||
}
|
||||
@@ -708,7 +708,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
|
||||
if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
|
||||
ff_h261_encode_init(s);
|
||||
if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
|
||||
h263_encode_init(s);
|
||||
ff_h263_encode_init(s);
|
||||
if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
|
||||
ff_msmpeg4_encode_init(s);
|
||||
if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
|
||||
@@ -1768,7 +1768,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
|
||||
case CODEC_ID_RV10:
|
||||
case CODEC_ID_RV20:
|
||||
if (CONFIG_H263_ENCODER)
|
||||
h263_encode_mb(s, s->block, motion_x, motion_y);
|
||||
ff_h263_encode_mb(s, s->block, motion_x, motion_y);
|
||||
break;
|
||||
case CODEC_ID_MJPEG:
|
||||
if (CONFIG_MJPEG_ENCODER)
|
||||
@@ -2200,7 +2200,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
|
||||
case CODEC_ID_H263:
|
||||
case CODEC_ID_H263P:
|
||||
if (CONFIG_H263_ENCODER)
|
||||
h263_encode_gob_header(s, mb_y);
|
||||
ff_h263_encode_gob_header(s, mb_y);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -2950,7 +2950,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
|
||||
else if (CONFIG_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1)
|
||||
ff_flv_encode_picture_header(s, picture_number);
|
||||
else if (CONFIG_H263_ENCODER)
|
||||
h263_encode_picture_header(s, picture_number);
|
||||
ff_h263_encode_picture_header(s, picture_number);
|
||||
break;
|
||||
case FMT_MPEG1:
|
||||
if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
|
||||
|
@@ -511,7 +511,7 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
|
||||
if (val == 0) {
|
||||
/* zero vector */
|
||||
code = 0;
|
||||
put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
|
||||
put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
|
||||
} else {
|
||||
bit_size = s->f_code - 1;
|
||||
range = 1 << bit_size;
|
||||
@@ -530,7 +530,7 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
|
||||
code = (val >> bit_size) + 1;
|
||||
bits = val & (range - 1);
|
||||
|
||||
put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
|
||||
put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
|
||||
if (bit_size > 0) {
|
||||
put_bits(&s->pb, bit_size, bits);
|
||||
}
|
||||
@@ -579,7 +579,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
|
||||
|
||||
s->misc_bits += get_bits_diff(s);
|
||||
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
msmpeg4v2_encode_motion(s, motion_x - pred_x);
|
||||
msmpeg4v2_encode_motion(s, motion_y - pred_y);
|
||||
}else{
|
||||
@@ -590,7 +590,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
|
||||
s->misc_bits += get_bits_diff(s);
|
||||
|
||||
/* motion vector */
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_msmpeg4_encode_motion(s, motion_x - pred_x,
|
||||
motion_y - pred_y);
|
||||
}
|
||||
@@ -1138,7 +1138,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
|
||||
cbp|= cbpy<<2;
|
||||
if(s->msmpeg4_version==1 || (cbp&3) != 3) cbp^= 0x3C;
|
||||
|
||||
h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
ff_h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
mx= msmpeg4v2_decode_motion(s, mx, 1);
|
||||
my= msmpeg4v2_decode_motion(s, my, 1);
|
||||
|
||||
@@ -1224,7 +1224,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
|
||||
s->rl_table_index = decode012(&s->gb);
|
||||
s->rl_chroma_table_index = s->rl_table_index;
|
||||
}
|
||||
h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
ff_h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
if (ff_msmpeg4_decode_motion(s, &mx, &my) < 0)
|
||||
return -1;
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
@@ -1320,8 +1320,8 @@ av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
|
||||
&v2_mb_type[0][1], 2, 1,
|
||||
&v2_mb_type[0][0], 2, 1, 128);
|
||||
INIT_VLC_STATIC(&v2_mv_vlc, V2_MV_VLC_BITS, 33,
|
||||
&mvtab[0][1], 2, 1,
|
||||
&mvtab[0][0], 2, 1, 538);
|
||||
&ff_mvtab[0][1], 2, 1,
|
||||
&ff_mvtab[0][0], 2, 1, 538);
|
||||
|
||||
INIT_VLC_STATIC(&ff_mb_non_intra_vlc[0], MB_NON_INTRA_VLC_BITS, 128,
|
||||
&wmv2_inter_table[0][0][1], 8, 4,
|
||||
|

@@ -592,9 +592,9 @@ static const int8_t table4_run[168] = {
29, 30, 31, 32, 33, 34, 35, 36,
};
extern const uint16_t inter_vlc[103][2];
extern const int8_t inter_level[102];
extern const int8_t inter_run[102];
extern const uint16_t ff_inter_vlc[103][2];
extern const int8_t ff_inter_level[102];
extern const int8_t ff_inter_run[102];
extern const uint16_t ff_mpeg4_intra_vlc[103][2];
extern const int8_t ff_mpeg4_intra_level[102];

@@ -647,9 +647,9 @@ RLTable rl_table[NB_RL_TABLES] = {
{
102,
58,
inter_vlc,
inter_run,
inter_level,
ff_inter_vlc,
ff_inter_run,
ff_inter_level,
},
};

@@ -191,9 +191,10 @@ retry:
}
if (c->codec_frameheader) {
int w, h, q, res;
if (buf_size < 12) {
if (buf_size < RTJPEG_HEADER_SIZE || buf[4] != RTJPEG_HEADER_SIZE ||
buf[5] != RTJPEG_FILE_VERSION) {
av_log(avctx, AV_LOG_ERROR, "invalid nuv video frame\n");
return -1;
return AVERROR_INVALIDDATA;
}
w = AV_RL16(&buf[6]);
h = AV_RL16(&buf[8]);

@@ -207,8 +208,8 @@ retry:
size_change = 1;
goto retry;
}
buf = &buf[12];
buf_size -= 12;
buf = &buf[RTJPEG_HEADER_SIZE];
buf_size -= RTJPEG_HEADER_SIZE;
}
if ((size_change || keyframe) && c->pic.data[0])

@@ -242,8 +242,10 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
if(next == END_NOT_FOUND){
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
if(!new_buffer)
if(!new_buffer) {
pc->index = 0;
return AVERROR(ENOMEM);
}
pc->buffer = new_buffer;
memcpy(&pc->buffer[pc->index], *buf, *buf_size);
pc->index += *buf_size;

@@ -256,11 +258,15 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
/* append to buffer */
if(pc->index){
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
if(!new_buffer)
if(!new_buffer) {
pc->overread_index =
pc->index = 0;
return AVERROR(ENOMEM);
}
pc->buffer = new_buffer;
memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
if (next > -FF_INPUT_BUFFER_PADDING_SIZE)
memcpy(&pc->buffer[pc->index], *buf,
next + FF_INPUT_BUFFER_PADDING_SIZE);
pc->index = 0;
*buf= pc->buffer;
}
@@ -107,7 +107,7 @@ static void png_put_interlaced_row(uint8_t *dst, int width,
|
||||
static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
|
||||
{
|
||||
long i;
|
||||
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
|
||||
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
|
||||
long a = *(long*)(src1+i);
|
||||
long b = *(long*)(src2+i);
|
||||
*(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
|
||||
@@ -148,7 +148,7 @@ static void add_paeth_prediction_c(uint8_t *dst, uint8_t *src, uint8_t *top, int
|
||||
if(bpp >= 2) g = dst[1];\
|
||||
if(bpp >= 3) b = dst[2];\
|
||||
if(bpp >= 4) a = dst[3];\
|
||||
for(; i < size; i+=bpp) {\
|
||||
for(; i <= size - bpp; i+=bpp) {\
|
||||
dst[i+0] = r = op(r, src[i+0], last[i+0]);\
|
||||
if(bpp == 1) continue;\
|
||||
dst[i+1] = g = op(g, src[i+1], last[i+1]);\
|
||||
@@ -164,13 +164,9 @@ static void add_paeth_prediction_c(uint8_t *dst, uint8_t *src, uint8_t *top, int
|
||||
else if(bpp == 2) UNROLL1(2, op)\
|
||||
else if(bpp == 3) UNROLL1(3, op)\
|
||||
else if(bpp == 4) UNROLL1(4, op)\
|
||||
else {\
|
||||
for (; i < size; i += bpp) {\
|
||||
int j;\
|
||||
for (j = 0; j < bpp; j++)\
|
||||
dst[i+j] = op(dst[i+j-bpp], src[i+j], last[i+j]);\
|
||||
}\
|
||||
}
|
||||
for (; i < size; i++) {\
|
||||
dst[i] = op(dst[i-bpp], src[i], last[i]);\
|
||||
}\
|
||||
|
||||
/* NOTE: 'dst' can be equal to 'last' */
|
||||
static void png_filter_row(PNGDecContext *s, uint8_t *dst, int filter_type,
|
||||
@@ -469,11 +465,12 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
avctx->pix_fmt = PIX_FMT_RGB48BE;
|
||||
} else if (s->bit_depth == 1) {
|
||||
avctx->pix_fmt = PIX_FMT_MONOBLACK;
|
||||
} else if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
|
||||
} else if (s->bit_depth == 8 &&
|
||||
s->color_type == PNG_COLOR_TYPE_PALETTE) {
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
} else if (s->bit_depth == 8 &&
|
||||
s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
|
||||
avctx->pix_fmt = PIX_FMT_GRAY8A;
|
||||
avctx->pix_fmt = PIX_FMT_Y400A;
|
||||
} else {
|
||||
goto fail;
|
||||
}
|
||||
|
@@ -881,9 +881,13 @@ static void synthfilt_build_sb_samples (QDM2Context *q, GetBitContext *gb, int l
|
||||
break;
|
||||
|
||||
case 30:
|
||||
if (BITS_LEFT(length,gb) >= 4)
|
||||
samples[0] = type30_dequant[qdm2_get_vlc(gb, &vlc_tab_type30, 0, 1)];
|
||||
else
|
||||
if (BITS_LEFT(length,gb) >= 4) {
|
||||
unsigned index = qdm2_get_vlc(gb, &vlc_tab_type30, 0, 1);
|
||||
if (index < FF_ARRAY_ELEMS(type30_dequant)) {
|
||||
samples[0] = type30_dequant[index];
|
||||
} else
|
||||
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
|
||||
} else
|
||||
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
|
||||
|
||||
run = 1;
|
||||
@@ -897,8 +901,12 @@ static void synthfilt_build_sb_samples (QDM2Context *q, GetBitContext *gb, int l
|
||||
type34_predictor = samples[0];
|
||||
type34_first = 0;
|
||||
} else {
|
||||
samples[0] = type34_delta[qdm2_get_vlc(gb, &vlc_tab_type34, 0, 1)] / type34_div + type34_predictor;
|
||||
type34_predictor = samples[0];
|
||||
unsigned index = qdm2_get_vlc(gb, &vlc_tab_type34, 0, 1);
|
||||
if (index < FF_ARRAY_ELEMS(type34_delta)) {
|
||||
samples[0] = type34_delta[index] / type34_div + type34_predictor;
|
||||
type34_predictor = samples[0];
|
||||
} else
|
||||
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
|
||||
}
|
||||
} else {
|
||||
samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
|
||||
@@ -1230,6 +1238,11 @@ static void qdm2_decode_super_block (QDM2Context *q)
|
||||
for (i = 0; packet_bytes > 0; i++) {
|
||||
int j;
|
||||
|
||||
if (i>=FF_ARRAY_ELEMS(q->sub_packet_list_A)) {
|
||||
SAMPLES_NEEDED_2("too many packet bytes");
|
||||
return;
|
||||
}
|
||||
|
||||
q->sub_packet_list_A[i].next = NULL;
|
||||
|
||||
if (i > 0) {
|
||||
@@ -1871,6 +1884,10 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
|
||||
av_log(avctx, AV_LOG_ERROR, "Unknown FFT order (%d), contact the developers!\n", s->fft_order);
|
||||
return -1;
|
||||
}
|
||||
if (s->fft_size != (1 << (s->fft_order - 1))) {
|
||||
av_log(avctx, AV_LOG_ERROR, "FFT size %d not power of 2.\n", s->fft_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
ff_rdft_init(&s->rdft_ctx, s->fft_order, IDFT_C2R);
|
||||
ff_mpadsp_init(&s->mpadsp);
|
||||
|
@@ -37,7 +37,7 @@
#include "libavcodec/qdm2_tables.h"
#else
static uint16_t softclip_table[HARDCLIP_THRESHOLD - SOFTCLIP_THRESHOLD + 1];
static float noise_table[4096];
static float noise_table[4096 + 20];
static uint8_t random_dequant_index[256][5];
static uint8_t random_dequant_type24[128][3];
static float noise_samples[128];
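The qdm2 hunks above share one hardening pattern: a VLC-decoded index is range-checked against FF_ARRAY_ELEMS() of the dequantization table before it is used, with dithering noise as the fallback. A reduced sketch of that guard is shown below; the table contents and the noise fallback are placeholders, not the real qdm2 data.

    #include <stddef.h>

    #define ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))  /* like FF_ARRAY_ELEMS */

    static const float dequant_table[8] = {
        -1.0f, -0.5f, -0.25f, 0.0f, 0.25f, 0.5f, 1.0f, 2.0f  /* placeholder values */
    };

    /* `index` stands in for a VLC-decoded code word; check the range
     * before the table lookup instead of trusting the bitstream. */
    static float dequant(unsigned index, float noise_fallback)
    {
        if (index < ARRAY_ELEMS(dequant_table))
            return dequant_table[index];
        return noise_fallback;  /* out-of-range code word: substitute noise */
    }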
@@ -157,6 +157,12 @@ static av_cold int roq_decode_init(AVCodecContext *avctx)
|
||||
RoqContext *s = avctx->priv_data;
|
||||
|
||||
s->avctx = avctx;
|
||||
|
||||
if (avctx->width%16 || avctx->height%16) {
|
||||
av_log_ask_for_sample(avctx, "dimensions not being a multiple of 16 are unsupported\n");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
s->width = avctx->width;
|
||||
s->height = avctx->height;
|
||||
avcodec_get_frame_defaults(&s->frames[0]);
|
||||
|
@@ -83,7 +83,7 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
unsigned short *pixels = (unsigned short *)s->frame.data[0];
|
||||
|
||||
int row_ptr = 0;
|
||||
int pixel_ptr = 0;
|
||||
int pixel_ptr = -4;
|
||||
int block_ptr;
|
||||
int pixel_x, pixel_y;
|
||||
int total_blocks;
|
||||
@@ -139,6 +139,7 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
colorA = AV_RB16 (&s->buf[stream_ptr]);
|
||||
stream_ptr += 2;
|
||||
while (n_blocks--) {
|
||||
ADVANCE_BLOCK()
|
||||
block_ptr = row_ptr + pixel_ptr;
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
|
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++){
|
||||
@@ -147,7 +148,6 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
}
|
||||
block_ptr += row_inc;
|
||||
}
|
||||
ADVANCE_BLOCK();
|
||||
}
|
||||
break;
|
||||
|
||||
@@ -186,6 +186,7 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
if (s->size - stream_ptr < n_blocks * 4)
|
||||
return;
|
||||
while (n_blocks--) {
|
||||
ADVANCE_BLOCK();
|
||||
block_ptr = row_ptr + pixel_ptr;
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
|
||||
index = s->buf[stream_ptr++];
|
||||
@@ -196,7 +197,6 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
}
|
||||
block_ptr += row_inc;
|
||||
}
|
||||
ADVANCE_BLOCK();
|
||||
}
|
||||
break;
|
||||
|
||||
@@ -204,6 +204,7 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
case 0x00:
|
||||
if (s->size - stream_ptr < 16)
|
||||
return;
|
||||
ADVANCE_BLOCK();
|
||||
block_ptr = row_ptr + pixel_ptr;
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
|
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++){
|
||||
@@ -217,7 +218,6 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
}
|
||||
block_ptr += row_inc;
|
||||
}
|
||||
ADVANCE_BLOCK();
|
||||
break;
|
||||
|
||||
/* Unknown opcode */
|
||||
|
@@ -25,6 +25,9 @@
#include <stdint.h>
#include "dsputil.h"

#define RTJPEG_FILE_VERSION 0
#define RTJPEG_HEADER_SIZE 12

typedef struct {
int w, h;
DSPContext *dsp;
@@ -362,6 +362,11 @@ static int rv20_decode_picture_header(MpegEncContext *s)
|
||||
f= get_bits(&s->gb, av_log2(v)+1);
|
||||
|
||||
if(f){
|
||||
if (s->avctx->extradata_size < 8 + 2 * f) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Extradata too small.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
new_w= 4*((uint8_t*)s->avctx->extradata)[6+2*f];
|
||||
new_h= 4*((uint8_t*)s->avctx->extradata)[7+2*f];
|
||||
}else{
|
||||
@@ -498,7 +503,7 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx)
|
||||
if (MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
|
||||
h263_decode_init_vlc(s);
|
||||
ff_h263_decode_init_vlc(s);
|
||||
|
||||
/* init rv vlc */
|
||||
if (!done) {
|
||||
|
@@ -1280,6 +1280,14 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
|
||||
|
||||
if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
|
||||
if(s->width != r->si.width || s->height != r->si.height){
|
||||
|
||||
if (HAVE_THREADS &&
|
||||
(s->avctx->active_thread_type & FF_THREAD_FRAME)) {
|
||||
av_log_missing_feature(s->avctx, "Width/height changing with "
|
||||
"frame threading is", 0);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "Changing dimensions to %dx%d\n", r->si.width,r->si.height);
|
||||
MPV_common_end(s);
|
||||
s->width = r->si.width;
|
||||
@@ -1455,15 +1463,20 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
|
||||
if(get_slice_offset(avctx, slices_hdr, 0) < 0 ||
|
||||
get_slice_offset(avctx, slices_hdr, 0) > buf_size){
|
||||
av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, 0), (buf_size-get_slice_offset(avctx, slices_hdr, 0))*8);
|
||||
if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
|
||||
av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) && si.type == AV_PICTURE_TYPE_B)
|
||||
return -1;
|
||||
if ((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) &&
|
||||
si.type == AV_PICTURE_TYPE_B) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
|
||||
"reference data.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
|
||||
|| (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
|
@@ -123,7 +123,7 @@ static const uint8_t rv34_quant_to_vlc_set[2][31] = {

/**
* table for obtaining the quantizer difference
* @todo Use with modified_quant_tab from h263data.h.
* @todo Use with ff_modified_quant_tab from h263data.h.
*/
static const uint8_t rv34_dquant_tab[2][32]={
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
@@ -78,7 +78,7 @@ typedef struct ShortenContext {
|
||||
GetBitContext gb;
|
||||
|
||||
int min_framesize, max_framesize;
|
||||
int channels;
|
||||
unsigned channels;
|
||||
|
||||
int32_t *decoded[MAX_CHANNELS];
|
||||
int32_t *decoded_base[MAX_CHANNELS];
|
||||
@@ -119,11 +119,11 @@ static int allocate_buffers(ShortenContext *s)
|
||||
for (chan=0; chan<s->channels; chan++) {
|
||||
if(FFMAX(1, s->nmean) >= UINT_MAX/sizeof(int32_t)){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "nmean too large\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if(s->blocksize + s->nwrap >= UINT_MAX/sizeof(int32_t) || s->blocksize + s->nwrap <= (unsigned)s->nwrap){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "s->blocksize + s->nwrap too large\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
tmp_ptr = av_realloc(s->offset[chan], sizeof(int32_t)*FFMAX(1, s->nmean));
|
||||
@@ -209,14 +209,14 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
|
||||
init_get_bits(&hb, header, header_size*8);
|
||||
if (get_le32(&hb) != MKTAG('R','I','F','F')) {
|
||||
av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
skip_bits_long(&hb, 32); /* chunk_size */
|
||||
|
||||
if (get_le32(&hb) != MKTAG('W','A','V','E')) {
|
||||
av_log(avctx, AV_LOG_ERROR, "missing WAVE tag\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
while (get_le32(&hb) != MKTAG('f','m','t',' ')) {
|
||||
@@ -227,7 +227,7 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
|
||||
|
||||
if (len < 16) {
|
||||
av_log(avctx, AV_LOG_ERROR, "fmt chunk was too short\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
wave_format = get_le16(&hb);
|
||||
@@ -237,7 +237,7 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
|
||||
break;
|
||||
default:
|
||||
av_log(avctx, AV_LOG_ERROR, "unsupported wave format\n");
|
||||
return -1;
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
|
||||
avctx->channels = get_le16(&hb);
|
||||
@@ -248,7 +248,7 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
|
||||
|
||||
if (avctx->bits_per_coded_sample != 16) {
|
||||
av_log(avctx, AV_LOG_ERROR, "unsupported number of bits per sample\n");
|
||||
return -1;
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
|
||||
len -= 16;
|
||||
@@ -342,8 +342,13 @@ static int shorten_decode_frame(AVCodecContext *avctx,
|
||||
s->internal_ftype = get_uint(s, TYPESIZE);
|
||||
|
||||
s->channels = get_uint(s, CHANSIZE);
|
||||
if (s->channels > MAX_CHANNELS) {
|
||||
if (!s->channels) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "No channels reported\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (s->channels <= 0 || s->channels > MAX_CHANNELS) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "too many channels: %d\n", s->channels);
|
||||
s->channels = 0;
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -506,7 +511,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
|
||||
s->bitshift = get_ur_golomb_shorten(&s->gb, BITSHIFTSIZE);
|
||||
break;
|
||||
case FN_BLOCKSIZE: {
|
||||
int blocksize = get_uint(s, av_log2(s->blocksize));
|
||||
unsigned blocksize = get_uint(s, av_log2(s->blocksize));
|
||||
if (blocksize > s->blocksize) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Increasing block size is not supported\n");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
@@ -534,7 +539,7 @@ frame_done:
|
||||
av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", i - buf_size);
|
||||
s->bitstream_size=0;
|
||||
s->bitstream_index=0;
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (s->bitstream_size) {
|
||||
s->bitstream_index += i;
|
||||
|
@@ -645,7 +645,7 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
}
if(bits) { //decode 16-bit data
for(i = stereo; i >= 0; i--)
pred[i] = av_bswap16(get_bits(&gb, 16));
pred[i] = sign_extend(av_bswap16(get_bits(&gb, 16)), 16);
for(i = 0; i <= stereo; i++)
*samples++ = pred[i];
for(; i < unp_size / 2; i++) {
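The smacker hunk above wraps the byte-swapped 16-bit predictor in sign_extend(..., 16) so that values with the high bit set become negative instead of staying in the 0..65535 range. For reference, a small sketch of what such a helper does; this is the common two's-complement idiom, not FFmpeg's own implementation.

    #include <stdint.h>

    /* Sign-extend a 16-bit value to int; the cast through int16_t does the
     * extension on the two's-complement targets this code assumes. */
    static inline int sign_extend16(unsigned v)
    {
        return (int16_t)v;
    }

    /* Generic variant for an arbitrary width; relies on arithmetic right
     * shift of negative ints, which common compilers provide. */
    static inline int sign_extend_bits(unsigned v, unsigned bits)
    {
        unsigned shift = 8 * sizeof(unsigned) - bits;
        return (int)(v << shift) >> shift;
    }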
@@ -2299,7 +2299,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
s->m.me.map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
s->m.obmc_scratchpad= av_mallocz(MB_SIZE*MB_SIZE*12*sizeof(uint32_t));
h263_encode_init(&s->m); //mv_penalty
ff_h263_encode_init(&s->m); //mv_penalty

s->max_ref_frames = FFMAX(FFMIN(avctx->refs, MAX_REF_FRAMES), 1);

@@ -43,7 +43,7 @@
|
||||
#undef NDEBUG
|
||||
#include <assert.h>
|
||||
|
||||
extern const uint8_t mvtab[33][2];
|
||||
extern const uint8_t ff_mvtab[33][2];
|
||||
|
||||
static VLC svq1_block_type;
|
||||
static VLC svq1_motion_component;
|
||||
@@ -768,8 +768,8 @@ static av_cold int svq1_decode_init(AVCodecContext *avctx)
|
||||
&ff_svq1_block_type_vlc[0][0], 2, 1, 6);
|
||||
|
||||
INIT_VLC_STATIC(&svq1_motion_component, 7, 33,
|
||||
&mvtab[0][1], 2, 1,
|
||||
&mvtab[0][0], 2, 1, 176);
|
||||
&ff_mvtab[0][1], 2, 1,
|
||||
&ff_mvtab[0][0], 2, 1, 176);
|
||||
|
||||
for (i = 0; i < 6; i++) {
|
||||
static const uint8_t sizes[2][6] = {{14, 10, 14, 18, 16, 18}, {10, 10, 14, 14, 14, 16}};
|
||||
|
@@ -406,7 +406,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
|
||||
int mx, my, pred_x, pred_y, dxy;
|
||||
int16_t *motion_ptr;
|
||||
|
||||
motion_ptr= h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
|
||||
motion_ptr= ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
|
||||
if(s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTER){
|
||||
for(i=0; i<6; i++)
|
||||
init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], 7*32);
|
||||
@@ -496,7 +496,7 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx)
|
||||
s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
|
||||
s->mb_type = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int16_t));
|
||||
s->dummy = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int32_t));
|
||||
h263_encode_init(&s->m); //mv_penalty
|
||||
ff_h263_encode_init(&s->m); //mv_penalty
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -360,6 +360,11 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
"Samples per pixel requires a single value, many provided\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (value > 4U) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Samples per pixel %d is too large\n", value);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (s->bppcount == 1)
|
||||
s->bpp *= value;
|
||||
s->bppcount = value;
|
||||
@@ -396,7 +401,7 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
break;
|
||||
case TIFF_ROWSPERSTRIP:
|
||||
if (type == TIFF_LONG && value == UINT_MAX)
|
||||
value = s->avctx->height;
|
||||
value = s->height;
|
||||
if(value < 1){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Incorrect value of rows per strip\n");
|
||||
return -1;
|
||||
|
@@ -305,6 +305,10 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
|
||||
|
||||
strip_sizes = av_mallocz(sizeof(*strip_sizes) * strips);
|
||||
strip_offsets = av_mallocz(sizeof(*strip_offsets) * strips);
|
||||
if (!strip_sizes || !strip_offsets) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
bytes_per_row = (((s->width - 1)/s->subsampling[0] + 1) * s->bpp
|
||||
* s->subsampling[0] * s->subsampling[1] + 7) >> 3;
|
||||
@@ -312,6 +316,7 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
|
||||
yuv_line = av_malloc(bytes_per_row);
|
||||
if (yuv_line == NULL){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
@@ -324,6 +329,10 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
|
||||
|
||||
zlen = bytes_per_row * s->rps;
|
||||
zbuf = av_malloc(zlen);
|
||||
if (!zbuf) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
strip_offsets[0] = ptr - buf;
|
||||
zn = 0;
|
||||
for (j = 0; j < s->rps; j++) {
|
||||
@@ -348,8 +357,13 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf,
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
if(s->compr == TIFF_LZW)
|
||||
if (s->compr == TIFF_LZW) {
|
||||
s->lzws = av_malloc(ff_lzw_encode_state_size);
|
||||
if (!s->lzws) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < s->height; i++) {
|
||||
if (strip_sizes[i / s->rps] == 0) {
|
||||
if(s->compr == TIFF_LZW){
|
||||
|
@@ -520,6 +520,10 @@ hres,vres,i,i%vres (0 < i < 4)
|
||||
}
|
||||
|
||||
#define APPLY_C_PREDICTOR() \
|
||||
if(index > 1023){\
|
||||
av_log(s->avctx, AV_LOG_ERROR, " index %d went out of bounds\n", index); \
|
||||
return; \
|
||||
}\
|
||||
predictor_pair = s->c_predictor_table[index]; \
|
||||
horiz_pred += (predictor_pair >> 1); \
|
||||
if (predictor_pair & 1) { \
|
||||
@@ -537,6 +541,10 @@ hres,vres,i,i%vres (0 < i < 4)
|
||||
index++;
|
||||
|
||||
#define APPLY_C_PREDICTOR_24() \
|
||||
if(index > 1023){\
|
||||
av_log(s->avctx, AV_LOG_ERROR, " index %d went out of bounds\n", index); \
|
||||
return; \
|
||||
}\
|
||||
predictor_pair = s->c_predictor_table[index]; \
|
||||
horiz_pred += (predictor_pair >> 1); \
|
||||
if (predictor_pair & 1) { \
|
||||
@@ -555,6 +563,10 @@ hres,vres,i,i%vres (0 < i < 4)
|
||||
|
||||
|
||||
#define APPLY_Y_PREDICTOR() \
|
||||
if(index > 1023){\
|
||||
av_log(s->avctx, AV_LOG_ERROR, " index %d went out of bounds\n", index); \
|
||||
return; \
|
||||
}\
|
||||
predictor_pair = s->y_predictor_table[index]; \
|
||||
horiz_pred += (predictor_pair >> 1); \
|
||||
if (predictor_pair & 1) { \
|
||||
@@ -572,6 +584,10 @@ hres,vres,i,i%vres (0 < i < 4)
|
||||
index++;
|
||||
|
||||
#define APPLY_Y_PREDICTOR_24() \
|
||||
if(index > 1023){\
|
||||
av_log(s->avctx, AV_LOG_ERROR, " index %d went out of bounds\n", index); \
|
||||
return; \
|
||||
}\
|
||||
predictor_pair = s->y_predictor_table[index]; \
|
||||
horiz_pred += (predictor_pair >> 1); \
|
||||
if (predictor_pair & 1) { \
|
||||
|
@@ -170,9 +170,10 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int l
case PIX_FMT_PAL8:
case PIX_FMT_BGR8:
case PIX_FMT_RGB8:
if(s->codec_id == CODEC_ID_SMC){
w_align=4;
h_align=4;
if (s->codec_id == CODEC_ID_SMC ||
s->codec_id == CODEC_ID_CINEPAK) {
w_align = 4;
h_align = 4;
}
break;
case PIX_FMT_BGR24:
@@ -181,6 +182,12 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int l
h_align=4;
}
break;
case PIX_FMT_RGB24:
if (s->codec_id == CODEC_ID_CINEPAK) {
w_align = 4;
h_align = 4;
}
break;
default:
w_align= 1;
h_align= 1;
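The utils.c hunks above only widen the set of codecs that require 4-pixel alignment; the rounding itself is the usual "align up to a power-of-two multiple" arithmetic. As a reminder, a generic sketch of that calculation (not the FFmpeg macro):

    #include <assert.h>

    /* Round x up to the next multiple of `align`, where align is a power of two. */
    static unsigned align_up(unsigned x, unsigned align)
    {
        assert(align && (align & (align - 1)) == 0);  /* power of two */
        return (x + align - 1) & ~(align - 1);
    }

    /* Example: a 175x99 SMC or Cinepak frame padded to 4-pixel alignment
     * gives align_up(175, 4) == 176 and align_up(99, 4) == 100. */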
@@ -3840,9 +3840,11 @@ AVCodec ff_vc1_decoder = {
vc1_decode_frame,
CODEC_CAP_DR1 | CODEC_CAP_DELAY,
NULL,
.flush = ff_mpeg_flush,
.long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
.pix_fmts = ff_hwaccel_pixfmt_list_420,
.profiles = NULL_IF_CONFIG_SMALL(profiles)
.profiles = NULL_IF_CONFIG_SMALL(profiles),
.flush = ff_mpeg_flush,
};

#if CONFIG_WMV3_DECODER
@@ -3857,9 +3859,11 @@ AVCodec ff_wmv3_decoder = {
vc1_decode_frame,
CODEC_CAP_DR1 | CODEC_CAP_DELAY,
NULL,
.flush = ff_mpeg_flush,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
.pix_fmts = ff_hwaccel_pixfmt_list_420,
.profiles = NULL_IF_CONFIG_SMALL(profiles)
.profiles = NULL_IF_CONFIG_SMALL(profiles),
.flush = ff_mpeg_flush,
};
#endif
@@ -117,7 +117,8 @@ int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values)
|
||||
int ff_vorbis_ready_floor1_list(AVCodecContext *avccontext,
|
||||
vorbis_floor1_entry *list, int values)
|
||||
{
|
||||
int i;
|
||||
list[0].sort = 0;
|
||||
@@ -141,6 +142,11 @@ void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values)
|
||||
for (i = 0; i < values - 1; i++) {
|
||||
int j;
|
||||
for (j = i + 1; j < values; j++) {
|
||||
if (list[i].x == list[j].x) {
|
||||
av_log(avccontext, AV_LOG_ERROR,
|
||||
"Duplicate value found in floor 1 X coordinates\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (list[list[i].sort].x > list[list[j].sort].x) {
|
||||
int tmp = list[i].sort;
|
||||
list[i].sort = list[j].sort;
|
||||
@@ -148,6 +154,7 @@ void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values)
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void render_line_unrolled(intptr_t x, int y, int x1,
|
||||
|
@@ -36,7 +36,8 @@ typedef struct {
uint16_t high;
} vorbis_floor1_entry;

void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values);
int ff_vorbis_ready_floor1_list(AVCodecContext *avccontext,
vorbis_floor1_entry *list, int values);
unsigned int ff_vorbis_nth_root(unsigned int x, unsigned int n); // x^(1/n)
int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num);
void ff_vorbis_floor1_render_list(vorbis_floor1_entry * list, int values,
@@ -559,7 +559,11 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
|
||||
}
|
||||
|
||||
// Precalculate order of x coordinates - needed for decode
|
||||
ff_vorbis_ready_floor1_list(floor_setup->data.t1.list, floor_setup->data.t1.x_list_dim);
|
||||
if (ff_vorbis_ready_floor1_list(vc->avccontext,
|
||||
floor_setup->data.t1.list,
|
||||
floor_setup->data.t1.x_list_dim)) {
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
} else if (floor_setup->floor_type == 0) {
|
||||
unsigned max_codebook_dim = 0;
|
||||
|
||||
@@ -568,6 +572,11 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
|
||||
floor_setup->data.t0.order = get_bits(gb, 8);
|
||||
floor_setup->data.t0.rate = get_bits(gb, 16);
|
||||
floor_setup->data.t0.bark_map_size = get_bits(gb, 16);
|
||||
if (floor_setup->data.t0.bark_map_size == 0) {
|
||||
av_log(vc->avccontext, AV_LOG_ERROR,
|
||||
"Floor 0 bark map size is 0.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
floor_setup->data.t0.amplitude_bits = get_bits(gb, 6);
|
||||
/* zero would result in a div by zero later *
|
||||
* 2^0 - 1 == 0 */
|
||||
|
@@ -155,7 +155,7 @@ static int cb_lookup_vals(int lookup, int dimentions, int entries)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ready_codebook(vorbis_enc_codebook *cb)
|
||||
static int ready_codebook(vorbis_enc_codebook *cb)
|
||||
{
|
||||
int i;
|
||||
|
||||
@@ -167,6 +167,8 @@ static void ready_codebook(vorbis_enc_codebook *cb)
|
||||
int vals = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries);
|
||||
cb->dimentions = av_malloc(sizeof(float) * cb->nentries * cb->ndimentions);
|
||||
cb->pow2 = av_mallocz(sizeof(float) * cb->nentries);
|
||||
if (!cb->dimentions || !cb->pow2)
|
||||
return AVERROR(ENOMEM);
|
||||
for (i = 0; i < cb->nentries; i++) {
|
||||
float last = 0;
|
||||
int j;
|
||||
@@ -187,13 +189,16 @@ static void ready_codebook(vorbis_enc_codebook *cb)
|
||||
cb->pow2[i] /= 2.;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ready_residue(vorbis_enc_residue *rc, vorbis_enc_context *venc)
|
||||
static int ready_residue(vorbis_enc_residue *rc, vorbis_enc_context *venc)
|
||||
{
|
||||
int i;
|
||||
assert(rc->type == 2);
|
||||
rc->maxes = av_mallocz(sizeof(float[2]) * rc->classifications);
|
||||
if (!rc->maxes)
|
||||
return AVERROR(ENOMEM);
|
||||
for (i = 0; i < rc->classifications; i++) {
|
||||
int j;
|
||||
vorbis_enc_codebook * cb;
|
||||
@@ -223,15 +228,16 @@ static void ready_residue(vorbis_enc_residue *rc, vorbis_enc_context *venc)
|
||||
rc->maxes[i][0] += 0.8;
|
||||
rc->maxes[i][1] += 0.8;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void create_vorbis_context(vorbis_enc_context *venc,
|
||||
AVCodecContext *avccontext)
|
||||
static int create_vorbis_context(vorbis_enc_context *venc,
|
||||
AVCodecContext *avccontext)
|
||||
{
|
||||
vorbis_enc_floor *fc;
|
||||
vorbis_enc_residue *rc;
|
||||
vorbis_enc_mapping *mc;
|
||||
int i, book;
|
||||
int i, book, ret;
|
||||
|
||||
venc->channels = avccontext->channels;
|
||||
venc->sample_rate = avccontext->sample_rate;
|
||||
@@ -239,6 +245,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
|
||||
|
||||
venc->ncodebooks = FF_ARRAY_ELEMS(cvectors);
|
||||
venc->codebooks = av_malloc(sizeof(vorbis_enc_codebook) * venc->ncodebooks);
|
||||
if (!venc->codebooks)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
// codebook 0..14 - floor1 book, values 0..255
|
||||
// codebook 15 residue masterbook
|
||||
@@ -255,27 +263,36 @@ static void create_vorbis_context(vorbis_enc_context *venc,
|
||||
|
||||
cb->lens = av_malloc(sizeof(uint8_t) * cb->nentries);
|
||||
cb->codewords = av_malloc(sizeof(uint32_t) * cb->nentries);
|
||||
if (!cb->lens || !cb->codewords)
|
||||
return AVERROR(ENOMEM);
|
||||
memcpy(cb->lens, cvectors[book].clens, cvectors[book].len);
|
||||
memset(cb->lens + cvectors[book].len, 0, cb->nentries - cvectors[book].len);
|
||||
|
||||
if (cb->lookup) {
|
||||
vals = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries);
|
||||
cb->quantlist = av_malloc(sizeof(int) * vals);
|
||||
if (!cb->quantlist)
|
||||
return AVERROR(ENOMEM);
|
||||
for (i = 0; i < vals; i++)
|
||||
cb->quantlist[i] = cvectors[book].quant[i];
|
||||
} else {
|
||||
cb->quantlist = NULL;
|
||||
}
|
||||
ready_codebook(cb);
|
||||
if ((ret = ready_codebook(cb)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
venc->nfloors = 1;
|
||||
venc->floors = av_malloc(sizeof(vorbis_enc_floor) * venc->nfloors);
|
||||
if (!venc->floors)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
// just 1 floor
|
||||
fc = &venc->floors[0];
|
||||
fc->partitions = NUM_FLOOR_PARTITIONS;
|
||||
fc->partition_to_class = av_malloc(sizeof(int) * fc->partitions);
|
||||
if (!fc->partition_to_class)
|
||||
return AVERROR(ENOMEM);
|
||||
fc->nclasses = 0;
|
||||
for (i = 0; i < fc->partitions; i++) {
|
||||
static const int a[] = {0, 1, 2, 2, 3, 3, 4, 4};
|
||||
@@ -284,6 +301,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
|
||||
}
|
||||
fc->nclasses++;
|
||||
fc->classes = av_malloc(sizeof(vorbis_enc_floor_class) * fc->nclasses);
|
||||
if (!fc->classes)
|
||||
return AVERROR(ENOMEM);
|
||||
for (i = 0; i < fc->nclasses; i++) {
|
||||
vorbis_enc_floor_class * c = &fc->classes[i];
|
||||
int j, books;
|
||||
@@ -292,6 +311,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
|
||||
c->masterbook = floor_classes[i].masterbook;
|
||||
books = (1 << c->subclass);
|
||||
c->books = av_malloc(sizeof(int) * books);
|
||||
if (!c->books)
|
||||
return AVERROR(ENOMEM);
|
||||
for (j = 0; j < books; j++)
|
||||
c->books[j] = floor_classes[i].nbooks[j];
|
||||
}
|
||||
@@ -303,6 +324,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
|
||||
fc->values += fc->classes[fc->partition_to_class[i]].dim;
|
||||
|
||||
fc->list = av_malloc(sizeof(vorbis_floor1_entry) * fc->values);
|
||||
if (!fc->list)
|
||||
return AVERROR(ENOMEM);
|
||||
fc->list[0].x = 0;
|
||||
fc->list[1].x = 1 << fc->rangebits;
|
||||
for (i = 2; i < fc->values; i++) {
|
||||
@@ -313,10 +336,13 @@ static void create_vorbis_context(vorbis_enc_context *venc,
|
||||
};
|
||||
fc->list[i].x = a[i - 2];
|
||||
}
|
||||
ff_vorbis_ready_floor1_list(fc->list, fc->values);
|
||||
if (ff_vorbis_ready_floor1_list(avccontext, fc->list, fc->values))
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
venc->nresidues = 1;
|
||||
venc->residues = av_malloc(sizeof(vorbis_enc_residue) * venc->nresidues);
|
||||
if (!venc->residues)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
// single residue
|
||||
rc = &venc->residues[0];
|
||||
@@ -327,6 +353,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
|
||||
rc->classifications = 10;
|
||||
rc->classbook = 15;
|
||||
rc->books = av_malloc(sizeof(*rc->books) * rc->classifications);
|
||||
if (!rc->books)
|
||||
return AVERROR(ENOMEM);
|
||||
{
|
||||
static const int8_t a[10][8] = {
|
||||
{ -1, -1, -1, -1, -1, -1, -1, -1, },
|
||||
@@ -342,19 +370,26 @@ static void create_vorbis_context(vorbis_enc_context *venc,
|
||||
};
|
||||
memcpy(rc->books, a, sizeof a);
|
||||
}
|
||||
ready_residue(rc, venc);
|
||||
if ((ret = ready_residue(rc, venc)) < 0)
|
||||
return ret;
|
||||
|
||||
venc->nmappings = 1;
|
||||
venc->mappings = av_malloc(sizeof(vorbis_enc_mapping) * venc->nmappings);
|
||||
if (!venc->mappings)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
// single mapping
|
||||
mc = &venc->mappings[0];
|
||||
mc->submaps = 1;
|
||||
mc->mux = av_malloc(sizeof(int) * venc->channels);
|
||||
if (!mc->mux)
|
||||
return AVERROR(ENOMEM);
|
||||
for (i = 0; i < venc->channels; i++)
|
||||
mc->mux[i] = 0;
|
||||
mc->floor = av_malloc(sizeof(int) * mc->submaps);
|
||||
mc->residue = av_malloc(sizeof(int) * mc->submaps);
|
||||
if (!mc->floor || !mc->residue)
|
||||
return AVERROR(ENOMEM);
|
||||
for (i = 0; i < mc->submaps; i++) {
|
||||
mc->floor[i] = 0;
|
||||
mc->residue[i] = 0;
|
||||
@@ -362,6 +397,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
|
||||
mc->coupling_steps = venc->channels == 2 ? 1 : 0;
|
||||
mc->magnitude = av_malloc(sizeof(int) * mc->coupling_steps);
|
||||
mc->angle = av_malloc(sizeof(int) * mc->coupling_steps);
|
||||
if (!mc->magnitude || !mc->angle)
|
||||
return AVERROR(ENOMEM);
|
||||
if (mc->coupling_steps) {
|
||||
mc->magnitude[0] = 0;
|
||||
mc->angle[0] = 1;
|
||||
@@ -369,6 +406,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
|
||||
|
||||
venc->nmodes = 1;
|
||||
venc->modes = av_malloc(sizeof(vorbis_enc_mode) * venc->nmodes);
|
||||
if (!venc->modes)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
// single mode
|
||||
venc->modes[0].blockflag = 0;
|
||||
@@ -379,12 +418,18 @@ static void create_vorbis_context(vorbis_enc_context *venc,
|
||||
venc->samples = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]));
|
||||
venc->floor = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2);
|
||||
venc->coeffs = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2);
|
||||
if (!venc->saved || !venc->samples || !venc->floor || !venc->coeffs)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
venc->win[0] = ff_vorbis_vwin[venc->log2_blocksize[0] - 6];
|
||||
venc->win[1] = ff_vorbis_vwin[venc->log2_blocksize[1] - 6];
|
||||
|
||||
ff_mdct_init(&venc->mdct[0], venc->log2_blocksize[0], 0, 1.0);
|
||||
ff_mdct_init(&venc->mdct[1], venc->log2_blocksize[1], 0, 1.0);
|
||||
if ((ret = ff_mdct_init(&venc->mdct[0], venc->log2_blocksize[0], 0, 1.0)) < 0)
|
||||
return ret;
|
||||
if ((ret = ff_mdct_init(&venc->mdct[1], venc->log2_blocksize[1], 0, 1.0)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void put_float(PutBitContext *pb, float f)
|
||||
@@ -647,6 +692,8 @@ static int put_main_header(vorbis_enc_context *venc, uint8_t **out)
|
||||
|
||||
len = hlens[0] + hlens[1] + hlens[2];
|
||||
p = *out = av_mallocz(64 + len + len/255);
|
||||
if (!p)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
*p++ = 2;
|
||||
p += av_xiphlacing(p, hlens[0]);
|
||||
@@ -952,33 +999,6 @@ static int apply_window_and_mdct(vorbis_enc_context *venc, const signed short *a
|
||||
return 1;
|
||||
}
|
||||
|
||||
static av_cold int vorbis_encode_init(AVCodecContext *avccontext)
|
||||
{
|
||||
vorbis_enc_context *venc = avccontext->priv_data;
|
||||
|
||||
if (avccontext->channels != 2) {
|
||||
av_log(avccontext, AV_LOG_ERROR, "Current FFmpeg Vorbis encoder only supports 2 channels.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
create_vorbis_context(venc, avccontext);
|
||||
|
||||
if (avccontext->flags & CODEC_FLAG_QSCALE)
|
||||
venc->quality = avccontext->global_quality / (float)FF_QP2LAMBDA / 10.;
|
||||
else
|
||||
venc->quality = 0.03;
|
||||
venc->quality *= venc->quality;
|
||||
|
||||
avccontext->extradata_size = put_main_header(venc, (uint8_t**)&avccontext->extradata);
|
||||
|
||||
avccontext->frame_size = 1 << (venc->log2_blocksize[0] - 1);
|
||||
|
||||
avccontext->coded_frame = avcodec_alloc_frame();
|
||||
avccontext->coded_frame->key_frame = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vorbis_encode_frame(AVCodecContext *avccontext,
|
||||
unsigned char *packets,
|
||||
int buf_size, void *data)
|
||||
@@ -1102,6 +1122,43 @@ static av_cold int vorbis_encode_close(AVCodecContext *avccontext)
|
||||
return 0 ;
|
||||
}
|
||||
|
||||
static av_cold int vorbis_encode_init(AVCodecContext *avccontext)
|
||||
{
|
||||
vorbis_enc_context *venc = avccontext->priv_data;
|
||||
int ret;
|
||||
|
||||
if (avccontext->channels != 2) {
|
||||
av_log(avccontext, AV_LOG_ERROR, "Current FFmpeg Vorbis encoder only supports 2 channels.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((ret = create_vorbis_context(venc, avccontext)) < 0)
|
||||
goto error;
|
||||
|
||||
if (avccontext->flags & CODEC_FLAG_QSCALE)
|
||||
venc->quality = avccontext->global_quality / (float)FF_QP2LAMBDA / 10.;
|
||||
else
|
||||
venc->quality = 0.03;
|
||||
venc->quality *= venc->quality;
|
||||
|
||||
if ((ret = put_main_header(venc, (uint8_t**)&avccontext->extradata)) < 0)
|
||||
goto error;
|
||||
avccontext->extradata_size = ret;
|
||||
|
||||
avccontext->frame_size = 1 << (venc->log2_blocksize[0] - 1);
|
||||
|
||||
avccontext->coded_frame = avcodec_alloc_frame();
|
||||
if (!avccontext->coded_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
error:
|
||||
vorbis_encode_close(avccontext);
|
||||
return ret;
|
||||
}
|
||||
|
||||
AVCodec ff_vorbis_encoder = {
|
||||
"vorbis",
|
||||
AVMEDIA_TYPE_AUDIO,
|
||||
|
@@ -47,18 +47,18 @@ static int vp5_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
|
||||
{
|
||||
vp56_rac_gets(c, 8);
|
||||
if(vp56_rac_gets(c, 5) > 5)
|
||||
return 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
vp56_rac_gets(c, 2);
|
||||
if (vp56_rac_get(c)) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n");
|
||||
return 0;
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
rows = vp56_rac_gets(c, 8); /* number of stored macroblock rows */
|
||||
cols = vp56_rac_gets(c, 8); /* number of stored macroblock cols */
|
||||
if (!rows || !cols) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n",
|
||||
cols << 4, rows << 4);
|
||||
return 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
vp56_rac_gets(c, 8); /* number of displayed macroblock rows */
|
||||
vp56_rac_gets(c, 8); /* number of displayed macroblock cols */
|
||||
@@ -67,11 +67,11 @@ static int vp5_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
|
||||
16*cols != s->avctx->coded_width ||
|
||||
16*rows != s->avctx->coded_height) {
|
||||
avcodec_set_dimensions(s->avctx, 16*cols, 16*rows);
|
||||
return 2;
|
||||
return VP56_SIZE_CHANGE;
|
||||
}
|
||||
} else if (!s->macroblocks)
|
||||
return 0;
|
||||
return 1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vp5_parse_vector_adjustment(VP56Context *s, VP56mv *vect)
|
||||
|
@@ -511,10 +511,16 @@ int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
|
||||
s->modelp = &s->models[is_alpha];
|
||||
|
||||
res = s->parse_header(s, buf, remaining_buf_size, &golden_frame);
|
||||
if (!res)
|
||||
return -1;
|
||||
if (res < 0) {
|
||||
int i;
|
||||
for (i = 0; i < 4; i++) {
|
||||
if (s->frames[i].data[0])
|
||||
avctx->release_buffer(avctx, &s->frames[i]);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
if (res == 2) {
|
||||
if (res == VP56_SIZE_CHANGE) {
|
||||
int i;
|
||||
for (i = 0; i < 4; i++) {
|
||||
if (s->frames[i].data[0])
|
||||
@@ -533,7 +539,7 @@ int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (res == 2)
|
||||
if (res == VP56_SIZE_CHANGE)
|
||||
if (vp56_size_changed(avctx)) {
|
||||
avctx->release_buffer(avctx, p);
|
||||
return -1;
|
||||
|
@@ -38,6 +38,8 @@ typedef struct {
int16_t y;
} DECLARE_ALIGNED(4, , VP56mv);

#define VP56_SIZE_CHANGE 1

typedef void (*VP56ParseVectorAdjustment)(VP56Context *s,
VP56mv *vect);
typedef void (*VP56Filter)(VP56Context *s, uint8_t *dst, uint8_t *src,
@@ -50,7 +50,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
|
||||
int vrt_shift = 0;
|
||||
int sub_version;
|
||||
int rows, cols;
|
||||
int res = 1;
|
||||
int res = 0;
|
||||
int separated_coeff = buf[0] & 1;
|
||||
|
||||
s->framep[VP56_FRAME_CURRENT]->key_frame = !(buf[0] & 0x80);
|
||||
@@ -59,11 +59,11 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
|
||||
if (s->framep[VP56_FRAME_CURRENT]->key_frame) {
|
||||
sub_version = buf[1] >> 3;
|
||||
if (sub_version > 8)
|
||||
return 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
s->filter_header = buf[1] & 0x06;
|
||||
if (buf[1] & 1) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n");
|
||||
return 0;
|
||||
av_log_missing_feature(s->avctx, "Interlacing", 0);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
if (separated_coeff || !s->filter_header) {
|
||||
coeff_offset = AV_RB16(buf+2) - 2;
|
||||
@@ -77,7 +77,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
|
||||
/* buf[5] is number of displayed macroblock cols */
|
||||
if (!rows || !cols) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n", cols << 4, rows << 4);
|
||||
return 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (!s->macroblocks || /* first frame */
|
||||
@@ -88,7 +88,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
|
||||
s->avctx->width -= s->avctx->extradata[0] >> 4;
|
||||
s->avctx->height -= s->avctx->extradata[0] & 0x0F;
|
||||
}
|
||||
res = 2;
|
||||
res = VP56_SIZE_CHANGE;
|
||||
}
|
||||
|
||||
ff_vp56_init_range_decoder(c, buf+6, buf_size-6);
|
||||
@@ -100,7 +100,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
|
||||
s->sub_version = sub_version;
|
||||
} else {
|
||||
if (!s->sub_version || !s->avctx->coded_width || !s->avctx->coded_height)
|
||||
return 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (separated_coeff || !s->filter_header) {
|
||||
coeff_offset = AV_RB16(buf+1) - 2;
|
||||
@@ -144,7 +144,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
|
||||
if (buf_size < 0) {
|
||||
if (s->framep[VP56_FRAME_CURRENT]->key_frame)
|
||||
avcodec_set_dimensions(s->avctx, 0, 0);
|
||||
return 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (s->use_huffman) {
|
||||
s->parse_coeff = vp6_parse_coeff_huffman;
|
||||
|
@@ -274,6 +274,7 @@ static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c));
memcpy(s->prob->mvc , vp8_mv_default_prob , sizeof(s->prob->mvc));
memset(&s->segmentation, 0, sizeof(s->segmentation));
memset(&s->lf_delta, 0, sizeof(s->lf_delta));
}

if (!s->macroblocks_base || /* first frame */
@@ -159,6 +159,12 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (s->width & (s->vector_width - 1) ||
|
||||
s->height & (s->vector_height - 1)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Image size not multiple of block size\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* allocate codebooks */
|
||||
s->codebook_size = MAX_CODEBOOK_SIZE;
|
||||
s->codebook = av_malloc(s->codebook_size);
|
||||
@@ -521,6 +527,11 @@ static void vqa_decode_chunk(VqaContext *s)
|
||||
chunk_size = AV_RB32(&s->buf[cbp0_chunk + 4]);
|
||||
cbp0_chunk += CHUNK_PREAMBLE_SIZE;
|
||||
|
||||
if (chunk_size > MAX_CODEBOOK_SIZE - s->next_codebook_buffer_index) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "cbp0 chunk too large (0x%X bytes)\n", chunk_size);
|
||||
return;
|
||||
}
|
||||
|
||||
/* accumulate partial codebook */
|
||||
memcpy(&s->next_codebook_buffer[s->next_codebook_buffer_index],
|
||||
&s->buf[cbp0_chunk], chunk_size);
|
||||
@@ -544,6 +555,11 @@ static void vqa_decode_chunk(VqaContext *s)
|
||||
chunk_size = AV_RB32(&s->buf[cbpz_chunk + 4]);
|
||||
cbpz_chunk += CHUNK_PREAMBLE_SIZE;
|
||||
|
||||
if (chunk_size > MAX_CODEBOOK_SIZE - s->next_codebook_buffer_index) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "cbpz chunk too large (0x%X bytes)\n", chunk_size);
|
||||
return;
|
||||
}
|
||||
|
||||
/* accumulate partial codebook */
|
||||
memcpy(&s->next_codebook_buffer[s->next_codebook_buffer_index],
|
||||
&s->buf[cbpz_chunk], chunk_size);
|
||||
|
@@ -177,6 +177,10 @@ int ff_wma_init(AVCodecContext *avctx, int flags2)

bps = (float)s->bit_rate / (float)(s->nb_channels * s->sample_rate);
s->byte_offset_bits = av_log2((int)(bps * s->frame_len / 8.0 + 0.5)) + 2;
if (s->byte_offset_bits + 3 > MIN_CACHE_BITS) {
av_log(avctx, AV_LOG_ERROR, "byte_offset_bits %d is too large\n", s->byte_offset_bits);
return AVERROR_PATCHWELCOME;
}

/* compute high frequency value and choose if noise coding should
be activated */
@@ -85,6 +85,11 @@ static int wma_decode_init(AVCodecContext * avctx)
int i, flags2;
uint8_t *extradata;

if (!avctx->block_align) {
av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
return AVERROR(EINVAL);
}

s->avctx = avctx;

/* extract flag infos */