Compare commits

162 Commits: n0.7.13 ... release/0.8
SHA1:

a064b4eb12, 8925c44db1, 115efdefc5, a248117f26, bd66456866, 90ee388b28,
e3d643cf75, 1b05b0005b, 38ca79b04d, 0dff3171ce, ff29290e26, ba7cd748c1,
510da4fe2b, eec833b10d, 9b0736c08a, 70a1182a48, 49d597f058, 44ebb2556d,
0a41da3e9d, afe09e490a, f8c4dbe45e, c997dcd38b, 2a1bebfc83, 23d835f611,
d04dc7b5a7, 3197a9c4fa, ade4f3e746, 053c19cd88, 96481c5e18, 9b052bfb86,
f844cb9bce, 76c97f1963, 280998b13c, 96cf80609a, 33c9e18b09, 9c713f30e4,
530d10792d, 799000af70, f8d3bb8961, 78889be3fb, c65763a2c6, 6d4d186e9e,
5ebb5a32bd, a694b2b158, d785f69401, 5025dbc577, 5bfa208e65, d86a5ce03f,
7d4c38d58d, c313f3160a, 7e6625a9af, f13f6f82c6, 8489c0599f, ee6b868ac8,
b6783b8826, e2d529424f, 537c173853, c10582e703, dfeef3a209, 2b6f3be082,
0a57df38f4, 17966ae5bb, 5af2fd317d, 8aedb75156, 1fd86f9a21, 377fabc9e6,
41eda87048, e6ac11e417, 2cac35086c, af343f5cdd, 391e0fc6c9, caeca53a09,
760929117d, acada70ffb, 4f91c45644, e4831bb9a6, db5b454c3d, 301761792a,
440e98574b, 604d72aa0d, 03ddc26066, 801eff785a, b59ee5dcf1, e163d884ef,
56cc629a64, 685321e4bd, 3f1a58db6f, 597d709eb4, dd0c5e0fa9, ad02537746,
3bc9cfe66e, 910c1f2352, 55065315ca, 8081879655, a39c6bf1b8, 884a9b0d29,
4457e6137d, 08d9fd611e, 5fa739e685, b143844ea0, 10ff052c60, 4ede95e69c,
ce8910d861, 3d0c9c9af6, f3f22f183f, bfbff1c748, 7fd7950174, 700fb8c8dd,
9f80712454, fe9cbf582b, 642d758a2d, aa45b90804, 549b8083d6, ec6719f655,
11ecd8574a, 5754176b5b, fb3189ce8b, 8168a7cec9, 562d6fd5b5, dd14723602,
9474c93028, 7e070cf202, 1b48a426a9, e3e369f696, 6996a2f796, 05f5a2eb62,
4a636a5e43, 44da556815, aa097b4d5f, 8148833193, 3c0f84402b, 601fa56582,
c0df6a24ce, 2d63f9b4ef, 4c849c6991, 42c3a3719b, 7a0ff7566b, 10c244cc89,
99008ba366, a81c1ea2eb, 0892a6340f, d3e2f35f7a, e39fc137ae, a2ae183a38,
80b8dc30dc, 7b91e52eb9, e28814e0e1, d6e250abfc, 61ece41372, b6c5848a1f,
b6ba39f931, 77d43bf42d, 899d95efe1, 8812b5f164, f31170d4e7, 0173a7966b,
a60eb6ef12, 8582e6e9a3, 9a5e81235e, c497d71a02, 0054d70f23, b102d5d97d

Doxyfile (2 changes)

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 0.7.13
PROJECT_NUMBER = 0.8.15

# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.

@@ -8,7 +8,6 @@ FFmpeg code.
Project Leader
==============

Michael Niedermayer
final design decisions

@@ -57,7 +57,7 @@ AVFormatContext *avformat_opts;
struct SwsContext *sws_opts;
AVDictionary *format_opts, *video_opts, *audio_opts, *sub_opts;

static const int this_year = 2011;
static const int this_year = 2013;

void init_opts(void)
{

configure (4 changes, vendored)

@@ -1057,6 +1057,7 @@ HAVE_LIST="
dlfcn_h
dlopen
dos_paths
dxva_h
ebp_available
ebx_available
exp2

@@ -2378,7 +2379,7 @@ check_host_cflags -std=c99
check_host_cflags -Wall

case "$arch" in
alpha|ia64|mips|parisc|sparc)
alpha|ia64|mips|parisc|ppc|sparc)
spic=$shared
;;
x86)

@@ -2859,6 +2860,7 @@ check_func_headers windows.h MapViewOfFile
check_func_headers windows.h VirtualAlloc

check_header dlfcn.h
check_header dxva.h
check_header dxva2api.h
check_header libcrystalhd/libcrystalhd_if.h
check_header malloc.h

doc/APIchanges (120 changes)

@@ -66,16 +66,16 @@ API changes, most recent first:
2011-06-10 - c381960 - lavfi 2.15.0 - avfilter_get_audio_buffer_ref_from_arrays
Add avfilter_get_audio_buffer_ref_from_arrays() to avfilter.h.

2011-06-09 - d9f80ea - lavu 51.8.0 - AVMetadata
2011-06-09 - f9ecb84 / d9f80ea - lavu 51.8.0 - AVMetadata
Move AVMetadata from lavf to lavu and rename it to
AVDictionary -- new installed header dict.h.
All av_metadata_* functions renamed to av_dict_*.

2011-06-07 - a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
2011-06-07 - d552f61 / a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
Add av_get_bytes_per_sample() in libavutil/samplefmt.h.
Deprecate av_get_bits_per_sample_fmt().

2011-06-xx - b39b062 - lavu 51.8.0 - opt.h
2011-06-xx - f956924 / b39b062 - lavu 51.8.0 - opt.h
Add av_opt_free convenience function.

2011-06-06 - 95a0242 - lavfi 2.14.0 - AVFilterBufferRefAudioProps

@@ -105,7 +105,7 @@ API changes, most recent first:
Add av_get_pix_fmt_name() in libavutil/pixdesc.h, and deprecate
avcodec_get_pix_fmt_name() in libavcodec/avcodec.h in its favor.

2011-05-25 - 30315a8 - lavf 53.3.0 - avformat.h
2011-05-25 - 39e4206 / 30315a8 - lavf 53.3.0 - avformat.h
Add fps_probe_size to AVFormatContext.

2011-05-22 - 5ecdfd0 - lavf 53.2.0 - avformat.h

@@ -121,10 +121,10 @@ API changes, most recent first:
2011-05-14 - 9fdf772 - lavfi 2.6.0 - avcodec.h
Add avfilter_get_video_buffer_ref_from_frame() to libavfilter/avcodec.h.

2011-05-18 - 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
2011-05-18 - 75a37b5 / 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
Add request_sample_fmt field to AVCodecContext.

2011-05-10 - 188dea1 - lavc 53.6.0 - avcodec.h
2011-05-10 - 59eb12f / 188dea1 - lavc 53.6.0 - avcodec.h
Deprecate AVLPCType and the following fields in
AVCodecContext: lpc_coeff_precision, prediction_order_method,
min_partition_order, max_partition_order, lpc_type, lpc_passes.

@@ -154,81 +154,81 @@ API changes, most recent first:
Add av_dynarray_add function for adding
an element to a dynamic array.

2011-04-26 - bebe72f - lavu 51.1.0 - avutil.h
2011-04-26 - d7e5aeb / bebe72f - lavu 51.1.0 - avutil.h
Add AVPictureType enum and av_get_picture_type_char(), deprecate
FF_*_TYPE defines and av_get_pict_type_char() defined in
libavcodec/avcodec.h.

2011-04-26 - 10d3940 - lavfi 2.3.0 - avfilter.h
2011-04-26 - d7e5aeb / 10d3940 - lavfi 2.3.0 - avfilter.h
Add pict_type and key_frame fields to AVFilterBufferRefVideo.

2011-04-26 - 7a11c82 - lavfi 2.2.0 - vsrc_buffer
2011-04-26 - d7e5aeb / 7a11c82 - lavfi 2.2.0 - vsrc_buffer
Add sample_aspect_ratio fields to vsrc_buffer arguments

2011-04-21 - 94f7451 - lavc 53.1.0 - avcodec.h
2011-04-21 - 8772156 / 94f7451 - lavc 53.1.0 - avcodec.h
Add CODEC_CAP_SLICE_THREADS for codecs supporting sliced threading.

2011-04-15 - lavc 52.120.0 - avcodec.h
AVPacket structure got additional members for passing side information:
4de339e introduce side information for AVPacket
2d8591c make containers pass palette change in AVPacket
c407984 / 4de339e introduce side information for AVPacket
c407984 / 2d8591c make containers pass palette change in AVPacket

2011-04-12 - lavf 52.107.0 - avio.h
Avio cleanup, part II - deprecate the entire URLContext API:
175389c add avio_check as a replacement for url_exist
ff1ec0c add avio_pause and avio_seek_time as replacements
c55780d / 175389c add avio_check as a replacement for url_exist
9891004 / ff1ec0c add avio_pause and avio_seek_time as replacements
for _av_url_read_fseek/fpause
cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
d4d0932 / cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
should be used instead.
80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
f87b1b3 rename open flags: URL_* -> AVIO_*
f8270bb add avio_enum_protocols.
5593f03 deprecate URLProtocol.
c486dad deprecate URLContext.
026e175 deprecate the typedef for URLInterruptCB
8e76a19 deprecate av_register_protocol2.
b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
1305d93 deprecate av_url_read_seek
fa104e1 deprecate av_url_read_pause
727c7aa deprecate url_get_filename().
5958df3 deprecate url_max_packet_size().
1869ea0 deprecate url_get_file_handle().
32a97d4 deprecate url_filesize().
e52a914 deprecate url_close().
58a48c6 deprecate url_seek().
925e908 deprecate url_write().
dce3756 deprecate url_read_complete().
bc371ac deprecate url_read().
0589da0 deprecate url_open().
62eaaea deprecate url_connect.
5652bb9 deprecate url_alloc.
333e894 deprecate url_open_protocol
e230705 deprecate url_poll and URLPollEntry
c88caa5 / 80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
c88caa5 / f87b1b3 rename open flags: URL_* -> AVIO_*
d4d0932 / f8270bb add avio_enum_protocols.
d4d0932 / 5593f03 deprecate URLProtocol.
d4d0932 / c486dad deprecate URLContext.
d4d0932 / 026e175 deprecate the typedef for URLInterruptCB
c88caa5 / 8e76a19 deprecate av_register_protocol2.
11d7841 / b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
11d7841 / 1305d93 deprecate av_url_read_seek
11d7841 / fa104e1 deprecate av_url_read_pause
434f248 / 727c7aa deprecate url_get_filename().
434f248 / 5958df3 deprecate url_max_packet_size().
434f248 / 1869ea0 deprecate url_get_file_handle().
434f248 / 32a97d4 deprecate url_filesize().
434f248 / e52a914 deprecate url_close().
434f248 / 58a48c6 deprecate url_seek().
434f248 / 925e908 deprecate url_write().
434f248 / dce3756 deprecate url_read_complete().
434f248 / bc371ac deprecate url_read().
434f248 / 0589da0 deprecate url_open().
434f248 / 62eaaea deprecate url_connect.
434f248 / 5652bb9 deprecate url_alloc.
434f248 / 333e894 deprecate url_open_protocol
434f248 / e230705 deprecate url_poll and URLPollEntry

2011-04-08 - lavf 52.106.0 - avformat.h
Minor avformat.h cleanup:
a9bf9d8 deprecate av_guess_image2_codec
c3675df rename avf_sdp_create->av_sdp_create
d4d0932 / a9bf9d8 deprecate av_guess_image2_codec
d4d0932 / c3675df rename avf_sdp_create->av_sdp_create

2011-04-03 - lavf 52.105.0 - avio.h
Large-scale renaming/deprecating of AVIOContext-related functions:
724f6a0 deprecate url_fdopen
403ee83 deprecate url_open_dyn_packet_buf
6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
8978fed introduce an AVIOContext.seekable field as a replacement for
2cae980 / 724f6a0 deprecate url_fdopen
2cae980 / 403ee83 deprecate url_open_dyn_packet_buf
2cae980 / 6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
2cae980 / b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
2cae980 / 8978fed introduce an AVIOContext.seekable field as a replacement for
AVIOContext.is_streamed and url_is_streamed()
b64030f deprecate get_checksum()
4c4427a deprecate init_checksum()
4ec153b deprecate udp_set_remote_url/get_local_port
933e90a deprecate av_url_read_fseek/fpause
8d9769a deprecate url_fileno
b7f2fdd rename put_flush_packet -> avio_flush
35f1023 deprecate url_close_buf
83fddae deprecate url_open_buf
d9d86e0 rename url_fprintf -> avio_printf
59f65d9 deprecate url_setbufsize
3e68b3b deprecate url_ferror
1caa412 / b64030f deprecate get_checksum()
1caa412 / 4c4427a deprecate init_checksum()
2fd41c9 / 4ec153b deprecate udp_set_remote_url/get_local_port
4fa0e24 / 933e90a deprecate av_url_read_fseek/fpause
4fa0e24 / 8d9769a deprecate url_fileno
0fecf26 / b7f2fdd rename put_flush_packet -> avio_flush
0fecf26 / 35f1023 deprecate url_close_buf
0fecf26 / 83fddae deprecate url_open_buf
0fecf26 / d9d86e0 rename url_fprintf -> avio_printf
0fecf26 / 59f65d9 deprecate url_setbufsize
6947b0c / 3e68b3b deprecate url_ferror
66e5b1d deprecate url_feof
e8bb2e2 deprecate url_fget_max_packet_size
76aa876 rename url_fsize -> avio_size

@@ -250,7 +250,7 @@ API changes, most recent first:
b3db9ce deprecate get_partial_buffer
8d9ac96 rename av_alloc_put_byte -> avio_alloc_context

2011-03-25 - 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
2011-03-25 - 27ef7b1 / 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
Add audio_service_type field to AVCodecContext.

2011-03-17 - e309fdc - lavu 50.40.0 - pixfmt.h

@@ -288,11 +288,11 @@ API changes, most recent first:
2011-02-10 - 12c14cd - lavf 52.99.0 - AVStream.disposition
Add AV_DISPOSITION_HEARING_IMPAIRED and AV_DISPOSITION_VISUAL_IMPAIRED.

2011-02-09 - 5592734 - lavc 52.112.0 - avcodec_thread_init()
2011-02-09 - c0b102c - lavc 52.112.0 - avcodec_thread_init()
Deprecate avcodec_thread_init()/avcodec_thread_free() use; instead
set thread_count before calling avcodec_open.

2011-02-09 - 778b08a - lavc 52.111.0 - threading API
2011-02-09 - 37b00b4 - lavc 52.111.0 - threading API
Add CODEC_CAP_FRAME_THREADS with new restrictions on get_buffer()/
release_buffer()/draw_horiz_band() callbacks for appropriate codecs.
Add thread_type and active_thread_type fields to AVCodecContext.
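
For orientation, the AVMetadata entry above is the rename that most calling code notices: every av_metadata_* call becomes av_dict_* and the type moves to libavutil/dict.h. A minimal sketch of the renamed API (not part of this diff; error handling omitted):

```c
#include <stdio.h>
#include "libavutil/dict.h"

int main(void)
{
    AVDictionary *d = NULL;
    AVDictionaryEntry *e = NULL;

    av_dict_set(&d, "title", "example", 0);       /* formerly an av_metadata_* call */

    /* iterate every entry by matching the empty prefix */
    while ((e = av_dict_get(d, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", e->key, e->value);

    av_dict_free(&d);
    return 0;
}
```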

@@ -2,7 +2,7 @@ Release Notes
=============

* 0.8 "Love" June, 2011
* 0.7.1 "Peace" June, 2011 (identical to 0.8 but using 0.6 ABI/API)
* 0.7 "Peace" June, 2011 (identical to 0.8 but using 0.6 ABI/API)


General notes

@@ -479,7 +479,7 @@ int main(int argc, char **argv)

/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&oc->pb, filename, AVIO_WRONLY) < 0) {
if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
fprintf(stderr, "Could not open '%s'\n", filename);
exit(1);
}
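
The hunk above only swaps the flag name passed to avio_open(): AVIO_WRONLY becomes AVIO_FLAG_WRITE. A hedged sketch of the same call pattern in isolation (open_output is an illustrative name, not from the diff):

```c
#include "libavformat/avio.h"

static int open_output(AVIOContext **pb, const char *filename)
{
    int err = avio_open(pb, filename, AVIO_FLAG_WRITE); /* was AVIO_WRONLY */
    if (err < 0)
        return err;          /* negative AVERROR code */
    /* ... write data through *pb ... */
    return avio_close(*pb);
}
```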

@@ -299,6 +299,10 @@ prefix is ``ffmpeg2pass''. The complete file name will be
@file{PREFIX-N.log}, where N is a number specific to the output
stream.

Note that this option is overwritten by a local option of the same name
when using @code{-vcodec libx264}. That option maps to the x264 option stats
which has a different syntax.

@item -newvideo
Add a new video stream to the current output stream.

@@ -82,7 +82,7 @@ Follows a BNF description for the filtergraph syntax:
@var{LINKLABEL} ::= "[" @var{NAME} "]"
@var{LINKLABELS} ::= @var{LINKLABEL} [@var{LINKLABELS}]
@var{FILTER_ARGUMENTS} ::= sequence of chars (eventually quoted)
@var{FILTER} ::= [@var{LINKNAMES}] @var{NAME} ["=" @var{ARGUMENTS}] [@var{LINKNAMES}]
@var{FILTER} ::= [@var{LINKLABELS}] @var{NAME} ["=" @var{FILTER_ARGUMENTS}] [@var{LINKLABELS}]
@var{FILTERCHAIN} ::= @var{FILTER} [,@var{FILTERCHAIN}]
@var{FILTERGRAPH} ::= @var{FILTERCHAIN} [;@var{FILTERGRAPH}]
@end example
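
To make the grammar above concrete, a hypothetical filtergraph string that parses as one FILTERGRAPH of three FILTERCHAINs separated by ";", with LINKLABELs in square brackets and FILTER_ARGUMENTS after "=" (the filter names follow the usual split/crop/vflip/overlay example and are not taken from this diff):

```
split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2
```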

@@ -15,7 +15,7 @@ be properly added to the respective issue.
The subscription URL for the ffmpeg-trac list is:
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
The URL of the webinterface of the tracker is:
http(s)://ffmpeg.org/trac/ffmpeg
http(s)://trac.ffmpeg.org

NOTE: issue = (bug report || patch || feature request)

ffmpeg.c (23 changes)

@@ -31,7 +31,7 @@
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/opt.h"
#include "libavutil/opt.h"
#include "libavcodec/audioconvert.h"
#include "libavutil/audioconvert.h"
#include "libavutil/parseutils.h"

@@ -113,9 +113,7 @@ typedef struct AVChapterMap {
static const OptionDef options[];

#define MAX_FILES 100
#if !FF_API_MAX_STREAMS
#define MAX_STREAMS 1024 /* arbitrary sanity check value */
#endif

static const char *last_asked_format = NULL;
static int64_t input_files_ts_offset[MAX_FILES];

@@ -315,6 +313,7 @@ typedef struct AVOutputStream {
#endif

int sws_flags;
char *forced_key_frames;
} AVOutputStream;

static AVOutputStream **output_streams_for_file[MAX_FILES] = { NULL };

@@ -714,6 +713,7 @@ static int read_ffserver_streams(AVFormatContext *s, const char *filename)
return err;
/* copy stream format */
s->nb_streams = 0;
s->streams = av_mallocz(sizeof(AVStream *) * ic->nb_streams);
for(i=0;i<ic->nb_streams;i++) {
AVStream *st;
AVCodec *codec;

@@ -2337,6 +2337,9 @@ static int transcode(AVFormatContext **output_files,
"Please consider specifiying a lower framerate, a different muxer or -vsync 2\n");
}

if (ost->forced_key_frames)
parse_forced_key_frames(ost->forced_key_frames, ost, codec);

#if CONFIG_AVFILTER
if (configure_video_filters(ist, ost)) {
fprintf(stderr, "Error opening filters!\n");

@@ -2858,6 +2861,7 @@ static int transcode(AVFormatContext **output_files,
av_freep(&ost->st->codec->subtitle_header);
av_free(ost->resample_frame.data[0]);
av_free(ost->forced_kf_pts);
av_free(ost->forced_key_frames);
if (ost->video_resample)
sws_freeContext(ost->img_resample_ctx);
if (ost->resample)

@@ -3656,8 +3660,10 @@ static void new_video_stream(AVFormatContext *oc, int file_idx)
}
}

if (forced_key_frames)
parse_forced_key_frames(forced_key_frames, ost, video_enc);
if (forced_key_frames) {
ost->forced_key_frames = forced_key_frames;
forced_key_frames = NULL;
}
}
if (video_language) {
av_dict_set(&st->metadata, "language", video_language, 0);

@@ -3667,7 +3673,6 @@ static void new_video_stream(AVFormatContext *oc, int file_idx)
/* reset some key parameters */
video_disable = 0;
av_freep(&video_codec_name);
av_freep(&forced_key_frames);
video_stream_copy = 0;
frame_pix_fmt = PIX_FMT_NONE;
}

@@ -3958,7 +3963,7 @@ static int opt_output_file(const char *opt, const char *filename)
/* check filename in case of an image number is expected */
if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
if (!av_filename_number_test(oc->filename)) {
print_error(oc->filename, AVERROR_NUMEXPECTED);
print_error(oc->filename, AVERROR(EINVAL));
ffmpeg_exit(1);
}
}

@@ -3969,7 +3974,7 @@ static int opt_output_file(const char *opt, const char *filename)
(strchr(filename, ':') == NULL ||
filename[1] == ':' ||
av_strstart(filename, "file:", NULL))) {
if (url_exist(filename)) {
if (avio_check(filename, 0) == 0) {
if (!using_stdin) {
fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
fflush(stderr);

@@ -3986,7 +3991,7 @@ static int opt_output_file(const char *opt, const char *filename)
}

/* open the file */
if ((err = avio_open(&oc->pb, filename, AVIO_WRONLY)) < 0) {
if ((err = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE)) < 0) {
print_error(filename, err);
ffmpeg_exit(1);
}

ffplay.c (2 changes)

@@ -35,7 +35,7 @@
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavcodec/opt.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"

#if CONFIG_AVFILTER

@@ -23,7 +23,7 @@

#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/opt.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/dict.h"
#include "libavdevice/avdevice.h"

ffserver.c (42 changes)

@@ -39,7 +39,7 @@
#include "libavutil/dict.h"
#include "libavutil/random_seed.h"
#include "libavutil/parseutils.h"
#include "libavcodec/opt.h"
#include "libavutil/opt.h"
#include <stdarg.h>
#include <unistd.h>
#include <fcntl.h>

@@ -94,9 +94,7 @@ static const char *http_state[] = {
"RTSP_SEND_PACKET",
};

#if !FF_API_MAX_STREAMS
#define MAX_STREAMS 20
#endif

#define IOBUFFER_INIT_SIZE 8192

@@ -2232,11 +2230,11 @@ static int http_prepare_data(HTTPContext *c)
av_dict_set(&c->fmt_ctx.metadata, "copyright", c->stream->copyright, 0);
av_dict_set(&c->fmt_ctx.metadata, "title" , c->stream->title , 0);

c->fmt_ctx.streams = av_mallocz(sizeof(AVStream *) * c->stream->nb_streams);

for(i=0;i<c->stream->nb_streams;i++) {
AVStream *st;
AVStream *src;
st = av_mallocz(sizeof(AVStream));
c->fmt_ctx.streams[i] = st;
c->fmt_ctx.streams[i] = av_mallocz(sizeof(AVStream));
/* if file or feed, then just take streams from FFStream struct */
if (!c->stream->feed ||
c->stream->feed == c->stream)

@@ -2244,9 +2242,9 @@ static int http_prepare_data(HTTPContext *c)
else
src = c->stream->feed->streams[c->stream->feed_streams[i]];

*st = *src;
st->priv_data = 0;
st->codec->frame_number = 0; /* XXX: should be done in
*(c->fmt_ctx.streams[i]) = *src;
c->fmt_ctx.streams[i]->priv_data = 0;
c->fmt_ctx.streams[i]->codec->frame_number = 0; /* XXX: should be done in
AVStream, not in codec */
}
/* set output format parameters */

@@ -2944,11 +2942,9 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
snprintf(avc->filename, 1024, "rtp://0.0.0.0");
}

#if !FF_API_MAX_STREAMS
if (avc->nb_streams >= INT_MAX/sizeof(*avc->streams) ||
!(avc->streams = av_malloc(avc->nb_streams * sizeof(*avc->streams))))
goto sdp_done;
#endif
if (avc->nb_streams >= INT_MAX/sizeof(*avs) ||
!(avs = av_malloc(avc->nb_streams * sizeof(*avs))))
goto sdp_done;

@@ -2961,10 +2957,8 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
av_sdp_create(&avc, 1, *pbuffer, 2048);

sdp_done:
#if !FF_API_MAX_STREAMS
av_free(avc->streams);
#endif
av_metadata_free(&avc->metadata);
av_dict_free(&avc->metadata);
av_free(avc);
av_free(avs);

@@ -3392,6 +3386,9 @@ static int rtp_new_av_stream(HTTPContext *c,
if (!st)
goto fail;
ctx->nb_streams = 1;
ctx->streams = av_mallocz(sizeof(AVStream *) * ctx->nb_streams);
if (!ctx->streams)
goto fail;
ctx->streams[0] = st;

if (!c->stream->feed ||

@@ -3425,7 +3422,7 @@ static int rtp_new_av_stream(HTTPContext *c,
"rtp://%s:%d", ipaddr, ntohs(dest_addr->sin_port));
}

if (url_open(&h, ctx->filename, AVIO_WRONLY) < 0)
if (url_open(&h, ctx->filename, AVIO_FLAG_WRITE) < 0)
goto fail;
c->rtp_handles[stream_index] = h;
max_packet_size = url_get_max_packet_size(h);

@@ -3678,7 +3675,7 @@ static void build_feed_streams(void)
for(feed = first_feed; feed != NULL; feed = feed->next_feed) {
int fd;

if (url_exist(feed->feed_filename)) {
if (avio_check(feed->feed_filename, AVIO_FLAG_READ) > 0) {
/* See if it matches */
AVFormatContext *s = NULL;
int matches = 0;

@@ -3751,7 +3748,7 @@ static void build_feed_streams(void)
unlink(feed->feed_filename);
}
}
if (!url_exist(feed->feed_filename)) {
if (avio_check(feed->feed_filename, AVIO_FLAG_WRITE) <= 0) {
AVFormatContext s1 = {0}, *s = &s1;

if (feed->readonly) {

@@ -3761,20 +3758,15 @@ static void build_feed_streams(void)
}

/* only write the header of the ffm file */
if (avio_open(&s->pb, feed->feed_filename, AVIO_WRONLY) < 0) {
if (avio_open(&s->pb, feed->feed_filename, AVIO_FLAG_WRITE) < 0) {
http_log("Could not open output feed file '%s'\n",
feed->feed_filename);
exit(1);
}
s->oformat = feed->fmt;
s->nb_streams = feed->nb_streams;
for(i=0;i<s->nb_streams;i++) {
AVStream *st;
st = feed->streams[i];
s->streams[i] = st;
}
av_set_parameters(s, NULL);
if (av_write_header(s) < 0) {
s->streams = feed->streams;
if (avformat_write_header(s, NULL) < 0) {
http_log("Container doesn't supports the required parameters\n");
exit(1);
}
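
Several ffserver.c and ffmpeg.c hunks above replace url_exist() with avio_check(), which reports the usable access flags (or a negative AVERROR) instead of a plain boolean. A small sketch of the replacement idiom, assuming only the return-value conventions the diff itself relies on:

```c
#include "libavformat/avio.h"

/* illustrative helpers, not part of the diff */
static int file_exists(const char *filename)
{
    return avio_check(filename, 0) == 0;              /* flags == 0: pure existence check */
}

static int feed_is_readable(const char *filename)
{
    return avio_check(filename, AVIO_FLAG_READ) > 0;  /* requested flags echoed back on success */
}
```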

@@ -50,6 +50,8 @@ typedef struct EightBpsContext {

unsigned char planes;
unsigned char planemap[4];

uint32_t pal[256];
} EightBpsContext;

@@ -129,13 +131,16 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
}
}

if (avctx->palctrl) {
memcpy (c->pic.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
if (avctx->palctrl->palette_changed) {
if (avctx->bits_per_coded_sample <= 8) {
const uint8_t *pal = av_packet_get_side_data(avpkt,
AV_PKT_DATA_PALETTE,
NULL);
if (pal) {
c->pic.palette_has_changed = 1;
avctx->palctrl->palette_changed = 0;
} else
c->pic.palette_has_changed = 0;
memcpy(c->pal, pal, AVPALETTE_SIZE);
}

memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
}

*data_size = sizeof(AVFrame);

@@ -165,10 +170,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->pix_fmt = PIX_FMT_PAL8;
c->planes = 1;
c->planemap[0] = 0; // 1st plane is palette indexes
if (avctx->palctrl == NULL) {
av_log(avctx, AV_LOG_ERROR, "Error: PAL8 format but no palette from demuxer.\n");
return -1;
}
break;
case 24:
avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
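
The 8BPS hunks above drop the deprecated avctx->palctrl mechanism and read the palette from packet side data instead, caching it in the new pal[256] field. A hedged sketch of that lookup pattern as a standalone helper (update_palette and pal_cache are illustrative names):

```c
#include <string.h>
#include "libavcodec/avcodec.h"

static void update_palette(AVCodecContext *avctx, AVPacket *avpkt,
                           uint32_t pal_cache[256], AVFrame *frame)
{
    if (avctx->bits_per_coded_sample <= 8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
        if (pal) {
            memcpy(pal_cache, pal, AVPALETTE_SIZE);        /* demuxer sent a new palette */
            frame->palette_has_changed = 1;
        }
        memcpy(frame->data[1], pal_cache, AVPALETTE_SIZE); /* PAL8: palette lives in data[1] */
    }
}
```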

@@ -15,7 +15,6 @@ OBJS = allcodecs.o \
fmtconvert.o \
imgconvert.o \
jrevdct.o \
opt.o \
options.o \
parser.o \
raw.o \

@@ -183,6 +183,8 @@ static av_cold int che_configure(AACContext *ac,
enum ChannelPosition che_pos[4][MAX_ELEM_ID],
int type, int id, int *channels)
{
if (*channels >= MAX_CHANNELS)
return AVERROR_INVALIDDATA;
if (che_pos[type][id]) {
if (!ac->che[type][id] && !(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
return AVERROR(ENOMEM);

@@ -568,6 +570,11 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
output_scale_factor = 1.0;
}

if (avctx->channels > MAX_CHANNELS) {
av_log(avctx, AV_LOG_ERROR, "Too many channels\n");
return AVERROR_INVALIDDATA;
}

AAC_INIT_VLC_STATIC( 0, 304);
AAC_INIT_VLC_STATIC( 1, 270);
AAC_INIT_VLC_STATIC( 2, 550);

@@ -1694,7 +1701,7 @@ static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
int w, filt, m, i;
int bottom, top, order, start, end, size, inc;
float lpc[TNS_MAX_ORDER];
float tmp[TNS_MAX_ORDER];
float tmp[TNS_MAX_ORDER + 1];

for (w = 0; w < ics->num_windows; w++) {
bottom = ics->num_swb;

@@ -664,10 +664,9 @@ static av_cold int alac_decode_init(AVCodecContext * avctx)
alac->numchannels = alac->avctx->channels;

/* initialize from the extradata */
if (alac->avctx->extradata_size != ALAC_EXTRADATA_SIZE) {
av_log(avctx, AV_LOG_ERROR, "alac: expected %d extradata bytes\n",
ALAC_EXTRADATA_SIZE);
return -1;
if (alac->avctx->extradata_size < ALAC_EXTRADATA_SIZE) {
av_log(avctx, AV_LOG_ERROR, "alac: extradata is too small\n");
return AVERROR_INVALIDDATA;
}
if (alac_set_info(alac)) {
av_log(avctx, AV_LOG_ERROR, "alac: set_info failed\n");

@@ -257,7 +257,7 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch)
// generate warm-up samples
residual[0] = samples[0];
for(i=1;i<=lpc.lpc_order;i++)
residual[i] = samples[i] - samples[i-1];
residual[i] = sign_extend(samples[i] - samples[i-1], s->write_sample_size);

// perform lpc on remaining samples
for(i = lpc.lpc_order + 1; i < s->avctx->frame_size; i++) {

@@ -551,12 +551,15 @@ static void get_block_sizes(ALSDecContext *ctx, unsigned int *div_blocks,

/** Read the block data for a constant block
*/
static void read_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)
static int read_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)
{
ALSSpecificConfig *sconf = &ctx->sconf;
AVCodecContext *avctx = ctx->avctx;
GetBitContext *gb = &ctx->gb;

if (bd->block_length <= 0)
return -1;

*bd->raw_samples = 0;
*bd->const_block = get_bits1(gb); // 1 = constant value, 0 = zero block (silence)
bd->js_blocks = get_bits1(gb);

@@ -571,6 +574,8 @@ static void read_const_block_data(ALSDecContext *ctx, ALSBlockData *bd)

// ensure constant block decoding by reusing this field
*bd->const_block = 1;

return 0;
}

@@ -650,6 +655,11 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
for (k = 1; k < sub_blocks; k++)
s[k] = s[k - 1] + decode_rice(gb, 0);
}
for (k = 1; k < sub_blocks; k++)
if (s[k] > 32) {
av_log(avctx, AV_LOG_ERROR, "k invalid for rice code.\n");
return AVERROR_INVALIDDATA;
}

if (get_bits1(gb))
*bd->shift_lsbs = get_bits(gb, 4) + 1;

@@ -662,6 +672,11 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
int opt_order_length = av_ceil_log2(av_clip((bd->block_length >> 3) - 1,
2, sconf->max_order + 1));
*bd->opt_order = get_bits(gb, opt_order_length);
if (*bd->opt_order > sconf->max_order) {
*bd->opt_order = sconf->max_order;
av_log(avctx, AV_LOG_ERROR, "Predictor order too large!\n");
return AVERROR_INVALIDDATA;
}
} else {
*bd->opt_order = sconf->max_order;
}

@@ -694,6 +709,10 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
int rice_param = parcor_rice_table[sconf->coef_table][k][1];
int offset = parcor_rice_table[sconf->coef_table][k][0];
quant_cof[k] = decode_rice(gb, rice_param) + offset;
if (quant_cof[k] < -64 || quant_cof[k] > 63) {
av_log(avctx, AV_LOG_ERROR, "quant_cof %d is out of range\n", quant_cof[k]);
return AVERROR_INVALIDDATA;
}
}

// read coefficients 20 to 126

@@ -726,7 +745,7 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
bd->ltp_gain[0] = decode_rice(gb, 1) << 3;
bd->ltp_gain[1] = decode_rice(gb, 2) << 3;

r = get_unary(gb, 0, 4);
r = get_unary(gb, 0, 3);
c = get_bits(gb, 2);
bd->ltp_gain[2] = ltp_gain_values[r][c];

@@ -755,7 +774,6 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
int delta[8];
unsigned int k [8];
unsigned int b = av_clip((av_ceil_log2(bd->block_length) - 3) >> 1, 0, 5);
unsigned int i = start;

// read most significant bits
unsigned int high;

@@ -766,29 +784,30 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)

current_res = bd->raw_samples + start;

for (sb = 0; sb < sub_blocks; sb++, i = 0) {
for (sb = 0; sb < sub_blocks; sb++) {
unsigned int sb_len = sb_length - (sb ? 0 : start);

k [sb] = s[sb] > b ? s[sb] - b : 0;
delta[sb] = 5 - s[sb] + k[sb];

ff_bgmc_decode(gb, sb_length, current_res,
ff_bgmc_decode(gb, sb_len, current_res,
delta[sb], sx[sb], &high, &low, &value, ctx->bgmc_lut, ctx->bgmc_lut_status);

current_res += sb_length;
current_res += sb_len;
}

ff_bgmc_decode_end(gb);


// read least significant bits and tails
i = start;
current_res = bd->raw_samples + start;

for (sb = 0; sb < sub_blocks; sb++, i = 0) {
for (sb = 0; sb < sub_blocks; sb++, start = 0) {
unsigned int cur_tail_code = tail_code[sx[sb]][delta[sb]];
unsigned int cur_k = k[sb];
unsigned int cur_s = s[sb];

for (; i < sb_length; i++) {
for (; start < sb_length; start++) {
int32_t res = *current_res;

if (res == cur_tail_code) {

@@ -956,7 +975,8 @@ static int read_block(ALSDecContext *ctx, ALSBlockData *bd)
if (read_var_block_data(ctx, bd))
return -1;
} else {
read_const_block_data(ctx, bd);
if (read_const_block_data(ctx, bd) < 0)
return -1;
}

return 0;

@@ -67,10 +67,10 @@ function ff_scalarproduct_int16_neon, export=1

3: vpadd.s32 d16, d0, d1
vpadd.s32 d17, d2, d3
vpadd.s32 d10, d4, d5
vpadd.s32 d11, d6, d7
vpadd.s32 d18, d4, d5
vpadd.s32 d19, d6, d7
vpadd.s32 d0, d16, d17
vpadd.s32 d1, d10, d11
vpadd.s32 d1, d18, d19
vpadd.s32 d2, d0, d1
vpaddl.s32 d3, d2
vmov.32 r0, d3[0]

@@ -107,10 +107,10 @@ function ff_scalarproduct_and_madd_int16_neon, export=1

vpadd.s32 d16, d0, d1
vpadd.s32 d17, d2, d3
vpadd.s32 d10, d4, d5
vpadd.s32 d11, d6, d7
vpadd.s32 d18, d4, d5
vpadd.s32 d19, d6, d7
vpadd.s32 d0, d16, d17
vpadd.s32 d1, d10, d11
vpadd.s32 d1, d18, d19
vpadd.s32 d2, d0, d1
vpaddl.s32 d3, d2
vmov.32 r0, d3[0]

@@ -366,7 +366,7 @@ int ff_ass_split_override_codes(const ASSCodesCallbacks *callbacks, void *priv,
char new_line[2];
int text_len = 0;

while (*buf) {
while (buf && *buf) {
if (text && callbacks->text &&
(sscanf(buf, "\\%1[nN]", new_line) == 1 ||
!strncmp(buf, "{\\", 2))) {

@@ -179,8 +179,11 @@ static int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){
uint32_t* obuf = (uint32_t*) out;

off = (intptr_t)inbuffer & 3;
buf = (const uint32_t*) (inbuffer - off);
c = av_be2ne32((0x537F6103 >> (off*8)) | (0x537F6103 << (32-(off*8))));
buf = (const uint32_t *)(inbuffer - off);
if (off)
c = av_be2ne32((0x537F6103U >> (off * 8)) | (0x537F6103U << (32 - (off * 8))));
else
c = av_be2ne32(0x537F6103U);
bytes += 3 + off;
for (i = 0; i < bytes/4; i++)
obuf[i] = c ^ buf[i];
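
The decode_bytes() change above exists because, when off is 0, the old expression shifts a 32-bit value by 32 bits, which is undefined behaviour in C; the new code special-cases off == 0. The same guard written as a tiny rotate helper (ror32 is illustrative, not from the diff):

```c
#include <stdint.h>

static uint32_t ror32(uint32_t x, unsigned bytes_off)
{
    unsigned n = bytes_off * 8;                /* 0, 8, 16 or 24 */
    return n ? (x >> n) | (x << (32 - n)) : x; /* never shift by the full width */
}
```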
@@ -34,12 +34,6 @@
|
||||
|
||||
#include "libavcodec/version.h"
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
# define FF_INTERNALC_MEM_TYPE unsigned int
|
||||
#else
|
||||
# define FF_INTERNALC_MEM_TYPE size_t
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Identify the syntax and semantics of the bitstream.
|
||||
* The principle is roughly:
|
||||
@@ -118,9 +112,6 @@ enum CodecID {
|
||||
CODEC_ID_QDRAW,
|
||||
CODEC_ID_VIXL,
|
||||
CODEC_ID_QPEG,
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
CODEC_ID_XVID,
|
||||
#endif
|
||||
CODEC_ID_PNG,
|
||||
CODEC_ID_PPM,
|
||||
CODEC_ID_PBM,
|
||||
@@ -369,18 +360,6 @@ enum CodecID {
|
||||
CODEC_ID_FFMETADATA=0x21000, ///< Dummy codec for streams containing only metadata information.
|
||||
};
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
#define CodecType AVMediaType
|
||||
|
||||
#define CODEC_TYPE_UNKNOWN AVMEDIA_TYPE_UNKNOWN
|
||||
#define CODEC_TYPE_VIDEO AVMEDIA_TYPE_VIDEO
|
||||
#define CODEC_TYPE_AUDIO AVMEDIA_TYPE_AUDIO
|
||||
#define CODEC_TYPE_DATA AVMEDIA_TYPE_DATA
|
||||
#define CODEC_TYPE_SUBTITLE AVMEDIA_TYPE_SUBTITLE
|
||||
#define CODEC_TYPE_ATTACHMENT AVMEDIA_TYPE_ATTACHMENT
|
||||
#define CODEC_TYPE_NB AVMEDIA_TYPE_NB
|
||||
#endif
|
||||
|
||||
#if FF_API_OLD_SAMPLE_FMT
|
||||
#define SampleFormat AVSampleFormat
|
||||
|
||||
@@ -1092,6 +1071,10 @@ typedef struct AVPanScan{
|
||||
#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
|
||||
#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
|
||||
|
||||
enum AVPacketSideDataType {
|
||||
AV_PKT_DATA_PALETTE,
|
||||
};
|
||||
|
||||
typedef struct AVPacket {
|
||||
/**
|
||||
* Presentation timestamp in AVStream->time_base units; the time at which
|
||||
@@ -1113,6 +1096,17 @@ typedef struct AVPacket {
|
||||
int size;
|
||||
int stream_index;
|
||||
int flags;
|
||||
/**
|
||||
* Additional packet data that can be provided by the container.
|
||||
* Packet can contain several types of side information.
|
||||
*/
|
||||
struct {
|
||||
uint8_t *data;
|
||||
int size;
|
||||
enum AVPacketSideDataType type;
|
||||
} *side_data;
|
||||
int side_data_elems;
|
||||
|
||||
/**
|
||||
* Duration of this packet in AVStream->time_base units, 0 if unknown.
|
||||
* Equals next_pts - this_pts in presentation order.
|
||||
@@ -1142,9 +1136,6 @@ typedef struct AVPacket {
|
||||
int64_t convergence_duration;
|
||||
} AVPacket;
|
||||
#define AV_PKT_FLAG_KEY 0x0001
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
#define PKT_FLAG_KEY AV_PKT_FLAG_KEY
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Audio Video Frame.
|
||||
@@ -1265,16 +1256,6 @@ typedef struct AVCodecContext {
|
||||
*/
|
||||
enum PixelFormat pix_fmt;
|
||||
|
||||
#if FF_API_RATE_EMU
|
||||
/**
|
||||
* Frame rate emulation. If not zero, the lower layer (i.e. format handler)
|
||||
* has to read frames at native frame rate.
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
attribute_deprecated int rate_emu;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* If non NULL, 'draw_horiz_band' is called by the libavcodec
|
||||
* decoder to draw a horizontal band. It improves cache usage. Not
|
||||
@@ -1319,9 +1300,6 @@ typedef struct AVCodecContext {
|
||||
*/
|
||||
int frame_size;
|
||||
int frame_number; ///< audio or video frame number
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
int real_pict_num; ///< Returns the real picture number of previous encoded frame.
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Number of frames the decoded output will be delayed relative to
|
||||
@@ -1379,16 +1357,6 @@ typedef struct AVCodecContext {
|
||||
|
||||
int b_frame_strategy;
|
||||
|
||||
#if FF_API_HURRY_UP
|
||||
/**
|
||||
* hurry up amount
|
||||
* - encoding: unused
|
||||
* - decoding: Set by user. 1-> Skip B-frames, 2-> Skip IDCT/dequant too, 5-> Skip everything except header
|
||||
* @deprecated Deprecated in favor of skip_idct and skip_frame.
|
||||
*/
|
||||
attribute_deprecated int hurry_up;
|
||||
#endif
|
||||
|
||||
struct AVCodec *codec;
|
||||
|
||||
void *priv_data;
|
||||
@@ -1506,9 +1474,6 @@ typedef struct AVCodecContext {
|
||||
#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software.
|
||||
#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences.
|
||||
#define FF_COMPLIANCE_NORMAL 0
|
||||
#if FF_API_INOFFICIAL
|
||||
#define FF_COMPLIANCE_INOFFICIAL -1 ///< Allow inofficial extensions (deprecated - use FF_COMPLIANCE_UNOFFICIAL instead).
|
||||
#endif
|
||||
#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
|
||||
#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
|
||||
|
||||
@@ -1782,25 +1747,6 @@ typedef struct AVCodecContext {
|
||||
*/
|
||||
unsigned dsp_mask;
|
||||
|
||||
#if FF_API_MM_FLAGS
|
||||
#define FF_MM_FORCE AV_CPU_FLAG_FORCE
|
||||
#define FF_MM_MMX AV_CPU_FLAG_MMX
|
||||
#define FF_MM_3DNOW AV_CPU_FLAG_3DNOW
|
||||
#define FF_MM_MMXEXT AV_CPU_FLAG_MMX2
|
||||
#define FF_MM_MMX2 AV_CPU_FLAG_MMX2
|
||||
#define FF_MM_SSE AV_CPU_FLAG_SSE
|
||||
#define FF_MM_SSE2 AV_CPU_FLAG_SSE2
|
||||
#define FF_MM_SSE2SLOW AV_CPU_FLAG_SSE2SLOW
|
||||
#define FF_MM_3DNOWEXT AV_CPU_FLAG_3DNOWEXT
|
||||
#define FF_MM_SSE3 AV_CPU_FLAG_SSE3
|
||||
#define FF_MM_SSE3SLOW AV_CPU_FLAG_SSE3SLOW
|
||||
#define FF_MM_SSSE3 AV_CPU_FLAG_SSSE3
|
||||
#define FF_MM_SSE4 AV_CPU_FLAG_SSE4
|
||||
#define FF_MM_SSE42 AV_CPU_FLAG_SSE42
|
||||
#define FF_MM_IWMMXT AV_CPU_FLAG_IWMMXT
|
||||
#define FF_MM_ALTIVEC AV_CPU_FLAG_ALTIVEC
|
||||
#endif
|
||||
|
||||
/**
|
||||
* bits per sample/pixel from the demuxer (needed for huffyuv).
|
||||
* - encoding: Set by libavcodec.
|
||||
@@ -1875,22 +1821,6 @@ typedef struct AVCodecContext {
|
||||
*/
|
||||
uint64_t error[4];
|
||||
|
||||
#if FF_API_MB_Q
|
||||
/**
|
||||
* minimum MB quantizer
|
||||
* - encoding: unused
|
||||
* - decoding: unused
|
||||
*/
|
||||
attribute_deprecated int mb_qmin;
|
||||
|
||||
/**
|
||||
* maximum MB quantizer
|
||||
* - encoding: unused
|
||||
* - decoding: unused
|
||||
*/
|
||||
attribute_deprecated int mb_qmax;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* motion estimation comparison function
|
||||
* - encoding: Set by user.
|
||||
@@ -2592,23 +2522,6 @@ typedef struct AVCodecContext {
|
||||
int compression_level;
|
||||
#define FF_COMPRESSION_DEFAULT -1
|
||||
|
||||
#if FF_API_USE_LPC
|
||||
/**
|
||||
* Sets whether to use LPC mode - used by FLAC encoder.
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
* @deprecated Deprecated in favor of lpc_type and lpc_passes.
|
||||
*/
|
||||
int use_lpc;
|
||||
|
||||
/**
|
||||
* LPC coefficient precision - used by FLAC encoder
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
int lpc_coeff_precision;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
@@ -2628,6 +2541,13 @@ typedef struct AVCodecContext {
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* LPC coefficient precision - used by FLAC encoder
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
attribute_deprecated int lpc_coeff_precision;
|
||||
|
||||
/**
|
||||
* search method for selecting prediction order
|
||||
* - encoding: Set by user.
|
||||
@@ -3291,6 +3211,33 @@ int av_dup_packet(AVPacket *pkt);
|
||||
*/
|
||||
void av_free_packet(AVPacket *pkt);
|
||||
|
||||
/**
|
||||
* Allocate new information of a packet.
|
||||
*
|
||||
* @param pkt packet
|
||||
* @param type side information type
|
||||
* @param size side information size
|
||||
* @return pointer to fresh allocated data or NULL otherwise
|
||||
*/
|
||||
uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
|
||||
int size);
|
||||
|
||||
/**
|
||||
* Get side information from packet.
|
||||
*
|
||||
* @param pkt packet
|
||||
* @param type desired side information type
|
||||
* @param size pointer for side information size to store (optional)
|
||||
* @return pointer to data if present or NULL otherwise
|
||||
*/
|
||||
uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
|
||||
int *size);
|
||||
|
||||
int av_packet_merge_side_data(AVPacket *pkt);
|
||||
|
||||
int av_packet_split_side_data(AVPacket *pkt);
|
||||
|
||||
|
||||
/* resample.c */
|
||||
|
||||
struct ReSampleContext;
|
||||
@@ -3298,14 +3245,6 @@ struct AVResampleContext;
|
||||
|
||||
typedef struct ReSampleContext ReSampleContext;
|
||||
|
||||
#if FF_API_AUDIO_OLD
|
||||
/**
|
||||
* @deprecated Use av_audio_resample_init() instead.
|
||||
*/
|
||||
attribute_deprecated ReSampleContext *audio_resample_init(int output_channels, int input_channels,
|
||||
int output_rate, int input_rate);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Initialize audio resampling context.
|
||||
*
|
||||
@@ -3469,23 +3408,6 @@ const char *avcodec_get_pix_fmt_name(enum PixelFormat pix_fmt);
|
||||
|
||||
void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* Return the pixel format corresponding to the name name.
|
||||
*
|
||||
* If there is no pixel format with name name, then look for a
|
||||
* pixel format with the name corresponding to the native endian
|
||||
* format of name.
|
||||
* For example in a little-endian system, first look for "gray16",
|
||||
* then for "gray16le".
|
||||
*
|
||||
* Finally if no pixel format has been found, return PIX_FMT_NONE.
|
||||
*
|
||||
* @deprecated Deprecated in favor of av_get_pix_fmt().
|
||||
*/
|
||||
attribute_deprecated enum PixelFormat avcodec_get_pix_fmt(const char* name);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Return a value representing the fourCC code associated to the
|
||||
* pixel format pix_fmt, or 0 if no associated fourCC code can be
|
||||
@@ -3554,14 +3476,6 @@ int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_
|
||||
enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelFormat src_pix_fmt,
|
||||
int has_alpha, int *loss_ptr);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* @deprecated Use av_get_pix_fmt_string() instead.
|
||||
*/
|
||||
attribute_deprecated
|
||||
void avcodec_pix_fmt_string (char *buf, int buf_size, enum PixelFormat pix_fmt);
|
||||
#endif
|
||||
|
||||
#define FF_ALPHA_TRANSP 0x0001 /* image has some totally transparent pixels */
|
||||
#define FF_ALPHA_SEMI_TRANSP 0x0002 /* image has some transparent pixels */
|
||||
|
||||
@@ -3612,13 +3526,6 @@ const char *avcodec_license(void);
|
||||
*/
|
||||
void avcodec_init(void);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* @deprecated Deprecated in favor of avcodec_register().
|
||||
*/
|
||||
attribute_deprecated void register_avcodec(AVCodec *codec);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Register the codec codec and initialize libavcodec.
|
||||
*
|
||||
@@ -3763,14 +3670,6 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
|
||||
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
|
||||
int linesize_align[4]);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* @deprecated Deprecated in favor of av_check_image_size().
|
||||
*/
|
||||
attribute_deprecated
|
||||
int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h);
|
||||
#endif
|
||||
|
||||
enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);
|
||||
|
||||
#if FF_API_THREAD_INIT
|
||||
@@ -3779,8 +3678,8 @@ enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum
|
||||
*/
|
||||
attribute_deprecated
|
||||
int avcodec_thread_init(AVCodecContext *s, int thread_count);
|
||||
void avcodec_thread_free(AVCodecContext *s);
|
||||
#endif
|
||||
|
||||
int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
|
||||
int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
|
||||
//FIXME func typedef
|
||||
@@ -3851,25 +3750,6 @@ int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
|
||||
*/
|
||||
int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options);
|
||||
|
||||
#if FF_API_AUDIO_OLD
|
||||
/**
|
||||
* Decode an audio frame from buf into samples.
|
||||
* Wrapper function which calls avcodec_decode_audio3.
|
||||
*
|
||||
* @deprecated Use avcodec_decode_audio3 instead.
|
||||
* @param avctx the codec context
|
||||
* @param[out] samples the output buffer
|
||||
* @param[in,out] frame_size_ptr the output buffer size in bytes
|
||||
* @param[in] buf the input buffer
|
||||
* @param[in] buf_size the input buffer size in bytes
|
||||
* @return On error a negative value is returned, otherwise the number of bytes
|
||||
* used or zero if no frame could be decompressed.
|
||||
*/
|
||||
attribute_deprecated int avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples,
|
||||
int *frame_size_ptr,
|
||||
const uint8_t *buf, int buf_size);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Decode the audio frame of size avpkt->size from avpkt->data into samples.
|
||||
* Some decoders may support multiple frames in a single AVPacket, such
|
||||
@@ -3913,25 +3793,6 @@ int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
|
||||
int *frame_size_ptr,
|
||||
AVPacket *avpkt);
|
||||
|
||||
#if FF_API_VIDEO_OLD
|
||||
/**
|
||||
* Decode a video frame from buf into picture.
|
||||
* Wrapper function which calls avcodec_decode_video2.
|
||||
*
|
||||
* @deprecated Use avcodec_decode_video2 instead.
|
||||
* @param avctx the codec context
|
||||
* @param[out] picture The AVFrame in which the decoded video frame will be stored.
|
||||
* @param[in] buf the input buffer
|
||||
* @param[in] buf_size the size of the input buffer in bytes
|
||||
* @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
|
||||
* @return On error a negative value is returned, otherwise the number of bytes
|
||||
* used or zero if no frame could be decompressed.
|
||||
*/
|
||||
attribute_deprecated int avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
|
||||
int *got_picture_ptr,
|
||||
const uint8_t *buf, int buf_size);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Decode the video frame of size avpkt->size from avpkt->data into picture.
|
||||
* Some decoders may support multiple frames in a single AVPacket, such
|
||||
@@ -3976,15 +3837,6 @@ int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
|
||||
int *got_picture_ptr,
|
||||
AVPacket *avpkt);
|
||||
|
||||
#if FF_API_SUBTITLE_OLD
|
||||
/* Decode a subtitle message. Return -1 if error, otherwise return the
|
||||
* number of bytes used. If no subtitle could be decompressed,
|
||||
* got_sub_ptr is zero. Otherwise, the subtitle is stored in *sub. */
|
||||
attribute_deprecated int avcodec_decode_subtitle(AVCodecContext *avctx, AVSubtitle *sub,
|
||||
int *got_sub_ptr,
|
||||
const uint8_t *buf, int buf_size);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Decode a subtitle message.
|
||||
* Return a negative value on error, otherwise return the number of bytes used.
|
||||
@@ -4253,15 +4105,6 @@ AVCodecParser *av_parser_next(AVCodecParser *c);
|
||||
void av_register_codec_parser(AVCodecParser *parser);
|
||||
AVCodecParserContext *av_parser_init(int codec_id);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
attribute_deprecated
|
||||
int av_parser_parse(AVCodecParserContext *s,
|
||||
AVCodecContext *avctx,
|
||||
uint8_t **poutbuf, int *poutbuf_size,
|
||||
const uint8_t *buf, int buf_size,
|
||||
int64_t pts, int64_t dts);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Parse a packet.
|
||||
*
|
||||
@@ -4340,7 +4183,7 @@ AVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f);
|
||||
*
|
||||
* @see av_realloc
|
||||
*/
|
||||
void *av_fast_realloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_size);
|
||||
void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
|
||||
|
||||
/**
|
||||
* Allocate a buffer, reusing the given one if large enough.
|
||||
@@ -4354,17 +4197,7 @@ void *av_fast_realloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_s
|
||||
* @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and
|
||||
* *size 0 if an error occurred.
|
||||
*/
|
||||
void av_fast_malloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_size);
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
* @deprecated Deprecated in favor of av_image_copy().
|
||||
*/
|
||||
attribute_deprecated
|
||||
void av_picture_data_copy(uint8_t *dst_data[4], int dst_linesize[4],
|
||||
uint8_t *src_data[4], int src_linesize[4],
|
||||
enum PixelFormat pix_fmt, int width, int height);
|
||||
#endif
|
||||
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);

/**
 * Copy image src to dst. Wraps av_picture_data_copy() above.
@@ -4393,22 +4226,6 @@ int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
 */
unsigned int av_xiphlacing(unsigned char *s, unsigned int v);

#if LIBAVCODEC_VERSION_MAJOR < 53
/**
 * Parse str and put in width_ptr and height_ptr the detected values.
 *
 * @deprecated Deprecated in favor of av_parse_video_size().
 */
attribute_deprecated int av_parse_video_frame_size(int *width_ptr, int *height_ptr, const char *str);

/**
 * Parse str and store the detected values in *frame_rate.
 *
 * @deprecated Deprecated in favor of av_parse_video_rate().
 */
attribute_deprecated int av_parse_video_frame_rate(AVRational *frame_rate, const char *str);
#endif

/**
 * Logs a generic warning message about a missing feature. This function is
 * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)

@@ -26,12 +26,21 @@
void av_destruct_packet_nofree(AVPacket *pkt)
{
    pkt->data = NULL; pkt->size = 0;
    pkt->side_data = NULL;
    pkt->side_data_elems = 0;
}

void av_destruct_packet(AVPacket *pkt)
{
    int i;

    av_free(pkt->data);
    pkt->data = NULL; pkt->size = 0;

    for (i = 0; i < pkt->side_data_elems; i++)
        av_free(pkt->side_data[i].data);
    av_freep(&pkt->side_data);
    pkt->side_data_elems = 0;
}

void av_init_packet(AVPacket *pkt)
@@ -44,6 +53,8 @@ void av_init_packet(AVPacket *pkt)
    pkt->flags = 0;
    pkt->stream_index = 0;
    pkt->destruct= NULL;
    pkt->side_data = NULL;
    pkt->side_data_elems = 0;
}

int av_new_packet(AVPacket *pkt, int size)
@@ -89,23 +100,52 @@ int av_grow_packet(AVPacket *pkt, int grow_by)
    return 0;
}

#define DUP_DATA(dst, src, size, padding) \
    do { \
        void *data; \
        if (padding) { \
            if ((unsigned)(size) > (unsigned)(size) + FF_INPUT_BUFFER_PADDING_SIZE) \
                goto failed_alloc; \
            data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); \
        } else { \
            data = av_malloc(size); \
        } \
        if (!data) \
            goto failed_alloc; \
        memcpy(data, src, size); \
        if (padding) \
            memset((uint8_t*)data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE); \
        dst = data; \
    } while(0)

int av_dup_packet(AVPacket *pkt)
{
    AVPacket tmp_pkt;

    if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
        uint8_t *data;
        /* We duplicate the packet and don't forget to add the padding again. */
        if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
            return AVERROR(ENOMEM);
        data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!data) {
            return AVERROR(ENOMEM);
        }
        memcpy(data, pkt->data, pkt->size);
        memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
        pkt->data = data;
        tmp_pkt = *pkt;

        pkt->data = NULL;
        pkt->side_data = NULL;
        DUP_DATA(pkt->data, tmp_pkt.data, pkt->size, 1);
        pkt->destruct = av_destruct_packet;

        if (pkt->side_data_elems) {
            int i;

            DUP_DATA(pkt->side_data, tmp_pkt.side_data,
                     pkt->side_data_elems * sizeof(*pkt->side_data), 0);
            memset(pkt->side_data, 0, pkt->side_data_elems * sizeof(*pkt->side_data));
            for (i = 0; i < pkt->side_data_elems; i++) {
                DUP_DATA(pkt->side_data[i].data, tmp_pkt.side_data[i].data,
                         pkt->side_data[i].size, 1);
            }
        }
    }
    return 0;
failed_alloc:
    av_destruct_packet(pkt);
    return AVERROR(ENOMEM);
}

void av_free_packet(AVPacket *pkt)
@@ -113,6 +153,125 @@ void av_free_packet(AVPacket *pkt)
    if (pkt) {
        if (pkt->destruct) pkt->destruct(pkt);
        pkt->data = NULL; pkt->size = 0;
        pkt->side_data = NULL;
        pkt->side_data_elems = 0;
    }
}

uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
                                 int size)
{
    int elems = pkt->side_data_elems;

    if ((unsigned)elems + 1 > INT_MAX / sizeof(*pkt->side_data))
        return NULL;
    if ((unsigned)size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE)
        return NULL;

    pkt->side_data = av_realloc(pkt->side_data, (elems + 1) * sizeof(*pkt->side_data));
    if (!pkt->side_data)
        return NULL;

    pkt->side_data[elems].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!pkt->side_data[elems].data)
        return NULL;
    pkt->side_data[elems].size = size;
    pkt->side_data[elems].type = type;
    pkt->side_data_elems++;

    return pkt->side_data[elems].data;
}

uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
                                 int *size)
{
    int i;

    for (i = 0; i < pkt->side_data_elems; i++) {
        if (pkt->side_data[i].type == type) {
            if (size)
                *size = pkt->side_data[i].size;
            return pkt->side_data[i].data;
        }
    }
    return NULL;
}
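
The two functions above form the packet side-data API that several decoder changes later in this diff rely on (for example the Cinepak, Id CIN and Interplay decoders switch to reading AV_PKT_DATA_PALETTE from the packet). A usage sketch, not part of the diff; attach_palette()/apply_palette() are illustrative names and assume only the functions defined above plus AV_PKT_DATA_PALETTE/AVPALETTE_SIZE from the library:

static int attach_palette(AVPacket *pkt, const uint8_t palette[AVPALETTE_SIZE])
{
    /* allocates AVPALETTE_SIZE bytes of side data (plus internal padding) */
    uint8_t *dst = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
    if (!dst)
        return AVERROR(ENOMEM);
    memcpy(dst, palette, AVPALETTE_SIZE);
    return 0;
}

static void apply_palette(AVPacket *pkt, uint32_t pal[256])
{
    int size;
    /* returns NULL when no side data of this type is attached */
    uint8_t *side = av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE, &size);
    if (side && size == AVPALETTE_SIZE)
        memcpy(pal, side, AVPALETTE_SIZE);
}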

#define FF_MERGE_MARKER 0x8c4d9d108e25e9feULL

int av_packet_merge_side_data(AVPacket *pkt){
    if(pkt->side_data_elems){
        int i;
        uint8_t *p;
        uint64_t size= pkt->size + 8LL + FF_INPUT_BUFFER_PADDING_SIZE;
        AVPacket old= *pkt;
        for (i=0; i<old.side_data_elems; i++) {
            size += old.side_data[i].size + 5LL;
        }
        if (size > INT_MAX)
            return AVERROR(EINVAL);
        p = av_malloc(size);
        if (!p)
            return AVERROR(ENOMEM);
        pkt->data = p;
        pkt->destruct = av_destruct_packet;
        pkt->size = size - FF_INPUT_BUFFER_PADDING_SIZE;
        bytestream_put_buffer(&p, old.data, old.size);
        for (i=old.side_data_elems-1; i>=0; i--) {
            bytestream_put_buffer(&p, old.side_data[i].data, old.side_data[i].size);
            bytestream_put_be32(&p, old.side_data[i].size);
            *p++ = old.side_data[i].type | ((i==old.side_data_elems-1)*128);
        }
        bytestream_put_be64(&p, FF_MERGE_MARKER);
        av_assert0(p-pkt->data == pkt->size);
        memset(p, 0, FF_INPUT_BUFFER_PADDING_SIZE);
        av_free_packet(&old);
        pkt->side_data_elems = 0;
        pkt->side_data = NULL;
        return 1;
    }
    return 0;
}

int av_packet_split_side_data(AVPacket *pkt){
    if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){
        int i;
        unsigned int size;
        uint8_t *p= pkt->data + pkt->size - 8 - 5;

        av_dup_packet(pkt);

        for (i=1; ; i++){
            size = AV_RB32(p);
            if (size>INT_MAX || p - pkt->data <= size)
                return 0;
            if (p[4]&128)
                break;
            p-= size+5;
        }

        pkt->side_data = av_malloc(i * sizeof(*pkt->side_data));
        if (!pkt->side_data)
            return AVERROR(ENOMEM);

        p= pkt->data + pkt->size - 8 - 5;
        for (i=0; ; i++){
            size= AV_RB32(p);
            av_assert0(size<=INT_MAX && p - pkt->data > size);
            pkt->side_data[i].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
            pkt->side_data[i].size = size;
            pkt->side_data[i].type = p[4]&127;
            if (!pkt->side_data[i].data)
                return AVERROR(ENOMEM);
            memcpy(pkt->side_data[i].data, p-size, size);
            pkt->size -= size + 5;
            if(p[4]&128)
                break;
            p-= size+5;
        }
        pkt->size -= 8;
        pkt->side_data_elems = i+1;
        return 1;
    }
    return 0;
}

@@ -160,6 +160,7 @@ static av_cold int avs_decode_init(AVCodecContext * avctx)
    AvsContext *const avs = avctx->priv_data;
    avctx->pix_fmt = PIX_FMT_PAL8;
    avcodec_get_frame_defaults(&avs->picture);
    avcodec_set_dimensions(avctx, 318, 198);
    return 0;
}

@@ -219,9 +219,6 @@ static int bmp_decode_frame(AVCodecContext *avctx,
    if(comp == BMP_RLE4 || comp == BMP_RLE8)
        memset(p->data[0], 0, avctx->height * p->linesize[0]);

    if(depth == 4 || depth == 8)
        memset(p->data[1], 0, 1024);

    if(height > 0){
        ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
        linesize = -p->linesize[0];
@@ -232,6 +229,9 @@ static int bmp_decode_frame(AVCodecContext *avctx,

    if(avctx->pix_fmt == PIX_FMT_PAL8){
        int colors = 1 << depth;

        memset(p->data[1], 0, 1024);

        if(ihsize >= 36){
            int t;
            buf = buf0 + 46;

@@ -26,6 +26,10 @@
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

typedef struct {
    const uint8_t *buffer, *buffer_end;
} GetByteContext;

#define DEF_T(type, name, bytes, read, write) \
static av_always_inline type bytestream_get_ ## name(const uint8_t **b){\
    (*b) += bytes;\
@@ -34,6 +38,18 @@ static av_always_inline type bytestream_get_ ## name(const uint8_t **b){\
static av_always_inline void bytestream_put_ ##name(uint8_t **b, const type value){\
    write(*b, value);\
    (*b) += bytes;\
}\
static av_always_inline type bytestream2_get_ ## name(GetByteContext *g)\
{\
    if (g->buffer_end - g->buffer < bytes)\
        return 0;\
    return bytestream_get_ ## name(&g->buffer);\
}\
static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g)\
{\
    if (g->buffer_end - g->buffer < bytes)\
        return 0;\
    return read(g->buffer);\
}

#define DEF(name, bytes, read, write) \
@@ -55,6 +71,34 @@ DEF (byte, 1, AV_RB8 , AV_WB8 )
#undef DEF64
#undef DEF_T

static av_always_inline void bytestream2_init(GetByteContext *g,
                                              const uint8_t *buf, int buf_size)
{
    g->buffer = buf;
    g->buffer_end = buf + buf_size;
}

static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
{
    return g->buffer_end - g->buffer;
}

static av_always_inline void bytestream2_skip(GetByteContext *g,
                                              unsigned int size)
{
    g->buffer += FFMIN(g->buffer_end - g->buffer, size);
}

static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
                                                            uint8_t *dst,
                                                            unsigned int size)
{
    int size2 = FFMIN(g->buffer_end - g->buffer, size);
    memcpy(dst, g->buffer, size2);
    g->buffer += size2;
    return size2;
}
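
The bytestream2_* helpers added above give bounds-checked access to a GetByteContext: reads past buffer_end return 0 and skips are clamped instead of overrunning the buffer. A small sketch of how a caller might use them; parse_header() and its field layout are invented for illustration, and bytestream2_get_byte() comes from the DEF(byte, ...) expansion earlier in this header:

static int parse_header(const uint8_t *buf, int buf_size,
                        uint8_t *payload, unsigned int payload_size)
{
    GetByteContext gb;
    unsigned int version, skip;

    bytestream2_init(&gb, buf, buf_size);
    if (bytestream2_get_bytes_left(&gb) < 2)
        return -1;
    version = bytestream2_get_byte(&gb);   /* safe: returns 0 at end of buffer */
    skip    = bytestream2_get_byte(&gb);
    bytestream2_skip(&gb, skip);           /* clamped to the end of the buffer */
    if (version > 1)
        return -1;
    /* copies at most the bytes that are actually available */
    return bytestream2_get_buffer(&gb, payload,
                                  FFMIN(payload_size, bytestream2_get_bytes_left(&gb)));
}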

static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, uint8_t *dst, unsigned int size)
{
    memcpy(dst, *b, size);

@@ -609,12 +609,21 @@ static int decode_pic(AVSContext *h) {
static int decode_seq_header(AVSContext *h) {
    MpegEncContext *s = &h->s;
    int frame_rate_code;
    int width, height;

    h->profile = get_bits(&s->gb,8);
    h->level   = get_bits(&s->gb,8);
    skip_bits1(&s->gb); //progressive sequence
    s->width  = get_bits(&s->gb,14);
    s->height = get_bits(&s->gb,14);

    width  = get_bits(&s->gb, 14);
    height = get_bits(&s->gb, 14);
    if ((s->width || s->height) && (s->width != width || s->height != height)) {
        av_log_missing_feature(s, "Width/height changing in CAVS is", 0);
        return AVERROR_PATCHWELCOME;
    }
    s->width  = width;
    s->height = height;

    skip_bits(&s->gb,2); //chroma format
    skip_bits(&s->gb,3); //sample_precision
    h->aspect_ratio = get_bits(&s->gb,4);

@@ -67,6 +67,7 @@ typedef struct CinepakContext {

    int sega_film_skip_bytes;

    uint32_t pal[256];
} CinepakContext;

static void cinepak_decode_codebook (cvid_codebook *codebook,
@@ -398,7 +399,7 @@ static av_cold int cinepak_decode_init(AVCodecContext *avctx)
    s->sega_film_skip_bytes = -1;  /* uninitialized state */

    // check for paletted data
    if ((avctx->palctrl == NULL) || (avctx->bits_per_coded_sample == 40)) {
    if (avctx->bits_per_coded_sample != 8) {
        s->palette_video = 0;
        avctx->pix_fmt = PIX_FMT_YUV420P;
    } else {
@@ -431,16 +432,18 @@ static int cinepak_decode_frame(AVCodecContext *avctx,
        return -1;
    }

    if (s->palette_video) {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
        if (pal) {
            s->frame.palette_has_changed = 1;
            memcpy(s->pal, pal, AVPALETTE_SIZE);
        }
    }

    cinepak_decode(s);

    if (s->palette_video) {
        memcpy (s->frame.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
        if (avctx->palctrl->palette_changed) {
            s->frame.palette_has_changed = 1;
            avctx->palctrl->palette_changed = 0;
        } else
            s->frame.palette_has_changed = 0;
    }
    if (s->palette_video)
        memcpy (s->frame.data[1], s->pal, AVPALETTE_SIZE);

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

@@ -1086,6 +1086,11 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
        if (extradata_size >= 8){
            bytestream_get_be32(&edata_ptr);    //Unknown unused
            q->subpacket[s].js_subband_start = bytestream_get_be16(&edata_ptr);
            if (q->subpacket[s].js_subband_start >= 51) {
                av_log(avctx, AV_LOG_ERROR, "js_subband_start %d is too large\n", q->subpacket[s].js_subband_start);
                return AVERROR_INVALIDDATA;
            }

            q->subpacket[s].js_vlc_bits = bytestream_get_be16(&edata_ptr);
            extradata_size -= 8;
        }

@@ -23,6 +23,8 @@
#include "avcodec.h"
#include "libavutil/intreadwrite.h"
#include "bytestream.h"

#include "libavutil/imgutils.h"
#include "libavutil/lzo.h" // for av_memcpy_backptr

typedef struct DfaContext {
@@ -35,9 +37,13 @@ typedef struct DfaContext {
static av_cold int dfa_decode_init(AVCodecContext *avctx)
{
    DfaContext *s = avctx->priv_data;
    int ret;

    avctx->pix_fmt = PIX_FMT_PAL8;

    if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
        return ret;

    s->frame_buf = av_mallocz(avctx->width * avctx->height + AV_LZO_OUTPUT_PADDING);
    if (!s->frame_buf)
        return AVERROR(ENOMEM);
@@ -153,8 +159,7 @@ static int decode_dds1(uint8_t *frame, int width, int height,
            bitbuf = bytestream_get_le16(&src);
            mask = 1;
        }
        if (src_end - src < 2 || frame_end - frame < 2)
            return -1;

        if (bitbuf & mask) {
            v = bytestream_get_le16(&src);
            offset = (v & 0x1FFF) << 2;
@@ -168,8 +173,13 @@ static int decode_dds1(uint8_t *frame, int width, int height,
                frame += 2;
            }
        } else if (bitbuf & (mask << 1)) {
            frame += bytestream_get_le16(&src) * 2;
            v = bytestream_get_le16(&src)*2;
            if (frame - frame_end < v)
                return AVERROR_INVALIDDATA;
            frame += v;
        } else {
            if (frame_end - frame < width + 3)
                return AVERROR_INVALIDDATA;
            frame[0] = frame[1] =
            frame[width] = frame[width + 1] = *src++;
            frame += 2;
@@ -231,6 +241,7 @@ static int decode_wdlt(uint8_t *frame, int width, int height,
    const uint8_t *frame_end = frame + width * height;
    uint8_t *line_ptr;
    int count, i, v, lines, segments;
    int y = 0;

    lines = bytestream_get_le16(&src);
    if (lines > height || src >= src_end)
@@ -239,10 +250,12 @@ static int decode_wdlt(uint8_t *frame, int width, int height,
    while (lines--) {
        segments = bytestream_get_le16(&src);
        while ((segments & 0xC000) == 0xC000) {
            unsigned skip_lines = -(int16_t)segments;
            unsigned delta = -((int16_t)segments * width);
            if (frame_end - frame <= delta)
            if (frame_end - frame <= delta || y + lines + skip_lines > height)
                return -1;
            frame += delta;
            y += skip_lines;
            segments = bytestream_get_le16(&src);
        }
        if (segments & 0x8000) {
@@ -250,7 +263,10 @@ static int decode_wdlt(uint8_t *frame, int width, int height,
            segments = bytestream_get_le16(&src);
        }
        line_ptr = frame;
        if (frame_end - frame < width)
            return AVERROR_INVALIDDATA;
        frame += width;
        y++;
        while (segments--) {
            if (src_end - src < 2)
                return -1;

@@ -1914,7 +1914,7 @@ void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){

static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
    long i;
    for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
    for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
        long a = *(long*)(src+i);
        long b = *(long*)(dst+i);
        *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
@@ -1939,7 +1939,7 @@ static void diff_bytes_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
        }
    }else
#endif
    for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
    for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
        long a = *(long*)(src1+i);
        long b = *(long*)(src2+i);
        *(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
@@ -2836,7 +2836,7 @@ int ff_check_alignment(void){

av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
{
    int i;
    int i, j;

    ff_check_alignment();

@@ -3222,11 +3222,15 @@ av_cold void dsputil_init(DSPContext* c, AVCodecContext *avctx)
    if (ARCH_SH4)  dsputil_init_sh4 (c, avctx);
    if (ARCH_BFIN) dsputil_init_bfin (c, avctx);

    for(i=0; i<64; i++){
        if(!c->put_2tap_qpel_pixels_tab[0][i])
            c->put_2tap_qpel_pixels_tab[0][i]= c->put_h264_qpel_pixels_tab[0][i];
        if(!c->avg_2tap_qpel_pixels_tab[0][i])
            c->avg_2tap_qpel_pixels_tab[0][i]= c->avg_h264_qpel_pixels_tab[0][i];
    for (i = 0; i < 4; i++) {
        for (j = 0; j < 16; j++) {
            if(!c->put_2tap_qpel_pixels_tab[i][j])
                c->put_2tap_qpel_pixels_tab[i][j] =
                    c->put_h264_qpel_pixels_tab[i][j];
            if(!c->avg_2tap_qpel_pixels_tab[i][j])
                c->avg_2tap_qpel_pixels_tab[i][j] =
                    c->avg_h264_qpel_pixels_tab[i][j];
        }
    }

    c->put_rv30_tpel_pixels_tab[0][0] = c->put_h264_qpel_pixels_tab[0][0];

@@ -120,14 +120,6 @@ void ff_bink_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block);
void ff_ea_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block);

/* 1/2^n downscaling functions from imgconvert.c */
#if LIBAVCODEC_VERSION_MAJOR < 53
/**
 * @deprecated Use av_image_copy_plane() instead.
 */
attribute_deprecated
void ff_img_copy_plane(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
#endif

void ff_shrink22(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
void ff_shrink44(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
void ff_shrink88(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);

@@ -25,7 +25,14 @@

#define _WIN32_WINNT 0x0600
#define COBJMACROS

#include "config.h"

#include "dxva2.h"
#if HAVE_DXVA_H
#include <dxva.h>
#endif

#include "avcodec.h"
#include "mpegvideo.h"

@@ -249,7 +249,7 @@ static int decode_frame(AVCodecContext *avctx,
    int chunk_type;
    int inter;

    if (buf_size < 17) {
    if (buf_size < 26) {
        av_log(avctx, AV_LOG_ERROR, "Input buffer too small\n");
        *data_size = 0;
        return -1;

@@ -522,7 +522,7 @@ static av_always_inline int encode_line(FFV1Context *s, int w,
    int run_mode=0;

    if(s->ac){
        if(c->bytestream_end - c->bytestream < w*20){
        if(c->bytestream_end - c->bytestream < w*35){
            av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return -1;
        }

@@ -27,7 +27,7 @@ const int ff_flac_sample_rate_table[16] =
  8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000,
     0,     0,     0,     0 };

const int16_t ff_flac_blocksize_table[16] = {
const int32_t ff_flac_blocksize_table[16] = {
     0,    192, 576<<0, 576<<1, 576<<2, 576<<3,      0,      0,
256<<0, 256<<1, 256<<2, 256<<3, 256<<4, 256<<5, 256<<6, 256<<7
};

@@ -26,6 +26,6 @@

extern const int ff_flac_sample_rate_table[16];

extern const int16_t ff_flac_blocksize_table[16];
extern const int32_t ff_flac_blocksize_table[16];

#endif /* AVCODEC_FLACDATA_H */

@@ -296,17 +296,6 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
|
||||
s->options.max_partition_order = ((int[]){ 2, 2, 3, 3, 3, 8, 8, 8, 8, 8, 8, 8, 8})[level];
|
||||
|
||||
/* set compression option overrides from AVCodecContext */
|
||||
#if FF_API_USE_LPC
|
||||
/* for compatibility with deprecated AVCodecContext.use_lpc */
|
||||
if (avctx->use_lpc == 0) {
|
||||
s->options.lpc_type = AV_LPC_TYPE_FIXED;
|
||||
} else if (avctx->use_lpc == 1) {
|
||||
s->options.lpc_type = AV_LPC_TYPE_LEVINSON;
|
||||
} else if (avctx->use_lpc > 1) {
|
||||
s->options.lpc_type = AV_LPC_TYPE_CHOLESKY;
|
||||
s->options.lpc_passes = avctx->use_lpc - 1;
|
||||
}
|
||||
#endif
|
||||
#if FF_API_FLAC_GLOBAL_OPTS
|
||||
if (avctx->lpc_type > FF_LPC_TYPE_DEFAULT) {
|
||||
if (avctx->lpc_type > FF_LPC_TYPE_CHOLESKY) {
|
||||
@@ -948,14 +937,16 @@ static int encode_residual_ch(FlacEncodeContext *s, int ch)
|
||||
omethod == ORDER_METHOD_8LEVEL) {
|
||||
int levels = 1 << omethod;
|
||||
uint32_t bits[1 << ORDER_METHOD_8LEVEL];
|
||||
int order;
|
||||
int order = -1;
|
||||
int opt_index = levels-1;
|
||||
opt_order = max_order-1;
|
||||
bits[opt_index] = UINT32_MAX;
|
||||
for (i = levels-1; i >= 0; i--) {
|
||||
int last_order = order;
|
||||
order = min_order + (((max_order-min_order+1) * (i+1)) / levels)-1;
|
||||
if (order < 0)
|
||||
order = 0;
|
||||
order = av_clip(order, min_order - 1, max_order - 1);
|
||||
if (order == last_order)
|
||||
continue;
|
||||
encode_residual_lpc(res, smp, n, order+1, coefs[order], shift[order]);
|
||||
bits[i] = find_subframe_rice_params(s, sub, order+1);
|
||||
if (bits[i] < bits[opt_index]) {
|
||||
|
@@ -599,10 +599,6 @@ retry:
|
||||
s->current_picture.pict_type= s->pict_type;
|
||||
s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
|
||||
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip everything if we are in a hurry>=5 */
|
||||
if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);
|
||||
#endif
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
|
||||
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
|
@@ -98,7 +98,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
|
||||
}
|
||||
}
|
||||
|
||||
int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
|
||||
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
|
||||
{
|
||||
int x, y, wrap, a, c, pred_dc;
|
||||
int16_t *dc_val;
|
||||
@@ -226,7 +226,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
|
||||
}
|
||||
}
|
||||
|
||||
void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
|
||||
void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
|
||||
{
|
||||
int x, y, wrap, a, c, pred_dc, scale, i;
|
||||
int16_t *dc_val, *ac_val, *ac_val1;
|
||||
@@ -313,8 +313,8 @@ void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
|
||||
ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
|
||||
}
|
||||
|
||||
int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
|
||||
int *px, int *py)
|
||||
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
|
||||
int *px, int *py)
|
||||
{
|
||||
int wrap;
|
||||
int16_t *A, *B, *C, (*mot_val)[2];
|
||||
|
@@ -38,16 +38,16 @@
|
||||
extern const AVRational ff_h263_pixel_aspect[16];
|
||||
extern const uint8_t ff_h263_cbpy_tab[16][2];
|
||||
|
||||
extern const uint8_t cbpc_b_tab[4][2];
|
||||
extern const uint8_t ff_cbpc_b_tab[4][2];
|
||||
|
||||
extern const uint8_t mvtab[33][2];
|
||||
extern const uint8_t ff_mvtab[33][2];
|
||||
|
||||
extern const uint8_t ff_h263_intra_MCBPC_code[9];
|
||||
extern const uint8_t ff_h263_intra_MCBPC_bits[9];
|
||||
|
||||
extern const uint8_t ff_h263_inter_MCBPC_code[28];
|
||||
extern const uint8_t ff_h263_inter_MCBPC_bits[28];
|
||||
extern const uint8_t h263_mbtype_b_tab[15][2];
|
||||
extern const uint8_t ff_h263_mbtype_b_tab[15][2];
|
||||
|
||||
extern VLC ff_h263_intra_MCBPC_vlc;
|
||||
extern VLC ff_h263_inter_MCBPC_vlc;
|
||||
@@ -55,41 +55,41 @@ extern VLC ff_h263_cbpy_vlc;
|
||||
|
||||
extern RLTable ff_h263_rl_inter;
|
||||
|
||||
extern RLTable rl_intra_aic;
|
||||
extern RLTable ff_rl_intra_aic;
|
||||
|
||||
extern const uint16_t h263_format[8][2];
|
||||
extern const uint8_t modified_quant_tab[2][32];
|
||||
extern const uint16_t ff_h263_format[8][2];
|
||||
extern const uint8_t ff_modified_quant_tab[2][32];
|
||||
extern uint16_t ff_mba_max[6];
|
||||
extern uint8_t ff_mba_length[7];
|
||||
|
||||
extern uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
|
||||
|
||||
|
||||
int h263_decode_motion(MpegEncContext * s, int pred, int f_code);
|
||||
int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code);
|
||||
av_const int ff_h263_aspect_to_info(AVRational aspect);
|
||||
int ff_h263_decode_init(AVCodecContext *avctx);
|
||||
int ff_h263_decode_frame(AVCodecContext *avctx,
|
||||
void *data, int *data_size,
|
||||
AVPacket *avpkt);
|
||||
int ff_h263_decode_end(AVCodecContext *avctx);
|
||||
void h263_encode_mb(MpegEncContext *s,
|
||||
DCTELEM block[6][64],
|
||||
int motion_x, int motion_y);
|
||||
void h263_encode_picture_header(MpegEncContext *s, int picture_number);
|
||||
void h263_encode_gob_header(MpegEncContext * s, int mb_line);
|
||||
int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
|
||||
int *px, int *py);
|
||||
void h263_encode_init(MpegEncContext *s);
|
||||
void h263_decode_init_vlc(MpegEncContext *s);
|
||||
int h263_decode_picture_header(MpegEncContext *s);
|
||||
void ff_h263_encode_mb(MpegEncContext *s,
|
||||
DCTELEM block[6][64],
|
||||
int motion_x, int motion_y);
|
||||
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number);
|
||||
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line);
|
||||
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
|
||||
int *px, int *py);
|
||||
void ff_h263_encode_init(MpegEncContext *s);
|
||||
void ff_h263_decode_init_vlc(MpegEncContext *s);
|
||||
int ff_h263_decode_picture_header(MpegEncContext *s);
|
||||
int ff_h263_decode_gob_header(MpegEncContext *s);
|
||||
void ff_h263_update_motion_val(MpegEncContext * s);
|
||||
void ff_h263_loop_filter(MpegEncContext * s);
|
||||
int ff_h263_decode_mba(MpegEncContext *s);
|
||||
void ff_h263_encode_mba(MpegEncContext *s);
|
||||
void ff_init_qscale_tab(MpegEncContext *s);
|
||||
int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
|
||||
void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);
|
||||
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
|
||||
void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);
|
||||
|
||||
|
||||
/**
|
||||
@@ -119,7 +119,7 @@ static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code
|
||||
int l, bit_size, code;
|
||||
|
||||
if (val == 0) {
|
||||
return mvtab[0][1];
|
||||
return ff_mvtab[0][1];
|
||||
} else {
|
||||
bit_size = f_code - 1;
|
||||
/* modulo encoding */
|
||||
@@ -128,7 +128,7 @@ static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code
|
||||
val--;
|
||||
code = (val >> bit_size) + 1;
|
||||
|
||||
return mvtab[code][1] + 1 + bit_size;
|
||||
return ff_mvtab[code][1] + 1 + bit_size;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -57,7 +57,7 @@ const uint8_t ff_h263_inter_MCBPC_bits[28] = {
|
||||
11, 13, 13, 13,/* inter4Q*/
|
||||
};
|
||||
|
||||
const uint8_t h263_mbtype_b_tab[15][2] = {
|
||||
const uint8_t ff_h263_mbtype_b_tab[15][2] = {
|
||||
{1, 1},
|
||||
{3, 3},
|
||||
{1, 5},
|
||||
@@ -75,7 +75,7 @@ const uint8_t h263_mbtype_b_tab[15][2] = {
|
||||
{1, 8},
|
||||
};
|
||||
|
||||
const uint8_t cbpc_b_tab[4][2] = {
|
||||
const uint8_t ff_cbpc_b_tab[4][2] = {
|
||||
{0, 1},
|
||||
{2, 2},
|
||||
{7, 3},
|
||||
@@ -88,7 +88,7 @@ const uint8_t ff_h263_cbpy_tab[16][2] =
|
||||
{2,5}, {3,6}, {5,4}, {10,4}, {4,4}, {8,4}, {6,4}, {3,2}
|
||||
};
|
||||
|
||||
const uint8_t mvtab[33][2] =
|
||||
const uint8_t ff_mvtab[33][2] =
|
||||
{
|
||||
{1,1}, {1,2}, {1,3}, {1,4}, {3,6}, {5,7}, {4,7}, {3,7},
|
||||
{11,9}, {10,9}, {9,9}, {17,10}, {16,10}, {15,10}, {14,10}, {13,10},
|
||||
@@ -98,7 +98,7 @@ const uint8_t mvtab[33][2] =
|
||||
};
|
||||
|
||||
/* third non intra table */
|
||||
const uint16_t inter_vlc[103][2] = {
|
||||
const uint16_t ff_inter_vlc[103][2] = {
|
||||
{ 0x2, 2 },{ 0xf, 4 },{ 0x15, 6 },{ 0x17, 7 },
|
||||
{ 0x1f, 8 },{ 0x25, 9 },{ 0x24, 9 },{ 0x21, 10 },
|
||||
{ 0x20, 10 },{ 0x7, 11 },{ 0x6, 11 },{ 0x20, 11 },
|
||||
@@ -127,7 +127,7 @@ const uint16_t inter_vlc[103][2] = {
|
||||
{ 0x5e, 12 },{ 0x5f, 12 },{ 0x3, 7 },
|
||||
};
|
||||
|
||||
const int8_t inter_level[102] = {
|
||||
const int8_t ff_inter_level[102] = {
|
||||
1, 2, 3, 4, 5, 6, 7, 8,
|
||||
9, 10, 11, 12, 1, 2, 3, 4,
|
||||
5, 6, 1, 2, 3, 4, 1, 2,
|
||||
@@ -143,7 +143,7 @@ const int8_t inter_level[102] = {
|
||||
1, 1, 1, 1, 1, 1,
|
||||
};
|
||||
|
||||
const int8_t inter_run[102] = {
|
||||
const int8_t ff_inter_run[102] = {
|
||||
0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 1, 1, 1, 1,
|
||||
1, 1, 2, 2, 2, 2, 3, 3,
|
||||
@@ -162,9 +162,9 @@ const int8_t inter_run[102] = {
|
||||
RLTable ff_h263_rl_inter = {
|
||||
102,
|
||||
58,
|
||||
inter_vlc,
|
||||
inter_run,
|
||||
inter_level,
|
||||
ff_inter_vlc,
|
||||
ff_inter_run,
|
||||
ff_inter_level,
|
||||
};
|
||||
|
||||
static const uint16_t intra_vlc_aic[103][2] = {
|
||||
@@ -228,7 +228,7 @@ static const int8_t intra_level_aic[102] = {
|
||||
1, 1, 1, 1, 1, 1,
|
||||
};
|
||||
|
||||
RLTable rl_intra_aic = {
|
||||
RLTable ff_rl_intra_aic = {
|
||||
102,
|
||||
58,
|
||||
intra_vlc_aic,
|
||||
@@ -236,7 +236,7 @@ RLTable rl_intra_aic = {
|
||||
intra_level_aic,
|
||||
};
|
||||
|
||||
const uint16_t h263_format[8][2] = {
|
||||
const uint16_t ff_h263_format[8][2] = {
|
||||
{ 0, 0 },
|
||||
{ 128, 96 },
|
||||
{ 176, 144 },
|
||||
@@ -250,7 +250,7 @@ const uint8_t ff_aic_dc_scale_table[32]={
|
||||
0, 2, 4, 6, 8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62
|
||||
};
|
||||
|
||||
const uint8_t modified_quant_tab[2][32]={
|
||||
const uint8_t ff_modified_quant_tab[2][32]={
|
||||
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
|
||||
{
|
||||
0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,10,11,12,13,14,15,16,17,18,18,19,20,21,22,23,24,25,26,27,28
|
||||
|
@@ -111,7 +111,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
|
||||
if (MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
|
||||
h263_decode_init_vlc(s);
|
||||
ff_h263_decode_init_vlc(s);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -429,7 +429,7 @@ retry:
|
||||
} else if (CONFIG_FLV_DECODER && s->h263_flv) {
|
||||
ret = ff_flv_decode_picture_header(s);
|
||||
} else {
|
||||
ret = h263_decode_picture_header(s);
|
||||
ret = ff_h263_decode_picture_header(s);
|
||||
}
|
||||
|
||||
if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_size);
|
||||
@@ -610,18 +610,10 @@ retry:
|
||||
|
||||
/* skip B-frames if we don't have reference frames */
|
||||
if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)) return get_consumed_bytes(s, buf_size);
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip b frames if we are in a hurry */
|
||||
if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return get_consumed_bytes(s, buf_size);
|
||||
#endif
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
|
||||
|| (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
return get_consumed_bytes(s, buf_size);
|
||||
#if FF_API_HURRY_UP
|
||||
/* skip everything if we are in a hurry>=5 */
|
||||
if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);
|
||||
#endif
|
||||
|
||||
if(s->next_p_frame_damaged){
|
||||
if(s->pict_type==AV_PICTURE_TYPE_B)
|
||||
|
@@ -2617,6 +2617,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
else
|
||||
s->height= 16*s->mb_height - (4>>CHROMA444)*FFMIN(h->sps.crop_bottom, (8<<CHROMA444)-1);
|
||||
|
||||
if (FFALIGN(s->avctx->width, 16) == s->width &&
|
||||
FFALIGN(s->avctx->height, 16) == s->height) {
|
||||
s->width = s->avctx->width;
|
||||
s->height = s->avctx->height;
|
||||
}
|
||||
|
||||
if (s->context_initialized
|
||||
&& ( s->width != s->avctx->width || s->height != s->avctx->height
|
||||
|| av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio))) {
|
||||
@@ -2895,8 +2901,13 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
|
||||
if(num_ref_idx_active_override_flag){
|
||||
h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
|
||||
if(h->slice_type_nos==AV_PICTURE_TYPE_B)
|
||||
if (h->ref_count[0] < 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
|
||||
h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
|
||||
if (h->ref_count[1] < 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
if (h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){
|
||||
@@ -3545,7 +3556,9 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){
|
||||
|
||||
return 0;
|
||||
}else{
|
||||
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask);
|
||||
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y,
|
||||
s->mb_x - 1, s->mb_y,
|
||||
(AC_END|DC_END|MV_END)&part_mask);
|
||||
|
||||
return -1;
|
||||
}
|
||||
@@ -3707,7 +3720,11 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
break;
|
||||
}
|
||||
|
||||
if(buf_index+3 >= buf_size) break;
|
||||
|
||||
if (buf_index + 3 >= buf_size) {
|
||||
buf_index = buf_size;
|
||||
break;
|
||||
}
|
||||
|
||||
buf_index+=3;
|
||||
if(buf_index >= next_avc) continue;
|
||||
@@ -3760,11 +3777,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
}
|
||||
|
||||
//FIXME do not discard SEI id
|
||||
if(
|
||||
#if FF_API_HURRY_UP
|
||||
(s->hurry_up == 1 && h->nal_ref_idc == 0) ||
|
||||
#endif
|
||||
(avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0))
|
||||
if(avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0)
|
||||
continue;
|
||||
|
||||
again:
|
||||
@@ -3801,9 +3814,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
}
|
||||
|
||||
if(hx->redundant_pic_count==0
|
||||
#if FF_API_HURRY_UP
|
||||
&& hx->s.hurry_up < 5
|
||||
#endif
|
||||
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
|
||||
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B)
|
||||
&& (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==AV_PICTURE_TYPE_I)
|
||||
@@ -3840,10 +3850,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
hx->inter_gb_ptr= &hx->inter_gb;
|
||||
|
||||
if(hx->redundant_pic_count==0 && hx->intra_gb_ptr && hx->s.data_partitioning
|
||||
&& s->current_picture_ptr
|
||||
&& s->context_initialized
|
||||
#if FF_API_HURRY_UP
|
||||
&& s->hurry_up < 5
|
||||
#endif
|
||||
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
|
||||
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type_nos!=AV_PICTURE_TYPE_B)
|
||||
&& (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type_nos==AV_PICTURE_TYPE_I)
|
||||
@@ -3858,13 +3866,26 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
init_get_bits(&s->gb, ptr, bit_length);
|
||||
ff_h264_decode_seq_parameter_set(h);
|
||||
|
||||
if (s->flags& CODEC_FLAG_LOW_DELAY ||
|
||||
(h->sps.bitstream_restriction_flag && !h->sps.num_reorder_frames))
|
||||
s->low_delay=1;
|
||||
if (s->flags & CODEC_FLAG_LOW_DELAY ||
|
||||
(h->sps.bitstream_restriction_flag &&
|
||||
!h->sps.num_reorder_frames)) {
|
||||
if (s->avctx->has_b_frames > 1 || h->delayed_pic[0])
|
||||
av_log(avctx, AV_LOG_WARNING, "Delayed frames seen "
|
||||
"reenabling low delay requires a codec "
|
||||
"flush.\n");
|
||||
else
|
||||
s->low_delay = 1;
|
||||
}
|
||||
|
||||
if(avctx->has_b_frames < 2)
|
||||
avctx->has_b_frames= !s->low_delay;
|
||||
|
||||
if (h->sps.bit_depth_luma != h->sps.bit_depth_chroma) {
|
||||
av_log_missing_feature(s->avctx,
|
||||
"Different bit depth between chroma and luma", 1);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
if (avctx->bits_per_raw_sample != h->sps.bit_depth_luma) {
|
||||
if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
|
||||
avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
|
||||
@@ -3982,11 +4003,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
|
||||
if(!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr){
|
||||
if (avctx->skip_frame >= AVDISCARD_NONREF
|
||||
#if FF_API_HURRY_UP
|
||||
|| s->hurry_up
|
||||
#endif
|
||||
)
|
||||
if (avctx->skip_frame >= AVDISCARD_NONREF)
|
||||
return 0;
|
||||
av_log(avctx, AV_LOG_ERROR, "no frame!\n");
|
||||
return -1;
|
||||
|
@@ -621,7 +621,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
|
||||
down the code */
|
||||
if(h->slice_type_nos != AV_PICTURE_TYPE_I){
|
||||
if(s->mb_skip_run==-1)
|
||||
s->mb_skip_run= get_ue_golomb(&s->gb);
|
||||
s->mb_skip_run= get_ue_golomb_long(&s->gb);
|
||||
|
||||
if (s->mb_skip_run--) {
|
||||
if(FRAME_MBAFF && (s->mb_y&1) == 0){
|
||||
|
@@ -37,6 +37,9 @@
|
||||
//#undef NDEBUG
|
||||
#include <assert.h>
|
||||
|
||||
#define MAX_LOG2_MAX_FRAME_NUM (12 + 4)
|
||||
#define MIN_LOG2_MAX_FRAME_NUM 4
|
||||
|
||||
static const AVRational pixel_aspect[17]={
|
||||
{0, 1},
|
||||
{1, 1},
|
||||
@@ -311,7 +314,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
|
||||
MpegEncContext * const s = &h->s;
|
||||
int profile_idc, level_idc, constraint_set_flags = 0;
|
||||
unsigned int sps_id;
|
||||
int i;
|
||||
int i, log2_max_frame_num_minus4;
|
||||
SPS *sps;
|
||||
|
||||
profile_idc= get_bits(&s->gb, 8);
|
||||
@@ -340,7 +343,11 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
|
||||
memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8));
|
||||
sps->scaling_matrix_present = 0;
|
||||
|
||||
if(sps->profile_idc >= 100){ //high profile
|
||||
if (sps->profile_idc == 100 || sps->profile_idc == 110 ||
|
||||
sps->profile_idc == 122 || sps->profile_idc == 244 ||
|
||||
sps->profile_idc == 44 || sps->profile_idc == 83 ||
|
||||
sps->profile_idc == 86 || sps->profile_idc == 118 ||
|
||||
sps->profile_idc == 128 || sps->profile_idc == 144) {
|
||||
sps->chroma_format_idc= get_ue_golomb_31(&s->gb);
|
||||
if (sps->chroma_format_idc > 3U) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "chroma_format_idc %d is illegal\n", sps->chroma_format_idc);
|
||||
@@ -363,7 +370,16 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
|
||||
sps->bit_depth_chroma = 8;
|
||||
}
|
||||
|
||||
sps->log2_max_frame_num= get_ue_golomb(&s->gb) + 4;
|
||||
log2_max_frame_num_minus4 = get_ue_golomb(&s->gb);
|
||||
if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 ||
|
||||
log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR,
|
||||
"log2_max_frame_num_minus4 out of range (0-12): %d\n",
|
||||
log2_max_frame_num_minus4);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4;
|
||||
|
||||
sps->poc_type= get_ue_golomb_31(&s->gb);
|
||||
|
||||
if(sps->poc_type == 0){ //FIXME #define
|
||||
|
@@ -28,6 +28,7 @@
|
||||
* huffyuv codec for libavcodec.
|
||||
*/
|
||||
|
||||
#include "libavutil/avassert.h"
|
||||
#include "avcodec.h"
|
||||
#include "get_bits.h"
|
||||
#include "put_bits.h"
|
||||
@@ -283,12 +284,13 @@ static void generate_joint_tables(HYuvContext *s){
|
||||
for(i=y=0; y<256; y++){
|
||||
int len0 = s->len[0][y];
|
||||
int limit = VLC_BITS - len0;
|
||||
if(limit <= 0)
|
||||
if(limit <= 0 || !len0)
|
||||
continue;
|
||||
for(u=0; u<256; u++){
|
||||
int len1 = s->len[p][u];
|
||||
if(len1 > limit)
|
||||
if (len1 > limit || !len1)
|
||||
continue;
|
||||
av_assert0(i < (1 << VLC_BITS));
|
||||
len[i] = len0 + len1;
|
||||
bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
|
||||
symbols[i] = (y<<8) + u;
|
||||
@@ -310,18 +312,19 @@ static void generate_joint_tables(HYuvContext *s){
|
||||
for(i=0, g=-16; g<16; g++){
|
||||
int len0 = s->len[p0][g&255];
|
||||
int limit0 = VLC_BITS - len0;
|
||||
if(limit0 < 2)
|
||||
if (limit0 < 2 || !len0)
|
||||
continue;
|
||||
for(b=-16; b<16; b++){
|
||||
int len1 = s->len[p1][b&255];
|
||||
int limit1 = limit0 - len1;
|
||||
if(limit1 < 1)
|
||||
if (limit1 < 1 || !len1)
|
||||
continue;
|
||||
code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
|
||||
for(r=-16; r<16; r++){
|
||||
int len2 = s->len[2][r&255];
|
||||
if(len2 > limit1)
|
||||
if (len2 > limit1 || !len2)
|
||||
continue;
|
||||
av_assert0(i < (1 << VLC_BITS));
|
||||
len[i] = len0 + len1 + len2;
|
||||
bits[i] = (code << len2) + s->bits[2][r&255];
|
||||
if(s->decorrelate){
|
||||
@@ -345,6 +348,7 @@ static void generate_joint_tables(HYuvContext *s){
|
||||
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
|
||||
GetBitContext gb;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
init_get_bits(&gb, src, length*8);
|
||||
|
||||
@@ -355,7 +359,8 @@ static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
|
||||
return -1;
|
||||
}
|
||||
free_vlc(&s->vlc[i]);
|
||||
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
|
||||
if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
generate_joint_tables(s);
|
||||
@@ -367,6 +372,7 @@ static int read_old_huffman_tables(HYuvContext *s){
|
||||
#if 1
|
||||
GetBitContext gb;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
|
||||
if(read_len_table(s->len[0], &gb)<0)
|
||||
@@ -387,7 +393,8 @@ static int read_old_huffman_tables(HYuvContext *s){
|
||||
|
||||
for(i=0; i<3; i++){
|
||||
free_vlc(&s->vlc[i]);
|
||||
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
|
||||
if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
generate_joint_tables(s);
|
||||
|
@@ -72,6 +72,7 @@ typedef struct IdcinContext {
|
||||
hnode huff_nodes[256][HUF_TOKENS*2];
|
||||
int num_huff_nodes[256];
|
||||
|
||||
uint32_t pal[256];
|
||||
} IdcinContext;
|
||||
|
||||
/*
|
||||
@@ -214,7 +215,7 @@ static int idcin_decode_frame(AVCodecContext *avctx,
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
IdcinContext *s = avctx->priv_data;
|
||||
AVPaletteControl *palette_control = avctx->palctrl;
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
s->buf = buf;
|
||||
s->size = buf_size;
|
||||
@@ -229,13 +230,12 @@ static int idcin_decode_frame(AVCodecContext *avctx,
|
||||
|
||||
idcin_decode_vlcs(s);
|
||||
|
||||
/* make the palette available on the way out */
|
||||
memcpy(s->frame.data[1], palette_control->palette, PALETTE_COUNT * 4);
|
||||
/* If palette changed inform application*/
|
||||
if (palette_control->palette_changed) {
|
||||
palette_control->palette_changed = 0;
|
||||
if (pal) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
/* make the palette available on the way out */
|
||||
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = s->frame;
|
||||
|
@@ -424,40 +424,11 @@ const char *avcodec_get_pix_fmt_name(enum PixelFormat pix_fmt)
|
||||
}
|
||||
#endif
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
enum PixelFormat avcodec_get_pix_fmt(const char *name)
|
||||
{
|
||||
return av_get_pix_fmt(name);
|
||||
}
|
||||
|
||||
void avcodec_pix_fmt_string (char *buf, int buf_size, enum PixelFormat pix_fmt)
|
||||
{
|
||||
av_get_pix_fmt_string(buf, buf_size, pix_fmt);
|
||||
}
|
||||
#endif
|
||||
|
||||
int ff_is_hwaccel_pix_fmt(enum PixelFormat pix_fmt)
|
||||
{
|
||||
return av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_HWACCEL;
|
||||
}
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
int ff_set_systematic_pal(uint32_t pal[256], enum PixelFormat pix_fmt){
|
||||
return ff_set_systematic_pal2(pal, pix_fmt);
|
||||
}
|
||||
|
||||
int ff_fill_linesize(AVPicture *picture, enum PixelFormat pix_fmt, int width)
|
||||
{
|
||||
return av_image_fill_linesizes(picture->linesize, pix_fmt, width);
|
||||
}
|
||||
|
||||
int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, enum PixelFormat pix_fmt,
|
||||
int height)
|
||||
{
|
||||
return av_image_fill_pointers(picture->data, pix_fmt, height, ptr, picture->linesize);
|
||||
}
|
||||
#endif
|
||||
|
||||
int avpicture_fill(AVPicture *picture, uint8_t *ptr,
|
||||
enum PixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
@@ -654,7 +625,8 @@ static enum PixelFormat avcodec_find_best_pix_fmt1(int64_t pix_fmt_mask,
|
||||
/* find exact color match with smallest size */
|
||||
dst_pix_fmt = PIX_FMT_NONE;
|
||||
min_dist = 0x7fffffff;
|
||||
for(i = 0;i < PIX_FMT_NB; i++) {
|
||||
/* test only the first 64 pixel formats to avoid undefined behaviour */
|
||||
for (i = 0; i < 64; i++) {
|
||||
if (pix_fmt_mask & (1ULL << i)) {
|
||||
loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
|
||||
if (loss == 0) {
|
||||
@@ -702,28 +674,6 @@ enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelForma
|
||||
return dst_pix_fmt;
|
||||
}
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
|
||||
const uint8_t *src, int src_wrap,
|
||||
int width, int height)
|
||||
{
|
||||
av_image_copy_plane(dst, dst_wrap, src, src_wrap, width, height);
|
||||
}
|
||||
|
||||
int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane)
|
||||
{
|
||||
return av_image_get_linesize(pix_fmt, width, plane);
|
||||
}
|
||||
|
||||
void av_picture_data_copy(uint8_t *dst_data[4], int dst_linesize[4],
|
||||
uint8_t *src_data[4], int src_linesize[4],
|
||||
enum PixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
av_image_copy(dst_data, dst_linesize, src_data, src_linesize,
|
||||
pix_fmt, width, height);
|
||||
}
|
||||
#endif
|
||||
|
||||
void av_picture_copy(AVPicture *dst, const AVPicture *src,
|
||||
enum PixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
|
@@ -76,6 +76,8 @@ typedef struct {
|
||||
int is_scalable;
|
||||
uint32_t lock_word;
|
||||
IVIPicConfig pic_conf;
|
||||
|
||||
int gop_invalid;
|
||||
} IVI5DecContext;
|
||||
|
||||
|
||||
@@ -339,8 +341,12 @@ static int decode_pic_hdr(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
ctx->frame_num = get_bits(&ctx->gb, 8);
|
||||
|
||||
if (ctx->frame_type == FRAMETYPE_INTRA) {
|
||||
if (decode_gop_header(ctx, avctx))
|
||||
return -1;
|
||||
ctx->gop_invalid = 1;
|
||||
if (decode_gop_header(ctx, avctx)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid GOP header, skipping frames.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
ctx->gop_invalid = 0;
|
||||
}
|
||||
|
||||
if (ctx->frame_type != FRAMETYPE_NULL) {
|
||||
@@ -457,6 +463,16 @@ static int decode_mb_info(IVI5DecContext *ctx, IVIBandDesc *band,
|
||||
ref_mb = tile->ref_mbs;
|
||||
offs = tile->ypos * band->pitch + tile->xpos;
|
||||
|
||||
if (!ref_mb &&
|
||||
((band->qdelta_present && band->inherit_qdelta) || band->inherit_mv))
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches parameters %d\n",
|
||||
tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size));
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* scale factor for motion vectors */
|
||||
mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3);
|
||||
mv_x = mv_y = 0;
|
||||
@@ -607,8 +623,10 @@ static int decode_band(IVI5DecContext *ctx, int plane_num,
|
||||
|
||||
tile->is_empty = get_bits1(&ctx->gb);
|
||||
if (tile->is_empty) {
|
||||
ff_ivi_process_empty_tile(avctx, band, tile,
|
||||
result = ff_ivi_process_empty_tile(avctx, band, tile,
|
||||
(ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3));
|
||||
if (result < 0)
|
||||
break;
|
||||
} else {
|
||||
tile->data_size = ff_ivi_dec_tile_data_size(&ctx->gb);
|
||||
|
||||
@@ -755,6 +773,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
|
||||
"Error while decoding picture header: %d\n", result);
|
||||
return -1;
|
||||
}
|
||||
if (ctx->gop_invalid)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (ctx->gop_flags & IVI5_IS_PROTECTED) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Password-protected clip!\n");
|
||||
@@ -784,6 +804,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
|
||||
avctx->release_buffer(avctx, &ctx->frame);
|
||||
|
||||
ctx->frame.reference = 0;
|
||||
avcodec_set_dimensions(avctx, ctx->planes[0].width, ctx->planes[0].height);
|
||||
if (avctx->get_buffer(avctx, &ctx->frame) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return -1;
|
||||
|
@@ -65,8 +65,8 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
|
||||
s->pb_frame = get_bits1(&s->gb);
|
||||
|
||||
if (format < 6) {
|
||||
s->width = h263_format[format][0];
|
||||
s->height = h263_format[format][1];
|
||||
s->width = ff_h263_format[format][0];
|
||||
s->height = ff_h263_format[format][1];
|
||||
s->avctx->sample_aspect_ratio.num = 12;
|
||||
s->avctx->sample_aspect_ratio.den = 11;
|
||||
} else {
|
||||
|
@@ -69,6 +69,7 @@ typedef struct IpvideoContext {
|
||||
int stride;
|
||||
int upper_motion_limit_offset;
|
||||
|
||||
uint32_t pal[256];
|
||||
} IpvideoContext;
|
||||
|
||||
#define CHECK_STREAM_PTR(stream_ptr, stream_end, n) \
|
||||
@@ -961,7 +962,7 @@ static void ipvideo_decode_opcodes(IpvideoContext *s)
|
||||
|
||||
if (!s->is_16bpp) {
|
||||
/* this is PAL8, so make the palette available */
|
||||
memcpy(s->current_frame.data[1], s->avctx->palctrl->palette, PALETTE_COUNT * 4);
|
||||
memcpy(s->current_frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
|
||||
s->stride = s->current_frame.linesize[0];
|
||||
s->stream_ptr = s->buf + 14; /* data starts 14 bytes in */
|
||||
@@ -1015,10 +1016,6 @@ static av_cold int ipvideo_decode_init(AVCodecContext *avctx)
|
||||
|
||||
s->is_16bpp = avctx->bits_per_coded_sample == 16;
|
||||
avctx->pix_fmt = s->is_16bpp ? PIX_FMT_RGB555 : PIX_FMT_PAL8;
|
||||
if (!s->is_16bpp && s->avctx->palctrl == NULL) {
|
||||
av_log(avctx, AV_LOG_ERROR, " Interplay video: palette expected.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
dsputil_init(&s->dsp, avctx);
|
||||
|
||||
@@ -1041,7 +1038,6 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
IpvideoContext *s = avctx->priv_data;
|
||||
AVPaletteControl *palette_control = avctx->palctrl;
|
||||
|
||||
/* compressed buffer needs to be large enough to at least hold an entire
|
||||
* decoding map */
|
||||
@@ -1058,13 +1054,16 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
|
||||
return -1;
|
||||
}
|
||||
|
||||
ipvideo_decode_opcodes(s);
|
||||
|
||||
if (!s->is_16bpp && palette_control->palette_changed) {
|
||||
palette_control->palette_changed = 0;
|
||||
s->current_frame.palette_has_changed = 1;
|
||||
if (!s->is_16bpp) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
if (pal) {
|
||||
s->current_frame.palette_has_changed = 1;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
ipvideo_decode_opcodes(s);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = s->current_frame;
|
||||
|
||||
|
@@ -100,7 +100,7 @@ static VLC cbpc_b_vlc;
|
||||
/* init vlcs */
|
||||
|
||||
/* XXX: find a better solution to handle static init */
|
||||
void h263_decode_init_vlc(MpegEncContext *s)
|
||||
void ff_h263_decode_init_vlc(MpegEncContext *s)
|
||||
{
|
||||
static int done = 0;
|
||||
|
||||
@@ -117,18 +117,18 @@ void h263_decode_init_vlc(MpegEncContext *s)
|
||||
&ff_h263_cbpy_tab[0][1], 2, 1,
|
||||
&ff_h263_cbpy_tab[0][0], 2, 1, 64);
|
||||
INIT_VLC_STATIC(&mv_vlc, MV_VLC_BITS, 33,
|
||||
&mvtab[0][1], 2, 1,
|
||||
&mvtab[0][0], 2, 1, 538);
|
||||
&ff_mvtab[0][1], 2, 1,
|
||||
&ff_mvtab[0][0], 2, 1, 538);
|
||||
init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
|
||||
init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
|
||||
init_rl(&ff_rl_intra_aic, ff_h263_static_rl_table_store[1]);
|
||||
INIT_VLC_RL(ff_h263_rl_inter, 554);
|
||||
INIT_VLC_RL(rl_intra_aic, 554);
|
||||
INIT_VLC_RL(ff_rl_intra_aic, 554);
|
||||
INIT_VLC_STATIC(&h263_mbtype_b_vlc, H263_MBTYPE_B_VLC_BITS, 15,
|
||||
&h263_mbtype_b_tab[0][1], 2, 1,
|
||||
&h263_mbtype_b_tab[0][0], 2, 1, 80);
|
||||
&ff_h263_mbtype_b_tab[0][1], 2, 1,
|
||||
&ff_h263_mbtype_b_tab[0][0], 2, 1, 80);
|
||||
INIT_VLC_STATIC(&cbpc_b_vlc, CBPC_B_VLC_BITS, 4,
|
||||
&cbpc_b_tab[0][1], 2, 1,
|
||||
&cbpc_b_tab[0][0], 2, 1, 8);
|
||||
&ff_cbpc_b_tab[0][1], 2, 1,
|
||||
&ff_cbpc_b_tab[0][0], 2, 1, 8);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -268,7 +268,7 @@ int ff_h263_resync(MpegEncContext *s){
|
||||
return -1;
|
||||
}
|
||||
|
||||
int h263_decode_motion(MpegEncContext * s, int pred, int f_code)
|
||||
int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code)
|
||||
{
|
||||
int code, val, sign, shift, l;
|
||||
code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
|
||||
@@ -379,16 +379,16 @@ static void preview_obmc(MpegEncContext *s){
|
||||
if ((cbpc & 16) == 0) {
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
/* 16x16 motion prediction */
|
||||
mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
if (s->umvplus)
|
||||
mx = h263p_decode_umotion(s, pred_x);
|
||||
else
|
||||
mx = h263_decode_motion(s, pred_x, 1);
|
||||
mx = ff_h263_decode_motion(s, pred_x, 1);
|
||||
|
||||
if (s->umvplus)
|
||||
my = h263p_decode_umotion(s, pred_y);
|
||||
else
|
||||
my = h263_decode_motion(s, pred_y, 1);
|
||||
my = ff_h263_decode_motion(s, pred_y, 1);
|
||||
|
||||
mot_val[0 ]= mot_val[2 ]=
|
||||
mot_val[0+stride]= mot_val[2+stride]= mx;
|
||||
@@ -397,16 +397,16 @@ static void preview_obmc(MpegEncContext *s){
|
||||
} else {
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
for(i=0;i<4;i++) {
|
||||
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
if (s->umvplus)
|
||||
mx = h263p_decode_umotion(s, pred_x);
|
||||
else
|
||||
mx = h263_decode_motion(s, pred_x, 1);
|
||||
mx = ff_h263_decode_motion(s, pred_x, 1);
|
||||
|
||||
if (s->umvplus)
|
||||
my = h263p_decode_umotion(s, pred_y);
|
||||
else
|
||||
my = h263_decode_motion(s, pred_y, 1);
|
||||
my = ff_h263_decode_motion(s, pred_y, 1);
|
||||
if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
|
||||
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
|
||||
mot_val[0] = mx;
|
||||
@@ -430,7 +430,7 @@ static void h263_decode_dquant(MpegEncContext *s){
|
||||
|
||||
if(s->modified_quant){
|
||||
if(get_bits1(&s->gb))
|
||||
s->qscale= modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
|
||||
s->qscale= ff_modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
|
||||
else
|
||||
s->qscale= get_bits(&s->gb, 5);
|
||||
}else
|
||||
@@ -448,7 +448,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
|
||||
|
||||
scan_table = s->intra_scantable.permutated;
|
||||
if (s->h263_aic && s->mb_intra) {
|
||||
rl = &rl_intra_aic;
|
||||
rl = &ff_rl_intra_aic;
|
||||
i = 0;
|
||||
if (s->ac_pred) {
|
||||
if (s->h263_aic_dir)
|
||||
@@ -537,7 +537,7 @@ retry:
|
||||
if (i >= 64){
|
||||
if(s->alt_inter_vlc && rl == &ff_h263_rl_inter && !s->mb_intra){
|
||||
//Looks like a hack but no, it's the way it is supposed to work ...
|
||||
rl = &rl_intra_aic;
|
||||
rl = &ff_rl_intra_aic;
|
||||
i = 0;
|
||||
s->gb= gb;
|
||||
s->dsp.clear_block(block);
|
||||
@@ -554,7 +554,7 @@ retry:
|
||||
}
|
||||
not_coded:
|
||||
if (s->mb_intra && s->h263_aic) {
|
||||
h263_pred_acdc(s, block, n);
|
||||
ff_h263_pred_acdc(s, block, n);
|
||||
i = 63;
|
||||
}
|
||||
s->block_last_index[n] = i;
|
||||
@@ -653,11 +653,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
/* 16x16 motion prediction */
|
||||
s->mv_type = MV_TYPE_16X16;
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
if (s->umvplus)
|
||||
mx = h263p_decode_umotion(s, pred_x);
|
||||
else
|
||||
mx = h263_decode_motion(s, pred_x, 1);
|
||||
mx = ff_h263_decode_motion(s, pred_x, 1);
|
||||
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
@@ -665,7 +665,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
if (s->umvplus)
|
||||
my = h263p_decode_umotion(s, pred_y);
|
||||
else
|
||||
my = h263_decode_motion(s, pred_y, 1);
|
||||
my = ff_h263_decode_motion(s, pred_y, 1);
|
||||
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
@@ -678,18 +678,18 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
s->mv_type = MV_TYPE_8X8;
|
||||
for(i=0;i<4;i++) {
|
||||
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
if (s->umvplus)
|
||||
mx = h263p_decode_umotion(s, pred_x);
|
||||
else
|
||||
mx = h263_decode_motion(s, pred_x, 1);
|
||||
mx = ff_h263_decode_motion(s, pred_x, 1);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
if (s->umvplus)
|
||||
my = h263p_decode_umotion(s, pred_y);
|
||||
else
|
||||
my = h263_decode_motion(s, pred_y, 1);
|
||||
my = ff_h263_decode_motion(s, pred_y, 1);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
s->mv[0][i][0] = mx;
|
||||
@@ -761,11 +761,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
//FIXME UMV
|
||||
|
||||
if(USES_LIST(mb_type, 0)){
|
||||
int16_t *mot_val= h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
int16_t *mot_val= ff_h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
|
||||
mx = h263_decode_motion(s, mx, 1);
|
||||
my = h263_decode_motion(s, my, 1);
|
||||
mx = ff_h263_decode_motion(s, mx, 1);
|
||||
my = ff_h263_decode_motion(s, my, 1);
|
||||
|
||||
s->mv[0][0][0] = mx;
|
||||
s->mv[0][0][1] = my;
|
||||
@@ -774,11 +774,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
}
|
||||
|
||||
if(USES_LIST(mb_type, 1)){
|
||||
int16_t *mot_val= h263_pred_motion(s, 0, 1, &mx, &my);
|
||||
int16_t *mot_val= ff_h263_pred_motion(s, 0, 1, &mx, &my);
|
||||
s->mv_dir |= MV_DIR_BACKWARD;
|
||||
|
||||
mx = h263_decode_motion(s, mx, 1);
|
||||
my = h263_decode_motion(s, my, 1);
|
||||
mx = ff_h263_decode_motion(s, mx, 1);
|
||||
my = ff_h263_decode_motion(s, my, 1);
|
||||
|
||||
s->mv[1][0][0] = mx;
|
||||
s->mv[1][0][1] = my;
|
||||
@@ -829,8 +829,8 @@ intra:
|
||||
}
|
||||
|
||||
while(pb_mv_count--){
|
||||
h263_decode_motion(s, 0, 1);
|
||||
h263_decode_motion(s, 0, 1);
|
||||
ff_h263_decode_motion(s, 0, 1);
|
||||
ff_h263_decode_motion(s, 0, 1);
|
||||
}
|
||||
|
||||
/* decode each block */
|
||||
@@ -864,7 +864,7 @@ end:
|
||||
}
|
||||
|
||||
/* most is hardcoded. should extend to handle all h263 streams */
|
||||
int h263_decode_picture_header(MpegEncContext *s)
|
||||
int ff_h263_decode_picture_header(MpegEncContext *s)
|
||||
{
|
||||
int format, width, height, i;
|
||||
uint32_t startcode;
|
||||
@@ -916,8 +916,8 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
if (format != 7 && format != 6) {
|
||||
s->h263_plus = 0;
|
||||
/* H.263v1 */
|
||||
width = h263_format[format][0];
|
||||
height = h263_format[format][1];
|
||||
width = ff_h263_format[format][0];
|
||||
height = ff_h263_format[format][1];
|
||||
if (!width)
|
||||
return -1;
|
||||
|
||||
@@ -1026,8 +1026,8 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
s->avctx->sample_aspect_ratio= ff_h263_pixel_aspect[s->aspect_ratio_info];
|
||||
}
|
||||
} else {
|
||||
width = h263_format[format][0];
|
||||
height = h263_format[format][1];
|
||||
width = ff_h263_format[format][0];
|
||||
height = ff_h263_format[format][1];
|
||||
s->avctx->sample_aspect_ratio= (AVRational){12,11};
|
||||
}
|
||||
if ((width == 0) || (height == 0))
|
||||
|
@@ -102,7 +102,7 @@ av_const int ff_h263_aspect_to_info(AVRational aspect){
|
||||
return FF_ASPECT_EXTENDED;
|
||||
}
|
||||
|
||||
void h263_encode_picture_header(MpegEncContext * s, int picture_number)
|
||||
void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number)
|
||||
{
|
||||
int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
|
||||
int best_clock_code=1;
|
||||
@@ -141,7 +141,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
|
||||
put_bits(&s->pb, 1, 0); /* camera off */
|
||||
put_bits(&s->pb, 1, 0); /* freeze picture release off */
|
||||
|
||||
format = ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format), s->width, s->height);
|
||||
format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height);
|
||||
if (!s->h263_plus) {
|
||||
/* H.263v1 */
|
||||
put_bits(&s->pb, 3, format);
|
||||
@@ -247,7 +247,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
|
||||
/**
|
||||
* Encode a group of blocks header.
|
||||
*/
|
||||
void h263_encode_gob_header(MpegEncContext * s, int mb_line)
|
||||
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line)
|
||||
{
|
||||
put_bits(&s->pb, 17, 1); /* GBSC */
|
||||
|
||||
@@ -333,7 +333,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
|
||||
} else {
|
||||
i = 0;
|
||||
if (s->h263_aic && s->mb_intra)
|
||||
rl = &rl_intra_aic;
|
||||
rl = &ff_rl_intra_aic;
|
||||
|
||||
if(s->alt_inter_vlc && !s->mb_intra){
|
||||
int aic_vlc_bits=0;
|
||||
@@ -353,14 +353,14 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
|
||||
if(level<0) level= -level;
|
||||
|
||||
code = get_rl_index(rl, last, run, level);
|
||||
aic_code = get_rl_index(&rl_intra_aic, last, run, level);
|
||||
aic_code = get_rl_index(&ff_rl_intra_aic, last, run, level);
|
||||
inter_vlc_bits += rl->table_vlc[code][1]+1;
|
||||
aic_vlc_bits += rl_intra_aic.table_vlc[aic_code][1]+1;
|
||||
aic_vlc_bits += ff_rl_intra_aic.table_vlc[aic_code][1]+1;
|
||||
|
||||
if (code == rl->n) {
|
||||
inter_vlc_bits += 1+6+8-1;
|
||||
}
|
||||
if (aic_code == rl_intra_aic.n) {
|
||||
if (aic_code == ff_rl_intra_aic.n) {
|
||||
aic_vlc_bits += 1+6+8-1;
|
||||
wrong_pos += run + 1;
|
||||
}else
|
||||
@@ -370,7 +370,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
|
||||
}
|
||||
i = 0;
|
||||
if(aic_vlc_bits < inter_vlc_bits && wrong_pos > 63)
|
||||
rl = &rl_intra_aic;
|
||||
rl = &ff_rl_intra_aic;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -454,9 +454,9 @@ static void h263p_encode_umotion(MpegEncContext * s, int val)
|
||||
}
|
||||
}
|
||||
|
||||
void h263_encode_mb(MpegEncContext * s,
|
||||
DCTELEM block[6][64],
|
||||
int motion_x, int motion_y)
|
||||
void ff_h263_encode_mb(MpegEncContext * s,
|
||||
DCTELEM block[6][64],
|
||||
int motion_x, int motion_y)
|
||||
{
|
||||
int cbpc, cbpy, i, cbp, pred_x, pred_y;
|
||||
int16_t pred_dc;
|
||||
@@ -500,7 +500,7 @@ void h263_encode_mb(MpegEncContext * s,
|
||||
}
|
||||
|
||||
/* motion vectors: 16x16 mode */
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
|
||||
if (!s->umvplus) {
|
||||
ff_h263_encode_motion_vector(s, motion_x - pred_x,
|
||||
@@ -527,7 +527,7 @@ void h263_encode_mb(MpegEncContext * s,
|
||||
|
||||
for(i=0; i<4; i++){
|
||||
/* motion vectors: 8x8 mode*/
|
||||
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
|
||||
motion_x= s->current_picture.motion_val[0][ s->block_index[i] ][0];
|
||||
motion_y= s->current_picture.motion_val[0][ s->block_index[i] ][1];
|
||||
@@ -561,7 +561,7 @@ void h263_encode_mb(MpegEncContext * s,
|
||||
if(i<4) scale= s->y_dc_scale;
|
||||
else scale= s->c_dc_scale;
|
||||
|
||||
pred_dc = h263_pred_dc(s, i, &dc_ptr[i]);
|
||||
pred_dc = ff_h263_pred_dc(s, i, &dc_ptr[i]);
|
||||
level -= pred_dc;
|
||||
/* Quant */
|
||||
if (level >= 0)
|
||||
@@ -662,7 +662,7 @@ void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
|
||||
if (val == 0) {
|
||||
/* zero vector */
|
||||
code = 0;
|
||||
put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
|
||||
put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
|
||||
} else {
|
||||
bit_size = f_code - 1;
|
||||
range = 1 << bit_size;
|
||||
@@ -677,7 +677,7 @@ void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
|
||||
code = (val >> bit_size) + 1;
|
||||
bits = val & (range - 1);
|
||||
|
||||
put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
|
||||
put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
|
||||
if (bit_size > 0) {
|
||||
put_bits(&s->pb, bit_size, bits);
|
||||
}
|
||||
@@ -693,7 +693,7 @@ static void init_mv_penalty_and_fcode(MpegEncContext *s)
|
||||
for(mv=-MAX_MV; mv<=MAX_MV; mv++){
|
||||
int len;
|
||||
|
||||
if(mv==0) len= mvtab[0][1];
|
||||
if(mv==0) len= ff_mvtab[0][1];
|
||||
else{
|
||||
int val, bit_size, code;
|
||||
|
||||
@@ -705,9 +705,9 @@ static void init_mv_penalty_and_fcode(MpegEncContext *s)
|
||||
val--;
|
||||
code = (val >> bit_size) + 1;
|
||||
if(code<33){
|
||||
len= mvtab[code][1] + 1 + bit_size;
|
||||
len= ff_mvtab[code][1] + 1 + bit_size;
|
||||
}else{
|
||||
len= mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
|
||||
len= ff_mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -769,7 +769,7 @@ static void init_uni_h263_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_t
|
||||
}
|
||||
}
|
||||
|
||||
void h263_encode_init(MpegEncContext *s)
|
||||
void ff_h263_encode_init(MpegEncContext *s)
|
||||
{
|
||||
static int done = 0;
|
||||
|
||||
@@ -777,9 +777,9 @@ void h263_encode_init(MpegEncContext *s)
|
||||
done = 1;
|
||||
|
||||
init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
|
||||
init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
|
||||
init_rl(&ff_rl_intra_aic, ff_h263_static_rl_table_store[1]);
|
||||
|
||||
init_uni_h263_rl_tab(&rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
|
||||
init_uni_h263_rl_tab(&ff_rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
|
||||
init_uni_h263_rl_tab(&ff_h263_rl_inter , NULL, uni_h263_inter_rl_len);
|
||||
|
||||
init_mv_penalty_and_fcode(s);
|
||||
|
@@ -123,6 +123,10 @@ int ff_ivi_dec_huff_desc(GetBitContext *gb, int desc_coded, int which_tab,
|
||||
if (huff_tab->tab_sel == 7) {
|
||||
/* custom huffman table (explicitly encoded) */
|
||||
new_huff.num_rows = get_bits(gb, 4);
|
||||
if (!new_huff.num_rows) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Empty custom Huffman table!\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
for (i = 0; i < new_huff.num_rows; i++)
|
||||
new_huff.xbits[i] = get_bits(gb, 4);
|
||||
@@ -136,9 +140,10 @@ int ff_ivi_dec_huff_desc(GetBitContext *gb, int desc_coded, int which_tab,
|
||||
result = ff_ivi_create_huff_from_desc(&huff_tab->cust_desc,
|
||||
&huff_tab->cust_tab, 0);
|
||||
if (result) {
|
||||
huff_tab->cust_desc.num_rows = 0; // reset faulty description
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Error while initializing custom vlc table!\n");
|
||||
return -1;
|
||||
return result;
|
||||
}
|
||||
}
|
||||
huff_tab->tab = &huff_tab->cust_tab;
|
||||
@@ -207,14 +212,15 @@ int av_cold ff_ivi_init_planes(IVIPlaneDesc *planes, const IVIPicConfig *cfg)
|
||||
band->width = b_width;
|
||||
band->height = b_height;
|
||||
band->pitch = width_aligned;
|
||||
band->bufs[0] = av_malloc(buf_size);
|
||||
band->bufs[1] = av_malloc(buf_size);
|
||||
band->aheight = height_aligned;
|
||||
band->bufs[0] = av_mallocz(buf_size);
|
||||
band->bufs[1] = av_mallocz(buf_size);
|
||||
if (!band->bufs[0] || !band->bufs[1])
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
/* allocate the 3rd band buffer for scalability mode */
|
||||
if (cfg->luma_bands > 1) {
|
||||
band->bufs[2] = av_malloc(buf_size);
|
||||
band->bufs[2] = av_mallocz(buf_size);
|
||||
if (!band->bufs[2])
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
@@ -377,6 +383,21 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
|
||||
mv_x >>= 1;
|
||||
mv_y >>= 1; /* convert halfpel vectors into fullpel ones */
|
||||
}
|
||||
if (mb->type) {
|
||||
int dmv_x, dmv_y, cx, cy;
|
||||
|
||||
dmv_x = mb->mv_x >> band->is_halfpel;
|
||||
dmv_y = mb->mv_y >> band->is_halfpel;
|
||||
cx = mb->mv_x & band->is_halfpel;
|
||||
cy = mb->mv_y & band->is_halfpel;
|
||||
|
||||
if ( mb->xpos + dmv_x < 0
|
||||
|| mb->xpos + dmv_x + band->mb_size + cx > band->pitch
|
||||
|| mb->ypos + dmv_y < 0
|
||||
|| mb->ypos + dmv_y + band->mb_size + cy > band->aheight) {
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (blk = 0; blk < num_blocks; blk++) {
|
||||
@@ -389,6 +410,11 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
|
||||
}
|
||||
|
||||
if (cbp & 1) { /* block coded ? */
|
||||
if (!band->scan) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Scan pattern is not set.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
scan_pos = -1;
|
||||
memset(trvec, 0, num_coeffs*sizeof(trvec[0])); /* zero transform vector */
|
||||
memset(col_flags, 0, sizeof(col_flags)); /* zero column flags */
|
||||
@@ -469,7 +495,7 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
int ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
IVITile *tile, int32_t mv_scale)
|
||||
{
|
||||
int x, y, need_mc, mbn, blk, num_blocks, mv_x, mv_y, mc_type;
|
||||
@@ -480,6 +506,13 @@ void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
void (*mc_no_delta_func)(int16_t *buf, const int16_t *ref_buf, uint32_t pitch,
|
||||
int mc_type);
|
||||
|
||||
if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches "
|
||||
"parameters %d in ivi_process_empty_tile()\n",
|
||||
tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size));
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
offs = tile->ypos * band->pitch + tile->xpos;
|
||||
mb = tile->mbs;
|
||||
ref_mb = tile->ref_mbs;
|
||||
@@ -560,6 +593,8 @@ void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
dst += band->pitch;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
@@ -132,6 +132,7 @@ typedef struct {
|
||||
int band_num; ///< band number
|
||||
int width;
|
||||
int height;
|
||||
int aheight; ///< aligned band height
|
||||
const uint8_t *data_ptr; ///< ptr to the first byte of the band data
|
||||
int data_size; ///< size of the band data
|
||||
int16_t *buf; ///< pointer to the output buffer for this band
|
||||
@@ -324,7 +325,7 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile);
|
||||
* @param[in] tile pointer to the tile descriptor
|
||||
* @param[in] mv_scale scaling factor for motion vectors
|
||||
*/
|
||||
void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
int ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
IVITile *tile, int32_t mv_scale);
|
||||
|
||||
/**
|
||||
|
@@ -28,6 +28,7 @@
|
||||
#include "avcodec.h"
|
||||
#include "bytestream.h"
|
||||
#include "j2k.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/common.h"
|
||||
|
||||
#define JP2_SIG_TYPE 0x6A502020
|
||||
@@ -283,6 +284,10 @@ static int get_cox(J2kDecoderContext *s, J2kCodingStyle *c)
|
||||
c->log2_cblk_width = bytestream_get_byte(&s->buf) + 2; // cblk width
|
||||
c->log2_cblk_height = bytestream_get_byte(&s->buf) + 2; // cblk height
|
||||
|
||||
if (c->log2_cblk_width > 6 || c->log2_cblk_height > 6) {
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
c->cblk_style = bytestream_get_byte(&s->buf);
|
||||
if (c->cblk_style != 0){ // cblk style
|
||||
av_log(s->avctx, AV_LOG_WARNING, "extra cblk styles %X\n", c->cblk_style);
|
||||
@@ -699,6 +704,9 @@ static int decode_cblk(J2kDecoderContext *s, J2kCodingStyle *codsty, J2kT1Contex
|
||||
int bpass_csty_symbol = J2K_CBLK_BYPASS & codsty->cblk_style;
|
||||
int vert_causal_ctx_csty_symbol = J2K_CBLK_VSC & codsty->cblk_style;
|
||||
|
||||
av_assert0(width <= J2K_MAX_CBLKW);
|
||||
av_assert0(height <= J2K_MAX_CBLKH);
|
||||
|
||||
for (y = 0; y < height+2; y++)
|
||||
memset(t1->flags[y], 0, (width+2)*sizeof(int));
|
||||
|
||||
|
@@ -284,6 +284,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
|
||||
int i;
|
||||
int header;
|
||||
int blocksize;
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
if (ctx->pic.data[0])
|
||||
avctx->release_buffer(avctx, &ctx->pic);
|
||||
@@ -315,13 +316,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
|
||||
ctx->pic.pict_type = AV_PICTURE_TYPE_P;
|
||||
}
|
||||
|
||||
/* if palette has been changed, copy it from palctrl */
|
||||
if (ctx->avctx->palctrl && ctx->avctx->palctrl->palette_changed) {
|
||||
memcpy(ctx->pal, ctx->avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
ctx->setpal = 1;
|
||||
ctx->avctx->palctrl->palette_changed = 0;
|
||||
}
|
||||
|
||||
if (header & KMVC_PALETTE) {
|
||||
ctx->pic.palette_has_changed = 1;
|
||||
// palette starts from index 1 and has 127 entries
|
||||
@@ -330,6 +324,11 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
|
||||
}
|
||||
}
|
||||
|
||||
if (pal) {
|
||||
ctx->pic.palette_has_changed = 1;
|
||||
memcpy(ctx->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
if (ctx->setpal) {
|
||||
ctx->setpal = 0;
|
||||
ctx->pic.palette_has_changed = 1;
|
||||
@@ -429,9 +428,6 @@ static av_cold int decode_init(AVCodecContext * avctx)
|
||||
src += 4;
|
||||
}
|
||||
c->setpal = 1;
|
||||
if (c->avctx->palctrl) {
|
||||
c->avctx->palctrl->palette_changed = 0;
|
||||
}
|
||||
}
|
||||
|
||||
avcodec_get_frame_defaults(&c->pic);
|
||||
|
@@ -322,6 +322,11 @@ static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
|
||||
output_zeros:
|
||||
if (l->zeros_rem) {
|
||||
count = FFMIN(l->zeros_rem, width - i);
|
||||
if (end - dst < count) {
|
||||
av_log(l->avctx, AV_LOG_ERROR, "Too many zeros remaining.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
memset(dst, 0, count);
|
||||
l->zeros_rem -= count;
|
||||
dst += count;
|
||||
|
@@ -1151,6 +1151,7 @@ typedef struct Mpeg1Context {
|
||||
int save_width, save_height, save_progressive_seq;
|
||||
AVRational frame_rate_ext; ///< MPEG-2 specific framerate modificator
|
||||
int sync; ///< Did we reach a sync point like a GOP/SEQ/KEYFrame?
|
||||
int extradata_decoded;
|
||||
} Mpeg1Context;
|
||||
|
||||
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
|
||||
@@ -1287,7 +1288,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){
|
||||
s1->save_width != s->width ||
|
||||
s1->save_height != s->height ||
|
||||
s1->save_aspect_info != s->aspect_ratio_info||
|
||||
s1->save_progressive_seq != s->progressive_sequence ||
|
||||
(s1->save_progressive_seq != s->progressive_sequence && (s->height&31)) ||
|
||||
0)
|
||||
{
|
||||
|
||||
@@ -2315,8 +2316,10 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
|
||||
|
||||
s->slice_count= 0;
|
||||
|
||||
if(avctx->extradata && !avctx->frame_number)
|
||||
if (avctx->extradata && !s->extradata_decoded) {
|
||||
decode_chunks(avctx, picture, data_size, avctx->extradata, avctx->extradata_size);
|
||||
s->extradata_decoded = 1;
|
||||
}
|
||||
|
||||
return decode_chunks(avctx, picture, data_size, buf, buf_size);
|
||||
}
|
||||
@@ -2475,18 +2478,10 @@ static int decode_chunks(AVCodecContext *avctx,
|
||||
/* Skip P-frames if we do not have a reference frame or we have an invalid header. */
|
||||
if(s2->pict_type==AV_PICTURE_TYPE_P && !s->sync) break;
|
||||
}
|
||||
#if FF_API_HURRY_UP
|
||||
/* Skip B-frames if we are in a hurry. */
|
||||
if(avctx->hurry_up && s2->pict_type==FF_B_TYPE) break;
|
||||
#endif
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==AV_PICTURE_TYPE_B)
|
||||
||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
break;
|
||||
#if FF_API_HURRY_UP
|
||||
/* Skip everything if we are in a hurry>=5. */
|
||||
if(avctx->hurry_up>=5) break;
|
||||
#endif
|
||||
|
||||
if (!s->mpeg_enc_ctx_allocated) break;
|
||||
|
||||
|
@@ -651,13 +651,13 @@ try_again:
|
||||
if ((cbpc & 16) == 0) {
|
||||
/* 16x16 motion prediction */
|
||||
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
if(!s->mcsel){
|
||||
mx = h263_decode_motion(s, pred_x, s->f_code);
|
||||
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
my = h263_decode_motion(s, pred_y, s->f_code);
|
||||
my = ff_h263_decode_motion(s, pred_y, s->f_code);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
@@ -675,12 +675,12 @@ try_again:
|
||||
int i;
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
for(i=0;i<4;i++) {
|
||||
int16_t *mot_val= h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mx = h263_decode_motion(s, pred_x, s->f_code);
|
||||
int16_t *mot_val= ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
my = h263_decode_motion(s, pred_y, s->f_code);
|
||||
my = ff_h263_decode_motion(s, pred_y, s->f_code);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
mot_val[0] = mx;
|
||||
@@ -1245,14 +1245,14 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
s->field_select[0][0]= get_bits1(&s->gb);
|
||||
s->field_select[0][1]= get_bits1(&s->gb);
|
||||
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
|
||||
for(i=0; i<2; i++){
|
||||
mx = h263_decode_motion(s, pred_x, s->f_code);
|
||||
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
my = h263_decode_motion(s, pred_y/2, s->f_code);
|
||||
my = ff_h263_decode_motion(s, pred_y/2, s->f_code);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
|
||||
@@ -1263,13 +1263,13 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
/* 16x16 motion prediction */
|
||||
s->mv_type = MV_TYPE_16X16;
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
mx = h263_decode_motion(s, pred_x, s->f_code);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
|
||||
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
my = h263_decode_motion(s, pred_y, s->f_code);
|
||||
my = ff_h263_decode_motion(s, pred_y, s->f_code);
|
||||
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
@@ -1280,12 +1280,12 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
s->mv_type = MV_TYPE_8X8;
|
||||
for(i=0;i<4;i++) {
|
||||
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mx = h263_decode_motion(s, pred_x, s->f_code);
|
||||
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
my = h263_decode_motion(s, pred_y, s->f_code);
|
||||
my = ff_h263_decode_motion(s, pred_y, s->f_code);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
s->mv[0][i][0] = mx;
|
||||
@@ -1381,8 +1381,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
if(USES_LIST(mb_type, 0)){
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
|
||||
mx = h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
|
||||
my = h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
|
||||
mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
|
||||
my = ff_h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
|
||||
s->last_mv[0][1][0]= s->last_mv[0][0][0]= s->mv[0][0][0] = mx;
|
||||
s->last_mv[0][1][1]= s->last_mv[0][0][1]= s->mv[0][0][1] = my;
|
||||
}
|
||||
@@ -1390,8 +1390,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
if(USES_LIST(mb_type, 1)){
|
||||
s->mv_dir |= MV_DIR_BACKWARD;
|
||||
|
||||
mx = h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
|
||||
my = h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
|
||||
mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
|
||||
my = ff_h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
|
||||
s->last_mv[1][1][0]= s->last_mv[1][0][0]= s->mv[1][0][0] = mx;
|
||||
s->last_mv[1][1][1]= s->last_mv[1][0][1]= s->mv[1][0][1] = my;
|
||||
}
|
||||
@@ -1402,8 +1402,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
|
||||
for(i=0; i<2; i++){
|
||||
mx = h263_decode_motion(s, s->last_mv[0][i][0] , s->f_code);
|
||||
my = h263_decode_motion(s, s->last_mv[0][i][1]/2, s->f_code);
|
||||
mx = ff_h263_decode_motion(s, s->last_mv[0][i][0] , s->f_code);
|
||||
my = ff_h263_decode_motion(s, s->last_mv[0][i][1]/2, s->f_code);
|
||||
s->last_mv[0][i][0]= s->mv[0][i][0] = mx;
|
||||
s->last_mv[0][i][1]= (s->mv[0][i][1] = my)*2;
|
||||
}
|
||||
@@ -1413,8 +1413,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
s->mv_dir |= MV_DIR_BACKWARD;
|
||||
|
||||
for(i=0; i<2; i++){
|
||||
mx = h263_decode_motion(s, s->last_mv[1][i][0] , s->b_code);
|
||||
my = h263_decode_motion(s, s->last_mv[1][i][1]/2, s->b_code);
|
||||
mx = ff_h263_decode_motion(s, s->last_mv[1][i][0] , s->b_code);
|
||||
my = ff_h263_decode_motion(s, s->last_mv[1][i][1]/2, s->b_code);
|
||||
s->last_mv[1][i][0]= s->mv[1][i][0] = mx;
|
||||
s->last_mv[1][i][1]= (s->mv[1][i][1] = my)*2;
|
||||
}
|
||||
@@ -1426,8 +1426,8 @@ static int mpeg4_decode_mb(MpegEncContext *s,
|
||||
if(IS_SKIP(mb_type))
|
||||
mx=my=0;
|
||||
else{
|
||||
mx = h263_decode_motion(s, 0, 1);
|
||||
my = h263_decode_motion(s, 0, 1);
|
||||
mx = ff_h263_decode_motion(s, 0, 1);
|
||||
my = ff_h263_decode_motion(s, 0, 1);
|
||||
}
|
||||
|
||||
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
|
||||
|
@@ -727,7 +727,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
|
||||
}
|
||||
|
||||
/* motion vectors: 16x16 mode */
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
|
||||
ff_h263_encode_motion_vector(s, motion_x - pred_x,
|
||||
motion_y - pred_y, s->f_code);
|
||||
@@ -751,7 +751,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
|
||||
}
|
||||
|
||||
/* motion vectors: 16x8 interlaced mode */
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
pred_y /=2;
|
||||
|
||||
put_bits(&s->pb, 1, s->field_select[0][0]);
|
||||
@@ -779,7 +779,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
|
||||
|
||||
for(i=0; i<4; i++){
|
||||
/* motion vectors: 8x8 mode*/
|
||||
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
|
||||
ff_h263_encode_motion_vector(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x,
|
||||
s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
|
||||
|
@@ -210,7 +210,7 @@ static void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g){
|
||||
else
|
||||
g->long_end = 4; /* 8000 Hz */
|
||||
|
||||
g->short_start = 2 + (s->sample_rate_index != 8);
|
||||
g->short_start = 3;
|
||||
} else {
|
||||
g->long_end = 0;
|
||||
g->short_start = 0;
|
||||
|
@@ -1138,9 +1138,6 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
|
||||
}
|
||||
}
|
||||
|
||||
#if FF_API_HURRY_UP
|
||||
s->hurry_up= s->avctx->hurry_up;
|
||||
#endif
|
||||
s->error_recognition= avctx->error_recognition;
|
||||
|
||||
/* set dequantizer, we can't do it during init as it might change for mpeg4
|
||||
@@ -2150,9 +2147,6 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
|
||||
}
|
||||
|
||||
/* skip dequant / idct if we are really late ;) */
|
||||
#if FF_API_HURRY_UP
|
||||
if(s->hurry_up>1) goto skip_idct;
|
||||
#endif
|
||||
if(s->avctx->skip_idct){
|
||||
if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
|
||||
||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
|
||||
|
@@ -391,11 +391,6 @@ typedef struct MpegEncContext {
|
||||
int no_rounding; /**< apply no rounding to motion compensation (MPEG4, msmpeg4, ...)
|
||||
for b-frames rounding mode is always 0 */
|
||||
|
||||
#if FF_API_HURRY_UP
|
||||
int hurry_up; /**< when set to 1 during decoding, b frames will be skipped
|
||||
when set to 2 idct/dequant will be skipped too */
|
||||
#endif
|
||||
|
||||
/* macroblock layer */
|
||||
int mb_x, mb_y;
|
||||
int mb_skip_run;
|
||||
|
@@ -725,7 +725,8 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
|
||||
0, 0, 0,
|
||||
ref_picture, pix_op, qpix_op,
|
||||
s->mv[dir][0][0], s->mv[dir][0][1], 16);
|
||||
}else if(!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) && s->mspel && s->codec_id == CODEC_ID_WMV2){
|
||||
} else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
|
||||
s->mspel && s->codec_id == CODEC_ID_WMV2) {
|
||||
ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
|
||||
ref_picture, pix_op,
|
||||
s->mv[dir][0][0], s->mv[dir][0][1], 16);
|
||||
|
@@ -582,7 +582,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
|
||||
break;
|
||||
case CODEC_ID_H263:
|
||||
if (!CONFIG_H263_ENCODER) return -1;
|
||||
if (ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format), s->width, s->height) == 8) {
|
||||
if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height) == 8) {
|
||||
av_log(avctx, AV_LOG_ERROR, "The specified picture size of %dx%d is not valid for the H.263 codec.\nValid sizes are 128x96, 176x144, 352x288, 704x576, and 1408x1152. Try H.263+.\n", s->width, s->height);
|
||||
return -1;
|
||||
}
|
||||
@@ -708,7 +708,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
|
||||
if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
|
||||
ff_h261_encode_init(s);
|
||||
if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
|
||||
h263_encode_init(s);
|
||||
ff_h263_encode_init(s);
|
||||
if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
|
||||
ff_msmpeg4_encode_init(s);
|
||||
if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
|
||||
@@ -1768,7 +1768,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
|
||||
case CODEC_ID_RV10:
|
||||
case CODEC_ID_RV20:
|
||||
if (CONFIG_H263_ENCODER)
|
||||
h263_encode_mb(s, s->block, motion_x, motion_y);
|
||||
ff_h263_encode_mb(s, s->block, motion_x, motion_y);
|
||||
break;
|
||||
case CODEC_ID_MJPEG:
|
||||
if (CONFIG_MJPEG_ENCODER)
|
||||
@@ -2200,7 +2200,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
|
||||
case CODEC_ID_H263:
|
||||
case CODEC_ID_H263P:
|
||||
if (CONFIG_H263_ENCODER)
|
||||
h263_encode_gob_header(s, mb_y);
|
||||
ff_h263_encode_gob_header(s, mb_y);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -2950,7 +2950,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
|
||||
else if (CONFIG_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1)
|
||||
ff_flv_encode_picture_header(s, picture_number);
|
||||
else if (CONFIG_H263_ENCODER)
|
||||
h263_encode_picture_header(s, picture_number);
|
||||
ff_h263_encode_picture_header(s, picture_number);
|
||||
break;
|
||||
case FMT_MPEG1:
|
||||
if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
|
||||
|
@@ -511,7 +511,7 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
|
||||
if (val == 0) {
|
||||
/* zero vector */
|
||||
code = 0;
|
||||
put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
|
||||
put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
|
||||
} else {
|
||||
bit_size = s->f_code - 1;
|
||||
range = 1 << bit_size;
|
||||
@@ -530,7 +530,7 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
|
||||
code = (val >> bit_size) + 1;
|
||||
bits = val & (range - 1);
|
||||
|
||||
put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
|
||||
put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
|
||||
if (bit_size > 0) {
|
||||
put_bits(&s->pb, bit_size, bits);
|
||||
}
|
||||
@@ -579,7 +579,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
|
||||
|
||||
s->misc_bits += get_bits_diff(s);
|
||||
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
msmpeg4v2_encode_motion(s, motion_x - pred_x);
|
||||
msmpeg4v2_encode_motion(s, motion_y - pred_y);
|
||||
}else{
|
||||
@@ -590,7 +590,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
|
||||
s->misc_bits += get_bits_diff(s);
|
||||
|
||||
/* motion vector */
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_msmpeg4_encode_motion(s, motion_x - pred_x,
|
||||
motion_y - pred_y);
|
||||
}
|
||||
@@ -1138,7 +1138,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
|
||||
cbp|= cbpy<<2;
|
||||
if(s->msmpeg4_version==1 || (cbp&3) != 3) cbp^= 0x3C;
|
||||
|
||||
h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
ff_h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
mx= msmpeg4v2_decode_motion(s, mx, 1);
|
||||
my= msmpeg4v2_decode_motion(s, my, 1);
|
||||
|
||||
@@ -1224,7 +1224,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
|
||||
s->rl_table_index = decode012(&s->gb);
|
||||
s->rl_chroma_table_index = s->rl_table_index;
|
||||
}
|
||||
h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
ff_h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
if (ff_msmpeg4_decode_motion(s, &mx, &my) < 0)
|
||||
return -1;
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
@@ -1320,8 +1320,8 @@ av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
|
||||
&v2_mb_type[0][1], 2, 1,
|
||||
&v2_mb_type[0][0], 2, 1, 128);
|
||||
INIT_VLC_STATIC(&v2_mv_vlc, V2_MV_VLC_BITS, 33,
|
||||
&mvtab[0][1], 2, 1,
|
||||
&mvtab[0][0], 2, 1, 538);
|
||||
&ff_mvtab[0][1], 2, 1,
|
||||
&ff_mvtab[0][0], 2, 1, 538);
|
||||
|
||||
INIT_VLC_STATIC(&ff_mb_non_intra_vlc[0], MB_NON_INTRA_VLC_BITS, 128,
|
||||
&wmv2_inter_table[0][0][1], 8, 4,
|
||||
|
@@ -592,9 +592,9 @@ static const int8_t table4_run[168] = {
|
||||
29, 30, 31, 32, 33, 34, 35, 36,
|
||||
};
|
||||
|
||||
extern const uint16_t inter_vlc[103][2];
|
||||
extern const int8_t inter_level[102];
|
||||
extern const int8_t inter_run[102];
|
||||
extern const uint16_t ff_inter_vlc[103][2];
|
||||
extern const int8_t ff_inter_level[102];
|
||||
extern const int8_t ff_inter_run[102];
|
||||
|
||||
extern const uint16_t ff_mpeg4_intra_vlc[103][2];
|
||||
extern const int8_t ff_mpeg4_intra_level[102];
|
||||
@@ -647,9 +647,9 @@ RLTable rl_table[NB_RL_TABLES] = {
|
||||
{
|
||||
102,
|
||||
58,
|
||||
inter_vlc,
|
||||
inter_run,
|
||||
inter_level,
|
||||
ff_inter_vlc,
|
||||
ff_inter_run,
|
||||
ff_inter_level,
|
||||
},
|
||||
};
|
||||
|
||||
|
@@ -26,9 +26,6 @@
|
||||
* http://www.pcisys.net/~melanson/codecs/
|
||||
*
|
||||
* The MS RLE decoder outputs PAL8 colorspace data.
|
||||
*
|
||||
* Note that this decoder expects the palette colors from the end of the
|
||||
* BITMAPINFO header passed through palctrl.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -46,6 +43,7 @@ typedef struct MsrleContext {
|
||||
const unsigned char *buf;
|
||||
int size;
|
||||
|
||||
uint32_t pal[256];
|
||||
} MsrleContext;
|
||||
|
||||
static av_cold int msrle_decode_init(AVCodecContext *avctx)
|
||||
@@ -95,13 +93,16 @@ static int msrle_decode_frame(AVCodecContext *avctx,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (s->avctx->palctrl) {
|
||||
/* make the palette available */
|
||||
memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (s->avctx->palctrl->palette_changed) {
|
||||
if (avctx->bits_per_coded_sample > 1 && avctx->bits_per_coded_sample <= 8) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
if (pal) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
s->avctx->palctrl->palette_changed = 0;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
/* make the palette available */
|
||||
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
/* FIXME how to correctly detect RLE ??? */
|
||||
|
@@ -25,9 +25,6 @@
|
||||
* For more information about the MS Video-1 format, visit:
|
||||
* http://www.pcisys.net/~melanson/codecs/
|
||||
*
|
||||
* This decoder outputs either PAL8 or RGB555 data, depending on the
|
||||
* whether a RGB palette was passed through palctrl;
|
||||
* if it's present, then the data is PAL8; RGB555 otherwise.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -55,6 +52,7 @@ typedef struct Msvideo1Context {
|
||||
|
||||
int mode_8bit; /* if it's not 8-bit, it's 16-bit */
|
||||
|
||||
uint32_t pal[256];
|
||||
} Msvideo1Context;
|
||||
|
||||
static av_cold int msvideo1_decode_init(AVCodecContext *avctx)
|
||||
@@ -64,7 +62,7 @@ static av_cold int msvideo1_decode_init(AVCodecContext *avctx)
|
||||
s->avctx = avctx;
|
||||
|
||||
/* figure out the colorspace based on the presence of a palette */
|
||||
if (s->avctx->palctrl) {
|
||||
if (s->avctx->bits_per_coded_sample == 8) {
|
||||
s->mode_8bit = 1;
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
} else {
|
||||
@@ -174,13 +172,8 @@ static void msvideo1_decode_8bit(Msvideo1Context *s)
|
||||
}
|
||||
|
||||
/* make the palette available on the way out */
|
||||
if (s->avctx->pix_fmt == PIX_FMT_PAL8) {
|
||||
memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (s->avctx->palctrl->palette_changed) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
s->avctx->palctrl->palette_changed = 0;
|
||||
}
|
||||
}
|
||||
if (s->avctx->pix_fmt == PIX_FMT_PAL8)
|
||||
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
static void msvideo1_decode_16bit(Msvideo1Context *s)
|
||||
@@ -310,6 +303,15 @@ static int msvideo1_decode_frame(AVCodecContext *avctx,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (s->mode_8bit) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
if (pal) {
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
s->frame.palette_has_changed = 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (s->mode_8bit)
|
||||
msvideo1_decode_8bit(s);
|
||||
else
|
||||
|
@@ -191,9 +191,10 @@ retry:
|
||||
}
|
||||
if (c->codec_frameheader) {
|
||||
int w, h, q, res;
|
||||
if (buf_size < 12) {
|
||||
if (buf_size < RTJPEG_HEADER_SIZE || buf[4] != RTJPEG_HEADER_SIZE ||
|
||||
buf[5] != RTJPEG_FILE_VERSION) {
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid nuv video frame\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
w = AV_RL16(&buf[6]);
|
||||
h = AV_RL16(&buf[8]);
|
||||
@@ -207,8 +208,8 @@ retry:
|
||||
size_change = 1;
|
||||
goto retry;
|
||||
}
|
||||
buf = &buf[12];
|
||||
buf_size -= 12;
|
||||
buf = &buf[RTJPEG_HEADER_SIZE];
|
||||
buf_size -= RTJPEG_HEADER_SIZE;
|
||||
}
|
||||
|
||||
if ((size_change || keyframe) && c->pic.data[0])
|
||||
|
@@ -1,89 +0,0 @@
|
||||
/*
|
||||
* AVOptions ABI compatibility wrapper
|
||||
* Copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at>
|
||||
*
|
||||
* This file is part of Libav.
|
||||
*
|
||||
* Libav is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* Libav is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with Libav; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "opt.h"
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53 && CONFIG_SHARED && HAVE_SYMVER
|
||||
|
||||
FF_SYMVER(const AVOption *, av_find_opt, (void *obj, const char *name, const char *unit, int mask, int flags), "LIBAVCODEC_52"){
|
||||
return av_find_opt(obj, name, unit, mask, flags);
|
||||
}
|
||||
FF_SYMVER(int, av_set_string3, (void *obj, const char *name, const char *val, int alloc, const AVOption **o_out), "LIBAVCODEC_52"){
|
||||
return av_set_string3(obj, name, val, alloc, o_out);
|
||||
}
|
||||
FF_SYMVER(const AVOption *, av_set_double, (void *obj, const char *name, double n), "LIBAVCODEC_52"){
|
||||
return av_set_double(obj, name, n);
|
||||
}
|
||||
FF_SYMVER(const AVOption *, av_set_q, (void *obj, const char *name, AVRational n), "LIBAVCODEC_52"){
|
||||
return av_set_q(obj, name, n);
|
||||
}
|
||||
FF_SYMVER(const AVOption *, av_set_int, (void *obj, const char *name, int64_t n), "LIBAVCODEC_52"){
|
||||
return av_set_int(obj, name, n);
|
||||
}
|
||||
FF_SYMVER(double, av_get_double, (void *obj, const char *name, const AVOption **o_out), "LIBAVCODEC_52"){
|
||||
return av_get_double(obj, name, o_out);
|
||||
}
|
||||
FF_SYMVER(AVRational, av_get_q, (void *obj, const char *name, const AVOption **o_out), "LIBAVCODEC_52"){
|
||||
return av_get_q(obj, name, o_out);
|
||||
}
|
||||
FF_SYMVER(int64_t, av_get_int, (void *obj, const char *name, const AVOption **o_out), "LIBAVCODEC_52"){
|
||||
return av_get_int(obj, name, o_out);
|
||||
}
|
||||
FF_SYMVER(const char *, av_get_string, (void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len), "LIBAVCODEC_52"){
|
||||
return av_get_string(obj, name, o_out, buf, buf_len);
|
||||
}
|
||||
FF_SYMVER(const AVOption *, av_next_option, (void *obj, const AVOption *last), "LIBAVCODEC_52"){
|
||||
return av_next_option(obj, last);
|
||||
}
|
||||
FF_SYMVER(int, av_opt_show2, (void *obj, void *av_log_obj, int req_flags, int rej_flags), "LIBAVCODEC_52"){
|
||||
return av_opt_show2(obj, av_log_obj, req_flags, rej_flags);
|
||||
}
|
||||
FF_SYMVER(void, av_opt_set_defaults, (void *s), "LIBAVCODEC_52"){
|
||||
return av_opt_set_defaults(s);
|
||||
}
|
||||
FF_SYMVER(void, av_opt_set_defaults2, (void *s, int mask, int flags), "LIBAVCODEC_52"){
|
||||
return av_opt_set_defaults2(s, mask, flags);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if FF_API_SET_STRING_OLD
|
||||
const AVOption *av_set_string2(void *obj, const char *name, const char *val, int alloc){
|
||||
const AVOption *o;
|
||||
if (av_set_string3(obj, name, val, alloc, &o) < 0)
|
||||
return NULL;
|
||||
return o;
|
||||
}
|
||||
|
||||
const AVOption *av_set_string(void *obj, const char *name, const char *val){
|
||||
const AVOption *o;
|
||||
if (av_set_string3(obj, name, val, 0, &o) < 0)
|
||||
return NULL;
|
||||
return o;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if FF_API_OPT_SHOW
|
||||
int av_opt_show(void *obj, void *av_log_obj){
|
||||
return av_opt_show2(obj, av_log_obj,
|
||||
AV_OPT_FLAG_ENCODING_PARAM|AV_OPT_FLAG_DECODING_PARAM, 0);
|
||||
}
|
||||
#endif
|
@@ -1,21 +1,18 @@
|
||||
/*
|
||||
* AVOptions
|
||||
* copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
|
||||
* This file is part of Libav.
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* Libav is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* Libav is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* License along with Libav; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
@@ -28,34 +25,10 @@
|
||||
#ifndef AVCODEC_OPT_H
|
||||
#define AVCODEC_OPT_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* AVOptions
|
||||
*/
|
||||
#include "libavcodec/version.h"
|
||||
|
||||
#include "libavutil/rational.h"
|
||||
#include "avcodec.h"
|
||||
#if FF_API_OPT_H
|
||||
#include "libavutil/opt.h"
|
||||
|
||||
#if FF_API_SET_STRING_OLD
|
||||
/**
|
||||
* @see av_set_string2()
|
||||
*/
|
||||
attribute_deprecated const AVOption *av_set_string(void *obj, const char *name, const char *val);
|
||||
|
||||
/**
|
||||
* @return a pointer to the AVOption corresponding to the field set or
|
||||
* NULL if no matching AVOption exists, or if the value val is not
|
||||
* valid
|
||||
* @see av_set_string3()
|
||||
*/
|
||||
attribute_deprecated const AVOption *av_set_string2(void *obj, const char *name, const char *val, int alloc);
|
||||
#endif
|
||||
#if FF_API_OPT_SHOW
|
||||
/**
|
||||
* @deprecated Use av_opt_show2() instead.
|
||||
*/
|
||||
attribute_deprecated int av_opt_show(void *obj, void *av_log_obj);
|
||||
#endif
|
||||
|
||||
#endif /* AVCODEC_OPT_H */
|
||||
|
@@ -438,7 +438,6 @@ static const AVOption options[]={
|
||||
{"crf_max", "in crf mode, prevents vbv from lowering quality beyond this point", OFFSET(crf_max), FF_OPT_TYPE_FLOAT, {.dbl = DEFAULT }, 0, 51, V|E},
|
||||
{"log_level_offset", "set the log level offset", OFFSET(log_level_offset), FF_OPT_TYPE_INT, {.dbl = 0 }, INT_MIN, INT_MAX },
|
||||
#if FF_API_FLAC_GLOBAL_OPTS
|
||||
{"use_lpc", "sets whether to use LPC mode (FLAC)", OFFSET(use_lpc), FF_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
|
||||
{"lpc_type", "deprecated, use flac-specific options", OFFSET(lpc_type), FF_OPT_TYPE_INT, {.dbl = AV_LPC_TYPE_DEFAULT }, AV_LPC_TYPE_DEFAULT, AV_LPC_TYPE_NB-1, A|E},
|
||||
{"none", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_NONE }, INT_MIN, INT_MAX, A|E, "lpc_type"},
|
||||
{"fixed", NULL, 0, FF_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_FIXED }, INT_MIN, INT_MAX, A|E, "lpc_type"},
|
||||
|
@@ -105,43 +105,6 @@ void ff_fetch_timestamp(AVCodecParserContext *s, int off, int remove){
|
||||
}
|
||||
}
|
||||
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 53
|
||||
/**
|
||||
*
|
||||
* @param buf input
|
||||
* @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output)
|
||||
* @param pts input presentation timestamp
|
||||
* @param dts input decoding timestamp
|
||||
* @param poutbuf will contain a pointer to the first byte of the output frame
|
||||
* @param poutbuf_size will contain the length of the output frame
|
||||
* @return the number of bytes of the input bitstream used
|
||||
*
|
||||
* Example:
|
||||
* @code
|
||||
* while(in_len){
|
||||
* len = av_parser_parse(myparser, AVCodecContext, &data, &size,
|
||||
* in_data, in_len,
|
||||
* pts, dts);
|
||||
* in_data += len;
|
||||
* in_len -= len;
|
||||
*
|
||||
* if(size)
|
||||
* decode_frame(data, size);
|
||||
* }
|
||||
* @endcode
|
||||
*
|
||||
* @deprecated Use av_parser_parse2() instead.
|
||||
*/
|
||||
int av_parser_parse(AVCodecParserContext *s,
|
||||
AVCodecContext *avctx,
|
||||
uint8_t **poutbuf, int *poutbuf_size,
|
||||
const uint8_t *buf, int buf_size,
|
||||
int64_t pts, int64_t dts)
|
||||
{
|
||||
return av_parser_parse2(s, avctx, poutbuf, poutbuf_size, buf, buf_size, pts, dts, AV_NOPTS_VALUE);
|
||||
}
|
||||
#endif
|
||||
|
||||
int av_parser_parse2(AVCodecParserContext *s,
|
||||
AVCodecContext *avctx,
|
||||
uint8_t **poutbuf, int *poutbuf_size,
|
||||
@@ -279,8 +242,10 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
|
||||
if(next == END_NOT_FOUND){
|
||||
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
if(!new_buffer)
|
||||
if(!new_buffer) {
|
||||
pc->index = 0;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
pc->buffer = new_buffer;
|
||||
memcpy(&pc->buffer[pc->index], *buf, *buf_size);
|
||||
pc->index += *buf_size;
|
||||
@@ -293,11 +258,15 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
|
||||
/* append to buffer */
|
||||
if(pc->index){
|
||||
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
if(!new_buffer)
|
||||
if(!new_buffer) {
|
||||
pc->overread_index =
|
||||
pc->index = 0;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
pc->buffer = new_buffer;
|
||||
memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
|
||||
if (next > -FF_INPUT_BUFFER_PADDING_SIZE)
|
||||
memcpy(&pc->buffer[pc->index], *buf,
|
||||
next + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
pc->index = 0;
|
||||
*buf= pc->buffer;
|
||||
}
|
||||
|
@@ -107,7 +107,7 @@ static void png_put_interlaced_row(uint8_t *dst, int width,
|
||||
static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
|
||||
{
|
||||
long i;
|
||||
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
|
||||
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
|
||||
long a = *(long*)(src1+i);
|
||||
long b = *(long*)(src2+i);
|
||||
*(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
|
||||
@@ -148,7 +148,7 @@ static void add_paeth_prediction_c(uint8_t *dst, uint8_t *src, uint8_t *top, int
|
||||
if(bpp >= 2) g = dst[1];\
|
||||
if(bpp >= 3) b = dst[2];\
|
||||
if(bpp >= 4) a = dst[3];\
|
||||
for(; i < size; i+=bpp) {\
|
||||
for(; i <= size - bpp; i+=bpp) {\
|
||||
dst[i+0] = r = op(r, src[i+0], last[i+0]);\
|
||||
if(bpp == 1) continue;\
|
||||
dst[i+1] = g = op(g, src[i+1], last[i+1]);\
|
||||
@@ -164,13 +164,9 @@ static void add_paeth_prediction_c(uint8_t *dst, uint8_t *src, uint8_t *top, int
|
||||
else if(bpp == 2) UNROLL1(2, op)\
|
||||
else if(bpp == 3) UNROLL1(3, op)\
|
||||
else if(bpp == 4) UNROLL1(4, op)\
|
||||
else {\
|
||||
for (; i < size; i += bpp) {\
|
||||
int j;\
|
||||
for (j = 0; j < bpp; j++)\
|
||||
dst[i+j] = op(dst[i+j-bpp], src[i+j], last[i+j]);\
|
||||
}\
|
||||
}
|
||||
for (; i < size; i++) {\
|
||||
dst[i] = op(dst[i-bpp], src[i], last[i]);\
|
||||
}\
|
||||
|
||||
/* NOTE: 'dst' can be equal to 'last' */
|
||||
static void png_filter_row(PNGDecContext *s, uint8_t *dst, int filter_type,
|
||||
|
@@ -383,9 +383,6 @@ static void update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
|
||||
dst->release_buffer = src->release_buffer;
|
||||
|
||||
dst->opaque = src->opaque;
|
||||
#if FF_API_HURRY_UP
|
||||
dst->hurry_up = src->hurry_up;
|
||||
#endif
|
||||
dst->dsp_mask = src->dsp_mask;
|
||||
dst->debug = src->debug;
|
||||
dst->debug_mv = src->debug_mv;
|
||||
|
@@ -1238,6 +1238,11 @@ static void qdm2_decode_super_block (QDM2Context *q)
|
||||
for (i = 0; packet_bytes > 0; i++) {
|
||||
int j;
|
||||
|
||||
if (i>=FF_ARRAY_ELEMS(q->sub_packet_list_A)) {
|
||||
SAMPLES_NEEDED_2("too many packet bytes");
|
||||
return;
|
||||
}
|
||||
|
||||
q->sub_packet_list_A[i].next = NULL;
|
||||
|
||||
if (i > 0) {
|
||||
@@ -1879,6 +1884,10 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
|
||||
av_log(avctx, AV_LOG_ERROR, "Unknown FFT order (%d), contact the developers!\n", s->fft_order);
|
||||
return -1;
|
||||
}
|
||||
if (s->fft_size != (1 << (s->fft_order - 1))) {
|
||||
av_log(avctx, AV_LOG_ERROR, "FFT size %d not power of 2.\n", s->fft_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
ff_rdft_init(&s->rdft_ctx, s->fft_order, IDFT_C2R);
|
||||
ff_mpadsp_init(&s->mpadsp);
|
||||
|
@@ -37,7 +37,7 @@
|
||||
#include "libavcodec/qdm2_tables.h"
|
||||
#else
|
||||
static uint16_t softclip_table[HARDCLIP_THRESHOLD - SOFTCLIP_THRESHOLD + 1];
|
||||
static float noise_table[4096];
|
||||
static float noise_table[4096 + 20];
|
||||
static uint8_t random_dequant_index[256][5];
|
||||
static uint8_t random_dequant_type24[128][3];
|
||||
static float noise_samples[128];
|
||||
|
@@ -260,6 +260,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
AVFrame * ref= (AVFrame*)&a->ref;
|
||||
uint8_t* outdata;
|
||||
int delta;
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
if(ref->data[0])
|
||||
avctx->release_buffer(avctx, ref);
|
||||
@@ -279,11 +280,11 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
|
||||
/* make the palette available on the way out */
|
||||
memcpy(a->pic.data[1], a->avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (a->avctx->palctrl->palette_changed) {
|
||||
if (pal) {
|
||||
a->pic.palette_has_changed = 1;
|
||||
a->avctx->palctrl->palette_changed = 0;
|
||||
memcpy(a->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
memcpy(a->pic.data[1], a->pal, AVPALETTE_SIZE);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = a->pic;
|
||||
@@ -294,10 +295,6 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
static av_cold int decode_init(AVCodecContext *avctx){
|
||||
QpegContext * const a = avctx->priv_data;
|
||||
|
||||
if (!avctx->palctrl) {
|
||||
av_log(avctx, AV_LOG_FATAL, "Missing required palette via palctrl\n");
|
||||
return -1;
|
||||
}
|
||||
avcodec_get_frame_defaults(&a->pic);
|
||||
avcodec_get_frame_defaults(&a->ref);
|
||||
a->avctx = avctx;
|
||||
|
@@ -46,6 +46,7 @@ typedef struct QtrleContext {
|
||||
const unsigned char *buf;
|
||||
int size;
|
||||
|
||||
uint32_t pal[256];
|
||||
} QtrleContext;
|
||||
|
||||
#define CHECK_STREAM_PTR(n) \
|
||||
@@ -519,12 +520,15 @@ static int qtrle_decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
|
||||
if(has_palette) {
|
||||
/* make the palette available on the way out */
|
||||
memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
if (s->avctx->palctrl->palette_changed) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
if (pal) {
|
||||
s->frame.palette_has_changed = 1;
|
||||
s->avctx->palctrl->palette_changed = 0;
|
||||
memcpy(s->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
/* make the palette available on the way out */
|
||||
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
done:
|
||||
|
@@ -184,9 +184,13 @@ static int raw_decode(AVCodecContext *avctx,
|
||||
(av_pix_fmt_descriptors[avctx->pix_fmt].flags & PIX_FMT_PAL))){
|
||||
frame->data[1]= context->palette;
|
||||
}
|
||||
if (avctx->palctrl && avctx->palctrl->palette_changed) {
|
||||
memcpy(frame->data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
|
||||
avctx->palctrl->palette_changed = 0;
|
||||
if (avctx->pix_fmt == PIX_FMT_PAL8) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
|
||||
|
||||
if (pal) {
|
||||
memcpy(frame->data[1], pal, AVPALETTE_SIZE);
|
||||
frame->palette_has_changed = 1;
|
||||
}
|
||||
}
|
||||
if(avctx->pix_fmt==PIX_FMT_BGR24 && ((frame->linesize[0]+3)&~3)*avctx->height <= buf_size)
|
||||
frame->linesize[0] = (frame->linesize[0]+3)&~3;
|
||||
|
@@ -275,17 +275,6 @@ ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
|
||||
return s;
|
||||
}
|
||||
|
||||
#if FF_API_AUDIO_OLD
|
||||
ReSampleContext *audio_resample_init(int output_channels, int input_channels,
|
||||
int output_rate, int input_rate)
|
||||
{
|
||||
return av_audio_resample_init(output_channels, input_channels,
|
||||
output_rate, input_rate,
|
||||
AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16,
|
||||
TAPS, 10, 0, 0.8);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* resample audio. 'nb_samples' is the number of input samples */
|
||||
/* XXX: optimize it ! */
|
||||
int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples)
|
||||
|
@@ -157,6 +157,12 @@ static av_cold int roq_decode_init(AVCodecContext *avctx)
|
||||
RoqContext *s = avctx->priv_data;
|
||||
|
||||
s->avctx = avctx;
|
||||
|
||||
if (avctx->width%16 || avctx->height%16) {
|
||||
av_log_ask_for_sample(avctx, "dimensions not being a multiple of 16 are unsupported\n");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
s->width = avctx->width;
|
||||
s->height = avctx->height;
|
||||
avcodec_get_frame_defaults(&s->frames[0]);
|
||||
|
@@ -83,7 +83,7 @@ static void rpza_decode_stream(RpzaContext *s)
     unsigned short *pixels = (unsigned short *)s->frame.data[0];
 
     int row_ptr = 0;
-    int pixel_ptr = 0;
+    int pixel_ptr = -4;
     int block_ptr;
     int pixel_x, pixel_y;
     int total_blocks;
@@ -139,6 +139,7 @@ static void rpza_decode_stream(RpzaContext *s)
            colorA = AV_RB16 (&s->buf[stream_ptr]);
            stream_ptr += 2;
            while (n_blocks--) {
+               ADVANCE_BLOCK()
                block_ptr = row_ptr + pixel_ptr;
                for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                    for (pixel_x = 0; pixel_x < 4; pixel_x++){
@@ -147,7 +148,6 @@ static void rpza_decode_stream(RpzaContext *s)
                    }
                    block_ptr += row_inc;
                }
-               ADVANCE_BLOCK();
            }
            break;
 
@@ -186,6 +186,7 @@ static void rpza_decode_stream(RpzaContext *s)
            if (s->size - stream_ptr < n_blocks * 4)
                return;
            while (n_blocks--) {
+               ADVANCE_BLOCK();
                block_ptr = row_ptr + pixel_ptr;
                for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                    index = s->buf[stream_ptr++];
@@ -196,7 +197,6 @@ static void rpza_decode_stream(RpzaContext *s)
                    }
                    block_ptr += row_inc;
                }
-               ADVANCE_BLOCK();
            }
            break;
 
@@ -204,6 +204,7 @@ static void rpza_decode_stream(RpzaContext *s)
        case 0x00:
            if (s->size - stream_ptr < 16)
                return;
+           ADVANCE_BLOCK();
            block_ptr = row_ptr + pixel_ptr;
            for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                for (pixel_x = 0; pixel_x < 4; pixel_x++){
@@ -217,7 +218,6 @@ static void rpza_decode_stream(RpzaContext *s)
                }
                block_ptr += row_inc;
            }
-           ADVANCE_BLOCK();
            break;
 
        /* Unknown opcode */
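All of the rpza hunks move ADVANCE_BLOCK() from the bottom of each per-block loop to the top, with pixel_ptr now starting at -4 so the first advance lands on column 0; the position and block-count bookkeeping therefore runs before any pixels are written. A stand-alone sketch of that loop shape is below; the ADVANCE_BLOCK macro body is not part of this diff, so the advance step here is a simplified assumption, not the decoder's actual macro.

    #include <stdio.h>

    int main(void)
    {
        int width = 16, stride = 16;     /* one 16x8 frame of 4x4 blocks */
        int row_ptr = 0, pixel_ptr = -4; /* -4: the first advance lands on column 0 */
        int n_blocks = (width / 4) * 2;

        while (n_blocks--) {
            /* advance *before* decoding, as in the patched decoder */
            pixel_ptr += 4;
            if (pixel_ptr >= width) {
                pixel_ptr = 0;
                row_ptr  += stride * 4;
            }
            /* ... decode one 4x4 block at row_ptr + pixel_ptr ... */
            printf("block at offset %d\n", row_ptr + pixel_ptr);
        }
        return 0;
    }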
@@ -25,6 +25,9 @@
 #include <stdint.h>
 #include "dsputil.h"
 
+#define RTJPEG_FILE_VERSION 0
+#define RTJPEG_HEADER_SIZE 12
+
 typedef struct {
     int w, h;
     DSPContext *dsp;
@@ -362,6 +362,11 @@ static int rv20_decode_picture_header(MpegEncContext *s)
         f= get_bits(&s->gb, av_log2(v)+1);
 
         if(f){
+            if (s->avctx->extradata_size < 8 + 2 * f) {
+                av_log(s->avctx, AV_LOG_ERROR, "Extradata too small.\n");
+                return AVERROR_INVALIDDATA;
+            }
+
             new_w= 4*((uint8_t*)s->avctx->extradata)[6+2*f];
             new_h= 4*((uint8_t*)s->avctx->extradata)[7+2*f];
         }else{
@@ -498,7 +503,7 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx)
     if (MPV_common_init(s) < 0)
         return -1;
 
-    h263_decode_init_vlc(s);
+    ff_h263_decode_init_vlc(s);
 
     /* init rv vlc */
     if (!done) {
@@ -1280,6 +1280,14 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
 
     if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
         if(s->width != r->si.width || s->height != r->si.height){
+
+            if (HAVE_THREADS &&
+                (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
+                av_log_missing_feature(s->avctx, "Width/height changing with "
+                                       "frame threading is", 0);
+                return AVERROR_PATCHWELCOME;
+            }
+
             av_log(s->avctx, AV_LOG_DEBUG, "Changing dimensions to %dx%d\n", r->si.width,r->si.height);
             MPV_common_end(s);
             s->width  = r->si.width;
@@ -1455,27 +1463,24 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
     if(get_slice_offset(avctx, slices_hdr, 0) < 0 ||
        get_slice_offset(avctx, slices_hdr, 0) > buf_size){
         av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, 0), (buf_size-get_slice_offset(avctx, slices_hdr, 0))*8);
     if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
         av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
-    if((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) && si.type == AV_PICTURE_TYPE_B)
-        return -1;
-#if FF_API_HURRY_UP
-    /* skip b frames if we are in a hurry */
-    if(avctx->hurry_up && si.type==FF_B_TYPE) return buf_size;
-#endif
+    if ((!s->last_picture_ptr || !s->last_picture_ptr->data[0]) &&
+        si.type == AV_PICTURE_TYPE_B) {
+        av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
+               "reference data.\n");
+        return AVERROR_INVALIDDATA;
+    }
+
     if(   (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
        || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
-       || avctx->skip_frame >= AVDISCARD_ALL) return avpkt->size;
-#if FF_API_HURRY_UP
-    /* skip everything if we are in a hurry>=5 */
-    if(avctx->hurry_up>=5)
-        return buf_size;
-#endif
+       || avctx->skip_frame >= AVDISCARD_ALL)
+        return avpkt->size;
 
     for(i=0; i<slice_count; i++){
         int offset= get_slice_offset(avctx, slices_hdr, i);
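The return-value changes in this hunk (and in the shorten.c hunks further down) replace bare -1 with the AVERROR family, so callers can tell invalid bitstreams (AVERROR_INVALIDDATA) apart from unsupported features (AVERROR(ENOSYS), AVERROR_PATCHWELCOME) and other failures. A small sketch of how such codes are produced and rendered, purely for illustration:

    #include <errno.h>
    #include <stdio.h>
    #include <libavutil/error.h>

    int main(void)
    {
        char buf[128];
        int codes[3] = { AVERROR_INVALIDDATA, AVERROR(ENOSYS), AVERROR_PATCHWELCOME };
        int i;

        for (i = 0; i < 3; i++) {
            av_strerror(codes[i], buf, sizeof(buf)); /* human-readable description */
            printf("%d: %s\n", codes[i], buf);
        }
        return 0;
    }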
@@ -123,7 +123,7 @@ static const uint8_t rv34_quant_to_vlc_set[2][31] = {
 
 /**
  * table for obtaining the quantizer difference
- * @todo Use with modified_quant_tab from h263data.h.
+ * @todo Use with ff_modified_quant_tab from h263data.h.
  */
 static const uint8_t rv34_dquant_tab[2][32]={
 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
@@ -78,7 +78,7 @@ typedef struct ShortenContext {
     GetBitContext gb;
 
     int min_framesize, max_framesize;
-    int channels;
+    unsigned channels;
 
     int32_t *decoded[MAX_CHANNELS];
     int32_t *decoded_base[MAX_CHANNELS];
@@ -119,11 +119,11 @@ static int allocate_buffers(ShortenContext *s)
     for (chan=0; chan<s->channels; chan++) {
         if(FFMAX(1, s->nmean) >= UINT_MAX/sizeof(int32_t)){
             av_log(s->avctx, AV_LOG_ERROR, "nmean too large\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         if(s->blocksize + s->nwrap >= UINT_MAX/sizeof(int32_t) || s->blocksize + s->nwrap <= (unsigned)s->nwrap){
             av_log(s->avctx, AV_LOG_ERROR, "s->blocksize + s->nwrap too large\n");
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
 
         tmp_ptr = av_realloc(s->offset[chan], sizeof(int32_t)*FFMAX(1, s->nmean));
@@ -209,14 +209,14 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
     init_get_bits(&hb, header, header_size*8);
     if (get_le32(&hb) != MKTAG('R','I','F','F')) {
         av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     skip_bits_long(&hb, 32); /* chunk_size */
 
     if (get_le32(&hb) != MKTAG('W','A','V','E')) {
         av_log(avctx, AV_LOG_ERROR, "missing WAVE tag\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     while (get_le32(&hb) != MKTAG('f','m','t',' ')) {
@@ -227,7 +227,7 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
 
     if (len < 16) {
         av_log(avctx, AV_LOG_ERROR, "fmt chunk was too short\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     wave_format = get_le16(&hb);
@@ -237,7 +237,7 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
         break;
     default:
         av_log(avctx, AV_LOG_ERROR, "unsupported wave format\n");
-        return -1;
+        return AVERROR(ENOSYS);
     }
 
     avctx->channels = get_le16(&hb);
@@ -248,7 +248,7 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header
 
     if (avctx->bits_per_coded_sample != 16) {
         av_log(avctx, AV_LOG_ERROR, "unsupported number of bits per sample\n");
-        return -1;
+        return AVERROR(ENOSYS);
     }
 
     len -= 16;
@@ -342,8 +342,13 @@ static int shorten_decode_frame(AVCodecContext *avctx,
            s->internal_ftype = get_uint(s, TYPESIZE);
 
            s->channels = get_uint(s, CHANSIZE);
-           if (s->channels > MAX_CHANNELS) {
+           if (!s->channels) {
+               av_log(s->avctx, AV_LOG_ERROR, "No channels reported\n");
+               return AVERROR_INVALIDDATA;
+           }
+           if (s->channels <= 0 || s->channels > MAX_CHANNELS) {
                av_log(s->avctx, AV_LOG_ERROR, "too many channels: %d\n", s->channels);
                s->channels = 0;
                return -1;
            }
 
@@ -506,7 +511,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
                s->bitshift = get_ur_golomb_shorten(&s->gb, BITSHIFTSIZE);
                break;
            case FN_BLOCKSIZE: {
-               int blocksize = get_uint(s, av_log2(s->blocksize));
+               unsigned blocksize = get_uint(s, av_log2(s->blocksize));
                if (blocksize > s->blocksize) {
                    av_log(avctx, AV_LOG_ERROR, "Increasing block size is not supported\n");
                    return AVERROR_PATCHWELCOME;
@@ -534,7 +539,7 @@ frame_done:
        av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", i - buf_size);
        s->bitstream_size=0;
        s->bitstream_index=0;
-       return -1;
+       return AVERROR_INVALIDDATA;
    }
    if (s->bitstream_size) {
        s->bitstream_index += i;
@@ -645,7 +645,7 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
        }
        if(bits) { //decode 16-bit data
            for(i = stereo; i >= 0; i--)
-               pred[i] = av_bswap16(get_bits(&gb, 16));
+               pred[i] = sign_extend(av_bswap16(get_bits(&gb, 16)), 16);
            for(i = 0; i <= stereo; i++)
                *samples++ = pred[i];
            for(; i < unp_size / 2; i++) {
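The smacker fix wraps the byteswapped 16-bit prediction value in sign_extend(), because av_bswap16() yields an unsigned value and storing it straight into an int loses the sign of negative PCM samples. A rough stand-in for the helper is sketched below; the real sign_extend lives in libavcodec/mathops.h, so this implementation is only an approximation of its behaviour.

    #include <stdint.h>
    #include <stdio.h>

    /* Reinterpret the low `bits` bits of val as a two's-complement number. */
    static int sign_extend(int val, unsigned bits)
    {
        unsigned shift = 8 * sizeof(int) - bits;
        union { unsigned u; int s; } v = { (unsigned)val << shift };
        return v.s >> shift;
    }

    int main(void)
    {
        uint16_t raw = 0xFF38;                                      /* negative sample, stored unsigned */
        printf("without sign_extend: %d\n", raw);                   /* 65336 (wrong)  */
        printf("with sign_extend:    %d\n", sign_extend(raw, 16));  /* -200 (correct) */
        return 0;
    }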
@@ -54,6 +54,7 @@ typedef struct SmcContext {
     unsigned char color_quads[COLORS_PER_TABLE * CQUAD];
     unsigned char color_octets[COLORS_PER_TABLE * COCTET];
 
+    uint32_t pal[256];
 } SmcContext;
 
 #define GET_BLOCK_COUNT() \
@@ -110,11 +111,7 @@ static void smc_decode_stream(SmcContext *s)
     int color_octet_index = 0;
 
     /* make the palette available */
-    memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
-    if (s->avctx->palctrl->palette_changed) {
-        s->frame.palette_has_changed = 1;
-        s->avctx->palctrl->palette_changed = 0;
-    }
+    memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
 
     chunk_size = AV_RB32(&s->buf[stream_ptr]) & 0x00FFFFFF;
     stream_ptr += 4;
@@ -441,6 +438,7 @@ static int smc_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     SmcContext *s = avctx->priv_data;
+    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
 
     s->buf = buf;
     s->size = buf_size;
@@ -453,6 +451,11 @@ static int smc_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
+    if (pal) {
+        s->frame.palette_has_changed = 1;
+        memcpy(s->pal, pal, AVPALETTE_SIZE);
+    }
+
     smc_decode_stream(s);
 
     *data_size = sizeof(AVFrame);
@@ -2299,7 +2299,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
     s->m.me.map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
     s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
     s->m.obmc_scratchpad= av_mallocz(MB_SIZE*MB_SIZE*12*sizeof(uint32_t));
-    h263_encode_init(&s->m); //mv_penalty
+    ff_h263_encode_init(&s->m); //mv_penalty
 
     s->max_ref_frames = FFMAX(FFMIN(avctx->refs, MAX_REF_FRAMES), 1);
 
@@ -43,7 +43,7 @@
 #undef NDEBUG
 #include <assert.h>
 
-extern const uint8_t mvtab[33][2];
+extern const uint8_t ff_mvtab[33][2];
 
 static VLC svq1_block_type;
 static VLC svq1_motion_component;
@@ -664,9 +664,6 @@ static int svq1_decode_frame(AVCodecContext *avctx,
     //this should be removed after libavcodec can handle more flexible picture types & ordering
     if(s->pict_type==AV_PICTURE_TYPE_B && s->last_picture_ptr==NULL) return buf_size;
 
-#if FF_API_HURRY_UP
-    if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return buf_size;
-#endif
     if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
       ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
       || avctx->skip_frame >= AVDISCARD_ALL)
@@ -771,8 +768,8 @@ static av_cold int svq1_decode_init(AVCodecContext *avctx)
                     &ff_svq1_block_type_vlc[0][0], 2, 1, 6);
 
     INIT_VLC_STATIC(&svq1_motion_component, 7, 33,
-        &mvtab[0][1], 2, 1,
-        &mvtab[0][0], 2, 1, 176);
+        &ff_mvtab[0][1], 2, 1,
+        &ff_mvtab[0][0], 2, 1, 176);
 
     for (i = 0; i < 6; i++) {
         static const uint8_t sizes[2][6] = {{14, 10, 14, 18, 16, 18}, {10, 10, 14, 14, 14, 16}};
@@ -406,7 +406,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
             int mx, my, pred_x, pred_y, dxy;
             int16_t *motion_ptr;
 
-            motion_ptr= h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
+            motion_ptr= ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
             if(s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTER){
                 for(i=0; i<6; i++)
                     init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], 7*32);
@@ -496,7 +496,7 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx)
     s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
     s->mb_type = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int16_t));
     s->dummy = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int32_t));
-    h263_encode_init(&s->m); //mv_penalty
+    ff_h263_encode_init(&s->m); //mv_penalty
 
     return 0;
 }
Some files were not shown because too many files have changed in this diff.