Compare commits
396 Commits: release/2. ... n0.10.6
Changelog (79)

@@ -3,6 +3,85 @@ releases are sorted from youngest to oldest.

version next:


version 0.10.6:

- many bug fixes that were found with Coverity

- The following CVE fixes were backported:
  CVE-2012-2796, CVE-2012-2775, CVE-2012-2772, CVE-2012-2776,
  CVE-2012-2779, CVE-2012-2787, CVE-2012-2794, CVE-2012-2800,
  CVE-2012-2802, CVE-2012-2801, CVE-2012-2786, CVE-2012-2798,
  CVE-2012-2793, CVE-2012-2789, CVE-2012-2788, CVE-2012-2790,
  CVE-2012-2777, CVE-2012-2784

- hundreds of other bug fixes, some possibly security relevant;
  see the git log for details.


version 0.10.5:

- Several bugs and crashes have been fixed, as well as build problems
  with recent mingw64


version 0.10.4:

- Several bugs and crashes have been fixed
  Note: CVE-2012-0851 and CVE-2011-3937 have been fixed in previous releases


version 0.10.3:

- Security fixes in the 4xm demuxer, avi demuxer, cook decoder,
  mm demuxer, mpegvideo decoder, vqavideo decoder (CVE-2012-0947) and
  xmv demuxer.

- Several bugs and crashes have been fixed in the following codecs: AAC,
  APE, H.263, H.264, Indeo 4, Mimic, MJPEG, Motion Pixels Video, RAW,
  TTA, VC1, VQA, WMA Voice, vqavideo.

- Several bugs and crashes have been fixed in the following formats:
  ASF, ID3v2, MOV, xWMA

- This release additionally updates the following codecs to the
  bytestream2 API, which therefore benefit from additional overflow
  checks: truemotion2, utvideo, vqavideo


version 0.10.1:

- Several security fixes and many bug fixes affecting many formats and
  codecs; the list below is not complete.

- swapuv filter

- Several bugs and crashes have been fixed in the following codecs: AAC,
  AC-3, ADPCM, AMR (both NB and WB), ATRAC3, CAVC, Cook, camstudio, DCA,
  DPCM, DSI CIN, DV, EA TGQ, FLAC, fraps, G.722 (both encoder and
  decoder), H.264, huffyuv, BB JV decoder, Indeo 3, KGV1, LCL, the
  libx264 wrapper, MJPEG, mp3on4, Musepack, MPEG1/2, PNG, QDM2, Qt RLE,
  ROQ, RV10, RV30/RV34/RV40, shorten, smacker, subrip, SVQ3, TIFF,
  Truemotion2, TTA, VC1, VMware Screen codec, Vorbis, VP5, VP6, WMA,
  Westwood SNDx, XXAN.

- This release additionally updates the following codecs to the
  bytestream2 API, which therefore benefit from additional overflow
  checks: XXAN, ALG MM, TQG, SMC, Qt SMC, ROQ, PNG

- Several bugs and crashes have been fixed in the following formats:
  AIFF, ASF, DV, Matroska, NSV, MOV, MPEG-TS, Smacker, Sony OpenMG, RM,
  SWF.

- A potential overflow in libswscale for large image sizes has been fixed.

- The following APIs have been added:

  avcodec_is_open()
  avformat_get_riff_video_tags()
  avformat_get_riff_audio_tags()

  Please see the file doc/APIchanges and the Doxygen documentation for
  further information.


version 0.10:
- Fixes: CVE-2011-3929, CVE-2011-3934, CVE-2011-3935, CVE-2011-3936,
  CVE-2011-3937, CVE-2011-3940, CVE-2011-3941, CVE-2011-3944,
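The 0.10.1 notes above list the newly added public entry points. The following is a minimal sketch of how they can be used, not code from this changeset: the helper names `riff_tag_for` and `close_if_open` are invented for illustration, and the lookup relies on the public `av_codec_get_tag()` helper that already exists alongside the new accessors.

```c
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Look up the RIFF/AVI FourCC registered for a codec ID, using the new
 * avformat_get_riff_video_tags() accessor (hypothetical helper). */
static unsigned int riff_tag_for(enum CodecID id)
{
    const struct AVCodecTag *const tables[] = { avformat_get_riff_video_tags(), NULL };
    return av_codec_get_tag(tables, id);
}

/* Close a codec context only if avcodec_open2() actually succeeded on it,
 * using the new avcodec_is_open() query (hypothetical helper). */
static int close_if_open(AVCodecContext *ctx)
{
    return avcodec_is_open(ctx) ? avcodec_close(ctx) : 0;
}
```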
Doxyfile (2)

    @@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
    # This could be handy for archiving the generated documentation or
    # if some version control system is used.

    PROJECT_NUMBER =
    PROJECT_NUMBER = 0.10.6

    # With the PROJECT_LOGO tag one can specify an logo or icon that is included
    # in the documentation. The maximum height of the logo should not exceed 55
    @@ -806,7 +806,7 @@ int opt_codecs(const char *opt, const char *arg)
        if (p2 && strcmp(p->name, p2->name) == 0) {
            if (p->decode)
                decode = 1;
            if (p->encode)
            if (p->encode || p->encode2)
                encode = 1;
            cap |= p->capabilities;
        }
configure (2)

    @@ -1168,6 +1168,7 @@ HAVE_LIST="
        dlfcn_h
        dlopen
        dos_paths
        dxva_h
        ebp_available
        ebx_available
        exp2

    @@ -3047,6 +3048,7 @@ check_func_headers windows.h MapViewOfFile
    check_func_headers windows.h VirtualAlloc

    check_header dlfcn.h
    check_header dxva.h
    check_header dxva2api.h -D_WIN32_WINNT=0x0600
    check_header libcrystalhd/libcrystalhd_if.h
    check_header malloc.h
    @@ -22,6 +22,19 @@ API changes, most recent first:
      muxers supporting it (av_write_frame makes sure it is called
      only for muxers with this flag).

    2012-03-04 - xxxxxxx - lavu 51.22.1 - error.h
      Add AVERROR_UNKNOWN

    2012-02-29 - xxxxxxx - lavf 53.21.0
      Add avformat_get_riff_video_tags() and avformat_get_riff_audio_tags().

    2012-02-29 - xxxxxxx - lavu 51.22.0 - intfloat.h
      Add a new installed header libavutil/intfloat.h with int/float punning
      functions.

    2012-02-17 - xxxxxxx - lavc 53.35.0
      Add avcodec_is_open() function.

    2012-01-15 - lavc 53.34.0
      New audio encoding API:
      b2c75b6 Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by audio
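The lavu 51.22.0 entry above introduces libavutil/intfloat.h. A rough sketch of the punning helpers it installs follows; the wrapper functions are invented for illustration, only `av_int2float()` and `av_float2int()` (and their 64-bit counterparts `av_int2double()`/`av_double2int()`) come from the header.

```c
#include <stdint.h>
#include <libavutil/intfloat.h>

/* Reinterpret a raw 32-bit word as an IEEE-754 float without the
 * union/memcpy boilerplate these helpers are meant to replace. */
static float bits_to_float(uint32_t bits)
{
    return av_int2float(bits);   /* uint32_t -> float, same bit pattern */
}

static uint32_t float_to_bits(float f)
{
    return av_float2int(f);      /* float -> uint32_t, round-trips exactly */
}
```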
File diff suppressed because it is too large
    @@ -407,6 +407,10 @@ prefix is ``ffmpeg2pass''. The complete file name will be
    @file{PREFIX-N.log}, where N is a number specific to the output
    stream

    Note that this option is overwritten by a local option of the same name
    when using @code{-vcodec libx264}. That option maps to the x264 option stats
    which has a different syntax.

    @item -vlang @var{code}
    Set the ISO 639 language code (3 letters) of the current video stream.
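For libx264 the two-pass state is therefore routed through x264's private "stats" option rather than the generic ffmpeg2pass log. A hedged sketch of the equivalent API-level call is shown below; the function, the pass handling around it, and the log file name are made up for the example, while `av_dict_set()`, `avcodec_open2()`, and the libx264 "stats" option are the pieces the documentation refers to.

```c
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

/* Open a libx264 encoder for pass 1 or pass 2, pointing its private
 * "stats" option at a log file instead of the generic ffmpeg2pass log. */
static int open_x264_two_pass(AVCodecContext *enc, AVCodec *codec, int pass)
{
    AVDictionary *opts = NULL;
    int ret;

    enc->flags |= (pass == 1) ? CODEC_FLAG_PASS1 : CODEC_FLAG_PASS2;
    av_dict_set(&opts, "stats", "x264_2pass.log", 0);  /* illustrative name */

    ret = avcodec_open2(enc, codec, &opts);
    av_dict_free(&opts);   /* drop any options the encoder did not consume */
    return ret;
}
```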
    @@ -2549,6 +2549,9 @@ For example:
    will create two separate outputs from the same input, one cropped and
    one padded.

    @section swapuv
    Swap U & V plane.

    @section thumbnail
    Select the most representative frame in a given sequence of consecutive frames.
ffmpeg.c (41)
@@ -505,7 +505,7 @@ static int alloc_buffer(AVCodecContext *s, InputStream *ist, FrameBuffer **pbuf)
|
||||
const int v_shift = i==0 ? 0 : v_chroma_shift;
|
||||
if (s->flags & CODEC_FLAG_EMU_EDGE)
|
||||
buf->data[i] = buf->base[i];
|
||||
else
|
||||
else if (buf->base[i])
|
||||
buf->data[i] = buf->base[i] +
|
||||
FFALIGN((buf->linesize[i]*edge >> v_shift) +
|
||||
(pixel_size*edge >> h_shift), 32);
|
||||
@@ -2626,32 +2626,35 @@ static int transcode_init(OutputFile *output_files, int nb_output_files,
|
||||
break;
|
||||
}
|
||||
/* two pass mode */
|
||||
if (codec->codec_id != CODEC_ID_H264 &&
|
||||
(codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
|
||||
if (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
|
||||
char logfilename[1024];
|
||||
FILE *f;
|
||||
|
||||
snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
|
||||
pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
|
||||
i);
|
||||
if (codec->flags & CODEC_FLAG_PASS2) {
|
||||
char *logbuffer;
|
||||
size_t logbuffer_size;
|
||||
if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
|
||||
logfilename);
|
||||
exit_program(1);
|
||||
if (!strcmp(ost->enc->name, "libx264")) {
|
||||
av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
|
||||
} else {
|
||||
if (codec->flags & CODEC_FLAG_PASS2) {
|
||||
char *logbuffer;
|
||||
size_t logbuffer_size;
|
||||
if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
|
||||
logfilename);
|
||||
exit_program(1);
|
||||
}
|
||||
codec->stats_in = logbuffer;
|
||||
}
|
||||
codec->stats_in = logbuffer;
|
||||
}
|
||||
if (codec->flags & CODEC_FLAG_PASS1) {
|
||||
f = fopen(logfilename, "wb");
|
||||
if (!f) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
|
||||
logfilename, strerror(errno));
|
||||
exit_program(1);
|
||||
if (codec->flags & CODEC_FLAG_PASS1) {
|
||||
f = fopen(logfilename, "wb");
|
||||
if (!f) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
|
||||
logfilename, strerror(errno));
|
||||
exit_program(1);
|
||||
}
|
||||
ost->logfile = f;
|
||||
}
|
||||
ost->logfile = f;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1790,6 +1790,10 @@ int main(int argc, char **argv)
|
||||
|
||||
if (!print_format)
|
||||
print_format = av_strdup("default");
|
||||
if (!print_format) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
w_name = av_strtok(print_format, "=", &buf);
|
||||
w_args = buf;
|
||||
|
||||
|
@@ -3457,6 +3457,9 @@ static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec, int cop
|
||||
{
|
||||
AVStream *fst;
|
||||
|
||||
if(stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
|
||||
return NULL;
|
||||
|
||||
fst = av_mallocz(sizeof(AVStream));
|
||||
if (!fst)
|
||||
return NULL;
|
||||
@@ -3802,6 +3805,9 @@ static void add_codec(FFStream *stream, AVCodecContext *av)
|
||||
{
|
||||
AVStream *st;
|
||||
|
||||
if(stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
|
||||
return NULL;
|
||||
|
||||
/* compute default parameters */
|
||||
switch(av->codec_type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
|
@@ -915,7 +915,7 @@ static av_cold int decode_end(AVCodecContext *avctx){
|
||||
av_freep(&f->cfrm[i].data);
|
||||
f->cfrm[i].allocated_size= 0;
|
||||
}
|
||||
free_vlc(&f->pre_vlc);
|
||||
ff_free_vlc(&f->pre_vlc);
|
||||
if(f->current_picture.data[0])
|
||||
avctx->release_buffer(avctx, &f->current_picture);
|
||||
if(f->last_picture.data[0])
|
||||
|
@@ -47,7 +47,7 @@ typedef struct EightSvxContext {
|
||||
/* buffer used to store the whole audio decoded/interleaved chunk,
|
||||
* which is sent with the first packet */
|
||||
uint8_t *samples;
|
||||
size_t samples_size;
|
||||
int64_t samples_size;
|
||||
int samples_idx;
|
||||
} EightSvxContext;
|
||||
|
||||
|
@@ -594,7 +594,7 @@ OBJS-$(CONFIG_MATROSKA_MUXER) += xiph.o mpeg4audio.o \
|
||||
flacdec.o flacdata.o flac.o \
|
||||
mpegaudiodata.o vorbis_data.o
|
||||
OBJS-$(CONFIG_MP3_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
|
||||
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o timecode.o
|
||||
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ac3tab.o timecode.o
|
||||
OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_MPEGTS_MUXER) += mpegvideo.o mpeg4audio.o
|
||||
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
|
@@ -826,19 +826,20 @@ static int decode_band_types(AACContext *ac, enum BandType band_type[120],
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
|
||||
return -1;
|
||||
}
|
||||
while ((sect_len_incr = get_bits(gb, bits)) == (1 << bits) - 1 && get_bits_left(gb) >= bits)
|
||||
do {
|
||||
sect_len_incr = get_bits(gb, bits);
|
||||
sect_end += sect_len_incr;
|
||||
sect_end += sect_len_incr;
|
||||
if (get_bits_left(gb) < 0 || sect_len_incr == (1 << bits) - 1) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, overread_err);
|
||||
return -1;
|
||||
}
|
||||
if (sect_end > ics->max_sfb) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"Number of bands (%d) exceeds limit (%d).\n",
|
||||
sect_end, ics->max_sfb);
|
||||
return -1;
|
||||
}
|
||||
if (get_bits_left(gb) < 0) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, overread_err);
|
||||
return -1;
|
||||
}
|
||||
if (sect_end > ics->max_sfb) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"Number of bands (%d) exceeds limit (%d).\n",
|
||||
sect_end, ics->max_sfb);
|
||||
return -1;
|
||||
}
|
||||
} while (sect_len_incr == (1 << bits) - 1);
|
||||
for (; k < sect_end; k++) {
|
||||
band_type [idx] = sect_band_type;
|
||||
band_type_run_end[idx++] = sect_end;
|
||||
|
@@ -200,8 +200,8 @@ WINDOW_FUNC(long_start)
|
||||
float *out = sce->ret;
|
||||
|
||||
dsp->vector_fmul(out, audio, lwindow, 1024);
|
||||
memcpy(out + 1024, audio, sizeof(out[0]) * 448);
|
||||
dsp->vector_fmul_reverse(out + 1024 + 448, audio, swindow, 128);
|
||||
memcpy(out + 1024, audio + 1024, sizeof(out[0]) * 448);
|
||||
dsp->vector_fmul_reverse(out + 1024 + 448, audio + 1024 + 448, swindow, 128);
|
||||
memset(out + 1024 + 576, 0, sizeof(out[0]) * 448);
|
||||
}
|
||||
|
||||
@@ -487,10 +487,10 @@ static void deinterleave_input_samples(AACEncContext *s,
|
||||
const float *sptr = samples + channel_map[ch];
|
||||
|
||||
/* copy last 1024 samples of previous frame to the start of the current frame */
|
||||
memcpy(&s->planar_samples[ch][0], &s->planar_samples[ch][1024], 1024 * sizeof(s->planar_samples[0][0]));
|
||||
memcpy(&s->planar_samples[ch][1024], &s->planar_samples[ch][2048], 1024 * sizeof(s->planar_samples[0][0]));
|
||||
|
||||
/* deinterleave */
|
||||
for (i = 1024; i < 1024 * 2; i++) {
|
||||
for (i = 2048; i < 3072; i++) {
|
||||
s->planar_samples[ch][i] = *sptr;
|
||||
sptr += sinc;
|
||||
}
|
||||
|
@@ -275,6 +275,10 @@ int ff_ps_read_data(AVCodecContext *avctx, GetBitContext *gb_host, PSContext *ps
|
||||
err:
|
||||
ps->start = 0;
|
||||
skip_bits_long(gb_host, bits_left);
|
||||
memset(ps->iid_par, 0, sizeof(ps->iid_par));
|
||||
memset(ps->icc_par, 0, sizeof(ps->icc_par));
|
||||
memset(ps->ipd_par, 0, sizeof(ps->ipd_par));
|
||||
memset(ps->opd_par, 0, sizeof(ps->opd_par));
|
||||
return bits_left;
|
||||
}
|
||||
|
||||
|
@@ -542,7 +542,7 @@ static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr)
|
||||
k = sbr->n_master;
|
||||
} while (sb != sbr->kx[1] + sbr->m[1]);
|
||||
|
||||
if (sbr->patch_num_subbands[sbr->num_patches-1] < 3 && sbr->num_patches > 1)
|
||||
if (sbr->num_patches > 1 && sbr->patch_num_subbands[sbr->num_patches-1] < 3)
|
||||
sbr->num_patches--;
|
||||
|
||||
return 0;
|
||||
|
@@ -134,7 +134,7 @@ int avpriv_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr)
|
||||
(hdr->num_blocks * 256.0));
|
||||
hdr->channels = ff_ac3_channels_tab[hdr->channel_mode] + hdr->lfe_on;
|
||||
}
|
||||
hdr->channel_layout = ff_ac3_channel_layout_tab[hdr->channel_mode];
|
||||
hdr->channel_layout = avpriv_ac3_channel_layout_tab[hdr->channel_mode];
|
||||
if (hdr->lfe_on)
|
||||
hdr->channel_layout |= AV_CH_LOW_FREQUENCY;
|
||||
|
||||
|
@@ -1383,7 +1383,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
|
||||
avctx->request_channels < s->channels) {
|
||||
s->out_channels = avctx->request_channels;
|
||||
s->output_mode = avctx->request_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO;
|
||||
s->channel_layout = ff_ac3_channel_layout_tab[s->output_mode];
|
||||
s->channel_layout = avpriv_ac3_channel_layout_tab[s->output_mode];
|
||||
}
|
||||
avctx->channels = s->out_channels;
|
||||
avctx->channel_layout = s->channel_layout;
|
||||
@@ -1408,6 +1408,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
|
||||
avctx->audio_service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE;
|
||||
|
||||
/* get output buffer */
|
||||
avctx->channels = s->out_channels;
|
||||
s->frame.nb_samples = s->num_blocks * 256;
|
||||
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
|
@@ -109,7 +109,7 @@ static void ac3_bit_alloc_calc_bap_c(int16_t *mask, int16_t *psd,
|
||||
int snr_offset, int floor,
|
||||
const uint8_t *bap_tab, uint8_t *bap)
|
||||
{
|
||||
int bin, band;
|
||||
int bin, band, band_end;
|
||||
|
||||
/* special case, if snr offset is -960, set all bap's to zero */
|
||||
if (snr_offset == -960) {
|
||||
@@ -121,12 +121,14 @@ static void ac3_bit_alloc_calc_bap_c(int16_t *mask, int16_t *psd,
|
||||
band = ff_ac3_bin_to_band_tab[start];
|
||||
do {
|
||||
int m = (FFMAX(mask[band] - snr_offset - floor, 0) & 0x1FE0) + floor;
|
||||
int band_end = FFMIN(ff_ac3_band_start_tab[band+1], end);
|
||||
band_end = ff_ac3_band_start_tab[++band];
|
||||
band_end = FFMIN(band_end, end);
|
||||
|
||||
for (; bin < band_end; bin++) {
|
||||
int address = av_clip((psd[bin] - m) >> 5, 0, 63);
|
||||
bap[bin] = bap_tab[address];
|
||||
}
|
||||
} while (end > ff_ac3_band_start_tab[band++]);
|
||||
} while (end > band_end);
|
||||
}
|
||||
|
||||
static void ac3_update_bap_counts_c(uint16_t mant_cnt[16], uint8_t *bap,
|
||||
|
@@ -84,7 +84,7 @@ const uint8_t ff_ac3_channels_tab[8] = {
|
||||
/**
|
||||
* Map audio coding mode (acmod) to channel layout mask.
|
||||
*/
|
||||
const uint16_t ff_ac3_channel_layout_tab[8] = {
|
||||
const uint16_t avpriv_ac3_channel_layout_tab[8] = {
|
||||
AV_CH_LAYOUT_STEREO,
|
||||
AV_CH_LAYOUT_MONO,
|
||||
AV_CH_LAYOUT_STEREO,
|
||||
|
@@ -33,7 +33,7 @@
|
||||
|
||||
extern const uint16_t ff_ac3_frame_size_tab[38][3];
|
||||
extern const uint8_t ff_ac3_channels_tab[8];
|
||||
extern const uint16_t ff_ac3_channel_layout_tab[8];
|
||||
extern const uint16_t avpriv_ac3_channel_layout_tab[8];
|
||||
extern const uint8_t ff_ac3_enc_channel_map[8][2][6];
|
||||
extern const uint8_t ff_ac3_dec_channel_map[8][2][6];
|
||||
extern const uint16_t ff_ac3_sample_rate_tab[3];
|
||||
|
@@ -265,8 +265,9 @@ static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned c
|
||||
return c->predictor;
|
||||
}
|
||||
|
||||
static void xa_decode(short *out, const unsigned char *in,
|
||||
ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
|
||||
static int xa_decode(AVCodecContext *avctx,
|
||||
short *out, const unsigned char *in,
|
||||
ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
|
||||
{
|
||||
int i, j;
|
||||
int shift,filter,f0,f1;
|
||||
@@ -277,6 +278,12 @@ static void xa_decode(short *out, const unsigned char *in,
|
||||
|
||||
shift = 12 - (in[4+i*2] & 15);
|
||||
filter = in[4+i*2] >> 4;
|
||||
if (filter > 4) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Invalid XA-ADPCM filter %d (max. allowed is 4)\n",
|
||||
filter);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
f0 = xa_adpcm_table[filter][0];
|
||||
f1 = xa_adpcm_table[filter][1];
|
||||
|
||||
@@ -304,7 +311,12 @@ static void xa_decode(short *out, const unsigned char *in,
|
||||
|
||||
shift = 12 - (in[5+i*2] & 15);
|
||||
filter = in[5+i*2] >> 4;
|
||||
|
||||
if (filter > 4) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Invalid XA-ADPCM filter %d (max. allowed is 4)\n",
|
||||
filter);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
f0 = xa_adpcm_table[filter][0];
|
||||
f1 = xa_adpcm_table[filter][1];
|
||||
|
||||
@@ -328,6 +340,8 @@ static void xa_decode(short *out, const unsigned char *in,
|
||||
left->sample2 = s_2;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -699,7 +713,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
for (channel = 0; channel < avctx->channels; channel++) {
|
||||
cs = &c->status[channel];
|
||||
cs->predictor = (int16_t)bytestream_get_le16(&src);
|
||||
cs->step_index = *src++;
|
||||
cs->step_index = av_clip(*src++, 0, 88);
|
||||
src++;
|
||||
*samples++ = cs->predictor;
|
||||
}
|
||||
@@ -722,8 +736,8 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
|
||||
c->status[0].predictor = (int16_t)AV_RL16(src + 10);
|
||||
c->status[1].predictor = (int16_t)AV_RL16(src + 12);
|
||||
c->status[0].step_index = src[14];
|
||||
c->status[1].step_index = src[15];
|
||||
c->status[0].step_index = av_clip(src[14], 0, 88);
|
||||
c->status[1].step_index = av_clip(src[15], 0, 88);
|
||||
/* sign extend the predictors */
|
||||
src += 16;
|
||||
diff_channel = c->status[1].predictor;
|
||||
@@ -763,7 +777,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
for (channel = 0; channel < avctx->channels; channel++) {
|
||||
cs = &c->status[channel];
|
||||
cs->predictor = (int16_t)bytestream_get_le16(&src);
|
||||
cs->step_index = *src++;
|
||||
cs->step_index = av_clip(*src++, 0, 88);
|
||||
src++;
|
||||
}
|
||||
|
||||
@@ -815,8 +829,9 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
break;
|
||||
case CODEC_ID_ADPCM_XA:
|
||||
while (buf_size >= 128) {
|
||||
xa_decode(samples, src, &c->status[0], &c->status[1],
|
||||
avctx->channels);
|
||||
if ((ret = xa_decode(avctx, samples, src, &c->status[0],
|
||||
&c->status[1], avctx->channels)) < 0)
|
||||
return ret;
|
||||
src += 128;
|
||||
samples += 28 * 8;
|
||||
buf_size -= 128;
|
||||
@@ -826,7 +841,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
src += 4; // skip sample count (already read)
|
||||
|
||||
for (i=0; i<=st; i++)
|
||||
c->status[i].step_index = bytestream_get_le32(&src);
|
||||
c->status[i].step_index = av_clip(bytestream_get_le32(&src), 0, 88);
|
||||
for (i=0; i<=st; i++)
|
||||
c->status[i].predictor = bytestream_get_le32(&src);
|
||||
|
||||
@@ -1043,11 +1058,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
case CODEC_ID_ADPCM_IMA_SMJPEG:
|
||||
if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) {
|
||||
c->status[0].predictor = sign_extend(bytestream_get_le16(&src), 16);
|
||||
c->status[0].step_index = bytestream_get_le16(&src);
|
||||
c->status[0].step_index = av_clip(bytestream_get_le16(&src), 0, 88);
|
||||
src += 4;
|
||||
} else {
|
||||
c->status[0].predictor = sign_extend(bytestream_get_be16(&src), 16);
|
||||
c->status[0].step_index = bytestream_get_byte(&src);
|
||||
c->status[0].step_index = av_clip(bytestream_get_byte(&src), 0, 88);
|
||||
src += 1;
|
||||
}
|
||||
|
||||
|
@@ -651,6 +651,11 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
for (k = 1; k < sub_blocks; k++)
|
||||
s[k] = s[k - 1] + decode_rice(gb, 0);
|
||||
}
|
||||
for (k = 1; k < sub_blocks; k++)
|
||||
if (s[k] > 32) {
|
||||
av_log(avctx, AV_LOG_ERROR, "k invalid for rice code.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (get_bits1(gb))
|
||||
*bd->shift_lsbs = get_bits(gb, 4) + 1;
|
||||
@@ -663,6 +668,11 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
int opt_order_length = av_ceil_log2(av_clip((bd->block_length >> 3) - 1,
|
||||
2, sconf->max_order + 1));
|
||||
*bd->opt_order = get_bits(gb, opt_order_length);
|
||||
if (*bd->opt_order > sconf->max_order) {
|
||||
*bd->opt_order = sconf->max_order;
|
||||
av_log(avctx, AV_LOG_ERROR, "Predictor order too large!\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
} else {
|
||||
*bd->opt_order = sconf->max_order;
|
||||
}
|
||||
@@ -695,6 +705,10 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
int rice_param = parcor_rice_table[sconf->coef_table][k][1];
|
||||
int offset = parcor_rice_table[sconf->coef_table][k][0];
|
||||
quant_cof[k] = decode_rice(gb, rice_param) + offset;
|
||||
if (quant_cof[k] < -64 || quant_cof[k] > 63) {
|
||||
av_log(avctx, AV_LOG_ERROR, "quant_cof %d is out of range\n", quant_cof[k]);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
// read coefficients 20 to 126
|
||||
@@ -727,7 +741,7 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
bd->ltp_gain[0] = decode_rice(gb, 1) << 3;
|
||||
bd->ltp_gain[1] = decode_rice(gb, 2) << 3;
|
||||
|
||||
r = get_unary(gb, 0, 4);
|
||||
r = get_unary(gb, 0, 3);
|
||||
c = get_bits(gb, 2);
|
||||
bd->ltp_gain[2] = ltp_gain_values[r][c];
|
||||
|
||||
@@ -756,7 +770,6 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
int delta[8];
|
||||
unsigned int k [8];
|
||||
unsigned int b = av_clip((av_ceil_log2(bd->block_length) - 3) >> 1, 0, 5);
|
||||
unsigned int i = start;
|
||||
|
||||
// read most significant bits
|
||||
unsigned int high;
|
||||
@@ -767,29 +780,30 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
|
||||
current_res = bd->raw_samples + start;
|
||||
|
||||
for (sb = 0; sb < sub_blocks; sb++, i = 0) {
|
||||
for (sb = 0; sb < sub_blocks; sb++) {
|
||||
unsigned int sb_len = sb_length - (sb ? 0 : start);
|
||||
|
||||
k [sb] = s[sb] > b ? s[sb] - b : 0;
|
||||
delta[sb] = 5 - s[sb] + k[sb];
|
||||
|
||||
ff_bgmc_decode(gb, sb_length, current_res,
|
||||
ff_bgmc_decode(gb, sb_len, current_res,
|
||||
delta[sb], sx[sb], &high, &low, &value, ctx->bgmc_lut, ctx->bgmc_lut_status);
|
||||
|
||||
current_res += sb_length;
|
||||
current_res += sb_len;
|
||||
}
|
||||
|
||||
ff_bgmc_decode_end(gb);
|
||||
|
||||
|
||||
// read least significant bits and tails
|
||||
i = start;
|
||||
current_res = bd->raw_samples + start;
|
||||
|
||||
for (sb = 0; sb < sub_blocks; sb++, i = 0) {
|
||||
for (sb = 0; sb < sub_blocks; sb++, start = 0) {
|
||||
unsigned int cur_tail_code = tail_code[sx[sb]][delta[sb]];
|
||||
unsigned int cur_k = k[sb];
|
||||
unsigned int cur_s = s[sb];
|
||||
|
||||
for (; i < sb_length; i++) {
|
||||
for (; start < sb_length; start++) {
|
||||
int32_t res = *current_res;
|
||||
|
||||
if (res == cur_tail_code) {
|
||||
@@ -1165,14 +1179,14 @@ static int read_channel_data(ALSDecContext *ctx, ALSChannelData *cd, int c)
|
||||
|
||||
if (current->master_channel != c) {
|
||||
current->time_diff_flag = get_bits1(gb);
|
||||
current->weighting[0] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 32)];
|
||||
current->weighting[1] = mcc_weightings[av_clip(decode_rice(gb, 2) + 14, 0, 32)];
|
||||
current->weighting[2] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 32)];
|
||||
current->weighting[0] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
|
||||
current->weighting[1] = mcc_weightings[av_clip(decode_rice(gb, 2) + 14, 0, 31)];
|
||||
current->weighting[2] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
|
||||
|
||||
if (current->time_diff_flag) {
|
||||
current->weighting[3] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 32)];
|
||||
current->weighting[4] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 32)];
|
||||
current->weighting[5] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 32)];
|
||||
current->weighting[3] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
|
||||
current->weighting[4] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
|
||||
current->weighting[5] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
|
||||
|
||||
current->time_diff_sign = get_bits1(gb);
|
||||
current->time_diff_index = get_bits(gb, ctx->ltp_lag_length - 3) + 3;
|
||||
|
@@ -200,6 +200,10 @@ static enum Mode unpack_bitstream(AMRContext *p, const uint8_t *buf,
|
||||
p->bad_frame_indicator = !get_bits1(&gb); // quality bit
|
||||
skip_bits(&gb, 2); // two padding bits
|
||||
|
||||
if (mode >= N_MODES || buf_size < frame_sizes_nb[mode] + 1) {
|
||||
return NO_DATA;
|
||||
}
|
||||
|
||||
if (mode < MODE_DTX)
|
||||
ff_amr_bit_reorder((uint16_t *) &p->frame, sizeof(AMRNBFrame), buf + 1,
|
||||
amr_unpacking_bitmaps_per_mode[mode]);
|
||||
@@ -947,6 +951,10 @@ static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
|
||||
buf_out = (float *)p->avframe.data[0];
|
||||
|
||||
p->cur_frame_mode = unpack_bitstream(p, buf, buf_size);
|
||||
if (p->cur_frame_mode == NO_DATA) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Corrupt bitstream\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (p->cur_frame_mode == MODE_DTX) {
|
||||
av_log_missing_feature(avctx, "dtx mode", 0);
|
||||
av_log(avctx, AV_LOG_INFO, "Note: libopencore_amrnb supports dtx\n");
|
||||
|
@@ -898,10 +898,10 @@ static float auto_correlation(float *diff_isf, float mean, int lag)
|
||||
* Extrapolate a ISF vector to the 16kHz range (20th order LP)
|
||||
* used at mode 6k60 LP filter for the high frequency band.
|
||||
*
|
||||
* @param[out] out Buffer for extrapolated isf
|
||||
* @param[in] isf Input isf vector
|
||||
* @param[out] isf Buffer for extrapolated isf; contains LP_ORDER
|
||||
* values on input
|
||||
*/
|
||||
static void extrapolate_isf(float out[LP_ORDER_16k], float isf[LP_ORDER])
|
||||
static void extrapolate_isf(float isf[LP_ORDER_16k])
|
||||
{
|
||||
float diff_isf[LP_ORDER - 2], diff_mean;
|
||||
float *diff_hi = diff_isf - LP_ORDER + 1; // diff array for extrapolated indexes
|
||||
@@ -909,8 +909,7 @@ static void extrapolate_isf(float out[LP_ORDER_16k], float isf[LP_ORDER])
|
||||
float est, scale;
|
||||
int i, i_max_corr;
|
||||
|
||||
memcpy(out, isf, (LP_ORDER - 1) * sizeof(float));
|
||||
out[LP_ORDER_16k - 1] = isf[LP_ORDER - 1];
|
||||
isf[LP_ORDER_16k - 1] = isf[LP_ORDER - 1];
|
||||
|
||||
/* Calculate the difference vector */
|
||||
for (i = 0; i < LP_ORDER - 2; i++)
|
||||
@@ -931,16 +930,16 @@ static void extrapolate_isf(float out[LP_ORDER_16k], float isf[LP_ORDER])
|
||||
i_max_corr++;
|
||||
|
||||
for (i = LP_ORDER - 1; i < LP_ORDER_16k - 1; i++)
|
||||
out[i] = isf[i - 1] + isf[i - 1 - i_max_corr]
|
||||
isf[i] = isf[i - 1] + isf[i - 1 - i_max_corr]
|
||||
- isf[i - 2 - i_max_corr];
|
||||
|
||||
/* Calculate an estimate for ISF(18) and scale ISF based on the error */
|
||||
est = 7965 + (out[2] - out[3] - out[4]) / 6.0;
|
||||
scale = 0.5 * (FFMIN(est, 7600) - out[LP_ORDER - 2]) /
|
||||
(out[LP_ORDER_16k - 2] - out[LP_ORDER - 2]);
|
||||
est = 7965 + (isf[2] - isf[3] - isf[4]) / 6.0;
|
||||
scale = 0.5 * (FFMIN(est, 7600) - isf[LP_ORDER - 2]) /
|
||||
(isf[LP_ORDER_16k - 2] - isf[LP_ORDER - 2]);
|
||||
|
||||
for (i = LP_ORDER - 1; i < LP_ORDER_16k - 1; i++)
|
||||
diff_hi[i] = scale * (out[i] - out[i - 1]);
|
||||
diff_hi[i] = scale * (isf[i] - isf[i - 1]);
|
||||
|
||||
/* Stability insurance */
|
||||
for (i = LP_ORDER; i < LP_ORDER_16k - 1; i++)
|
||||
@@ -952,11 +951,11 @@ static void extrapolate_isf(float out[LP_ORDER_16k], float isf[LP_ORDER])
|
||||
}
|
||||
|
||||
for (i = LP_ORDER - 1; i < LP_ORDER_16k - 1; i++)
|
||||
out[i] = out[i - 1] + diff_hi[i] * (1.0f / (1 << 15));
|
||||
isf[i] = isf[i - 1] + diff_hi[i] * (1.0f / (1 << 15));
|
||||
|
||||
/* Scale the ISF vector for 16000 Hz */
|
||||
for (i = 0; i < LP_ORDER_16k - 1; i++)
|
||||
out[i] *= 0.8;
|
||||
isf[i] *= 0.8;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1003,7 +1002,7 @@ static void hb_synthesis(AMRWBContext *ctx, int subframe, float *samples,
|
||||
ff_weighted_vector_sumf(e_isf, isf_past, isf, isfp_inter[subframe],
|
||||
1.0 - isfp_inter[subframe], LP_ORDER);
|
||||
|
||||
extrapolate_isf(e_isf, e_isf);
|
||||
extrapolate_isf(e_isf);
|
||||
|
||||
e_isf[LP_ORDER_16k - 1] *= 2.0;
|
||||
ff_acelp_lsf2lspd(e_isp, e_isf, LP_ORDER_16k);
|
||||
@@ -1095,23 +1094,27 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
|
||||
buf_out = (float *)ctx->avframe.data[0];
|
||||
|
||||
header_size = decode_mime_header(ctx, buf);
|
||||
if (ctx->fr_cur_mode > MODE_SID) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Invalid mode %d\n", ctx->fr_cur_mode);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
expected_fr_size = ((cf_sizes_wb[ctx->fr_cur_mode] + 7) >> 3) + 1;
|
||||
|
||||
if (buf_size < expected_fr_size) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Frame too small (%d bytes). Truncated file?\n", buf_size);
|
||||
*got_frame_ptr = 0;
|
||||
return buf_size;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (!ctx->fr_quality || ctx->fr_cur_mode > MODE_SID)
|
||||
av_log(avctx, AV_LOG_ERROR, "Encountered a bad or corrupted frame\n");
|
||||
|
||||
if (ctx->fr_cur_mode == MODE_SID) /* Comfort noise frame */
|
||||
if (ctx->fr_cur_mode == MODE_SID) { /* Comfort noise frame */
|
||||
av_log_missing_feature(avctx, "SID mode", 1);
|
||||
|
||||
if (ctx->fr_cur_mode >= MODE_SID)
|
||||
return -1;
|
||||
}
|
||||
|
||||
ff_amr_bit_reorder((uint16_t *) &ctx->frame, sizeof(AMRWBFrame),
|
||||
buf + header_size, amr_bit_orderings_by_mode[ctx->fr_cur_mode]);
|
||||
|
@@ -404,9 +404,12 @@ static inline int ape_decode_value(APEContext *ctx, APERice *rice)
|
||||
|
||||
if (tmpk <= 16)
|
||||
x = range_decode_bits(ctx, tmpk);
|
||||
else {
|
||||
else if (tmpk <= 32) {
|
||||
x = range_decode_bits(ctx, 16);
|
||||
x |= (range_decode_bits(ctx, tmpk - 16) << 16);
|
||||
} else {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
x += overflow << tmpk;
|
||||
} else {
|
||||
|
@@ -402,7 +402,7 @@ static int decodeTonalComponents (GetBitContext *gb, tonal_component *pComponent
|
||||
|
||||
for (k=0; k<coded_components; k++) {
|
||||
sfIndx = get_bits(gb,6);
|
||||
if(component_count>=64)
|
||||
if (component_count >= 64)
|
||||
return AVERROR_INVALIDDATA;
|
||||
pComponent[component_count].pos = j * 64 + (get_bits(gb,6));
|
||||
max_coded_values = SAMPLES_PER_FRAME - pComponent[component_count].pos;
|
||||
|
@@ -4032,7 +4032,8 @@ AVCodecContext *avcodec_alloc_context2(enum AVMediaType);
|
||||
|
||||
/**
|
||||
* Allocate an AVCodecContext and set its fields to default values. The
|
||||
* resulting struct can be deallocated by simply calling av_free().
|
||||
* resulting struct can be deallocated by calling avcodec_close() on it followed
|
||||
* by av_free().
|
||||
*
|
||||
* @param codec if non-NULL, allocate private data and initialize defaults
|
||||
* for the given codec. It is illegal to then call avcodec_open2()
|
||||
@@ -4178,6 +4179,11 @@ int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
|
||||
* @endcode
|
||||
*
|
||||
* @param avctx The context to initialize.
|
||||
* @param codec The codec to open this context for. If a non-NULL codec has been
|
||||
* previously passed to avcodec_alloc_context3() or
|
||||
* avcodec_get_context_defaults3() for this context, then this
|
||||
* parameter MUST be either NULL or equal to the previously passed
|
||||
* codec.
|
||||
* @param options A dictionary filled with AVCodecContext and codec-private options.
|
||||
* On return this object will be filled with options that were not found.
|
||||
*
|
||||
@@ -4463,6 +4469,15 @@ int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
|
||||
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
|
||||
const AVSubtitle *sub);
|
||||
|
||||
/**
|
||||
* Close a given AVCodecContext and free all the data associated with it
|
||||
* (but not the AVCodecContext itself).
|
||||
*
|
||||
* Calling this function on an AVCodecContext that hasn't been opened will free
|
||||
* the codec-specific data allocated in avcodec_alloc_context3() /
|
||||
* avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will
|
||||
* do nothing.
|
||||
*/
|
||||
int avcodec_close(AVCodecContext *avctx);
|
||||
|
||||
/**
|
||||
@@ -4874,4 +4889,10 @@ const AVClass *avcodec_get_class(void);
|
||||
*/
|
||||
const AVClass *avcodec_get_frame_class(void);
|
||||
|
||||
/**
|
||||
* @return a positive value if s is open (i.e. avcodec_open2() was called on it
|
||||
* with no corresponding avcodec_close()), 0 otherwise.
|
||||
*/
|
||||
int avcodec_is_open(AVCodecContext *s);
|
||||
|
||||
#endif /* AVCODEC_AVCODEC_H */
|
||||
|
@@ -162,6 +162,7 @@ static av_cold int avs_decode_init(AVCodecContext * avctx)
|
||||
AvsContext *const avs = avctx->priv_data;
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
avcodec_get_frame_defaults(&avs->picture);
|
||||
avcodec_set_dimensions(avctx, 318, 198);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -91,9 +91,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
frame_len_bits = 11;
|
||||
}
|
||||
|
||||
if (avctx->channels > MAX_CHANNELS) {
|
||||
av_log(avctx, AV_LOG_ERROR, "too many channels: %d\n", avctx->channels);
|
||||
return -1;
|
||||
if (avctx->channels < 1 || avctx->channels > MAX_CHANNELS) {
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid number of channels: %d\n", avctx->channels);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
s->version_b = avctx->extradata && avctx->extradata[3] == 'b';
|
||||
|
@@ -253,9 +253,9 @@ static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
|
||||
(byte/word/long) to store the 'bits', 'codes', and 'symbols' tables.
|
||||
|
||||
'use_static' should be set to 1 for tables, which should be freed
|
||||
with av_free_static(), 0 if free_vlc() will be used.
|
||||
with av_free_static(), 0 if ff_free_vlc() will be used.
|
||||
*/
|
||||
int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
|
||||
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
|
||||
const void *bits, int bits_wrap, int bits_size,
|
||||
const void *codes, int codes_wrap, int codes_size,
|
||||
const void *symbols, int symbols_wrap, int symbols_size,
|
||||
@@ -318,7 +318,7 @@ int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
|
||||
}
|
||||
|
||||
|
||||
void free_vlc(VLC *vlc)
|
||||
void ff_free_vlc(VLC *vlc)
|
||||
{
|
||||
av_freep(&vlc->table);
|
||||
}
|
||||
|
@@ -117,7 +117,7 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
|
||||
depth = bytestream_get_le16(&buf);
|
||||
|
||||
if(ihsize == 40 || ihsize == 64 || ihsize == 56)
|
||||
if (ihsize >= 40)
|
||||
comp = bytestream_get_le32(&buf);
|
||||
else
|
||||
comp = BMP_RGB;
|
||||
@@ -132,8 +132,7 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
rgb[0] = bytestream_get_le32(&buf);
|
||||
rgb[1] = bytestream_get_le32(&buf);
|
||||
rgb[2] = bytestream_get_le32(&buf);
|
||||
if (ihsize >= 108)
|
||||
alpha = bytestream_get_le32(&buf);
|
||||
alpha = bytestream_get_le32(&buf);
|
||||
}
|
||||
|
||||
avctx->width = width;
|
||||
@@ -231,9 +230,6 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
if(comp == BMP_RLE4 || comp == BMP_RLE8)
|
||||
memset(p->data[0], 0, avctx->height * p->linesize[0]);
|
||||
|
||||
if(depth == 4 || depth == 8)
|
||||
memset(p->data[1], 0, 1024);
|
||||
|
||||
if(height > 0){
|
||||
ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
|
||||
linesize = -p->linesize[0];
|
||||
@@ -244,6 +240,9 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
|
||||
if(avctx->pix_fmt == PIX_FMT_PAL8){
|
||||
int colors = 1 << depth;
|
||||
|
||||
memset(p->data[1], 0, 1024);
|
||||
|
||||
if(ihsize >= 36){
|
||||
int t;
|
||||
buf = buf0 + 46;
|
||||
|
@@ -21,6 +21,7 @@
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "bytestream.h"
|
||||
#include "libavutil/avassert.h"
|
||||
|
||||
enum BMVFlags{
|
||||
BMV_NOP = 0,
|
||||
@@ -52,7 +53,7 @@ typedef struct BMVDecContext {
|
||||
|
||||
static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame, int frame_off)
|
||||
{
|
||||
int val, saved_val = 0;
|
||||
unsigned val, saved_val = 0;
|
||||
int tmplen = src_len;
|
||||
const uint8_t *src, *source_end = source + src_len;
|
||||
uint8_t *frame_end = frame + SCREEN_WIDE * SCREEN_HIGH;
|
||||
@@ -98,6 +99,8 @@ static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame,
|
||||
}
|
||||
if (!(val & 0xC)) {
|
||||
for (;;) {
|
||||
if(shift>22)
|
||||
return -1;
|
||||
if (!read_two_nibbles) {
|
||||
if (src < source || src >= source_end)
|
||||
return -1;
|
||||
@@ -131,6 +134,7 @@ static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame,
|
||||
}
|
||||
advance_mode = val & 1;
|
||||
len = (val >> 1) - 1;
|
||||
av_assert0(len>0);
|
||||
mode += 1 + advance_mode;
|
||||
if (mode >= 4)
|
||||
mode -= 3;
|
||||
@@ -139,7 +143,7 @@ static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame,
|
||||
switch (mode) {
|
||||
case 1:
|
||||
if (forward) {
|
||||
if (dst - frame + SCREEN_WIDE < frame_off ||
|
||||
if (dst - frame + SCREEN_WIDE < -frame_off ||
|
||||
frame_end - dst < frame_off + len)
|
||||
return -1;
|
||||
for (i = 0; i < len; i++)
|
||||
@@ -147,7 +151,7 @@ static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame,
|
||||
dst += len;
|
||||
} else {
|
||||
dst -= len;
|
||||
if (dst - frame + SCREEN_WIDE < frame_off ||
|
||||
if (dst - frame + SCREEN_WIDE < -frame_off ||
|
||||
frame_end - dst < frame_off + len)
|
||||
return -1;
|
||||
for (i = len - 1; i >= 0; i--)
|
||||
@@ -264,6 +268,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
c->avctx = avctx;
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
|
||||
if (avctx->width != SCREEN_WIDE || avctx->height != SCREEN_HIGH) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid dimension %dx%d\n", avctx->width, avctx->height);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
c->pic.reference = 1;
|
||||
if (avctx->get_buffer(avctx, &c->pic) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
|
@@ -1,6 +1,7 @@
|
||||
/*
|
||||
* Bytestream functions
|
||||
* copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
|
||||
* Copyright (c) 2012 Aneesh Dogra (lionaneesh) <lionaneesh@gmail.com>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
@@ -23,6 +24,7 @@
|
||||
#define AVCODEC_BYTESTREAM_H
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
|
||||
@@ -30,35 +32,57 @@ typedef struct {
|
||||
const uint8_t *buffer, *buffer_end, *buffer_start;
|
||||
} GetByteContext;
|
||||
|
||||
#define DEF_T(type, name, bytes, read, write) \
|
||||
static av_always_inline type bytestream_get_ ## name(const uint8_t **b){\
|
||||
(*b) += bytes;\
|
||||
return read(*b - bytes);\
|
||||
}\
|
||||
static av_always_inline void bytestream_put_ ##name(uint8_t **b, const type value){\
|
||||
write(*b, value);\
|
||||
(*b) += bytes;\
|
||||
}\
|
||||
static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g)\
|
||||
{\
|
||||
return bytestream_get_ ## name(&g->buffer);\
|
||||
}\
|
||||
static av_always_inline type bytestream2_get_ ## name(GetByteContext *g)\
|
||||
{\
|
||||
if (g->buffer_end - g->buffer < bytes)\
|
||||
return 0;\
|
||||
return bytestream2_get_ ## name ## u(g);\
|
||||
}\
|
||||
static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g)\
|
||||
{\
|
||||
if (g->buffer_end - g->buffer < bytes)\
|
||||
return 0;\
|
||||
return read(g->buffer);\
|
||||
typedef struct {
|
||||
uint8_t *buffer, *buffer_end, *buffer_start;
|
||||
int eof;
|
||||
} PutByteContext;
|
||||
|
||||
#define DEF_T(type, name, bytes, read, write) \
|
||||
static av_always_inline type bytestream_get_ ## name(const uint8_t **b) \
|
||||
{ \
|
||||
(*b) += bytes; \
|
||||
return read(*b - bytes); \
|
||||
} \
|
||||
static av_always_inline void bytestream_put_ ## name(uint8_t **b, \
|
||||
const type value) \
|
||||
{ \
|
||||
write(*b, value); \
|
||||
(*b) += bytes; \
|
||||
} \
|
||||
static av_always_inline void bytestream2_put_ ## name ## u(PutByteContext *p, \
|
||||
const type value) \
|
||||
{ \
|
||||
bytestream_put_ ## name(&p->buffer, value); \
|
||||
} \
|
||||
static av_always_inline void bytestream2_put_ ## name(PutByteContext *p, \
|
||||
const type value) \
|
||||
{ \
|
||||
if (!p->eof && (p->buffer_end - p->buffer >= bytes)) { \
|
||||
write(p->buffer, value); \
|
||||
p->buffer += bytes; \
|
||||
} else \
|
||||
p->eof = 1; \
|
||||
} \
|
||||
static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g) \
|
||||
{ \
|
||||
return bytestream_get_ ## name(&g->buffer); \
|
||||
} \
|
||||
static av_always_inline type bytestream2_get_ ## name(GetByteContext *g) \
|
||||
{ \
|
||||
if (g->buffer_end - g->buffer < bytes) \
|
||||
return 0; \
|
||||
return bytestream2_get_ ## name ## u(g); \
|
||||
} \
|
||||
static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g) \
|
||||
{ \
|
||||
if (g->buffer_end - g->buffer < bytes) \
|
||||
return 0; \
|
||||
return read(g->buffer); \
|
||||
}
|
||||
|
||||
#define DEF(name, bytes, read, write) \
    DEF_T(unsigned int, name, bytes, read, write)
#define DEF64(name, bytes, read, write) \
    DEF_T(uint64_t, name, bytes, read, write)

DEF64(le64, 8, AV_RL64, AV_WL64)

@@ -112,11 +136,22 @@ DEF (byte, 1, AV_RB8 , AV_WB8 )
#endif

static av_always_inline void bytestream2_init(GetByteContext *g,
                                              const uint8_t *buf, int buf_size)
                                              const uint8_t *buf,
                                              int buf_size)
{
    g->buffer       = buf;
    g->buffer_start = buf;
    g->buffer_end   = buf + buf_size;
}

static av_always_inline void bytestream2_init_writer(PutByteContext *p,
                                                     uint8_t *buf,
                                                     int buf_size)
{
    p->buffer       = buf;
    p->buffer_start = buf;
    p->buffer_end   = buf + buf_size;
    p->eof          = 0;
}

static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
@@ -124,32 +159,61 @@ static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *
    return g->buffer_end - g->buffer;
}

static av_always_inline unsigned int bytestream2_get_bytes_left_p(PutByteContext *p)
{
    return p->buffer_end - p->buffer;
}

static av_always_inline void bytestream2_skip(GetByteContext *g,
                                              unsigned int size)
{
    g->buffer += FFMIN(g->buffer_end - g->buffer, size);
}

static av_always_inline void bytestream2_skipu(GetByteContext *g,
                                               unsigned int size)
{
    g->buffer += size;
}

static av_always_inline void bytestream2_skip_p(PutByteContext *p,
                                                unsigned int size)
{
    int size2;
    if (p->eof)
        return;
    size2 = FFMIN(p->buffer_end - p->buffer, size);
    if (size2 != size)
        p->eof = 1;
    p->buffer += size2;
}

static av_always_inline int bytestream2_tell(GetByteContext *g)
{
    return (int)(g->buffer - g->buffer_start);
}

static av_always_inline int bytestream2_seek(GetByteContext *g, int offset,
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
{
    return (int)(p->buffer - p->buffer_start);
}

static av_always_inline int bytestream2_seek(GetByteContext *g,
                                             int offset,
                                             int whence)
{
    switch (whence) {
    case SEEK_CUR:
        offset     = av_clip(offset, -(g->buffer - g->buffer_start),
                             g->buffer_end - g->buffer);
        g->buffer += offset;
        break;
    case SEEK_END:
        offset    = av_clip(offset, -(g->buffer_end - g->buffer_start), 0);
        g->buffer = g->buffer_end + offset;
        break;
    case SEEK_SET:
        offset    = av_clip(offset, 0, g->buffer_end - g->buffer_start);
        g->buffer = g->buffer_start + offset;
        break;
    default:
@@ -158,6 +222,37 @@ static av_always_inline int bytestream2_seek(GetByteContext *g, int offset,
    return bytestream2_tell(g);
}

static av_always_inline int bytestream2_seek_p(PutByteContext *p,
                                               int offset,
                                               int whence)
{
    p->eof = 0;
    switch (whence) {
    case SEEK_CUR:
        if (p->buffer_end - p->buffer < offset)
            p->eof = 1;
        offset     = av_clip(offset, -(p->buffer - p->buffer_start),
                             p->buffer_end - p->buffer);
        p->buffer += offset;
        break;
    case SEEK_END:
        if (offset > 0)
            p->eof = 1;
        offset    = av_clip(offset, -(p->buffer_end - p->buffer_start), 0);
        p->buffer = p->buffer_end + offset;
        break;
    case SEEK_SET:
        if (p->buffer_end - p->buffer_start < offset)
            p->eof = 1;
        offset    = av_clip(offset, 0, p->buffer_end - p->buffer_start);
        p->buffer = p->buffer_start + offset;
        break;
    default:
        return AVERROR(EINVAL);
    }
    return bytestream2_tell_p(p);
}

static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
                                                            uint8_t *dst,
                                                            unsigned int size)
@@ -168,14 +263,78 @@ static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
    return size2;
}

static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, uint8_t *dst, unsigned int size)
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g,
                                                             uint8_t *dst,
                                                             unsigned int size)
{
    memcpy(dst, g->buffer, size);
    g->buffer += size;
    return size;
}

static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p,
                                                            const uint8_t *src,
                                                            unsigned int size)
{
    int size2;
    if (p->eof)
        return 0;
    size2 = FFMIN(p->buffer_end - p->buffer, size);
    if (size2 != size)
        p->eof = 1;
    memcpy(p->buffer, src, size2);
    p->buffer += size2;
    return size2;
}

static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p,
                                                             const uint8_t *src,
                                                             unsigned int size)
{
    memcpy(p->buffer, src, size);
    p->buffer += size;
    return size;
}

static av_always_inline void bytestream2_set_buffer(PutByteContext *p,
                                                    const uint8_t c,
                                                    unsigned int size)
{
    int size2;
    if (p->eof)
        return;
    size2 = FFMIN(p->buffer_end - p->buffer, size);
    if (size2 != size)
        p->eof = 1;
    memset(p->buffer, c, size2);
    p->buffer += size2;
}

static av_always_inline void bytestream2_set_bufferu(PutByteContext *p,
                                                     const uint8_t c,
                                                     unsigned int size)
{
    memset(p->buffer, c, size);
    p->buffer += size;
}

static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
{
    return p->eof;
}

static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b,
                                                           uint8_t *dst,
                                                           unsigned int size)
{
    memcpy(dst, *b, size);
    (*b) += size;
    return size;
}

static av_always_inline void bytestream_put_buffer(uint8_t **b, const uint8_t *src, unsigned int size)
static av_always_inline void bytestream_put_buffer(uint8_t **b,
                                                   const uint8_t *src,
                                                   unsigned int size)
{
    memcpy(*b, src, size);
    (*b) += size;
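A minimal usage sketch, not part of the diff above: it shows how the bounded GetByteContext/PutByteContext helpers from this hunk are meant to be used together. The 4-byte magic word and the length-prefixed payload layout are invented for illustration, and the snippet assumes it is compiled inside the libavcodec tree so that bytestream.h and the AVERROR macros are available.

#include "bytestream.h"   /* the header being extended above */

/* Copy a (hypothetical) length-prefixed payload from src into dst using the
 * bounded reader and writer.  Returns bytes written or a negative AVERROR. */
static int copy_payload(uint8_t *dst, int dst_size,
                        const uint8_t *src, int src_size)
{
    GetByteContext gb;
    PutByteContext pb;
    uint8_t tmp[256];
    unsigned int len;

    bytestream2_init(&gb, src, src_size);
    bytestream2_init_writer(&pb, dst, dst_size);

    bytestream2_skip(&gb, 4);                     /* skip an assumed magic word */
    if (bytestream2_get_bytes_left(&gb) < 1)
        return AVERROR_INVALIDDATA;
    len = FFMIN(bytestream2_get_byte(&gb), sizeof(tmp));

    /* short reads are clipped instead of overrunning the input buffer */
    len = bytestream2_get_buffer(&gb, tmp, len);
    bytestream2_put_buffer(&pb, tmp, len);

    /* the writer latches an eof flag instead of writing past the end */
    if (bytestream2_get_eof(&pb))
        return AVERROR(EINVAL);
    return bytestream2_tell_p(&pb);
}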
@@ -609,12 +609,21 @@ static int decode_pic(AVSContext *h) {
static int decode_seq_header(AVSContext *h) {
    MpegEncContext *s = &h->s;
    int frame_rate_code;
    int width, height;

    h->profile = get_bits(&s->gb,8);
    h->level   = get_bits(&s->gb,8);
    skip_bits1(&s->gb); //progressive sequence
    s->width   = get_bits(&s->gb,14);
    s->height  = get_bits(&s->gb,14);

    width  = get_bits(&s->gb, 14);
    height = get_bits(&s->gb, 14);
    if ((s->width || s->height) && (s->width != width || s->height != height)) {
        av_log_missing_feature(s, "Width/height changing in CAVS is", 0);
        return AVERROR_PATCHWELCOME;
    }
    s->width  = width;
    s->height = height;

    skip_bits(&s->gb,2); //chroma format
    skip_bits(&s->gb,3); //sample_precision
    h->aspect_ratio = get_bits(&s->gb,4);
@@ -656,7 +665,8 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
    if (buf_size == 0) {
        if (!s->low_delay && h->DPB[0].f.data[0]) {
            *data_size = sizeof(AVPicture);
            *picture = *(AVFrame *) &h->DPB[0];
            *picture = h->DPB[0].f;
            memset(&h->DPB[0], 0, sizeof(h->DPB[0]));
        }
        return 0;
    }
@@ -280,6 +280,10 @@ static int cdg_decode_frame(AVCodecContext *avctx,
        av_log(avctx, AV_LOG_ERROR, "buffer too small for decoder\n");
        return AVERROR(EINVAL);
    }
    if (buf_size > CDG_HEADER_SIZE + CDG_DATA_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "buffer too big for decoder\n");
        return AVERROR(EINVAL);
    }

    ret = avctx->reget_buffer(avctx, &cc->frame);
    if (ret) {
@@ -133,9 +133,8 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,
        out2 -= val * old_out2;
        out3 -= val * old_out3;

        old_out3 = out[-5];

        for (i = 5; i <= filter_length; i += 2) {
            old_out3 = out[-i];
            val = filter_coeffs[i-1];

            out0 -= val * old_out3;
@@ -154,7 +153,6 @@ void ff_celp_lp_synthesis_filterf(float *out, const float *filter_coeffs,

            FFSWAP(float, old_out0, old_out2);
            old_out1 = old_out3;
            old_out3 = out[-i-2];
        }

        tmp0 = out0;
@@ -321,11 +321,11 @@ static av_cold int cook_decode_close(AVCodecContext *avctx)

    /* Free the VLC tables. */
    for (i = 0; i < 13; i++)
        free_vlc(&q->envelope_quant_index[i]);
        ff_free_vlc(&q->envelope_quant_index[i]);
    for (i = 0; i < 7; i++)
        free_vlc(&q->sqvh[i]);
        ff_free_vlc(&q->sqvh[i]);
    for (i = 0; i < q->num_subpackets; i++)
        free_vlc(&q->subpacket[i].ccpl);
        ff_free_vlc(&q->subpacket[i].ccpl);

    av_log(avctx, AV_LOG_DEBUG, "Memory deallocated.\n");

@@ -366,8 +366,8 @@ static void decode_gain_info(GetBitContext *gb, int *gaininfo)
 * @param q                 pointer to the COOKContext
 * @param quant_index_table pointer to the array
 */
static void decode_envelope(COOKContext *q, COOKSubpacket *p,
                            int *quant_index_table)
static int decode_envelope(COOKContext *q, COOKSubpacket *p,
                           int *quant_index_table)
{
    int i, j, vlc_index;

@@ -388,7 +388,15 @@ static void decode_envelope(COOKContext *q, COOKSubpacket *p,
        j = get_vlc2(&q->gb, q->envelope_quant_index[vlc_index - 1].table,
                     q->envelope_quant_index[vlc_index - 1].bits, 2);
        quant_index_table[i] = quant_index_table[i - 1] + j - 12; // differential encoding
        if (quant_index_table[i] > 63 || quant_index_table[i] < -63) {
            av_log(q->avctx, AV_LOG_ERROR,
                   "Invalid quantizer %d at position %d, outside [-63, 63] range\n",
                   quant_index_table[i], i);
            return AVERROR_INVALIDDATA;
        }
    }

    return 0;
}

/**
@@ -507,7 +515,11 @@ static inline void expand_category(COOKContext *q, int *category,
{
    int i;
    for (i = 0; i < q->num_vectors; i++)
        ++category[category_index[i]];
    {
        int idx = category_index[i];
        if (++category[idx] >= FF_ARRAY_ELEMS(dither_tab))
            --category[idx];
    }
}

/**
@@ -635,20 +647,24 @@ static void decode_vectors(COOKContext *q, COOKSubpacket *p, int *category,
 * @param q          pointer to the COOKContext
 * @param mlt_buffer pointer to mlt coefficients
 */
static void mono_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer)
static int mono_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer)
{
    int category_index[128];
    int quant_index_table[102];
    int category[128];
    int res;

    memset(&category,       0, sizeof(category));
    memset(&category_index, 0, sizeof(category_index));

    decode_envelope(q, p, quant_index_table);
    if ((res = decode_envelope(q, p, quant_index_table)) < 0)
        return res;
    q->num_vectors = get_bits(&q->gb, p->log2_numvector_size);
    categorize(q, p, quant_index_table, category, category_index);
    expand_category(q, category, category_index);
    decode_vectors(q, p, category, quant_index_table, mlt_buffer);

    return 0;
}


@@ -798,10 +814,10 @@ static void decouple_float(COOKContext *q,
 * @param mlt_buffer1 pointer to left channel mlt coefficients
 * @param mlt_buffer2 pointer to right channel mlt coefficients
 */
static void joint_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer1,
                         float *mlt_buffer2)
static int joint_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer1,
                        float *mlt_buffer2)
{
    int i, j;
    int i, j, res;
    int decouple_tab[SUBBAND_SIZE];
    float *decode_buffer = q->decode_buffer_0;
    int idx, cpl_tmp;
@@ -815,7 +831,8 @@ static void joint_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer1,
    memset(mlt_buffer1, 0, 1024 * sizeof(*mlt_buffer1));
    memset(mlt_buffer2, 0, 1024 * sizeof(*mlt_buffer2));
    decouple_info(q, p, decouple_tab);
    mono_decode(q, p, decode_buffer);
    if ((res = mono_decode(q, p, decode_buffer)) < 0)
        return res;

    /* The two channels are stored interleaved in decode_buffer. */
    for (i = 0; i < p->js_subband_start; i++) {
@@ -832,11 +849,13 @@ static void joint_decode(COOKContext *q, COOKSubpacket *p, float *mlt_buffer1,
        cpl_tmp = cplband[i];
        idx -= decouple_tab[cpl_tmp];
        cplscale = q->cplscales[p->js_vlc_bits - 2]; // choose decoupler table
        f1 = cplscale[decouple_tab[cpl_tmp]];
        f2 = cplscale[idx - 1];
        f1 = cplscale[decouple_tab[cpl_tmp] + 1];
        f2 = cplscale[idx];
        q->decouple(q, p, i, f1, f2, decode_buffer, mlt_buffer1, mlt_buffer2);
        idx = (1 << p->js_vlc_bits) - 1;
    }

    return 0;
}

/**
@@ -909,10 +928,11 @@ static inline void mlt_compensate_output(COOKContext *q, float *decode_buffer,
 * @param inbuffer  pointer to the inbuffer
 * @param outbuffer pointer to the outbuffer
 */
static void decode_subpacket(COOKContext *q, COOKSubpacket *p,
                             const uint8_t *inbuffer, float *outbuffer)
static int decode_subpacket(COOKContext *q, COOKSubpacket *p,
                            const uint8_t *inbuffer, float *outbuffer)
{
    int sub_packet_size = p->size;
    int res;
    /* packet dump */
//    for (i = 0; i < sub_packet_size ; i++)
//        av_log(q->avctx, AV_LOG_ERROR, "%02x", inbuffer[i]);
@@ -921,13 +941,16 @@ static void decode_subpacket(COOKContext *q, COOKSubpacket *p,
    decode_bytes_and_gain(q, p, inbuffer, &p->gains1);

    if (p->joint_stereo) {
        joint_decode(q, p, q->decode_buffer_1, q->decode_buffer_2);
        if ((res = joint_decode(q, p, q->decode_buffer_1, q->decode_buffer_2)) < 0)
            return res;
    } else {
        mono_decode(q, p, q->decode_buffer_1);
        if ((res = mono_decode(q, p, q->decode_buffer_1)) < 0)
            return res;

        if (p->num_channels == 2) {
            decode_bytes_and_gain(q, p, inbuffer + sub_packet_size / 2, &p->gains2);
            mono_decode(q, p, q->decode_buffer_2);
            if ((res = mono_decode(q, p, q->decode_buffer_2)) < 0)
                return res;
        }
    }

@@ -941,6 +964,8 @@ static void decode_subpacket(COOKContext *q, COOKSubpacket *p,
    else
        mlt_compensate_output(q, q->decode_buffer_2, &p->gains2,
                              p->mono_previous_buffer2, outbuffer, p->ch_idx + 1);

    return 0;
}


@@ -996,7 +1021,8 @@ static int cook_decode_frame(AVCodecContext *avctx, void *data,
               i, q->subpacket[i].size, q->subpacket[i].joint_stereo, offset,
               avctx->block_align);

        decode_subpacket(q, &q->subpacket[i], buf + offset, samples);
        if ((ret = decode_subpacket(q, &q->subpacket[i], buf + offset, samples)) < 0)
            return ret;
        offset += q->subpacket[i].size;
        chidx += q->subpacket[i].num_channels;
        av_log(avctx, AV_LOG_DEBUG, "subpacket[%i] %i %i\n",
@@ -1078,6 +1104,10 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
    q->sample_rate = avctx->sample_rate;
    q->nb_channels = avctx->channels;
    q->bit_rate = avctx->bit_rate;
    if (!q->nb_channels) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
        return AVERROR_INVALIDDATA;
    }

    /* Initialize RNG. */
    av_lfg_init(&q->random_state, 0);
@@ -1205,6 +1235,11 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
        q->subpacket[s].gains2.now      = q->subpacket[s].gain_3;
        q->subpacket[s].gains2.previous = q->subpacket[s].gain_4;

        if (q->num_subpackets + q->subpacket[s].num_channels > q->nb_channels) {
            av_log(avctx, AV_LOG_ERROR, "Too many subpackets %d for channels %d\n", q->num_subpackets, q->nb_channels);
            return AVERROR_INVALIDDATA;
        }

        q->num_subpackets++;
        s++;
        if (s > MAX_SUBPACKETS) {
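The recurring change in the cook.c hunks above is converting void decode helpers into int-returning functions whose negative AVERROR codes are propagated all the way up to cook_decode_frame(). A standalone sketch of that idiom follows; the function names and the two-stage structure are made up for illustration, only the pattern matches the hunks.

#include <errno.h>

#define AVERROR(e) (-(e))   /* stand-in for libavutil's macro in a standalone build */

/* Stage 1: validate decoded values, same range check as decode_envelope() above. */
static int decode_envelope_example(const int *q, int n)
{
    for (int i = 0; i < n; i++)
        if (q[i] > 63 || q[i] < -63)
            return AVERROR(EINVAL);
    return 0;
}

/* Stage 2: callers check the return value and bail out instead of decoding garbage. */
static int mono_decode_example(const int *q, int n)
{
    int res;
    if ((res = decode_envelope_example(q, n)) < 0)
        return res;           /* same "if ((res = f(...)) < 0) return res;" idiom */
    return 0;
}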
@@ -36,8 +36,8 @@ static const int expbits_tab[8] = {
    52,47,43,37,29,22,16,0,
};

static const float dither_tab[8] = {
    0.0, 0.0, 0.0, 0.0, 0.0, 0.176777, 0.25, 0.707107,
static const float dither_tab[9] = {
    0.0, 0.0, 0.0, 0.0, 0.0, 0.176777, 0.25, 0.707107, 1.0
};

static const float quant_centroid_tab[7][14] = {
@@ -510,23 +510,37 @@ static const int cplband[51] = {
    19,
};

static const float cplscale2[3] = {
// The 1 and 0 at the beginning/end are to prevent overflows with
// bitstream-read indexes. E.g. if n_bits=5, we can access any
// index from [1, (1<<n_bits)] for the first decoupling coeff,
// and (1<<n_bits)-coeff1 as index for coeff2, i.e.:
// coeff1_idx = [1, 32], and coeff2_idx = [0, 31].
// These values aren't part of the tables in the original binary.

static const float cplscale2[5] = {
    1,
    0.953020632266998,0.70710676908493,0.302905440330505,
    0,
};

static const float cplscale3[7] = {
static const float cplscale3[9] = {
    1,
    0.981279790401459,0.936997592449188,0.875934481620789,0.70710676908493,
    0.482430040836334,0.349335819482803,0.192587479948997,
    0,
};

static const float cplscale4[15] = {
static const float cplscale4[17] = {
    1,
    0.991486728191376,0.973249018192291,0.953020632266998,0.930133521556854,
    0.903453230857849,0.870746195316315,0.826180458068848,0.70710676908493,
    0.563405573368073,0.491732746362686,0.428686618804932,0.367221474647522,
    0.302905440330505,0.229752898216248,0.130207896232605,
    0,
};

static const float cplscale5[31] = {
static const float cplscale5[33] = {
    1,
    0.995926380157471,0.987517595291138,0.978726446628571,0.969505727291107,
    0.95979779958725,0.949531257152557,0.938616216182709,0.926936149597168,
    0.914336204528809,0.900602877140045,0.885426938533783,0.868331849575043,
@@ -535,9 +549,11 @@ static const float cplscale5[31] = {
    0.464778542518616,0.434642940759659,0.404955863952637,0.375219136476517,
    0.344963222742081,0.313672333955765,0.280692428350449,0.245068684220314,
    0.205169528722763,0.157508864998817,0.0901700109243393,
    0,
};

static const float cplscale6[63] = {
static const float cplscale6[65] = {
    1,
    0.998005926609039,0.993956744670868,0.989822506904602,0.985598564147949,
    0.981279790401459,0.976860702037811,0.972335040569305,0.967696130275726,
    0.962936460971832,0.958047747612000,0.953020632266998,0.947844684123993,
@@ -554,6 +570,7 @@ static const float cplscale6[63] = {
    0.302905440330505,0.286608695983887,0.269728302955627,0.252119421958923,
    0.233590632677078,0.213876649737358,0.192587479948997,0.169101938605309,
    0.142307326197624,0.109772264957428,0.0631198287010193,
    0,
};

static const float* const cplscales[5] = {
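The new comment above explains that each cplscale table is padded with a 1 at the front and a 0 at the back so that the bitstream-derived decoupling indexes can never run off either end. A small standalone sketch of the resulting index ranges, assuming js_vlc_bits = 2 and the same f1/f2 lookups that the joint_decode() hunk uses (f1 = tab[d + 1], f2 = tab[(1 << bits) - 1 - d]); the table values are copied from cplscale2 above.

#include <stdio.h>

/* Padded 2-bit decoupling table: (1 << 2) + 1 = 5 entries, guard values at both ends. */
static const float cplscale2_padded[5] = {
    1, 0.953020632266998f, 0.70710676908493f, 0.302905440330505f, 0,
};

int main(void)
{
    const int bits = 2;
    /* d is whatever the bitstream yields, i.e. any value in [0, (1 << bits) - 1];
     * thanks to the guard entries both lookups stay inside the table. */
    for (int d = 0; d < (1 << bits); d++) {
        float f1 = cplscale2_padded[d + 1];
        float f2 = cplscale2_padded[((1 << bits) - 1) - d];
        printf("d=%d  f1=%f  f2=%f\n", d, f1, f2);
    }
    return 0;
}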
@@ -228,7 +228,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
        av_log(avctx, AV_LOG_ERROR,
               "CamStudio codec error: invalid depth %i bpp\n",
               avctx->bits_per_coded_sample);
        return 1;
        return AVERROR_INVALIDDATA;
    }
    c->bpp = avctx->bits_per_coded_sample;
    avcodec_get_frame_defaults(&c->pic);
@@ -242,7 +242,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
    c->decomp_buf = av_malloc(c->decomp_size + AV_LZO_OUTPUT_PADDING);
    if (!c->decomp_buf) {
        av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
        return 1;
        return AVERROR(ENOMEM);
    }
    return 0;
}
@@ -29,6 +29,7 @@
#include "libavutil/common.h"
#include "libavutil/intmath.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/audioconvert.h"
#include "avcodec.h"
#include "dsputil.h"
@@ -638,13 +639,20 @@ static int dca_parse_frame_header(DCAContext *s)
}


static inline int get_scale(GetBitContext *gb, int level, int value)
static inline int get_scale(GetBitContext *gb, int level, int value, int log2range)
{
    if (level < 5) {
        /* huffman encoded */
        value += get_bitalloc(gb, &dca_scalefactor, level);
    } else if (level < 8)
        value = get_bits(gb, level + 1);
        value = av_clip(value, 0, (1 << log2range) - 1);
    } else if (level < 8) {
        if (level + 1 > log2range) {
            skip_bits(gb, level + 1 - log2range);
            value = get_bits(gb, log2range);
        } else {
            value = get_bits(gb, level + 1);
        }
    }
    return value;
}

@@ -717,28 +725,31 @@ static int dca_subframe_header(DCAContext *s, int base_channel, int block_index)

    for (j = base_channel; j < s->prim_channels; j++) {
        const uint32_t *scale_table;
        int scale_sum;
        int scale_sum, log_size;

        memset(s->scale_factor[j], 0,
               s->subband_activity[j] * sizeof(s->scale_factor[0][0][0]) * 2);

        if (s->scalefactor_huffman[j] == 6)
        if (s->scalefactor_huffman[j] == 6) {
            scale_table = scale_factor_quant7;
        else
            log_size = 7;
        } else {
            scale_table = scale_factor_quant6;
            log_size = 6;
        }

        /* When huffman coded, only the difference is encoded */
        scale_sum = 0;

        for (k = 0; k < s->subband_activity[j]; k++) {
            if (k >= s->vq_start_subband[j] || s->bitalloc[j][k] > 0) {
                scale_sum = get_scale(&s->gb, s->scalefactor_huffman[j], scale_sum);
                scale_sum = get_scale(&s->gb, s->scalefactor_huffman[j], scale_sum, log_size);
                s->scale_factor[j][k][0] = scale_table[scale_sum];
            }

            if (k < s->vq_start_subband[j] && s->transition_mode[j][k]) {
                /* Get second scale factor */
                scale_sum = get_scale(&s->gb, s->scalefactor_huffman[j], scale_sum);
                scale_sum = get_scale(&s->gb, s->scalefactor_huffman[j], scale_sum, log_size);
                s->scale_factor[j][k][1] = scale_table[scale_sum];
            }
        }
@@ -767,8 +778,7 @@ static int dca_subframe_header(DCAContext *s, int base_channel, int block_index)
     * (is this valid as well for joint scales ???) */

    for (k = s->subband_activity[j]; k < s->subband_activity[source_channel]; k++) {
        scale = get_scale(&s->gb, s->joint_huff[j], 0);
        scale += 64; /* bias */
        scale = get_scale(&s->gb, s->joint_huff[j], 64 /* bias */, 7);
        s->joint_scale_factor[j][k] = scale; /*joint_scale_table[scale]; */
    }

@@ -789,6 +799,11 @@ static int dca_subframe_header(DCAContext *s, int base_channel, int block_index)
        }
    } else {
        int am = s->amode & DCA_CHANNEL_MASK;
        if (am >= FF_ARRAY_ELEMS(dca_default_coeffs)) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Invalid channel mode %d\n", am);
            return AVERROR_INVALIDDATA;
        }
        for (j = base_channel; j < s->prim_channels; j++) {
            s->downmix_coef[j][0] = dca_default_coeffs[am][j][0];
            s->downmix_coef[j][1] = dca_default_coeffs[am][j][1];
@@ -828,7 +843,8 @@ static int dca_subframe_header(DCAContext *s, int base_channel, int block_index)
        }

        /* Scale factor index */
        s->lfe_scale_factor = scale_factor_quant7[get_bits(&s->gb, 8)];
        skip_bits(&s->gb, 1);
        s->lfe_scale_factor = scale_factor_quant7[get_bits(&s->gb, 7)];

        /* Quantization step size * scale factor */
        lfe_scale = 0.035 * s->lfe_scale_factor;
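The new log2range parameter in get_scale() above bounds the index later used for the scale_factor_quant6/quant7 lookups. A tiny standalone sketch of that clamp; it assumes, as the av_clip()/get_bits(log2range) bounds imply, that the scale-factor table holds exactly (1 << log2range) entries, and the example numbers are dummies.

#include <stdio.h>

/* Clamp a (possibly drifting) Huffman-decoded running sum into the valid
 * table range [0, (1 << log2range) - 1], mirroring the hunk above. */
static int clamp_scale_index(int value, int log2range)
{
    int max = (1 << log2range) - 1;
    if (value < 0)   return 0;
    if (value > max) return max;
    return value;
}

int main(void)
{
    printf("%d\n", clamp_scale_index(140, 7));  /* -> 127, last assumed quant7 index */
    printf("%d\n", clamp_scale_index(-3, 6));   /* -> 0 */
    return 0;
}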
@@ -7528,7 +7528,7 @@ static const float dca_downmix_coeffs[65] = {
    0.001412537544623, 0.001000000000000, 0.000501187233627, 0.000251188643151, 0.000000000000000,
};

static const uint8_t dca_default_coeffs[16][5][2] = {
static const uint8_t dca_default_coeffs[10][5][2] = {
    { { 13, 13 }, },
    { {  0, 64 }, { 64,  0 }, },
    { {  0, 64 }, { 64,  0 }, },
libavcodec/dfa.c (219 changed lines)
@@ -21,8 +21,9 @@
|
||||
*/
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "bytestream.h"
|
||||
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavutil/lzo.h" // for av_memcpy_backptr
|
||||
|
||||
typedef struct DfaContext {
|
||||
@@ -35,9 +36,13 @@ typedef struct DfaContext {
|
||||
static av_cold int dfa_decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
DfaContext *s = avctx->priv_data;
|
||||
int ret;
|
||||
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
|
||||
if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
|
||||
return ret;
|
||||
|
||||
s->frame_buf = av_mallocz(avctx->width * avctx->height + AV_LZO_OUTPUT_PADDING);
|
||||
if (!s->frame_buf)
|
||||
return AVERROR(ENOMEM);
|
||||
@@ -45,19 +50,16 @@ static av_cold int dfa_decode_init(AVCodecContext *avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_copy(uint8_t *frame, int width, int height,
|
||||
const uint8_t *src, const uint8_t *src_end)
|
||||
static int decode_copy(GetByteContext *gb, uint8_t *frame, int width, int height)
|
||||
{
|
||||
const int size = width * height;
|
||||
|
||||
if (src_end - src < size)
|
||||
return -1;
|
||||
bytestream_get_buffer(&src, frame, size);
|
||||
if (bytestream2_get_buffer(gb, frame, size) != size)
|
||||
return AVERROR_INVALIDDATA;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_tsw1(uint8_t *frame, int width, int height,
|
||||
const uint8_t *src, const uint8_t *src_end)
|
||||
static int decode_tsw1(GetByteContext *gb, uint8_t *frame, int width, int height)
|
||||
{
|
||||
const uint8_t *frame_start = frame;
|
||||
const uint8_t *frame_end = frame + width * height;
|
||||
@@ -65,31 +67,31 @@ static int decode_tsw1(uint8_t *frame, int width, int height,
|
||||
int v, count, segments;
|
||||
unsigned offset;
|
||||
|
||||
segments = bytestream_get_le32(&src);
|
||||
offset = bytestream_get_le32(&src);
|
||||
segments = bytestream2_get_le32(gb);
|
||||
offset = bytestream2_get_le32(gb);
|
||||
if (frame_end - frame <= offset)
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
frame += offset;
|
||||
while (segments--) {
|
||||
if (bytestream2_get_bytes_left(gb) < 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (mask == 0x10000) {
|
||||
if (src >= src_end)
|
||||
return -1;
|
||||
bitbuf = bytestream_get_le16(&src);
|
||||
bitbuf = bytestream2_get_le16u(gb);
|
||||
mask = 1;
|
||||
}
|
||||
if (src_end - src < 2 || frame_end - frame < 2)
|
||||
return -1;
|
||||
if (frame_end - frame < 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (bitbuf & mask) {
|
||||
v = bytestream_get_le16(&src);
|
||||
v = bytestream2_get_le16(gb);
|
||||
offset = (v & 0x1FFF) << 1;
|
||||
count = ((v >> 13) + 2) << 1;
|
||||
if (frame - frame_start < offset || frame_end - frame < count)
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
av_memcpy_backptr(frame, offset, count);
|
||||
frame += count;
|
||||
} else {
|
||||
*frame++ = *src++;
|
||||
*frame++ = *src++;
|
||||
*frame++ = bytestream2_get_byte(gb);
|
||||
*frame++ = bytestream2_get_byte(gb);
|
||||
}
|
||||
mask <<= 1;
|
||||
}
|
||||
@@ -97,39 +99,38 @@ static int decode_tsw1(uint8_t *frame, int width, int height,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_dsw1(uint8_t *frame, int width, int height,
|
||||
const uint8_t *src, const uint8_t *src_end)
|
||||
static int decode_dsw1(GetByteContext *gb, uint8_t *frame, int width, int height)
|
||||
{
|
||||
const uint8_t *frame_start = frame;
|
||||
const uint8_t *frame_end = frame + width * height;
|
||||
int mask = 0x10000, bitbuf = 0;
|
||||
int v, offset, count, segments;
|
||||
|
||||
segments = bytestream_get_le16(&src);
|
||||
segments = bytestream2_get_le16(gb);
|
||||
while (segments--) {
|
||||
if (bytestream2_get_bytes_left(gb) < 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (mask == 0x10000) {
|
||||
if (src >= src_end)
|
||||
return -1;
|
||||
bitbuf = bytestream_get_le16(&src);
|
||||
bitbuf = bytestream2_get_le16u(gb);
|
||||
mask = 1;
|
||||
}
|
||||
if (src_end - src < 2 || frame_end - frame < 2)
|
||||
return -1;
|
||||
if (frame_end - frame < 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (bitbuf & mask) {
|
||||
v = bytestream_get_le16(&src);
|
||||
v = bytestream2_get_le16(gb);
|
||||
offset = (v & 0x1FFF) << 1;
|
||||
count = ((v >> 13) + 2) << 1;
|
||||
if (frame - frame_start < offset || frame_end - frame < count)
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
// can't use av_memcpy_backptr() since it can overwrite following pixels
|
||||
for (v = 0; v < count; v++)
|
||||
frame[v] = frame[v - offset];
|
||||
frame += count;
|
||||
} else if (bitbuf & (mask << 1)) {
|
||||
frame += bytestream_get_le16(&src);
|
||||
frame += bytestream2_get_le16(gb);
|
||||
} else {
|
||||
*frame++ = *src++;
|
||||
*frame++ = *src++;
|
||||
*frame++ = bytestream2_get_byte(gb);
|
||||
*frame++ = bytestream2_get_byte(gb);
|
||||
}
|
||||
mask <<= 2;
|
||||
}
|
||||
@@ -137,30 +138,28 @@ static int decode_dsw1(uint8_t *frame, int width, int height,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_dds1(uint8_t *frame, int width, int height,
|
||||
const uint8_t *src, const uint8_t *src_end)
|
||||
static int decode_dds1(GetByteContext *gb, uint8_t *frame, int width, int height)
|
||||
{
|
||||
const uint8_t *frame_start = frame;
|
||||
const uint8_t *frame_end = frame + width * height;
|
||||
int mask = 0x10000, bitbuf = 0;
|
||||
int i, v, offset, count, segments;
|
||||
|
||||
segments = bytestream_get_le16(&src);
|
||||
segments = bytestream2_get_le16(gb);
|
||||
while (segments--) {
|
||||
if (bytestream2_get_bytes_left(gb) < 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (mask == 0x10000) {
|
||||
if (src >= src_end)
|
||||
return -1;
|
||||
bitbuf = bytestream_get_le16(&src);
|
||||
bitbuf = bytestream2_get_le16u(gb);
|
||||
mask = 1;
|
||||
}
|
||||
if (src_end - src < 2 || frame_end - frame < 2)
|
||||
return -1;
|
||||
|
||||
if (bitbuf & mask) {
|
||||
v = bytestream_get_le16(&src);
|
||||
v = bytestream2_get_le16(gb);
|
||||
offset = (v & 0x1FFF) << 2;
|
||||
count = ((v >> 13) + 2) << 1;
|
||||
if (frame - frame_start < offset || frame_end - frame < count*2 + width)
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
for (i = 0; i < count; i++) {
|
||||
frame[0] = frame[1] =
|
||||
frame[width] = frame[width + 1] = frame[-offset];
|
||||
@@ -168,13 +167,18 @@ static int decode_dds1(uint8_t *frame, int width, int height,
|
||||
frame += 2;
|
||||
}
|
||||
} else if (bitbuf & (mask << 1)) {
|
||||
frame += bytestream_get_le16(&src) * 2;
|
||||
v = bytestream2_get_le16(gb)*2;
|
||||
if (frame - frame_end < v)
|
||||
return AVERROR_INVALIDDATA;
|
||||
frame += v;
|
||||
} else {
|
||||
if (frame_end - frame < width + 3)
|
||||
return AVERROR_INVALIDDATA;
|
||||
frame[0] = frame[1] =
|
||||
frame[width] = frame[width + 1] = *src++;
|
||||
frame[width] = frame[width + 1] = bytestream2_get_byte(gb);
|
||||
frame += 2;
|
||||
frame[0] = frame[1] =
|
||||
frame[width] = frame[width + 1] = *src++;
|
||||
frame[width] = frame[width + 1] = bytestream2_get_byte(gb);
|
||||
frame += 2;
|
||||
}
|
||||
mask <<= 2;
|
||||
@@ -183,40 +187,40 @@ static int decode_dds1(uint8_t *frame, int width, int height,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_bdlt(uint8_t *frame, int width, int height,
|
||||
const uint8_t *src, const uint8_t *src_end)
|
||||
static int decode_bdlt(GetByteContext *gb, uint8_t *frame, int width, int height)
|
||||
{
|
||||
uint8_t *line_ptr;
|
||||
int count, lines, segments;
|
||||
|
||||
count = bytestream_get_le16(&src);
|
||||
count = bytestream2_get_le16(gb);
|
||||
if (count >= height)
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
frame += width * count;
|
||||
lines = bytestream_get_le16(&src);
|
||||
if (count + lines > height || src >= src_end)
|
||||
return -1;
|
||||
lines = bytestream2_get_le16(gb);
|
||||
if (count + lines > height)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
while (lines--) {
|
||||
if (bytestream2_get_bytes_left(gb) < 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
line_ptr = frame;
|
||||
frame += width;
|
||||
segments = *src++;
|
||||
segments = bytestream2_get_byteu(gb);
|
||||
while (segments--) {
|
||||
if (src_end - src < 3)
|
||||
return -1;
|
||||
if (frame - line_ptr <= *src)
|
||||
return -1;
|
||||
line_ptr += *src++;
|
||||
count = (int8_t)*src++;
|
||||
if (frame - line_ptr <= bytestream2_peek_byte(gb))
|
||||
return AVERROR_INVALIDDATA;
|
||||
line_ptr += bytestream2_get_byte(gb);
|
||||
count = (int8_t)bytestream2_get_byte(gb);
|
||||
if (count >= 0) {
|
||||
if (frame - line_ptr < count || src_end - src < count)
|
||||
return -1;
|
||||
bytestream_get_buffer(&src, line_ptr, count);
|
||||
if (frame - line_ptr < count)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (bytestream2_get_buffer(gb, line_ptr, count) != count)
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else {
|
||||
count = -count;
|
||||
if (frame - line_ptr < count || src >= src_end)
|
||||
return -1;
|
||||
memset(line_ptr, *src++, count);
|
||||
if (frame - line_ptr < count)
|
||||
return AVERROR_INVALIDDATA;
|
||||
memset(line_ptr, bytestream2_get_byte(gb), count);
|
||||
}
|
||||
line_ptr += count;
|
||||
}
|
||||
@@ -225,49 +229,53 @@ static int decode_bdlt(uint8_t *frame, int width, int height,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_wdlt(uint8_t *frame, int width, int height,
|
||||
const uint8_t *src, const uint8_t *src_end)
|
||||
static int decode_wdlt(GetByteContext *gb, uint8_t *frame, int width, int height)
|
||||
{
|
||||
const uint8_t *frame_end = frame + width * height;
|
||||
uint8_t *line_ptr;
|
||||
int count, i, v, lines, segments;
|
||||
int y = 0;
|
||||
|
||||
lines = bytestream_get_le16(&src);
|
||||
if (lines > height || src >= src_end)
|
||||
return -1;
|
||||
lines = bytestream2_get_le16(gb);
|
||||
if (lines > height)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
while (lines--) {
|
||||
segments = bytestream_get_le16(&src);
|
||||
if (bytestream2_get_bytes_left(gb) < 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
segments = bytestream2_get_le16u(gb);
|
||||
while ((segments & 0xC000) == 0xC000) {
|
||||
unsigned skip_lines = -(int16_t)segments;
|
||||
unsigned delta = -((int16_t)segments * width);
|
||||
if (frame_end - frame <= delta)
|
||||
return -1;
|
||||
if (frame_end - frame <= delta || y + lines + skip_lines > height)
|
||||
return AVERROR_INVALIDDATA;
|
||||
frame += delta;
|
||||
segments = bytestream_get_le16(&src);
|
||||
y += skip_lines;
|
||||
segments = bytestream2_get_le16(gb);
|
||||
}
|
||||
if (segments & 0x8000) {
|
||||
frame[width - 1] = segments & 0xFF;
|
||||
segments = bytestream_get_le16(&src);
|
||||
segments = bytestream2_get_le16(gb);
|
||||
}
|
||||
line_ptr = frame;
|
||||
frame += width;
|
||||
y++;
|
||||
while (segments--) {
|
||||
if (src_end - src < 2)
|
||||
return -1;
|
||||
if (frame - line_ptr <= *src)
|
||||
return -1;
|
||||
line_ptr += *src++;
|
||||
count = (int8_t)*src++;
|
||||
if (frame - line_ptr <= bytestream2_peek_byte(gb))
|
||||
return AVERROR_INVALIDDATA;
|
||||
line_ptr += bytestream2_get_byte(gb);
|
||||
count = (int8_t)bytestream2_get_byte(gb);
|
||||
if (count >= 0) {
|
||||
if (frame - line_ptr < count*2 || src_end - src < count*2)
|
||||
return -1;
|
||||
bytestream_get_buffer(&src, line_ptr, count*2);
|
||||
if (frame - line_ptr < count * 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (bytestream2_get_buffer(gb, line_ptr, count * 2) != count * 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
line_ptr += count * 2;
|
||||
} else {
|
||||
count = -count;
|
||||
if (frame - line_ptr < count*2 || src_end - src < 2)
|
||||
return -1;
|
||||
v = bytestream_get_le16(&src);
|
||||
if (frame - line_ptr < count * 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
v = bytestream2_get_le16(gb);
|
||||
for (i = 0; i < count; i++)
|
||||
bytestream_put_le16(&line_ptr, v);
|
||||
}
|
||||
@@ -277,22 +285,19 @@ static int decode_wdlt(uint8_t *frame, int width, int height,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_unk6(uint8_t *frame, int width, int height,
|
||||
const uint8_t *src, const uint8_t *src_end)
|
||||
static int decode_unk6(GetByteContext *gb, uint8_t *frame, int width, int height)
|
||||
{
|
||||
return -1;
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
static int decode_blck(uint8_t *frame, int width, int height,
|
||||
const uint8_t *src, const uint8_t *src_end)
|
||||
static int decode_blck(GetByteContext *gb, uint8_t *frame, int width, int height)
|
||||
{
|
||||
memset(frame, 0, width * height);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
typedef int (*chunk_decoder)(uint8_t *frame, int width, int height,
|
||||
const uint8_t *src, const uint8_t *src_end);
|
||||
typedef int (*chunk_decoder)(GetByteContext *gb, uint8_t *frame, int width, int height);
|
||||
|
||||
static const chunk_decoder decoder[8] = {
|
||||
decode_copy, decode_tsw1, decode_bdlt, decode_wdlt,
|
||||
@@ -308,9 +313,8 @@ static int dfa_decode_frame(AVCodecContext *avctx,
|
||||
AVPacket *avpkt)
|
||||
{
|
||||
DfaContext *s = avctx->priv_data;
|
||||
GetByteContext gb;
|
||||
const uint8_t *buf = avpkt->data;
|
||||
const uint8_t *buf_end = avpkt->data + avpkt->size;
|
||||
const uint8_t *tmp_buf;
|
||||
uint32_t chunk_type, chunk_size;
|
||||
uint8_t *dst;
|
||||
int ret;
|
||||
@@ -324,30 +328,25 @@ static int dfa_decode_frame(AVCodecContext *avctx,
|
||||
return ret;
|
||||
}
|
||||
|
||||
while (buf < buf_end) {
|
||||
chunk_size = AV_RL32(buf + 4);
|
||||
chunk_type = AV_RL32(buf + 8);
|
||||
buf += 12;
|
||||
if (buf_end - buf < chunk_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Chunk size is too big (%d bytes)\n", chunk_size);
|
||||
return -1;
|
||||
}
|
||||
bytestream2_init(&gb, avpkt->data, avpkt->size);
|
||||
while (bytestream2_get_bytes_left(&gb) > 0) {
|
||||
bytestream2_skip(&gb, 4);
|
||||
chunk_size = bytestream2_get_le32(&gb);
|
||||
chunk_type = bytestream2_get_le32(&gb);
|
||||
if (!chunk_type)
|
||||
break;
|
||||
if (chunk_type == 1) {
|
||||
pal_elems = FFMIN(chunk_size / 3, 256);
|
||||
tmp_buf = buf;
|
||||
for (i = 0; i < pal_elems; i++) {
|
||||
s->pal[i] = bytestream_get_be24(&tmp_buf) << 2;
|
||||
s->pal[i] = bytestream2_get_be24(&gb) << 2;
|
||||
s->pal[i] |= 0xFF << 24 | (s->pal[i] >> 6) & 0x30303;
|
||||
}
|
||||
s->pic.palette_has_changed = 1;
|
||||
} else if (chunk_type <= 9) {
|
||||
if (decoder[chunk_type - 2](s->frame_buf, avctx->width, avctx->height,
|
||||
buf, buf + chunk_size)) {
|
||||
if (decoder[chunk_type - 2](&gb, s->frame_buf, avctx->width, avctx->height)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error decoding %s chunk\n",
|
||||
chunk_name[chunk_type - 2]);
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
} else {
|
||||
av_log(avctx, AV_LOG_WARNING, "Ignoring unknown chunk type %d\n",
|
||||
|
@@ -491,10 +491,16 @@ static inline void codeblock(DiracContext *s, SubBand *b,
|
||||
}
|
||||
|
||||
if (s->codeblock_mode && !(s->old_delta_quant && blockcnt_one)) {
|
||||
int quant = b->quant;
|
||||
if (is_arith)
|
||||
b->quant += dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
|
||||
quant += dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
|
||||
else
|
||||
b->quant += dirac_get_se_golomb(gb);
|
||||
quant += dirac_get_se_golomb(gb);
|
||||
if (quant < 0) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Invalid quant\n");
|
||||
return;
|
||||
}
|
||||
b->quant = quant;
|
||||
}
|
||||
|
||||
b->quant = FFMIN(b->quant, MAX_QUANT);
|
||||
@@ -619,7 +625,7 @@ static void decode_component(DiracContext *s, int comp)
|
||||
b->quant = svq3_get_ue_golomb(&s->gb);
|
||||
align_get_bits(&s->gb);
|
||||
b->coeff_data = s->gb.buffer + get_bits_count(&s->gb)/8;
|
||||
b->length = FFMIN(b->length, get_bits_left(&s->gb)/8);
|
||||
b->length = FFMIN(b->length, FFMAX(get_bits_left(&s->gb)/8, 0));
|
||||
skip_bits_long(&s->gb, b->length*8);
|
||||
}
|
||||
}
|
||||
@@ -1172,7 +1178,7 @@ static void propagate_block_data(DiracBlock *block, int stride, int size)
|
||||
* Dirac Specification ->
|
||||
* 12. Block motion data syntax
|
||||
*/
|
||||
static void dirac_unpack_block_motion_data(DiracContext *s)
|
||||
static int dirac_unpack_block_motion_data(DiracContext *s)
|
||||
{
|
||||
GetBitContext *gb = &s->gb;
|
||||
uint8_t *sbsplit = s->sbsplit;
|
||||
@@ -1192,7 +1198,9 @@ static void dirac_unpack_block_motion_data(DiracContext *s)
|
||||
ff_dirac_init_arith_decoder(arith, gb, svq3_get_ue_golomb(gb)); /* svq3_get_ue_golomb(gb) is the length */
|
||||
for (y = 0; y < s->sbheight; y++) {
|
||||
for (x = 0; x < s->sbwidth; x++) {
|
||||
int split = dirac_get_arith_uint(arith, CTX_SB_F1, CTX_SB_DATA);
|
||||
unsigned int split = dirac_get_arith_uint(arith, CTX_SB_F1, CTX_SB_DATA);
|
||||
if (split > 2)
|
||||
return -1;
|
||||
sbsplit[x] = (split + pred_sbsplit(sbsplit+x, s->sbwidth, x, y)) % 3;
|
||||
}
|
||||
sbsplit += s->sbwidth;
|
||||
@@ -1221,6 +1229,8 @@ static void dirac_unpack_block_motion_data(DiracContext *s)
|
||||
propagate_block_data(block, s->blwidth, step);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int weight(int i, int blen, int offset)
|
||||
@@ -1675,7 +1685,8 @@ static int dirac_decode_picture_header(DiracContext *s)
|
||||
if (s->num_refs) {
|
||||
if (dirac_unpack_prediction_parameters(s)) /* [DIRAC_STD] 11.2 Picture Prediction Data. picture_prediction() */
|
||||
return -1;
|
||||
dirac_unpack_block_motion_data(s); /* [DIRAC_STD] 12. Block motion data syntax */
|
||||
if (dirac_unpack_block_motion_data(s)) /* [DIRAC_STD] 12. Block motion data syntax */
|
||||
return -1;
|
||||
}
|
||||
if (dirac_unpack_idwt_params(s)) /* [DIRAC_STD] 11.3 Wavelet transform data */
|
||||
return -1;
|
||||
|
@@ -1038,7 +1038,7 @@ int ff_dnxhd_find_cid(AVCodecContext *avctx, int bit_depth)
        if (cid->width == avctx->width && cid->height == avctx->height &&
            cid->interlaced == !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT) &&
            cid->bit_depth == bit_depth) {
            for (j = 0; j < sizeof(cid->bit_rates); j++) {
            for (j = 0; j < FF_ARRAY_ELEMS(cid->bit_rates); j++) {
                if (cid->bit_rates[j] == mbs)
                    return cid->cid;
            }
@@ -84,9 +84,9 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, int cid)
|
||||
}
|
||||
ctx->cid_table = &ff_dnxhd_cid_table[index];
|
||||
|
||||
free_vlc(&ctx->ac_vlc);
|
||||
free_vlc(&ctx->dc_vlc);
|
||||
free_vlc(&ctx->run_vlc);
|
||||
ff_free_vlc(&ctx->ac_vlc);
|
||||
ff_free_vlc(&ctx->dc_vlc);
|
||||
ff_free_vlc(&ctx->run_vlc);
|
||||
|
||||
init_vlc(&ctx->ac_vlc, DNXHD_VLC_BITS, 257,
|
||||
ctx->cid_table->ac_bits, 1, 1,
|
||||
@@ -416,9 +416,9 @@ static av_cold int dnxhd_decode_close(AVCodecContext *avctx)
|
||||
|
||||
if (ctx->picture.data[0])
|
||||
ff_thread_release_buffer(avctx, &ctx->picture);
|
||||
free_vlc(&ctx->ac_vlc);
|
||||
free_vlc(&ctx->dc_vlc);
|
||||
free_vlc(&ctx->run_vlc);
|
||||
ff_free_vlc(&ctx->ac_vlc);
|
||||
ff_free_vlc(&ctx->dc_vlc);
|
||||
ff_free_vlc(&ctx->run_vlc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -183,6 +183,11 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int stereo = s->channels - 1;
|
||||
int16_t *output_samples;
|
||||
|
||||
if (stereo && (buf_size & 1)) {
|
||||
buf_size--;
|
||||
buf_end--;
|
||||
}
|
||||
|
||||
/* calculate output size */
|
||||
switch(avctx->codec->id) {
|
||||
case CODEC_ID_ROQ_DPCM:
|
||||
@@ -320,7 +325,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
*got_frame_ptr = 1;
|
||||
*(AVFrame *)data = s->frame;
|
||||
|
||||
return buf_size;
|
||||
return avpkt->size;
|
||||
}
|
||||
|
||||
#define DPCM_DECODER(id_, name_, long_name_) \
|
||||
|
@@ -147,11 +147,11 @@ static int cin_decode_huffman(const unsigned char *src, int src_size, unsigned c
|
||||
return dst_cur - dst;
|
||||
}
|
||||
|
||||
static void cin_decode_lzss(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
|
||||
static int cin_decode_lzss(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
|
||||
{
|
||||
uint16_t cmd;
|
||||
int i, sz, offset, code;
|
||||
unsigned char *dst_end = dst + dst_size;
|
||||
unsigned char *dst_end = dst + dst_size, *dst_start = dst;
|
||||
const unsigned char *src_end = src + src_size;
|
||||
|
||||
while (src < src_end && dst < dst_end) {
|
||||
@@ -162,6 +162,8 @@ static void cin_decode_lzss(const unsigned char *src, int src_size, unsigned cha
|
||||
} else {
|
||||
cmd = AV_RL16(src); src += 2;
|
||||
offset = cmd >> 4;
|
||||
if ((int) (dst - dst_start) < offset + 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
sz = (cmd & 0xF) + 2;
|
||||
/* don't use memcpy/memmove here as the decoding routine (ab)uses */
|
||||
/* buffer overlappings to repeat bytes in the destination */
|
||||
@@ -173,6 +175,8 @@ static void cin_decode_lzss(const unsigned char *src, int src_size, unsigned cha
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cin_decode_rle(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
|
||||
@@ -202,13 +206,7 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
CinVideoContext *cin = avctx->priv_data;
|
||||
int i, y, palette_type, palette_colors_count, bitmap_frame_type, bitmap_frame_size;
|
||||
|
||||
cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
|
||||
if (avctx->reget_buffer(avctx, &cin->frame)) {
|
||||
av_log(cin->avctx, AV_LOG_ERROR, "delphinecinvideo: reget_buffer() failed to allocate a frame\n");
|
||||
return -1;
|
||||
}
|
||||
int i, y, palette_type, palette_colors_count, bitmap_frame_type, bitmap_frame_size, res = 0;
|
||||
|
||||
palette_type = buf[0];
|
||||
palette_colors_count = AV_RL16(buf+1);
|
||||
@@ -234,8 +232,6 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
|
||||
bitmap_frame_size -= 4;
|
||||
}
|
||||
}
|
||||
memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette));
|
||||
cin->frame.palette_has_changed = 1;
|
||||
|
||||
/* note: the decoding routines below assumes that surface.width = surface.pitch */
|
||||
switch (bitmap_frame_type) {
|
||||
@@ -268,17 +264,31 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
break;
|
||||
case 38:
|
||||
cin_decode_lzss(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
res = cin_decode_lzss(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP],
|
||||
cin->bitmap_size);
|
||||
if (res < 0)
|
||||
return res;
|
||||
break;
|
||||
case 39:
|
||||
cin_decode_lzss(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
res = cin_decode_lzss(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP],
|
||||
cin->bitmap_size);
|
||||
if (res < 0)
|
||||
return res;
|
||||
cin_apply_delta_data(cin->bitmap_table[CIN_PRE_BMP],
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
break;
|
||||
}
|
||||
|
||||
cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
|
||||
if (avctx->reget_buffer(avctx, &cin->frame)) {
|
||||
av_log(cin->avctx, AV_LOG_ERROR, "delphinecinvideo: reget_buffer() failed to allocate a frame\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette));
|
||||
cin->frame.palette_has_changed = 1;
|
||||
for (y = 0; y < cin->avctx->height; ++y)
|
||||
memcpy(cin->frame.data[0] + (cin->avctx->height - 1 - y) * cin->frame.linesize[0],
|
||||
cin->bitmap_table[CIN_CUR_BMP] + y * cin->avctx->width,
|
||||
|
@@ -367,18 +367,17 @@ void ff_put_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
|
||||
int line_size)
|
||||
{
|
||||
int i;
|
||||
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
|
||||
|
||||
/* read the pixels */
|
||||
for(i=0;i<8;i++) {
|
||||
pixels[0] = cm[block[0]];
|
||||
pixels[1] = cm[block[1]];
|
||||
pixels[2] = cm[block[2]];
|
||||
pixels[3] = cm[block[3]];
|
||||
pixels[4] = cm[block[4]];
|
||||
pixels[5] = cm[block[5]];
|
||||
pixels[6] = cm[block[6]];
|
||||
pixels[7] = cm[block[7]];
|
||||
pixels[0] = av_clip_uint8(block[0]);
|
||||
pixels[1] = av_clip_uint8(block[1]);
|
||||
pixels[2] = av_clip_uint8(block[2]);
|
||||
pixels[3] = av_clip_uint8(block[3]);
|
||||
pixels[4] = av_clip_uint8(block[4]);
|
||||
pixels[5] = av_clip_uint8(block[5]);
|
||||
pixels[6] = av_clip_uint8(block[6]);
|
||||
pixels[7] = av_clip_uint8(block[7]);
|
||||
|
||||
pixels += line_size;
|
||||
block += 8;
|
||||
@@ -389,14 +388,13 @@ static void put_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels
|
||||
int line_size)
|
||||
{
|
||||
int i;
|
||||
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
|
||||
|
||||
/* read the pixels */
|
||||
for(i=0;i<4;i++) {
|
||||
pixels[0] = cm[block[0]];
|
||||
pixels[1] = cm[block[1]];
|
||||
pixels[2] = cm[block[2]];
|
||||
pixels[3] = cm[block[3]];
|
||||
pixels[0] = av_clip_uint8(block[0]);
|
||||
pixels[1] = av_clip_uint8(block[1]);
|
||||
pixels[2] = av_clip_uint8(block[2]);
|
||||
pixels[3] = av_clip_uint8(block[3]);
|
||||
|
||||
pixels += line_size;
|
||||
block += 8;
|
||||
@@ -407,12 +405,11 @@ static void put_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels
|
||||
int line_size)
|
||||
{
|
||||
int i;
|
||||
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
|
||||
|
||||
/* read the pixels */
|
||||
for(i=0;i<2;i++) {
|
||||
pixels[0] = cm[block[0]];
|
||||
pixels[1] = cm[block[1]];
|
||||
pixels[0] = av_clip_uint8(block[0]);
|
||||
pixels[1] = av_clip_uint8(block[1]);
|
||||
|
||||
pixels += line_size;
|
||||
block += 8;
|
||||
@@ -444,18 +441,17 @@ void ff_add_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
|
||||
int line_size)
|
||||
{
|
||||
int i;
|
||||
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
|
||||
|
||||
/* read the pixels */
|
||||
for(i=0;i<8;i++) {
|
||||
pixels[0] = cm[pixels[0] + block[0]];
|
||||
pixels[1] = cm[pixels[1] + block[1]];
|
||||
pixels[2] = cm[pixels[2] + block[2]];
|
||||
pixels[3] = cm[pixels[3] + block[3]];
|
||||
pixels[4] = cm[pixels[4] + block[4]];
|
||||
pixels[5] = cm[pixels[5] + block[5]];
|
||||
pixels[6] = cm[pixels[6] + block[6]];
|
||||
pixels[7] = cm[pixels[7] + block[7]];
|
||||
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
|
||||
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
|
||||
pixels[2] = av_clip_uint8(pixels[2] + block[2]);
|
||||
pixels[3] = av_clip_uint8(pixels[3] + block[3]);
|
||||
pixels[4] = av_clip_uint8(pixels[4] + block[4]);
|
||||
pixels[5] = av_clip_uint8(pixels[5] + block[5]);
|
||||
pixels[6] = av_clip_uint8(pixels[6] + block[6]);
|
||||
pixels[7] = av_clip_uint8(pixels[7] + block[7]);
|
||||
pixels += line_size;
|
||||
block += 8;
|
||||
}
|
||||
@@ -465,14 +461,13 @@ static void add_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels
|
||||
int line_size)
|
||||
{
|
||||
int i;
|
||||
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
|
||||
|
||||
/* read the pixels */
|
||||
for(i=0;i<4;i++) {
|
||||
pixels[0] = cm[pixels[0] + block[0]];
|
||||
pixels[1] = cm[pixels[1] + block[1]];
|
||||
pixels[2] = cm[pixels[2] + block[2]];
|
||||
pixels[3] = cm[pixels[3] + block[3]];
|
||||
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
|
||||
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
|
||||
pixels[2] = av_clip_uint8(pixels[2] + block[2]);
|
||||
pixels[3] = av_clip_uint8(pixels[3] + block[3]);
|
||||
pixels += line_size;
|
||||
block += 8;
|
||||
}
|
||||
@@ -482,12 +477,11 @@ static void add_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels
|
||||
int line_size)
|
||||
{
|
||||
int i;
|
||||
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
|
||||
|
||||
/* read the pixels */
|
||||
for(i=0;i<2;i++) {
|
||||
pixels[0] = cm[pixels[0] + block[0]];
|
||||
pixels[1] = cm[pixels[1] + block[1]];
|
||||
pixels[0] = av_clip_uint8(pixels[0] + block[0]);
|
||||
pixels[1] = av_clip_uint8(pixels[1] + block[1]);
|
||||
pixels += line_size;
|
||||
block += 8;
|
||||
}
|
||||
@@ -2779,15 +2773,11 @@ static void ff_jref_idct2_add(uint8_t *dest, int line_size, DCTELEM *block)
|
||||
|
||||
static void ff_jref_idct1_put(uint8_t *dest, int line_size, DCTELEM *block)
|
||||
{
|
||||
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
|
||||
|
||||
dest[0] = cm[(block[0] + 4)>>3];
|
||||
dest[0] = av_clip_uint8((block[0] + 4)>>3);
|
||||
}
|
||||
static void ff_jref_idct1_add(uint8_t *dest, int line_size, DCTELEM *block)
|
||||
{
|
||||
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
|
||||
|
||||
dest[0] = cm[dest[0] + ((block[0] + 4)>>3)];
|
||||
dest[0] = av_clip_uint8(dest[0] + ((block[0] + 4)>>3));
|
||||
}
|
||||
|
||||
static void just_return(void *mem av_unused, int stride av_unused, int h av_unused) { return; }
|
||||
|
@@ -312,7 +312,7 @@ static av_cold int dvvideo_init(AVCodecContext *avctx)
            dv_rl_vlc[i].level = level;
            dv_rl_vlc[i].run   = run;
        }
        free_vlc(&dv_vlc);
        ff_free_vlc(&dv_vlc);

        dv_vlc_map_tableinit();
    }
@@ -25,7 +25,14 @@

#define _WIN32_WINNT 0x0600
#define COBJMACROS

#include "config.h"

#include "dxva2.h"
#if HAVE_DXVA_H
#include <dxva.h>
#endif

#include "avcodec.h"
#include "mpegvideo.h"
@@ -43,6 +43,7 @@ typedef struct TgqContext {
|
||||
ScanTable scantable;
|
||||
int qtable[64];
|
||||
DECLARE_ALIGNED(16, DCTELEM, block)[6][64];
|
||||
GetByteContext gb;
|
||||
} TgqContext;
|
||||
|
||||
static av_cold int tgq_decode_init(AVCodecContext *avctx){
|
||||
@@ -141,39 +142,36 @@ static void tgq_idct_put_mb_dconly(TgqContext *s, int mb_x, int mb_y, const int8
|
||||
}
|
||||
}
|
||||
|
||||
static void tgq_decode_mb(TgqContext *s, int mb_y, int mb_x, const uint8_t **bs, const uint8_t *buf_end){
|
||||
static void tgq_decode_mb(TgqContext *s, int mb_y, int mb_x){
|
||||
int mode;
|
||||
int i;
|
||||
int8_t dc[6];
|
||||
|
||||
mode = bytestream_get_byte(bs);
|
||||
if (mode>buf_end-*bs) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "truncated macroblock\n");
|
||||
return;
|
||||
}
|
||||
|
||||
mode = bytestream2_get_byte(&s->gb);
|
||||
if (mode>12) {
|
||||
GetBitContext gb;
|
||||
init_get_bits(&gb, *bs, mode*8);
|
||||
init_get_bits(&gb, s->gb.buffer, FFMIN(s->gb.buffer_end - s->gb.buffer, mode) * 8);
|
||||
for(i=0; i<6; i++)
|
||||
tgq_decode_block(s, s->block[i], &gb);
|
||||
tgq_idct_put_mb(s, s->block, mb_x, mb_y);
|
||||
bytestream2_skip(&s->gb, mode);
|
||||
}else{
|
||||
if (mode==3) {
|
||||
memset(dc, (*bs)[0], 4);
|
||||
dc[4] = (*bs)[1];
|
||||
dc[5] = (*bs)[2];
|
||||
memset(dc, bytestream2_get_byte(&s->gb), 4);
|
||||
dc[4] = bytestream2_get_byte(&s->gb);
|
||||
dc[5] = bytestream2_get_byte(&s->gb);
|
||||
}else if (mode==6) {
|
||||
memcpy(dc, *bs, 6);
|
||||
bytestream2_get_buffer(&s->gb, dc, 6);
|
||||
}else if (mode==12) {
|
||||
for(i=0; i<6; i++)
|
||||
dc[i] = (*bs)[i*2];
|
||||
for (i = 0; i < 6; i++) {
|
||||
dc[i] = bytestream2_get_byte(&s->gb);
|
||||
bytestream2_skip(&s->gb, 1);
|
||||
}
|
||||
}else{
|
||||
av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode);
|
||||
}
|
||||
tgq_idct_put_mb_dconly(s, mb_x, mb_y, dc);
|
||||
}
|
||||
*bs += mode;
|
||||
}
|
||||
|
||||
static void tgq_calculate_qtable(TgqContext *s, int quant){
|
||||
@@ -193,28 +191,30 @@ static int tgq_decode_frame(AVCodecContext *avctx,
|
||||
AVPacket *avpkt){
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
const uint8_t *buf_start = buf;
|
||||
const uint8_t *buf_end = buf + buf_size;
|
||||
TgqContext *s = avctx->priv_data;
|
||||
int x,y;
|
||||
|
||||
int big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
|
||||
buf += 8;
|
||||
|
||||
if(8>buf_end-buf) {
|
||||
if (buf_size < 16) {
|
||||
av_log(avctx, AV_LOG_WARNING, "truncated header\n");
|
||||
return -1;
|
||||
}
|
||||
s->width = big_endian ? AV_RB16(&buf[0]) : AV_RL16(&buf[0]);
|
||||
s->height = big_endian ? AV_RB16(&buf[2]) : AV_RL16(&buf[2]);
|
||||
bytestream2_init(&s->gb, buf + 8, buf_size - 8);
|
||||
if (big_endian) {
|
||||
s->width = bytestream2_get_be16u(&s->gb);
|
||||
s->height = bytestream2_get_be16u(&s->gb);
|
||||
} else {
|
||||
s->width = bytestream2_get_le16u(&s->gb);
|
||||
s->height = bytestream2_get_le16u(&s->gb);
|
||||
}
|
||||
|
||||
if (s->avctx->width!=s->width || s->avctx->height!=s->height) {
|
||||
avcodec_set_dimensions(s->avctx, s->width, s->height);
|
||||
if (s->frame.data[0])
|
||||
avctx->release_buffer(avctx, &s->frame);
|
||||
}
|
||||
tgq_calculate_qtable(s, buf[4]);
|
||||
buf += 8;
|
||||
tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb));
|
||||
bytestream2_skip(&s->gb, 3);
|
||||
|
||||
if (!s->frame.data[0]) {
|
||||
s->frame.key_frame = 1;
|
||||
@@ -226,14 +226,14 @@ static int tgq_decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
}
|
||||
|
||||
for (y=0; y<(avctx->height+15)/16; y++)
|
||||
for (x=0; x<(avctx->width+15)/16; x++)
|
||||
tgq_decode_mb(s, y, x, &buf, buf_end);
|
||||
for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++)
|
||||
for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++)
|
||||
tgq_decode_mb(s, y, x);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = s->frame;
|
||||
|
||||
return buf-buf_start;
|
||||
return avpkt->size;
|
||||
}
|
||||
|
||||
static av_cold int tgq_decode_end(AVCodecContext *avctx){
|
||||
|
@@ -62,7 +62,7 @@ static int tqi_decode_mb(MpegEncContext *s, DCTELEM (*block)[64])
    int n;
    s->dsp.clear_blocks(block[0]);
    for (n=0; n<6; n++)
        if(ff_mpeg1_decode_block_intra(s, block[n], n)<0)
        if (ff_mpeg1_decode_block_intra(s, block[n], n) < 0)
            return -1;

    return 0;
@@ -137,7 +137,7 @@ static int tqi_decode_frame(AVCodecContext *avctx,
    for (s->mb_y=0; s->mb_y<(avctx->height+15)/16; s->mb_y++)
    for (s->mb_x=0; s->mb_x<(avctx->width+15)/16; s->mb_x++)
    {
        if(tqi_decode_mb(s, t->block) < 0)
        if (tqi_decode_mb(s, t->block) < 0)
            break;
        tqi_idct_put(t, t->block);
    }

@@ -440,9 +440,14 @@ static void guess_mv(MpegEncContext *s)
    if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) ||
        num_avail <= mb_width / 2) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            s->mb_x = 0;
            s->mb_y = mb_y;
            ff_init_block_index(s);
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_xy = mb_x + mb_y * s->mb_stride;

                ff_update_block_index(s);

                if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                    continue;
                if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
@@ -477,6 +482,9 @@ static void guess_mv(MpegEncContext *s)

        changed = 0;
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            s->mb_x = 0;
            s->mb_y = mb_y;
            ff_init_block_index(s);
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_xy = mb_x + mb_y * s->mb_stride;
                int mv_predictor[8][2] = { { 0 } };
@@ -488,6 +496,8 @@ static void guess_mv(MpegEncContext *s)
                const int mot_index = (mb_x + mb_y * mot_stride) * mot_step;
                int prev_x, prev_y, prev_ref;

                ff_update_block_index(s);

                if ((mb_x ^ mb_y ^ pass) & 1)
                    continue;

@@ -1098,11 +1108,16 @@ void ff_er_frame_end(MpegEncContext *s)

    /* handle inter blocks with damaged AC */
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        s->mb_x = 0;
        s->mb_y = mb_y;
        ff_init_block_index(s);
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            const int mb_xy = mb_x + mb_y * s->mb_stride;
            const int mb_type = s->current_picture.f.mb_type[mb_xy];
            int dir = !s->last_picture.f.data[0];

            ff_update_block_index(s);

            error = s->error_status_table[mb_xy];

            if (IS_INTRA(mb_type))
@@ -1140,11 +1155,16 @@ void ff_er_frame_end(MpegEncContext *s)
    /* guess MVs */
    if (s->pict_type == AV_PICTURE_TYPE_B) {
        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            s->mb_x = 0;
            s->mb_y = mb_y;
            ff_init_block_index(s);
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                int xy = mb_x * 2 + mb_y * 2 * s->b8_stride;
                const int mb_xy = mb_x + mb_y * s->mb_stride;
                const int mb_type = s->current_picture.f.mb_type[mb_xy];

                ff_update_block_index(s);

                error = s->error_status_table[mb_xy];

                if (IS_INTRA(mb_type))
@@ -48,8 +48,8 @@ typedef struct Escape124Context {
    CodeBook codebooks[3];
} Escape124Context;

static int can_safely_read(GetBitContext* gb, int bits) {
    return get_bits_count(gb) + bits <= gb->size_in_bits;
static int can_safely_read(GetBitContext* gb, uint64_t bits) {
    return get_bits_left(gb) >= bits;
}

/**
@@ -90,7 +90,7 @@ static CodeBook unpack_codebook(GetBitContext* gb, unsigned depth,
    unsigned i, j;
    CodeBook cb = { 0 };

    if (!can_safely_read(gb, size * 34))
    if (!can_safely_read(gb, size * 34L))
        return cb;

    if (size >= INT_MAX / sizeof(MacroBlock))
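Widening can_safely_read() to take uint64_t (and writing the constant as 34L at the call site) matters because size * 34 can exceed 32 bits for a hostile codebook size. A minimal sketch of the difference, with illustrative values rather than the decoder's real fields:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned size = 200000000u;                  /* oversized count from a corrupt header */
        uint32_t narrow = (uint32_t)(size * 34u);    /* 32-bit product wraps around */
        uint64_t wide = size * 34ULL;                /* 64-bit product keeps the true value */

        printf("32-bit product: %u\n", narrow);                      /* 2505032704: looks plausible */
        printf("64-bit product: %llu\n", (unsigned long long)wide);  /* 6800000000: correctly rejected */
        return 0;
    }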
@@ -110,11 +110,11 @@ av_cold void ff_ccitt_unpack_init(void)
    ccitt_vlc[1].table = code_table2;
    ccitt_vlc[1].table_allocated = 648;
    for(i = 0; i < 2; i++){
        init_vlc_sparse(&ccitt_vlc[i], 9, CCITT_SYMS,
                        ccitt_codes_lens[i], 1, 1,
                        ccitt_codes_bits[i], 1, 1,
                        ccitt_syms, 2, 2,
                        INIT_VLC_USE_NEW_STATIC);
        ff_init_vlc_sparse(&ccitt_vlc[i], 9, CCITT_SYMS,
                           ccitt_codes_lens[i], 1, 1,
                           ccitt_codes_bits[i], 1, 1,
                           ccitt_syms, 2, 2,
                           INIT_VLC_USE_NEW_STATIC);
    }
    INIT_VLC_STATIC(&ccitt_group3_2d_vlc, 9, 11,
                    ccitt_group3_2d_lens, 1, 1,
@@ -228,7 +228,7 @@ static int decode_group3_2d_line(AVCodecContext *avctx, GetBitContext *gb,
            mode = !mode;
        }
        //sync line pointers
        while(run_off <= offs){
        while(offs < width && run_off <= offs){
            run_off += *ref++;
            run_off += *ref++;
        }

@@ -255,7 +255,7 @@ static void find_best_state(uint8_t best_state[256][256], const uint8_t one_stat
        occ[j]=1.0;
        for(k=0; k<256; k++){
            double newocc[256]={0};
            for(m=0; m<256; m++){
            for(m=1; m<256; m++){
                if(occ[m]){
                    len -=occ[m]*( p *l2tab[ m]
                        + (1-p)*l2tab[256-m]);
@@ -993,7 +993,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
            }
        }
        gob_count= strtol(p, &next, 0);
        if(next==p || gob_count <0){
        if(next==p || gob_count <=0){
            av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
            return -1;
        }

@@ -422,7 +422,16 @@ static inline int decode_subframe(FLACContext *s, int channel)
    type = get_bits(&s->gb, 6);

    if (get_bits1(&s->gb)) {
        int left = get_bits_left(&s->gb);
        wasted = 1;
        if ( left < 0 ||
            (left < s->curr_bps && !show_bits_long(&s->gb, left)) ||
            !show_bits_long(&s->gb, s->curr_bps)) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Invalid number of wasted bits > available bits (%d) - left=%d\n",
                   s->curr_bps, left);
            return AVERROR_INVALIDDATA;
        }
        while (!get_bits1(&s->gb))
            wasted++;
        s->curr_bps -= wasted;
@@ -122,10 +122,11 @@ static av_cold int flashsv_decode_init(AVCodecContext *avctx)
}


static void flashsv2_prime(FlashSVContext *s, uint8_t *src,
                           int size, int unp_size)
static int flashsv2_prime(FlashSVContext *s, uint8_t *src,
                          int size, int unp_size)
{
    z_stream zs;
    int zret; // Zlib return code

    zs.zalloc = NULL;
    zs.zfree = NULL;
@@ -137,7 +138,8 @@ static void flashsv2_prime(FlashSVContext *s, uint8_t *src,
    s->zstream.avail_out = s->block_size * 3;
    inflate(&s->zstream, Z_SYNC_FLUSH);

    deflateInit(&zs, 0);
    if (deflateInit(&zs, 0) != Z_OK)
        return -1;
    zs.next_in = s->tmpblock;
    zs.avail_in = s->block_size * 3 - s->zstream.avail_out;
    zs.next_out = s->deflate_block;
@@ -145,13 +147,18 @@ static void flashsv2_prime(FlashSVContext *s, uint8_t *src,
    deflate(&zs, Z_SYNC_FLUSH);
    deflateEnd(&zs);

    inflateReset(&s->zstream);
    if ((zret = inflateReset(&s->zstream)) != Z_OK) {
        av_log(s->avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
        return AVERROR_UNKNOWN;
    }

    s->zstream.next_in = s->deflate_block;
    s->zstream.avail_in = s->deflate_block_size - zs.avail_out;
    s->zstream.next_out = s->tmpblock;
    s->zstream.avail_out = s->block_size * 3;
    inflate(&s->zstream, Z_SYNC_FLUSH);

    return 0;
}

static int flashsv_decode_block(AVCodecContext *avctx, AVPacket *avpkt,
@@ -164,11 +171,14 @@ static int flashsv_decode_block(AVCodecContext *avctx, AVPacket *avpkt,
    int k;
    int ret = inflateReset(&s->zstream);
    if (ret != Z_OK) {
        //return -1;
        av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", ret);
        return AVERROR_UNKNOWN;
    }
    if (s->zlibprime_curr || s->zlibprime_prev) {
        flashsv2_prime(s, s->blocks[blk_idx].pos, s->blocks[blk_idx].size,
        ret = flashsv2_prime(s, s->blocks[blk_idx].pos, s->blocks[blk_idx].size,
                             s->blocks[blk_idx].unp_size);
        if (ret < 0)
            return ret;
    }
    s->zstream.next_in = avpkt->data + get_bits_count(gb) / 8;
    s->zstream.avail_in = block_size;
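The flashsv2_prime() changes above turn previously ignored zlib calls into checked ones. A self-contained sketch of the same pattern against the zlib API (an illustration, not the decoder's own code):

    #include <stdio.h>
    #include <zlib.h>

    int main(void)
    {
        z_stream dz = { 0 }, iz = { 0 };
        int zret;

        if ((zret = deflateInit(&dz, 0)) != Z_OK) {   /* level 0: stored blocks, as in the priming path */
            fprintf(stderr, "deflateInit failed: %d\n", zret);
            return 1;
        }
        deflateEnd(&dz);

        if ((zret = inflateInit(&iz)) != Z_OK) {
            fprintf(stderr, "inflateInit failed: %d\n", zret);
            return 1;
        }
        if ((zret = inflateReset(&iz)) != Z_OK) {     /* the call the patch now checks */
            fprintf(stderr, "inflateReset failed: %d\n", zret);
            inflateEnd(&iz);
            return 1;
        }
        inflateEnd(&iz);
        return 0;
    }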
@@ -113,13 +113,13 @@ static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w,
                if(j) dst[i] += dst[i - stride];
                else if(Uoff) dst[i] += 0x80;
                if (get_bits_left(&gb) < 0) {
                    free_vlc(&vlc);
                    ff_free_vlc(&vlc);
                    return AVERROR_INVALIDDATA;
                }
            }
        dst += stride;
    }
    free_vlc(&vlc);
    ff_free_vlc(&vlc);
    return 0;
}

@@ -140,7 +140,7 @@ static int decode_frame(AVCodecContext *avctx,
    uint32_t offs[4];
    int i, j, is_chroma;
    const int planes = 3;

    enum PixelFormat pix_fmt;

    header = AV_RL32(buf);
    version = header & 0xff;
@@ -155,8 +155,6 @@ static int decode_frame(AVCodecContext *avctx,

    buf += header_size;

    avctx->pix_fmt = version & 1 ? PIX_FMT_BGR24 : PIX_FMT_YUVJ420P;

    if (version < 2) {
        unsigned needed_size = avctx->width*avctx->height*3;
        if (version == 0) needed_size /= 2;
@@ -176,6 +174,12 @@ static int decode_frame(AVCodecContext *avctx,
                          FF_BUFFER_HINTS_PRESERVE |
                          FF_BUFFER_HINTS_REUSABLE;

    pix_fmt = version & 1 ? PIX_FMT_BGR24 : PIX_FMT_YUVJ420P;
    if (avctx->pix_fmt != pix_fmt && f->data[0]) {
        avctx->release_buffer(avctx, f);
    }
    avctx->pix_fmt = pix_fmt;

    switch(version) {
    case 0:
    default:
|
||||
c->prev_samples[c->prev_samples_pos++] = rlow - rhigh;
|
||||
ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24,
|
||||
&xout1, &xout2);
|
||||
*out_buf++ = av_clip_int16(xout1 >> 12);
|
||||
*out_buf++ = av_clip_int16(xout2 >> 12);
|
||||
*out_buf++ = av_clip_int16(xout1 >> 11);
|
||||
*out_buf++ = av_clip_int16(xout2 >> 11);
|
||||
if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
|
||||
memmove(c->prev_samples, c->prev_samples + c->prev_samples_pos - 22,
|
||||
22 * sizeof(c->prev_samples[0]));
|
||||
|
@@ -128,8 +128,8 @@ static inline void filter_samples(G722Context *c, const int16_t *samples,
|
||||
c->prev_samples[c->prev_samples_pos++] = samples[0];
|
||||
c->prev_samples[c->prev_samples_pos++] = samples[1];
|
||||
ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24, &xout1, &xout2);
|
||||
*xlow = xout1 + xout2 >> 13;
|
||||
*xhigh = xout1 - xout2 >> 13;
|
||||
*xlow = xout1 + xout2 >> 14;
|
||||
*xhigh = xout1 - xout2 >> 14;
|
||||
if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
|
||||
memmove(c->prev_samples,
|
||||
c->prev_samples + c->prev_samples_pos - 22,
|
||||
@@ -174,7 +174,7 @@ static void g722_encode_trellis(G722Context *c, int trellis,
|
||||
for (i = 0; i < 2; i++) {
|
||||
nodes[i] = c->nodep_buf[i];
|
||||
nodes_next[i] = c->nodep_buf[i] + frontier;
|
||||
memset(c->nodep_buf[i], 0, 2 * frontier * sizeof(*c->nodep_buf));
|
||||
memset(c->nodep_buf[i], 0, 2 * frontier * sizeof(*c->nodep_buf[i]));
|
||||
nodes[i][0] = c->node_buf[i] + frontier;
|
||||
nodes[i][0]->ssd = 0;
|
||||
nodes[i][0]->path = 0;
|
||||
|
@@ -118,10 +118,23 @@ for examples see get_bits, show_bits, skip_bits, get_vlc
# define MIN_CACHE_BITS 25
#endif

#if UNCHECKED_BITSTREAM_READER
#define OPEN_READER(name, gb) \
    unsigned int name##_index = (gb)->index; \
    av_unused unsigned int name##_cache

#define HAVE_BITS_REMAINING(name, gb) 1
#else
#define OPEN_READER(name, gb) \
    unsigned int name##_index = (gb)->index; \
    unsigned int av_unused name##_cache = 0; \
    unsigned int av_unused name##_size_plus8 = \
        (gb)->size_in_bits_plus8

#define HAVE_BITS_REMAINING(name, gb) \
    name##_index < name##_size_plus8
#endif

#define CLOSE_READER(name, gb) (gb)->index = name##_index

#ifdef BITSTREAM_READER_LE
@@ -154,7 +167,7 @@ for examples see get_bits, show_bits, skip_bits, get_vlc
# define SKIP_COUNTER(name, gb, num) name##_index += (num)
#else
# define SKIP_COUNTER(name, gb, num) \
    name##_index = FFMIN((gb)->size_in_bits_plus8, name##_index + (num))
    name##_index = FFMIN(name##_size_plus8, name##_index + (num))
#endif

#define SKIP_BITS(name, gb, num) do { \
@@ -360,19 +373,19 @@ static inline void align_get_bits(GetBitContext *s)
                    bits, bits_wrap, bits_size, \
                    codes, codes_wrap, codes_size, \
                    flags) \
    init_vlc_sparse(vlc, nb_bits, nb_codes, \
                    bits, bits_wrap, bits_size, \
                    codes, codes_wrap, codes_size, \
                    NULL, 0, 0, flags)
    ff_init_vlc_sparse(vlc, nb_bits, nb_codes, \
                       bits, bits_wrap, bits_size, \
                       codes, codes_wrap, codes_size, \
                       NULL, 0, 0, flags)

int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
             const void *bits, int bits_wrap, int bits_size,
             const void *codes, int codes_wrap, int codes_size,
             const void *symbols, int symbols_wrap, int symbols_size,
             int flags);
#define INIT_VLC_LE 2
#define INIT_VLC_USE_NEW_STATIC 4
void free_vlc(VLC *vlc);
void ff_free_vlc(VLC *vlc);

#define INIT_VLC_STATIC(vlc, bits, a,b,c,d,e,f,g, static_size) do { \
    static VLC_TYPE table[static_size][2]; \
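Several hunks in this series replace manual get_bits_count()-against-size_in_bits arithmetic with get_bits_left(). A toy model of why the two checks agree, using simplified, hypothetical reader fields rather than the real GetBitContext:

    #include <stdio.h>

    typedef struct {
        int index;          /* bits consumed so far */
        int size_in_bits;   /* total bits in the buffer */
    } ToyBitReader;

    static int toy_bits_count(const ToyBitReader *gb) { return gb->index; }
    static int toy_bits_left(const ToyBitReader *gb)  { return gb->size_in_bits - gb->index; }

    int main(void)
    {
        ToyBitReader gb = { 1000, 1024 };

        /* old style: the running count compared against the total size */
        int old_check = toy_bits_count(&gb) + 16 < gb.size_in_bits;
        /* new style: the remaining-bit count, which also stays meaningful on overreads */
        int new_check = toy_bits_left(&gb) > 16;

        printf("old=%d new=%d left=%d\n", old_check, new_check, toy_bits_left(&gb));
        return 0;
    }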
@@ -135,7 +135,7 @@ static inline int svq3_get_ue_golomb(GetBitContext *gb){
            ret = (ret << 4) | ff_interleaved_dirac_golomb_vlc_code[buf];
            UPDATE_CACHE(re, gb);
            buf = GET_CACHE(re, gb);
        } while(ret<0x8000000U);
        } while (ret<0x8000000U && HAVE_BITS_REMAINING(re, gb));

        CLOSE_READER(re, gb);
        return ret - 1;
@@ -301,7 +301,7 @@ static inline int get_ur_golomb_jpegls(GetBitContext *gb, int k, int limit, int
        return buf;
    }else{
        int i;
        for(i=0; SHOW_UBITS(re, gb, 1) == 0; i++){
        for (i = 0; i < limit && SHOW_UBITS(re, gb, 1) == 0; i++) {
            if (gb->size_in_bits <= re_index)
                return -1;
            LAST_SKIP_BITS(re, gb, 1);

@@ -66,7 +66,7 @@ static av_cold void h261_decode_init_vlc(H261Context *h){
        INIT_VLC_STATIC(&h261_cbp_vlc, H261_CBP_VLC_BITS, 63,
                        &h261_cbp_tab[0][1], 2, 1,
                        &h261_cbp_tab[0][0], 2, 1, 512);
        init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
        ff_init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
        INIT_VLC_RL(h261_rl_tcoeff, 552);
    }
}
@@ -265,7 +265,7 @@ static int h261_decode_mb(H261Context *h){
    while( h->mba_diff == MBA_STUFFING ); // stuffing

    if ( h->mba_diff < 0 ){
        if ( get_bits_count(&s->gb) + 7 >= s->gb.size_in_bits )
        if (get_bits_left(&s->gb) <= 7)
            return SLICE_END;

        av_log(s->avctx, AV_LOG_ERROR, "illegal mba at %d %d\n", s->mb_x, s->mb_y);
@@ -286,6 +286,10 @@ static int h261_decode_mb(H261Context *h){

    // Read mtype
    h->mtype = get_vlc2(&s->gb, h261_mtype_vlc.table, H261_MTYPE_VLC_BITS, 2);
    if (h->mtype < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "illegal mtype %d\n", h->mtype);
        return SLICE_ERROR;
    }
    h->mtype = h261_mtype_map[h->mtype];

    // Read mquant

@@ -240,7 +240,7 @@ void ff_h261_encode_init(MpegEncContext *s){

    if (!done) {
        done = 1;
        init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
        ff_init_rl(&h261_rl_tcoeff, ff_h261_rl_table_store);
    }

    s->min_qcoeff= -127;
@@ -98,7 +98,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
        }
    }
}

int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
{
    int x, y, wrap, a, c, pred_dc;
    int16_t *dc_val;
@@ -226,7 +226,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
        }
    }
}

void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
{
    int x, y, wrap, a, c, pred_dc, scale, i;
    int16_t *dc_val, *ac_val, *ac_val1;
@@ -313,8 +313,8 @@ void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
        ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
}

int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
                          int *px, int *py)
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
                             int *px, int *py)
{
    int wrap;
    int16_t *A, *B, *C, (*mot_val)[2];

@@ -38,16 +38,16 @@
extern const AVRational ff_h263_pixel_aspect[16];
extern const uint8_t ff_h263_cbpy_tab[16][2];

extern const uint8_t cbpc_b_tab[4][2];
extern const uint8_t ff_cbpc_b_tab[4][2];

extern const uint8_t mvtab[33][2];
extern const uint8_t ff_mvtab[33][2];

extern const uint8_t ff_h263_intra_MCBPC_code[9];
extern const uint8_t ff_h263_intra_MCBPC_bits[9];

extern const uint8_t ff_h263_inter_MCBPC_code[28];
extern const uint8_t ff_h263_inter_MCBPC_bits[28];
extern const uint8_t h263_mbtype_b_tab[15][2];
extern const uint8_t ff_h263_mbtype_b_tab[15][2];

extern VLC ff_h263_intra_MCBPC_vlc;
extern VLC ff_h263_inter_MCBPC_vlc;
@@ -55,41 +55,41 @@ extern VLC ff_h263_cbpy_vlc;

extern RLTable ff_h263_rl_inter;

extern RLTable rl_intra_aic;
extern RLTable ff_rl_intra_aic;

extern const uint16_t h263_format[8][2];
extern const uint8_t modified_quant_tab[2][32];
extern const uint16_t ff_h263_format[8][2];
extern const uint8_t ff_modified_quant_tab[2][32];
extern const uint16_t ff_mba_max[6];
extern const uint8_t ff_mba_length[7];

extern uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];


int h263_decode_motion(MpegEncContext * s, int pred, int f_code);
int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code);
av_const int ff_h263_aspect_to_info(AVRational aspect);
int ff_h263_decode_init(AVCodecContext *avctx);
int ff_h263_decode_frame(AVCodecContext *avctx,
                         void *data, int *data_size,
                         AVPacket *avpkt);
int ff_h263_decode_end(AVCodecContext *avctx);
void h263_encode_mb(MpegEncContext *s,
                    DCTELEM block[6][64],
                    int motion_x, int motion_y);
void h263_encode_picture_header(MpegEncContext *s, int picture_number);
void h263_encode_gob_header(MpegEncContext * s, int mb_line);
int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
                          int *px, int *py);
void h263_encode_init(MpegEncContext *s);
void h263_decode_init_vlc(MpegEncContext *s);
int h263_decode_picture_header(MpegEncContext *s);
void ff_h263_encode_mb(MpegEncContext *s,
                       DCTELEM block[6][64],
                       int motion_x, int motion_y);
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number);
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line);
int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
                             int *px, int *py);
void ff_h263_encode_init(MpegEncContext *s);
void ff_h263_decode_init_vlc(MpegEncContext *s);
int ff_h263_decode_picture_header(MpegEncContext *s);
int ff_h263_decode_gob_header(MpegEncContext *s);
void ff_h263_update_motion_val(MpegEncContext * s);
void ff_h263_loop_filter(MpegEncContext * s);
int ff_h263_decode_mba(MpegEncContext *s);
void ff_h263_encode_mba(MpegEncContext *s);
void ff_init_qscale_tab(MpegEncContext *s);
int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);
int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n);


/**
@@ -119,7 +119,7 @@ static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code
    int l, bit_size, code;

    if (val == 0) {
        return mvtab[0][1];
        return ff_mvtab[0][1];
    } else {
        bit_size = f_code - 1;
        /* modulo encoding */
@@ -128,7 +128,7 @@ static inline int h263_get_motion_length(MpegEncContext * s, int val, int f_code
        val--;
        code = (val >> bit_size) + 1;

        return mvtab[code][1] + 1 + bit_size;
        return ff_mvtab[code][1] + 1 + bit_size;
    }
}

@@ -57,7 +57,7 @@ const uint8_t ff_h263_inter_MCBPC_bits[28] = {
|
||||
11, 13, 13, 13,/* inter4Q*/
|
||||
};
|
||||
|
||||
const uint8_t h263_mbtype_b_tab[15][2] = {
|
||||
const uint8_t ff_h263_mbtype_b_tab[15][2] = {
|
||||
{1, 1},
|
||||
{3, 3},
|
||||
{1, 5},
|
||||
@@ -75,7 +75,7 @@ const uint8_t h263_mbtype_b_tab[15][2] = {
|
||||
{1, 8},
|
||||
};
|
||||
|
||||
const uint8_t cbpc_b_tab[4][2] = {
|
||||
const uint8_t ff_cbpc_b_tab[4][2] = {
|
||||
{0, 1},
|
||||
{2, 2},
|
||||
{7, 3},
|
||||
@@ -88,7 +88,7 @@ const uint8_t ff_h263_cbpy_tab[16][2] =
|
||||
{2,5}, {3,6}, {5,4}, {10,4}, {4,4}, {8,4}, {6,4}, {3,2}
|
||||
};
|
||||
|
||||
const uint8_t mvtab[33][2] =
|
||||
const uint8_t ff_mvtab[33][2] =
|
||||
{
|
||||
{1,1}, {1,2}, {1,3}, {1,4}, {3,6}, {5,7}, {4,7}, {3,7},
|
||||
{11,9}, {10,9}, {9,9}, {17,10}, {16,10}, {15,10}, {14,10}, {13,10},
|
||||
@@ -98,7 +98,7 @@ const uint8_t mvtab[33][2] =
|
||||
};
|
||||
|
||||
/* third non intra table */
|
||||
const uint16_t inter_vlc[103][2] = {
|
||||
const uint16_t ff_inter_vlc[103][2] = {
|
||||
{ 0x2, 2 },{ 0xf, 4 },{ 0x15, 6 },{ 0x17, 7 },
|
||||
{ 0x1f, 8 },{ 0x25, 9 },{ 0x24, 9 },{ 0x21, 10 },
|
||||
{ 0x20, 10 },{ 0x7, 11 },{ 0x6, 11 },{ 0x20, 11 },
|
||||
@@ -127,7 +127,7 @@ const uint16_t inter_vlc[103][2] = {
|
||||
{ 0x5e, 12 },{ 0x5f, 12 },{ 0x3, 7 },
|
||||
};
|
||||
|
||||
const int8_t inter_level[102] = {
|
||||
const int8_t ff_inter_level[102] = {
|
||||
1, 2, 3, 4, 5, 6, 7, 8,
|
||||
9, 10, 11, 12, 1, 2, 3, 4,
|
||||
5, 6, 1, 2, 3, 4, 1, 2,
|
||||
@@ -143,7 +143,7 @@ const int8_t inter_level[102] = {
|
||||
1, 1, 1, 1, 1, 1,
|
||||
};
|
||||
|
||||
const int8_t inter_run[102] = {
|
||||
const int8_t ff_inter_run[102] = {
|
||||
0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 1, 1, 1, 1,
|
||||
1, 1, 2, 2, 2, 2, 3, 3,
|
||||
@@ -162,9 +162,9 @@ const int8_t inter_run[102] = {
|
||||
RLTable ff_h263_rl_inter = {
|
||||
102,
|
||||
58,
|
||||
inter_vlc,
|
||||
inter_run,
|
||||
inter_level,
|
||||
ff_inter_vlc,
|
||||
ff_inter_run,
|
||||
ff_inter_level,
|
||||
};
|
||||
|
||||
static const uint16_t intra_vlc_aic[103][2] = {
|
||||
@@ -228,7 +228,7 @@ static const int8_t intra_level_aic[102] = {
|
||||
1, 1, 1, 1, 1, 1,
|
||||
};
|
||||
|
||||
RLTable rl_intra_aic = {
|
||||
RLTable ff_rl_intra_aic = {
|
||||
102,
|
||||
58,
|
||||
intra_vlc_aic,
|
||||
@@ -236,7 +236,7 @@ RLTable rl_intra_aic = {
|
||||
intra_level_aic,
|
||||
};
|
||||
|
||||
const uint16_t h263_format[8][2] = {
|
||||
const uint16_t ff_h263_format[8][2] = {
|
||||
{ 0, 0 },
|
||||
{ 128, 96 },
|
||||
{ 176, 144 },
|
||||
@@ -250,7 +250,7 @@ const uint8_t ff_aic_dc_scale_table[32]={
|
||||
0, 2, 4, 6, 8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62
|
||||
};
|
||||
|
||||
const uint8_t modified_quant_tab[2][32]={
|
||||
const uint8_t ff_modified_quant_tab[2][32]={
|
||||
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
|
||||
{
|
||||
0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,10,11,12,13,14,15,16,17,18,18,19,20,21,22,23,24,25,26,27,28
|
||||
|
@@ -115,7 +115,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
|
||||
if (MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
|
||||
h263_decode_init_vlc(s);
|
||||
ff_h263_decode_init_vlc(s);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -435,7 +435,7 @@ retry:
|
||||
} else if (CONFIG_FLV_DECODER && s->h263_flv) {
|
||||
ret = ff_flv_decode_picture_header(s);
|
||||
} else {
|
||||
ret = h263_decode_picture_header(s);
|
||||
ret = ff_h263_decode_picture_header(s);
|
||||
}
|
||||
|
||||
if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_size);
|
||||
@@ -444,6 +444,13 @@ retry:
|
||||
if (ret < 0){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
|
||||
return -1;
|
||||
} else if ((s->width != avctx->coded_width ||
|
||||
s->height != avctx->coded_height ||
|
||||
(s->width + 15) >> 4 != s->mb_width ||
|
||||
(s->height + 15) >> 4 != s->mb_height) &&
|
||||
(HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))) {
|
||||
av_log_missing_feature(s->avctx, "Width/height/bit depth/chroma idc changing with threads is", 0);
|
||||
return AVERROR_PATCHWELCOME; // width / height changed during parallelized decoding
|
||||
}
|
||||
|
||||
avctx->has_b_frames= !s->low_delay;
|
||||
@@ -571,7 +578,6 @@ retry:
|
||||
if (s->codec_id == CODEC_ID_MPEG4 && s->xvid_build>=0 && avctx->idct_algo == FF_IDCT_AUTO && (av_get_cpu_flags() & AV_CPU_FLAG_MMX)) {
|
||||
avctx->idct_algo= FF_IDCT_XVIDMMX;
|
||||
ff_dct_common_init(s);
|
||||
s->picture_number=0;
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -664,7 +670,7 @@ retry:
|
||||
ret = decode_slice(s);
|
||||
while(s->mb_y<s->mb_height){
|
||||
if(s->msmpeg4_version){
|
||||
if(s->slice_height==0 || s->mb_x!=0 || (s->mb_y%s->slice_height)!=0 || get_bits_count(&s->gb) > s->gb.size_in_bits)
|
||||
if(s->slice_height==0 || s->mb_x!=0 || (s->mb_y%s->slice_height)!=0 || get_bits_left(&s->gb)<0)
|
||||
break;
|
||||
}else{
|
||||
int prev_x=s->mb_x, prev_y=s->mb_y;
|
||||
|
@@ -104,7 +104,7 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h){
|
||||
return 0;
|
||||
} //FIXME cleanup like check_intra_pred_mode
|
||||
|
||||
static int check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
|
||||
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
|
||||
MpegEncContext * const s = &h->s;
|
||||
static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
|
||||
static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
|
||||
@@ -136,22 +136,6 @@ static int check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
|
||||
return mode;
|
||||
}
|
||||
|
||||
/**
|
||||
* checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
|
||||
*/
|
||||
int ff_h264_check_intra16x16_pred_mode(H264Context *h, int mode)
|
||||
{
|
||||
return check_intra_pred_mode(h, mode, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
|
||||
*/
|
||||
int ff_h264_check_intra_chroma_pred_mode(H264Context *h, int mode)
|
||||
{
|
||||
return check_intra_pred_mode(h, mode, 1);
|
||||
}
|
||||
|
||||
|
||||
const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length){
|
||||
int i, si, di;
|
||||
@@ -2522,8 +2506,8 @@ static int field_end(H264Context *h, int in_setup){
|
||||
s->mb_y= 0;
|
||||
|
||||
if (!in_setup && !s->dropable)
|
||||
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, (16*s->mb_height >> FIELD_PICTURE) - 1,
|
||||
s->picture_structure==PICT_BOTTOM_FIELD);
|
||||
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX,
|
||||
s->picture_structure == PICT_BOTTOM_FIELD);
|
||||
|
||||
if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
|
||||
ff_vdpau_h264_set_reference_frames(s);
|
||||
@@ -2640,9 +2624,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
int num_ref_idx_active_override_flag;
|
||||
unsigned int slice_type, tmp, i, j;
|
||||
int default_ref_list_done = 0;
|
||||
int last_pic_structure;
|
||||
|
||||
s->dropable= h->nal_ref_idc == 0;
|
||||
int last_pic_structure, last_pic_dropable;
|
||||
|
||||
/* FIXME: 2tap qpel isn't implemented for high bit depth. */
|
||||
if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !h->nal_ref_idc && !h->pixel_shift){
|
||||
@@ -2661,8 +2643,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
|
||||
h0->current_slice = 0;
|
||||
if (!s0->first_field)
|
||||
s->current_picture_ptr= NULL;
|
||||
if (!s0->first_field) {
|
||||
if (s->current_picture_ptr && !s->dropable &&
|
||||
s->current_picture_ptr->owner2 == s) {
|
||||
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX,
|
||||
s->picture_structure == PICT_BOTTOM_FIELD);
|
||||
}
|
||||
s->current_picture_ptr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
slice_type= get_ue_golomb_31(&s->gb);
|
||||
@@ -2707,11 +2695,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
s->avctx->level = h->sps.level_idc;
|
||||
s->avctx->refs = h->sps.ref_frame_count;
|
||||
|
||||
if(h == h0 && h->dequant_coeff_pps != pps_id){
|
||||
h->dequant_coeff_pps = pps_id;
|
||||
init_dequant_tables(h);
|
||||
}
|
||||
|
||||
s->mb_width= h->sps.mb_width;
|
||||
s->mb_height= h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
|
||||
|
||||
@@ -2727,9 +2710,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
|| s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
|
||||
|| h->cur_chroma_format_idc != h->sps.chroma_format_idc
|
||||
|| av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio))) {
|
||||
if(h != h0 || (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
|
||||
if(h != h0 || (HAVE_THREADS && h->s.avctx->active_thread_type & FF_THREAD_FRAME)) {
|
||||
av_log_missing_feature(s->avctx, "Width/height/bit depth/chroma idc changing with threads is", 0);
|
||||
return -1; // width / height changed during parallelized decoding
|
||||
return AVERROR_PATCHWELCOME; // width / height changed during parallelized decoding
|
||||
}
|
||||
free_tables(h, 0);
|
||||
flush_dpb(s->avctx);
|
||||
@@ -2806,7 +2789,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
else
|
||||
s->avctx->pix_fmt = PIX_FMT_YUV420P10;
|
||||
break;
|
||||
default:
|
||||
case 8:
|
||||
if (CHROMA444){
|
||||
s->avctx->pix_fmt = s->avctx->color_range == AVCOL_RANGE_JPEG ? PIX_FMT_YUVJ444P : PIX_FMT_YUV444P;
|
||||
if (s->avctx->colorspace == AVCOL_SPC_RGB) {
|
||||
@@ -2825,6 +2808,11 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
hwaccel_pixfmt_list_h264_jpeg_420 :
|
||||
ff_hwaccel_pixfmt_list_420);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Unsupported bit depth: %d\n", h->sps.bit_depth_luma);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
s->avctx->hwaccel = ff_find_hwaccel(s->avctx->codec->id, s->avctx->pix_fmt);
|
||||
@@ -2870,11 +2858,18 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
}
|
||||
|
||||
if(h == h0 && h->dequant_coeff_pps != pps_id){
|
||||
h->dequant_coeff_pps = pps_id;
|
||||
init_dequant_tables(h);
|
||||
}
|
||||
|
||||
h->frame_num= get_bits(&s->gb, h->sps.log2_max_frame_num);
|
||||
|
||||
h->mb_mbaff = 0;
|
||||
h->mb_aff_frame = 0;
|
||||
last_pic_structure = s0->picture_structure;
|
||||
last_pic_dropable = s->dropable;
|
||||
s->dropable = h->nal_ref_idc == 0;
|
||||
if(h->sps.frame_mbs_only_flag){
|
||||
s->picture_structure= PICT_FRAME;
|
||||
}else{
|
||||
@@ -2891,10 +2886,22 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
h->mb_field_decoding_flag= s->picture_structure != PICT_FRAME;
|
||||
|
||||
if(h0->current_slice == 0){
|
||||
// Shorten frame num gaps so we don't have to allocate reference frames just to throw them away
|
||||
if(h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
|
||||
int unwrap_prev_frame_num = h->prev_frame_num, max_frame_num = 1<<h->sps.log2_max_frame_num;
|
||||
if (h0->current_slice != 0) {
|
||||
if (last_pic_structure != s->picture_structure ||
|
||||
last_pic_dropable != s->dropable) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR,
|
||||
"Changing field mode (%d -> %d) between slices is not allowed\n",
|
||||
last_pic_structure, s->picture_structure);
|
||||
s->picture_structure = last_pic_structure;
|
||||
s->dropable = last_pic_dropable;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
} else {
|
||||
/* Shorten frame num gaps so we don't have to allocate reference
|
||||
* frames just to throw them away */
|
||||
if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
|
||||
int unwrap_prev_frame_num = h->prev_frame_num;
|
||||
int max_frame_num = 1 << h->sps.log2_max_frame_num;
|
||||
|
||||
if (unwrap_prev_frame_num > h->frame_num) unwrap_prev_frame_num -= max_frame_num;
|
||||
|
||||
@@ -2907,8 +2914,74 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
}
|
||||
|
||||
while(h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 &&
|
||||
h->frame_num != (h->prev_frame_num+1)%(1<<h->sps.log2_max_frame_num)){
|
||||
/* See if we have a decoded first field looking for a pair...
|
||||
* Here, we're using that to see if we should mark previously
|
||||
* decode frames as "finished".
|
||||
* We have to do that before the "dummy" in-between frame allocation,
|
||||
* since that can modify s->current_picture_ptr. */
|
||||
if (s0->first_field) {
|
||||
assert(s0->current_picture_ptr);
|
||||
assert(s0->current_picture_ptr->f.data[0]);
|
||||
assert(s0->current_picture_ptr->f.reference != DELAYED_PIC_REF);
|
||||
|
||||
/* Mark old field/frame as completed */
|
||||
if (!last_pic_dropable && s0->current_picture_ptr->owner2 == s0) {
|
||||
ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
|
||||
last_pic_structure == PICT_BOTTOM_FIELD);
|
||||
}
|
||||
|
||||
/* figure out if we have a complementary field pair */
|
||||
if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
|
||||
/* Previous field is unmatched. Don't display it, but let it
|
||||
* remain for reference if marked as such. */
|
||||
if (!last_pic_dropable && last_pic_structure != PICT_FRAME) {
|
||||
ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
|
||||
last_pic_structure == PICT_TOP_FIELD);
|
||||
}
|
||||
} else {
|
||||
if (s0->current_picture_ptr->frame_num != h->frame_num) {
|
||||
/* This and previous field were reference, but had
|
||||
* different frame_nums. Consider this field first in
|
||||
* pair. Throw away previous field except for reference
|
||||
* purposes. */
|
||||
if (!last_pic_dropable && last_pic_structure != PICT_FRAME) {
|
||||
ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
|
||||
last_pic_structure == PICT_TOP_FIELD);
|
||||
}
|
||||
} else {
|
||||
/* Second field in complementary pair */
|
||||
if (!((last_pic_structure == PICT_TOP_FIELD &&
|
||||
s->picture_structure == PICT_BOTTOM_FIELD) ||
|
||||
(last_pic_structure == PICT_BOTTOM_FIELD &&
|
||||
s->picture_structure == PICT_TOP_FIELD))) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Invalid field mode combination %d/%d\n",
|
||||
last_pic_structure, s->picture_structure);
|
||||
s->picture_structure = last_pic_structure;
|
||||
s->dropable = last_pic_dropable;
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else if (last_pic_dropable != s->dropable) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Cannot combine reference and non-reference fields in the same frame\n");
|
||||
av_log_ask_for_sample(s->avctx, NULL);
|
||||
s->picture_structure = last_pic_structure;
|
||||
s->dropable = last_pic_dropable;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* Take ownership of this buffer. Note that if another thread owned
|
||||
* the first field of this buffer, we're not operating on that pointer,
|
||||
* so the original thread is still responsible for reporting progress
|
||||
* on that first field (or if that was us, we just did that above).
|
||||
* By taking ownership, we assign responsibility to ourselves to
|
||||
* report progress on the second field. */
|
||||
s0->current_picture_ptr->owner2 = s0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 &&
|
||||
h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
|
||||
Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
|
||||
av_log(h->s.avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num);
|
||||
if (ff_h264_frame_start(h) < 0)
|
||||
@@ -2939,7 +3012,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
}
|
||||
|
||||
/* See if we have a decoded first field looking for a pair... */
|
||||
/* See if we have a decoded first field looking for a pair...
|
||||
* We're using that to see whether to continue decoding in that
|
||||
* frame, or to allocate a new one. */
|
||||
if (s0->first_field) {
|
||||
assert(s0->current_picture_ptr);
|
||||
assert(s0->current_picture_ptr->f.data[0]);
|
||||
@@ -2956,13 +3031,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
|
||||
} else {
|
||||
if (s0->current_picture_ptr->frame_num != h->frame_num) {
|
||||
/*
|
||||
* This and previous field had
|
||||
* different frame_nums. Consider this field first in
|
||||
* pair. Throw away previous field except for reference
|
||||
* purposes.
|
||||
*/
|
||||
s0->first_field = 1;
|
||||
/* This and the previous field had different frame_nums.
|
||||
* Consider this field first in pair. Throw away previous
|
||||
* one except for reference purposes. */
|
||||
s0->first_field = 1;
|
||||
s0->current_picture_ptr = NULL;
|
||||
|
||||
} else {
|
||||
@@ -3041,7 +3113,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->ref_count[1]= h->pps.ref_count[1];
|
||||
|
||||
if(h->slice_type_nos != AV_PICTURE_TYPE_I){
|
||||
unsigned max= (16<<(s->picture_structure != PICT_FRAME))-1;
|
||||
unsigned max= s->picture_structure == PICT_FRAME ? 15 : 31;
|
||||
|
||||
if(h->slice_type_nos == AV_PICTURE_TYPE_B){
|
||||
h->direct_spatial_mv_pred= get_bits1(&s->gb);
|
||||
}
|
||||
@@ -3051,13 +3124,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
|
||||
if(h->slice_type_nos==AV_PICTURE_TYPE_B)
|
||||
h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
|
||||
}
|
||||
|
||||
}
|
||||
if(h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){
|
||||
if (h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
|
||||
h->ref_count[0]= h->ref_count[1]= 1;
|
||||
return -1;
|
||||
h->ref_count[0] = h->ref_count[1] = 1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if(h->slice_type_nos == AV_PICTURE_TYPE_B)
|
||||
h->list_count= 2;
|
||||
else
|
||||
@@ -3694,8 +3768,8 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){
|
||||
if(s->mb_y >= s->mb_height){
|
||||
tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
|
||||
|
||||
if( get_bits_count(&s->gb) == s->gb.size_in_bits
|
||||
|| get_bits_count(&s->gb) < s->gb.size_in_bits && s->avctx->error_recognition < FF_ER_AGGRESSIVE) {
|
||||
if ( get_bits_left(&s->gb) == 0
|
||||
|| get_bits_left(&s->gb) > 0 && !(s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
|
||||
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END&part_mask);
|
||||
|
||||
return 0;
|
||||
@@ -3707,9 +3781,9 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){
|
||||
}
|
||||
}
|
||||
|
||||
if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->mb_skip_run<=0){
|
||||
if (get_bits_left(&s->gb) <= 0 && s->mb_skip_run <= 0){
|
||||
tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
|
||||
if(get_bits_count(&s->gb) == s->gb.size_in_bits ){
|
||||
if (get_bits_left(&s->gb) == 0) {
|
||||
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END&part_mask);
|
||||
if (s->mb_x > lf_x_start) loop_filter(h, lf_x_start, s->mb_x);
|
||||
|
||||
@@ -3798,7 +3872,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
int consumed;
|
||||
int dst_length;
|
||||
int bit_length;
|
||||
uint8_t *ptr;
|
||||
const uint8_t *ptr;
|
||||
int i, nalsize = 0;
|
||||
int err;
|
||||
|
||||
@@ -3820,7 +3894,11 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
break;
|
||||
}
|
||||
|
||||
if(buf_index+3 >= buf_size) break;
|
||||
|
||||
if (buf_index + 3 >= buf_size) {
|
||||
buf_index = buf_size;
|
||||
break;
|
||||
}
|
||||
|
||||
buf_index+=3;
|
||||
if(buf_index >= next_avc) continue;
|
||||
@@ -3829,8 +3907,9 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
hx = h->thread_context[context_count];
|
||||
|
||||
ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, next_avc - buf_index);
|
||||
if (ptr==NULL || dst_length < 0){
|
||||
return -1;
|
||||
if (ptr == NULL || dst_length < 0) {
|
||||
buf_index = -1;
|
||||
goto end;
|
||||
}
|
||||
i= buf_index + consumed;
|
||||
if((s->workaround_bugs & FF_BUG_AUTODETECT) && i+3<next_avc &&
|
||||
@@ -3882,7 +3961,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
case NAL_IDR_SLICE:
|
||||
if (h->nal_unit_type != NAL_IDR_SLICE) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "Invalid mix of idr and non-idr slices\n");
|
||||
return -1;
|
||||
buf_index = -1;
|
||||
goto end;
|
||||
}
|
||||
idr(h); // FIXME ensure we don't lose some frames if there is reordering
|
||||
case NAL_SLICE:
|
||||
@@ -3974,10 +4054,10 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
break;
|
||||
case NAL_SPS:
|
||||
init_get_bits(&s->gb, ptr, bit_length);
|
||||
if(ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? (nalsize != consumed) && nalsize : 1)){
|
||||
if (ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? (nalsize != consumed) && nalsize : 1)){
|
||||
av_log(h->s.avctx, AV_LOG_DEBUG, "SPS decoding failure, trying alternative mode\n");
|
||||
if(h->is_avc) av_assert0(next_avc - buf_index + consumed == nalsize);
|
||||
init_get_bits(&s->gb, &buf[buf_index + 1 - consumed], 8*(next_avc - buf_index + consumed));
|
||||
init_get_bits(&s->gb, &buf[buf_index + 1 - consumed], 8*(next_avc - buf_index + consumed - 1));
|
||||
ff_h264_decode_seq_parameter_set(h);
|
||||
}
|
||||
|
||||
@@ -4026,6 +4106,15 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
}
|
||||
if(context_count)
|
||||
execute_decode_slices(h, context_count);
|
||||
|
||||
end:
|
||||
/* clean up */
|
||||
if (s->current_picture_ptr && s->current_picture_ptr->owner2 == s &&
|
||||
!s->dropable) {
|
||||
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX,
|
||||
s->picture_structure == PICT_BOTTOM_FIELD);
|
||||
}
|
||||
|
||||
return buf_index;
|
||||
}
|
||||
|
||||
|
@@ -671,15 +671,7 @@ void ff_generate_sliding_window_mmcos(H264Context *h);
|
||||
*/
|
||||
int ff_h264_check_intra4x4_pred_mode(H264Context *h);
|
||||
|
||||
/**
|
||||
* Check if the top & left blocks are available if needed & change the dc mode so it only uses the available blocks.
|
||||
*/
|
||||
int ff_h264_check_intra16x16_pred_mode(H264Context *h, int mode);
|
||||
|
||||
/**
|
||||
* Check if the top & left blocks are available if needed & change the dc mode so it only uses the available blocks.
|
||||
*/
|
||||
int ff_h264_check_intra_chroma_pred_mode(H264Context *h, int mode);
|
||||
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma);
|
||||
|
||||
void ff_h264_hl_decode_mb(H264Context *h);
|
||||
int ff_h264_frame_start(H264Context *h);
|
||||
|
@@ -1998,6 +1998,8 @@ decode_intra_mb:
|
||||
}
|
||||
|
||||
// The pixels are stored in the same order as levels in h->mb array.
|
||||
if ((int) (h->cabac.bytestream_end - ptr) < mb_size)
|
||||
return -1;
|
||||
memcpy(h->mb, ptr, mb_size); ptr+=mb_size;
|
||||
|
||||
ff_init_cabac_decoder(&h->cabac, ptr, h->cabac.bytestream_end - ptr);
|
||||
@@ -2042,14 +2044,14 @@ decode_intra_mb:
|
||||
write_back_intra_pred_mode(h);
|
||||
if( ff_h264_check_intra4x4_pred_mode(h) < 0 ) return -1;
|
||||
} else {
|
||||
h->intra16x16_pred_mode= ff_h264_check_intra16x16_pred_mode( h, h->intra16x16_pred_mode );
|
||||
h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode( h, h->intra16x16_pred_mode, 0 );
|
||||
if( h->intra16x16_pred_mode < 0 ) return -1;
|
||||
}
|
||||
if(decode_chroma){
|
||||
h->chroma_pred_mode_table[mb_xy] =
|
||||
pred_mode = decode_cabac_mb_chroma_pre_mode( h );
|
||||
|
||||
pred_mode= ff_h264_check_intra_chroma_pred_mode( h, pred_mode );
|
||||
pred_mode= ff_h264_check_intra_pred_mode( h, pred_mode, 1 );
|
||||
if( pred_mode < 0 ) return -1;
|
||||
h->chroma_pred_mode= pred_mode;
|
||||
} else {
|
||||
|
@@ -823,12 +823,12 @@ decode_intra_mb:
|
||||
if( ff_h264_check_intra4x4_pred_mode(h) < 0)
|
||||
return -1;
|
||||
}else{
|
||||
h->intra16x16_pred_mode= ff_h264_check_intra16x16_pred_mode(h, h->intra16x16_pred_mode);
|
||||
h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode(h, h->intra16x16_pred_mode, 0);
|
||||
if(h->intra16x16_pred_mode < 0)
|
||||
return -1;
|
||||
}
|
||||
if(decode_chroma){
|
||||
pred_mode= ff_h264_check_intra_chroma_pred_mode(h, get_ue_golomb_31(&s->gb));
|
||||
pred_mode= ff_h264_check_intra_pred_mode(h, get_ue_golomb_31(&s->gb), 1);
|
||||
if(pred_mode < 0)
|
||||
return -1;
|
||||
h->chroma_pred_mode= pred_mode;
|
||||
|
@@ -253,7 +253,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
|
||||
mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
|
||||
b8_stride = 2+4*s->mb_stride;
|
||||
b4_stride *= 6;
|
||||
if(IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])){
|
||||
if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
|
||||
mb_type_col[0] &= ~MB_TYPE_INTERLACED;
|
||||
mb_type_col[1] &= ~MB_TYPE_INTERLACED;
|
||||
}
|
||||
@@ -443,6 +443,10 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
|
||||
mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
|
||||
b8_stride = 2+4*s->mb_stride;
|
||||
b4_stride *= 6;
|
||||
if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
|
||||
mb_type_col[0] &= ~MB_TYPE_INTERLACED;
|
||||
mb_type_col[1] &= ~MB_TYPE_INTERLACED;
|
||||
}
|
||||
|
||||
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
|
||||
|
||||
|
@@ -241,7 +241,7 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){
|
||||
sps->num_reorder_frames= get_ue_golomb(&s->gb);
|
||||
get_ue_golomb(&s->gb); /*max_dec_frame_buffering*/
|
||||
|
||||
if(get_bits_left(&s->gb) < 0){
|
||||
if (get_bits_left(&s->gb) < 0) {
|
||||
sps->num_reorder_frames=0;
|
||||
sps->bitstream_restriction_flag= 0;
|
||||
}
|
||||
@@ -251,9 +251,9 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
if(get_bits_left(&s->gb) < 0){
|
||||
if (get_bits_left(&s->gb) < 0) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "Overread VUI by %d bits\n", -get_bits_left(&s->gb));
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -351,9 +351,9 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
|
||||
if (sps->chroma_format_idc > 3U) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "chroma_format_idc %d is illegal\n", sps->chroma_format_idc);
|
||||
goto fail;
|
||||
}
|
||||
if(sps->chroma_format_idc == 3)
|
||||
} else if(sps->chroma_format_idc == 3) {
|
||||
sps->residual_color_transform_flag = get_bits1(&s->gb);
|
||||
}
|
||||
sps->bit_depth_luma = get_ue_golomb(&s->gb) + 8;
|
||||
sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8;
|
||||
if (sps->bit_depth_luma > 12U || sps->bit_depth_chroma > 12U) {
|
||||
@@ -515,6 +515,9 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length){
|
||||
if(pps_id >= MAX_PPS_COUNT) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "pps_id (%d) out of range\n", pps_id);
|
||||
return -1;
|
||||
} else if (h->sps.bit_depth_luma > 10) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "Unimplemented luma bit depth=%d (max=10)\n", h->sps.bit_depth_luma);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
pps= av_mallocz(sizeof(PPS));
|
||||
|
@@ -655,6 +655,8 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
|
||||
|
||||
if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=1 + (s->picture_structure != PICT_FRAME) && s->current_picture_ptr->f.pict_type == AV_PICTURE_TYPE_I){
|
||||
s->current_picture_ptr->sync |= 1;
|
||||
if(!h->s.avctx->has_b_frames)
|
||||
h->sync = 2;
|
||||
}
|
||||
|
||||
return (h->s.avctx->err_recognition & AV_EF_EXPLODE) ? err : 0;
|
||||
|
@@ -164,7 +164,7 @@ static int decode_buffering_period(H264Context *h){
|
||||
int ff_h264_decode_sei(H264Context *h){
|
||||
MpegEncContext * const s = &h->s;
|
||||
|
||||
while(get_bits_count(&s->gb) + 16 < s->gb.size_in_bits){
|
||||
while (get_bits_left(&s->gb) > 16) {
|
||||
int size, type;
|
||||
|
||||
type=0;
|
||||
|
@@ -49,7 +49,6 @@ static const uint8_t scan8[16*3]={
|
||||
void FUNCC(ff_h264_idct_add)(uint8_t *_dst, DCTELEM *_block, int stride)
|
||||
{
|
||||
int i;
|
||||
INIT_CLIP
|
||||
pixel *dst = (pixel*)_dst;
|
||||
dctcoef *block = (dctcoef*)_block;
|
||||
stride >>= sizeof(pixel)-1;
|
||||
@@ -74,16 +73,15 @@ void FUNCC(ff_h264_idct_add)(uint8_t *_dst, DCTELEM *_block, int stride)
|
||||
const int z2= (block[1 + 4*i]>>1) - block[3 + 4*i];
|
||||
const int z3= block[1 + 4*i] + (block[3 + 4*i]>>1);
|
||||
|
||||
dst[i + 0*stride]= CLIP(dst[i + 0*stride] + ((z0 + z3) >> 6));
|
||||
dst[i + 1*stride]= CLIP(dst[i + 1*stride] + ((z1 + z2) >> 6));
|
||||
dst[i + 2*stride]= CLIP(dst[i + 2*stride] + ((z1 - z2) >> 6));
|
||||
dst[i + 3*stride]= CLIP(dst[i + 3*stride] + ((z0 - z3) >> 6));
|
||||
dst[i + 0*stride]= av_clip_pixel(dst[i + 0*stride] + ((z0 + z3) >> 6));
|
||||
dst[i + 1*stride]= av_clip_pixel(dst[i + 1*stride] + ((z1 + z2) >> 6));
|
||||
dst[i + 2*stride]= av_clip_pixel(dst[i + 2*stride] + ((z1 - z2) >> 6));
|
||||
dst[i + 3*stride]= av_clip_pixel(dst[i + 3*stride] + ((z0 - z3) >> 6));
|
||||
}
|
||||
}
|
||||
|
||||
void FUNCC(ff_h264_idct8_add)(uint8_t *_dst, DCTELEM *_block, int stride){
|
||||
int i;
|
||||
INIT_CLIP
|
||||
pixel *dst = (pixel*)_dst;
|
||||
dctcoef *block = (dctcoef*)_block;
|
||||
stride >>= sizeof(pixel)-1;
|
||||
@@ -143,14 +141,14 @@ void FUNCC(ff_h264_idct8_add)(uint8_t *_dst, DCTELEM *_block, int stride){
|
||||
const int b5 = (a3>>2) - a5;
|
||||
const int b7 = a7 - (a1>>2);
|
||||
|
||||
dst[i + 0*stride] = CLIP( dst[i + 0*stride] + ((b0 + b7) >> 6) );
|
||||
dst[i + 1*stride] = CLIP( dst[i + 1*stride] + ((b2 + b5) >> 6) );
|
||||
dst[i + 2*stride] = CLIP( dst[i + 2*stride] + ((b4 + b3) >> 6) );
|
||||
dst[i + 3*stride] = CLIP( dst[i + 3*stride] + ((b6 + b1) >> 6) );
|
||||
dst[i + 4*stride] = CLIP( dst[i + 4*stride] + ((b6 - b1) >> 6) );
|
||||
dst[i + 5*stride] = CLIP( dst[i + 5*stride] + ((b4 - b3) >> 6) );
|
||||
dst[i + 6*stride] = CLIP( dst[i + 6*stride] + ((b2 - b5) >> 6) );
|
||||
dst[i + 7*stride] = CLIP( dst[i + 7*stride] + ((b0 - b7) >> 6) );
|
||||
dst[i + 0*stride] = av_clip_pixel( dst[i + 0*stride] + ((b0 + b7) >> 6) );
|
||||
dst[i + 1*stride] = av_clip_pixel( dst[i + 1*stride] + ((b2 + b5) >> 6) );
|
||||
dst[i + 2*stride] = av_clip_pixel( dst[i + 2*stride] + ((b4 + b3) >> 6) );
|
||||
dst[i + 3*stride] = av_clip_pixel( dst[i + 3*stride] + ((b6 + b1) >> 6) );
|
||||
dst[i + 4*stride] = av_clip_pixel( dst[i + 4*stride] + ((b6 - b1) >> 6) );
|
||||
dst[i + 5*stride] = av_clip_pixel( dst[i + 5*stride] + ((b4 - b3) >> 6) );
|
||||
dst[i + 6*stride] = av_clip_pixel( dst[i + 6*stride] + ((b2 - b5) >> 6) );
|
||||
dst[i + 7*stride] = av_clip_pixel( dst[i + 7*stride] + ((b0 - b7) >> 6) );
|
||||
}
|
||||
}
|
||||
|
||||
@@ -158,13 +156,12 @@ void FUNCC(ff_h264_idct8_add)(uint8_t *_dst, DCTELEM *_block, int stride){
|
||||
void FUNCC(ff_h264_idct_dc_add)(uint8_t *p_dst, DCTELEM *block, int stride){
|
||||
int i, j;
|
||||
int dc = (((dctcoef*)block)[0] + 32) >> 6;
|
||||
INIT_CLIP
|
||||
pixel *dst = (pixel*)p_dst;
|
||||
stride >>= sizeof(pixel)-1;
|
||||
for( j = 0; j < 4; j++ )
|
||||
{
|
||||
for( i = 0; i < 4; i++ )
|
||||
dst[i] = CLIP( dst[i] + dc );
|
||||
dst[i] = av_clip_pixel( dst[i] + dc );
|
||||
dst += stride;
|
||||
}
|
||||
}
|
||||
@@ -172,13 +169,12 @@ void FUNCC(ff_h264_idct_dc_add)(uint8_t *p_dst, DCTELEM *block, int stride){
|
||||
void FUNCC(ff_h264_idct8_dc_add)(uint8_t *p_dst, DCTELEM *block, int stride){
|
||||
int i, j;
|
||||
int dc = (((dctcoef*)block)[0] + 32) >> 6;
|
||||
INIT_CLIP
|
||||
pixel *dst = (pixel*)p_dst;
|
||||
stride >>= sizeof(pixel)-1;
|
||||
for( j = 0; j < 8; j++ )
|
||||
{
|
||||
for( i = 0; i < 8; i++ )
|
||||
dst[i] = CLIP( dst[i] + dc );
|
||||
dst[i] = av_clip_pixel( dst[i] + dc );
|
||||
dst += stride;
|
||||
}
|
||||
}
|
||||
|
@@ -61,7 +61,7 @@ static int build_huff_tree(VLC *vlc, Node *nodes, int head, int flags)
|
||||
int pos = 0;
|
||||
|
||||
get_tree_codes(bits, lens, xlat, nodes, head, 0, 0, &pos, no_zero_count);
|
||||
return init_vlc_sparse(vlc, 9, pos, lens, 2, 2, bits, 4, 4, xlat, 1, 1, 0);
|
||||
return ff_init_vlc_sparse(vlc, 9, pos, lens, 2, 2, bits, 4, 4, xlat, 1, 1, 0);
|
||||
}
|
||||
|
||||
|
||||
|
@@ -82,13 +82,15 @@ typedef struct HYuvContext{
|
||||
DSPContext dsp;
|
||||
}HYuvContext;
|
||||
|
||||
static const unsigned char classic_shift_luma[] = {
|
||||
#define classic_shift_luma_table_size 42
|
||||
static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
|
||||
34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
|
||||
16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
|
||||
69,68, 0
|
||||
};
|
||||
|
||||
static const unsigned char classic_shift_chroma[] = {
|
||||
#define classic_shift_chroma_table_size 59
|
||||
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
|
||||
66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
|
||||
56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
|
||||
214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
|
||||
@@ -212,7 +214,7 @@ static int read_len_table(uint8_t *dst, GetBitContext *gb){
|
||||
if(repeat==0)
|
||||
repeat= get_bits(gb, 8);
|
||||
//printf("%d %d\n", val, repeat);
|
||||
if(i+repeat > 256) {
|
||||
if(i+repeat > 256 || get_bits_left(gb) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
|
||||
return -1;
|
||||
}
@@ -322,8 +324,8 @@ static void generate_joint_tables(HYuvContext *s){
i++;
}
}
free_vlc(&s->vlc[3+p]);
init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
ff_free_vlc(&s->vlc[3+p]);
ff_init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
}
}else{
uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
@@ -363,7 +365,7 @@ static void generate_joint_tables(HYuvContext *s){
}
}
}
free_vlc(&s->vlc[3]);
ff_free_vlc(&s->vlc[3]);
init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
}
}
@@ -380,7 +382,7 @@ static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
if(generate_bits_table(s->bits[i], s->len[i])<0){
return -1;
}
free_vlc(&s->vlc[i]);
ff_free_vlc(&s->vlc[i]);
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
}

@@ -394,10 +396,10 @@ static int read_old_huffman_tables(HYuvContext *s){
GetBitContext gb;
int i;

init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
if(read_len_table(s->len[0], &gb)<0)
return -1;
init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
init_get_bits(&gb, classic_shift_chroma, classic_shift_chroma_table_size*8);
if(read_len_table(s->len[1], &gb)<0)
return -1;

@@ -412,7 +414,7 @@ static int read_old_huffman_tables(HYuvContext *s){
memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

for(i=0; i<3; i++){
free_vlc(&s->vlc[i]);
ff_free_vlc(&s->vlc[i]);
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
}

@@ -543,7 +545,7 @@ s->bgr32=1;
}
break;
default:
assert(0);
return AVERROR_INVALIDDATA;
}

alloc_temp(s);
@@ -750,7 +752,7 @@ static void decode_422_bitstream(HYuvContext *s, int count){
count/=2;

if(count >= (get_bits_left(&s->gb))/(31*4)){
for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
}
@@ -768,7 +770,7 @@ static void decode_gray_bitstream(HYuvContext *s, int count){
count/=2;

if(count >= (get_bits_left(&s->gb))/(31*2)){
for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
}
}else{
@@ -1253,7 +1255,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
av_freep(&s->bitstream_buffer);

for(i=0; i<6; i++){
free_vlc(&s->vlc[i]);
ff_free_vlc(&s->vlc[i]);
}

return 0;

@@ -191,7 +191,13 @@ static int extract_header(AVCodecContext *const avctx,
const uint8_t *buf;
unsigned buf_size;
IffContext *s = avctx->priv_data;
int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
int palette_size;

if (avctx->extradata_size < 2) {
av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
return AVERROR_INVALIDDATA;
}
palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);

if (avpkt) {
int image_size;
@@ -207,8 +213,6 @@ static int extract_header(AVCodecContext *const avctx,
return AVERROR_INVALIDDATA;
}
} else {
if (avctx->extradata_size < 2)
return AVERROR_INVALIDDATA;
buf = avctx->extradata;
buf_size = bytestream_get_be16(&buf);
if (buf_size <= 1 || palette_size < 0) {
@@ -312,7 +316,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
int err;

if (avctx->bits_per_coded_sample <= 8) {
int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
int palette_size;

if (avctx->extradata_size >= 2)
palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
else
palette_size = 0;
avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
(avctx->extradata_size >= 2 && palette_size) ? PIX_FMT_PAL8 : PIX_FMT_GRAY8;
} else if (avctx->bits_per_coded_sample <= 32) {
@@ -473,7 +482,7 @@ static int decode_frame_ilbm(AVCodecContext *avctx,
} else if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res;
} else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != PIX_FMT_GRAY8) {
} else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt == PIX_FMT_PAL8) {
if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
return res;
}
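
Both IFF hunks above apply the same defensive pattern: avctx->extradata is only read with AV_RB16() after extradata_size has been checked, so palette_size can no longer be derived from missing or truncated extradata. A hypothetical helper capturing that guarded computation (the name and the zero fallback are illustrative, not part of the patch; extract_header() instead returns AVERROR_INVALIDDATA in the short-extradata case):

    #include "libavutil/intreadwrite.h"   /* AV_RB16() */

    static int iff_palette_size_sketch(const AVCodecContext *avctx)
    {
        if (avctx->extradata_size < 2)
            return 0;   /* no usable extradata header */
        /* extradata starts with a big-endian 16-bit header length;
         * whatever follows that header is the palette */
        return avctx->extradata_size - AV_RB16(avctx->extradata);
    }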
@@ -419,6 +419,9 @@ static int decode_cell_data(Cell *cell, uint8_t *block, uint8_t *ref_block,
blk_row_offset = (row_offset << (2 + v_zoom)) - (cell->width << 2);
line_offset = v_zoom ? row_offset : 0;

if (cell->height & v_zoom || cell->width & h_zoom)
return IV3_BAD_DATA;

for (y = 0; y < cell->height; is_first_row = 0, y += 1 + v_zoom) {
for (x = 0; x < cell->width; x += 1 + h_zoom) {
ref = ref_block;
@@ -727,6 +730,8 @@ static int parse_bintree(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
SPLIT_CELL(ref_cell->height, curr_cell.height);
ref_cell->ypos += curr_cell.height;
ref_cell->height -= curr_cell.height;
if (ref_cell->height <= 0 || curr_cell.height <= 0)
return AVERROR_INVALIDDATA;
} else if (code == V_SPLIT) {
if (curr_cell.width > strip_width) {
/* split strip */
@@ -735,6 +740,8 @@ static int parse_bintree(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
SPLIT_CELL(ref_cell->width, curr_cell.width);
ref_cell->xpos += curr_cell.width;
ref_cell->width -= curr_cell.width;
if (ref_cell->width <= 0 || curr_cell.width <= 0)
return AVERROR_INVALIDDATA;
}

while (get_bits_left(&ctx->gb) >= 2) { /* loop until return */
@@ -890,14 +897,24 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
return AVERROR_INVALIDDATA;

if (width != ctx->width || height != ctx->height) {
int res;

av_dlog(avctx, "Frame dimensions changed!\n");

if (width < 16 || width > 640 ||
height < 16 || height > 480 ||
width & 3 || height & 3) {
av_log(avctx, AV_LOG_ERROR,
"Invalid picture dimensions: %d x %d!\n", width, height);
return AVERROR_INVALIDDATA;
}

ctx->width = width;
ctx->height = height;

free_frame_buffers(ctx);
if(allocate_frame_buffers(ctx, avctx) < 0)
return AVERROR_INVALIDDATA;
if ((res = allocate_frame_buffers(ctx, avctx)) < 0)
return res;
avcodec_set_dimensions(avctx, width, height);
}
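
The decode_frame_headers() hunk above does two things: it rejects implausible picture sizes before the frame buffers are reallocated, and it propagates the real return value of allocate_frame_buffers() instead of a blanket AVERROR_INVALIDDATA. The accepted range can be written as a small predicate (illustrative only; the limits are taken from the hunk):

    /* Indeo 3 frames must be 16..640 x 16..480 with both sides a multiple of 4. */
    static int indeo3_dimensions_ok(int width, int height)
    {
        return width  >= 16 && width  <= 640 &&
               height >= 16 && height <= 480 &&
               !(width & 3) && !(height & 3);
    }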
@@ -35,9 +35,6 @@
|
||||
#include "ivi_common.h"
|
||||
#include "indeo4data.h"
|
||||
|
||||
#define IVI4_STREAM_ANALYSER 0
|
||||
#define IVI4_DEBUG_CHECKSUM 0
|
||||
|
||||
/**
|
||||
* Indeo 4 frame types.
|
||||
*/
|
||||
@@ -54,46 +51,6 @@ enum {
|
||||
#define IVI4_PIC_SIZE_ESC 7
|
||||
|
||||
|
||||
typedef struct {
|
||||
GetBitContext gb;
|
||||
AVFrame frame;
|
||||
RVMapDesc rvmap_tabs[9]; ///< local corrected copy of the static rvmap tables
|
||||
|
||||
uint32_t frame_num;
|
||||
int frame_type;
|
||||
int prev_frame_type; ///< frame type of the previous frame
|
||||
uint32_t data_size; ///< size of the frame data in bytes from picture header
|
||||
int is_scalable;
|
||||
int transp_status; ///< transparency mode status: 1 - enabled
|
||||
|
||||
IVIPicConfig pic_conf;
|
||||
IVIPlaneDesc planes[3]; ///< color planes
|
||||
|
||||
int buf_switch; ///< used to switch between three buffers
|
||||
int dst_buf; ///< buffer index for the currently decoded frame
|
||||
int ref_buf; ///< inter frame reference buffer index
|
||||
|
||||
IVIHuffTab mb_vlc; ///< current macroblock table descriptor
|
||||
IVIHuffTab blk_vlc; ///< current block table descriptor
|
||||
|
||||
uint16_t checksum; ///< frame checksum
|
||||
|
||||
uint8_t rvmap_sel;
|
||||
uint8_t in_imf;
|
||||
uint8_t in_q; ///< flag for explicitly stored quantiser delta
|
||||
uint8_t pic_glob_quant;
|
||||
uint8_t unknown1;
|
||||
|
||||
#if IVI4_STREAM_ANALYSER
|
||||
uint8_t has_b_frames;
|
||||
uint8_t has_transp;
|
||||
uint8_t uses_tiling;
|
||||
uint8_t uses_haar;
|
||||
uint8_t uses_fullpel;
|
||||
#endif
|
||||
} IVI4DecContext;
|
||||
|
||||
|
||||
static const struct {
|
||||
InvTransformPtr *inv_trans;
|
||||
DCTransformPtr *dc_trans;
|
||||
@@ -158,7 +115,7 @@ static inline int scale_tile_size(int def_size, int size_factor)
|
||||
* @param[in] avctx pointer to the AVCodecContext
|
||||
* @return result code: 0 = OK, negative number = error
|
||||
*/
|
||||
static int decode_pic_hdr(IVI4DecContext *ctx, AVCodecContext *avctx)
|
||||
static int decode_pic_hdr(IVI45DecContext *ctx, AVCodecContext *avctx)
|
||||
{
|
||||
int pic_size_indx, i, p;
|
||||
IVIPicConfig pic_conf;
|
||||
@@ -322,7 +279,7 @@ static int decode_pic_hdr(IVI4DecContext *ctx, AVCodecContext *avctx)
|
||||
* @param[in] avctx pointer to the AVCodecContext
|
||||
* @return result code: 0 = OK, negative number = error
|
||||
*/
|
||||
static int decode_band_hdr(IVI4DecContext *ctx, IVIBandDesc *band,
|
||||
static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band,
|
||||
AVCodecContext *avctx)
|
||||
{
|
||||
int plane, band_num, indx, transform_id, scan_indx;
|
||||
@@ -372,7 +329,8 @@ static int decode_band_hdr(IVI4DecContext *ctx, IVIBandDesc *band,
|
||||
|
||||
if (!get_bits1(&ctx->gb) || ctx->frame_type == FRAMETYPE_INTRA) {
|
||||
transform_id = get_bits(&ctx->gb, 5);
|
||||
if (!transforms[transform_id].inv_trans) {
|
||||
if (transform_id >= FF_ARRAY_ELEMS(transforms) ||
|
||||
!transforms[transform_id].inv_trans) {
|
||||
av_log_ask_for_sample(avctx, "Unimplemented transform: %d!\n", transform_id);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
@@ -457,7 +415,7 @@ static int decode_band_hdr(IVI4DecContext *ctx, IVIBandDesc *band,
|
||||
* @param[in] avctx pointer to the AVCodecContext
|
||||
* @return result code: 0 = OK, negative number = error
|
||||
*/
|
||||
static int decode_mb_info(IVI4DecContext *ctx, IVIBandDesc *band,
|
||||
static int decode_mb_info(IVI45DecContext *ctx, IVIBandDesc *band,
|
||||
IVITile *tile, AVCodecContext *avctx)
|
||||
{
|
||||
int x, y, mv_x, mv_y, mv_delta, offs, mb_offset, blks_per_mb,
|
||||
@@ -476,6 +434,11 @@ static int decode_mb_info(IVI4DecContext *ctx, IVIBandDesc *band,
|
||||
mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3);
|
||||
mv_x = mv_y = 0;

if (((tile->width + band->mb_size-1)/band->mb_size) * ((tile->height + band->mb_size-1)/band->mb_size) != tile->num_MBs) {
av_log(avctx, AV_LOG_ERROR, "num_MBs mismatch %d %d %d %d\n", tile->width, tile->height, band->mb_size, tile->num_MBs);
return -1;
}
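
The added check above verifies that the macroblock count the tile was allocated with matches the count implied by its dimensions, rounding each side up to whole macroblocks. The same expression, factored into a hypothetical helper for readability (not part of the patch):

    static int tile_mb_count(int tile_width, int tile_height, int mb_size)
    {
        return ((tile_width  + mb_size - 1) / mb_size) *
               ((tile_height + mb_size - 1) / mb_size);
    }

    /* as used in the hunk:
     *   if (tile_mb_count(tile->width, tile->height, band->mb_size) != tile->num_MBs)
     *       return -1;
     */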
for (y = tile->ypos; y < tile->ypos + tile->height; y += band->mb_size) {
|
||||
mb_offset = offs;
|
||||
|
||||
@@ -572,126 +535,12 @@ static int decode_mb_info(IVI4DecContext *ctx, IVIBandDesc *band,
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Decode an Indeo 4 band.
|
||||
*
|
||||
* @param[in,out] ctx pointer to the decoder context
|
||||
* @param[in,out] band pointer to the band descriptor
|
||||
* @param[in] avctx pointer to the AVCodecContext
|
||||
* @return result code: 0 = OK, negative number = error
|
||||
*/
|
||||
static int decode_band(IVI4DecContext *ctx, int plane_num,
|
||||
IVIBandDesc *band, AVCodecContext *avctx)
|
||||
{
|
||||
int result, i, t, pos, idx1, idx2;
|
||||
IVITile *tile;
|
||||
|
||||
band->buf = band->bufs[ctx->dst_buf];
|
||||
band->ref_buf = band->bufs[ctx->ref_buf];
|
||||
|
||||
result = decode_band_hdr(ctx, band, avctx);
|
||||
if (result) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error decoding band header\n");
|
||||
return result;
|
||||
}
|
||||
|
||||
if (band->is_empty) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Empty band encountered!\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
band->rv_map = &ctx->rvmap_tabs[band->rvmap_sel];
|
||||
|
||||
/* apply corrections to the selected rvmap table if present */
|
||||
for (i = 0; i < band->num_corr; i++) {
|
||||
idx1 = band->corr[i * 2];
|
||||
idx2 = band->corr[i * 2 + 1];
|
||||
FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]);
|
||||
FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
|
||||
}
|
||||
|
||||
pos = get_bits_count(&ctx->gb);
|
||||
|
||||
for (t = 0; t < band->num_tiles; t++) {
|
||||
tile = &band->tiles[t];
|
||||
|
||||
tile->is_empty = get_bits1(&ctx->gb);
|
||||
if (tile->is_empty) {
|
||||
ff_ivi_process_empty_tile(avctx, band, tile,
|
||||
(ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3));
|
||||
av_dlog(avctx, "Empty tile encountered!\n");
|
||||
} else {
|
||||
tile->data_size = ff_ivi_dec_tile_data_size(&ctx->gb);
|
||||
if (!tile->data_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Tile data size is zero!\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
result = decode_mb_info(ctx, band, tile, avctx);
|
||||
if (result < 0)
|
||||
break;
|
||||
|
||||
result = ff_ivi_decode_blocks(&ctx->gb, band, tile);
|
||||
if (result < 0 || ((get_bits_count(&ctx->gb) - pos) >> 3) != tile->data_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Corrupted tile data encountered!\n");
|
||||
break;
|
||||
}
|
||||
|
||||
pos += tile->data_size << 3; // skip to next tile
|
||||
}
|
||||
}
|
||||
|
||||
/* restore the selected rvmap table by applying its corrections in reverse order */
|
||||
for (i = band->num_corr - 1; i >= 0; i--) {
|
||||
idx1 = band->corr[i * 2];
|
||||
idx2 = band->corr[i * 2 + 1];
|
||||
FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]);
|
||||
FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
|
||||
}
|
||||
|
||||
#if defined(DEBUG) && IVI4_DEBUG_CHECKSUM
|
||||
if (band->checksum_present) {
|
||||
uint16_t chksum = ivi_calc_band_checksum(band);
|
||||
if (chksum != band->checksum) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Band checksum mismatch! Plane %d, band %d, received: %x, calculated: %x\n",
|
||||
band->plane, band->band_num, band->checksum, chksum);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
align_get_bits(&ctx->gb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static av_cold int decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
IVI4DecContext *ctx = avctx->priv_data;
|
||||
|
||||
ff_ivi_init_static_vlc();
|
||||
|
||||
/* copy rvmap tables in our context so we can apply changes to them */
|
||||
memcpy(ctx->rvmap_tabs, ff_ivi_rvmap_tabs, sizeof(ff_ivi_rvmap_tabs));
|
||||
|
||||
/* Force allocation of the internal buffers */
|
||||
/* during picture header decoding. */
|
||||
ctx->pic_conf.pic_width = 0;
|
||||
ctx->pic_conf.pic_height = 0;
|
||||
|
||||
avctx->pix_fmt = PIX_FMT_YUV410P;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Rearrange decoding and reference buffers.
|
||||
*
|
||||
* @param[in,out] ctx pointer to the decoder context
|
||||
*/
|
||||
static void switch_buffers(IVI4DecContext *ctx)
|
||||
static void switch_buffers(IVI45DecContext *ctx)
|
||||
{
|
||||
switch (ctx->prev_frame_type) {
|
||||
case FRAMETYPE_INTRA:
|
||||
@@ -720,95 +569,33 @@ static void switch_buffers(IVI4DecContext *ctx)
|
||||
}
|
||||
|
||||
|
||||
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
|
||||
AVPacket *avpkt)
|
||||
static int is_nonnull_frame(IVI45DecContext *ctx)
|
||||
{
|
||||
IVI4DecContext *ctx = avctx->priv_data;
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
int result, p, b;
|
||||
|
||||
init_get_bits(&ctx->gb, buf, buf_size * 8);
|
||||
|
||||
result = decode_pic_hdr(ctx, avctx);
|
||||
if (result) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error decoding picture header\n");
|
||||
return result;
|
||||
}
|
||||
|
||||
switch_buffers(ctx);
|
||||
|
||||
if (ctx->frame_type < FRAMETYPE_NULL_FIRST) {
|
||||
for (p = 0; p < 3; p++) {
|
||||
for (b = 0; b < ctx->planes[p].num_bands; b++) {
|
||||
result = decode_band(ctx, p, &ctx->planes[p].bands[b], avctx);
|
||||
if (result) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Error decoding band: %d, plane: %d\n", b, p);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* If the bidirectional mode is enabled, next I and the following P frame will */
|
||||
/* be sent together. Unfortunately the approach below seems to be the only way */
|
||||
/* to handle the B-frames mode. That's exactly the same Intel decoders do. */
|
||||
if (ctx->frame_type == FRAMETYPE_INTRA) {
|
||||
while (get_bits(&ctx->gb, 8)); // skip version string
|
||||
skip_bits_long(&ctx->gb, 64); // skip padding, TODO: implement correct 8-bytes alignment
|
||||
if (get_bits_left(&ctx->gb) > 18 && show_bits(&ctx->gb, 18) == 0x3FFF8)
|
||||
av_log(avctx, AV_LOG_ERROR, "Buffer contains IP frames!\n");
|
||||
}
|
||||
|
||||
if (ctx->frame.data[0])
|
||||
avctx->release_buffer(avctx, &ctx->frame);
|
||||
|
||||
ctx->frame.reference = 0;
|
||||
if ((result = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return result;
|
||||
}
|
||||
|
||||
if (ctx->is_scalable) {
|
||||
ff_ivi_recompose_haar(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0], 4);
|
||||
} else {
|
||||
ff_ivi_output_plane(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0]);
|
||||
}
|
||||
|
||||
ff_ivi_output_plane(&ctx->planes[2], ctx->frame.data[1], ctx->frame.linesize[1]);
|
||||
ff_ivi_output_plane(&ctx->planes[1], ctx->frame.data[2], ctx->frame.linesize[2]);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = ctx->frame;
|
||||
|
||||
return buf_size;
|
||||
return ctx->frame_type < FRAMETYPE_NULL_FIRST;
|
||||
}
|
||||
|
||||
|
||||
static av_cold int decode_close(AVCodecContext *avctx)
|
||||
static av_cold int decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
IVI4DecContext *ctx = avctx->priv_data;
|
||||
IVI45DecContext *ctx = avctx->priv_data;
|
||||
|
||||
ff_ivi_free_buffers(&ctx->planes[0]);
|
||||
ff_ivi_init_static_vlc();
|
||||
|
||||
if (ctx->frame.data[0])
|
||||
avctx->release_buffer(avctx, &ctx->frame);
|
||||
/* copy rvmap tables in our context so we can apply changes to them */
|
||||
memcpy(ctx->rvmap_tabs, ff_ivi_rvmap_tabs, sizeof(ff_ivi_rvmap_tabs));
|
||||
|
||||
#if IVI4_STREAM_ANALYSER
|
||||
if (ctx->is_scalable)
|
||||
av_log(avctx, AV_LOG_ERROR, "This video uses scalability mode!\n");
|
||||
if (ctx->uses_tiling)
|
||||
av_log(avctx, AV_LOG_ERROR, "This video uses local decoding!\n");
|
||||
if (ctx->has_b_frames)
|
||||
av_log(avctx, AV_LOG_ERROR, "This video contains B-frames!\n");
|
||||
if (ctx->has_transp)
|
||||
av_log(avctx, AV_LOG_ERROR, "Transparency mode is enabled!\n");
|
||||
if (ctx->uses_haar)
|
||||
av_log(avctx, AV_LOG_ERROR, "This video uses Haar transform!\n");
|
||||
if (ctx->uses_fullpel)
|
||||
av_log(avctx, AV_LOG_ERROR, "This video uses fullpel motion vectors!\n");
|
||||
#endif
|
||||
/* Force allocation of the internal buffers */
|
||||
/* during picture header decoding. */
|
||||
ctx->pic_conf.pic_width = 0;
|
||||
ctx->pic_conf.pic_height = 0;
|
||||
|
||||
avctx->pix_fmt = PIX_FMT_YUV410P;
|
||||
|
||||
ctx->decode_pic_hdr = decode_pic_hdr;
|
||||
ctx->decode_band_hdr = decode_band_hdr;
|
||||
ctx->decode_mb_info = decode_mb_info;
|
||||
ctx->switch_buffers = switch_buffers;
|
||||
ctx->is_nonnull_frame = is_nonnull_frame;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -818,9 +605,9 @@ AVCodec ff_indeo4_decoder = {
|
||||
.name = "indeo4",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_INDEO4,
|
||||
.priv_data_size = sizeof(IVI4DecContext),
|
||||
.priv_data_size = sizeof(IVI45DecContext),
|
||||
.init = decode_init,
|
||||
.close = decode_close,
|
||||
.decode = decode_frame,
|
||||
.close = ff_ivi_decode_close,
|
||||
.decode = ff_ivi_decode_frame,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Intel Indeo Video Interactive 4"),
|
||||
};
|
||||
|
@@ -48,37 +48,6 @@ enum {
|
||||
|
||||
#define IVI5_PIC_SIZE_ESC 15
|
||||
|
||||
#define IVI5_IS_PROTECTED 0x20
|
||||
|
||||
typedef struct {
|
||||
GetBitContext gb;
|
||||
AVFrame frame;
|
||||
RVMapDesc rvmap_tabs[9]; ///< local corrected copy of the static rvmap tables
|
||||
IVIPlaneDesc planes[3]; ///< color planes
|
||||
const uint8_t *frame_data; ///< input frame data pointer
|
||||
int buf_switch; ///< used to switch between three buffers
|
||||
int inter_scal; ///< signals a sequence of scalable inter frames
|
||||
int dst_buf; ///< buffer index for the currently decoded frame
|
||||
int ref_buf; ///< inter frame reference buffer index
|
||||
int ref2_buf; ///< temporal storage for switching buffers
|
||||
uint32_t frame_size; ///< frame size in bytes
|
||||
int frame_type;
|
||||
int prev_frame_type; ///< frame type of the previous frame
|
||||
int frame_num;
|
||||
uint32_t pic_hdr_size; ///< picture header size in bytes
|
||||
uint8_t frame_flags;
|
||||
uint16_t checksum; ///< frame checksum
|
||||
|
||||
IVIHuffTab mb_vlc; ///< vlc table for decoding macroblock data
|
||||
|
||||
uint16_t gop_hdr_size;
|
||||
uint8_t gop_flags;
|
||||
int is_scalable;
|
||||
uint32_t lock_word;
|
||||
IVIPicConfig pic_conf;
|
||||
} IVI5DecContext;
|
||||
|
||||
|
||||
/**
|
||||
* Decode Indeo5 GOP (Group of pictures) header.
|
||||
* This header is present in key frames only.
|
||||
@@ -88,7 +57,7 @@ typedef struct {
|
||||
* @param[in] avctx ptr to the AVCodecContext
|
||||
* @return result code: 0 = OK, -1 = error
|
||||
*/
|
||||
static int decode_gop_header(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
static int decode_gop_header(IVI45DecContext *ctx, AVCodecContext *avctx)
|
||||
{
|
||||
int result, i, p, tile_size, pic_size_indx, mb_size, blk_size, is_scalable;
|
||||
int quant_mat, blk_size_changed = 0;
|
||||
@@ -220,6 +189,10 @@ static int decode_gop_header(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
}
|
||||
|
||||
if (band->blk_size == 8) {
|
||||
if(quant_mat >= 5){
|
||||
av_log(avctx, AV_LOG_ERROR, "quant_mat %d too large!\n", quant_mat);
|
||||
return -1;
|
||||
}
|
||||
band->intra_base = &ivi5_base_quant_8x8_intra[quant_mat][0];
|
||||
band->inter_base = &ivi5_base_quant_8x8_inter[quant_mat][0];
|
||||
band->intra_scale = &ivi5_scale_quant_8x8_intra[quant_mat][0];
|
||||
@@ -319,7 +292,7 @@ static inline void skip_hdr_extension(GetBitContext *gb)
|
||||
* @param[in] avctx ptr to the AVCodecContext
|
||||
* @return result code: 0 = OK, -1 = error
|
||||
*/
|
||||
static int decode_pic_hdr(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
static int decode_pic_hdr(IVI45DecContext *ctx, AVCodecContext *avctx)
|
||||
{
|
||||
if (get_bits(&ctx->gb, 5) != 0x1F) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid picture start code!\n");
|
||||
@@ -336,8 +309,12 @@ static int decode_pic_hdr(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
ctx->frame_num = get_bits(&ctx->gb, 8);
|
||||
|
||||
if (ctx->frame_type == FRAMETYPE_INTRA) {
|
||||
if (decode_gop_header(ctx, avctx))
|
||||
return -1;
|
||||
ctx->gop_invalid = 1;
|
||||
if (decode_gop_header(ctx, avctx)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid GOP header, skipping frames.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
ctx->gop_invalid = 0;
|
||||
}
|
||||
|
||||
if (ctx->frame_type != FRAMETYPE_NULL) {
|
||||
@@ -372,7 +349,7 @@ static int decode_pic_hdr(IVI5DecContext *ctx, AVCodecContext *avctx)
|
||||
* @param[in] avctx ptr to the AVCodecContext
|
||||
* @return result code: 0 = OK, -1 = error
|
||||
*/
|
||||
static int decode_band_hdr(IVI5DecContext *ctx, IVIBandDesc *band,
|
||||
static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band,
|
||||
AVCodecContext *avctx)
|
||||
{
|
||||
int i;
|
||||
@@ -442,7 +419,7 @@ static int decode_band_hdr(IVI5DecContext *ctx, IVIBandDesc *band,
|
||||
* @param[in] avctx ptr to the AVCodecContext
|
||||
* @return result code: 0 = OK, -1 = error
|
||||
*/
|
||||
static int decode_mb_info(IVI5DecContext *ctx, IVIBandDesc *band,
|
||||
static int decode_mb_info(IVI45DecContext *ctx, IVIBandDesc *band,
|
||||
IVITile *tile, AVCodecContext *avctx)
|
||||
{
|
||||
int x, y, mv_x, mv_y, mv_delta, offs, mb_offset,
|
||||
@@ -458,6 +435,12 @@ static int decode_mb_info(IVI5DecContext *ctx, IVIBandDesc *band,
|
||||
((band->qdelta_present && band->inherit_qdelta) || band->inherit_mv))
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches parameters %d\n",
|
||||
tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size));
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* scale factor for motion vectors */
|
||||
mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3);
|
||||
mv_x = mv_y = 0;
|
||||
@@ -561,102 +544,12 @@ static int decode_mb_info(IVI5DecContext *ctx, IVIBandDesc *band,
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Decode an Indeo5 band.
|
||||
*
|
||||
* @param[in,out] ctx ptr to the decoder context
|
||||
* @param[in,out] band ptr to the band descriptor
|
||||
* @param[in] avctx ptr to the AVCodecContext
|
||||
* @return result code: 0 = OK, -1 = error
|
||||
*/
|
||||
static int decode_band(IVI5DecContext *ctx, int plane_num,
|
||||
IVIBandDesc *band, AVCodecContext *avctx)
|
||||
{
|
||||
int result, i, t, idx1, idx2, pos;
|
||||
IVITile *tile;
|
||||
|
||||
band->buf = band->bufs[ctx->dst_buf];
|
||||
band->ref_buf = band->bufs[ctx->ref_buf];
|
||||
band->data_ptr = ctx->frame_data + (get_bits_count(&ctx->gb) >> 3);
|
||||
|
||||
result = decode_band_hdr(ctx, band, avctx);
|
||||
if (result) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error while decoding band header: %d\n",
|
||||
result);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (band->is_empty) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Empty band encountered!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
band->rv_map = &ctx->rvmap_tabs[band->rvmap_sel];
|
||||
|
||||
/* apply corrections to the selected rvmap table if present */
|
||||
for (i = 0; i < band->num_corr; i++) {
|
||||
idx1 = band->corr[i*2];
|
||||
idx2 = band->corr[i*2+1];
|
||||
FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]);
|
||||
FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
|
||||
}
|
||||
|
||||
pos = get_bits_count(&ctx->gb);
|
||||
|
||||
for (t = 0; t < band->num_tiles; t++) {
|
||||
tile = &band->tiles[t];
|
||||
|
||||
tile->is_empty = get_bits1(&ctx->gb);
|
||||
if (tile->is_empty) {
|
||||
ff_ivi_process_empty_tile(avctx, band, tile,
|
||||
(ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3));
|
||||
} else {
|
||||
tile->data_size = ff_ivi_dec_tile_data_size(&ctx->gb);
|
||||
|
||||
result = decode_mb_info(ctx, band, tile, avctx);
|
||||
if (result < 0)
|
||||
break;
|
||||
|
||||
result = ff_ivi_decode_blocks(&ctx->gb, band, tile);
|
||||
if (result < 0 || (get_bits_count(&ctx->gb) - pos) >> 3 != tile->data_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Corrupted tile data encountered!\n");
|
||||
break;
|
||||
}
|
||||
pos += tile->data_size << 3; // skip to next tile
|
||||
}
|
||||
}
|
||||
|
||||
/* restore the selected rvmap table by applying its corrections in reverse order */
|
||||
for (i = band->num_corr-1; i >= 0; i--) {
|
||||
idx1 = band->corr[i*2];
|
||||
idx2 = band->corr[i*2+1];
|
||||
FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]);
|
||||
FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
if (band->checksum_present) {
|
||||
uint16_t chksum = ivi_calc_band_checksum(band);
|
||||
if (chksum != band->checksum) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Band checksum mismatch! Plane %d, band %d, received: %x, calculated: %x\n",
|
||||
band->plane, band->band_num, band->checksum, chksum);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
align_get_bits(&ctx->gb);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Switch buffers.
|
||||
*
|
||||
* @param[in,out] ctx ptr to the decoder context
|
||||
*/
|
||||
static void switch_buffers(IVI5DecContext *ctx)
|
||||
static void switch_buffers(IVI45DecContext *ctx)
|
||||
{
|
||||
switch (ctx->prev_frame_type) {
|
||||
case FRAMETYPE_INTRA:
|
||||
@@ -694,12 +587,18 @@ static void switch_buffers(IVI5DecContext *ctx)
|
||||
}
|
||||
|
||||
|
||||
static int is_nonnull_frame(IVI45DecContext *ctx)
|
||||
{
|
||||
return ctx->frame_type != FRAMETYPE_NULL;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Initialize Indeo5 decoder.
|
||||
*/
|
||||
static av_cold int decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
IVI5DecContext *ctx = avctx->priv_data;
|
||||
IVI45DecContext *ctx = avctx->priv_data;
|
||||
int result;
|
||||
|
||||
ff_ivi_init_static_vlc();
|
||||
@@ -729,109 +628,25 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
ctx->buf_switch = 0;
|
||||
ctx->inter_scal = 0;
|
||||
|
||||
ctx->decode_pic_hdr = decode_pic_hdr;
|
||||
ctx->decode_band_hdr = decode_band_hdr;
|
||||
ctx->decode_mb_info = decode_mb_info;
|
||||
ctx->switch_buffers = switch_buffers;
|
||||
ctx->is_nonnull_frame = is_nonnull_frame;
|
||||
|
||||
avctx->pix_fmt = PIX_FMT_YUV410P;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* main decoder function
|
||||
*/
|
||||
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
|
||||
AVPacket *avpkt)
|
||||
{
|
||||
IVI5DecContext *ctx = avctx->priv_data;
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
int result, p, b;
|
||||
|
||||
init_get_bits(&ctx->gb, buf, buf_size * 8);
|
||||
ctx->frame_data = buf;
|
||||
ctx->frame_size = buf_size;
|
||||
|
||||
result = decode_pic_hdr(ctx, avctx);
|
||||
if (result) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Error while decoding picture header: %d\n", result);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (ctx->gop_flags & IVI5_IS_PROTECTED) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Password-protected clip!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
switch_buffers(ctx);
|
||||
|
||||
//{ START_TIMER;
|
||||
|
||||
if (ctx->frame_type != FRAMETYPE_NULL) {
|
||||
for (p = 0; p < 3; p++) {
|
||||
for (b = 0; b < ctx->planes[p].num_bands; b++) {
|
||||
result = decode_band(ctx, p, &ctx->planes[p].bands[b], avctx);
|
||||
if (result) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Error while decoding band: %d, plane: %d\n", b, p);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//STOP_TIMER("decode_planes"); }
|
||||
|
||||
if (ctx->frame.data[0])
|
||||
avctx->release_buffer(avctx, &ctx->frame);
|
||||
|
||||
ctx->frame.reference = 0;
|
||||
if (avctx->get_buffer(avctx, &ctx->frame) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (ctx->is_scalable) {
|
||||
ff_ivi_recompose53 (&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0], 4);
|
||||
} else {
|
||||
ff_ivi_output_plane(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0]);
|
||||
}
|
||||
|
||||
ff_ivi_output_plane(&ctx->planes[2], ctx->frame.data[1], ctx->frame.linesize[1]);
|
||||
ff_ivi_output_plane(&ctx->planes[1], ctx->frame.data[2], ctx->frame.linesize[2]);
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = ctx->frame;
|
||||
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Close Indeo5 decoder and clean up its context.
|
||||
*/
|
||||
static av_cold int decode_close(AVCodecContext *avctx)
|
||||
{
|
||||
IVI5DecContext *ctx = avctx->priv_data;
|
||||
|
||||
ff_ivi_free_buffers(&ctx->planes[0]);
|
||||
|
||||
if (ctx->mb_vlc.cust_tab.table)
|
||||
free_vlc(&ctx->mb_vlc.cust_tab);
|
||||
|
||||
if (ctx->frame.data[0])
|
||||
avctx->release_buffer(avctx, &ctx->frame);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
AVCodec ff_indeo5_decoder = {
|
||||
.name = "indeo5",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_INDEO5,
|
||||
.priv_data_size = sizeof(IVI5DecContext),
|
||||
.priv_data_size = sizeof(IVI45DecContext),
|
||||
.init = decode_init,
|
||||
.close = decode_close,
|
||||
.decode = decode_frame,
|
||||
.close = ff_ivi_decode_close,
|
||||
.decode = ff_ivi_decode_frame,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Intel Indeo Video Interactive 5"),
|
||||
};
|
||||
|
@@ -65,8 +65,8 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
|
||||
s->pb_frame = get_bits1(&s->gb);
|
||||
|
||||
if (format < 6) {
|
||||
s->width = h263_format[format][0];
|
||||
s->height = h263_format[format][1];
|
||||
s->width = ff_h263_format[format][0];
|
||||
s->height = ff_h263_format[format][1];
|
||||
s->avctx->sample_aspect_ratio.num = 12;
|
||||
s->avctx->sample_aspect_ratio.den = 11;
|
||||
} else {
|
||||
@@ -77,7 +77,7 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
|
||||
}
|
||||
if(get_bits(&s->gb, 2))
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
|
||||
s->loop_filter = get_bits1(&s->gb);
|
||||
s->loop_filter = get_bits1(&s->gb) * !s->avctx->lowres;
|
||||
if(get_bits1(&s->gb))
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Bad value for reserved field\n");
|
||||
if(get_bits1(&s->gb))
|
||||
|
@@ -103,7 +103,7 @@ static VLC cbpc_b_vlc;
|
||||
/* init vlcs */
|
||||
|
||||
/* XXX: find a better solution to handle static init */
|
||||
void h263_decode_init_vlc(MpegEncContext *s)
|
||||
void ff_h263_decode_init_vlc(MpegEncContext *s)
|
||||
{
|
||||
static int done = 0;
|
||||
|
||||
@@ -120,18 +120,18 @@ void h263_decode_init_vlc(MpegEncContext *s)
|
||||
&ff_h263_cbpy_tab[0][1], 2, 1,
|
||||
&ff_h263_cbpy_tab[0][0], 2, 1, 64);
|
||||
INIT_VLC_STATIC(&mv_vlc, MV_VLC_BITS, 33,
|
||||
&mvtab[0][1], 2, 1,
|
||||
&mvtab[0][0], 2, 1, 538);
|
||||
init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
|
||||
init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
|
||||
&ff_mvtab[0][1], 2, 1,
|
||||
&ff_mvtab[0][0], 2, 1, 538);
|
||||
ff_init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
|
||||
ff_init_rl(&ff_rl_intra_aic, ff_h263_static_rl_table_store[1]);
|
||||
INIT_VLC_RL(ff_h263_rl_inter, 554);
|
||||
INIT_VLC_RL(rl_intra_aic, 554);
|
||||
INIT_VLC_RL(ff_rl_intra_aic, 554);
|
||||
INIT_VLC_STATIC(&h263_mbtype_b_vlc, H263_MBTYPE_B_VLC_BITS, 15,
|
||||
&h263_mbtype_b_tab[0][1], 2, 1,
|
||||
&h263_mbtype_b_tab[0][0], 2, 1, 80);
|
||||
&ff_h263_mbtype_b_tab[0][1], 2, 1,
|
||||
&ff_h263_mbtype_b_tab[0][0], 2, 1, 80);
|
||||
INIT_VLC_STATIC(&cbpc_b_vlc, CBPC_B_VLC_BITS, 4,
|
||||
&cbpc_b_tab[0][1], 2, 1,
|
||||
&cbpc_b_tab[0][0], 2, 1, 8);
|
||||
&ff_cbpc_b_tab[0][1], 2, 1,
|
||||
&ff_cbpc_b_tab[0][0], 2, 1, 8);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -271,7 +271,7 @@ int ff_h263_resync(MpegEncContext *s){
|
||||
return -1;
|
||||
}
|
||||
|
||||
int h263_decode_motion(MpegEncContext * s, int pred, int f_code)
|
||||
int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code)
|
||||
{
|
||||
int code, val, sign, shift;
|
||||
code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
|
||||
@@ -381,16 +381,16 @@ static void preview_obmc(MpegEncContext *s){
|
||||
if ((cbpc & 16) == 0) {
|
||||
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
/* 16x16 motion prediction */
|
||||
mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
if (s->umvplus)
|
||||
mx = h263p_decode_umotion(s, pred_x);
|
||||
else
|
||||
mx = h263_decode_motion(s, pred_x, 1);
|
||||
mx = ff_h263_decode_motion(s, pred_x, 1);
|
||||
|
||||
if (s->umvplus)
|
||||
my = h263p_decode_umotion(s, pred_y);
|
||||
else
|
||||
my = h263_decode_motion(s, pred_y, 1);
|
||||
my = ff_h263_decode_motion(s, pred_y, 1);
|
||||
|
||||
mot_val[0 ]= mot_val[2 ]=
|
||||
mot_val[0+stride]= mot_val[2+stride]= mx;
|
||||
@@ -399,16 +399,16 @@ static void preview_obmc(MpegEncContext *s){
|
||||
} else {
|
||||
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
for(i=0;i<4;i++) {
|
||||
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
if (s->umvplus)
|
||||
mx = h263p_decode_umotion(s, pred_x);
|
||||
else
|
||||
mx = h263_decode_motion(s, pred_x, 1);
|
||||
mx = ff_h263_decode_motion(s, pred_x, 1);
|
||||
|
||||
if (s->umvplus)
|
||||
my = h263p_decode_umotion(s, pred_y);
|
||||
else
|
||||
my = h263_decode_motion(s, pred_y, 1);
|
||||
my = ff_h263_decode_motion(s, pred_y, 1);
|
||||
if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
|
||||
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
|
||||
mot_val[0] = mx;
|
||||
@@ -432,7 +432,7 @@ static void h263_decode_dquant(MpegEncContext *s){
|
||||
|
||||
if(s->modified_quant){
|
||||
if(get_bits1(&s->gb))
|
||||
s->qscale= modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
|
||||
s->qscale= ff_modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
|
||||
else
|
||||
s->qscale= get_bits(&s->gb, 5);
|
||||
}else
|
||||
@@ -450,7 +450,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
|
||||
|
||||
scan_table = s->intra_scantable.permutated;
|
||||
if (s->h263_aic && s->mb_intra) {
|
||||
rl = &rl_intra_aic;
|
||||
rl = &ff_rl_intra_aic;
|
||||
i = 0;
|
||||
if (s->ac_pred) {
|
||||
if (s->h263_aic_dir)
|
||||
@@ -539,7 +539,7 @@ retry:
|
||||
if (i >= 64){
|
||||
if(s->alt_inter_vlc && rl == &ff_h263_rl_inter && !s->mb_intra){
|
||||
//Looks like a hack but no, it's the way it is supposed to work ...
|
||||
rl = &rl_intra_aic;
|
||||
rl = &ff_rl_intra_aic;
|
||||
i = 0;
|
||||
s->gb= gb;
|
||||
s->dsp.clear_block(block);
|
||||
@@ -556,7 +556,7 @@ retry:
|
||||
}
|
||||
not_coded:
|
||||
if (s->mb_intra && s->h263_aic) {
|
||||
h263_pred_acdc(s, block, n);
|
||||
ff_h263_pred_acdc(s, block, n);
|
||||
i = 63;
|
||||
}
|
||||
s->block_last_index[n] = i;
|
||||
@@ -655,11 +655,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
/* 16x16 motion prediction */
|
||||
s->mv_type = MV_TYPE_16X16;
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
if (s->umvplus)
|
||||
mx = h263p_decode_umotion(s, pred_x);
|
||||
else
|
||||
mx = h263_decode_motion(s, pred_x, 1);
|
||||
mx = ff_h263_decode_motion(s, pred_x, 1);
|
||||
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
@@ -667,7 +667,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
if (s->umvplus)
|
||||
my = h263p_decode_umotion(s, pred_y);
|
||||
else
|
||||
my = h263_decode_motion(s, pred_y, 1);
|
||||
my = ff_h263_decode_motion(s, pred_y, 1);
|
||||
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
@@ -680,18 +680,18 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
|
||||
s->mv_type = MV_TYPE_8X8;
|
||||
for(i=0;i<4;i++) {
|
||||
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
if (s->umvplus)
|
||||
mx = h263p_decode_umotion(s, pred_x);
|
||||
else
|
||||
mx = h263_decode_motion(s, pred_x, 1);
|
||||
mx = ff_h263_decode_motion(s, pred_x, 1);
|
||||
if (mx >= 0xffff)
|
||||
return -1;
|
||||
|
||||
if (s->umvplus)
|
||||
my = h263p_decode_umotion(s, pred_y);
|
||||
else
|
||||
my = h263_decode_motion(s, pred_y, 1);
|
||||
my = ff_h263_decode_motion(s, pred_y, 1);
|
||||
if (my >= 0xffff)
|
||||
return -1;
|
||||
s->mv[0][i][0] = mx;
|
||||
@@ -763,11 +763,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
//FIXME UMV
|
||||
|
||||
if(USES_LIST(mb_type, 0)){
|
||||
int16_t *mot_val= h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
int16_t *mot_val= ff_h263_pred_motion(s, 0, 0, &mx, &my);
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
|
||||
mx = h263_decode_motion(s, mx, 1);
|
||||
my = h263_decode_motion(s, my, 1);
|
||||
mx = ff_h263_decode_motion(s, mx, 1);
|
||||
my = ff_h263_decode_motion(s, my, 1);
|
||||
|
||||
s->mv[0][0][0] = mx;
|
||||
s->mv[0][0][1] = my;
|
||||
@@ -776,11 +776,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
}
|
||||
|
||||
if(USES_LIST(mb_type, 1)){
|
||||
int16_t *mot_val= h263_pred_motion(s, 0, 1, &mx, &my);
|
||||
int16_t *mot_val= ff_h263_pred_motion(s, 0, 1, &mx, &my);
|
||||
s->mv_dir |= MV_DIR_BACKWARD;
|
||||
|
||||
mx = h263_decode_motion(s, mx, 1);
|
||||
my = h263_decode_motion(s, my, 1);
|
||||
mx = ff_h263_decode_motion(s, mx, 1);
|
||||
my = ff_h263_decode_motion(s, my, 1);
|
||||
|
||||
s->mv[1][0][0] = mx;
|
||||
s->mv[1][0][1] = my;
|
||||
@@ -831,8 +831,8 @@ intra:
|
||||
}
|
||||
|
||||
while(pb_mv_count--){
|
||||
h263_decode_motion(s, 0, 1);
|
||||
h263_decode_motion(s, 0, 1);
|
||||
ff_h263_decode_motion(s, 0, 1);
|
||||
ff_h263_decode_motion(s, 0, 1);
|
||||
}
|
||||
|
||||
/* decode each block */
|
||||
@@ -854,8 +854,8 @@ end:
|
||||
{
|
||||
int v= show_bits(&s->gb, 16);
|
||||
|
||||
if(get_bits_count(&s->gb) + 16 > s->gb.size_in_bits){
|
||||
v>>= get_bits_count(&s->gb) + 16 - s->gb.size_in_bits;
|
||||
if (get_bits_left(&s->gb) < 16) {
|
||||
v >>= 16 - get_bits_left(&s->gb);
|
||||
}
|
||||
|
||||
if(v==0)
|
||||
@@ -866,7 +866,7 @@ end:
|
||||
}
|
||||
|
||||
/* most is hardcoded. should extend to handle all h263 streams */
|
||||
int h263_decode_picture_header(MpegEncContext *s)
|
||||
int ff_h263_decode_picture_header(MpegEncContext *s)
|
||||
{
|
||||
int format, width, height, i;
|
||||
uint32_t startcode;
|
||||
@@ -918,8 +918,8 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
if (format != 7 && format != 6) {
|
||||
s->h263_plus = 0;
|
||||
/* H.263v1 */
|
||||
width = h263_format[format][0];
|
||||
height = h263_format[format][1];
|
||||
width = ff_h263_format[format][0];
|
||||
height = ff_h263_format[format][1];
|
||||
if (!width)
|
||||
return -1;
|
||||
|
||||
@@ -963,6 +963,8 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
s->h263_aic = get_bits1(&s->gb); /* Advanced Intra Coding (AIC) */
|
||||
s->loop_filter= get_bits1(&s->gb);
|
||||
s->unrestricted_mv = s->umvplus || s->obmc || s->loop_filter;
|
||||
if(s->avctx->lowres)
|
||||
s->loop_filter = 0;
|
||||
|
||||
s->h263_slice_structured= get_bits1(&s->gb);
|
||||
if (get_bits1(&s->gb) != 0) {
|
||||
@@ -1026,8 +1028,8 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
s->avctx->sample_aspect_ratio= ff_h263_pixel_aspect[s->aspect_ratio_info];
|
||||
}
|
||||
} else {
|
||||
width = h263_format[format][0];
|
||||
height = h263_format[format][1];
|
||||
width = ff_h263_format[format][0];
|
||||
height = ff_h263_format[format][1];
|
||||
s->avctx->sample_aspect_ratio= (AVRational){12,11};
|
||||
}
|
||||
if ((width == 0) || (height == 0))
|
||||
|
@@ -102,7 +102,7 @@ av_const int ff_h263_aspect_to_info(AVRational aspect){
|
||||
return FF_ASPECT_EXTENDED;
|
||||
}
|
||||
|
||||
void h263_encode_picture_header(MpegEncContext * s, int picture_number)
|
||||
void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number)
|
||||
{
|
||||
int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
|
||||
int best_clock_code=1;
|
||||
@@ -141,7 +141,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
|
||||
put_bits(&s->pb, 1, 0); /* camera off */
|
||||
put_bits(&s->pb, 1, 0); /* freeze picture release off */
|
||||
|
||||
format = ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format), s->width, s->height);
|
||||
format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height);
|
||||
if (!s->h263_plus) {
|
||||
/* H.263v1 */
|
||||
put_bits(&s->pb, 3, format);
|
||||
@@ -247,7 +247,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
|
||||
/**
|
||||
* Encode a group of blocks header.
|
||||
*/
|
||||
void h263_encode_gob_header(MpegEncContext * s, int mb_line)
|
||||
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line)
|
||||
{
|
||||
put_bits(&s->pb, 17, 1); /* GBSC */
|
||||
|
||||
@@ -333,7 +333,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
|
||||
} else {
|
||||
i = 0;
|
||||
if (s->h263_aic && s->mb_intra)
|
||||
rl = &rl_intra_aic;
|
||||
rl = &ff_rl_intra_aic;
|
||||
|
||||
if(s->alt_inter_vlc && !s->mb_intra){
|
||||
int aic_vlc_bits=0;
|
||||
@@ -353,14 +353,14 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
|
||||
if(level<0) level= -level;
|
||||
|
||||
code = get_rl_index(rl, last, run, level);
|
||||
aic_code = get_rl_index(&rl_intra_aic, last, run, level);
|
||||
aic_code = get_rl_index(&ff_rl_intra_aic, last, run, level);
|
||||
inter_vlc_bits += rl->table_vlc[code][1]+1;
|
||||
aic_vlc_bits += rl_intra_aic.table_vlc[aic_code][1]+1;
|
||||
aic_vlc_bits += ff_rl_intra_aic.table_vlc[aic_code][1]+1;
|
||||
|
||||
if (code == rl->n) {
|
||||
inter_vlc_bits += 1+6+8-1;
|
||||
}
|
||||
if (aic_code == rl_intra_aic.n) {
|
||||
if (aic_code == ff_rl_intra_aic.n) {
|
||||
aic_vlc_bits += 1+6+8-1;
|
||||
wrong_pos += run + 1;
|
||||
}else
|
||||
@@ -370,7 +370,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n)
|
||||
}
|
||||
i = 0;
|
||||
if(aic_vlc_bits < inter_vlc_bits && wrong_pos > 63)
|
||||
rl = &rl_intra_aic;
|
||||
rl = &ff_rl_intra_aic;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -454,9 +454,9 @@ static void h263p_encode_umotion(MpegEncContext * s, int val)
|
||||
}
|
||||
}
|
||||
|
||||
void h263_encode_mb(MpegEncContext * s,
|
||||
DCTELEM block[6][64],
|
||||
int motion_x, int motion_y)
|
||||
void ff_h263_encode_mb(MpegEncContext * s,
|
||||
DCTELEM block[6][64],
|
||||
int motion_x, int motion_y)
|
||||
{
|
||||
int cbpc, cbpy, i, cbp, pred_x, pred_y;
|
||||
int16_t pred_dc;
|
||||
@@ -500,7 +500,7 @@ void h263_encode_mb(MpegEncContext * s,
|
||||
}
|
||||
|
||||
/* motion vectors: 16x16 mode */
|
||||
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
|
||||
|
||||
if (!s->umvplus) {
|
||||
ff_h263_encode_motion_vector(s, motion_x - pred_x,
|
||||
@@ -527,7 +527,7 @@ void h263_encode_mb(MpegEncContext * s,
|
||||
|
||||
for(i=0; i<4; i++){
|
||||
/* motion vectors: 8x8 mode*/
|
||||
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
|
||||
|
||||
motion_x = s->current_picture.f.motion_val[0][s->block_index[i]][0];
|
||||
motion_y = s->current_picture.f.motion_val[0][s->block_index[i]][1];
|
||||
@@ -561,7 +561,7 @@ void h263_encode_mb(MpegEncContext * s,
|
||||
if(i<4) scale= s->y_dc_scale;
|
||||
else scale= s->c_dc_scale;
|
||||
|
||||
pred_dc = h263_pred_dc(s, i, &dc_ptr[i]);
|
||||
pred_dc = ff_h263_pred_dc(s, i, &dc_ptr[i]);
|
||||
level -= pred_dc;
|
||||
/* Quant */
|
||||
if (level >= 0)
|
||||
@@ -662,7 +662,7 @@ void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
|
||||
if (val == 0) {
|
||||
/* zero vector */
|
||||
code = 0;
|
||||
put_bits(&s->pb, mvtab[code][1], mvtab[code][0]);
|
||||
put_bits(&s->pb, ff_mvtab[code][1], ff_mvtab[code][0]);
|
||||
} else {
|
||||
bit_size = f_code - 1;
|
||||
range = 1 << bit_size;
|
||||
@@ -676,7 +676,7 @@ void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code)
|
||||
code = (val >> bit_size) + 1;
|
||||
bits = val & (range - 1);
|
||||
|
||||
put_bits(&s->pb, mvtab[code][1] + 1, (mvtab[code][0] << 1) | sign);
|
||||
put_bits(&s->pb, ff_mvtab[code][1] + 1, (ff_mvtab[code][0] << 1) | sign);
|
||||
if (bit_size > 0) {
|
||||
put_bits(&s->pb, bit_size, bits);
|
||||
}
|
||||
@@ -692,7 +692,7 @@ static void init_mv_penalty_and_fcode(MpegEncContext *s)
|
||||
for(mv=-MAX_MV; mv<=MAX_MV; mv++){
|
||||
int len;
|
||||
|
||||
if(mv==0) len= mvtab[0][1];
|
||||
if(mv==0) len= ff_mvtab[0][1];
|
||||
else{
|
||||
int val, bit_size, code;
|
||||
|
||||
@@ -704,9 +704,9 @@ static void init_mv_penalty_and_fcode(MpegEncContext *s)
|
||||
val--;
|
||||
code = (val >> bit_size) + 1;
|
||||
if(code<33){
|
||||
len= mvtab[code][1] + 1 + bit_size;
|
||||
len= ff_mvtab[code][1] + 1 + bit_size;
|
||||
}else{
|
||||
len= mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
|
||||
len= ff_mvtab[32][1] + av_log2(code>>5) + 2 + bit_size;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -768,17 +768,17 @@ static void init_uni_h263_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_t
|
||||
}
|
||||
}
|
||||
|
||||
void h263_encode_init(MpegEncContext *s)
|
||||
void ff_h263_encode_init(MpegEncContext *s)
|
||||
{
|
||||
static int done = 0;
|
||||
|
||||
if (!done) {
|
||||
done = 1;
|
||||
|
||||
init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
|
||||
init_rl(&rl_intra_aic, ff_h263_static_rl_table_store[1]);
|
||||
ff_init_rl(&ff_h263_rl_inter, ff_h263_static_rl_table_store[0]);
|
||||
ff_init_rl(&ff_rl_intra_aic, ff_h263_static_rl_table_store[1]);
|
||||
|
||||
init_uni_h263_rl_tab(&rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
|
||||
init_uni_h263_rl_tab(&ff_rl_intra_aic, NULL, uni_h263_intra_aic_rl_len);
|
||||
init_uni_h263_rl_tab(&ff_h263_rl_inter , NULL, uni_h263_inter_rl_len);
|
||||
|
||||
init_mv_penalty_and_fcode(s);
|
||||
|
@@ -123,6 +123,10 @@ int ff_ivi_dec_huff_desc(GetBitContext *gb, int desc_coded, int which_tab,
|
||||
if (huff_tab->tab_sel == 7) {
|
||||
/* custom huffman table (explicitly encoded) */
|
||||
new_huff.num_rows = get_bits(gb, 4);
|
||||
if (!new_huff.num_rows) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Empty custom Huffman table!\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
for (i = 0; i < new_huff.num_rows; i++)
|
||||
new_huff.xbits[i] = get_bits(gb, 4);
|
||||
@@ -132,13 +136,14 @@ int ff_ivi_dec_huff_desc(GetBitContext *gb, int desc_coded, int which_tab,
|
||||
ff_ivi_huff_desc_copy(&huff_tab->cust_desc, &new_huff);
|
||||
|
||||
if (huff_tab->cust_tab.table)
|
||||
free_vlc(&huff_tab->cust_tab);
|
||||
ff_free_vlc(&huff_tab->cust_tab);
|
||||
result = ff_ivi_create_huff_from_desc(&huff_tab->cust_desc,
|
||||
&huff_tab->cust_tab, 0);
|
||||
if (result) {
|
||||
huff_tab->cust_desc.num_rows = 0; // reset faulty description
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Error while initializing custom vlc table!\n");
|
||||
return -1;
|
||||
return result;
|
||||
}
|
||||
}
|
||||
huff_tab->tab = &huff_tab->cust_tab;
|
||||
@@ -207,14 +212,15 @@ int av_cold ff_ivi_init_planes(IVIPlaneDesc *planes, const IVIPicConfig *cfg)
|
||||
band->width = b_width;
|
||||
band->height = b_height;
|
||||
band->pitch = width_aligned;
|
||||
band->bufs[0] = av_malloc(buf_size);
|
||||
band->bufs[1] = av_malloc(buf_size);
|
||||
band->aheight = height_aligned;
|
||||
band->bufs[0] = av_mallocz(buf_size);
|
||||
band->bufs[1] = av_mallocz(buf_size);
|
||||
if (!band->bufs[0] || !band->bufs[1])
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
/* allocate the 3rd band buffer for scalability mode */
|
||||
if (cfg->luma_bands > 1) {
|
||||
band->bufs[2] = av_malloc(buf_size);
|
||||
band->bufs[2] = av_mallocz(buf_size);
|
||||
if (!band->bufs[2])
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
@@ -237,7 +243,7 @@ void av_cold ff_ivi_free_buffers(IVIPlaneDesc *planes)
|
||||
av_freep(&planes[p].bands[b].bufs[2]);
|
||||
|
||||
if (planes[p].bands[b].blk_vlc.cust_tab.table)
|
||||
free_vlc(&planes[p].bands[b].blk_vlc.cust_tab);
|
||||
ff_free_vlc(&planes[p].bands[b].blk_vlc.cust_tab);
|
||||
for (t = 0; t < planes[p].bands[b].num_tiles; t++)
|
||||
av_freep(&planes[p].bands[b].tiles[t].mbs);
|
||||
av_freep(&planes[p].bands[b].tiles);
|
||||
@@ -284,6 +290,7 @@ int av_cold ff_ivi_init_tiles(IVIPlaneDesc *planes, int tile_width, int tile_hei
|
||||
for (x = 0; x < band->width; x += t_width) {
|
||||
tile->xpos = x;
|
||||
tile->ypos = y;
|
||||
tile->mb_size = band->mb_size;
|
||||
tile->width = FFMIN(band->width - x, t_width);
|
||||
tile->height = FFMIN(band->height - y, t_height);
|
||||
tile->is_empty = tile->data_size = 0;
|
||||
@@ -379,6 +386,21 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
|
||||
mv_x >>= 1;
|
||||
mv_y >>= 1; /* convert halfpel vectors into fullpel ones */
|
||||
}
|
||||
if (mb->type) {
|
||||
int dmv_x, dmv_y, cx, cy;
|
||||
|
||||
dmv_x = mb->mv_x >> band->is_halfpel;
|
||||
dmv_y = mb->mv_y >> band->is_halfpel;
|
||||
cx = mb->mv_x & band->is_halfpel;
|
||||
cy = mb->mv_y & band->is_halfpel;
|
||||
|
||||
if ( mb->xpos + dmv_x < 0
|
||||
|| mb->xpos + dmv_x + band->mb_size + cx > band->pitch
|
||||
|| mb->ypos + dmv_y < 0
|
||||
|| mb->ypos + dmv_y + band->mb_size + cy > band->aheight) {
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (blk = 0; blk < num_blocks; blk++) {
|
||||
@@ -471,8 +493,17 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
IVITile *tile, int32_t mv_scale)
|
||||
/**
|
||||
* Handle empty tiles by performing data copying and motion
|
||||
* compensation respectively.
|
||||
*
|
||||
* @param[in] avctx ptr to the AVCodecContext
|
||||
* @param[in] band pointer to the band descriptor
|
||||
* @param[in] tile pointer to the tile descriptor
|
||||
* @param[in] mv_scale scaling factor for motion vectors
|
||||
*/
|
||||
static int ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
IVITile *tile, int32_t mv_scale)
|
||||
{
|
||||
int x, y, need_mc, mbn, blk, num_blocks, mv_x, mv_y, mc_type;
|
||||
int offs, mb_offset, row_offset;
|
||||
@@ -482,6 +513,13 @@ void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
void (*mc_no_delta_func)(int16_t *buf, const int16_t *ref_buf, uint32_t pitch,
|
||||
int mc_type);
|
||||
|
||||
if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches "
|
||||
"parameters %d in ivi_process_empty_tile()\n",
|
||||
tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size));
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
offs = tile->ypos * band->pitch + tile->xpos;
|
||||
mb = tile->mbs;
|
||||
ref_mb = tile->ref_mbs;
|
||||
@@ -562,6 +600,8 @@ void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
|
||||
dst += band->pitch;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -624,6 +664,226 @@ void ff_ivi_output_plane(IVIPlaneDesc *plane, uint8_t *dst, int dst_pitch)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode an Indeo 4 or 5 band.
|
||||
*
|
||||
* @param[in,out] ctx ptr to the decoder context
|
||||
* @param[in,out] band ptr to the band descriptor
|
||||
* @param[in] avctx ptr to the AVCodecContext
|
||||
* @return result code: 0 = OK, -1 = error
|
||||
*/
|
||||
static int decode_band(IVI45DecContext *ctx, int plane_num,
|
||||
IVIBandDesc *band, AVCodecContext *avctx)
|
||||
{
|
||||
int result, i, t, idx1, idx2, pos;
|
||||
IVITile *tile;
|
||||
|
||||
band->buf = band->bufs[ctx->dst_buf];
|
||||
band->ref_buf = band->bufs[ctx->ref_buf];
|
||||
band->data_ptr = ctx->frame_data + (get_bits_count(&ctx->gb) >> 3);
|
||||
|
||||
result = ctx->decode_band_hdr(ctx, band, avctx);
|
||||
if (result) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error while decoding band header: %d\n",
|
||||
result);
|
||||
return result;
|
||||
}
|
||||
|
||||
if (band->is_empty) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Empty band encountered!\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
band->rv_map = &ctx->rvmap_tabs[band->rvmap_sel];
|
||||
|
||||
/* apply corrections to the selected rvmap table if present */
|
||||
for (i = 0; i < band->num_corr; i++) {
|
||||
idx1 = band->corr[i * 2];
|
||||
idx2 = band->corr[i * 2 + 1];
|
||||
FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]);
|
||||
FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
|
||||
}
|
||||
|
||||
pos = get_bits_count(&ctx->gb);
|
||||
|
||||
for (t = 0; t < band->num_tiles; t++) {
|
||||
tile = &band->tiles[t];
|
||||
|
||||
if (tile->mb_size != band->mb_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "MB sizes mismatch: %d vs. %d\n",
|
||||
band->mb_size, tile->mb_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
tile->is_empty = get_bits1(&ctx->gb);
|
||||
if (tile->is_empty) {
|
||||
result = ivi_process_empty_tile(avctx, band, tile,
|
||||
(ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3));
|
||||
if (result < 0)
|
||||
break;
|
||||
av_dlog(avctx, "Empty tile encountered!\n");
|
||||
} else {
|
||||
tile->data_size = ff_ivi_dec_tile_data_size(&ctx->gb);
|
||||
if (!tile->data_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Tile data size is zero!\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
result = ctx->decode_mb_info(ctx, band, tile, avctx);
|
||||
if (result < 0)
|
||||
break;
|
||||
|
||||
result = ff_ivi_decode_blocks(&ctx->gb, band, tile);
|
||||
if (result < 0 || ((get_bits_count(&ctx->gb) - pos) >> 3) != tile->data_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Corrupted tile data encountered!\n");
|
||||
break;
|
||||
}
|
||||
|
||||
pos += tile->data_size << 3; // skip to next tile
|
||||
}
|
||||
}
|
||||
|
||||
/* restore the selected rvmap table by applying its corrections in reverse order */
|
||||
for (i = band->num_corr-1; i >= 0; i--) {
|
||||
idx1 = band->corr[i*2];
|
||||
idx2 = band->corr[i*2+1];
|
||||
FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]);
|
||||
FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
if (band->checksum_present) {
|
||||
uint16_t chksum = ivi_calc_band_checksum(band);
|
||||
if (chksum != band->checksum) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Band checksum mismatch! Plane %d, band %d, received: %x, calculated: %x\n",
|
||||
band->plane, band->band_num, band->checksum, chksum);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
align_get_bits(&ctx->gb);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    IVI45DecContext *ctx = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int result, p, b;

    init_get_bits(&ctx->gb, buf, buf_size * 8);
    ctx->frame_data = buf;
    ctx->frame_size = buf_size;

    result = ctx->decode_pic_hdr(ctx, avctx);
    if (result) {
        av_log(avctx, AV_LOG_ERROR,
               "Error while decoding picture header: %d\n", result);
        return -1;
    }
    if (ctx->gop_invalid)
        return AVERROR_INVALIDDATA;

    if (ctx->gop_flags & IVI5_IS_PROTECTED) {
        av_log(avctx, AV_LOG_ERROR, "Password-protected clip!\n");
        return -1;
    }

    ctx->switch_buffers(ctx);

    //{ START_TIMER;

    if (ctx->is_nonnull_frame(ctx)) {
        for (p = 0; p < 3; p++) {
            for (b = 0; b < ctx->planes[p].num_bands; b++) {
                result = decode_band(ctx, p, &ctx->planes[p].bands[b], avctx);
                if (result) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Error while decoding band: %d, plane: %d\n", b, p);
                    return -1;
                }
            }
        }
    }

    //STOP_TIMER("decode_planes"); }

    /* If the bidirectional mode is enabled, next I and the following P frame will */
    /* be sent together. Unfortunately the approach below seems to be the only way */
    /* to handle the B-frames mode. That's exactly the same Intel decoders do. */
    if (avctx->codec_id == CODEC_ID_INDEO4 && ctx->frame_type == 0/*FRAMETYPE_INTRA*/) {
        while (get_bits(&ctx->gb, 8)); // skip version string
        skip_bits_long(&ctx->gb, 64); // skip padding, TODO: implement correct 8-bytes alignment
        if (get_bits_left(&ctx->gb) > 18 && show_bits(&ctx->gb, 18) == 0x3FFF8)
            av_log(avctx, AV_LOG_ERROR, "Buffer contains IP frames!\n");
    }

    if (ctx->frame.data[0])
        avctx->release_buffer(avctx, &ctx->frame);

    ctx->frame.reference = 0;
    avcodec_set_dimensions(avctx, ctx->planes[0].width, ctx->planes[0].height);
    if ((result = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return result;
    }

    if (ctx->is_scalable) {
        if (avctx->codec_id == CODEC_ID_INDEO4)
            ff_ivi_recompose_haar(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0], 4);
        else
            ff_ivi_recompose53 (&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0], 4);
    } else {
        ff_ivi_output_plane(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0]);
    }

    ff_ivi_output_plane(&ctx->planes[2], ctx->frame.data[1], ctx->frame.linesize[1]);
    ff_ivi_output_plane(&ctx->planes[1], ctx->frame.data[2], ctx->frame.linesize[2]);

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = ctx->frame;

    return buf_size;
}

/**
 * Close Indeo5 decoder and clean up its context.
 */
av_cold int ff_ivi_decode_close(AVCodecContext *avctx)
{
    IVI45DecContext *ctx = avctx->priv_data;

    ff_ivi_free_buffers(&ctx->planes[0]);

    if (ctx->mb_vlc.cust_tab.table)
        ff_free_vlc(&ctx->mb_vlc.cust_tab);

    if (ctx->frame.data[0])
        avctx->release_buffer(avctx, &ctx->frame);

#if IVI4_STREAM_ANALYSER
    if (avctx->codec_id == CODEC_ID_INDEO4) {
        if (ctx->is_scalable)
            av_log(avctx, AV_LOG_ERROR, "This video uses scalability mode!\n");
        if (ctx->uses_tiling)
            av_log(avctx, AV_LOG_ERROR, "This video uses local decoding!\n");
        if (ctx->has_b_frames)
            av_log(avctx, AV_LOG_ERROR, "This video contains B-frames!\n");
        if (ctx->has_transp)
            av_log(avctx, AV_LOG_ERROR, "Transparency mode is enabled!\n");
        if (ctx->uses_haar)
            av_log(avctx, AV_LOG_ERROR, "This video uses Haar transform!\n");
        if (ctx->uses_fullpel)
            av_log(avctx, AV_LOG_ERROR, "This video uses fullpel motion vectors!\n");
    }
#endif

    return 0;
}


/**
 * These are 2x8 predefined Huffman codebooks for coding macroblock/block
@@ -34,6 +34,8 @@
#include <stdint.h>

#define IVI_VLC_BITS 13 ///< max number of bits of the ivi's huffman codes
#define IVI4_STREAM_ANALYSER 0
#define IVI5_IS_PROTECTED 0x20

/**
 * huffman codebook descriptor
@@ -116,6 +118,7 @@ typedef struct {
    int ypos;
    int width;
    int height;
    int mb_size;
    int is_empty; ///< = 1 if this tile doesn't contain any data
    int data_size; ///< size of the data in bytes
    int num_MBs; ///< number of macroblocks in this tile
@@ -132,6 +135,7 @@ typedef struct {
    int band_num; ///< band number
    int width;
    int height;
    int aheight; ///< aligned band height
    const uint8_t *data_ptr; ///< ptr to the first byte of the band data
    int data_size; ///< size of the band data
    int16_t *buf; ///< pointer to the output buffer for this band
@@ -192,6 +196,62 @@ typedef struct {
    uint8_t chroma_bands;
} IVIPicConfig;

typedef struct IVI45DecContext {
    GetBitContext gb;
    AVFrame frame;
    RVMapDesc rvmap_tabs[9]; ///< local corrected copy of the static rvmap tables

    uint32_t frame_num;
    int frame_type;
    int prev_frame_type; ///< frame type of the previous frame
    uint32_t data_size; ///< size of the frame data in bytes from picture header
    int is_scalable;
    int transp_status; ///< transparency mode status: 1 - enabled
    const uint8_t *frame_data; ///< input frame data pointer
    int inter_scal; ///< signals a sequence of scalable inter frames
    uint32_t frame_size; ///< frame size in bytes
    uint32_t pic_hdr_size; ///< picture header size in bytes
    uint8_t frame_flags;
    uint16_t checksum; ///< frame checksum

    IVIPicConfig pic_conf;
    IVIPlaneDesc planes[3]; ///< color planes

    int buf_switch; ///< used to switch between three buffers
    int dst_buf; ///< buffer index for the currently decoded frame
    int ref_buf; ///< inter frame reference buffer index
    int ref2_buf; ///< temporal storage for switching buffers

    IVIHuffTab mb_vlc; ///< current macroblock table descriptor
    IVIHuffTab blk_vlc; ///< current block table descriptor

    uint8_t rvmap_sel;
    uint8_t in_imf;
    uint8_t in_q; ///< flag for explicitly stored quantiser delta
    uint8_t pic_glob_quant;
    uint8_t unknown1;

    uint16_t gop_hdr_size;
    uint8_t gop_flags;
    uint32_t lock_word;

#if IVI4_STREAM_ANALYSER
    uint8_t has_b_frames;
    uint8_t has_transp;
    uint8_t uses_tiling;
    uint8_t uses_haar;
    uint8_t uses_fullpel;
#endif

    int (*decode_pic_hdr) (struct IVI45DecContext *ctx, AVCodecContext *avctx);
    int (*decode_band_hdr) (struct IVI45DecContext *ctx, IVIBandDesc *band, AVCodecContext *avctx);
    int (*decode_mb_info) (struct IVI45DecContext *ctx, IVIBandDesc *band, IVITile *tile, AVCodecContext *avctx);
    void (*switch_buffers) (struct IVI45DecContext *ctx);
    int (*is_nonnull_frame)(struct IVI45DecContext *ctx);

    int gop_invalid;
} IVI45DecContext;

/** compare some properties of two pictures */
static inline int ivi_pic_config_cmp(IVIPicConfig *str1, IVIPicConfig *str2)
{
@@ -315,18 +375,6 @@ int ff_ivi_dec_tile_data_size(GetBitContext *gb);
 */
int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile);

/**
 * Handle empty tiles by performing data copying and motion
 * compensation respectively.
 *
 * @param[in] avctx ptr to the AVCodecContext
 * @param[in] band pointer to the band descriptor
 * @param[in] tile pointer to the tile descriptor
 * @param[in] mv_scale scaling factor for motion vectors
 */
void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
                               IVITile *tile, int32_t mv_scale);

/**
 * Convert and output the current plane.
 * This conversion is done by adding back the bias value of 128
@@ -348,4 +396,8 @@ uint16_t ivi_calc_band_checksum (IVIBandDesc *band);
 */
int ivi_check_band (IVIBandDesc *band, const uint8_t *ref, int pitch);

int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt);
av_cold int ff_ivi_decode_close(AVCodecContext *avctx);

#endif /* AVCODEC_IVI_COMMON_H */
@@ -40,7 +40,7 @@ typedef struct JLSState{
    int A[367], B[367], C[365], N[367];
    int limit, reset, bpp, qbpp, maxval, range;
    int near, twonear;
    int run_index[3];
    int run_index[4];
}JLSState;

extern const uint8_t ff_log2_run[32];
@@ -198,6 +198,9 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, void *
            r = ff_log2_run[state->run_index[comp]];
            if(r)
                r = get_bits_long(&s->gb, r);
            if(x + r * stride > w) {
                r = (w - x) / stride;
            }
            for(i = 0; i < r; i++) {
                W(dst, x, Ra);
                x += stride;
@@ -287,7 +290,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
// av_log(s->avctx, AV_LOG_DEBUG, "JPEG params: ILV=%i Pt=%i BPP=%i, scan = %i\n", ilv, point_transform, s->bits, s->cur_scan);
    if(ilv == 0) { /* separate planes */
        stride = (s->nb_components > 1) ? 3 : 1;
        off = av_clip(s->cur_scan - 1, 0, stride);
        off = av_clip(s->cur_scan - 1, 0, stride - 1);
        width = s->width * stride;
        cur += off;
        for(i = 0; i < s->height; i++) {
@@ -143,6 +143,10 @@ static int decode_frame(AVCodecContext *avctx,
    buf += 5;

    if (video_size) {
        if(video_size < 0) {
            av_log(avctx, AV_LOG_ERROR, "video size %d invalid\n", video_size);
            return AVERROR_INVALIDDATA;
        }
        if (avctx->reget_buffer(avctx, &s->frame) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return -1;
@@ -150,7 +154,7 @@ static int decode_frame(AVCodecContext *avctx,

        if (video_type == 0 || video_type == 1) {
            GetBitContext gb;
            init_get_bits(&gb, buf, FFMIN(video_size, (buf_end - buf) * 8));
            init_get_bits(&gb, buf, 8 * FFMIN(video_size, buf_end - buf));

            for (j = 0; j < avctx->height; j += 8)
                for (i = 0; i < avctx->width; i += 8)
@@ -30,10 +30,17 @@

typedef struct {
    AVCodecContext *avctx;
    AVFrame pic;
    uint16_t *prev, *cur;
    AVFrame prev, cur;
} KgvContext;

static void decode_flush(AVCodecContext *avctx)
{
    KgvContext * const c = avctx->priv_data;

    if (c->prev.data[0])
        avctx->release_buffer(avctx, &c->prev);
}

static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
@@ -42,7 +49,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
    int offsets[8];
    uint16_t *out, *prev;
    int outcnt = 0, maxcnt;
    int w, h, i;
    int w, h, i, res;

    if (avpkt->size < 2)
        return -1;
@@ -54,20 +61,23 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
    if (av_image_check_size(w, h, 0, avctx))
        return -1;

    if (w != avctx->width || h != avctx->height)
    if (w != avctx->width || h != avctx->height) {
        if (c->prev.data[0])
            avctx->release_buffer(avctx, &c->prev);
        avcodec_set_dimensions(avctx, w, h);
    }

    maxcnt = w * h;

    out = av_realloc(c->cur, w * h * 2);
    if (!out)
        return -1;
    c->cur = out;

    prev = av_realloc(c->prev, w * h * 2);
    if (!prev)
        return -1;
    c->prev = prev;
    c->cur.reference = 3;
    if ((res = avctx->get_buffer(avctx, &c->cur)) < 0)
        return res;
    out = (uint16_t *) c->cur.data[0];
    if (c->prev.data[0]) {
        prev = (uint16_t *) c->prev.data[0];
    } else {
        prev = NULL;
    }

    for (i = 0; i < 8; i++)
        offsets[i] = -1;
@@ -80,6 +90,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
            out[outcnt++] = code; // rgb555 pixel coded directly
        } else {
            int count;
            int inp_off;
            uint16_t *inp;

            if ((code & 0x6000) == 0x6000) {
@@ -101,7 +112,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
                if (maxcnt - start < count)
                    break;

                inp = prev + start;
                if (!prev) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Frame reference does not exist\n");
                    break;
                }

                inp = prev;
                inp_off = start;
            } else {
                // copy from earlier in this frame
                int offset = (code & 0x1FFF) + 1;
@@ -119,27 +137,28 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
                if (outcnt < offset)
                    break;

                inp = out + outcnt - offset;
                inp = out;
                inp_off = outcnt - offset;
            }

            if (maxcnt - outcnt < count)
                break;

            for (i = 0; i < count; i++)
            for (i = inp_off; i < count + inp_off; i++) {
                out[outcnt++] = inp[i];
            }
        }
    }

    if (outcnt - maxcnt)
        av_log(avctx, AV_LOG_DEBUG, "frame finished with %d diff\n", outcnt - maxcnt);

    c->pic.data[0] = (uint8_t *)c->cur;
    c->pic.linesize[0] = w * 2;

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = c->pic;
    *(AVFrame*)data = c->cur;

    FFSWAP(uint16_t *, c->cur, c->prev);
    if (c->prev.data[0])
        avctx->release_buffer(avctx, &c->prev);
    FFSWAP(AVFrame, c->cur, c->prev);

    return avpkt->size;
}
@@ -150,18 +169,14 @@ static av_cold int decode_init(AVCodecContext *avctx)

    c->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_RGB555;
    avcodec_get_frame_defaults(&c->pic);
    avctx->flags |= CODEC_FLAG_EMU_EDGE;

    return 0;
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    KgvContext * const c = avctx->priv_data;

    av_freep(&c->cur);
    av_freep(&c->prev);

    decode_flush(avctx);
    return 0;
}

@@ -173,5 +188,6 @@ AVCodec ff_kgv1_decoder = {
    .init = decode_init,
    .close = decode_end,
    .decode = decode_frame,
    .flush = decode_flush,
    .long_name = NULL_IF_CONFIG_SMALL("Kega Game Video"),
};
@@ -33,6 +33,7 @@
#define KMVC_KEYFRAME 0x80
#define KMVC_PALETTE 0x40
#define KMVC_METHOD 0x0F
#define MAX_PALSIZE 256

/*
 * Decoder context
@@ -43,7 +44,7 @@ typedef struct KmvcContext {

    int setpal;
    int palsize;
    uint32_t pal[256];
    uint32_t pal[MAX_PALSIZE];
    uint8_t *cur, *prev;
    uint8_t *frm0, *frm1;
    GetByteContext g;
@@ -380,10 +381,10 @@ static av_cold int decode_init(AVCodecContext * avctx)
        c->palsize = 127;
    } else {
        c->palsize = AV_RL16(avctx->extradata + 10);
        if (c->palsize > 255U) {
        if (c->palsize >= (unsigned)MAX_PALSIZE) {
            c->palsize = 127;
            av_log(NULL, AV_LOG_ERROR, "palsize too big\n");
            return -1;
            av_log(avctx, AV_LOG_ERROR, "KMVC palette too large\n");
            return AVERROR_INVALIDDATA;
        }
    }
@@ -247,24 +247,26 @@ static void lag_pred_line(LagarithContext *l, uint8_t *buf,
{
    int L, TL;

    /* Left pixel is actually prev_row[width] */
    L = buf[width - stride - 1];
    if (!line) {
        /* Left prediction only for first line */
        L = l->dsp.add_hfyu_left_prediction(buf + 1, buf + 1,
                                            width - 1, buf[0]);
        return;
    } else if (line == 1) {
        /* Second line, left predict first pixel, the rest of the line is median predicted
         * NOTE: In the case of RGB this pixel is top predicted */
        TL = l->avctx->pix_fmt == PIX_FMT_YUV420P ? buf[-stride] : L;
    } else {
        /* Top left is 2 rows back, last pixel */
        TL = buf[width - (2 * stride) - 1];
    }
        /* Left pixel is actually prev_row[width] */
        L = buf[width - stride - 1];

    add_lag_median_prediction(buf, buf - stride, buf,
                              width, &L, &TL);
        if (line == 1) {
            /* Second line, left predict first pixel, the rest of the line is median predicted
             * NOTE: In the case of RGB this pixel is top predicted */
            TL = l->avctx->pix_fmt == PIX_FMT_YUV420P ? buf[-stride] : L;
        } else {
            /* Top left is 2 rows back, last pixel */
            TL = buf[width - (2 * stride) - 1];
        }

        add_lag_median_prediction(buf, buf - stride, buf,
                                  width, &L, &TL);
    }
}

static int lag_decode_line(LagarithContext *l, lag_rac *rac,
@@ -310,13 +312,13 @@ handle_zeros:
}

static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
                                    const uint8_t *src, int width,
                                    int esc_count)
                                    const uint8_t *src, const uint8_t *src_end,
                                    int width, int esc_count)
{
    int i = 0;
    int count;
    uint8_t zero_run = 0;
    const uint8_t *start = src;
    const uint8_t *src_start = src;
    uint8_t mask1 = -(esc_count < 2);
    uint8_t mask2 = -(esc_count < 3);
    uint8_t *end = dst + (width - 2);
@@ -324,6 +326,11 @@ static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
output_zeros:
    if (l->zeros_rem) {
        count = FFMIN(l->zeros_rem, width - i);
        if (end - dst < count) {
            av_log(l->avctx, AV_LOG_ERROR, "Too many zeros remaining.\n");
            return AVERROR_INVALIDDATA;
        }

        memset(dst, 0, count);
        l->zeros_rem -= count;
        dst += count;
@@ -333,6 +340,8 @@ output_zeros:
        i = 0;
        while (!zero_run && dst + i < end) {
            i++;
            if (src + i >= src_end)
                return AVERROR_INVALIDDATA;
            zero_run =
                !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
        }
@@ -348,9 +357,10 @@ output_zeros:
        } else {
            memcpy(dst, src, i);
            src += i;
            dst += i;
        }
    }
    return start - src;
    return src_start - src;
}

@@ -366,6 +376,7 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
    int esc_count = src[0];
    GetBitContext gb;
    lag_rac rac;
    const uint8_t *src_end = src + src_size;

    rac.avctx = l->avctx;
    l->zeros = 0;
@@ -396,10 +407,16 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
        esc_count -= 4;
        if (esc_count > 0) {
            /* Zero run coding only, no range coding. */
            for (i = 0; i < height; i++)
                src += lag_decode_zero_run_line(l, dst + (i * stride), src,
                                                width, esc_count);
            for (i = 0; i < height; i++) {
                int res = lag_decode_zero_run_line(l, dst + (i * stride), src,
                                                   src_end, width, esc_count);
                if (res < 0)
                    return res;
                src += res;
            }
        } else {
            if (src_size < width * height)
                return AVERROR_INVALIDDATA; // buffer not big enough
            /* Plane is stored uncompressed */
            for (i = 0; i < height; i++) {
                memcpy(dst + (i * stride), src, width);
@@ -500,11 +517,19 @@ static int lag_decode_frame(AVCodecContext *avctx,
        }
        for (i = 0; i < 4; i++)
            srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride;
        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size ||
            offs[3] >= buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < 4; i++)
            lag_decode_arith_plane(l, srcs[i],
                                   avctx->width, avctx->height,
                                   -l->rgb_stride, buf + offs[i],
                                   buf_size);
                                   buf_size - offs[i]);
        dst = p->data[0];
        for (i = 0; i < 4; i++)
            srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height;
@@ -576,15 +601,23 @@ static int lag_decode_frame(AVCodecContext *avctx,
            return -1;
        }

        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }

        lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
                               p->linesize[0], buf + offset_ry,
                               buf_size);
                               buf_size - offset_ry);
        lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
                               avctx->height / 2, p->linesize[2],
                               buf + offset_gu, buf_size);
                               buf + offset_gu, buf_size - offset_gu);
        lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
                               avctx->height / 2, p->linesize[1],
                               buf + offset_bv, buf_size);
                               buf + offset_bv, buf_size - offset_bv);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,