Compare commits
494 Commits
n2.4.1...release/2.
MAINTAINERS (44 changed lines)
@@ -14,7 +14,6 @@ patches and related discussions.
|
|||||||
Project Leader
|
Project Leader
|
||||||
==============
|
==============
|
||||||
|
|
||||||
Michael Niedermayer
|
|
||||||
final design decisions
|
final design decisions
|
||||||
|
|
||||||
|
|
||||||
@@ -46,7 +45,7 @@ Miscellaneous Areas
|
|||||||
documentation Mike Melanson
|
documentation Mike Melanson
|
||||||
website Robert Swain, Lou Logan
|
website Robert Swain, Lou Logan
|
||||||
build system (configure,Makefiles) Diego Biurrun, Mans Rullgard
|
build system (configure,Makefiles) Diego Biurrun, Mans Rullgard
|
||||||
project server Árpád Gereöffy, Michael Niedermayer, Reimar Döffinger
|
project server Árpád Gereöffy, Michael Niedermayer, Reimar Döffinger, Alexander Strasser
|
||||||
mailinglists Michael Niedermayer, Baptiste Coudurier, Lou Logan
|
mailinglists Michael Niedermayer, Baptiste Coudurier, Lou Logan
|
||||||
presets Robert Swain
|
presets Robert Swain
|
||||||
metadata subsystem Aurelien Jacobs
|
metadata subsystem Aurelien Jacobs
|
||||||
@@ -62,13 +61,20 @@ Internal Interfaces:
|
|||||||
libavutil/common.h Michael Niedermayer
|
libavutil/common.h Michael Niedermayer
|
||||||
|
|
||||||
Other:
|
Other:
|
||||||
intfloat* Michael Niedermayer
|
bprint Nicolas George
|
||||||
rational.c, rational.h Michael Niedermayer
|
|
||||||
mathematics.c, mathematics.h Michael Niedermayer
|
|
||||||
integer.c, integer.h Michael Niedermayer
|
|
||||||
bswap.h
|
bswap.h
|
||||||
|
des Reimar Doeffinger
|
||||||
|
float_dsp Loren Merritt
|
||||||
|
hash Reimar Doeffinger
|
||||||
|
intfloat* Michael Niedermayer
|
||||||
|
integer.c, integer.h Michael Niedermayer
|
||||||
|
lzo Reimar Doeffinger
|
||||||
|
mathematics.c, mathematics.h Michael Niedermayer
|
||||||
opencl.c, opencl.h Wei Gao
|
opencl.c, opencl.h Wei Gao
|
||||||
|
rational.c, rational.h Michael Niedermayer
|
||||||
|
rc4 Reimar Doeffinger
|
||||||
ripemd.c, ripemd.h James Almer
|
ripemd.c, ripemd.h James Almer
|
||||||
|
timecode Clément Bœsch
|
||||||
|
|
||||||
|
|
||||||
libavcodec
|
libavcodec
|
||||||
@@ -131,8 +137,8 @@ Codecs:
|
|||||||
binkaudio.c Peter Ross
|
binkaudio.c Peter Ross
|
||||||
bmp.c Mans Rullgard, Kostya Shishkov
|
bmp.c Mans Rullgard, Kostya Shishkov
|
||||||
cavs* Stefan Gehrer
|
cavs* Stefan Gehrer
|
||||||
celp_filters.* Vitor Sessak
|
|
||||||
cdxl.c Paul B Mahol
|
cdxl.c Paul B Mahol
|
||||||
|
celp_filters.* Vitor Sessak
|
||||||
cinepak.c Roberto Togni
|
cinepak.c Roberto Togni
|
||||||
cljr Alex Beregszaszi
|
cljr Alex Beregszaszi
|
||||||
cllc.c Derek Buitenhuis
|
cllc.c Derek Buitenhuis
|
||||||
@@ -143,8 +149,8 @@ Codecs:
|
|||||||
dca.c Kostya Shishkov, Benjamin Larsson
|
dca.c Kostya Shishkov, Benjamin Larsson
|
||||||
dnxhd* Baptiste Coudurier
|
dnxhd* Baptiste Coudurier
|
||||||
dpcm.c Mike Melanson
|
dpcm.c Mike Melanson
|
||||||
dxa.c Kostya Shishkov
|
|
||||||
dv.c Roman Shaposhnik
|
dv.c Roman Shaposhnik
|
||||||
|
dxa.c Kostya Shishkov
|
||||||
eacmv*, eaidct*, eat* Peter Ross
|
eacmv*, eaidct*, eat* Peter Ross
|
||||||
ffv1.c Michael Niedermayer
|
ffv1.c Michael Niedermayer
|
||||||
ffwavesynth.c Nicolas George
|
ffwavesynth.c Nicolas George
|
||||||
@@ -154,9 +160,9 @@ Codecs:
|
|||||||
g722.c Martin Storsjo
|
g722.c Martin Storsjo
|
||||||
g726.c Roman Shaposhnik
|
g726.c Roman Shaposhnik
|
||||||
gifdec.c Baptiste Coudurier
|
gifdec.c Baptiste Coudurier
|
||||||
h264* Loren Merritt, Michael Niedermayer
|
|
||||||
h261* Michael Niedermayer
|
h261* Michael Niedermayer
|
||||||
h263* Michael Niedermayer
|
h263* Michael Niedermayer
|
||||||
|
h264* Loren Merritt, Michael Niedermayer
|
||||||
huffyuv.c Michael Niedermayer
|
huffyuv.c Michael Niedermayer
|
||||||
idcinvideo.c Mike Melanson
|
idcinvideo.c Mike Melanson
|
||||||
imc* Benjamin Larsson
|
imc* Benjamin Larsson
|
||||||
@@ -171,8 +177,8 @@ Codecs:
|
|||||||
kmvc.c Kostya Shishkov
|
kmvc.c Kostya Shishkov
|
||||||
lcl*.c Roberto Togni, Reimar Doeffinger
|
lcl*.c Roberto Togni, Reimar Doeffinger
|
||||||
libcelt_dec.c Nicolas George
|
libcelt_dec.c Nicolas George
|
||||||
libgsm.c Michel Bardiaux
|
|
||||||
libdirac* David Conrad
|
libdirac* David Conrad
|
||||||
|
libgsm.c Michel Bardiaux
|
||||||
libopenjpeg.c Jaikrishnan Menon
|
libopenjpeg.c Jaikrishnan Menon
|
||||||
libopenjpegenc.c Michael Bradshaw
|
libopenjpegenc.c Michael Bradshaw
|
||||||
libschroedinger* David Conrad
|
libschroedinger* David Conrad
|
||||||
@@ -180,8 +186,8 @@ Codecs:
|
|||||||
libtheoraenc.c David Conrad
|
libtheoraenc.c David Conrad
|
||||||
libutvideo* Derek Buitenhuis
|
libutvideo* Derek Buitenhuis
|
||||||
libvorbis.c David Conrad
|
libvorbis.c David Conrad
|
||||||
libxavs.c Stefan Gehrer
|
|
||||||
libx264.c Mans Rullgard, Jason Garrett-Glaser
|
libx264.c Mans Rullgard, Jason Garrett-Glaser
|
||||||
|
libxavs.c Stefan Gehrer
|
||||||
loco.c Kostya Shishkov
|
loco.c Kostya Shishkov
|
||||||
lzo.h, lzo.c Reimar Doeffinger
|
lzo.h, lzo.c Reimar Doeffinger
|
||||||
mdec.c Michael Niedermayer
|
mdec.c Michael Niedermayer
|
||||||
@@ -243,8 +249,8 @@ Codecs:
|
|||||||
vda_h264_dec.c Xidorn Quan
|
vda_h264_dec.c Xidorn Quan
|
||||||
vima.c Paul B Mahol
|
vima.c Paul B Mahol
|
||||||
vmnc.c Kostya Shishkov
|
vmnc.c Kostya Shishkov
|
||||||
vorbis_enc.c Oded Shimon
|
|
||||||
vorbis_dec.c Denes Balatoni, David Conrad
|
vorbis_dec.c Denes Balatoni, David Conrad
|
||||||
|
vorbis_enc.c Oded Shimon
|
||||||
vp3* Mike Melanson
|
vp3* Mike Melanson
|
||||||
vp5 Aurelien Jacobs
|
vp5 Aurelien Jacobs
|
||||||
vp6 Aurelien Jacobs
|
vp6 Aurelien Jacobs
|
||||||
@@ -278,11 +284,11 @@ libavdevice
|
|||||||
libavdevice/avdevice.h
|
libavdevice/avdevice.h
|
||||||
|
|
||||||
|
|
||||||
|
dshow.c Roger Pack
|
||||||
iec61883.c Georg Lippitsch
|
iec61883.c Georg Lippitsch
|
||||||
libdc1394.c Roman Shaposhnik
|
libdc1394.c Roman Shaposhnik
|
||||||
v4l2.c Luca Abeni
|
v4l2.c Luca Abeni
|
||||||
vfwcap.c Ramiro Polla
|
vfwcap.c Ramiro Polla
|
||||||
dshow.c Roger Pack
|
|
||||||
|
|
||||||
libavfilter
|
libavfilter
|
||||||
===========
|
===========
|
||||||
@@ -292,11 +298,13 @@ Generic parts:
|
|||||||
|
|
||||||
Filters:
|
Filters:
|
||||||
af_amerge.c Nicolas George
|
af_amerge.c Nicolas George
|
||||||
|
af_aresample.c Michael Niedermayer
|
||||||
af_astreamsync.c Nicolas George
|
af_astreamsync.c Nicolas George
|
||||||
af_atempo.c Pavel Koshevoy
|
af_atempo.c Pavel Koshevoy
|
||||||
af_pan.c Nicolas George
|
af_pan.c Nicolas George
|
||||||
vf_delogo.c Jean Delvare (CC <khali@linux-fr.org>)
|
vf_delogo.c Jean Delvare (CC <khali@linux-fr.org>)
|
||||||
vf_drawbox.c/drawgrid Andrey Utkin
|
vf_drawbox.c/drawgrid Andrey Utkin
|
||||||
|
vf_scale.c Michael Niedermayer
|
||||||
vf_yadif.c Michael Niedermayer
|
vf_yadif.c Michael Niedermayer
|
||||||
|
|
||||||
Sources:
|
Sources:
|
||||||
@@ -316,7 +324,8 @@ Muxers/Demuxers:
|
|||||||
4xm.c Mike Melanson
|
4xm.c Mike Melanson
|
||||||
adtsenc.c Robert Swain
|
adtsenc.c Robert Swain
|
||||||
afc.c Paul B Mahol
|
afc.c Paul B Mahol
|
||||||
aiff.c Baptiste Coudurier
|
aiffdec.c Baptiste Coudurier, Matthieu Bouron
|
||||||
|
aiffenc.c Baptiste Coudurier, Matthieu Bouron
|
||||||
ape.c Kostya Shishkov
|
ape.c Kostya Shishkov
|
||||||
ass* Aurelien Jacobs
|
ass* Aurelien Jacobs
|
||||||
astdec.c Paul B Mahol
|
astdec.c Paul B Mahol
|
||||||
@@ -344,8 +353,8 @@ Muxers/Demuxers:
|
|||||||
idcin.c Mike Melanson
|
idcin.c Mike Melanson
|
||||||
idroqdec.c Mike Melanson
|
idroqdec.c Mike Melanson
|
||||||
iff.c Jaikrishnan Menon
|
iff.c Jaikrishnan Menon
|
||||||
ipmovie.c Mike Melanson
|
|
||||||
img2*.c Michael Niedermayer
|
img2*.c Michael Niedermayer
|
||||||
|
ipmovie.c Mike Melanson
|
||||||
ircam* Paul B Mahol
|
ircam* Paul B Mahol
|
||||||
iss.c Stefan Gehrer
|
iss.c Stefan Gehrer
|
||||||
jacosub* Clément Bœsch
|
jacosub* Clément Bœsch
|
||||||
@@ -359,11 +368,11 @@ Muxers/Demuxers:
|
|||||||
matroskadec.c Aurelien Jacobs
|
matroskadec.c Aurelien Jacobs
|
||||||
matroskaenc.c David Conrad
|
matroskaenc.c David Conrad
|
||||||
metadata* Aurelien Jacobs
|
metadata* Aurelien Jacobs
|
||||||
microdvd* Aurelien Jacobs
|
|
||||||
mgsts.c Paul B Mahol
|
mgsts.c Paul B Mahol
|
||||||
|
microdvd* Aurelien Jacobs
|
||||||
mm.c Peter Ross
|
mm.c Peter Ross
|
||||||
mov.c Michael Niedermayer, Baptiste Coudurier
|
mov.c Michael Niedermayer, Baptiste Coudurier
|
||||||
movenc.c Michael Niedermayer, Baptiste Coudurier
|
movenc.c Baptiste Coudurier, Matthieu Bouron
|
||||||
mpc.c Kostya Shishkov
|
mpc.c Kostya Shishkov
|
||||||
mpeg.c Michael Niedermayer
|
mpeg.c Michael Niedermayer
|
||||||
mpegenc.c Michael Niedermayer
|
mpegenc.c Michael Niedermayer
|
||||||
@@ -458,7 +467,6 @@ Releases
|
|||||||
|
|
||||||
2.0 Michael Niedermayer
|
2.0 Michael Niedermayer
|
||||||
1.2 Michael Niedermayer
|
1.2 Michael Niedermayer
|
||||||
1.1 Michael Niedermayer
|
|
||||||
|
|
||||||
If you want to maintain an older release, please contact us
|
If you want to maintain an older release, please contact us
|
||||||
|
|
||||||
|
@@ -67,7 +67,7 @@ struct SwsContext *sws_opts;
|
|||||||
AVDictionary *swr_opts;
|
AVDictionary *swr_opts;
|
||||||
AVDictionary *format_opts, *codec_opts, *resample_opts;
|
AVDictionary *format_opts, *codec_opts, *resample_opts;
|
||||||
|
|
||||||
const int this_year = 2013;
|
const int this_year = 2015;
|
||||||
|
|
||||||
static FILE *report_file;
|
static FILE *report_file;
|
||||||
|
|
||||||
|
compat/avisynth/avisynth_c_25.h (new file, 68 lines)
@@ -0,0 +1,68 @@
|
|||||||
|
// Copyright (c) 2011 FFmpegSource Project
|
||||||
|
//
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
// of this software and associated documentation files (the "Software"), to deal
|
||||||
|
// in the Software without restriction, including without limitation the rights
|
||||||
|
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
// copies of the Software, and to permit persons to whom the Software is
|
||||||
|
// furnished to do so, subject to the following conditions:
|
||||||
|
//
|
||||||
|
// The above copyright notice and this permission notice shall be included in
|
||||||
|
// all copies or substantial portions of the Software.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
// THE SOFTWARE.
|
||||||
|
|
||||||
|
/* these are defines/functions that are used and were changed in the switch to 2.6
|
||||||
|
* and are needed to maintain full compatility with 2.5 */
|
||||||
|
|
||||||
|
enum {
|
||||||
|
AVS_CS_YV12_25 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
|
||||||
|
AVS_CS_I420_25 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
|
||||||
|
};
|
||||||
|
|
||||||
|
AVSC_INLINE int avs_get_height_p_25(const AVS_VideoFrame * p, int plane) {
|
||||||
|
switch (plane)
|
||||||
|
{
|
||||||
|
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||||
|
if (p->pitchUV)
|
||||||
|
return p->height>>1;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
return p->height;}
|
||||||
|
|
||||||
|
AVSC_INLINE int avs_get_row_size_p_25(const AVS_VideoFrame * p, int plane) {
|
||||||
|
int r;
|
||||||
|
switch (plane)
|
||||||
|
{
|
||||||
|
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||||
|
if (p->pitchUV)
|
||||||
|
return p->row_size>>1;
|
||||||
|
else
|
||||||
|
return 0;
|
||||||
|
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||||
|
if (p->pitchUV)
|
||||||
|
{
|
||||||
|
r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
|
||||||
|
if (r < p->pitchUV)
|
||||||
|
return r;
|
||||||
|
return p->row_size>>1;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
return 0;
|
||||||
|
case AVS_PLANAR_Y_ALIGNED:
|
||||||
|
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||||
|
if (r <= p->pitch)
|
||||||
|
return r;
|
||||||
|
return p->row_size;
|
||||||
|
}
|
||||||
|
return p->row_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
AVSC_INLINE int avs_is_yv12_25(const AVS_VideoInfo * p)
|
||||||
|
{ return ((p->pixel_type & AVS_CS_YV12_25) == AVS_CS_YV12_25)||((p->pixel_type & AVS_CS_I420_25) == AVS_CS_I420_25); }
|
configure (vendored, 16 changed lines)
@@ -1415,6 +1415,7 @@ HAVE_LIST="
|
|||||||
alsa_asoundlib_h
|
alsa_asoundlib_h
|
||||||
altivec_h
|
altivec_h
|
||||||
arpa_inet_h
|
arpa_inet_h
|
||||||
|
as_object_arch
|
||||||
asm_mod_q
|
asm_mod_q
|
||||||
asm_mod_y
|
asm_mod_y
|
||||||
asm_types_h
|
asm_types_h
|
||||||
@@ -2801,7 +2802,9 @@ probe_cc(){
|
|||||||
unset _depflags _DEPCMD _DEPFLAGS
|
unset _depflags _DEPCMD _DEPFLAGS
|
||||||
_flags_filter=echo
|
_flags_filter=echo
|
||||||
|
|
||||||
if $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
|
if $_cc --version 2>&1 | grep -q '^GNU assembler'; then
|
||||||
|
true # no-op to avoid reading stdin in following checks
|
||||||
|
elif $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
|
||||||
_type=llvm_gcc
|
_type=llvm_gcc
|
||||||
gcc_extra_ver=$(expr "$($_cc --version | head -n1)" : '.*\((.*)\)')
|
gcc_extra_ver=$(expr "$($_cc --version | head -n1)" : '.*\((.*)\)')
|
||||||
_ident="llvm-gcc $($_cc -dumpversion) $gcc_extra_ver"
|
_ident="llvm-gcc $($_cc -dumpversion) $gcc_extra_ver"
|
||||||
@@ -3829,6 +3832,11 @@ EOF
|
|||||||
|
|
||||||
[ $target_os != win32 ] && enabled_all armv6t2 shared !pic && enable_weak_pic
|
[ $target_os != win32 ] && enabled_all armv6t2 shared !pic && enable_weak_pic
|
||||||
|
|
||||||
|
# llvm's integrated assembler supports .object_arch from llvm 3.5
|
||||||
|
[ "$objformat" = elf ] && check_as <<EOF && enable as_object_arch
|
||||||
|
.object_arch armv4
|
||||||
|
EOF
|
||||||
|
|
||||||
elif enabled mips; then
|
elif enabled mips; then
|
||||||
|
|
||||||
check_inline_asm loongson '"dmult.g $1, $2, $3"'
|
check_inline_asm loongson '"dmult.g $1, $2, $3"'
|
||||||
@@ -3955,6 +3963,7 @@ EOF
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
check_ldflags -Wl,--as-needed
|
check_ldflags -Wl,--as-needed
|
||||||
|
check_ldflags -Wl,-z,noexecstack
|
||||||
|
|
||||||
if check_func dlopen; then
|
if check_func dlopen; then
|
||||||
ldl=
|
ldl=
|
||||||
@@ -4134,7 +4143,7 @@ enabled gnutls && require_pkg_config gnutls gnutls/gnutls.h gnutls_gl
|
|||||||
enabled libiec61883 && require libiec61883 libiec61883/iec61883.h iec61883_cmp_connect -lraw1394 -lavc1394 -lrom1394 -liec61883
|
enabled libiec61883 && require libiec61883 libiec61883/iec61883.h iec61883_cmp_connect -lraw1394 -lavc1394 -lrom1394 -liec61883
|
||||||
enabled libaacplus && require "libaacplus >= 2.0.0" aacplus.h aacplusEncOpen -laacplus
|
enabled libaacplus && require "libaacplus >= 2.0.0" aacplus.h aacplusEncOpen -laacplus
|
||||||
enabled libass && require_pkg_config libass ass/ass.h ass_library_init
|
enabled libass && require_pkg_config libass ass/ass.h ass_library_init
|
||||||
enabled libbluray && require libbluray libbluray/bluray.h bd_open -lbluray
|
enabled libbluray && require_pkg_config libbluray libbluray/bluray.h bd_open
|
||||||
enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 &&
|
enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 &&
|
||||||
{ check_lib celt/celt.h celt_decoder_create_custom -lcelt0 ||
|
{ check_lib celt/celt.h celt_decoder_create_custom -lcelt0 ||
|
||||||
die "ERROR: libcelt must be installed and version must be >= 0.11.0."; }
|
die "ERROR: libcelt must be installed and version must be >= 0.11.0."; }
|
||||||
@@ -4201,7 +4210,7 @@ enabled openal && { { for al_libs in "${OPENAL_LIBS}" "-lopenal" "-lO
|
|||||||
enabled opencl && { check_lib2 OpenCL/cl.h clEnqueueNDRangeKernel -Wl,-framework,OpenCL ||
|
enabled opencl && { check_lib2 OpenCL/cl.h clEnqueueNDRangeKernel -Wl,-framework,OpenCL ||
|
||||||
check_lib2 CL/cl.h clEnqueueNDRangeKernel -lOpenCL ||
|
check_lib2 CL/cl.h clEnqueueNDRangeKernel -lOpenCL ||
|
||||||
die "ERROR: opencl not found"; } &&
|
die "ERROR: opencl not found"; } &&
|
||||||
{ enabled_any w32threads os2threads &&
|
{ ! enabled_any w32threads os2threads ||
|
||||||
die "opencl currently needs --enable-pthreads or --disable-w32threads"; } &&
|
die "opencl currently needs --enable-pthreads or --disable-w32threads"; } &&
|
||||||
{ check_cpp_condition "OpenCL/cl.h" "defined(CL_VERSION_1_2)" ||
|
{ check_cpp_condition "OpenCL/cl.h" "defined(CL_VERSION_1_2)" ||
|
||||||
check_cpp_condition "CL/cl.h" "defined(CL_VERSION_1_2)" ||
|
check_cpp_condition "CL/cl.h" "defined(CL_VERSION_1_2)" ||
|
||||||
@@ -4777,6 +4786,7 @@ enabled getenv || echo "#define getenv(x) NULL" >> $TMPH
|
|||||||
|
|
||||||
|
|
||||||
mkdir -p doc
|
mkdir -p doc
|
||||||
|
mkdir -p tests
|
||||||
echo "@c auto-generated by configure" > doc/config.texi
|
echo "@c auto-generated by configure" > doc/config.texi
|
||||||
|
|
||||||
print_config ARCH_ "$config_files" $ARCH_LIST
|
print_config ARCH_ "$config_files" $ARCH_LIST
|
||||||
|
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
|
|||||||
# This could be handy for archiving the generated documentation or
|
# This could be handy for archiving the generated documentation or
|
||||||
# if some version control system is used.
|
# if some version control system is used.
|
||||||
|
|
||||||
PROJECT_NUMBER = 2.0
|
PROJECT_NUMBER = 2.0.7
|
||||||
|
|
||||||
# With the PROJECT_LOGO tag one can specify an logo or icon that is included
|
# With the PROJECT_LOGO tag one can specify an logo or icon that is included
|
||||||
# in the documentation. The maximum height of the logo should not exceed 55
|
# in the documentation. The maximum height of the logo should not exceed 55
|
||||||
|
@@ -103,8 +103,8 @@ endif
|
|||||||
uninstall: uninstall-man
|
uninstall: uninstall-man
|
||||||
|
|
||||||
uninstall-man:
|
uninstall-man:
|
||||||
$(RM) $(addprefix "$(MANDIR)/man1/",$(MANPAGES1))
|
$(RM) $(addprefix "$(MANDIR)/man1/",$(PROGS-yes:%=%.1) $(PROGS-yes:%=%-all.1) $(COMPONENTS-yes:%=%.1))
|
||||||
$(RM) $(addprefix "$(MANDIR)/man3/",$(MANPAGES3))
|
$(RM) $(addprefix "$(MANDIR)/man3/",$(LIBRARIES-yes:%=%.3))
|
||||||
|
|
||||||
clean:: docclean
|
clean:: docclean
|
||||||
|
|
||||||
|
@@ -14,7 +14,3 @@ accepted. If you are experiencing issues with any formally released version of
|
|||||||
FFmpeg, please try git master to check if the issue still exists. If it does,
|
FFmpeg, please try git master to check if the issue still exists. If it does,
|
||||||
make your report against the development code following the usual bug reporting
|
make your report against the development code following the usual bug reporting
|
||||||
guidelines.
|
guidelines.
|
||||||
|
|
||||||
AVI/AVXSynth
|
|
||||||
--------
|
|
||||||
If you want to use FFmpeg with AVISynth, you need AVISynth 2.6.0 at minimum.
|
|
||||||
|
@@ -1,3 +1,4 @@
|
|||||||
|
@anchor{codec-options}
|
||||||
@chapter Codec Options
|
@chapter Codec Options
|
||||||
@c man begin CODEC OPTIONS
|
@c man begin CODEC OPTIONS
|
||||||
|
|
||||||
|
@@ -25,6 +25,95 @@ enabled encoders.
|
|||||||
A description of some of the currently available audio encoders
|
A description of some of the currently available audio encoders
|
||||||
follows.
|
follows.
|
||||||
|
|
||||||
|
@anchor{aacenc}
|
||||||
|
@section aac
|
||||||
|
|
||||||
|
Advanced Audio Coding (AAC) encoder.
|
||||||
|
|
||||||
|
This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
|
||||||
|
low complexity (AAC-LC) profile is supported. To use this encoder, you must set
|
||||||
|
@option{strict} option to @samp{experimental} or lower.
|
||||||
|
|
||||||
|
As this encoder is experimental, unexpected behavior may exist from time to
|
||||||
|
time. For a more stable AAC encoder, see @ref{libvo-aacenc}. However, be warned
|
||||||
|
that it has a worse quality reported by some users.
|
||||||
|
|
||||||
|
@c Comment this out until somebody writes the respective documentation.
|
||||||
|
@c See also @ref{libfaac}, @ref{libaacplus}, and @ref{libfdk-aac-enc}.
|
||||||
|
|
||||||
|
@subsection Options
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
@item b
|
||||||
|
Set bit rate in bits/s. Setting this automatically activates constant bit rate
|
||||||
|
(CBR) mode.
|
||||||
|
|
||||||
|
@item q
|
||||||
|
Set quality for variable bit rate (VBR) mode. This option is valid only using
|
||||||
|
the @command{ffmpeg} command-line tool. For library interface users, use
|
||||||
|
@option{global_quality}.
|
||||||
|
|
||||||
|
@item stereo_mode
|
||||||
|
Set stereo encoding mode. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item auto
|
||||||
|
Automatically selected by the encoder.
|
||||||
|
|
||||||
|
@item ms_off
|
||||||
|
Disable middle/side encoding. This is the default.
|
||||||
|
|
||||||
|
@item ms_force
|
||||||
|
Force middle/side encoding.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item aac_coder
|
||||||
|
Set AAC encoder coding method. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item 0
|
||||||
|
FAAC-inspired method.
|
||||||
|
|
||||||
|
This method is a simplified reimplementation of the method used in FAAC, which
|
||||||
|
sets thresholds proportional to the band energies, and then decreases all the
|
||||||
|
thresholds with quantizer steps to find the appropriate quantization with
|
||||||
|
distortion below threshold band by band.
|
||||||
|
|
||||||
|
The quality of this method is comparable to the two loop searching method
|
||||||
|
descibed below, but somewhat a little better and slower.
|
||||||
|
|
||||||
|
@item 1
|
||||||
|
Average noise to mask ratio (ANMR) trellis-based solution.
|
||||||
|
|
||||||
|
This has a theoretic best quality out of all the coding methods, but at the
|
||||||
|
cost of the slowest speed.
|
||||||
|
|
||||||
|
@item 2
|
||||||
|
Two loop searching (TLS) method.
|
||||||
|
|
||||||
|
This method first sets quantizers depending on band thresholds and then tries
|
||||||
|
to find an optimal combination by adding or subtracting a specific value from
|
||||||
|
all quantizers and adjusting some individual quantizer a little.
|
||||||
|
|
||||||
|
This method produces similar quality with the FAAC method and is the default.
|
||||||
|
|
||||||
|
@item 3
|
||||||
|
Constant quantizer method.
|
||||||
|
|
||||||
|
This method sets a constant quantizer for all bands. This is the fastest of all
|
||||||
|
the methods, yet produces the worst quality.
|
||||||
|
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@subsection Tips and Tricks
|
||||||
|
|
||||||
|
According to some reports
|
||||||
|
(e.g. @url{http://d.hatena.ne.jp/kamedo2/20120729/1343545890}), setting the
|
||||||
|
@option{cutoff} option to 15000 Hz greatly improves the quality of the output
|
||||||
|
quality. As a result, we encourage you to do the same.
|
||||||
|
|
||||||
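As a rough sketch of how the options documented above combine, the following command line (file names and rates are illustrative assumptions, not taken from this page) uses the experimental native encoder in CBR mode and applies the 15 kHz cutoff suggested in the tips:

# Native experimental AAC encoder, 128 kb/s CBR, with the suggested 15 kHz cutoff.
# input.wav and output.m4a are placeholder file names.
ffmpeg -i input.wav -c:a aac -strict experimental -b:a 128k -cutoff 15000 output.m4a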
@section ac3 and ac3_fixed
|
@section ac3 and ac3_fixed
|
||||||
|
|
||||||
AC-3 audio encoders.
|
AC-3 audio encoders.
|
||||||
@@ -420,26 +509,36 @@ Requires the presence of the libmp3lame headers and library during
|
|||||||
configuration. You need to explicitly configure the build with
|
configuration. You need to explicitly configure the build with
|
||||||
@code{--enable-libmp3lame}.
|
@code{--enable-libmp3lame}.
|
||||||
|
|
||||||
@subsection Option Mapping
|
@subsection Options
|
||||||
|
|
||||||
The following options are supported by the libmp3lame wrapper,
|
The following options are supported by the libmp3lame wrapper. The
|
||||||
the LAME-equivalent options follow the FFmpeg ones.
|
@command{lame}-equivalent of the options are listed in parentheses.
|
||||||
|
|
||||||
@multitable @columnfractions .2 .2
|
@table @option
|
||||||
@item FFmpeg @tab LAME
|
@item b (@emph{-b})
|
||||||
@item b @tab b
|
Set bitrate expressed in bits/s for CBR. LAME @code{bitrate} is
|
||||||
Set bitrate expressed in bits/s, LAME @code{bitrate} is expressed in
|
expressed in kilobits/s.
|
||||||
kilobits/s.
|
|
||||||
@item q @tab V
|
@item q (@emph{-V})
|
||||||
Set quality setting for VBR.
|
Set constant quality setting for VBR. This option is valid only
|
||||||
@item compression_level @tab q
|
using the @command{ffmpeg} command-line tool. For library interface
|
||||||
Set algorithm quality. Valid arguments are integers in the 0-9 range.
|
users, use @option{global_quality}.
|
||||||
@item reservoir @tab N.A.
|
|
||||||
Enable use of bit reservoir. LAME has this enabled by default.
|
@item compression_level (@emph{-q})
|
||||||
@item joint_stereo @tab -m j
|
Set algorithm quality. Valid arguments are integers in the 0-9 range,
|
||||||
|
with 0 meaning highest quality but slowest, and 9 meaning fastest
|
||||||
|
while producing the worst quality.
|
||||||
|
|
||||||
|
@item reservoir
|
||||||
|
Enable use of bit reservoir when set to 1. Default value is 1. LAME
|
||||||
|
has this enabled by default, but can be overriden by use
|
||||||
|
@option{--nores} option.
|
||||||
|
|
||||||
|
@item joint_stereo (@emph{-m j})
|
||||||
Enable the encoder to use (on a frame by frame basis) either L/R
|
Enable the encoder to use (on a frame by frame basis) either L/R
|
||||||
stereo or mid/side stereo.
|
stereo or mid/side stereo. Default value is 1.
|
||||||
@end multitable
|
|
||||||
|
@end table
|
||||||
|
|
||||||
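A minimal illustrative invocation of the VBR mapping described above (the quality values and file names are placeholders, not part of this diff):

# -q:a 2 corresponds to LAME's -V 2; compression_level maps to LAME's -q.
ffmpeg -i input.wav -c:a libmp3lame -q:a 2 -compression_level 2 output.mp3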
@section libopencore-amrnb
|
@section libopencore-amrnb
|
||||||
|
|
||||||
@@ -486,24 +585,26 @@ Requires the presence of the libtwolame headers and library during
|
|||||||
configuration. You need to explicitly configure the build with
|
configuration. You need to explicitly configure the build with
|
||||||
@code{--enable-libtwolame}.
|
@code{--enable-libtwolame}.
|
||||||
|
|
||||||
@subsection Options Mapping
|
@subsection Options
|
||||||
|
|
||||||
The following options are supported by the libtwolame wrapper. The
|
The following options are supported by the libtwolame wrapper. The
|
||||||
TwoLAME-equivalent options follow the FFmpeg ones and are in
|
@command{twolame}-equivalent options follow the FFmpeg ones and are in
|
||||||
parentheses.
|
parentheses.
|
||||||
|
|
||||||
@table @option
|
@table @option
|
||||||
@item b
|
@item b (@emph{-b})
|
||||||
(b) Set bitrate in bits/s. Note that FFmpeg @code{b} option is
|
Set bitrate expressed in bits/s for CBR. @command{twolame} @option{b}
|
||||||
expressed in bits/s, twolame @code{b} in kilobits/s. The default
|
option is expressed in kilobits/s. Default value is 128k.
|
||||||
value is 128k.
|
|
||||||
|
|
||||||
@item q
|
@item q (@emph{-V})
|
||||||
(V) Set quality for experimental VBR support. Maximum value range is
|
Set quality for experimental VBR support. Maximum value range is
|
||||||
from -50 to 50, useful range is from -10 to 10.
|
from -50 to 50, useful range is from -10 to 10. The higher the
|
||||||
|
value, the better the quality. This option is valid only using the
|
||||||
|
@command{ffmpeg} command-line tool. For library interface users,
|
||||||
|
use @option{global_quality}.
|
||||||
|
|
||||||
@item mode
|
@item mode (@emph{--mode})
|
||||||
(mode) Set MPEG mode. Possible values:
|
Set the mode of the resulting audio. Possible values:
|
||||||
|
|
||||||
@table @samp
|
@table @samp
|
||||||
@item auto
|
@item auto
|
||||||
@@ -518,29 +619,30 @@ Dual channel
|
|||||||
Mono
|
Mono
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@item psymodel
|
@item psymodel (@emph{--psyc-mode})
|
||||||
(psyc-mode) Set psychoacoustic model to use in encoding. The argument
|
Set psychoacoustic model to use in encoding. The argument must be
|
||||||
must be an integer between -1 and 4, inclusive. The higher the value,
|
an integer between -1 and 4, inclusive. The higher the value, the
|
||||||
the better the quality. The default value is 3.
|
better the quality. The default value is 3.
|
||||||
|
|
||||||
@item energy_levels
|
@item energy_levels (@emph{--energy})
|
||||||
(energy) Enable energy levels extensions when set to 1. The default
|
Enable energy levels extensions when set to 1. The default value is
|
||||||
value is 0 (disabled).
|
0 (disabled).
|
||||||
|
|
||||||
@item error_protection
|
@item error_protection (@emph{--protect})
|
||||||
(protect) Enable CRC error protection when set to 1. The default value
|
Enable CRC error protection when set to 1. The default value is 0
|
||||||
is 0 (disabled).
|
(disabled).
|
||||||
|
|
||||||
@item copyright
|
@item copyright (@emph{--copyright})
|
||||||
(copyright) Set MPEG audio copyright flag when set to 1. The default
|
Set MPEG audio copyright flag when set to 1. The default value is 0
|
||||||
value is 0 (disabled).
|
(disabled).
|
||||||
|
|
||||||
@item original
|
@item original (@emph{--original})
|
||||||
(original) Set MPEG audio original flag when set to 1. The default
|
Set MPEG audio original flag when set to 1. The default value is 0
|
||||||
value is 0 (disabled).
|
(disabled).
|
||||||
|
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
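For illustration only (the bit rate, model setting, and file names are assumptions, not from this page), the CBR and psychoacoustic options above might be combined as:

# 384 kb/s CBR MP2 with the highest psychoacoustic model value described above.
ffmpeg -i input.wav -c:a libtwolame -b:a 384k -psymodel 4 output.mp2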
|
@anchor{libvo-aacenc}
|
||||||
@section libvo-aacenc
|
@section libvo-aacenc
|
||||||
|
|
||||||
VisualOn AAC encoder.
|
VisualOn AAC encoder.
|
||||||
@@ -549,16 +651,19 @@ Requires the presence of the libvo-aacenc headers and library during
|
|||||||
configuration. You need to explicitly configure the build with
|
configuration. You need to explicitly configure the build with
|
||||||
@code{--enable-libvo-aacenc --enable-version3}.
|
@code{--enable-libvo-aacenc --enable-version3}.
|
||||||
|
|
||||||
|
This encoder is considered to be worse than the
|
||||||
|
@ref{aacenc,,native experimental FFmpeg AAC encoder}, according to
|
||||||
|
multiple sources.
|
||||||
|
|
||||||
@subsection Options
|
@subsection Options
|
||||||
|
|
||||||
The VisualOn AAC encoder only support encoding AAC-LC and up to 2
|
The VisualOn AAC encoder only support encoding AAC-LC and up to 2
|
||||||
channels. It is also CBR-only. It is considered to be worse than the
|
channels. It is also CBR-only.
|
||||||
native experimental FFmpeg AAC encoder.
|
|
||||||
|
|
||||||
@table @option
|
@table @option
|
||||||
|
|
||||||
@item b
|
@item b
|
||||||
Bitrate.
|
Set bit rate in bits/s.
|
||||||
|
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
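A hedged example of the CBR-only usage described above (the bit rate and file names are placeholders):

# libvo_aacenc is CBR-only, so only the bit rate is set here.
ffmpeg -i input.wav -c:a libvo_aacenc -b:a 96k output.m4a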
@@ -648,7 +753,7 @@ Set maximum frame size, or duration of a frame in milliseconds. The
|
|||||||
argument must be exactly the following: 2.5, 5, 10, 20, 40, 60. Smaller
|
argument must be exactly the following: 2.5, 5, 10, 20, 40, 60. Smaller
|
||||||
frame sizes achieve lower latency but less quality at a given bitrate.
|
frame sizes achieve lower latency but less quality at a given bitrate.
|
||||||
Sizes greater than 20ms are only interesting at fairly low bitrates.
|
Sizes greater than 20ms are only interesting at fairly low bitrates.
|
||||||
The default of FFmpeg is 10ms, but is 20ms in @command{opusenc}.
|
The default is 20ms.
|
||||||
|
|
||||||
@item packet_loss (@emph{expect-loss})
|
@item packet_loss (@emph{expect-loss})
|
||||||
Set expected packet loss percentage. The default is 0.
|
Set expected packet loss percentage. The default is 0.
|
||||||
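As a sketch of the frame duration and packet loss options shown in this hunk (the values and file names are assumptions):

# 20 ms frames (the default noted above) and 5% expected packet loss.
ffmpeg -i input.wav -c:a libopus -b:a 96k -frame_duration 20 -packet_loss 5 output.opus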
@@ -873,178 +978,318 @@ For more information about libvpx see:
|
|||||||
|
|
||||||
x264 H.264/MPEG-4 AVC encoder wrapper.
|
x264 H.264/MPEG-4 AVC encoder wrapper.
|
||||||
|
|
||||||
Requires the presence of the libx264 headers and library during
|
This encoder requires the presence of the libx264 headers and library
|
||||||
configuration. You need to explicitly configure the build with
|
during configuration. You need to explicitly configure the build with
|
||||||
@code{--enable-libx264}.
|
@code{--enable-libx264}.
|
||||||
|
|
||||||
x264 supports an impressive number of features, including 8x8 and 4x4 adaptive
|
libx264 supports an impressive number of features, including 8x8 and
|
||||||
spatial transform, adaptive B-frame placement, CAVLC/CABAC entropy coding,
|
4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC
|
||||||
interlacing (MBAFF), lossless mode, psy optimizations for detail retention
|
entropy coding, interlacing (MBAFF), lossless mode, psy optimizations
|
||||||
(adaptive quantization, psy-RD, psy-trellis).
|
for detail retention (adaptive quantization, psy-RD, psy-trellis).
|
||||||
|
|
||||||
The FFmpeg wrapper provides a mapping for most of them using global options
|
Many libx264 encoder options are mapped to FFmpeg global codec
|
||||||
that match those of the encoders and provides private options for the unique
|
options, while unique encoder options are provided through private
|
||||||
encoder options. Additionally an expert override is provided to directly pass
|
options. Additionally the @option{x264opts} and @option{x264-params}
|
||||||
a list of key=value tuples as accepted by x264_param_parse.
|
private options allows to pass a list of key=value tuples as accepted
|
||||||
|
by the libx264 @code{x264_param_parse} function.
|
||||||
|
|
||||||
@subsection Option Mapping
|
The x264 project website is at
|
||||||
|
@url{http://www.videolan.org/developers/x264.html}.
|
||||||
|
|
||||||
The following options are supported by the x264 wrapper, the x264-equivalent
|
@subsection Options
|
||||||
options follow the FFmpeg ones.
|
|
||||||
|
|
||||||
@multitable @columnfractions .2 .2
|
The following options are supported by the libx264 wrapper. The
|
||||||
@item b @tab bitrate
|
@command{x264}-equivalent options or values are listed in parentheses
|
||||||
FFmpeg @code{b} option is expressed in bits/s, x264 @code{bitrate} in kilobits/s.
|
for easy migration.
|
||||||
@item bf @tab bframes
|
|
||||||
Maximum number of B-frames.
|
To reduce the duplication of documentation, only the private options
|
||||||
@item g @tab keyint
|
and some others requiring special attention are documented here. For
|
||||||
Maximum GOP size.
|
the documentation of the undocumented generic options, see
|
||||||
@item qmin @tab qpmin
|
@ref{codec-options,,the Codec Options chapter}.
|
||||||
@item qmax @tab qpmax
|
|
||||||
@item qdiff @tab qpstep
|
To get a more accurate and extensive documentation of the libx264
|
||||||
@item qblur @tab qblur
|
options, invoke the command @command{x264 --full-help} or consult
|
||||||
@item qcomp @tab qcomp
|
the libx264 documentation.
|
||||||
@item refs @tab ref
|
|
||||||
@item sc_threshold @tab scenecut
|
|
||||||
@item trellis @tab trellis
|
|
||||||
@item nr @tab nr
|
|
||||||
Noise reduction.
|
|
||||||
@item me_range @tab merange
|
|
||||||
@item me_method @tab me
|
|
||||||
@item subq @tab subme
|
|
||||||
@item b_strategy @tab b-adapt
|
|
||||||
@item keyint_min @tab keyint-min
|
|
||||||
@item coder @tab cabac
|
|
||||||
Set coder to @code{ac} to use CABAC.
|
|
||||||
@item cmp @tab chroma-me
|
|
||||||
Set to @code{chroma} to use chroma motion estimation.
|
|
||||||
@item threads @tab threads
|
|
||||||
@item thread_type @tab sliced_threads
|
|
||||||
Set to @code{slice} to use sliced threading instead of frame threading.
|
|
||||||
@item flags -cgop @tab open-gop
|
|
||||||
Set @code{-cgop} to use recovery points to close GOPs.
|
|
||||||
@item rc_init_occupancy @tab vbv-init
|
|
||||||
Initial buffer occupancy.
|
|
||||||
@end multitable
|
|
||||||
|
|
||||||
@subsection Private Options
|
|
||||||
@table @option
|
@table @option
|
||||||
@item -preset @var{string}
|
@item b (@emph{bitrate})
|
||||||
Set the encoding preset (cf. x264 --fullhelp).
|
Set bitrate in bits/s. Note that FFmpeg's @option{b} option is
|
||||||
@item -tune @var{string}
|
expressed in bits/s, while @command{x264}'s @option{bitrate} is in
|
||||||
Tune the encoding params (cf. x264 --fullhelp).
|
kilobits/s.
|
||||||
@item -profile @var{string}
|
|
||||||
Set profile restrictions (cf. x264 --fullhelp).
|
@item bf (@emph{bframes})
|
||||||
@item -fastfirstpass @var{integer}
|
|
||||||
Use fast settings when encoding first pass.
|
@item g (@emph{keyint})
|
||||||
@item -crf @var{float}
|
|
||||||
Select the quality for constant quality mode.
|
@item qmax (@emph{qpmax})
|
||||||
@item -crf_max @var{float}
|
|
||||||
In CRF mode, prevents VBV from lowering quality beyond this point.
|
@item qmin (@emph{qpmin})
|
||||||
@item -qp @var{integer}
|
|
||||||
Constant quantization parameter rate control method.
|
@item qdiff (@emph{qpstep})
|
||||||
@item -aq-mode @var{integer}
|
|
||||||
AQ method
|
@item qblur (@emph{qblur})
|
||||||
|
|
||||||
|
@item qcomp (@emph{qcomp})
|
||||||
|
|
||||||
|
@item refs (@emph{ref})
|
||||||
|
|
||||||
|
@item sc_threshold (@emph{scenecut})
|
||||||
|
|
||||||
|
@item trellis (@emph{trellis})
|
||||||
|
|
||||||
|
@item nr (@emph{nr})
|
||||||
|
|
||||||
|
@item me_range (@emph{merange})
|
||||||
|
|
||||||
|
@item me_method (@emph{me})
|
||||||
|
Set motion estimation method. Possible values in the decreasing order
|
||||||
|
of speed:
|
||||||
|
|
||||||
Possible values:
|
|
||||||
@table @samp
|
@table @samp
|
||||||
@item none
|
@item dia (@emph{dia})
|
||||||
|
@item epzs (@emph{dia})
|
||||||
|
Diamond search with radius 1 (fastest). @samp{epzs} is an alias for
|
||||||
|
@samp{dia}.
|
||||||
|
@item hex (@emph{hex})
|
||||||
|
Hexagonal search with radius 2.
|
||||||
|
@item umh (@emph{umh})
|
||||||
|
Uneven multi-hexagon search.
|
||||||
|
@item esa (@emph{esa})
|
||||||
|
Exhaustive search.
|
||||||
|
@item tesa (@emph{tesa})
|
||||||
|
Hadamard exhaustive search (slowest).
|
||||||
|
@end table
|
||||||
|
|
||||||
@item variance
|
@item subq (@emph{subme})
|
||||||
|
|
||||||
|
@item b_strategy (@emph{b-adapt})
|
||||||
|
|
||||||
|
@item keyint_min (@emph{min-keyint})
|
||||||
|
|
||||||
|
@item coder
|
||||||
|
Set entropy encoder. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item ac
|
||||||
|
Enable CABAC.
|
||||||
|
|
||||||
|
@item vlc
|
||||||
|
Enable CAVLC and disable CABAC. It generates the same effect as
|
||||||
|
@command{x264}'s @option{--no-cabac} option.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item cmp
|
||||||
|
Set full pixel motion estimation comparation algorithm. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item chroma
|
||||||
|
Enable chroma in motion estimation.
|
||||||
|
|
||||||
|
@item sad
|
||||||
|
Ignore chroma in motion estimation. It generates the same effect as
|
||||||
|
@command{x264}'s @option{--no-chroma-me} option.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item threads (@emph{threads})
|
||||||
|
|
||||||
|
@item thread_type
|
||||||
|
Set multithreading technique. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item slice
|
||||||
|
Slice-based multithreading. It generates the same effect as
|
||||||
|
@command{x264}'s @option{--sliced-threads} option.
|
||||||
|
@item frame
|
||||||
|
Frame-based multithreading.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item flags
|
||||||
|
Set encoding flags. It can be used to disable closed GOP and enable
|
||||||
|
open GOP by setting it to @code{-cgop}. The result is similar to
|
||||||
|
the behavior of @command{x264}'s @option{--open-gop} option.
|
||||||
|
|
||||||
|
@item rc_init_occupancy (@emph{vbv-init})
|
||||||
|
|
||||||
|
@item preset (@emph{preset})
|
||||||
|
Set the encoding preset.
|
||||||
|
|
||||||
|
@item tune (@emph{tune})
|
||||||
|
Set tuning of the encoding params.
|
||||||
|
|
||||||
|
@item profile (@emph{profile})
|
||||||
|
Set profile restrictions.
|
||||||
|
|
||||||
|
@item fastfirstpass
|
||||||
|
Enable fast settings when encoding first pass, when set to 1. When set
|
||||||
|
to 0, it has the same effect of @command{x264}'s
|
||||||
|
@option{--slow-firstpass} option.
|
||||||
|
|
||||||
|
@item crf (@emph{crf})
|
||||||
|
Set the quality for constant quality mode.
|
||||||
|
|
||||||
|
@item crf_max (@emph{crf-max})
|
||||||
|
In CRF mode, prevents VBV from lowering quality beyond this point.
|
||||||
|
|
||||||
|
@item qp (@emph{qp})
|
||||||
|
Set constant quantization rate control method parameter.
|
||||||
|
|
||||||
|
@item aq-mode (@emph{aq-mode})
|
||||||
|
Set AQ method. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item none (@emph{0})
|
||||||
|
Disabled.
|
||||||
|
|
||||||
|
@item variance (@emph{1})
|
||||||
Variance AQ (complexity mask).
|
Variance AQ (complexity mask).
|
||||||
@item autovariance
|
|
||||||
|
@item autovariance (@emph{2})
|
||||||
Auto-variance AQ (experimental).
|
Auto-variance AQ (experimental).
|
||||||
@end table
|
@end table
|
||||||
@item -aq-strength @var{float}
|
|
||||||
AQ strength, reduces blocking and blurring in flat and textured areas.
|
|
||||||
@item -psy @var{integer}
|
|
||||||
Use psychovisual optimizations.
|
|
||||||
@item -psy-rd @var{string}
|
|
||||||
Strength of psychovisual optimization, in <psy-rd>:<psy-trellis> format.
|
|
||||||
@item -rc-lookahead @var{integer}
|
|
||||||
Number of frames to look ahead for frametype and ratecontrol.
|
|
||||||
@item -weightb @var{integer}
|
|
||||||
Weighted prediction for B-frames.
|
|
||||||
@item -weightp @var{integer}
|
|
||||||
Weighted prediction analysis method.
|
|
||||||
|
|
||||||
Possible values:
|
@item aq-strength (@emph{aq-strength})
|
||||||
|
Set AQ strength, reduce blocking and blurring in flat and textured areas.
|
||||||
|
|
||||||
|
@item psy
|
||||||
|
Use psychovisual optimizations when set to 1. When set to 0, it has the
|
||||||
|
same effect as @command{x264}'s @option{--no-psy} option.
|
||||||
|
|
||||||
|
@item psy-rd (@emph{psy-rd})
|
||||||
|
Set strength of psychovisual optimization, in
|
||||||
|
@var{psy-rd}:@var{psy-trellis} format.
|
||||||
|
|
||||||
|
@item rc-lookahead (@emph{rc-lookahead})
|
||||||
|
Set number of frames to look ahead for frametype and ratecontrol.
|
||||||
|
|
||||||
|
@item weightb
|
||||||
|
Enable weighted prediction for B-frames when set to 1. When set to 0,
|
||||||
|
it has the same effect as @command{x264}'s @option{--no-weightb} option.
|
||||||
|
|
||||||
|
@item weightp (@emph{weightp})
|
||||||
|
Set weighted prediction method for P-frames. Possible values:
|
||||||
|
|
||||||
@table @samp
|
@table @samp
|
||||||
@item none
|
@item none (@emph{0})
|
||||||
|
Disabled
|
||||||
@item simple
|
@item simple (@emph{1})
|
||||||
|
Enable only weighted refs
|
||||||
@item smart
|
@item smart (@emph{2})
|
||||||
|
Enable both weighted refs and duplicates
|
||||||
@end table
|
@end table
|
||||||
@item -ssim @var{integer}
|
|
||||||
Calculate and print SSIM stats.
|
|
||||||
@item -intra-refresh @var{integer}
|
|
||||||
Use Periodic Intra Refresh instead of IDR frames.
|
|
||||||
@item -b-bias @var{integer}
|
|
||||||
Influences how often B-frames are used.
|
|
||||||
@item -b-pyramid @var{integer}
|
|
||||||
Keep some B-frames as references.
|
|
||||||
|
|
||||||
Possible values:
|
@item ssim (@emph{ssim})
|
||||||
|
Enable calculation and printing SSIM stats after the encoding.
|
||||||
|
|
||||||
|
@item intra-refresh (@emph{intra-refresh})
|
||||||
|
Enable the use of Periodic Intra Refresh instead of IDR frames when set
|
||||||
|
to 1.
|
||||||
|
|
||||||
|
@item b-bias (@emph{b-bias})
|
||||||
|
Set the influence on how often B-frames are used.
|
||||||
|
|
||||||
|
@item b-pyramid (@emph{b-pyramid})
|
||||||
|
Set method for keeping of some B-frames as references. Possible values:
|
||||||
|
|
||||||
@table @samp
|
@table @samp
|
||||||
@item none
|
@item none (@emph{none})
|
||||||
|
Disabled.
|
||||||
@item strict
|
@item strict (@emph{strict})
|
||||||
Strictly hierarchical pyramid.
|
Strictly hierarchical pyramid.
|
||||||
@item normal
|
@item normal (@emph{normal})
|
||||||
Non-strict (not Blu-ray compatible).
|
Non-strict (not Blu-ray compatible).
|
||||||
@end table
|
@end table
|
||||||
@item -mixed-refs @var{integer}
|
|
||||||
One reference per partition, as opposed to one reference per macroblock.
|
|
||||||
@item -8x8dct @var{integer}
|
|
||||||
High profile 8x8 transform.
|
|
||||||
@item -fast-pskip @var{integer}
|
|
||||||
@item -aud @var{integer}
|
|
||||||
Use access unit delimiters.
|
|
||||||
@item -mbtree @var{integer}
|
|
||||||
Use macroblock tree ratecontrol.
|
|
||||||
@item -deblock @var{string}
|
|
||||||
Loop filter parameters, in <alpha:beta> form.
|
|
||||||
@item -cplxblur @var{float}
|
|
||||||
Reduce fluctuations in QP (before curve compression).
|
|
||||||
@item -partitions @var{string}
|
|
||||||
A comma-separated list of partitions to consider, possible values: p8x8, p4x4, b8x8, i8x8, i4x4, none, all.
|
|
||||||
@item -direct-pred @var{integer}
|
|
||||||
Direct MV prediction mode
|
|
||||||
|
|
||||||
Possible values:
|
@item mixed-refs
|
||||||
|
Enable the use of one reference per partition, as opposed to one
|
||||||
|
reference per macroblock when set to 1. When set to 0, it has the
|
||||||
|
same effect as @command{x264}'s @option{--no-mixed-refs} option.
|
||||||
|
|
||||||
|
@item 8x8dct
|
||||||
|
Enable adaptive spatial transform (high profile 8x8 transform)
|
||||||
|
when set to 1. When set to 0, it has the same effect as
|
||||||
|
@command{x264}'s @option{--no-8x8dct} option.
|
||||||
|
|
||||||
|
@item fast-pskip
|
||||||
|
Enable early SKIP detection on P-frames when set to 1. When set
|
||||||
|
to 0, it has the same effect as @command{x264}'s
|
||||||
|
@option{--no-fast-pskip} option.
|
||||||
|
|
||||||
|
@item aud (@emph{aud})
|
||||||
|
Enable use of access unit delimiters when set to 1.
|
||||||
|
|
||||||
|
@item mbtree
|
||||||
|
Enable use of macroblock tree ratecontrol when set to 1. When set
|
||||||
|
to 0, it has the same effect as @command{x264}'s
|
||||||
|
@option{--no-mbtree} option.
|
||||||
|
|
||||||
|
@item deblock (@emph{deblock})
|
||||||
|
Set loop filter parameters, in @var{alpha}:@var{beta} form.
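For example, slightly stronger in-loop deblocking could be requested like this
(illustrative values only):
@example
ffmpeg -i INPUT -c:v libx264 -deblock 1:1 OUTPUT
@end example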
|
||||||
|
|
||||||
|
@item cplxblur (@emph{cplxblur})
|
||||||
|
Set the reduction of fluctuations in QP (before curve compression).
|
||||||
|
|
||||||
|
@item partitions (@emph{partitions})
|
||||||
|
Set partitions to consider as a comma-separated list. Possible
|
||||||
|
values in the list:
|
||||||
|
|
||||||
@table @samp
|
@table @samp
|
||||||
@item none
|
@item p8x8
|
||||||
|
8x8 P-frame partition.
|
||||||
@item spatial
|
@item p4x4
|
||||||
|
4x4 P-frame partition.
|
||||||
@item temporal
|
@item b8x8
|
||||||
|
8x8 B-frame partition.
|
||||||
@item auto
|
@item i8x8
|
||||||
|
8x8 I-frame partition.
|
||||||
@end table
|
@item i4x4
|
||||||
@item -slice-max-size @var{integer}
|
4x4 I-frame partition.
|
||||||
Limit the size of each slice in bytes.
|
(Enabling @samp{p4x4} requires @samp{p8x8} to be enabled. Enabling
|
||||||
@item -stats @var{string}
|
@samp{i8x8} requires adaptive spatial transform (@option{8x8dct}
|
||||||
Filename for 2 pass stats.
|
option) to be enabled.)
|
||||||
@item -nal-hrd @var{integer}
|
@item none (@emph{none})
|
||||||
Signal HRD information (requires vbv-bufsize; cbr not allowed in .mp4).
|
Do not consider any partitions.
|
||||||
|
@item all (@emph{all})
|
||||||
Possible values:
|
Consider every partition.
|
||||||
@table @samp
|
|
||||||
@item none
|
|
||||||
|
|
||||||
@item vbr
|
|
||||||
|
|
||||||
@item cbr
|
|
||||||
|
|
||||||
@end table
|
@end table
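For example, a restricted partition set could be selected as follows (the
particular selection is illustrative, not a recommendation):
@example
ffmpeg -i INPUT -c:v libx264 -partitions p8x8,b8x8,i8x8,i4x4 OUTPUT
@end example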
|
||||||
|
|
||||||
@item x264opts @var{options}
|
@item direct-pred (@emph{direct})
|
||||||
Allow to set any x264 option, see @code{x264 --fullhelp} for a list.
|
Set direct MV prediction mode. Possible values:
|
||||||
|
|
||||||
@var{options} is a list of @var{key}=@var{value} couples separated by
|
@table @samp
|
||||||
|
@item none (@emph{none})
|
||||||
|
Disable MV prediction.
|
||||||
|
@item spatial (@emph{spatial})
|
||||||
|
Enable spatial prediction.
|
||||||
|
@item temporal (@emph{temporal})
|
||||||
|
Enable temporal prediction.
|
||||||
|
@item auto (@emph{auto})
|
||||||
|
Automatically decided.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item slice-max-size (@emph{slice-max-size})
|
||||||
|
Set the limit of the size of each slice in bytes. If not specified
|
||||||
|
but RTP payload size (@option{ps}) is specified, that is used.
|
||||||
|
|
||||||
|
@item stats (@emph{stats})
|
||||||
|
Set the file name for multi-pass stats.
|
||||||
|
|
||||||
|
@item nal-hrd (@emph{nal-hrd})
|
||||||
|
Set signaling of HRD information (requires @option{vbv-bufsize} to be set).
|
||||||
|
Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item none (@emph{none})
|
||||||
|
Disable HRD information signaling.
|
||||||
|
@item vbr (@emph{vbr})
|
||||||
|
Variable bit rate.
|
||||||
|
@item cbr (@emph{cbr})
|
||||||
|
Constant bit rate (not allowed in MP4 container).
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item x264opts (N.A.)
|
||||||
|
Set any x264 option, see @command{x264 --fullhelp} for a list.
|
||||||
|
|
||||||
|
Argument is a list of @var{key}=@var{value} couples separated by
|
||||||
":". In @var{filter} and @var{psy-rd} options that use ":" as a separator
|
":". In @var{filter} and @var{psy-rd} options that use ":" as a separator
|
||||||
themselves, use "," instead. The "," separator has long been accepted as
|
themselves, use "," instead. The "," separator has long been accepted as
|
||||||
well, although this remains undocumented.
|
well, although this remains undocumented.
|
||||||
@@ -1054,18 +1299,136 @@ For example to specify libx264 encoding options with @command{ffmpeg}:
|
|||||||
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
|
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
|
||||||
@end example
|
@end example
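As noted above, option values that themselves contain ":" (such as
@option{psy-rd}) can use "," inside @option{x264opts}; an illustrative sketch:
@example
ffmpeg -i foo.mpg -vcodec libx264 -x264opts psy-rd=1.0,0.15:keyint=123 -an out.mkv
@end example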
|
||||||
|
|
||||||
For more information about libx264 and the supported options see:
|
@item x264-params (N.A.)
|
||||||
@url{http://www.videolan.org/developers/x264.html}
|
Override the x264 configuration using a :-separated list of key=value
|
||||||
|
parameters.
|
||||||
|
|
||||||
@item -x264-params @var{string}
|
This option is functionally the same as @option{x264opts}, but is
|
||||||
Override the x264 configuration using a :-separated list of key=value parameters.
|
duplicated for compatibility with the Libav fork.
|
||||||
|
|
||||||
|
For example to specify libx264 encoding options with @command{ffmpeg}:
|
||||||
@example
|
@example
|
||||||
-x264-params level=30:bframes=0:weightp=0:cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:no-fast-pskip=1:subq=6:8x8dct=0:trellis=0
|
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
|
||||||
|
cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
|
||||||
|
no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
|
||||||
@end example
|
@end example
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
Encoding avpresets for common usages are provided so they can be used with the
|
Encoding ffpresets for common usages are provided so they can be used with the
|
||||||
general presets system (e.g. passing the @code{-pre} option).
|
general presets system (e.g. passing the @option{pre} option).
|
||||||
|
|
||||||
|
@section libxvid
|
||||||
|
|
||||||
|
Xvid MPEG-4 Part 2 encoder wrapper.
|
||||||
|
|
||||||
|
This encoder requires the presence of the libxvidcore headers and library
|
||||||
|
during configuration. You need to explicitly configure the build with
|
||||||
|
@code{--enable-libxvid --enable-gpl}.
|
||||||
|
|
||||||
|
The native @code{mpeg4} encoder supports the MPEG-4 Part 2 format, so
|
||||||
|
users can encode to this format without this library.
|
||||||
|
|
||||||
|
@subsection Options
|
||||||
|
|
||||||
|
The following options are supported by the libxvid wrapper. Some of
|
||||||
|
them are listed but not documented, and
|
||||||
|
correspond to shared codec options. See @ref{codec-options,,the Codec
|
||||||
|
Options chapter} for their documentation. The other shared options
|
||||||
|
which are not listed have no effect for the libxvid encoder.
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
@item b
|
||||||
|
|
||||||
|
@item g
|
||||||
|
|
||||||
|
@item qmin
|
||||||
|
|
||||||
|
@item qmax
|
||||||
|
|
||||||
|
@item mpeg_quant
|
||||||
|
|
||||||
|
@item threads
|
||||||
|
|
||||||
|
@item bf
|
||||||
|
|
||||||
|
@item b_qfactor
|
||||||
|
|
||||||
|
@item b_qoffset
|
||||||
|
|
||||||
|
@item flags
|
||||||
|
Set specific encoding flags. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
|
||||||
|
@item mv4
|
||||||
|
Use four motion vectors per macroblock.
|
||||||
|
|
||||||
|
@item aic
|
||||||
|
Enable high quality AC prediction.
|
||||||
|
|
||||||
|
@item gray
|
||||||
|
Only encode grayscale.
|
||||||
|
|
||||||
|
@item gmc
|
||||||
|
Enable the use of global motion compensation (GMC).
|
||||||
|
|
||||||
|
@item qpel
|
||||||
|
Enable quarter-pixel motion compensation.
|
||||||
|
|
||||||
|
@item cgop
|
||||||
|
Enable closed GOP.
|
||||||
|
|
||||||
|
@item global_header
|
||||||
|
Place global headers in extradata instead of every keyframe.
|
||||||
|
|
||||||
|
@end table
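Several of these flags can be combined in a single invocation, for example
(an illustrative sketch, not a tuned setting):
@example
ffmpeg -i INPUT -c:v libxvid -flags +mv4+aic OUTPUT
@end example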
|
||||||
|
|
||||||
|
@item trellis
|
||||||
|
|
||||||
|
@item me_method
|
||||||
|
Set motion estimation method. Possible values in decreasing order of
|
||||||
|
speed and increasing order of quality:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item zero
|
||||||
|
Use no motion estimation (default).
|
||||||
|
|
||||||
|
@item phods
|
||||||
|
@item x1
|
||||||
|
@item log
|
||||||
|
Enable advanced diamond zonal search for 16x16 blocks and half-pixel
|
||||||
|
refinement for 16x16 blocks. @samp{x1} and @samp{log} are aliases for
|
||||||
|
@samp{phods}.
|
||||||
|
|
||||||
|
@item epzs
|
||||||
|
Enable all of the things described above, plus advanced diamond zonal
|
||||||
|
search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
|
||||||
|
estimation on chroma planes.
|
||||||
|
|
||||||
|
@item full
|
||||||
|
Enable all of the things described above, plus extended 16x16 and 8x8
|
||||||
|
blocks search.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item mbd
|
||||||
|
Set macroblock decision algorithm. Possible values in increasing
|
||||||
|
order of quality:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item simple
|
||||||
|
Use the macroblock comparing function (default).
|
||||||
|
|
||||||
|
@item bits
|
||||||
|
Enable rate distortion-based half pixel and quarter pixel refinement for
|
||||||
|
16x16 blocks.
|
||||||
|
|
||||||
|
@item rd
|
||||||
|
Enable all of the things described above, plus rate distortion-based
|
||||||
|
half pixel and quarter pixel refinement for 8x8 blocks, and rate
|
||||||
|
distortion-based search using square pattern.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@end table
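Putting a few of the quality-related options together, an illustrative (not
authoritative) invocation might look like:
@example
ffmpeg -i INPUT -c:v libxvid -mbd rd -me_method full -b:v 800k OUTPUT
@end example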
|
||||||
|
|
||||||
@section png
|
@section png
|
||||||
|
|
||||||
|
@@ -3370,7 +3370,7 @@ within the parameter list.
|
|||||||
@item
|
@item
|
||||||
Show the text at the center of the video frame:
|
Show the text at the center of the video frame:
|
||||||
@example
|
@example
|
||||||
drawtext="fontsize=30:fontfile=FreeSerif.ttf:text='hello world':x=(w-text_w)/2:y=(h-text_h-line_h)/2"
|
drawtext="fontsize=30:fontfile=FreeSerif.ttf:text='hello world':x=(w-text_w)/2:y=(h-text_h)/2"
|
||||||
@end example
|
@end example
|
||||||
|
|
||||||
@item
|
@item
|
||||||
|
@@ -57,6 +57,9 @@ Enable RTP MP4A-LATM payload.
|
|||||||
Reduce the latency introduced by optional buffering
|
Reduce the latency introduced by optional buffering
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@item seek2any @var{integer} (@emph{input})
|
||||||
|
Force seeking to any frame, including non-keyframes, when set to 1. Default is 0.
|
||||||
|
|
||||||
@item analyzeduration @var{integer} (@emph{input})
|
@item analyzeduration @var{integer} (@emph{input})
|
||||||
Specify how many microseconds are analyzed to probe the input. A
|
Specify how many microseconds are analyzed to probe the input. A
|
||||||
higher value will enable more accurate information to be detected, but will
|
higher value will enable more accurate information to be detected, but will
|
||||||
@@ -133,6 +136,12 @@ been without shifting.
|
|||||||
Also note that this affects only leading negative timestamps, and not
|
Also note that this affects only leading negative timestamps, and not
|
||||||
non-monotonic negative timestamps.
|
non-monotonic negative timestamps.
|
||||||
|
|
||||||
|
@item skip_initial_bytes @var{integer} (@emph{input})
|
||||||
|
Set the number of initial bytes to skip. Default is 0.
|
||||||
|
|
||||||
|
@item correct_ts_overflow @var{integer} (@emph{input})
|
||||||
|
Correct single timestamp overflows if set to 1. Default is 1.
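For example, the input options above could be combined as follows (the byte
count is purely illustrative):
@example
ffmpeg -skip_initial_bytes 1024 -seek2any 1 -i INPUT OUTPUT
@end example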
|
||||||
|
|
||||||
@item flush_packets @var{integer} (@emph{output})
|
@item flush_packets @var{integer} (@emph{output})
|
||||||
Flush the underlying I/O stream after each packet. Default 1 enables it, and
|
Flush the underlying I/O stream after each packet. Default 1 enables it, and
|
||||||
has the effect of reducing the latency; 0 disables it and may slightly
|
has the effect of reducing the latency; 0 disables it and may slightly
|
||||||
|
@@ -33,7 +33,7 @@ Select nearest neighbor rescaling algorithm.
|
|||||||
@item area
|
@item area
|
||||||
Select averaging area rescaling algorithm.
|
Select averaging area rescaling algorithm.
|
||||||
|
|
||||||
@item bicubiclin
|
@item bicublin
|
||||||
Select bicubic scaling algorithm for the luma component, bilinear for
|
Select bicubic scaling algorithm for the luma component, bilinear for
|
||||||
chroma components.
|
chroma components.
|
||||||
|
|
||||||
|
@@ -386,7 +386,7 @@ Return 1 if @var{x} is lesser than or equal to @var{y}, 0 otherwise.
|
|||||||
Return the maximum between @var{x} and @var{y}.
|
Return the maximum between @var{x} and @var{y}.
|
||||||
|
|
||||||
@item min(x, y)
|
@item min(x, y)
|
||||||
Return the maximum between @var{x} and @var{y}.
|
Return the minimum between @var{x} and @var{y}.
|
||||||
|
|
||||||
@item mod(x, y)
|
@item mod(x, y)
|
||||||
Compute the remainder of division of @var{x} by @var{y}.
|
Compute the remainder of division of @var{x} by @var{y}.
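For example, @code{mod(13, 5)} evaluates to 3, and @code{min(max(2, 4), 7)} evaluates to 4.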
|
||||||
|
ffmpeg.c
@@ -319,7 +319,7 @@ sigterm_handler(int sig)
|
|||||||
received_nb_signals++;
|
received_nb_signals++;
|
||||||
term_exit();
|
term_exit();
|
||||||
if(received_nb_signals > 3)
|
if(received_nb_signals > 3)
|
||||||
exit_program(123);
|
exit(123);
|
||||||
}
|
}
|
||||||
|
|
||||||
void term_init(void)
|
void term_init(void)
|
||||||
@@ -349,7 +349,6 @@ void term_init(void)
|
|||||||
signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
|
signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
avformat_network_deinit();
|
|
||||||
|
|
||||||
signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
|
signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
|
||||||
signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
|
signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
|
||||||
@@ -1452,7 +1451,7 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
|
|||||||
&& ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
|
&& ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
|
||||||
&& ost->st->codec->codec_id != AV_CODEC_ID_VC1
|
&& ost->st->codec->codec_id != AV_CODEC_ID_VC1
|
||||||
) {
|
) {
|
||||||
if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY)) {
|
if (av_parser_change(av_stream_get_parser(ist->st), ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY)) {
|
||||||
opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
|
opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
|
||||||
if (!opkt.buf)
|
if (!opkt.buf)
|
||||||
exit_program(1);
|
exit_program(1);
|
||||||
@@ -1852,7 +1851,7 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
|
|||||||
if (avpkt.duration) {
|
if (avpkt.duration) {
|
||||||
duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
|
duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
|
||||||
} else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {
|
} else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {
|
||||||
int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
|
int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->st->codec->ticks_per_frame;
|
||||||
duration = ((int64_t)AV_TIME_BASE *
|
duration = ((int64_t)AV_TIME_BASE *
|
||||||
ist->st->codec->time_base.num * ticks) /
|
ist->st->codec->time_base.num * ticks) /
|
||||||
ist->st->codec->time_base.den;
|
ist->st->codec->time_base.den;
|
||||||
@@ -1909,7 +1908,7 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
|
|||||||
} else if (pkt->duration) {
|
} else if (pkt->duration) {
|
||||||
ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
|
ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
|
||||||
} else if(ist->st->codec->time_base.num != 0) {
|
} else if(ist->st->codec->time_base.num != 0) {
|
||||||
int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
|
int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
|
||||||
ist->next_dts += ((int64_t)AV_TIME_BASE *
|
ist->next_dts += ((int64_t)AV_TIME_BASE *
|
||||||
ist->st->codec->time_base.num * ticks) /
|
ist->st->codec->time_base.num * ticks) /
|
||||||
ist->st->codec->time_base.den;
|
ist->st->codec->time_base.den;
|
||||||
@@ -2086,7 +2085,7 @@ static int transcode_init(void)
|
|||||||
AVCodecContext *codec;
|
AVCodecContext *codec;
|
||||||
OutputStream *ost;
|
OutputStream *ost;
|
||||||
InputStream *ist;
|
InputStream *ist;
|
||||||
char error[1024];
|
char error[1024] = {0};
|
||||||
int want_sdp = 1;
|
int want_sdp = 1;
|
||||||
|
|
||||||
/* init framerate emulation */
|
/* init framerate emulation */
|
||||||
|
@@ -42,12 +42,15 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFo
|
|||||||
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
|
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
|
||||||
int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
|
int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
|
||||||
enum AVPixelFormat best= AV_PIX_FMT_NONE;
|
enum AVPixelFormat best= AV_PIX_FMT_NONE;
|
||||||
|
const enum AVPixelFormat mjpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
|
||||||
|
const enum AVPixelFormat ljpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
|
||||||
|
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
|
||||||
|
|
||||||
if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
|
if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
|
||||||
if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
|
if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
|
||||||
p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
|
p = mjpeg_formats;
|
||||||
} else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
|
} else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
|
||||||
p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
|
p =ljpeg_formats;
|
||||||
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for (; *p != AV_PIX_FMT_NONE; p++) {
|
for (; *p != AV_PIX_FMT_NONE; p++) {
|
||||||
@@ -92,6 +95,11 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec)
|
|||||||
|
|
||||||
static char *choose_pix_fmts(OutputStream *ost)
|
static char *choose_pix_fmts(OutputStream *ost)
|
||||||
{
|
{
|
||||||
|
AVDictionaryEntry *strict_dict = av_dict_get(ost->opts, "strict", NULL, 0);
|
||||||
|
if (strict_dict)
|
||||||
|
// used by choose_pixel_fmt() and below
|
||||||
|
av_opt_set(ost->st->codec, "strict", strict_dict->value, 0);
|
||||||
|
|
||||||
if (ost->keep_pix_fmt) {
|
if (ost->keep_pix_fmt) {
|
||||||
if (ost->filter)
|
if (ost->filter)
|
||||||
avfilter_graph_set_auto_convert(ost->filter->graph->graph,
|
avfilter_graph_set_auto_convert(ost->filter->graph->graph,
|
||||||
|
@@ -1681,7 +1681,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
|||||||
/* pick the "best" stream of each type */
|
/* pick the "best" stream of each type */
|
||||||
|
|
||||||
/* video: highest resolution */
|
/* video: highest resolution */
|
||||||
if (!o->video_disable && oc->oformat->video_codec != AV_CODEC_ID_NONE) {
|
if (!o->video_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO) != AV_CODEC_ID_NONE) {
|
||||||
int area = 0, idx = -1;
|
int area = 0, idx = -1;
|
||||||
int qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0);
|
int qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0);
|
||||||
for (i = 0; i < nb_input_streams; i++) {
|
for (i = 0; i < nb_input_streams; i++) {
|
||||||
@@ -1703,7 +1703,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* audio: most channels */
|
/* audio: most channels */
|
||||||
if (!o->audio_disable && oc->oformat->audio_codec != AV_CODEC_ID_NONE) {
|
if (!o->audio_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_AUDIO) != AV_CODEC_ID_NONE) {
|
||||||
int channels = 0, idx = -1;
|
int channels = 0, idx = -1;
|
||||||
for (i = 0; i < nb_input_streams; i++) {
|
for (i = 0; i < nb_input_streams; i++) {
|
||||||
ist = input_streams[i];
|
ist = input_streams[i];
|
||||||
@@ -2617,7 +2617,7 @@ const OptionDef options[] = {
|
|||||||
{ "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
|
{ "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
|
||||||
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
|
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
|
||||||
"set the input ts scale", "scale" },
|
"set the input ts scale", "scale" },
|
||||||
{ "timestamp", HAS_ARG | OPT_PERFILE, { .func_arg = opt_recording_timestamp },
|
{ "timestamp", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_recording_timestamp },
|
||||||
"set the recording timestamp ('now' to set the current time)", "time" },
|
"set the recording timestamp ('now' to set the current time)", "time" },
|
||||||
{ "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
|
{ "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
|
||||||
"add metadata", "string=string" },
|
"add metadata", "string=string" },
|
||||||
|
@@ -328,6 +328,14 @@ static AVLFG random_state;
|
|||||||
|
|
||||||
static FILE *logfile = NULL;
|
static FILE *logfile = NULL;
|
||||||
|
|
||||||
|
static void htmlstrip(char *s) {
|
||||||
|
while (s && *s) {
|
||||||
|
s += strspn(s, "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ,. ");
|
||||||
|
if (*s)
|
||||||
|
*s++ = '?';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int64_t ffm_read_write_index(int fd)
|
static int64_t ffm_read_write_index(int fd)
|
||||||
{
|
{
|
||||||
uint8_t buf[8];
|
uint8_t buf[8];
|
||||||
@@ -1887,6 +1895,7 @@ static int http_parse_request(HTTPContext *c)
|
|||||||
send_error:
|
send_error:
|
||||||
c->http_error = 404;
|
c->http_error = 404;
|
||||||
q = c->buffer;
|
q = c->buffer;
|
||||||
|
htmlstrip(msg);
|
||||||
snprintf(q, c->buffer_size,
|
snprintf(q, c->buffer_size,
|
||||||
"HTTP/1.0 404 Not Found\r\n"
|
"HTTP/1.0 404 Not Found\r\n"
|
||||||
"Content-type: text/html\r\n"
|
"Content-type: text/html\r\n"
|
||||||
|
@@ -38,15 +38,15 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
|
|||||||
static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||||
int *got_frame, AVPacket *avpkt)
|
int *got_frame, AVPacket *avpkt)
|
||||||
{
|
{
|
||||||
int line = 0, ret;
|
int line, ret;
|
||||||
const int width = avctx->width;
|
const int width = avctx->width;
|
||||||
AVFrame *pic = data;
|
AVFrame *pic = data;
|
||||||
uint16_t *y, *u, *v;
|
uint16_t *y, *u, *v;
|
||||||
const uint8_t *line_end, *src = avpkt->data;
|
const uint8_t *line_end, *src = avpkt->data;
|
||||||
int stride = avctx->width * 8 / 3;
|
int stride = avctx->width * 8 / 3;
|
||||||
|
|
||||||
if (width == 1) {
|
if (width <= 1 || avctx->height <= 0) {
|
||||||
av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
|
av_log(avctx, AV_LOG_ERROR, "Dimensions %dx%d not supported.\n", width, avctx->height);
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
if (avpkt->size < avctx->height * stride) {
|
if (avpkt->size < avctx->height * stride) {
|
||||||
@@ -61,45 +61,45 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
pic->pict_type = AV_PICTURE_TYPE_I;
|
pic->pict_type = AV_PICTURE_TYPE_I;
|
||||||
pic->key_frame = 1;
|
pic->key_frame = 1;
|
||||||
|
|
||||||
y = (uint16_t *)pic->data[0];
|
|
||||||
u = (uint16_t *)pic->data[1];
|
|
||||||
v = (uint16_t *)pic->data[2];
|
|
||||||
line_end = avpkt->data + stride;
|
line_end = avpkt->data + stride;
|
||||||
|
for (line = 0; line < avctx->height; line++) {
|
||||||
|
uint16_t y_temp[6] = {0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000};
|
||||||
|
uint16_t u_temp[3] = {0x8000, 0x8000, 0x8000};
|
||||||
|
uint16_t v_temp[3] = {0x8000, 0x8000, 0x8000};
|
||||||
|
int x;
|
||||||
|
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||||
|
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||||
|
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||||
|
|
||||||
while (line++ < avctx->height) {
|
for (x = 0; x < width; x += 6) {
|
||||||
while (1) {
|
uint32_t t;
|
||||||
uint32_t t = AV_RL32(src);
|
|
||||||
|
if (width - x < 6 || line_end - src < 16) {
|
||||||
|
y = y_temp;
|
||||||
|
u = u_temp;
|
||||||
|
v = v_temp;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (line_end - src < 4)
|
||||||
|
break;
|
||||||
|
|
||||||
|
t = AV_RL32(src);
|
||||||
src += 4;
|
src += 4;
|
||||||
*u++ = t << 6 & 0xFFC0;
|
*u++ = t << 6 & 0xFFC0;
|
||||||
*y++ = t >> 4 & 0xFFC0;
|
*y++ = t >> 4 & 0xFFC0;
|
||||||
*v++ = t >> 14 & 0xFFC0;
|
*v++ = t >> 14 & 0xFFC0;
|
||||||
|
|
||||||
if (src >= line_end - 1) {
|
if (line_end - src < 4)
|
||||||
*y = 0x80;
|
|
||||||
src++;
|
|
||||||
line_end += stride;
|
|
||||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
|
||||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
|
||||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
|
||||||
break;
|
break;
|
||||||
}
|
|
||||||
|
|
||||||
t = AV_RL32(src);
|
t = AV_RL32(src);
|
||||||
src += 4;
|
src += 4;
|
||||||
*y++ = t << 6 & 0xFFC0;
|
*y++ = t << 6 & 0xFFC0;
|
||||||
*u++ = t >> 4 & 0xFFC0;
|
*u++ = t >> 4 & 0xFFC0;
|
||||||
*y++ = t >> 14 & 0xFFC0;
|
*y++ = t >> 14 & 0xFFC0;
|
||||||
if (src >= line_end - 2) {
|
|
||||||
if (!(width & 1)) {
|
if (line_end - src < 4)
|
||||||
*y = 0x80;
|
|
||||||
src += 2;
|
|
||||||
}
|
|
||||||
line_end += stride;
|
|
||||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
|
||||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
|
||||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
|
||||||
break;
|
break;
|
||||||
}
|
|
||||||
|
|
||||||
t = AV_RL32(src);
|
t = AV_RL32(src);
|
||||||
src += 4;
|
src += 4;
|
||||||
@@ -107,15 +107,8 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
*y++ = t >> 4 & 0xFFC0;
|
*y++ = t >> 4 & 0xFFC0;
|
||||||
*u++ = t >> 14 & 0xFFC0;
|
*u++ = t >> 14 & 0xFFC0;
|
||||||
|
|
||||||
if (src >= line_end - 1) {
|
if (line_end - src < 4)
|
||||||
*y = 0x80;
|
|
||||||
src++;
|
|
||||||
line_end += stride;
|
|
||||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
|
||||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
|
||||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
|
||||||
break;
|
break;
|
||||||
}
|
|
||||||
|
|
||||||
t = AV_RL32(src);
|
t = AV_RL32(src);
|
||||||
src += 4;
|
src += 4;
|
||||||
@@ -123,18 +116,21 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
*v++ = t >> 4 & 0xFFC0;
|
*v++ = t >> 4 & 0xFFC0;
|
||||||
*y++ = t >> 14 & 0xFFC0;
|
*y++ = t >> 14 & 0xFFC0;
|
||||||
|
|
||||||
if (src >= line_end - 2) {
|
if (width - x < 6)
|
||||||
if (width & 1) {
|
|
||||||
*y = 0x80;
|
|
||||||
src += 2;
|
|
||||||
}
|
|
||||||
line_end += stride;
|
|
||||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
|
||||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
|
||||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (x < width) {
|
||||||
|
y = x + (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||||
|
u = x/2 + (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||||
|
v = x/2 + (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||||
|
memcpy(y, y_temp, sizeof(*y) * (width - x));
|
||||||
|
memcpy(u, u_temp, sizeof(*u) * (width - x + 1) / 2);
|
||||||
|
memcpy(v, v_temp, sizeof(*v) * (width - x + 1) / 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
line_end += stride;
|
||||||
|
src = line_end - stride;
|
||||||
}
|
}
|
||||||
|
|
||||||
*got_frame = 1;
|
*got_frame = 1;
|
||||||
|
@@ -81,9 +81,13 @@ static void to_meta_with_crop(AVCodecContext *avctx, AVFrame *p, int *dest)
|
|||||||
for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
|
for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
|
||||||
for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
|
for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
|
||||||
if(x < width && y < height) {
|
if(x < width && y < height) {
|
||||||
|
if (x + 1 < width) {
|
||||||
/* build average over 2 pixels */
|
/* build average over 2 pixels */
|
||||||
luma = (src[(x + 0 + y * p->linesize[0])] +
|
luma = (src[(x + 0 + y * p->linesize[0])] +
|
||||||
src[(x + 1 + y * p->linesize[0])]) / 2;
|
src[(x + 1 + y * p->linesize[0])]) / 2;
|
||||||
|
} else {
|
||||||
|
luma = src[(x + y * p->linesize[0])];
|
||||||
|
}
|
||||||
/* write blocks as linear data now so they are suitable for elbg */
|
/* write blocks as linear data now so they are suitable for elbg */
|
||||||
dest[0] = luma;
|
dest[0] = luma;
|
||||||
}
|
}
|
||||||
@@ -315,7 +319,9 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
} else {
|
} else {
|
||||||
/* fill up mc_meta_charset with data until lifetime exceeds */
|
/* fill up mc_meta_charset with data until lifetime exceeds */
|
||||||
if (c->mc_frame_counter < c->mc_lifetime) {
|
if (c->mc_frame_counter < c->mc_lifetime) {
|
||||||
*p = *pict;
|
ret = av_frame_ref(p, pict);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
p->pict_type = AV_PICTURE_TYPE_I;
|
p->pict_type = AV_PICTURE_TYPE_I;
|
||||||
p->key_frame = 1;
|
p->key_frame = 1;
|
||||||
to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
|
to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
|
||||||
@@ -332,8 +338,8 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
req_size = 0;
|
req_size = 0;
|
||||||
/* any frames to encode? */
|
/* any frames to encode? */
|
||||||
if (c->mc_lifetime) {
|
if (c->mc_lifetime) {
|
||||||
req_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
|
int alloc_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
|
||||||
if ((ret = ff_alloc_packet2(avctx, pkt, req_size)) < 0)
|
if ((ret = ff_alloc_packet2(avctx, pkt, alloc_size)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
buf = pkt->data;
|
buf = pkt->data;
|
||||||
|
|
||||||
@@ -350,6 +356,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
/* advance pointers */
|
/* advance pointers */
|
||||||
buf += charset_size;
|
buf += charset_size;
|
||||||
charset += charset_size;
|
charset += charset_size;
|
||||||
|
req_size += charset_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* write x frames to buf */
|
/* write x frames to buf */
|
||||||
|
@@ -81,7 +81,7 @@ enum BandType {
|
|||||||
INTENSITY_BT = 15, ///< Scalefactor data are intensity stereo positions.
|
INTENSITY_BT = 15, ///< Scalefactor data are intensity stereo positions.
|
||||||
};
|
};
|
||||||
|
|
||||||
#define IS_CODEBOOK_UNSIGNED(x) ((x - 1) & 10)
|
#define IS_CODEBOOK_UNSIGNED(x) (((x) - 1) & 10)
|
||||||
|
|
||||||
enum ChannelPosition {
|
enum ChannelPosition {
|
||||||
AAC_CHANNEL_OFF = 0,
|
AAC_CHANNEL_OFF = 0,
|
||||||
|
@@ -34,7 +34,7 @@ static int aac_sync(uint64_t state, AACAC3ParseContext *hdr_info,
|
|||||||
int size;
|
int size;
|
||||||
union {
|
union {
|
||||||
uint64_t u64;
|
uint64_t u64;
|
||||||
uint8_t u8[8];
|
uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||||
} tmp;
|
} tmp;
|
||||||
|
|
||||||
tmp.u64 = av_be2ne64(state);
|
tmp.u64 = av_be2ne64(state);
|
||||||
|
@@ -419,7 +419,7 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
|
|||||||
* Save current output configuration if and only if it has been locked.
|
* Save current output configuration if and only if it has been locked.
|
||||||
*/
|
*/
|
||||||
static void push_output_configuration(AACContext *ac) {
|
static void push_output_configuration(AACContext *ac) {
|
||||||
if (ac->oc[1].status == OC_LOCKED) {
|
if (ac->oc[1].status == OC_LOCKED || ac->oc[0].status == OC_NONE) {
|
||||||
ac->oc[0] = ac->oc[1];
|
ac->oc[0] = ac->oc[1];
|
||||||
}
|
}
|
||||||
ac->oc[1].status = OC_NONE;
|
ac->oc[1].status = OC_NONE;
|
||||||
|
@@ -165,7 +165,7 @@ static void put_audio_specific_config(AVCodecContext *avctx)
|
|||||||
PutBitContext pb;
|
PutBitContext pb;
|
||||||
AACEncContext *s = avctx->priv_data;
|
AACEncContext *s = avctx->priv_data;
|
||||||
|
|
||||||
init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8);
|
init_put_bits(&pb, avctx->extradata, avctx->extradata_size);
|
||||||
put_bits(&pb, 5, 2); //object type - AAC-LC
|
put_bits(&pb, 5, 2); //object type - AAC-LC
|
||||||
put_bits(&pb, 4, s->samplerate_index); //sample rate index
|
put_bits(&pb, 4, s->samplerate_index); //sample rate index
|
||||||
put_bits(&pb, 4, s->channels);
|
put_bits(&pb, 4, s->channels);
|
||||||
|
@@ -727,7 +727,10 @@ static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel,
|
|||||||
if (active_lines > 0.0f)
|
if (active_lines > 0.0f)
|
||||||
band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction);
|
band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction);
|
||||||
pe += calc_pe_3gpp(band);
|
pe += calc_pe_3gpp(band);
|
||||||
|
if (band->thr > 0.0f)
|
||||||
band->norm_fac = band->active_lines / band->thr;
|
band->norm_fac = band->active_lines / band->thr;
|
||||||
|
else
|
||||||
|
band->norm_fac = 0.0f;
|
||||||
norm_fac += band->norm_fac;
|
norm_fac += band->norm_fac;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -520,7 +520,7 @@ static int sbr_make_f_master(AACContext *ac, SpectralBandReplication *sbr,
|
|||||||
/// High Frequency Generation - Patch Construction (14496-3 sp04 p216 fig. 4.46)
|
/// High Frequency Generation - Patch Construction (14496-3 sp04 p216 fig. 4.46)
|
||||||
static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr)
|
static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr)
|
||||||
{
|
{
|
||||||
int i, k, sb = 0;
|
int i, k, last_k = -1, last_msb = -1, sb = 0;
|
||||||
int msb = sbr->k[0];
|
int msb = sbr->k[0];
|
||||||
int usb = sbr->kx[1];
|
int usb = sbr->kx[1];
|
||||||
int goal_sb = ((1000 << 11) + (sbr->sample_rate >> 1)) / sbr->sample_rate;
|
int goal_sb = ((1000 << 11) + (sbr->sample_rate >> 1)) / sbr->sample_rate;
|
||||||
@@ -534,6 +534,12 @@ static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr)
|
|||||||
|
|
||||||
do {
|
do {
|
||||||
int odd = 0;
|
int odd = 0;
|
||||||
|
if (k == last_k && msb == last_msb) {
|
||||||
|
av_log(ac->avctx, AV_LOG_ERROR, "patch construction failed\n");
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
last_k = k;
|
||||||
|
last_msb = msb;
|
||||||
for (i = k; i == k || sb > (sbr->k[0] - 1 + msb - odd); i--) {
|
for (i = k; i == k || sb > (sbr->k[0] - 1 + msb - odd); i--) {
|
||||||
sb = sbr->f_master[i];
|
sb = sbr->f_master[i];
|
||||||
odd = (sb + sbr->k[0]) & 1;
|
odd = (sb + sbr->k[0]) & 1;
|
||||||
|
@@ -139,7 +139,7 @@ static int aasc_decode_frame(AVCodecContext *avctx,
|
|||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
/* report that the buffer was completely consumed */
|
/* report that the buffer was completely consumed */
|
||||||
return buf_size;
|
return avpkt->size;
|
||||||
}
|
}
|
||||||
|
|
||||||
static av_cold int aasc_decode_end(AVCodecContext *avctx)
|
static av_cold int aasc_decode_end(AVCodecContext *avctx)
|
||||||
|
@@ -131,6 +131,9 @@ int ff_ac3_bit_alloc_calc_mask(AC3BitAllocParameters *s, int16_t *band_psd,
|
|||||||
int band_start, band_end, begin, end1;
|
int band_start, band_end, begin, end1;
|
||||||
int lowcomp, fastleak, slowleak;
|
int lowcomp, fastleak, slowleak;
|
||||||
|
|
||||||
|
if (end <= 0)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
/* excitation function */
|
/* excitation function */
|
||||||
band_start = ff_ac3_bin_to_band_tab[start];
|
band_start = ff_ac3_bin_to_band_tab[start];
|
||||||
band_end = ff_ac3_bin_to_band_tab[end-1] + 1;
|
band_end = ff_ac3_bin_to_band_tab[end-1] + 1;
|
||||||
|
@@ -147,7 +147,7 @@ static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
|
|||||||
int err;
|
int err;
|
||||||
union {
|
union {
|
||||||
uint64_t u64;
|
uint64_t u64;
|
||||||
uint8_t u8[8];
|
uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||||
} tmp = { av_be2ne64(state) };
|
} tmp = { av_be2ne64(state) };
|
||||||
AC3HeaderInfo hdr;
|
AC3HeaderInfo hdr;
|
||||||
GetBitContext gbc;
|
GetBitContext gbc;
|
||||||
|
@@ -260,7 +260,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
|
|||||||
energy_cpl = energy[blk][CPL_CH][bnd];
|
energy_cpl = energy[blk][CPL_CH][bnd];
|
||||||
energy_ch = energy[blk][ch][bnd];
|
energy_ch = energy[blk][ch][bnd];
|
||||||
blk1 = blk+1;
|
blk1 = blk+1;
|
||||||
while (!s->blocks[blk1].new_cpl_coords[ch] && blk1 < s->num_blocks) {
|
while (blk1 < s->num_blocks && !s->blocks[blk1].new_cpl_coords[ch]) {
|
||||||
if (s->blocks[blk1].cpl_in_use) {
|
if (s->blocks[blk1].cpl_in_use) {
|
||||||
energy_cpl += energy[blk1][CPL_CH][bnd];
|
energy_cpl += energy[blk1][CPL_CH][bnd];
|
||||||
energy_ch += energy[blk1][ch][bnd];
|
energy_ch += energy[blk1][ch][bnd];
|
||||||
|
@@ -570,6 +570,8 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
|
|||||||
case AV_CODEC_ID_ADPCM_IMA_DK4:
|
case AV_CODEC_ID_ADPCM_IMA_DK4:
|
||||||
if (avctx->block_align > 0)
|
if (avctx->block_align > 0)
|
||||||
buf_size = FFMIN(buf_size, avctx->block_align);
|
buf_size = FFMIN(buf_size, avctx->block_align);
|
||||||
|
if (buf_size < 4 * ch)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
|
nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
|
||||||
break;
|
break;
|
||||||
case AV_CODEC_ID_ADPCM_IMA_RAD:
|
case AV_CODEC_ID_ADPCM_IMA_RAD:
|
||||||
@@ -583,13 +585,15 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
|
|||||||
int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
|
int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
|
||||||
if (avctx->block_align > 0)
|
if (avctx->block_align > 0)
|
||||||
buf_size = FFMIN(buf_size, avctx->block_align);
|
buf_size = FFMIN(buf_size, avctx->block_align);
|
||||||
|
if (buf_size < 4 * ch)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
|
nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case AV_CODEC_ID_ADPCM_MS:
|
case AV_CODEC_ID_ADPCM_MS:
|
||||||
if (avctx->block_align > 0)
|
if (avctx->block_align > 0)
|
||||||
buf_size = FFMIN(buf_size, avctx->block_align);
|
buf_size = FFMIN(buf_size, avctx->block_align);
|
||||||
nb_samples = 2 + (buf_size - 7 * ch) * 2 / ch;
|
nb_samples = (buf_size - 6 * ch) * 2 / ch;
|
||||||
break;
|
break;
|
||||||
case AV_CODEC_ID_ADPCM_SBPRO_2:
|
case AV_CODEC_ID_ADPCM_SBPRO_2:
|
||||||
case AV_CODEC_ID_ADPCM_SBPRO_3:
|
case AV_CODEC_ID_ADPCM_SBPRO_3:
|
||||||
@@ -602,6 +606,8 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
|
|||||||
case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
|
case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
|
||||||
}
|
}
|
||||||
if (!s->status[0].step_index) {
|
if (!s->status[0].step_index) {
|
||||||
|
if (buf_size < ch)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
nb_samples++;
|
nb_samples++;
|
||||||
buf_size -= ch;
|
buf_size -= ch;
|
||||||
}
|
}
|
||||||
@@ -1517,6 +1523,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
|
|
||||||
*got_frame_ptr = 1;
|
*got_frame_ptr = 1;
|
||||||
|
|
||||||
|
if (avpkt->size < bytestream2_tell(&gb)) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
|
||||||
|
return avpkt->size;
|
||||||
|
}
|
||||||
|
|
||||||
return bytestream2_tell(&gb);
|
return bytestream2_tell(&gb);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -541,7 +541,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
|||||||
case AV_CODEC_ID_ADPCM_IMA_QT:
|
case AV_CODEC_ID_ADPCM_IMA_QT:
|
||||||
{
|
{
|
||||||
PutBitContext pb;
|
PutBitContext pb;
|
||||||
init_put_bits(&pb, dst, pkt_size * 8);
|
init_put_bits(&pb, dst, pkt_size);
|
||||||
|
|
||||||
for (ch = 0; ch < avctx->channels; ch++) {
|
for (ch = 0; ch < avctx->channels; ch++) {
|
||||||
ADPCMChannelStatus *status = &c->status[ch];
|
ADPCMChannelStatus *status = &c->status[ch];
|
||||||
@@ -570,7 +570,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
|||||||
case AV_CODEC_ID_ADPCM_SWF:
|
case AV_CODEC_ID_ADPCM_SWF:
|
||||||
{
|
{
|
||||||
PutBitContext pb;
|
PutBitContext pb;
|
||||||
init_put_bits(&pb, dst, pkt_size * 8);
|
init_put_bits(&pb, dst, pkt_size);
|
||||||
|
|
||||||
n = frame->nb_samples - 1;
|
n = frame->nb_samples - 1;
|
||||||
|
|
||||||
|
@@ -150,6 +150,7 @@ typedef struct AICContext {
|
|||||||
int16_t *data_ptr[NUM_BANDS];
|
int16_t *data_ptr[NUM_BANDS];
|
||||||
|
|
||||||
DECLARE_ALIGNED(16, int16_t, block)[64];
|
DECLARE_ALIGNED(16, int16_t, block)[64];
|
||||||
|
DECLARE_ALIGNED(16, uint8_t, quant_matrix)[64];
|
||||||
} AICContext;
|
} AICContext;
|
||||||
|
|
||||||
static int aic_decode_header(AICContext *ctx, const uint8_t *src, int size)
|
static int aic_decode_header(AICContext *ctx, const uint8_t *src, int size)
|
||||||
@@ -285,7 +286,7 @@ static void recombine_block_il(int16_t *dst, const uint8_t *scan,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void unquant_block(int16_t *block, int q)
|
static void unquant_block(int16_t *block, int q, uint8_t *quant_matrix)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
@@ -293,7 +294,7 @@ static void unquant_block(int16_t *block, int q)
|
|||||||
int val = (uint16_t)block[i];
|
int val = (uint16_t)block[i];
|
||||||
int sign = val & 1;
|
int sign = val & 1;
|
||||||
|
|
||||||
block[i] = (((val >> 1) ^ -sign) * q * aic_quant_matrix[i] >> 4)
|
block[i] = (((val >> 1) ^ -sign) * q * quant_matrix[i] >> 4)
|
||||||
+ sign;
|
+ sign;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -334,7 +335,7 @@ static int aic_decode_slice(AICContext *ctx, int mb_x, int mb_y,
|
|||||||
else
|
else
|
||||||
recombine_block_il(ctx->block, ctx->scantable.permutated,
|
recombine_block_il(ctx->block, ctx->scantable.permutated,
|
||||||
&base_y, &ext_y, blk);
|
&base_y, &ext_y, blk);
|
||||||
unquant_block(ctx->block, ctx->quant);
|
unquant_block(ctx->block, ctx->quant, ctx->quant_matrix);
|
||||||
ctx->dsp.idct(ctx->block);
|
ctx->dsp.idct(ctx->block);
|
||||||
|
|
||||||
if (!ctx->interlaced) {
|
if (!ctx->interlaced) {
|
||||||
@@ -352,7 +353,7 @@ static int aic_decode_slice(AICContext *ctx, int mb_x, int mb_y,
|
|||||||
for (blk = 0; blk < 2; blk++) {
|
for (blk = 0; blk < 2; blk++) {
|
||||||
recombine_block(ctx->block, ctx->scantable.permutated,
|
recombine_block(ctx->block, ctx->scantable.permutated,
|
||||||
&base_c, &ext_c);
|
&base_c, &ext_c);
|
||||||
unquant_block(ctx->block, ctx->quant);
|
unquant_block(ctx->block, ctx->quant, ctx->quant_matrix);
|
||||||
ctx->dsp.idct(ctx->block);
|
ctx->dsp.idct(ctx->block);
|
||||||
ctx->dsp.put_signed_pixels_clamped(ctx->block, C[blk],
|
ctx->dsp.put_signed_pixels_clamped(ctx->block, C[blk],
|
||||||
ctx->frame->linesize[blk + 1]);
|
ctx->frame->linesize[blk + 1]);
|
||||||
@@ -430,12 +431,14 @@ static av_cold int aic_decode_init(AVCodecContext *avctx)
|
|||||||
for (i = 0; i < 64; i++)
|
for (i = 0; i < 64; i++)
|
||||||
scan[i] = i;
|
scan[i] = i;
|
||||||
ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable, scan);
|
ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable, scan);
|
||||||
|
for (i = 0; i < 64; i++)
|
||||||
|
ctx->quant_matrix[ctx->dsp.idct_permutation[i]] = aic_quant_matrix[i];
|
||||||
|
|
||||||
ctx->mb_width = FFALIGN(avctx->width, 16) >> 4;
|
ctx->mb_width = FFALIGN(avctx->width, 16) >> 4;
|
||||||
ctx->mb_height = FFALIGN(avctx->height, 16) >> 4;
|
ctx->mb_height = FFALIGN(avctx->height, 16) >> 4;
|
||||||
|
|
||||||
ctx->num_x_slices = 16;
|
ctx->num_x_slices = (ctx->mb_width + 15) >> 4;
|
||||||
ctx->slice_width = ctx->mb_width / 16;
|
ctx->slice_width = 16;
|
||||||
for (i = 1; i < 32; i++) {
|
for (i = 1; i < 32; i++) {
|
||||||
if (!(ctx->mb_width % i) && (ctx->mb_width / i < 32)) {
|
if (!(ctx->mb_width % i) && (ctx->mb_width / i < 32)) {
|
||||||
ctx->slice_width = ctx->mb_width / i;
|
ctx->slice_width = ctx->mb_width / i;
|
||||||
|
@@ -311,6 +311,11 @@ static int decode_element(AVCodecContext *avctx, AVFrame *frame, int ch_index,
|
|||||||
int lpc_quant[2];
|
int lpc_quant[2];
|
||||||
int rice_history_mult[2];
|
int rice_history_mult[2];
|
||||||
|
|
||||||
|
if (!alac->rice_limit) {
|
||||||
|
avpriv_request_sample(alac->avctx, "Compression with rice limit 0");
|
||||||
|
return AVERROR(ENOSYS);
|
||||||
|
}
|
||||||
|
|
||||||
decorr_shift = get_bits(&alac->gb, 8);
|
decorr_shift = get_bits(&alac->gb, 8);
|
||||||
decorr_left_weight = get_bits(&alac->gb, 8);
|
decorr_left_weight = get_bits(&alac->gb, 8);
|
||||||
|
|
||||||
|
@@ -280,7 +280,7 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
|
|||||||
GetBitContext gb;
|
GetBitContext gb;
|
||||||
uint64_t ht_size;
|
uint64_t ht_size;
|
||||||
int i, config_offset;
|
int i, config_offset;
|
||||||
MPEG4AudioConfig m4ac;
|
MPEG4AudioConfig m4ac = {0};
|
||||||
ALSSpecificConfig *sconf = &ctx->sconf;
|
ALSSpecificConfig *sconf = &ctx->sconf;
|
||||||
AVCodecContext *avctx = ctx->avctx;
|
AVCodecContext *avctx = ctx->avctx;
|
||||||
uint32_t als_id, header_size, trailer_size;
|
uint32_t als_id, header_size, trailer_size;
|
||||||
@@ -355,11 +355,15 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
|
|||||||
|
|
||||||
ctx->cs_switch = 1;
|
ctx->cs_switch = 1;
|
||||||
|
|
||||||
|
for (i = 0; i < avctx->channels; i++) {
|
||||||
|
sconf->chan_pos[i] = -1;
|
||||||
|
}
|
||||||
|
|
||||||
for (i = 0; i < avctx->channels; i++) {
|
for (i = 0; i < avctx->channels; i++) {
|
||||||
int idx;
|
int idx;
|
||||||
|
|
||||||
idx = get_bits(&gb, chan_pos_bits);
|
idx = get_bits(&gb, chan_pos_bits);
|
||||||
if (idx >= avctx->channels) {
|
if (idx >= avctx->channels || sconf->chan_pos[idx] != -1) {
|
||||||
av_log(avctx, AV_LOG_WARNING, "Invalid channel reordering.\n");
|
av_log(avctx, AV_LOG_WARNING, "Invalid channel reordering.\n");
|
||||||
ctx->cs_switch = 0;
|
ctx->cs_switch = 0;
|
||||||
break;
|
break;
|
||||||
@@ -676,7 +680,7 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
|||||||
|
|
||||||
|
|
||||||
if (!sconf->rlslms) {
|
if (!sconf->rlslms) {
|
||||||
if (sconf->adapt_order) {
|
if (sconf->adapt_order && sconf->max_order) {
|
||||||
int opt_order_length = av_ceil_log2(av_clip((bd->block_length >> 3) - 1,
|
int opt_order_length = av_ceil_log2(av_clip((bd->block_length >> 3) - 1,
|
||||||
2, sconf->max_order + 1));
|
2, sconf->max_order + 1));
|
||||||
*bd->opt_order = get_bits(gb, opt_order_length);
|
*bd->opt_order = get_bits(gb, opt_order_length);
|
||||||
@@ -1226,6 +1230,7 @@ static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd,
|
|||||||
ALSChannelData *ch = cd[c];
|
ALSChannelData *ch = cd[c];
|
||||||
unsigned int dep = 0;
|
unsigned int dep = 0;
|
||||||
unsigned int channels = ctx->avctx->channels;
|
unsigned int channels = ctx->avctx->channels;
|
||||||
|
unsigned int channel_size = ctx->sconf.frame_length + ctx->sconf.max_order;
|
||||||
|
|
||||||
if (reverted[c])
|
if (reverted[c])
|
||||||
return 0;
|
return 0;
|
||||||
@@ -1257,9 +1262,9 @@ static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd,
|
|||||||
|
|
||||||
dep = 0;
|
dep = 0;
|
||||||
while (!ch[dep].stop_flag) {
|
while (!ch[dep].stop_flag) {
|
||||||
unsigned int smp;
|
ptrdiff_t smp;
|
||||||
unsigned int begin = 1;
|
ptrdiff_t begin = 1;
|
||||||
unsigned int end = bd->block_length - 1;
|
ptrdiff_t end = bd->block_length - 1;
|
||||||
int64_t y;
|
int64_t y;
|
||||||
int32_t *master = ctx->raw_samples[ch[dep].master_channel] + offset;
|
int32_t *master = ctx->raw_samples[ch[dep].master_channel] + offset;
|
||||||
|
|
||||||
@@ -1268,11 +1273,28 @@ static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd,
|
|||||||
|
|
||||||
if (ch[dep].time_diff_sign) {
|
if (ch[dep].time_diff_sign) {
|
||||||
t = -t;
|
t = -t;
|
||||||
|
if (begin < t) {
|
||||||
|
av_log(ctx->avctx, AV_LOG_ERROR, "begin %td smaller than time diff index %d.\n", begin, t);
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
begin -= t;
|
begin -= t;
|
||||||
} else {
|
} else {
|
||||||
|
if (end < t) {
|
||||||
|
av_log(ctx->avctx, AV_LOG_ERROR, "end %td smaller than time diff index %d.\n", end, t);
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
end -= t;
|
end -= t;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (FFMIN(begin - 1, begin - 1 + t) < ctx->raw_buffer - master ||
|
||||||
|
FFMAX(end + 1, end + 1 + t) > ctx->raw_buffer + channels * channel_size - master) {
|
||||||
|
av_log(ctx->avctx, AV_LOG_ERROR,
|
||||||
|
"sample pointer range [%p, %p] not contained in raw_buffer [%p, %p].\n",
|
||||||
|
master + FFMIN(begin - 1, begin - 1 + t), master + FFMAX(end + 1, end + 1 + t),
|
||||||
|
ctx->raw_buffer, ctx->raw_buffer + channels * channel_size);
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
for (smp = begin; smp < end; smp++) {
|
for (smp = begin; smp < end; smp++) {
|
||||||
y = (1 << 6) +
|
y = (1 << 6) +
|
||||||
MUL64(ch[dep].weighting[0], master[smp - 1 ]) +
|
MUL64(ch[dep].weighting[0], master[smp - 1 ]) +
|
||||||
@@ -1285,6 +1307,16 @@ static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd,
|
|||||||
bd->raw_samples[smp] += y >> 7;
|
bd->raw_samples[smp] += y >> 7;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
|
if (begin - 1 < ctx->raw_buffer - master ||
|
||||||
|
end + 1 > ctx->raw_buffer + channels * channel_size - master) {
|
||||||
|
av_log(ctx->avctx, AV_LOG_ERROR,
|
||||||
|
"sample pointer range [%p, %p] not contained in raw_buffer [%p, %p].\n",
|
||||||
|
master + begin - 1, master + end + 1,
|
||||||
|
ctx->raw_buffer, ctx->raw_buffer + channels * channel_size);
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
for (smp = begin; smp < end; smp++) {
|
for (smp = begin; smp < end; smp++) {
|
||||||
y = (1 << 6) +
|
y = (1 << 6) +
|
||||||
MUL64(ch[dep].weighting[0], master[smp - 1]) +
|
MUL64(ch[dep].weighting[0], master[smp - 1]) +
|
||||||
@@ -1436,6 +1468,11 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
|
|||||||
|
|
||||||
// TODO: read_diff_float_data
|
// TODO: read_diff_float_data
|
||||||
|
|
||||||
|
if (get_bits_left(gb) < 0) {
|
||||||
|
av_log(ctx->avctx, AV_LOG_ERROR, "Overread %d\n", -get_bits_left(gb));
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1699,9 +1736,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
|||||||
|
|
||||||
// allocate and assign channel data buffer for mcc mode
|
// allocate and assign channel data buffer for mcc mode
|
||||||
if (sconf->mc_coding) {
|
if (sconf->mc_coding) {
|
||||||
ctx->chan_data_buffer = av_malloc(sizeof(*ctx->chan_data_buffer) *
|
ctx->chan_data_buffer = av_mallocz(sizeof(*ctx->chan_data_buffer) *
|
||||||
num_buffers * num_buffers);
|
num_buffers * num_buffers);
|
||||||
ctx->chan_data = av_malloc(sizeof(*ctx->chan_data) *
|
ctx->chan_data = av_mallocz(sizeof(*ctx->chan_data) *
|
||||||
num_buffers);
|
num_buffers);
|
||||||
ctx->reverted_channels = av_malloc(sizeof(*ctx->reverted_channels) *
|
ctx->reverted_channels = av_malloc(sizeof(*ctx->reverted_channels) *
|
||||||
num_buffers);
|
num_buffers);
|
||||||
|
@@ -417,7 +417,7 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
switch(buf[0]) {
|
switch(buf[0]) {
|
||||||
case '0': case '1': case '2': case '3': case '4':
|
case '0': case '1': case '2': case '3': case '4':
|
||||||
case '5': case '6': case '7': case '8': case '9':
|
case '5': case '6': case '7': case '8': case '9':
|
||||||
if (s->nb_args < MAX_NB_ARGS)
|
if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args] < 6553)
|
||||||
s->args[s->nb_args] = FFMAX(s->args[s->nb_args], 0) * 10 + buf[0] - '0';
|
s->args[s->nb_args] = FFMAX(s->args[s->nb_args], 0) * 10 + buf[0] - '0';
|
||||||
break;
|
break;
|
||||||
case ';':
|
case ';':
|
||||||
|
@@ -598,12 +598,12 @@ static void decode_array_0000(APEContext *ctx, GetBitContext *gb,
|
|||||||
int ksummax, ksummin;
|
int ksummax, ksummin;
|
||||||
|
|
||||||
rice->ksum = 0;
|
rice->ksum = 0;
|
||||||
for (i = 0; i < 5; i++) {
|
for (i = 0; i < FFMIN(blockstodecode, 5); i++) {
|
||||||
out[i] = get_rice_ook(&ctx->gb, 10);
|
out[i] = get_rice_ook(&ctx->gb, 10);
|
||||||
rice->ksum += out[i];
|
rice->ksum += out[i];
|
||||||
}
|
}
|
||||||
rice->k = av_log2(rice->ksum / 10) + 1;
|
rice->k = av_log2(rice->ksum / 10) + 1;
|
||||||
for (; i < 64; i++) {
|
for (; i < FFMIN(blockstodecode, 64); i++) {
|
||||||
out[i] = get_rice_ook(&ctx->gb, rice->k);
|
out[i] = get_rice_ook(&ctx->gb, rice->k);
|
||||||
rice->ksum += out[i];
|
rice->ksum += out[i];
|
||||||
rice->k = av_log2(rice->ksum / ((i + 1) * 2)) + 1;
|
rice->k = av_log2(rice->ksum / ((i + 1) * 2)) + 1;
|
||||||
@@ -1467,13 +1467,13 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %u.\n", nblocks);
|
av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %u.\n", nblocks);
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
s->samples = nblocks;
|
|
||||||
|
|
||||||
/* Initialize the frame decoder */
|
/* Initialize the frame decoder */
|
||||||
if (init_frame_decoder(s) < 0) {
|
if (init_frame_decoder(s) < 0) {
|
||||||
av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
|
av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
s->samples = nblocks;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!s->data) {
|
if (!s->data) {
|
||||||
|
@@ -41,10 +41,10 @@ function ff_scalarproduct_int16_neon, export=1
|
|||||||
|
|
||||||
vpadd.s32 d16, d0, d1
|
vpadd.s32 d16, d0, d1
|
||||||
vpadd.s32 d17, d2, d3
|
vpadd.s32 d17, d2, d3
|
||||||
vpadd.s32 d10, d4, d5
|
vpadd.s32 d18, d4, d5
|
||||||
vpadd.s32 d11, d6, d7
|
vpadd.s32 d19, d6, d7
|
||||||
vpadd.s32 d0, d16, d17
|
vpadd.s32 d0, d16, d17
|
||||||
vpadd.s32 d1, d10, d11
|
vpadd.s32 d1, d18, d19
|
||||||
vpadd.s32 d2, d0, d1
|
vpadd.s32 d2, d0, d1
|
||||||
vpaddl.s32 d3, d2
|
vpaddl.s32 d3, d2
|
||||||
vmov.32 r0, d3[0]
|
vmov.32 r0, d3[0]
|
||||||
@@ -81,10 +81,10 @@ function ff_scalarproduct_and_madd_int16_neon, export=1
|
|||||||
|
|
||||||
vpadd.s32 d16, d0, d1
|
vpadd.s32 d16, d0, d1
|
||||||
vpadd.s32 d17, d2, d3
|
vpadd.s32 d17, d2, d3
|
||||||
vpadd.s32 d10, d4, d5
|
vpadd.s32 d18, d4, d5
|
||||||
vpadd.s32 d11, d6, d7
|
vpadd.s32 d19, d6, d7
|
||||||
vpadd.s32 d0, d16, d17
|
vpadd.s32 d0, d16, d17
|
||||||
vpadd.s32 d1, d10, d11
|
vpadd.s32 d1, d18, d19
|
||||||
vpadd.s32 d2, d0, d1
|
vpadd.s32 d2, d0, d1
|
||||||
vpaddl.s32 d3, d2
|
vpaddl.s32 d3, d2
|
||||||
vmov.32 r0, d3[0]
|
vmov.32 r0, d3[0]
|
||||||
|
@@ -80,9 +80,16 @@ static int ass_encode_frame(AVCodecContext *avctx,
          * will be "Marked=N" instead of the layer num, so we will
          * have layer=0, which is fine. */
         layer = strtol(ass, &p, 10);
-        if (*p) p += strcspn(p, ",") + 1; // skip layer or marked
-        if (*p) p += strcspn(p, ",") + 1; // skip start timestamp
-        if (*p) p += strcspn(p, ",") + 1; // skip end timestamp
+
+#define SKIP_ENTRY(ptr) do { \
+    char *sep = strchr(ptr, ','); \
+    if (sep) \
+        ptr = sep + 1; \
+} while (0)
+
+        SKIP_ENTRY(p); // skip layer or marked
+        SKIP_ENTRY(p); // skip start timestamp
+        SKIP_ENTRY(p); // skip end timestamp
         snprintf(ass_line, sizeof(ass_line), "%d,%ld,%s", ++s->id, layer, p);
         ass_line[strcspn(ass_line, "\r\n")] = 0;
         ass = ass_line;
@@ -356,7 +356,7 @@ int av_packet_merge_side_data(AVPacket *pkt){
 int av_packet_split_side_data(AVPacket *pkt){
     if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){
         int i;
-        unsigned int size, orig_pktsize = pkt->size;
+        unsigned int size;
         uint8_t *p;
 
         p = pkt->data + pkt->size - 8 - 5;
@@ -377,7 +377,7 @@ int av_packet_split_side_data(AVPacket *pkt){
         for (i=0; ; i++){
             size= AV_RB32(p);
             av_assert0(size<=INT_MAX && p - pkt->data >= size);
-            pkt->side_data[i].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
+            pkt->side_data[i].data = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
             pkt->side_data[i].size = size;
             pkt->side_data[i].type = p[4]&127;
             if (!pkt->side_data[i].data)
@@ -389,13 +389,6 @@ int av_packet_split_side_data(AVPacket *pkt){
             p-= size+5;
         }
         pkt->size -= 8;
-        /* FFMIN() prevents overflow in case the packet wasn't allocated with
-         * proper padding.
-         * If the side data is smaller than the buffer padding size, the
-         * remaining bytes should have already been filled with zeros by the
-         * original packet allocation anyway. */
-        memset(pkt->data + pkt->size, 0,
-               FFMIN(orig_pktsize - pkt->size, FF_INPUT_BUFFER_PADDING_SIZE));
         pkt->side_data_elems = i+1;
         return 1;
     }
@@ -120,6 +120,7 @@ typedef struct BinkContext {
     int version; ///< internal Bink file version
     int has_alpha;
     int swap_planes;
+    unsigned frame_num;
 
     Bundle bundle[BINKB_NB_SRC]; ///< bundles for decoding all data types
     Tree col_high[16]; ///< trees for decoding high nibble in "colours" data type
@@ -1206,6 +1207,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
     if (c->version >= 'i')
         skip_bits_long(&gb, 32);
 
+    c->frame_num++;
+
     for (plane = 0; plane < 3; plane++) {
         plane_idx = (!plane || !c->swap_planes) ? plane : (plane ^ 3);
 
@@ -1214,7 +1217,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
                 return ret;
         } else {
             if ((ret = binkb_decode_plane(c, frame, &gb, plane_idx,
-                                          !avctx->frame_number, !!plane)) < 0)
+                                          c->frame_num == 1, !!plane)) < 0)
                 return ret;
         }
         if (get_bits_count(&gb) >= bits_count)
@@ -1332,6 +1335,13 @@ static av_cold int decode_end(AVCodecContext *avctx)
     return 0;
 }
 
+static void flush(AVCodecContext *avctx)
+{
+    BinkContext * const c = avctx->priv_data;
+
+    c->frame_num = 0;
+}
+
 AVCodec ff_bink_decoder = {
     .name = "binkvideo",
     .type = AVMEDIA_TYPE_VIDEO,
@@ -1341,5 +1351,6 @@ AVCodec ff_bink_decoder = {
     .close = decode_end,
     .decode = decode_frame,
     .long_name = NULL_IF_CONFIG_SMALL("Bink video"),
+    .flush = flush,
     .capabilities = CODEC_CAP_DR1,
 };
@@ -306,7 +306,7 @@ STOP_TIMER("get_cabac_bypass")
 
     for(i=0; i<SIZE; i++){
         START_TIMER
-        if( (r[i]&1) != get_cabac(&c, state) )
+        if( (r[i]&1) != get_cabac_noinline(&c, state) )
             av_log(NULL, AV_LOG_ERROR, "CABAC failure at %d\n", i);
         STOP_TIMER("get_cabac")
     }
@@ -214,6 +214,7 @@ typedef struct AVSContext {
     int luma_scan[4];
     int qp;
     int qp_fixed;
+    int pic_qp_fixed;
     int cbp;
     ScanTable scantable;
 
@@ -568,6 +568,11 @@ static int decode_residual_block(AVSContext *h, GetBitContext *gb,
             if(run > 64)
                 return -1;
             esc_code = get_ue_code(gb, esc_golomb_order);
+            if (esc_code < 0 || esc_code > 32767) {
+                av_log(h->avctx, AV_LOG_ERROR, "esc_code invalid\n");
+                return AVERROR_INVALIDDATA;
+            }
+
             level = esc_code + (run > r->max_run ? 1 : r->level_add[run]);
             while (level > r->inc_limit)
                 r++;
@@ -900,7 +905,7 @@ static inline int decode_slice_header(AVSContext *h, GetBitContext *gb)
 
     /* mark top macroblocks as unavailable */
     h->flags &= ~(B_AVAIL | C_AVAIL);
-    if ((h->mby == 0) && (!h->qp_fixed)) {
+    if (!h->pic_qp_fixed) {
         h->qp_fixed = get_bits1(gb);
         h->qp = get_bits(gb, 6);
     }
@@ -1018,6 +1023,7 @@ static int decode_pic(AVSContext *h)
     skip_bits1(&h->gb); //advanced_pred_mode_disable
     skip_bits1(&h->gb); //top_field_first
     skip_bits1(&h->gb); //repeat_first_field
+    h->pic_qp_fixed =
     h->qp_fixed = get_bits1(&h->gb);
     h->qp = get_bits(&h->gb, 6);
     if (h->cur.f->pict_type == AV_PICTURE_TYPE_I) {
@@ -353,10 +353,9 @@ static int cdg_decode_frame(AVCodecContext *avctx,
         *got_frame = 1;
     } else {
         *got_frame = 0;
-        buf_size = 0;
     }
 
-    return buf_size;
+    return avpkt->size;
 }
 
 static av_cold int cdg_decode_end(AVCodecContext *avctx)
@@ -135,7 +135,7 @@ static int cinepak_decode_vectors (CinepakContext *s, cvid_strip *strip,
     const uint8_t *eod = (data + size);
     uint32_t flag, mask;
     uint8_t *cb0, *cb1, *cb2, *cb3;
-    unsigned int x, y;
+    int x, y;
     char *ip0, *ip1, *ip2, *ip3;
 
     flag = 0;
@@ -1217,8 +1217,8 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
 
         q->num_subpackets++;
         s++;
-        if (s > MAX_SUBPACKETS) {
-            avpriv_request_sample(avctx, "subpackets > %d", MAX_SUBPACKETS);
+        if (s > FFMIN(MAX_SUBPACKETS, avctx->block_align)) {
+            avpriv_request_sample(avctx, "subpackets > %d", FFMIN(MAX_SUBPACKETS, avctx->block_align));
             return AVERROR_PATCHWELCOME;
         }
     }
@@ -571,6 +571,14 @@ static int dca_parse_audio_coding_header(DCAContext *s, int base_channel,
     }
 
     nchans = get_bits(&s->gb, 3) + 1;
+    if (xxch && nchans >= 3) {
+        av_log(s->avctx, AV_LOG_ERROR, "nchans %d is too large\n", nchans);
+        return AVERROR_INVALIDDATA;
+    } else if (nchans + base_channel > DCA_PRIM_CHANNELS_MAX) {
+        av_log(s->avctx, AV_LOG_ERROR, "channel sum %d + %d is too large\n", nchans, base_channel);
+        return AVERROR_INVALIDDATA;
+    }
+
     s->total_channels = nchans + base_channel;
     s->prim_channels = s->total_channels;
 
@@ -830,6 +838,10 @@ static int dca_subframe_header(DCAContext *s, int base_channel, int block_index)
 
     if (!base_channel) {
         s->subsubframes[s->current_subframe] = get_bits(&s->gb, 2) + 1;
+        if (block_index + s->subsubframes[s->current_subframe] > s->sample_blocks/8) {
+            s->subsubframes[s->current_subframe] = 1;
+            return AVERROR_INVALIDDATA;
+        }
         s->partial_samples[s->current_subframe] = get_bits(&s->gb, 3);
     }
 
@@ -1755,8 +1767,13 @@ static int dca_xbr_parse_frame(DCAContext *s)
     for(i = 0; i < num_chsets; i++) {
         n_xbr_ch[i] = get_bits(&s->gb, 3) + 1;
         k = get_bits(&s->gb, 2) + 5;
-        for(j = 0; j < n_xbr_ch[i]; j++)
+        for(j = 0; j < n_xbr_ch[i]; j++) {
             active_bands[i][j] = get_bits(&s->gb, k) + 1;
+            if (active_bands[i][j] > DCA_SUBBANDS) {
+                av_log(s->avctx, AV_LOG_ERROR, "too many active subbands (%d)\n", active_bands[i][j]);
+                return AVERROR_INVALIDDATA;
+            }
+        }
     }
 
     /* skip to the end of the header */
@@ -1798,23 +1815,34 @@ static int dca_xbr_parse_frame(DCAContext *s)
     for(i = 0; i < n_xbr_ch[chset]; i++) {
         const uint32_t *scale_table;
         int nbits;
+        int scale_table_size;
 
         if (s->scalefactor_huffman[chan_base+i] == 6) {
             scale_table = scale_factor_quant7;
+            scale_table_size = FF_ARRAY_ELEMS(scale_factor_quant7);
         } else {
             scale_table = scale_factor_quant6;
+            scale_table_size = FF_ARRAY_ELEMS(scale_factor_quant6);
         }
 
         nbits = anctemp[i];
 
         for(j = 0; j < active_bands[chset][i]; j++) {
             if(abits_high[i][j] > 0) {
-                scale_table_high[i][j][0] =
-                    scale_table[get_bits(&s->gb, nbits)];
+                int index = get_bits(&s->gb, nbits);
+                if (index >= scale_table_size) {
+                    av_log(s->avctx, AV_LOG_ERROR, "scale table index %d invalid\n", index);
+                    return AVERROR_INVALIDDATA;
+                }
+                scale_table_high[i][j][0] = scale_table[index];
 
                 if(xbr_tmode && s->transition_mode[i][j]) {
-                    scale_table_high[i][j][1] =
-                        scale_table[get_bits(&s->gb, nbits)];
+                    int index = get_bits(&s->gb, nbits);
+                    if (index >= scale_table_size) {
+                        av_log(s->avctx, AV_LOG_ERROR, "scale table index %d invalid\n", index);
+                        return AVERROR_INVALIDDATA;
+                    }
+                    scale_table_high[i][j][1] = scale_table[index];
                 }
             }
         }
@@ -28,6 +28,7 @@
 #ifndef AVCODEC_DIRAC_ARITH_H
 #define AVCODEC_DIRAC_ARITH_H
 
+#include "libavutil/x86/asm.h"
 #include "bytestream.h"
 #include "get_bits.h"
 
@@ -134,7 +135,7 @@ static inline int dirac_get_arith_bit(DiracArith *c, int ctx)
 
     range_times_prob = (c->range * prob_zero) >> 16;
 
-#if HAVE_FAST_CMOV && HAVE_INLINE_ASM
+#if HAVE_FAST_CMOV && HAVE_INLINE_ASM && HAVE_6REGS
     low -= range_times_prob << 16;
     range -= range_times_prob;
     bit = 0;
@@ -170,6 +171,10 @@ static inline int dirac_get_arith_uint(DiracArith *c, int follow_ctx, int data_c
 {
     int ret = 1;
     while (!dirac_get_arith_bit(c, follow_ctx)) {
+        if (ret >= 0x40000000) {
+            av_log(NULL, AV_LOG_ERROR, "dirac_get_arith_uint overflow\n");
+            return -1;
+        }
         ret <<= 1;
         ret += dirac_get_arith_bit(c, data_ctx);
         follow_ctx = ff_dirac_next_ctx[follow_ctx];
@@ -201,6 +201,7 @@ typedef struct DiracContext {
 
     uint16_t *mctmp; /* buffer holding the MC data multipled by OBMC weights */
     uint8_t *mcscratch;
+    int buffer_stride;
 
     DECLARE_ALIGNED(16, uint8_t, obmc_weight)[3][MAX_BLOCKSIZE*MAX_BLOCKSIZE];
 
@@ -343,22 +344,44 @@ static int alloc_sequence_buffers(DiracContext *s)
             return AVERROR(ENOMEM);
     }
 
-    w = s->source.width;
-    h = s->source.height;
-
     /* fixme: allocate using real stride here */
-    s->sbsplit = av_malloc(sbwidth * sbheight);
-    s->blmotion = av_malloc(sbwidth * sbheight * 16 * sizeof(*s->blmotion));
-    s->edge_emu_buffer_base = av_malloc((w+64)*MAX_BLOCKSIZE);
+    s->sbsplit = av_malloc_array(sbwidth, sbheight);
+    s->blmotion = av_malloc_array(sbwidth, sbheight * 16 * sizeof(*s->blmotion));
 
-    s->mctmp = av_malloc((w+64+MAX_BLOCKSIZE) * (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
-    s->mcscratch = av_malloc((w+64)*MAX_BLOCKSIZE);
-
-    if (!s->sbsplit || !s->blmotion || !s->mctmp || !s->mcscratch)
+    if (!s->sbsplit || !s->blmotion)
         return AVERROR(ENOMEM);
     return 0;
 }
 
+static int alloc_buffers(DiracContext *s, int stride)
+{
+    int w = s->source.width;
+    int h = s->source.height;
+
+    av_assert0(stride >= w);
+    stride += 64;
+
+    if (s->buffer_stride >= stride)
+        return 0;
+    s->buffer_stride = 0;
+
+    av_freep(&s->edge_emu_buffer_base);
+    memset(s->edge_emu_buffer, 0, sizeof(s->edge_emu_buffer));
+    av_freep(&s->mctmp);
+    av_freep(&s->mcscratch);
+
+    s->edge_emu_buffer_base = av_malloc_array(stride, MAX_BLOCKSIZE);
+
+    s->mctmp = av_malloc_array((stride+MAX_BLOCKSIZE), (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
+    s->mcscratch = av_malloc_array(stride, MAX_BLOCKSIZE);
+
+    if (!s->edge_emu_buffer_base || !s->mctmp || !s->mcscratch)
+        return AVERROR(ENOMEM);
+
+    s->buffer_stride = stride;
+    return 0;
+}
+
 static void free_sequence_buffers(DiracContext *s)
 {
     int i, j, k;
@@ -382,6 +405,7 @@ static void free_sequence_buffers(DiracContext *s)
         av_freep(&s->plane[i].idwt_tmp);
     }
 
+    s->buffer_stride = 0;
     av_freep(&s->sbsplit);
     av_freep(&s->blmotion);
     av_freep(&s->edge_emu_buffer_base);
@@ -574,10 +598,10 @@ static av_always_inline void decode_subband_internal(DiracContext *s, SubBand *b
 
     top = 0;
     for (cb_y = 0; cb_y < cb_height; cb_y++) {
-        bottom = (b->height * (cb_y+1)) / cb_height;
+        bottom = (b->height * (cb_y+1LL)) / cb_height;
         left = 0;
         for (cb_x = 0; cb_x < cb_width; cb_x++) {
-            right = (b->width * (cb_x+1)) / cb_width;
+            right = (b->width * (cb_x+1LL)) / cb_width;
             codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
             left = right;
         }
@@ -761,7 +785,10 @@ static void decode_lowdelay(DiracContext *s)
         slice_num++;
 
         buf += bytes;
+        if (bufsize/8 >= bytes)
             bufsize -= bytes*8;
+        else
+            bufsize = 0;
     }
 
     avctx->execute(avctx, decode_lowdelay_slice, slices, NULL, slice_num,
@@ -858,6 +885,14 @@ static int dirac_unpack_prediction_parameters(DiracContext *s)
     /*[DIRAC_STD] 11.2.4 motion_data_dimensions()
       Calculated in function dirac_unpack_block_motion_data */
 
+    if (s->plane[0].xblen % (1 << s->chroma_x_shift) != 0 ||
+        s->plane[0].yblen % (1 << s->chroma_y_shift) != 0 ||
+        !s->plane[0].xblen || !s->plane[0].yblen) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "invalid x/y block length (%d/%d) for x/y chroma shift (%d/%d)\n",
+               s->plane[0].xblen, s->plane[0].yblen, s->chroma_x_shift, s->chroma_y_shift);
+        return AVERROR_INVALIDDATA;
+    }
     if (!s->plane[0].xbsep || !s->plane[0].ybsep || s->plane[0].xbsep < s->plane[0].xblen/2 || s->plane[0].ybsep < s->plane[0].yblen/2) {
         av_log(s->avctx, AV_LOG_ERROR, "Block separation too small\n");
         return -1;
@@ -966,8 +1001,8 @@ static int dirac_unpack_idwt_params(DiracContext *s)
     /* Codeblock parameters (core syntax only) */
     if (get_bits1(gb)) {
         for (i = 0; i <= s->wavelet_depth; i++) {
-            CHECKEDREAD(s->codeblock[i].width , tmp < 1, "codeblock width invalid\n")
-            CHECKEDREAD(s->codeblock[i].height, tmp < 1, "codeblock height invalid\n")
+            CHECKEDREAD(s->codeblock[i].width , tmp < 1 || tmp > (s->avctx->width >>s->wavelet_depth-i), "codeblock width invalid\n")
+            CHECKEDREAD(s->codeblock[i].height, tmp < 1 || tmp > (s->avctx->height>>s->wavelet_depth-i), "codeblock height invalid\n")
         }
 
         CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n")
@@ -1343,8 +1378,8 @@ static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5],
         motion_y >>= s->chroma_y_shift;
     }
 
-    mx = motion_x & ~(-1 << s->mv_precision);
-    my = motion_y & ~(-1 << s->mv_precision);
+    mx = motion_x & ~(-1U << s->mv_precision);
+    my = motion_y & ~(-1U << s->mv_precision);
     motion_x >>= s->mv_precision;
     motion_y >>= s->mv_precision;
     /* normalize subpel coordinates to epel */
@@ -1674,6 +1709,12 @@ static int dirac_decode_picture_header(DiracContext *s)
                 ff_get_buffer(s->avctx, &s->ref_pics[i]->avframe, AV_GET_BUFFER_FLAG_REF);
                 break;
             }
 
+        if (!s->ref_pics[i]) {
+            av_log(s->avctx, AV_LOG_ERROR, "Reference could not be allocated\n");
+            return -1;
+        }
+
     }
 
     /* retire the reference frames that are not used anymore */
@@ -1818,6 +1859,9 @@ static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int
         s->plane[1].stride = pic->avframe.linesize[1];
         s->plane[2].stride = pic->avframe.linesize[2];
 
+        if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0)
+            return AVERROR(ENOMEM);
+
         /* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
         if (dirac_decode_picture_header(s))
             return -1;
@@ -1866,8 +1910,8 @@ static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
             break;
 
         data_unit_size = AV_RB32(buf+buf_idx+5);
-        if (buf_idx + data_unit_size > buf_size || !data_unit_size) {
-            if(buf_idx + data_unit_size > buf_size)
+        if (data_unit_size > buf_size - buf_idx || !data_unit_size) {
+            if(data_unit_size > buf_size - buf_idx)
                 av_log(s->avctx, AV_LOG_ERROR,
                        "Data unit with size %d is larger than input buffer, discarding\n",
                        data_unit_size);
@@ -35,6 +35,7 @@ typedef struct DNXHDContext {
     GetBitContext gb;
     int64_t cid; ///< compression id
     unsigned int width, height;
+    enum AVPixelFormat pix_fmt;
     unsigned int mb_width, mb_height;
     uint32_t mb_scan_index[68]; /* max for 1080p */
     int cur_field; ///< current interlaced field
@@ -128,7 +129,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
     av_dlog(ctx->avctx, "width %d, height %d\n", ctx->width, ctx->height);
 
     if (buf[0x21] & 0x40) {
-        ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
+        ctx->pix_fmt = AV_PIX_FMT_YUV422P10;
         ctx->avctx->bits_per_raw_sample = 10;
         if (ctx->bit_depth != 10) {
             ff_dsputil_init(&ctx->dsp, ctx->avctx);
@@ -136,7 +137,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
             ctx->decode_dct_block = dnxhd_decode_dct_block_10;
         }
     } else {
-        ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
+        ctx->pix_fmt = AV_PIX_FMT_YUV422P;
         ctx->avctx->bits_per_raw_sample = 8;
         if (ctx->bit_depth != 8) {
             ff_dsputil_init(&ctx->dsp, ctx->avctx);
@@ -311,7 +312,7 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, AVFrame *frame, int x, int
     dest_u = frame->data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
     dest_v = frame->data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
 
-    if (ctx->cur_field) {
+    if (frame->interlaced_frame && ctx->cur_field) {
         dest_y += frame->linesize[0];
         dest_u += frame->linesize[1];
         dest_v += frame->linesize[2];
@@ -376,9 +377,15 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                avctx->width, avctx->height, ctx->width, ctx->height);
         first_field = 1;
     }
+    if (avctx->pix_fmt != AV_PIX_FMT_NONE && avctx->pix_fmt != ctx->pix_fmt) {
+        av_log(avctx, AV_LOG_WARNING, "pix_fmt changed: %s -> %s\n",
+               av_get_pix_fmt_name(avctx->pix_fmt), av_get_pix_fmt_name(ctx->pix_fmt));
+        first_field = 1;
+    }
 
     if (av_image_check_size(ctx->width, ctx->height, 0, avctx))
         return -1;
+    avctx->pix_fmt = ctx->pix_fmt;
     avcodec_set_dimensions(avctx, ctx->width, ctx->height);
 
     if (first_field) {
@@ -236,7 +236,7 @@ static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
 
 static av_cold int dnxhd_init_rc(DNXHDEncContext *ctx)
 {
-    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160*ctx->m.avctx->qmax*sizeof(RCEntry), fail);
+    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160*(ctx->m.avctx->qmax + 1)*sizeof(RCEntry), fail);
     if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD)
         FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_cmp, ctx->m.mb_num*sizeof(RCCMPEntry), fail);
 
@@ -1931,7 +1931,7 @@ void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){
 
 static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
     long i;
-    for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
+    for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
         long a = *(long*)(src+i);
         long b = *(long*)(dst+i);
         *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
@@ -1956,7 +1956,7 @@ static void diff_bytes_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
         }
     }else
 #endif
-    for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
+    for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
         long a = *(long*)(src1+i);
         long b = *(long*)(src2+i);
         *(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
@@ -45,8 +45,11 @@ static int dvdsub_parse(AVCodecParserContext *s,
     DVDSubParseContext *pc = s->priv_data;
 
     if (pc->packet_index == 0) {
-        if (buf_size < 2)
-            return 0;
+        if (buf_size < 2 || AV_RB16(buf) && buf_size < 6) {
+            if (buf_size)
+                av_log(avctx, AV_LOG_DEBUG, "Parser input %d too small\n", buf_size);
+            return buf_size;
+        }
         pc->packet_len = AV_RB16(buf);
         if (pc->packet_len == 0) /* HD-DVD subpicture packet */
             pc->packet_len = AV_RB32(buf+2);
@@ -98,6 +98,12 @@ static int decode_rle(uint8_t *bitmap, int linesize, int w, int h,
     int x, y, len, color;
     uint8_t *d;
 
+    if (start >= buf_size)
+        return -1;
+
+    if (w <= 0 || h <= 0)
+        return -1;
+
     bit_len = (buf_size - start) * 8;
     init_get_bits(&gb, buf + start, bit_len);
 
@@ -339,10 +345,12 @@ static int decode_dvd_subtitles(DVDSubContext *ctx, AVSubtitle *sub_header,
         sub_header->rects[0] = av_mallocz(sizeof(AVSubtitleRect));
         sub_header->num_rects = 1;
         sub_header->rects[0]->pict.data[0] = bitmap;
-        decode_rle(bitmap, w * 2, w, (h + 1) / 2,
-                   buf, offset1, buf_size, is_8bit);
-        decode_rle(bitmap + w, w * 2, w, h / 2,
-                   buf, offset2, buf_size, is_8bit);
+        if (decode_rle(bitmap, w * 2, w, (h + 1) / 2,
+                       buf, offset1, buf_size, is_8bit) < 0)
+            goto fail;
+        if (decode_rle(bitmap + w, w * 2, w, h / 2,
+                       buf, offset2, buf_size, is_8bit) < 0)
+            goto fail;
         sub_header->rects[0]->pict.data[1] = av_mallocz(AVPALETTE_SIZE);
         if (is_8bit) {
             if (yuv_palette == 0)
@@ -306,6 +306,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
 
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
 
+    if (avctx->width%4 || avctx->height%4) {
+        avpriv_request_sample(avctx, "dimensions are not a multiple of 4");
+        return AVERROR_INVALIDDATA;
+    }
+
     c->prev = av_frame_alloc();
     if (!c->prev)
         return AVERROR(ENOMEM);
@@ -762,6 +762,17 @@ void ff_er_frame_start(ERContext *s)
     s->error_occurred = 0;
 }
 
+static int er_supported(ERContext *s)
+{
+    if(s->avctx->hwaccel ||
+       s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
+       !s->cur_pic ||
+       s->cur_pic->field_picture
+      )
+        return 0;
+    return 1;
+}
+
 /**
  * Add a slice.
  * @param endx x component of the last macroblock, can be -1
@@ -828,7 +839,7 @@ void ff_er_add_slice(ERContext *s, int startx, int starty,
     s->error_status_table[start_xy] |= VP_START;
 
     if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) &&
-        s->avctx->skip_top * s->mb_width < start_i) {
+        er_supported(s) && s->avctx->skip_top * s->mb_width < start_i) {
         int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];
 
         prev_status &= ~ VP_START;
@@ -853,9 +864,7 @@ void ff_er_frame_end(ERContext *s)
      * though it should not crash if enabled. */
     if (!s->avctx->err_recognition || s->error_count == 0 ||
         s->avctx->lowres ||
-        s->avctx->hwaccel ||
-        s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
-        !s->cur_pic || s->cur_pic->field_picture ||
+        !er_supported(s) ||
         s->error_count == 3 * s->mb_width *
         (s->avctx->skip_top + s->avctx->skip_bottom)) {
         return;
@@ -251,7 +251,7 @@ static void put_line(uint8_t *dst, int size, int width, const int *runs)
     PutBitContext pb;
     int run, mode = ~0, pix_left = width, run_idx = 0;
 
-    init_put_bits(&pb, dst, size * 8);
+    init_put_bits(&pb, dst, size);
     while (pix_left > 0) {
         run = runs[run_idx++];
         mode = ~mode;
@@ -102,7 +102,7 @@ av_cold int ffv1_init_slice_state(FFV1Context *f, FFV1Context *fs)
 av_cold int ffv1_init_slices_state(FFV1Context *f)
 {
     int i, ret;
-    for (i = 0; i < f->slice_count; i++) {
+    for (i = 0; i < f->max_slice_count; i++) {
         FFV1Context *fs = f->slice_context[i];
         if ((ret = ffv1_init_slice_state(f, fs)) < 0)
             return AVERROR(ENOMEM);
@@ -114,10 +114,10 @@ av_cold int ffv1_init_slice_contexts(FFV1Context *f)
 {
     int i;
 
-    f->slice_count = f->num_h_slices * f->num_v_slices;
-    av_assert0(f->slice_count > 0);
+    f->max_slice_count = f->num_h_slices * f->num_v_slices;
+    av_assert0(f->max_slice_count > 0);
 
-    for (i = 0; i < f->slice_count; i++) {
+    for (i = 0; i < f->max_slice_count; i++) {
         FFV1Context *fs = av_mallocz(sizeof(*fs));
         int sx = i % f->num_h_slices;
         int sy = i / f->num_h_slices;
@@ -202,7 +202,7 @@ av_cold int ffv1_close(AVCodecContext *avctx)
     ff_thread_release_buffer(avctx, &s->last_picture);
     av_frame_free(&s->last_picture.f);
 
-    for (j = 0; j < s->slice_count; j++) {
+    for (j = 0; j < s->max_slice_count; j++) {
         FFV1Context *fs = s->slice_context[j];
         for (i = 0; i < s->plane_count; i++) {
             PlaneContext *p = &fs->plane[i];
@@ -216,14 +216,14 @@ av_cold int ffv1_close(AVCodecContext *avctx)
     av_freep(&avctx->stats_out);
     for (j = 0; j < s->quant_table_count; j++) {
         av_freep(&s->initial_states[j]);
-        for (i = 0; i < s->slice_count; i++) {
+        for (i = 0; i < s->max_slice_count; i++) {
             FFV1Context *sf = s->slice_context[i];
             av_freep(&sf->rc_stat2[j]);
         }
         av_freep(&s->rc_stat2[j]);
     }
 
-    for (i = 0; i < s->slice_count; i++)
+    for (i = 0; i < s->max_slice_count; i++)
         av_freep(&s->slice_context[i]);
 
     return 0;
@@ -122,6 +122,7 @@ typedef struct FFV1Context {
 
     struct FFV1Context *slice_context[MAX_SLICES];
     int slice_count;
+    int max_slice_count;
     int num_v_slices;
     int num_h_slices;
     int slice_width;
|
@@ -483,6 +483,10 @@ static int read_extra_header(FFV1Context *f)
|
|||||||
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
|
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
|
||||||
|
|
||||||
f->version = get_symbol(c, state, 0);
|
f->version = get_symbol(c, state, 0);
|
||||||
|
if (f->version < 2) {
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "Invalid version in global header\n");
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
if (f->version > 2) {
|
if (f->version > 2) {
|
||||||
c->bytestream_end -= 4;
|
c->bytestream_end -= 4;
|
||||||
f->minor_version = get_symbol(c, state, 0);
|
f->minor_version = get_symbol(c, state, 0);
|
||||||
@@ -503,6 +507,12 @@ static int read_extra_header(FFV1Context *f)
|
|||||||
f->num_h_slices = 1 + get_symbol(c, state, 0);
|
f->num_h_slices = 1 + get_symbol(c, state, 0);
|
||||||
f->num_v_slices = 1 + get_symbol(c, state, 0);
|
f->num_v_slices = 1 + get_symbol(c, state, 0);
|
||||||
|
|
||||||
|
if (f->chroma_h_shift > 4U || f->chroma_v_shift > 4U) {
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
|
||||||
|
f->chroma_h_shift, f->chroma_v_shift);
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
if (f->num_h_slices > (unsigned)f->width || !f->num_h_slices ||
|
if (f->num_h_slices > (unsigned)f->width || !f->num_h_slices ||
|
||||||
f->num_v_slices > (unsigned)f->height || !f->num_v_slices
|
f->num_v_slices > (unsigned)f->height || !f->num_v_slices
|
||||||
) {
|
) {
|
||||||
@@ -511,8 +521,11 @@ static int read_extra_header(FFV1Context *f)
|
|||||||
}
|
}
|
||||||
|
|
||||||
f->quant_table_count = get_symbol(c, state, 0);
|
f->quant_table_count = get_symbol(c, state, 0);
|
||||||
if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
|
if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES || !f->quant_table_count) {
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "quant table count %d is invalid\n", f->quant_table_count);
|
||||||
|
f->quant_table_count = 0;
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
for (i = 0; i < f->quant_table_count; i++) {
|
for (i = 0; i < f->quant_table_count; i++) {
|
||||||
f->context_count[i] = read_quant_tables(c, f->quant_tables[i]);
|
f->context_count[i] = read_quant_tables(c, f->quant_tables[i]);
|
||||||
@@ -562,6 +575,7 @@ static int read_header(FFV1Context *f)
|
|||||||
memset(state, 128, sizeof(state));
|
memset(state, 128, sizeof(state));
|
||||||
|
|
||||||
if (f->version < 2) {
|
if (f->version < 2) {
|
||||||
|
int chroma_planes, chroma_h_shift, chroma_v_shift, transparency, colorspace, bits_per_raw_sample;
|
||||||
unsigned v= get_symbol(c, state, 0);
|
unsigned v= get_symbol(c, state, 0);
|
||||||
if (v >= 2) {
|
if (v >= 2) {
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
|
av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
|
||||||
@@ -574,15 +588,38 @@ static int read_header(FFV1Context *f)
|
|||||||
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
|
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
|
||||||
}
|
}
|
||||||
|
|
||||||
f->colorspace = get_symbol(c, state, 0); //YUV cs type
|
colorspace = get_symbol(c, state, 0); //YUV cs type
|
||||||
|
bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
|
||||||
|
chroma_planes = get_rac(c, state);
|
||||||
|
chroma_h_shift = get_symbol(c, state, 0);
|
||||||
|
chroma_v_shift = get_symbol(c, state, 0);
|
||||||
|
transparency = get_rac(c, state);
|
||||||
|
|
||||||
if (f->version > 0)
|
if (f->plane_count) {
|
||||||
f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
|
if ( colorspace != f->colorspace
|
||||||
|
|| bits_per_raw_sample != f->avctx->bits_per_raw_sample
|
||||||
|
|| chroma_planes != f->chroma_planes
|
||||||
|
|| chroma_h_shift!= f->chroma_h_shift
|
||||||
|
|| chroma_v_shift!= f->chroma_v_shift
|
||||||
|
|| transparency != f->transparency) {
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (chroma_h_shift > 4U || chroma_v_shift > 4U) {
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
|
||||||
|
chroma_h_shift, chroma_v_shift);
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
|
f->colorspace = colorspace;
|
||||||
|
f->avctx->bits_per_raw_sample = bits_per_raw_sample;
|
||||||
|
f->chroma_planes = chroma_planes;
|
||||||
|
f->chroma_h_shift = chroma_h_shift;
|
||||||
|
f->chroma_v_shift = chroma_v_shift;
|
||||||
|
f->transparency = transparency;
|
||||||
|
|
||||||
f->chroma_planes = get_rac(c, state);
|
|
||||||
f->chroma_h_shift = get_symbol(c, state, 0);
|
|
||||||
f->chroma_v_shift = get_symbol(c, state, 0);
|
|
||||||
f->transparency = get_rac(c, state);
|
|
||||||
f->plane_count = 2 + f->transparency;
|
f->plane_count = 2 + f->transparency;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -600,47 +637,32 @@ static int read_header(FFV1Context *f)
|
|||||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
|
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
|
||||||
case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
|
case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
|
||||||
case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
|
case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
|
||||||
default:
|
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
|
||||||
return AVERROR(ENOSYS);
|
|
||||||
}
|
}
|
||||||
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
|
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
|
||||||
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
|
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
|
||||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
|
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
|
||||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
|
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
|
||||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
|
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
|
||||||
default:
|
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
|
||||||
return AVERROR(ENOSYS);
|
|
||||||
}
|
}
|
||||||
} else if (f->avctx->bits_per_raw_sample == 9) {
|
} else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
|
||||||
f->packed_at_lsb = 1;
|
f->packed_at_lsb = 1;
|
||||||
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
||||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
|
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
|
||||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
|
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
|
||||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
|
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
|
||||||
default:
|
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
|
||||||
return AVERROR(ENOSYS);
|
|
||||||
}
|
}
|
||||||
} else if (f->avctx->bits_per_raw_sample == 10) {
|
} else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
|
||||||
f->packed_at_lsb = 1;
|
f->packed_at_lsb = 1;
|
||||||
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
||||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
|
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
|
||||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
|
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
|
||||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
|
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
|
||||||
default:
|
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
|
||||||
return AVERROR(ENOSYS);
|
|
||||||
}
|
}
|
||||||
} else {
|
} else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){
|
||||||
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
||||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
|
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
|
||||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
|
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
|
||||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
|
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
|
||||||
default:
|
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
|
||||||
return AVERROR(ENOSYS);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else if (f->colorspace == 1) {
|
} else if (f->colorspace == 1) {
|
||||||
@@ -664,6 +686,10 @@ static int read_header(FFV1Context *f)
|
|||||||
av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
|
av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
|
||||||
return AVERROR(ENOSYS);
|
return AVERROR(ENOSYS);
|
||||||
}
|
}
|
||||||
|
if (f->avctx->pix_fmt == AV_PIX_FMT_NONE) {
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||||
|
return AVERROR(ENOSYS);
|
||||||
|
}
|
||||||
|
|
||||||
av_dlog(f->avctx, "%d %d %d\n",
|
av_dlog(f->avctx, "%d %d %d\n",
|
||||||
f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
|
f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
|
||||||
@@ -673,6 +699,7 @@ static int read_header(FFV1Context *f)
|
|||||||
av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
|
av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
f->slice_count = f->max_slice_count;
|
||||||
} else if (f->version < 3) {
|
} else if (f->version < 3) {
|
||||||
f->slice_count = get_symbol(c, state, 0);
|
f->slice_count = get_symbol(c, state, 0);
|
||||||
} else {
|
} else {
|
||||||
@@ -687,8 +714,8 @@ static int read_header(FFV1Context *f)
|
|||||||
p -= size + trailer;
|
p -= size + trailer;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0) {
|
if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0 || f->slice_count > f->max_slice_count) {
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
|
av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid (max=%d)\n", f->slice_count, f->max_slice_count);
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -899,16 +926,57 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
|
|||||||
static int init_thread_copy(AVCodecContext *avctx)
|
static int init_thread_copy(AVCodecContext *avctx)
|
||||||
{
|
{
|
||||||
FFV1Context *f = avctx->priv_data;
|
FFV1Context *f = avctx->priv_data;
|
||||||
|
int i, ret;
|
||||||
|
|
||||||
f->picture.f = NULL;
|
f->picture.f = NULL;
|
||||||
f->last_picture.f = NULL;
|
f->last_picture.f = NULL;
|
||||||
f->sample_buffer = NULL;
|
f->sample_buffer = NULL;
|
||||||
f->quant_table_count = 0;
|
f->max_slice_count = 0;
|
||||||
f->slice_count = 0;
|
f->slice_count = 0;
|
||||||
|
|
||||||
|
for (i = 0; i < f->quant_table_count; i++) {
|
||||||
|
av_assert0(f->version > 1);
|
||||||
|
f->initial_states[i] = av_memdup(f->initial_states[i],
|
||||||
|
f->context_count[i] * sizeof(*f->initial_states[i]));
|
||||||
|
}
|
||||||
|
|
||||||
|
f->picture.f = av_frame_alloc();
|
||||||
|
f->last_picture.f = av_frame_alloc();
|
||||||
|
|
||||||
|
if ((ret = ffv1_init_slice_contexts(f)) < 0)
|
||||||
|
return ret;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void copy_fields(FFV1Context *fsdst, FFV1Context *fssrc, FFV1Context *fsrc)
|
||||||
|
{
|
||||||
|
fsdst->version = fsrc->version;
|
||||||
|
fsdst->minor_version = fsrc->minor_version;
|
||||||
|
fsdst->chroma_planes = fsrc->chroma_planes;
|
||||||
|
fsdst->chroma_h_shift = fsrc->chroma_h_shift;
|
||||||
|
fsdst->chroma_v_shift = fsrc->chroma_v_shift;
|
||||||
|
fsdst->transparency = fsrc->transparency;
|
||||||
|
fsdst->plane_count = fsrc->plane_count;
|
||||||
|
fsdst->ac = fsrc->ac;
|
||||||
|
fsdst->colorspace = fsrc->colorspace;
|
||||||
|
|
||||||
|
fsdst->ec = fsrc->ec;
|
||||||
|
fsdst->intra = fsrc->intra;
|
||||||
|
fsdst->slice_damaged = fssrc->slice_damaged;
|
||||||
|
fsdst->key_frame_ok = fsrc->key_frame_ok;
|
||||||
|
|
||||||
|
fsdst->bits_per_raw_sample = fsrc->bits_per_raw_sample;
|
||||||
|
fsdst->packed_at_lsb = fsrc->packed_at_lsb;
|
||||||
|
fsdst->slice_count = fsrc->slice_count;
|
||||||
|
if (fsrc->version<3){
|
||||||
|
fsdst->slice_x = fssrc->slice_x;
|
||||||
|
fsdst->slice_y = fssrc->slice_y;
|
||||||
|
fsdst->slice_width = fssrc->slice_width;
|
||||||
|
fsdst->slice_height = fssrc->slice_height;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
|
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
|
||||||
{
|
{
|
||||||
FFV1Context *fsrc = src->priv_data;
|
FFV1Context *fsrc = src->priv_data;
|
||||||
@@ -918,36 +986,30 @@ static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
|
|||||||
if (dst == src)
|
if (dst == src)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (!fdst->picture.f) {
|
{
|
||||||
|
FFV1Context bak = *fdst;
|
||||||
memcpy(fdst, fsrc, sizeof(*fdst));
|
memcpy(fdst, fsrc, sizeof(*fdst));
|
||||||
|
memcpy(fdst->initial_states, bak.initial_states, sizeof(fdst->initial_states));
|
||||||
for (i = 0; i < fdst->quant_table_count; i++) {
|
memcpy(fdst->slice_context, bak.slice_context , sizeof(fdst->slice_context));
|
||||||
fdst->initial_states[i] = av_malloc(fdst->context_count[i] * sizeof(*fdst->initial_states[i]));
|
fdst->picture = bak.picture;
|
||||||
memcpy(fdst->initial_states[i], fsrc->initial_states[i], fdst->context_count[i] * sizeof(*fdst->initial_states[i]));
|
fdst->last_picture = bak.last_picture;
|
||||||
|
for (i = 0; i<fdst->num_h_slices * fdst->num_v_slices; i++) {
|
||||||
|
FFV1Context *fssrc = fsrc->slice_context[i];
|
||||||
|
FFV1Context *fsdst = fdst->slice_context[i];
|
||||||
|
copy_fields(fsdst, fssrc, fsrc);
|
||||||
|
}
|
||||||
|
av_assert0(!fdst->plane[0].state);
|
||||||
|
av_assert0(!fdst->sample_buffer);
|
||||||
}
|
}
|
||||||
|
|
||||||
fdst->picture.f = av_frame_alloc();
|
av_assert1(fdst->max_slice_count == fsrc->max_slice_count);
|
||||||
fdst->last_picture.f = av_frame_alloc();
|
|
||||||
|
|
||||||
if ((ret = ffv1_init_slice_contexts(fdst)) < 0)
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
av_assert1(fdst->slice_count == fsrc->slice_count);
|
|
||||||
|
|
||||||
fdst->key_frame_ok = fsrc->key_frame_ok;
|
|
||||||
|
|
||||||
ff_thread_release_buffer(dst, &fdst->picture);
|
ff_thread_release_buffer(dst, &fdst->picture);
|
||||||
if (fsrc->picture.f->data[0]) {
|
if (fsrc->picture.f->data[0]) {
|
||||||
if ((ret = ff_thread_ref_frame(&fdst->picture, &fsrc->picture)) < 0)
|
if ((ret = ff_thread_ref_frame(&fdst->picture, &fsrc->picture)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
for (i = 0; i < fdst->slice_count; i++) {
|
|
||||||
FFV1Context *fsdst = fdst->slice_context[i];
|
|
||||||
FFV1Context *fssrc = fsrc->slice_context[i];
|
|
||||||
|
|
||||||
fsdst->slice_damaged = fssrc->slice_damaged;
|
|
||||||
}
|
|
||||||
|
|
||||||
fdst->fsrc = fsrc;
|
fdst->fsrc = fsrc;
|
||||||
|
|
||||||
|
@@ -275,7 +275,7 @@ static av_always_inline int encode_line(FFV1Context *s, int w,
     int run_mode = 0;
 
     if (s->ac) {
-        if (c->bytestream_end - c->bytestream < w * 20) {
+        if (c->bytestream_end - c->bytestream < w * 35) {
             av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
             return AVERROR_INVALIDDATA;
         }
@@ -902,6 +902,7 @@ slices_ok:
 
     if ((ret = ffv1_init_slice_contexts(s)) < 0)
         return ret;
+    s->slice_count = s->max_slice_count;
    if ((ret = ffv1_init_slices_state(s)) < 0)
        return ret;
 
@@ -911,7 +912,7 @@ slices_ok:
         if (!avctx->stats_out)
             return AVERROR(ENOMEM);
         for (i = 0; i < s->quant_table_count; i++)
-            for (j = 0; j < s->slice_count; j++) {
+            for (j = 0; j < s->max_slice_count; j++) {
                 FFV1Context *sf = s->slice_context[j];
                 av_assert0(!sf->rc_stat2[i]);
                 sf->rc_stat2[i] = av_mallocz(s->context_count[i] *
@@ -682,7 +682,7 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
 handle_error:
     *poutbuf = NULL;
     *poutbuf_size = 0;
-    return read_end - buf;
+    return buf_size ? read_end - buf : 0;
 }
 
 static av_cold int flac_parse_init(AVCodecParserContext *c)
@@ -465,10 +465,10 @@ static int decode_frame(FLACContext *s)
         ret = allocate_buffers(s);
         if (ret < 0)
             return ret;
-        ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
         s->got_streaminfo = 1;
         dump_headers(s->avctx, (FLACStreaminfo *)s);
     }
+    ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
 
     // dump_headers(s->avctx, (FLACStreaminfo *)s);
 
@@ -387,6 +387,10 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
                 }
                 s->diff_start = get_bits(&gb, 8);
                 s->diff_height = get_bits(&gb, 8);
+                if (s->diff_start + s->diff_height > cur_blk_height) {
+                    av_log(avctx, AV_LOG_ERROR, "Block parameters invalid\n");
+                    return AVERROR_INVALIDDATA;
+                }
                 av_log(avctx, AV_LOG_DEBUG,
                        "%dx%d diff start %d height %d\n",
                        i, j, s->diff_start, s->diff_height);
@@ -288,7 +288,7 @@ static int write_header(FlashSV2Context * s, uint8_t * buf, int buf_size)
  if (buf_size < 5)
  return -1;
- init_put_bits(&pb, buf, buf_size * 8);
+ init_put_bits(&pb, buf, buf_size);
  put_bits(&pb, 4, (s->block_width >> 4) - 1);
  put_bits(&pb, 12, s->image_width);

@@ -131,7 +131,7 @@ static int encode_bitstream(FlashSVContext *s, AVFrame *p, uint8_t *buf,
  int buf_pos, res;
  int pred_blocks = 0;
- init_put_bits(&pb, buf, buf_size * 8);
+ init_put_bits(&pb, buf, buf_size);
  put_bits(&pb, 4, block_width / 16 - 1);
  put_bits(&pb, 12, s->image_width);
@@ -389,7 +389,7 @@ static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
  return 0;
  zsize = (src[0] << 8) | src[1]; src += 2;
- if (src_end - src < zsize)
+ if (src_end - src < zsize + (sub_type != 2))
  return AVERROR_INVALIDDATA;
  ret = uncompress(c->kempf_buf, &dlen, src, zsize);

@@ -411,6 +411,8 @@ static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
  for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
  for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
  if (!bits) {
+ if (src >= src_end)
+ return AVERROR_INVALIDDATA;
  bitbuf = *src++;
  bits = 8;
  }

@@ -441,8 +443,8 @@ static int g2m_init_buffers(G2MContext *c)
  int aligned_height;
  if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
- c->framebuf_stride = FFALIGN(c->width * 3, 16);
+ c->framebuf_stride = FFALIGN(c->width + 15, 16) * 3;
- aligned_height = FFALIGN(c->height, 16);
+ aligned_height = c->height + 15;
  av_free(c->framebuf);
  c->framebuf = av_mallocz(c->framebuf_stride * aligned_height);
  if (!c->framebuf)

@@ -451,7 +453,7 @@ static int g2m_init_buffers(G2MContext *c)
  if (!c->synth_tile || !c->jpeg_tile ||
  c->old_tile_w < c->tile_width ||
  c->old_tile_h < c->tile_height) {
- c->tile_stride = FFALIGN(c->tile_width * 3, 16);
+ c->tile_stride = FFALIGN(c->tile_width, 16) * 3;
  aligned_height = FFALIGN(c->tile_height, 16);
  av_free(c->synth_tile);
  av_free(c->jpeg_tile);

@@ -714,7 +716,10 @@ static int g2m_decode_frame(AVCodecContext *avctx, void *data,
  }
  c->tile_width = bytestream2_get_be32(&bc);
  c->tile_height = bytestream2_get_be32(&bc);
- if (!c->tile_width || !c->tile_height) {
+ if (c->tile_width <= 0 || c->tile_height <= 0 ||
+ ((c->tile_width | c->tile_height) & 0xF) ||
+ c->tile_width * 4LL * c->tile_height >= INT_MAX
+ ) {
  av_log(avctx, AV_LOG_ERROR,
  "Invalid tile dimensions %dx%d\n",
  c->tile_width, c->tile_height);
@@ -2286,7 +2286,8 @@ static int pack_bitstream(G723_1_Context *p, unsigned char *frame, int size)
  if (p->cur_rate == RATE_6300) {
  info_bits = 0;
  put_bits(&pb, 2, info_bits);
- }
+ }else
+ av_assert0(0);
  put_bits(&pb, 8, p->lsp_index[2]);
  put_bits(&pb, 8, p->lsp_index[1]);
@@ -251,26 +251,21 @@ static int gif_read_image(GifState *s, AVFrame *frame)
  case 1:
  y1 += 8;
  ptr += linesize * 8;
- if (y1 >= height) {
- y1 = pass ? 2 : 4;
- ptr = ptr1 + linesize * y1;
- pass++;
- }
  break;
  case 2:
  y1 += 4;
  ptr += linesize * 4;
- if (y1 >= height) {
- y1 = 1;
- ptr = ptr1 + linesize;
- pass++;
- }
  break;
  case 3:
  y1 += 2;
  ptr += linesize * 2;
  break;
  }
+ while (y1 >= height) {
+ y1 = 4 >> pass;
+ ptr = ptr1 + linesize * y1;
+ pass++;
+ }
  } else {
  ptr += linesize;
  }
@@ -58,7 +58,7 @@ int main(void)
  }
  }
- #define EXTEND(i) (i << 3 | i & 7)
+ #define EXTEND(i) ((i) << 3 | (i) & 7)
  init_put_bits(&pb, temp, SIZE);
  for (i = 0; i < COUNT; i++)
  set_ue_golomb(&pb, EXTEND(i));
@@ -718,10 +718,10 @@ frame_end:
  }
  if(startcode_found){
- av_fast_malloc(
+ av_fast_padded_mallocz(
  &s->bitstream_buffer,
  &s->allocated_bitstream_buffer_size,
- buf_size - current_pos + FF_INPUT_BUFFER_PADDING_SIZE);
+ buf_size - current_pos);
  if (!s->bitstream_buffer)
  return AVERROR(ENOMEM);
  memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos);
@@ -500,18 +500,18 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
  if ((h->left_samples_available & 0x8080) != 0x8080) {
  mode = left[mode];
- if (is_chroma && (h->left_samples_available & 0x8080)) {
- // mad cow disease mode, aka MBAFF + constrained_intra_pred
- mode = ALZHEIMER_DC_L0T_PRED8x8 +
- (!(h->left_samples_available & 0x8000)) +
- 2 * (mode == DC_128_PRED8x8);
- }
  if (mode < 0) {
  av_log(h->avctx, AV_LOG_ERROR,
  "left block unavailable for requested intra mode at %d %d\n",
  h->mb_x, h->mb_y);
  return -1;
  }
+ if (is_chroma && (h->left_samples_available & 0x8080)) {
+ // mad cow disease mode, aka MBAFF + constrained_intra_pred
+ mode = ALZHEIMER_DC_L0T_PRED8x8 +
+ (!(h->left_samples_available & 0x8000)) +
+ 2 * (mode == DC_128_PRED8x8);
+ }
  }
  return mode;
@@ -1131,6 +1131,7 @@ static void free_tables(H264Context *h, int free_rbsp)
  av_buffer_pool_uninit(&h->ref_index_pool);
  if (free_rbsp && h->DPB) {
+ memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
  for (i = 0; i < MAX_PICTURE_COUNT; i++)
  unref_picture(h, &h->DPB[i]);
  av_freep(&h->DPB);

@@ -1587,6 +1588,7 @@ static int decode_init_thread_copy(AVCodecContext *avctx)
  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
+ h->avctx = avctx;
  h->context_initialized = 0;
  return 0;

@@ -1682,6 +1684,7 @@ static int decode_update_thread_context(AVCodecContext *dst,
  memset(&h->mb, 0, sizeof(h->mb));
  memset(&h->mb_luma_dc, 0, sizeof(h->mb_luma_dc));
  memset(&h->mb_padding, 0, sizeof(h->mb_padding));
+ memset(&h->cur_pic, 0, sizeof(h->cur_pic));
  h->avctx = dst;
  h->DPB = NULL;

@@ -1689,6 +1692,17 @@ static int decode_update_thread_context(AVCodecContext *dst,
  h->mb_type_pool = NULL;
  h->ref_index_pool = NULL;
  h->motion_val_pool = NULL;
+ h->intra4x4_pred_mode= NULL;
+ h->non_zero_count = NULL;
+ h->slice_table_base = NULL;
+ h->slice_table = NULL;
+ h->cbp_table = NULL;
+ h->chroma_pred_mode_table = NULL;
+ memset(h->mvd_table, 0, sizeof(h->mvd_table));
+ h->direct_table = NULL;
+ h->list_counts = NULL;
+ h->mb2b_xy = NULL;
+ h->mb2br_xy = NULL;
  if (h1->context_initialized) {
  h->context_initialized = 0;
@@ -1847,6 +1861,10 @@ static int h264_frame_start(H264Context *h)
  h->cur_pic_ptr = pic;
  unref_picture(h, &h->cur_pic);
+ if (CONFIG_ERROR_RESILIENCE) {
+ h->er.cur_pic = NULL;
+ }
  if ((ret = ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
  return ret;
@@ -2500,6 +2518,16 @@ static int pred_weight_table(H264Context *h)
  h->luma_log2_weight_denom = get_ue_golomb(&h->gb);
  if (h->sps.chroma_format_idc)
  h->chroma_log2_weight_denom = get_ue_golomb(&h->gb);
+ if (h->luma_log2_weight_denom > 7U) {
+ av_log(h->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", h->luma_log2_weight_denom);
+ h->luma_log2_weight_denom = 0;
+ }
+ if (h->chroma_log2_weight_denom > 7U) {
+ av_log(h->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", h->chroma_log2_weight_denom);
+ h->chroma_log2_weight_denom = 0;
+ }
  luma_def = 1 << h->luma_log2_weight_denom;
  chroma_def = 1 << h->chroma_log2_weight_denom;

@@ -2660,6 +2688,7 @@ static void flush_change(H264Context *h)
  h->sync= 0;
  h->list_count = 0;
  h->current_slice = 0;
+ h->mmco_reset = 1;
  }
  /* forget old pics after a seek */
@@ -3099,6 +3128,7 @@ static int h264_slice_header_init(H264Context *h, int reinit)
  h->avctx->active_thread_type & FF_THREAD_SLICE) ?
  h->avctx->thread_count : 1;
  int i;
+ int ret = AVERROR_INVALIDDATA;
  h->avctx->sample_aspect_ratio = h->sps.sar;
  av_assert0(h->avctx->sample_aspect_ratio.den);

@@ -3124,7 +3154,7 @@ static int h264_slice_header_init(H264Context *h, int reinit)
  if (ff_h264_alloc_tables(h) < 0) {
  av_log(h->avctx, AV_LOG_ERROR,
  "Could not allocate memory for h264\n");
- return AVERROR(ENOMEM);
+ goto fail;
  }
  if (nb_slices > MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) {

@@ -3142,12 +3172,16 @@ static int h264_slice_header_init(H264Context *h, int reinit)
  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
  if (context_init(h) < 0) {
  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
- return -1;
+ goto fail;
  }
  } else {
  for (i = 1; i < h->slice_context_count; i++) {
  H264Context *c;
  c = h->thread_context[i] = av_mallocz(sizeof(H264Context));
+ if (!c) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
  c->avctx = h->avctx;
  if (CONFIG_ERROR_RESILIENCE) {
  c->dsp = h->dsp;
@@ -3186,13 +3220,17 @@ static int h264_slice_header_init(H264Context *h, int reinit)
  for (i = 0; i < h->slice_context_count; i++)
  if (context_init(h->thread_context[i]) < 0) {
  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
- return -1;
+ goto fail;
  }
  }
  h->context_initialized = 1;
  return 0;
+ fail:
+ free_tables(h, 0);
+ h->context_initialized = 0;
+ return ret;
  }
  /**
@@ -3214,6 +3252,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
  int last_pic_structure, last_pic_droppable;
  int must_reinit;
  int needs_reinit = 0;
+ int first_slice = h == h0 && !h0->current_slice;
  h->me.qpel_put = h->h264qpel.put_h264_qpel_pixels_tab;
  h->me.qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab;

@@ -3308,13 +3347,15 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
  || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height
  || h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
  || h->cur_chroma_format_idc != h->sps.chroma_format_idc
- || av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)
  || h->mb_width != h->sps.mb_width
  || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
  ));
  if (h0->avctx->pix_fmt != get_pixel_format(h0, 0))
  must_reinit = 1;
+ if (first_slice && av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio))
+ must_reinit = 1;
  h->mb_width = h->sps.mb_width;
  h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
  h->mb_num = h->mb_width * h->mb_height;
@@ -3348,7 +3389,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
  h->height != h->avctx->coded_height ||
  must_reinit ||
  needs_reinit)) {
+ h->context_initialized = 0;
  if (h != h0) {
  av_log(h->avctx, AV_LOG_ERROR, "changing width/height on "
  "slice %d\n", h0->current_slice + 1);

@@ -3434,7 +3475,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
  } else {
  /* Shorten frame num gaps so we don't have to allocate reference
  * frames just to throw them away */
- if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
+ if (h->frame_num != h->prev_frame_num) {
  int unwrap_prev_frame_num = h->prev_frame_num;
  int max_frame_num = 1 << h->sps.log2_max_frame_num;

@@ -3461,7 +3502,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
  assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
  /* Mark old field/frame as completed */
- if (!last_pic_droppable && h0->cur_pic_ptr->tf.owner == h0->avctx) {
+ if (h0->cur_pic_ptr->tf.owner == h0->avctx) {
  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
  last_pic_structure == PICT_BOTTOM_FIELD);
  }
@@ -3470,7 +3511,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
  /* Previous field is unmatched. Don't display it, but let it
  * remain for reference if marked as such. */
- if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
+ if (last_pic_structure != PICT_FRAME) {
  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
  last_pic_structure == PICT_TOP_FIELD);
  }

@@ -3480,7 +3521,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
  * different frame_nums. Consider this field first in
  * pair. Throw away previous field except for reference
  * purposes. */
- if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
+ if (last_pic_structure != PICT_FRAME) {
  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
  last_pic_structure == PICT_TOP_FIELD);
  }

@@ -3507,7 +3548,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
  }
  }
- while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 && !h0->first_field &&
+ while (h->frame_num != h->prev_frame_num && !h0->first_field &&
  h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
  Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
@@ -3876,6 +3917,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
  if (h->ref_count[0]) h->er.last_pic = &h->ref_list[0][0];
  if (h->ref_count[1]) h->er.next_pic = &h->ref_list[1][0];
+ h->er.ref_count = h->ref_count[0];
  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
  av_log(h->avctx, AV_LOG_DEBUG,

@@ -4267,7 +4309,6 @@ static void er_add_slice(H264Context *h, int startx, int starty,
  if (CONFIG_ERROR_RESILIENCE) {
  ERContext *er = &h->er;
- er->ref_count = h->ref_count[0];
  ff_er_add_slice(er, startx, starty, endx, endy, status);
  }
  }
@@ -61,10 +61,10 @@
  #define MAX_SLICES 16
  #ifdef ALLOW_INTERLACE
- #define MB_MBAFF(h) h->mb_mbaff
+ #define MB_MBAFF(h) (h)->mb_mbaff
- #define MB_FIELD(h) h->mb_field_decoding_flag
+ #define MB_FIELD(h) (h)->mb_field_decoding_flag
- #define FRAME_MBAFF(h) h->mb_aff_frame
+ #define FRAME_MBAFF(h) (h)->mb_aff_frame
- #define FIELD_PICTURE(h) (h->picture_structure != PICT_FRAME)
+ #define FIELD_PICTURE(h) ((h)->picture_structure != PICT_FRAME)
  #define LEFT_MBS 2
  #define LTOP 0
  #define LBOT 1

@@ -84,12 +84,12 @@
  #define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))
  #ifndef CABAC
- #define CABAC(h) h->pps.cabac
+ #define CABAC(h) (h)->pps.cabac
  #endif
- #define CHROMA(h) (h->sps.chroma_format_idc)
+ #define CHROMA(h) ((h)->sps.chroma_format_idc)
- #define CHROMA422(h) (h->sps.chroma_format_idc == 2)
+ #define CHROMA422(h) ((h)->sps.chroma_format_idc == 2)
- #define CHROMA444(h) (h->sps.chroma_format_idc == 3)
+ #define CHROMA444(h) ((h)->sps.chroma_format_idc == 3)
  #define EXTENDED_SAR 255

@@ -258,6 +258,7 @@ typedef struct MMCO {
  * H264Context
  */
  typedef struct H264Context {
+ AVClass *av_class;
  AVCodecContext *avctx;
  VideoDSPContext vdsp;
  H264DSPContext h264dsp;
@@ -1278,7 +1278,7 @@ void ff_h264_init_cabac_states(H264Context *h) {
  }
  static int decode_cabac_field_decoding_flag(H264Context *h) {
- const long mbb_xy = h->mb_xy - 2L*h->mb_stride;
+ const int mbb_xy = h->mb_xy - 2*h->mb_stride;
  unsigned long ctx = 0;
@@ -168,11 +168,11 @@ static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
  buf += ctx->length_size;
  unit_type = *buf & 0x1f;
- if (buf + nal_size > buf_end || nal_size < 0)
+ if (nal_size > buf_end - buf || nal_size < 0)
  goto fail;
  /* prepend only to the first type 5 NAL unit of an IDR picture */
- if (ctx->first_idr && unit_type == 5) {
+ if (ctx->first_idr && (unit_type == 5 || unit_type == 7 || unit_type == 8)) {
  if ((ret=alloc_and_copy(poutbuf, poutbuf_size,
  avctx->extradata, avctx->extradata_size,
  buf, nal_size)) < 0)
@@ -384,7 +384,9 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
  }
  sps->bit_depth_luma = get_ue_golomb(&h->gb) + 8;
  sps->bit_depth_chroma = get_ue_golomb(&h->gb) + 8;
- if (sps->bit_depth_luma > 14U || sps->bit_depth_chroma > 14U || sps->bit_depth_luma != sps->bit_depth_chroma) {
+ if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
+ sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14 ||
+ sps->bit_depth_luma != sps->bit_depth_chroma) {
  av_log(h->avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
  sps->bit_depth_luma, sps->bit_depth_chroma);
  goto fail;
@@ -583,7 +583,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
  if (mmco[i].opcode != MMCO_SHORT2LONG ||
  !h->long_ref[mmco[i].long_arg] ||
  h->long_ref[mmco[i].long_arg]->frame_num != frame_num) {
- av_log(h->avctx, AV_LOG_ERROR, "mmco: unref short failure\n");
+ av_log(h->avctx, h->short_ref_count ? AV_LOG_ERROR : AV_LOG_DEBUG, "mmco: unref short failure\n");
  err = AVERROR_INVALIDDATA;
  }
  continue;

@@ -681,7 +681,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
  */
  if (h->short_ref_count && h->short_ref[0] == h->cur_pic_ptr) {
  /* Just mark the second field valid */
- h->cur_pic_ptr->reference = PICT_FRAME;
+ h->cur_pic_ptr->reference |= h->picture_structure;
  } else if (h->cur_pic_ptr->long_ref) {
  av_log(h->avctx, AV_LOG_ERROR, "illegal short term reference "
  "assignment for second field "

@@ -733,7 +733,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
  print_short_term(h);
  print_long_term(h);
- if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=1 + (h->picture_structure != PICT_FRAME) && h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
+ if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=2 + (h->picture_structure != PICT_FRAME) && h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
  h->cur_pic_ptr->sync |= 1;
  if(!h->avctx->has_b_frames)
  h->sync = 2;
@@ -829,9 +829,9 @@ static int decode_frame(AVCodecContext *avctx,
  break;
  case 4:
  bytestream2_init(&gb, buf, buf_size);
- if (avctx->codec_tag == MKTAG('R','G','B','8'))
+ if (avctx->codec_tag == MKTAG('R','G','B','8') && avctx->pix_fmt == AV_PIX_FMT_RGB32)
  decode_rgb8(&gb, s->frame->data[0], avctx->width, avctx->height, s->frame->linesize[0]);
- else if (avctx->codec_tag == MKTAG('R','G','B','N'))
+ else if (avctx->codec_tag == MKTAG('R','G','B','N') && avctx->pix_fmt == AV_PIX_FMT_RGB444)
  decode_rgbn(&gb, s->frame->data[0], avctx->width, avctx->height, s->frame->linesize[0]);
  else
  return unsupported(avctx);
@@ -71,6 +71,9 @@ void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int
  }
  static int get_color_type(const AVPixFmtDescriptor *desc) {
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL)
+ return FF_COLOR_RGB;
  if(desc->nb_components == 1 || desc->nb_components == 2)
  return FF_COLOR_GRAY;
@@ -94,7 +94,7 @@ typedef struct Indeo3DecodeContext {
  int16_t width, height;
  uint32_t frame_num; ///< current frame number (zero-based)
- uint32_t data_size; ///< size of the frame data in bytes
+ int data_size; ///< size of the frame data in bytes
  uint16_t frame_flags; ///< frame properties
  uint8_t cb_offset; ///< needed for selecting VQ tables
  uint8_t buf_sel; ///< active frame buffer: 0 - primary, 1 -secondary

@@ -899,7 +899,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
  GetByteContext gb;
  const uint8_t *bs_hdr;
  uint32_t frame_num, word2, check_sum, data_size;
- uint32_t y_offset, u_offset, v_offset, starts[3], ends[3];
+ int y_offset, u_offset, v_offset;
+ uint32_t starts[3], ends[3];
  uint16_t height, width;
  int i, j;

@@ -980,7 +981,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
  ctx->y_data_size = ends[0] - starts[0];
  ctx->v_data_size = ends[1] - starts[1];
  ctx->u_data_size = ends[2] - starts[2];
- if (FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
+ if (FFMIN3(y_offset, v_offset, u_offset) < 0 ||
+ FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
  FFMIN3(y_offset, v_offset, u_offset) < gb.buffer - bs_hdr + 16 ||
  FFMIN3(ctx->y_data_size, ctx->v_data_size, ctx->u_data_size) <= 0) {
  av_log(avctx, AV_LOG_ERROR, "One of the y/u/v offsets is invalid\n");
@@ -235,15 +235,15 @@ void ff_ivi_recompose_haar(const IVIPlaneDesc *plane, uint8_t *dst,
  /** butterfly operation for the inverse Haar transform */
  #define IVI_HAAR_BFLY(s1, s2, o1, o2, t) \
- t = (s1 - s2) >> 1;\
+ t = ((s1) - (s2)) >> 1;\
- o1 = (s1 + s2) >> 1;\
+ o1 = ((s1) + (s2)) >> 1;\
- o2 = t;\
+ o2 = (t);\
  /** inverse 8-point Haar transform */
  #define INV_HAAR8(s1, s5, s3, s7, s2, s4, s6, s8,\
  d1, d2, d3, d4, d5, d6, d7, d8,\
  t0, t1, t2, t3, t4, t5, t6, t7, t8) {\
- t1 = s1 << 1; t5 = s5 << 1;\
+ t1 = (s1) << 1; t5 = (s5) << 1;\
  IVI_HAAR_BFLY(t1, t5, t1, t5, t0); IVI_HAAR_BFLY(t1, s3, t1, t3, t0);\
  IVI_HAAR_BFLY(t5, s7, t5, t7, t0); IVI_HAAR_BFLY(t1, s2, t1, t2, t0);\
  IVI_HAAR_BFLY(t3, s4, t3, t4, t0); IVI_HAAR_BFLY(t5, s6, t5, t6, t0);\

@@ -485,21 +485,21 @@ void ff_ivi_dc_haar_2d(const int32_t *in, int16_t *out, uint32_t pitch,
  /** butterfly operation for the inverse slant transform */
  #define IVI_SLANT_BFLY(s1, s2, o1, o2, t) \
- t = s1 - s2;\
+ t = (s1) - (s2);\
- o1 = s1 + s2;\
+ o1 = (s1) + (s2);\
- o2 = t;\
+ o2 = (t);\
  /** This is a reflection a,b = 1/2, 5/4 for the inverse slant transform */
  #define IVI_IREFLECT(s1, s2, o1, o2, t) \
- t = ((s1 + s2*2 + 2) >> 2) + s1;\
+ t = (((s1) + (s2)*2 + 2) >> 2) + (s1);\
- o2 = ((s1*2 - s2 + 2) >> 2) - s2;\
+ o2 = (((s1)*2 - (s2) + 2) >> 2) - (s2);\
- o1 = t;\
+ o1 = (t);\
  /** This is a reflection a,b = 1/2, 7/8 for the inverse slant transform */
  #define IVI_SLANT_PART4(s1, s2, o1, o2, t) \
- t = s2 + ((s1*4 - s2 + 4) >> 3);\
+ t = (s2) + (((s1)*4 - (s2) + 4) >> 3);\
- o2 = s1 + ((-s1 - s2*4 + 4) >> 3);\
+ o2 = (s1) + ((-(s1) - (s2)*4 + 4) >> 3);\
- o1 = t;\
+ o1 = (t);\
  /** inverse slant8 transform */
  #define IVI_INV_SLANT8(s1, s4, s8, s5, s2, s6, s3, s7,\
@@ -557,7 +557,7 @@ void ff_ivi_inverse_slant_8x8(const int32_t *in, int16_t *out, uint32_t pitch, c
  }
  #undef COMPENSATE
- #define COMPENSATE(x) ((x + 1)>>1)
+ #define COMPENSATE(x) (((x) + 1)>>1)
  src = tmp;
  for (i = 0; i < 8; i++) {
  if (!src[0] && !src[1] && !src[2] && !src[3] && !src[4] && !src[5] && !src[6] && !src[7]) {

@@ -597,7 +597,7 @@ void ff_ivi_inverse_slant_4x4(const int32_t *in, int16_t *out, uint32_t pitch, c
  }
  #undef COMPENSATE
- #define COMPENSATE(x) ((x + 1)>>1)
+ #define COMPENSATE(x) (((x) + 1)>>1)
  src = tmp;
  for (i = 0; i < 4; i++) {
  if (!src[0] && !src[1] && !src[2] && !src[3]) {

@@ -631,7 +631,7 @@ void ff_ivi_row_slant8(const int32_t *in, int16_t *out, uint32_t pitch, const ui
  int i;
  int t0, t1, t2, t3, t4, t5, t6, t7, t8;
- #define COMPENSATE(x) ((x + 1)>>1)
+ #define COMPENSATE(x) (((x) + 1)>>1)
  for (i = 0; i < 8; i++) {
  if (!in[0] && !in[1] && !in[2] && !in[3] && !in[4] && !in[5] && !in[6] && !in[7]) {
  memset(out, 0, 8*sizeof(out[0]));

@@ -673,7 +673,7 @@ void ff_ivi_col_slant8(const int32_t *in, int16_t *out, uint32_t pitch, const ui
  row4 = pitch << 2;
  row8 = pitch << 3;
- #define COMPENSATE(x) ((x + 1)>>1)
+ #define COMPENSATE(x) (((x) + 1)>>1)
  for (i = 0; i < 8; i++) {
  if (flags[i]) {
  IVI_INV_SLANT8(in[0], in[8], in[16], in[24], in[32], in[40], in[48], in[56],

@@ -710,7 +710,7 @@ void ff_ivi_row_slant4(const int32_t *in, int16_t *out, uint32_t pitch, const ui
  int i;
  int t0, t1, t2, t3, t4;
- #define COMPENSATE(x) ((x + 1)>>1)
+ #define COMPENSATE(x) (((x) + 1)>>1)
  for (i = 0; i < 4; i++) {
  if (!in[0] && !in[1] && !in[2] && !in[3]) {
  memset(out, 0, 4*sizeof(out[0]));

@@ -732,7 +732,7 @@ void ff_ivi_col_slant4(const int32_t *in, int16_t *out, uint32_t pitch, const ui
  row2 = pitch << 1;
- #define COMPENSATE(x) ((x + 1)>>1)
+ #define COMPENSATE(x) (((x) + 1)>>1)
  for (i = 0; i < 4; i++) {
  if (flags[i]) {
  IVI_INV_SLANT4(in[0], in[4], in[8], in[12],
@@ -802,7 +802,7 @@ static void truncpasses(Jpeg2000EncoderContext *s, Jpeg2000Tile *tile)
  Jpeg2000Cblk *cblk = prec->cblk + cblkno;
  cblk->ninclpasses = getcut(cblk, s->lambda,
- (int64_t)dwt_norms[codsty->transform == FF_DWT53][bandpos][lev] * (int64_t)band->i_stepsize >> 16);
+ (int64_t)dwt_norms[codsty->transform == FF_DWT53][bandpos][lev] * (int64_t)band->i_stepsize >> 15);
  }
  }
  }

@@ -863,7 +863,7 @@ static int encode_tile(Jpeg2000EncoderContext *s, Jpeg2000Tile *tile, int tileno
  int *ptr = t1.data[y-yy0];
  for (x = xx0; x < xx1; x++){
  *ptr = (comp->i_data[(comp->coord[0][1] - comp->coord[0][0]) * y + x]);
- *ptr = (int64_t)*ptr * (int64_t)(16384 * 65536 / band->i_stepsize) >> 14 - NMSEDEC_FRACBITS;
+ *ptr = (int64_t)*ptr * (int64_t)(16384 * 65536 / band->i_stepsize) >> 15 - NMSEDEC_FRACBITS;
  ptr++;
  }
  }

@@ -1016,7 +1016,7 @@ static av_cold int j2kenc_init(AVCodecContext *avctx)
  }
  ff_jpeg2000_init_tier1_luts();
+ ff_mqc_init_context_tables();
  init_luts();
  init_quantization(s);
@@ -272,7 +272,7 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
  reslevel->log2_prec_height) -
  (reslevel->coord[1][0] >> reslevel->log2_prec_height);
- reslevel->band = av_malloc_array(reslevel->nbands, sizeof(*reslevel->band));
+ reslevel->band = av_calloc(reslevel->nbands, sizeof(*reslevel->band));
  if (!reslevel->band)
  return AVERROR(ENOMEM);

@@ -320,7 +320,7 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
  if (!av_codec_is_encoder(avctx->codec))
  band->f_stepsize *= 0.5;
- band->i_stepsize = band->f_stepsize * (1 << 16);
+ band->i_stepsize = band->f_stepsize * (1 << 15);
  /* computation of tbx_0, tbx_1, tby_0, tby_1
  * see ISO/IEC 15444-1:2002 B.5 eq. B-15 and tbl B.1

@@ -368,7 +368,7 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
  for (j = 0; j < 2; j++)
  band->coord[1][j] = ff_jpeg2000_ceildiv(band->coord[1][j], dy);
- band->prec = av_malloc_array(reslevel->num_precincts_x *
+ band->prec = av_calloc(reslevel->num_precincts_x *
  (uint64_t)reslevel->num_precincts_y,
  sizeof(*band->prec));
  if (!band->prec)

@@ -509,11 +509,13 @@ void ff_jpeg2000_cleanup(Jpeg2000Component *comp, Jpeg2000CodingStyle *codsty)
  for (bandno = 0; bandno < reslevel->nbands; bandno++) {
  Jpeg2000Band *band = reslevel->band + bandno;
  for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++) {
+ if (band->prec) {
  Jpeg2000Prec *prec = band->prec + precno;
  av_freep(&prec->zerobits);
  av_freep(&prec->cblkincl);
  av_freep(&prec->cblk);
  }
+ }
  av_freep(&band->prec);
  }
@@ -28,6 +28,7 @@
  #include "libavutil/avassert.h"
  #include "libavutil/common.h"
  #include "libavutil/opt.h"
+ #include "libavutil/pixdesc.h"
  #include "avcodec.h"
  #include "bytestream.h"
  #include "internal.h"
@@ -37,6 +38,7 @@
  #define JP2_SIG_TYPE 0x6A502020
  #define JP2_SIG_VALUE 0x0D0A870A
  #define JP2_CODESTREAM 0x6A703263
+ #define JP2_HEADER 0x6A703268
  #define HAD_COC 0x01
  #define HAD_QCC 0x02
@@ -72,6 +74,10 @@ typedef struct Jpeg2000DecoderContext {
  int cdx[4], cdy[4];
  int precision;
  int ncomponents;
+ int colour_space;
+ uint32_t palette[256];
+ int8_t pal8;
+ int cdef[4];
  int tile_width, tile_height;
  unsigned numXtiles, numYtiles;
  int maxtilelen;
@@ -154,12 +160,74 @@ static int tag_tree_decode(Jpeg2000DecoderContext *s, Jpeg2000TgtNode *node,
  return curval;
  }
+ static int pix_fmt_match(enum AVPixelFormat pix_fmt, int components,
+ int bpc, uint32_t log2_chroma_wh, int pal8)
+ {
+ int match = 1;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+ if (desc->nb_components != components) {
+ return 0;
+ }
+ switch (components) {
+ case 4:
+ match = match && desc->comp[3].depth_minus1 + 1 >= bpc &&
+ (log2_chroma_wh >> 14 & 3) == 0 &&
+ (log2_chroma_wh >> 12 & 3) == 0;
+ case 3:
+ match = match && desc->comp[2].depth_minus1 + 1 >= bpc &&
+ (log2_chroma_wh >> 10 & 3) == desc->log2_chroma_w &&
+ (log2_chroma_wh >> 8 & 3) == desc->log2_chroma_h;
+ case 2:
+ match = match && desc->comp[1].depth_minus1 + 1 >= bpc &&
+ (log2_chroma_wh >> 6 & 3) == desc->log2_chroma_w &&
+ (log2_chroma_wh >> 4 & 3) == desc->log2_chroma_h;
+ case 1:
+ match = match && desc->comp[0].depth_minus1 + 1 >= bpc &&
+ (log2_chroma_wh >> 2 & 3) == 0 &&
+ (log2_chroma_wh & 3) == 0 &&
+ (desc->flags & AV_PIX_FMT_FLAG_PAL) == pal8 * AV_PIX_FMT_FLAG_PAL;
+ }
+ return match;
+ }
+ // pix_fmts with lower bpp have to be listed before
+ // similar pix_fmts with higher bpp.
+ #define RGB_PIXEL_FORMATS AV_PIX_FMT_PAL8,AV_PIX_FMT_RGB24,AV_PIX_FMT_RGBA,AV_PIX_FMT_RGB48,AV_PIX_FMT_RGBA64
+ #define GRAY_PIXEL_FORMATS AV_PIX_FMT_GRAY8,AV_PIX_FMT_GRAY8A,AV_PIX_FMT_GRAY16
+ #define YUV_PIXEL_FORMATS AV_PIX_FMT_YUV410P,AV_PIX_FMT_YUV411P,AV_PIX_FMT_YUVA420P, \
+ AV_PIX_FMT_YUV420P,AV_PIX_FMT_YUV422P,AV_PIX_FMT_YUVA422P, \
+ AV_PIX_FMT_YUV440P,AV_PIX_FMT_YUV444P,AV_PIX_FMT_YUVA444P, \
+ AV_PIX_FMT_YUV420P9,AV_PIX_FMT_YUV422P9,AV_PIX_FMT_YUV444P9, \
+ AV_PIX_FMT_YUVA420P9,AV_PIX_FMT_YUVA422P9,AV_PIX_FMT_YUVA444P9, \
+ AV_PIX_FMT_YUV420P10,AV_PIX_FMT_YUV422P10,AV_PIX_FMT_YUV444P10, \
+ AV_PIX_FMT_YUVA420P10,AV_PIX_FMT_YUVA422P10,AV_PIX_FMT_YUVA444P10, \
+ AV_PIX_FMT_YUV420P12,AV_PIX_FMT_YUV422P12,AV_PIX_FMT_YUV444P12, \
+ AV_PIX_FMT_YUV420P14,AV_PIX_FMT_YUV422P14,AV_PIX_FMT_YUV444P14, \
+ AV_PIX_FMT_YUV420P16,AV_PIX_FMT_YUV422P16,AV_PIX_FMT_YUV444P16, \
+ AV_PIX_FMT_YUVA420P16,AV_PIX_FMT_YUVA422P16,AV_PIX_FMT_YUVA444P16
+ #define XYZ_PIXEL_FORMATS AV_PIX_FMT_XYZ12
+ static const enum AVPixelFormat rgb_pix_fmts[] = {RGB_PIXEL_FORMATS};
+ static const enum AVPixelFormat gray_pix_fmts[] = {GRAY_PIXEL_FORMATS};
+ static const enum AVPixelFormat yuv_pix_fmts[] = {YUV_PIXEL_FORMATS};
+ static const enum AVPixelFormat xyz_pix_fmts[] = {XYZ_PIXEL_FORMATS};
+ static const enum AVPixelFormat all_pix_fmts[] = {RGB_PIXEL_FORMATS,
+ GRAY_PIXEL_FORMATS,
+ YUV_PIXEL_FORMATS,
+ XYZ_PIXEL_FORMATS};
  /* marker segments */
  /* get sizes and offsets of image, tiles; number of components */
  static int get_siz(Jpeg2000DecoderContext *s)
  {
  int i;
  int ncomponents;
+ uint32_t log2_chroma_wh = 0;
+ const enum AVPixelFormat *possible_fmts = NULL;
+ int possible_fmts_nb = 0;
  if (bytestream2_get_bytes_left(&s->g) < 36)
  return AVERROR_INVALIDDATA;
@@ -175,6 +243,11 @@ static int get_siz(Jpeg2000DecoderContext *s)
  s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz
  ncomponents = bytestream2_get_be16u(&s->g); // CSiz
+ if (s->image_offset_x || s->image_offset_y) {
+ avpriv_request_sample(s->avctx, "Support for image offsets");
+ return AVERROR_PATCHWELCOME;
+ }
  if (ncomponents <= 0) {
  av_log(s->avctx, AV_LOG_ERROR, "Invalid number of components: %d\n",
  s->ncomponents);
@@ -205,13 +278,12 @@ static int get_siz(Jpeg2000DecoderContext *s)
  s->sgnd[i] = !!(x & 0x80);
  s->cdx[i] = bytestream2_get_byteu(&s->g);
  s->cdy[i] = bytestream2_get_byteu(&s->g);
- if (s->cdx[i] != 1 || s->cdy[i] != 1) {
+ if ( !s->cdx[i] || s->cdx[i] == 3 || s->cdx[i] > 4
- avpriv_request_sample(s->avctx,
+ || !s->cdy[i] || s->cdy[i] == 3 || s->cdy[i] > 4) {
- "CDxy values %d %d for component %d",
+ av_log(s->avctx, AV_LOG_ERROR, "Invalid sample seperation\n");
- s->cdx[i], s->cdy[i], i);
- if (!s->cdx[i] || !s->cdy[i])
  return AVERROR_INVALIDDATA;
  }
+ log2_chroma_wh |= s->cdy[i] >> 1 << i * 4 | s->cdx[i] >> 1 << i * 4 + 2;
  }
  s->numXtiles = ff_jpeg2000_ceildiv(s->width - s->tile_offset_x, s->tile_width);
@@ -242,35 +314,47 @@ static int get_siz(Jpeg2000DecoderContext *s)
  s->avctx->height = ff_jpeg2000_ceildivpow2(s->height - s->image_offset_y,
  s->reduction_factor);
- switch (s->ncomponents) {
+ if (s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_2K ||
- case 1:
+ s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_4K) {
- if (s->precision > 8)
+ possible_fmts = xyz_pix_fmts;
- s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
+ possible_fmts_nb = FF_ARRAY_ELEMS(xyz_pix_fmts);
- else
+ } else {
- s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
+ switch (s->colour_space) {
+ case 16:
+ possible_fmts = rgb_pix_fmts;
+ possible_fmts_nb = FF_ARRAY_ELEMS(rgb_pix_fmts);
  break;
- case 3:
+ case 17:
- switch (s->avctx->profile) {
+ possible_fmts = gray_pix_fmts;
- case FF_PROFILE_JPEG2000_DCINEMA_2K:
+ possible_fmts_nb = FF_ARRAY_ELEMS(gray_pix_fmts);
- case FF_PROFILE_JPEG2000_DCINEMA_4K:
+ break;
- /* XYZ color-space for digital cinema profiles */
+ case 18:
- s->avctx->pix_fmt = AV_PIX_FMT_XYZ12;
+ possible_fmts = yuv_pix_fmts;
+ possible_fmts_nb = FF_ARRAY_ELEMS(yuv_pix_fmts);
  break;
  default:
- if (s->precision > 8)
+ possible_fmts = all_pix_fmts;
- s->avctx->pix_fmt = AV_PIX_FMT_RGB48;
+ possible_fmts_nb = FF_ARRAY_ELEMS(all_pix_fmts);
- else
- s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
  break;
  }
+ }
+ for (i = 0; i < possible_fmts_nb; ++i) {
+ if (pix_fmt_match(possible_fmts[i], ncomponents, s->precision, log2_chroma_wh, s->pal8)) {
+ s->avctx->pix_fmt = possible_fmts[i];
  break;
- case 4:
+ }
- s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
+ }
- break;
+ if (i == possible_fmts_nb) {
- default:
+ av_log(s->avctx, AV_LOG_ERROR,
- /* pixel format can not be identified */
+ "Unknown pix_fmt, profile: %d, colour_space: %d, "
- s->avctx->pix_fmt = AV_PIX_FMT_NONE;
+ "components: %d, precision: %d, "
- break;
+ "cdx[1]: %d, cdy[1]: %d, cdx[2]: %d, cdy[2]: %d\n",
+ s->avctx->profile, s->colour_space, ncomponents, s->precision,
+ ncomponents > 2 ? s->cdx[1] : 0,
+ ncomponents > 2 ? s->cdy[1] : 0,
+ ncomponents > 2 ? s->cdx[2] : 0,
+ ncomponents > 2 ? s->cdy[2] : 0);
+ return AVERROR_PATCHWELCOME;
  }
  return 0;
  }
@@ -291,10 +375,17 @@ static int get_cox(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c)
  return AVERROR_INVALIDDATA;
  }
+ if (c->nreslevels <= s->reduction_factor) {
+ /* we are forced to update reduction_factor as its requested value is
+ not compatible with this bitstream, and as we might have used it
+ already in setup earlier we have to fail this frame until
+ reinitialization is implemented */
+ av_log(s->avctx, AV_LOG_ERROR, "reduction_factor too large for this bitstream, max is %d\n", c->nreslevels - 1);
+ s->reduction_factor = c->nreslevels - 1;
+ return AVERROR(EINVAL);
+ }
  /* compute number of resolution levels to decode */
- if (c->nreslevels < s->reduction_factor)
- c->nreslevels2decode = 1;
- else
  c->nreslevels2decode = c->nreslevels - s->reduction_factor;
  c->log2_cblk_width = (bytestream2_get_byteu(&s->g) & 15) + 2; // cblk width
@@ -306,6 +397,11 @@ static int get_cox(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c)
  return AVERROR_INVALIDDATA;
  }
+ if (c->log2_cblk_width > 6 || c->log2_cblk_height > 6) {
+ avpriv_request_sample(s->avctx, "cblk size > 64");
+ return AVERROR_PATCHWELCOME;
+ }
  c->cblk_style = bytestream2_get_byteu(&s->g);
  if (c->cblk_style != 0) { // cblk style
  av_log(s->avctx, AV_LOG_WARNING, "extra cblk styles %X\n", c->cblk_style);
@@ -789,6 +885,10 @@ static int jpeg2000_decode_packets(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile
  prcx = ff_jpeg2000_ceildivpow2(x, reducedresno) >> rlevel->log2_prec_width;
  prcy = ff_jpeg2000_ceildivpow2(y, reducedresno) >> rlevel->log2_prec_height;
  precno = prcx + rlevel->num_precincts_x * prcy;
+ if (prcx >= rlevel->num_precincts_x || prcy >= rlevel->num_precincts_y)
+ return AVERROR_PATCHWELCOME;
  for (layno = 0; layno < tile->codsty[0].nlayers; layno++) {
  if ((ret = jpeg2000_decode_packet(s, codsty, rlevel,
  precno, layno,
@@ -947,6 +1047,9 @@ static int decode_cblk(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *codsty,
  int bpass_csty_symbol = codsty->cblk_style & JPEG2000_CBLK_BYPASS;
  int vert_causal_ctx_csty_symbol = codsty->cblk_style & JPEG2000_CBLK_VSC;
+ av_assert0(width <= JPEG2000_MAX_CBLKW);
+ av_assert0(height <= JPEG2000_MAX_CBLKH);
  for (y = 0; y < height; y++)
  memset(t1->data[y], 0, width * sizeof(**t1->data));
@@ -1024,7 +1127,7 @@ static void dequantization_int(int x, int y, Jpeg2000Cblk *cblk,
         int32_t *datap = &comp->i_data[(comp->coord[0][1] - comp->coord[0][0]) * (y + j) + x];
         int *src = t1->data[j];
         for (i = 0; i < w; ++i)
-            datap[i] = (src[i] * band->i_stepsize + (1 << 15)) >> 16;
+            datap[i] = (src[i] * band->i_stepsize + (1 << 14)) >> 15;
     }
 }
 
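The dequantization change above keeps round-to-nearest behaviour while using one fewer fractional bit: with a fixed-point step size carrying N fractional bits, adding 1 << (N-1) before shifting right by N rounds to the nearest integer instead of truncating. A minimal sketch of that pattern, assuming a 15-bit fractional step (the names are illustrative, not from the tree):

    #include <stdint.h>

    #define FRAC_BITS 15

    /* Round-to-nearest fixed-point multiply: step carries FRAC_BITS fractional bits. */
    static inline int32_t dequant_round(int32_t coeff, int32_t step)
    {
        /* adding half of the LSB weight before the shift rounds instead of truncating */
        return (coeff * step + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
    }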
@@ -1049,6 +1152,17 @@ static void mct_decode(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile)
     int32_t *src[3], i0, i1, i2;
     float *srcf[3], i0f, i1f, i2f;
 
+    for (i = 1; i < 3; i++) {
+        if (tile->codsty[0].transform != tile->codsty[i].transform) {
+            av_log(s->avctx, AV_LOG_ERROR, "Transforms mismatch, MCT not supported\n");
+            return;
+        }
+        if (memcmp(tile->comp[0].coord, tile->comp[i].coord, sizeof(tile->comp[0].coord))) {
+            av_log(s->avctx, AV_LOG_ERROR, "Coords mismatch, MCT not supported\n");
+            return;
+        }
+    }
+
     for (i = 0; i < 3; i++)
         if (tile->codsty[0].transform == FF_DWT97)
             srcf[i] = tile->comp[i].f_data;
@@ -1157,6 +1271,13 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
     if (tile->codsty[0].mct)
         mct_decode(s, tile);
 
+    if (s->cdef[0] < 0) {
+        for (x = 0; x < s->ncomponents; x++)
+            s->cdef[x] = x + 1;
+        if ((s->ncomponents & 1) == 0)
+            s->cdef[s->ncomponents-1] = 0;
+    }
+
     if (s->precision <= 8) {
         for (compno = 0; compno < s->ncomponents; compno++) {
             Jpeg2000Component *comp = tile->comp + compno;
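For reference, the fallback installed above when no usable channel-definition data was parsed simply associates component x with channel x + 1, and for an even component count it maps the last component to channel 0. A small standalone sketch of that mapping (illustrative only; for 3 components it yields {1, 2, 3}, for 4 components {1, 2, 3, 0}):

    /* Default channel associations when no 'cdef' box was parsed. */
    static void default_cdef(int cdef[4], int ncomponents)
    {
        for (int x = 0; x < ncomponents; x++)
            cdef[x] = x + 1;
        if ((ncomponents & 1) == 0)
            cdef[ncomponents - 1] = 0;
    }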
@@ -1165,14 +1286,21 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
             int32_t *i_datap = comp->i_data;
             int cbps = s->cbps[compno];
             int w = tile->comp[compno].coord[0][1] - s->image_offset_x;
+            int planar = !!picture->data[2];
+            int pixelsize = planar ? 1 : s->ncomponents;
+            int plane = 0;
+
+            if (planar)
+                plane = s->cdef[compno] ? s->cdef[compno]-1 : (s->ncomponents-1);
+
 
             y = tile->comp[compno].coord[1][0] - s->image_offset_y;
-            line = picture->data[0] + y * picture->linesize[0];
+            line = picture->data[plane] + y / s->cdy[compno] * picture->linesize[plane];
             for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) {
                 uint8_t *dst;
 
                 x = tile->comp[compno].coord[0][0] - s->image_offset_x;
-                dst = line + x * s->ncomponents + compno;
+                dst = line + x / s->cdx[compno] * pixelsize + compno*!planar;
 
                 if (codsty->transform == FF_DWT97) {
                     for (; x < w; x += s->cdx[compno]) {
@@ -1181,7 +1309,7 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
                         val = av_clip(val, 0, (1 << cbps) - 1);
                         *dst = val << (8 - cbps);
                         datap++;
-                        dst += s->ncomponents;
+                        dst += pixelsize;
                     }
                 } else {
                     for (; x < w; x += s->cdx[compno]) {
@@ -1190,10 +1318,10 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
                         val = av_clip(val, 0, (1 << cbps) - 1);
                         *dst = val << (8 - cbps);
                         i_datap++;
-                        dst += s->ncomponents;
+                        dst += pixelsize;
                     }
                 }
-                line += picture->linesize[0];
+                line += picture->linesize[plane];
             }
         }
     } else {
@@ -1205,14 +1333,20 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
             uint16_t *linel;
             int cbps = s->cbps[compno];
             int w = tile->comp[compno].coord[0][1] - s->image_offset_x;
+            int planar = !!picture->data[2];
+            int pixelsize = planar ? 1 : s->ncomponents;
+            int plane = 0;
+
+            if (planar)
+                plane = s->cdef[compno] ? s->cdef[compno]-1 : (s->ncomponents-1);
 
             y = tile->comp[compno].coord[1][0] - s->image_offset_y;
-            linel = (uint16_t *)picture->data[0] + y * (picture->linesize[0] >> 1);
+            linel = (uint16_t *)picture->data[plane] + y / s->cdy[compno] * (picture->linesize[plane] >> 1);
             for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) {
                 uint16_t *dst;
 
                 x = tile->comp[compno].coord[0][0] - s->image_offset_x;
-                dst = linel + (x * s->ncomponents + compno);
+                dst = linel + (x / s->cdx[compno] * pixelsize + compno*!planar);
                 if (codsty->transform == FF_DWT97) {
                     for (; x < w; x += s-> cdx[compno]) {
                         int val = lrintf(*datap) + (1 << (cbps - 1));
@@ -1221,7 +1355,7 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
                         /* align 12 bit values in little-endian mode */
                         *dst = val << (16 - cbps);
                         datap++;
-                        dst += s->ncomponents;
+                        dst += pixelsize;
                     }
                 } else {
                     for (; x < w; x += s-> cdx[compno]) {
@@ -1231,10 +1365,10 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
                         /* align 12 bit values in little-endian mode */
                         *dst = val << (16 - cbps);
                         i_datap++;
-                        dst += s->ncomponents;
+                        dst += pixelsize;
                     }
                 }
-                linel += picture->linesize[0] >> 1;
+                linel += picture->linesize[plane] >> 1;
             }
         }
     }
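The two output loops above now handle both packed and planar destinations: when picture->data[2] is set the frame is planar, each component goes to its own plane (chosen through the cdef mapping) with a per-sample stride of 1 and subsampled coordinates, otherwise samples stay interleaved with a stride of ncomponents and a fixed offset of compno inside each pixel. A compact sketch of that addressing, with illustrative names only, is:

    #include <stdint.h>

    /* Destination of sample (x, y) of component 'comp' in an 8-bit frame.
     * 'planar' selects one plane per component; otherwise components interleave. */
    static uint8_t *dst_addr(uint8_t *data[4], const int linesize[4],
                             int planar, int plane, int ncomp, int comp,
                             int x, int y, int cdx, int cdy)
    {
        int pixelsize = planar ? 1 : ncomp;   /* distance between two samples of this component */
        int p         = planar ? plane : 0;
        uint8_t *line = data[p] + (y / cdy) * linesize[p];
        return line + (x / cdx) * pixelsize + (planar ? 0 : comp);
    }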
@@ -1386,6 +1520,104 @@ static int jp2_find_codestream(Jpeg2000DecoderContext *s)
         atom = bytestream2_get_be32u(&s->g);
         if (atom == JP2_CODESTREAM) {
             found_codestream = 1;
+        } else if (atom == JP2_HEADER &&
+                   bytestream2_get_bytes_left(&s->g) >= atom_size &&
+                   atom_size >= 16) {
+            uint32_t atom2_size, atom2;
+            atom_size -= 8;
+            do {
+                atom2_size = bytestream2_get_be32u(&s->g);
+                atom2      = bytestream2_get_be32u(&s->g);
+                atom_size -= 8;
+                if (atom2_size < 8 || atom2_size - 8 > atom_size)
+                    break;
+                atom2_size -= 8;
+                if (atom2 == JP2_CODESTREAM) {
+                    return 1;
+                } else if (atom2 == MKBETAG('c','o','l','r') && atom2_size >= 7) {
+                    int method = bytestream2_get_byteu(&s->g);
+                    bytestream2_skipu(&s->g, 2);
+                    atom_size  -= 3;
+                    atom2_size -= 3;
+                    if (method == 1) {
+                        s->colour_space = bytestream2_get_be32u(&s->g);
+                        atom_size  -= 4;
+                        atom2_size -= 4;
+                    }
+                    bytestream2_skipu(&s->g, atom2_size);
+                    atom_size -= atom2_size;
+                } else if (atom2 == MKBETAG('p','c','l','r') && atom2_size >= 6) {
+                    int i, size, colour_count, colour_channels, colour_depth[3];
+                    uint32_t r, g, b;
+                    colour_count = bytestream2_get_be16u(&s->g);
+                    colour_channels = bytestream2_get_byteu(&s->g);
+                    // FIXME: Do not ignore channel_sign
+                    colour_depth[0] = (bytestream2_get_byteu(&s->g) & 0x7f) + 1;
+                    colour_depth[1] = (bytestream2_get_byteu(&s->g) & 0x7f) + 1;
+                    colour_depth[2] = (bytestream2_get_byteu(&s->g) & 0x7f) + 1;
+                    atom_size  -= 6;
+                    atom2_size -= 6;
+                    size = (colour_depth[0] + 7 >> 3) * colour_count +
+                           (colour_depth[1] + 7 >> 3) * colour_count +
+                           (colour_depth[2] + 7 >> 3) * colour_count;
+                    if (colour_count > 256   ||
+                        colour_channels != 3 ||
+                        colour_depth[0] > 16 ||
+                        colour_depth[1] > 16 ||
+                        colour_depth[2] > 16 ||
+                        atom2_size < size) {
+                        avpriv_request_sample(s->avctx, "Unknown palette");
+                        bytestream2_skipu(&s->g, atom2_size);
+                        atom_size -= atom2_size;
+                        continue;
+                    }
+                    s->pal8 = 1;
+                    for (i = 0; i < colour_count; i++) {
+                        if (colour_depth[0] <= 8) {
+                            r = bytestream2_get_byteu(&s->g) << 8 - colour_depth[0];
+                            r |= r >> colour_depth[0];
+                        } else {
+                            r = bytestream2_get_be16u(&s->g) >> colour_depth[0] - 8;
+                        }
+                        if (colour_depth[1] <= 8) {
+                            g = bytestream2_get_byteu(&s->g) << 8 - colour_depth[1];
+                            r |= r >> colour_depth[1];
+                        } else {
+                            g = bytestream2_get_be16u(&s->g) >> colour_depth[1] - 8;
+                        }
+                        if (colour_depth[2] <= 8) {
+                            b = bytestream2_get_byteu(&s->g) << 8 - colour_depth[2];
+                            r |= r >> colour_depth[2];
+                        } else {
+                            b = bytestream2_get_be16u(&s->g) >> colour_depth[2] - 8;
+                        }
+                        s->palette[i] = 0xffu << 24 | r << 16 | g << 8 | b;
+                    }
+                    atom_size  -= size;
+                    atom2_size -= size;
+                    bytestream2_skipu(&s->g, atom2_size);
+                    atom_size -= atom2_size;
+                } else if (atom2 == MKBETAG('c','d','e','f') && atom2_size >= 2 &&
+                           bytestream2_get_bytes_left(&s->g) >= atom2_size) {
+                    int n = bytestream2_get_be16u(&s->g);
+                    atom_size  -= 2;
+                    atom2_size -= 2;
+                    for (; n>0; n--) {
+                        int cn   = bytestream2_get_be16(&s->g);
+                        int av_unused typ = bytestream2_get_be16(&s->g);
+                        int asoc = bytestream2_get_be16(&s->g);
+                        if (cn < 4 && asoc < 4)
+                            s->cdef[cn] = asoc;
+                        atom_size  -= 6;
+                        atom2_size -= 6;
+                    }
+                    bytestream2_skipu(&s->g, atom2_size);
+                } else {
+                    bytestream2_skipu(&s->g, atom2_size);
+                    atom_size -= atom2_size;
+                }
+            } while (atom_size >= 8);
+            bytestream2_skipu(&s->g, atom_size);
         } else {
             if (bytestream2_get_bytes_left(&s->g) < atom_size - 8)
                 return 0;
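One detail of the 'pclr' parsing above: palette entries stored with fewer than 8 bits per channel are widened by shifting the value into the high bits and OR-ing it back in, which replicates the high bits into the freed low bits so that, for example, a 4-bit maximum of 0xF maps to 0xFF rather than 0xF0. A standalone sketch of that widening (a hypothetical helper, not a function from the tree):

    #include <stdint.h>

    /* Widen a channel value stored with 'depth' (<= 8) bits to the 8-bit range
     * by bit replication, e.g. 0xF stored with 4 bits becomes 0xFF instead of 0xF0. */
    static uint8_t widen_to_8bit(uint32_t v, int depth)
    {
        v <<= 8 - depth;     /* move the value into the high bits */
        v  |= v >> depth;    /* replicate the high bits into the low bits */
        return (uint8_t)v;
    }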
@@ -1410,6 +1642,7 @@ static int jpeg2000_decode_frame(AVCodecContext *avctx, void *data,
     s->avctx = avctx;
     bytestream2_init(&s->g, avpkt->data, avpkt->size);
     s->curtileno = -1;
+    memset(s->cdef, -1, sizeof(s->cdef));
 
     if (bytestream2_get_bytes_left(&s->g) < 2) {
         ret = AVERROR_INVALIDDATA;
@@ -1456,6 +1689,9 @@ static int jpeg2000_decode_frame(AVCodecContext *avctx, void *data,
 
     *got_frame = 1;
 
+    if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
+        memcpy(picture->data[1], s->palette, 256 * sizeof(uint32_t));
+
     return bytestream2_tell(&s->g);
 
 end:
@@ -1466,6 +1702,7 @@ end:
 static void jpeg2000_init_static_data(AVCodec *codec)
 {
     ff_jpeg2000_init_tier1_luts();
+    ff_mqc_init_context_tables();
 }
 
 #define OFFSET(x) offsetof(Jpeg2000DecoderContext, x)
@@ -148,6 +148,8 @@ static inline int ls_get_code_runterm(GetBitContext *gb, JLSState *state,
         ret = ret >> 1;
     }
 
+    if(FFABS(ret) > 0xFFFF)
+        return -0x10000;
     /* update state */
     state->A[Q] += FFABS(ret) - RItype;
     ret *= state->twonear;
@@ -215,6 +217,11 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s,
                 x += stride;
             }
 
+            if (x >= w) {
+                av_log(NULL, AV_LOG_ERROR, "run overflow\n");
+                return;
+            }
+
             /* decode run termination value */
             Rb = R(last, x);
             RItype = (FFABS(Ra - Rb) <= state->near) ? 1 : 0;
@@ -107,7 +107,7 @@ static int kmvc_decode_intra_8x8(KmvcContext * ctx, int w, int h)
                     val = bytestream2_get_byte(&ctx->g);
                     mx = val & 0xF;
                     my = val >> 4;
-                    if ((l0x-mx) + 320*(l0y-my) < 0 || (l0x-mx) + 320*(l0y-my) > 316*196) {
+                    if ((l0x-mx) + 320*(l0y-my) < 0 || (l0x-mx) + 320*(l0y-my) > 320*197 - 4) {
                         av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
                         return AVERROR_INVALIDDATA;
                     }
@@ -132,7 +132,7 @@ static int kmvc_decode_intra_8x8(KmvcContext * ctx, int w, int h)
                             val = bytestream2_get_byte(&ctx->g);
                             mx = val & 0xF;
                             my = val >> 4;
-                            if ((l1x-mx) + 320*(l1y-my) < 0 || (l1x-mx) + 320*(l1y-my) > 318*198) {
+                            if ((l1x-mx) + 320*(l1y-my) < 0 || (l1x-mx) + 320*(l1y-my) > 320*199 - 2) {
                                 av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
                                 return AVERROR_INVALIDDATA;
                             }
@@ -207,7 +207,7 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, int w, int h)
                     val = bytestream2_get_byte(&ctx->g);
                     mx = (val & 0xF) - 8;
                     my = (val >> 4) - 8;
-                    if ((l0x+mx) + 320*(l0y+my) < 0 || (l0x+mx) + 320*(l0y+my) > 318*198) {
+                    if ((l0x+mx) + 320*(l0y+my) < 0 || (l0x+mx) + 320*(l0y+my) > 320*197 - 4) {
                         av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
                         return AVERROR_INVALIDDATA;
                     }
@@ -232,7 +232,7 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, int w, int h)
                             val = bytestream2_get_byte(&ctx->g);
                             mx = (val & 0xF) - 8;
                             my = (val >> 4) - 8;
-                            if ((l1x+mx) + 320*(l1y+my) < 0 || (l1x+mx) + 320*(l1y+my) > 318*198) {
+                            if ((l1x+mx) + 320*(l1y+my) < 0 || (l1x+mx) + 320*(l1y+my) > 320*199 - 2) {
                                 av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
                                 return AVERROR_INVALIDDATA;
                             }
@@ -96,8 +96,7 @@ static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
     if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
         return ret;
 
-    WebRtcIlbcfix_DecodeImpl((WebRtc_Word16*) frame->data[0],
-                             (const WebRtc_UWord16*) buf, &s->decoder, 1);
+    WebRtcIlbcfix_DecodeImpl((int16_t *) frame->data[0], (const uint16_t *) buf, &s->decoder, 1);
 
     *got_frame_ptr = 1;
 
@@ -170,7 +169,7 @@ static int ilbc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
     if ((ret = ff_alloc_packet2(avctx, avpkt, 50)) < 0)
         return ret;
 
-    WebRtcIlbcfix_EncodeImpl((WebRtc_UWord16*) avpkt->data, (const WebRtc_Word16*) frame->data[0], &s->encoder);
+    WebRtcIlbcfix_EncodeImpl((uint16_t *) avpkt->data, (const int16_t *) frame->data[0], &s->encoder);
 
     avpkt->size = s->encoder.no_of_bytes;
     *got_packet_ptr = 1;
@@ -380,7 +380,7 @@ static const AVOption libopus_options[] = {
     { "voip", "Favor improved speech intelligibility", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_VOIP }, 0, 0, FLAGS, "application" },
     { "audio", "Favor faithfulness to the input", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_AUDIO }, 0, 0, FLAGS, "application" },
     { "lowdelay", "Restrict to only the lowest delay modes", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_RESTRICTED_LOWDELAY }, 0, 0, FLAGS, "application" },
-    { "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 10.0 }, 2.5, 60.0, FLAGS },
+    { "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 20.0 }, 2.5, 60.0, FLAGS },
     { "packet_loss", "Expected packet loss percentage", OFFSET(packet_loss), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 100, FLAGS },
     { "vbr", "Variable bit rate mode", OFFSET(vbr), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 2, FLAGS, "vbr" },
     { "off", "Use constant bit rate", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "vbr" },
@@ -113,6 +113,8 @@ static int get_stats(AVCodecContext *avctx, int eos)
         // libtheora generates a summary header at the end
         memcpy(h->stats, buf, bytes);
         avctx->stats_out = av_malloc(b64_size);
+        if (!avctx->stats_out)
+            return AVERROR(ENOMEM);
         av_base64_encode(avctx->stats_out, b64_size, h->stats, h->stats_offset);
     }
     return 0;
@@ -351,6 +351,7 @@ static int oggvorbis_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
         avctx->delay = duration;
         av_assert0(!s->afq.remaining_delay);
         s->afq.frames->duration += duration;
+        if (s->afq.frames->pts != AV_NOPTS_VALUE)
         s->afq.frames->pts -= duration;
         s->afq.remaining_samples += duration;
     }
@@ -343,19 +343,6 @@ static av_cold int X264_init(AVCodecContext *avctx)
 
     OPT_STR("level", x4->level);
 
-    if(x4->x264opts){
-        const char *p= x4->x264opts;
-        while(p){
-            char param[256]={0}, val[256]={0};
-            if(sscanf(p, "%255[^:=]=%255[^:]", param, val) == 1){
-                OPT_STR(param, "1");
-            }else
-                OPT_STR(param, val);
-            p= strchr(p, ':');
-            p+=!!p;
-        }
-    }
-
     if (avctx->i_quant_factor > 0)
         x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor);
 
@@ -525,6 +512,19 @@ static av_cold int X264_init(AVCodecContext *avctx)
     if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER)
         x4->params.b_repeat_headers = 0;
 
+    if(x4->x264opts){
+        const char *p= x4->x264opts;
+        while(p){
+            char param[256]={0}, val[256]={0};
+            if(sscanf(p, "%255[^:=]=%255[^:]", param, val) == 1){
+                OPT_STR(param, "1");
+            }else
+                OPT_STR(param, val);
+            p= strchr(p, ':');
+            p+=!!p;
+        }
+    }
+
     if (x4->x264_params) {
         AVDictionary *dict = NULL;
         AVDictionaryEntry *en = NULL;
@@ -20,7 +20,7 @@
  */
 
 #include "libavutil/common.h"
-#include "libavutil/lls.h"
+#include "libavutil/lls2.h"
 
 #define LPC_USE_DOUBLE
 #include "lpc.h"
@@ -208,7 +208,7 @@ int ff_lpc_calc_coefs(LPCContext *s,
     }
 
     if (lpc_type == FF_LPC_TYPE_CHOLESKY) {
-        LLSModel m[2];
+        LLSModel2 m[2];
         LOCAL_ALIGNED(32, double, var, [FFALIGN(MAX_LPC_ORDER+1,4)]);
         double av_uninit(weight);
         memset(var, 0, FFALIGN(MAX_LPC_ORDER+1,4)*sizeof(*var));
@@ -217,7 +217,7 @@ int ff_lpc_calc_coefs(LPCContext *s,
             m[0].coeff[max_order-1][j] = -lpc[max_order-1][j];
 
         for(; pass<lpc_passes; pass++){
-            avpriv_init_lls(&m[pass&1], max_order);
+            avpriv_init_lls2(&m[pass&1], max_order);
 
             weight=0;
             for(i=max_order; i<blocksize; i++){
@@ -238,7 +238,7 @@ int ff_lpc_calc_coefs(LPCContext *s,
 
                 m[pass&1].update_lls(&m[pass&1], var);
             }
-            avpriv_solve_lls(&m[pass&1], 0.001, 0);
+            avpriv_solve_lls2(&m[pass&1], 0.001, 0);
         }
 
         for(i=0; i<max_order; i++){
@@ -84,6 +84,11 @@ static inline int mdec_decode_block_intra(MDECContext *a, int16_t *block, int n)
                     break;
                 } else if (level != 0) {
                     i += run;
+                    if (i > 63) {
+                        av_log(a->avctx, AV_LOG_ERROR,
+                               "ac-tex damaged at %d %d\n", a->mb_x, a->mb_y);
+                        return AVERROR_INVALIDDATA;
+                    }
                     j = scantable[i];
                     level = (level * qscale * quant_matrix[j]) >> 3;
                     level = (level ^ SHOW_SBITS(re, &a->gb, 1)) - SHOW_SBITS(re, &a->gb, 1);
@@ -94,6 +99,11 @@ static inline int mdec_decode_block_intra(MDECContext *a, int16_t *block, int n)
                     UPDATE_CACHE(re, &a->gb);
                     level = SHOW_SBITS(re, &a->gb, 10); SKIP_BITS(re, &a->gb, 10);
                     i += run;
+                    if (i > 63) {
+                        av_log(a->avctx, AV_LOG_ERROR,
+                               "ac-tex damaged at %d %d\n", a->mb_x, a->mb_y);
+                        return AVERROR_INVALIDDATA;
+                    }
                     j = scantable[i];
                     if (level < 0) {
                         level = -level;
@@ -105,10 +115,6 @@ static inline int mdec_decode_block_intra(MDECContext *a, int16_t *block, int n)
                         level = (level - 1) | 1;
                     }
                 }
-                if (i > 63) {
-                    av_log(a->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", a->mb_x, a->mb_y);
-                    return AVERROR_INVALIDDATA;
-                }
 
                 block[j] = level;
             }
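The mdec hunks above move the i > 63 run check ahead of the scantable[i] lookup in both decoding branches; previously the coefficient index was only validated afterwards, so a damaged run length could index past the 64-entry scan table before the error was reported. A minimal sketch of that validate-before-use pattern (illustrative names, not the decoder's own helpers):

    #include <stdint.h>

    /* Validate a decoded run before it is used as a table index. */
    static int store_coeff(int16_t block[64], const uint8_t scantable[64],
                           int *i, int run, int level)
    {
        *i += run;
        if (*i > 63)      /* check first ... */
            return -1;    /* ... so scantable[*i] is never read out of bounds */
        block[scantable[*i]] = level;
        return 0;
    }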
@@ -89,7 +89,7 @@ static void ff_acelp_interpolatef_mips(float *out, const float *in,
             "addu %[p_filter_coeffs_m], %[p_filter_coeffs_m], %[prec] \n\t"
             "madd.s %[v],%[v],%[in_val_m], %[fc_val_m] \n\t"
 
-            : [v] "=&f" (v),[p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
+            : [v] "+&f" (v),[p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
              [p_filter_coeffs_p] "+r" (p_filter_coeffs_p),
              [in_val_p] "=&f" (in_val_p), [in_val_m] "=&f" (in_val_m),
              [fc_val_p] "=&f" (fc_val_p), [fc_val_m] "=&f" (fc_val_m),
@@ -150,7 +150,7 @@ int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
                          s->quant_matrixes[index][s->scantable.permutated[8]]) >> 1;
         av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
                index, s->qscale[index]);
-        len -= 65;
+        len -= 1 + 64 * (1+pr);
     }
     return 0;
 }
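On the DQT accounting above: each quantization table in a DQT segment is one precision/index byte followed by 64 entries that take one byte each at 8-bit precision or two bytes each at 16-bit precision, so the old fixed "len -= 65" under-counted 16-bit tables. A quick check of the arithmetic, assuming pr is the table-precision flag read earlier in the same function (an assumption about code not shown in this excerpt):

    /* Bytes consumed by one DQT table: 1 precision/index byte + 64 entries. */
    static int dqt_table_size(int pr)
    {
        return 1 + 64 * (1 + pr);   /* pr = 0 -> 65 bytes, pr = 1 -> 129 bytes */
    }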
@@ -212,7 +212,7 @@ int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
 
 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
 {
-    int len, nb_components, i, width, height, pix_fmt_id;
+    int len, nb_components, i, width, height, bits, pix_fmt_id;
     int h_count[MAX_COMPONENTS];
     int v_count[MAX_COMPONENTS];
 
@@ -221,14 +221,14 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
 
     /* XXX: verify len field validity */
     len = get_bits(&s->gb, 16);
-    s->bits = get_bits(&s->gb, 8);
+    bits = get_bits(&s->gb, 8);
 
     if (s->pegasus_rct)
-        s->bits = 9;
-    if (s->bits == 9 && !s->pegasus_rct)
+        bits = 9;
+    if (bits == 9 && !s->pegasus_rct)
         s->rct = 1; // FIXME ugly
 
-    if (s->bits != 8 && !s->lossless) {
+    if (bits != 8 && !s->lossless) {
         av_log(s->avctx, AV_LOG_ERROR, "only 8 bits/component accepted\n");
         return -1;
     }
@@ -259,7 +259,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
             return AVERROR_INVALIDDATA;
         }
     }
-    if (s->ls && !(s->bits <= 8 || nb_components == 1)) {
+    if (s->ls && !(bits <= 8 || nb_components == 1)) {
        avpriv_report_missing_feature(s->avctx,
                                      "JPEG-LS that is not <= 8 "
                                      "bits/component or 16-bit gray");
@@ -305,11 +305,13 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
 
     /* if different size, realloc/alloc picture */
     if (   width != s->width || height != s->height
+        || bits != s->bits
         || memcmp(s->h_count, h_count, sizeof(h_count))
         || memcmp(s->v_count, v_count, sizeof(v_count))) {
 
         s->width = width;
         s->height = height;
+        s->bits = bits;
         memcpy(s->h_count, h_count, sizeof(h_count));
         memcpy(s->v_count, v_count, sizeof(v_count));
         s->interlaced = 0;
@@ -441,9 +443,12 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
     }
     if (s->ls) {
         s->upscale_h = s->upscale_v = 0;
-        if (s->nb_components > 1)
+        if (s->nb_components == 3) {
             s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
-        else if (s->bits <= 8)
+        } else if (s->nb_components != 1) {
+            av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
+            return AVERROR_PATCHWELCOME;
+        } else if (s->bits <= 8)
             s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
         else
             s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
@@ -780,6 +785,12 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int p
     int resync_mb_y = 0;
     int resync_mb_x = 0;
 
+    if (s->nb_components != 3 && s->nb_components != 4)
+        return AVERROR_INVALIDDATA;
+    if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
+        return AVERROR_INVALIDDATA;
+
+
     s->restart_count = s->restart_interval;
 
     av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size,
@@ -1078,32 +1089,39 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
 
                     if (s->interlaced && s->bottom_field)
                         block_offset += linesize[c] >> 1;
+                    if (   8*(h * mb_x + x) < s->width
+                        && 8*(v * mb_y + y) < s->height) {
                         ptr = data[c] + block_offset;
+                    } else
+                        ptr = NULL;
                     if (!s->progressive) {
-                        if (copy_mb)
+                        if (copy_mb) {
+                            if (ptr)
                             mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
                                              linesize[c], s->avctx->lowres);
 
-                        else {
+                        } else {
                             s->dsp.clear_block(s->block);
                             if (decode_block(s, s->block, i,
                                              s->dc_index[i], s->ac_index[i],
-                                             s->quant_matrixes[s->quant_index[c]]) < 0) {
+                                             s->quant_matrixes[s->quant_sindex[i]]) < 0) {
                                 av_log(s->avctx, AV_LOG_ERROR,
                                        "error y=%d x=%d\n", mb_y, mb_x);
                                 return AVERROR_INVALIDDATA;
                             }
+                            if (ptr) {
                             s->dsp.idct_put(ptr, linesize[c], s->block);
                             }
+                        }
                     } else {
                         int block_idx = s->block_stride[c] * (v * mb_y + y) +
                                         (h * mb_x + x);
                         int16_t *block = s->blocks[c][block_idx];
                         if (Ah)
                             block[0] += get_bits1(&s->gb) *
-                                        s->quant_matrixes[s->quant_index[c]][0] << Al;
+                                        s->quant_matrixes[s->quant_sindex[i]][0] << Al;
                         else if (decode_dc_progressive(s, block, i, s->dc_index[i],
-                                                       s->quant_matrixes[s->quant_index[c]],
+                                                       s->quant_matrixes[s->quant_sindex[i]],
                                                        Al) < 0) {
                             av_log(s->avctx, AV_LOG_ERROR,
                                    "error y=%d x=%d\n", mb_y, mb_x);
@@ -1136,7 +1154,7 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
     uint8_t *data = s->picture.data[c];
     int linesize = s->linesize[c];
     int last_scan = 0;
-    int16_t *quant_matrix = s->quant_matrixes[s->quant_index[c]];
+    int16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
 
     av_assert0(ss>=0 && Ah>=0 && Al>=0);
     if (se < ss || se > 63) {
@@ -1145,7 +1163,7 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
     }
 
     if (!Al) {
-        s->coefs_finished[c] |= (1LL << (se + 1)) - (1LL << ss);
+        s->coefs_finished[c] |= (2LL << se) - (1LL << ss);
         last_scan = !~s->coefs_finished[c];
     }
 
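The mask rewrite above sidesteps an invalid shift: for the final spectral selection (se == 63) the old expression shifts by se + 1 = 64, which is as wide as the type and therefore undefined in C, while 2LL << se builds the same coefficient mask with a shift count that never exceeds 63. A small illustration of the equivalent mask, written with unsigned arithmetic so the se == 63 case is fully defined:

    #include <stdint.h>

    /* Mask covering coefficient bits ss..se (0 <= ss <= se <= 63).
     * (2 << se) equals (1 << (se + 1)) wherever the latter is valid,
     * but it never shifts by the full type width. */
    static uint64_t coef_mask(int ss, int se)
    {
        return (2ULL << se) - (1ULL << ss);
    }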
@@ -1231,6 +1249,11 @@ int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
             && nb_components == 3 && s->nb_components == 3 && i)
             index = 3 - i;
 
+        s->quant_sindex[i] = s->quant_index[index];
+        s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
+        s->h_scount[i] = s->h_count[index];
+        s->v_scount[i] = s->v_count[index];
+
         if(nb_components == 3 && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
             index = (i+2)%3;
         if(nb_components == 1 && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
@@ -1238,10 +1261,6 @@ int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
 
         s->comp_index[i] = index;
 
-        s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
-        s->h_scount[i] = s->h_count[index];
-        s->v_scount[i] = s->v_count[index];
-
         s->dc_index[i] = get_bits(&s->gb, 4);
         s->ac_index[i] = get_bits(&s->gb, 4);
 
@@ -1439,6 +1458,8 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
     }
 
     if (id == AV_RB32("LJIF")) {
+        int rgb = s->rgb;
+        int pegasus_rct = s->pegasus_rct;
         if (s->avctx->debug & FF_DEBUG_PICT_INFO)
             av_log(s->avctx, AV_LOG_INFO,
                    "Pegasus lossless jpeg header found\n");
@@ -1448,17 +1469,27 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
         skip_bits(&s->gb, 16); /* unknown always 0? */
         switch (get_bits(&s->gb, 8)) {
         case 1:
-            s->rgb = 1;
-            s->pegasus_rct = 0;
+            rgb = 1;
+            pegasus_rct = 0;
             break;
         case 2:
-            s->rgb = 1;
-            s->pegasus_rct = 1;
+            rgb = 1;
+            pegasus_rct = 1;
             break;
         default:
             av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace\n");
         }
 
         len -= 9;
+        if (s->got_picture)
+            if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
+                av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
+                goto out;
+            }
+
+        s->rgb = rgb;
+        s->pegasus_rct = pegasus_rct;
+
         goto out;
     }
 
@@ -1625,6 +1656,10 @@ int ff_mjpeg_find_marker(MJpegDecodeContext *s,
             put_bits(&pb, 8, x);
             if (x == 0xFF) {
                 x = src[b++];
+                if (x & 0x80) {
+                    av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
+                    x &= 0x7f;
+                }
                 put_bits(&pb, 7, x);
                 bit_count--;
             }
@@ -84,6 +84,7 @@ typedef struct MJpegDecodeContext {
     int nb_blocks[MAX_COMPONENTS];
     int h_scount[MAX_COMPONENTS];
     int v_scount[MAX_COMPONENTS];
+    int quant_sindex[MAX_COMPONENTS];
     int h_max, v_max; /* maximum h and v counts */
     int quant_index[4]; /* quant table index for each component */
     int last_dc[MAX_COMPONENTS]; /* last DEQUANTIZED dc (XXX: am I right to do that ?) */
@@ -454,7 +454,7 @@ static void encode_block(MpegEncContext *s, int16_t *block, int n)
         put_bits(&s->pb, huff_size_ac[0], huff_code_ac[0]);
     }
 
-void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[6][64])
+void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
 {
     int i;
     if (s->chroma_format == CHROMA_444) {
@@ -56,6 +56,6 @@ void ff_mjpeg_encode_picture_trailer(MpegEncContext *s);
 void ff_mjpeg_encode_stuffing(MpegEncContext *s);
 void ff_mjpeg_encode_dc(MpegEncContext *s, int val,
                         uint8_t *huff_size, uint16_t *huff_code);
-void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[6][64]);
+void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64]);
 
 #endif /* AVCODEC_MJPEGENC_H */
Some files were not shown because too many files have changed in this diff.