Compare commits
346 Commits
Commits (SHA1):

f406bf3fa9 54bec22a6e e1b2c93a23 e529ff52a0 fb7e76d1cf 96047b3150 2545defeac 2d97ad38ed f6c628f029 3f743e3e4c
f2dbd64bde 7bce659e18 cb9379065f 4261778dbd ba88a6e4e4 e0407a7bf7 d575984dfc 9ea1e82d68 5cd2cdf33f a5a6f6fec3
88544e8ceb f183eaa3ad d773d7775a f15f4cefd7 0ec75a04e5 34fb994d93 acafd1814e bb01956d67 bc0c49b83e 0008a87cb1
2aa6592338 2b14d98086 a05f86ec10 9bdb254b98 358d1f6e01 ea28034f5d 6452b31599 a14969253a fab9a7be76 3a67865963
3fb754712c e5294f407a ed1ad2f5eb e780c3daaf 7f954ca502 8e9e57ed0c 1bd6372cd3 40ffa99dfa ff79f6b35a e2a83d72da
6c3985713b 7d97cc8d87 a56a9e65c6 428b629eb2 194d12345d b3d8276d2d c2eb668617 c9c223ba00 9d0ff6436e 02b7b125b5
5643668308 2d18e7f3ef f1b5830182 c588316555 afd1f61944 16a9c5ea9e 83fb31a76d fc5b32877a 10e023c4fa 27a3a59428
8ab849cddc 5191b00155 1131e7a1a4 26becbcd2a 366cdd3548 e3b08b3ad4 fa16440659 c36fd16aaa 8c5897632a b34fce9c54
742f9aa879 79041d92ee 82cebc0e05 09abca6802 43d64829e6 4a479fd3e6 4f41717d01 6896dcbf5f 14404170b9 e9e42beed2
abd6decd55 0385c824f1 c4e764aa69 9d02e38d3f 30cf47c6f0 b45cd17d29 26b6d70c72 32919db4fb 56f44c26f0 fe87a40de6
0f6e309b97 96e13c9897 e72c0a0466 dfddefa13a ce94955b3c dde95268cc d20ac551a8 352b0969e2 b479b42b26 36cab9c408
34592d04fb 544accc895 f41622ecb4 fc8eb4c1f9 02bae9f013 5cb2a1c3f0 a221c9bd76 b2583c2b62 bd553941ae e0aa76d38a
a014b9614e 31c21d2f69 3cd1c8653b 194485cfba 81cfe39113 ef0c503d37 1103aec1df b40ab81d1f 314f055c29 2c566744c4
adad1ba5d8 a80a7131d1 3ab63abbd4 d2c76782e0 4dc8b4d7d0 9ff0467566 4407b38b28 8caaf260a6 e1f51bbd1f 4b7c149306
e8919d6522 b017785fa5 01507eb1f8 938ff93710 0c88d539f8 e39a992bd1 72a12f61ef 30ae080e9d dca463b728 25b462cab9
7c6a8afa7e b052525f9b 90c7bfb9be 7bdd348e58 af3d003658 c00beff5e0 57a43142ba 99905118a8 dcf560204c 5b0e2eb041
d461e077a5 9a884b7b97 9abe0bfb7f 13682b48e9 f25e6e0c25 80239a8bb1 26bbc1c242 efe259a27e 49f11e12d5 d130fae519
dde996bf99 cad2958fd7 29d61d73b1 9a6a710998 daaef403d1 207f5a138a e9c8a9aaa6 7b7d8b8794 90d6b563fe dce2f820e9
4aab3f868f 48609236da d8fe695779 0f42e06651 230c4c6ad9 f4489c9558 0e5d9fe2a7 9ae2aaea50 2513314912 e727cbf0be
bcc25353cf 6a10263f16 bcc6429c01 bdb219435e a7338ae8ac 6776c2c04f facd3dbc6e 8796c3b7d3 7430f3064f eac281b06c
b5210f4eae 10379d50be cd874cf8e6 82a3e469c6 f859fed03d 991e6fa35b 09dca51066 40de74d0eb e2811c2ede 25d14b716a
f1de93dec3 738d68de85 00ecce5c8b 6ba07e9948 125bea15d1 70e3cc282b 242df26b44 46c2dba20e 3caa6a5a57 bf08665e2e
c4f5f4dbd3 29df24252a b920c1d5ad 2b9ee7d5b9 f800cacada 5227eac5b0 bb40f8f5e2 ad8bf22086 7f8804296d f67e75b5dc
35e63f35b0 3bfb7a2537 4a1e7a6fb7 ff1e982205 bb116e6ba3 ebe356bf1c 30099413ec 186e0ff067 2642ad9f55 95ddd2227b
3faebed6fa 3aee1fa5b6 89a9c84ebb 0adde39e04 03ae616b19 830c3058ff b12c5cbbb2 82c96b5ad8 3e4b957847 cbabbe8220
80122a3af3 a475755b3a 66030e8133 46f8d838b3 bc3648d4b4 27e6b4a3ff b82860caa7 3d05625136 ddd3301bad 123981930f
3171e2360a 3533a850e7 6d56bc9a6d 2c5e1d0933 b37b83214a 6d7ab09788 227cfc1f10 416847d195 bd4ad1a1d5 6230de03aa
45acc228a6 d37fac6dbb 7940306a47 eabefe83f4 eaa79b79b2 c761379825 ea3309eba7 1c1e252cd1 ca2c9d6b9b fa6b99d351
d79cb6947e 5aa4b29bbe e4cbd0d6e5 0ede7b5344 5b933be089 f2693e98b4 c3861e14ce daa5a988e2 db67b7c31b a643a47d41
23af29e882 7d995cd1b8 72a58c0772 d525423006 4b476e6aa4 124c78fd44 a1ab3300c8 1af235f6b3 82031e41f8 222e7549a7
eb2244ece9 b967c10029 7ff4cd2acc c4149c4d54 8ad2f45964 5df52b0131 596d3e20ae 00d5ff6431 437179e9c8 031d3b66c2
b76871d870 15ae305007 3c72204ae0 ba21499648 7933039ade 4015829acc 39dc4a6bb3 a6a2d8eb8f 58556826a8 bc2c9a479a
9cc22be032 33e1bca651 9841617b7f 2897481f64 646c564de5 cd6281abef 697be8173b 1853d8bb7a 1779cd7695 bb4820727f
affc7687d3 3569470693 1d1df82093 de187e3e9e 7754d48381 63169474b3 b3f106cb1f 9b6ccf0f24 298d66c8de 4be1b68d52
92edc13d69 c9f015f1c6 db6b2ca0b3 3503ec8461 ecc5e42d92 f87ce262f6
@@ -1,9 +1,6 @@
 Entries are sorted chronologically from oldest to youngest within each release,
 releases are sorted from youngest to oldest.
 
-version <next>
-
-
 version 2.2:
 
 - HNM version 4 demuxer and video decoder
@@ -31,6 +28,7 @@ version 2.2:
 - Support DNx444
 - libx265 encoder
 - dejudder filter
+- Autodetect VDA like all other hardware accelerations
 
 
 version 2.1:
@@ -13,7 +13,8 @@
 //
 // You should have received a copy of the GNU General Public License
 // along with this program; if not, write to the Free Software
-// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
+// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+// MA 02110-1301 USA, or visit
 // http://www.gnu.org/copyleft/gpl.html .
 //
 // As a special exception, I give you permission to link to the
@@ -13,7 +13,8 @@
 //
 // You should have received a copy of the GNU General Public License
 // along with this program; if not, write to the Free Software
-// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
+// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+// MA 02110-1301 USA, or visit
 // http://www.gnu.org/copyleft/gpl.html .
 //
 // As a special exception, I give you permission to link to the
configure (13 changed lines)
@@ -149,7 +149,7 @@ Component options:
 Hardware accelerators:
   --disable-dxva2          disable DXVA2 code [autodetect]
   --disable-vaapi          disable VAAPI code [autodetect]
-  --enable-vda             enable VDA code
+  --disable-vda            disable VDA code [autodetect]
   --disable-vdpau          disable VDPAU code [autodetect]
 
 Individual component options:
@@ -2504,7 +2504,7 @@ enable static
 enable swscale_alpha
 
 # Enable hwaccels by default.
-enable dxva2 vaapi vdpau xvmc
+enable dxva2 vaapi vda vdpau xvmc
 
 # build settings
 SHFLAGS='-shared -Wl,-soname,$$(@F)'
@@ -3324,6 +3324,9 @@ if test -n "$sysroot"; then
         gcc|llvm_gcc|clang)
             add_cppflags --sysroot="$sysroot"
             add_ldflags --sysroot="$sysroot"
+            # On Darwin --sysroot may be ignored, -isysroot always affects headers and linking
+            add_cppflags -isysroot "$sysroot"
+            add_ldflags -isysroot "$sysroot"
         ;;
         tms470)
             add_cppflags -I"$sysinclude"
@@ -3383,7 +3386,7 @@ case "$arch" in
     tilegx|tile-gx)
         arch="tilegx"
     ;;
-    i[3-6]86|i86pc|BePC|x86pc|x86_64|x86_32|amd64)
+    i[3-6]86*|i86pc|BePC|x86pc|x86_64|x86_32|amd64)
         arch="x86"
     ;;
 esac
@@ -3661,6 +3664,10 @@ case "$arch" in
         check_64bit ppc ppc64 'sizeof(void *) > 4'
         spic=$shared
     ;;
+    s390)
+        check_64bit s390 s390x 'sizeof(void *) > 4'
+        spic=$shared
+    ;;
     sparc)
         check_64bit sparc sparc64 'sizeof(void *) > 4'
         spic=$shared
@@ -15,6 +15,9 @@ libavutil: 2012-10-22
 
 API changes, most recent first:
 
+2014-03-18 - e9c8a9a - lsws 2.5.102
+  Make gray16 full-scale.
+
 2014-xx-xx - xxxxxxx - lavu 53.05.0 - frame.h
   Add av_frame_copy() for copying the frame data.
 
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
 # This could be handy for archiving the generated documentation or
 # if some version control system is used.
 
-PROJECT_NUMBER =
+PROJECT_NUMBER = 2.2.3
 
 # With the PROJECT_LOGO tag one can specify a logo or icon that is included
 # in the documentation. The maximum height of the logo should not exceed 55
@@ -62,7 +62,7 @@ AC-3 audio decoder.
 This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
 the undocumented RealAudio 3 (a.k.a. dnet).
 
-@subsubsection AC-3 Decoder Options
+@subsection AC-3 Decoder Options
 
 @table @option
 
@@ -296,7 +296,7 @@ teletext packet PTS and DTS values untouched.
 
 Raw video demuxer.
 
-This demuxer allows to read raw video data. Since there is no header
+This demuxer allows one to read raw video data. Since there is no header
 specifying the assumed video parameters, the user must specify them
 in order to be able to decode the data correctly.
 
@@ -807,7 +807,7 @@ while producing the worst quality.
 
 @item reservoir
 Enable use of bit reservoir when set to 1. Default value is 1. LAME
-has this enabled by default, but can be overriden by use
+has this enabled by default, but can be overridden by use
 @option{--nores} option.
 
 @item joint_stereo (@emph{-m j})
@@ -1271,7 +1271,7 @@ Requires the presence of the libtheora headers and library during
 configuration. You need to explicitly configure the build with
 @code{--enable-libtheora}.
 
-For more informations about the libtheora project see
+For more information about the libtheora project see
 @url{http://www.theora.org/}.
 
 @subsection Options
@@ -1525,7 +1525,7 @@ for detail retention (adaptive quantization, psy-RD, psy-trellis).
 Many libx264 encoder options are mapped to FFmpeg global codec
 options, while unique encoder options are provided through private
 options. Additionally the @option{x264opts} and @option{x264-params}
-private options allows to pass a list of key=value tuples as accepted
+private options allows one to pass a list of key=value tuples as accepted
 by the libx264 @code{x264_param_parse} function.
 
 The x264 project website is at
@@ -1853,7 +1853,7 @@ Override the x264 configuration using a :-separated list of key=value
 parameters.
 
 This option is functionally the same as the @option{x264opts}, but is
-duplicated for compability with the Libav fork.
+duplicated for compatibility with the Libav fork.
 
 For example to specify libx264 encoding options with @command{ffmpeg}:
 @example
@@ -2047,7 +2047,7 @@ Set physical density of pixels, in dots per meter, unset by default
 Apple ProRes encoder.
 
 FFmpeg contains 2 ProRes encoders, the prores-aw and prores-ks encoder.
-The used encoder can be choosen with the @code{-vcodec} option.
+The used encoder can be chosen with the @code{-vcodec} option.
 
 @subsection Private Options for prores-ks
 
@@ -119,8 +119,10 @@ int main(int argc, char *argv[])
 end:
     avformat_close_input(&fmt_ctx);
     /* note: the internal buffer could have changed, and be != avio_ctx_buffer */
-    av_freep(&avio_ctx->buffer);
-    av_freep(&avio_ctx);
+    if (avio_ctx) {
+        av_freep(&avio_ctx->buffer);
+        av_freep(&avio_ctx);
+    }
     av_file_unmap(buffer, buffer_size);
 
     if (ret < 0) {
@@ -14,7 +14,7 @@
 
 The FFmpeg resampler provides a high-level interface to the
 libswresample library audio resampling utilities. In particular it
-allows to perform audio resampling, audio channel layout rematrixing,
+allows one to perform audio resampling, audio channel layout rematrixing,
 and convert audio format and packing layout.
 
 @c man end DESCRIPTION
@@ -13,7 +13,7 @@
 @c man begin DESCRIPTION
 
 The FFmpeg rescaler provides a high-level interface to the libswscale
-library image conversion utilities. In particular it allows to perform
+library image conversion utilities. In particular it allows one to perform
 image rescaling and pixel format conversion.
 
 @c man end DESCRIPTION
@@ -111,7 +111,7 @@ must be configured in the stream configuration. They are sent to
 the @command{ffmpeg} encoders.
 
 The @command{ffmpeg} @option{override_ffserver} commandline option
-allows to override the encoding parameters set by the server.
+allows one to override the encoding parameters set by the server.
 
 Multiple streams can be connected to the same feed.
 
@@ -172,7 +172,7 @@ terminated when the next special character (belonging to the set
 
 The name and arguments of the filter are optionally preceded and
 followed by a list of link labels.
-A link label allows to name a link and associate it to a filter output
+A link label allows one to name a link and associate it to a filter output
 or input pad. The preceding labels @var{in_link_1}
 ... @var{in_link_N}, are associated to the filter input pads,
 the following labels @var{out_link_1} ... @var{out_link_M}, are
@@ -3921,7 +3921,7 @@ The high threshold selects the "strong" edge pixels, which are then
 connected through 8-connectivity with the "weak" edge pixels selected
 by the low threshold.
 
-@var{low} and @var{high} threshold values must be choosen in the range
+@var{low} and @var{high} threshold values must be chosen in the range
 [0,1], and @var{low} should be lesser or equal to @var{high}.
 
 Default value for @var{low} is @code{20/255}, and default value for @var{high}
@@ -5238,7 +5238,7 @@ Set progressive threshold.
 
 Deinterleave or interleave fields.
 
-This filter allows to process interlaced images fields without
+This filter allows one to process interlaced images fields without
 deinterlacing them. Deinterleaving splits the input frame into 2
 fields (so called half pictures). Odd lines are moved to the top
 half of the output image, even lines to the bottom half.
@@ -6860,7 +6860,7 @@ rotate=A*sin(2*PI/T*t)
 @end example
 
 @item
-Rotate the video, output size is choosen so that the whole rotating
+Rotate the video, output size is chosen so that the whole rotating
 input video is always completely contained in the output:
 @example
 rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
@@ -6983,7 +6983,7 @@ Default value is @samp{0}.
 @item flags
 Set libswscale scaling flags. See
 @ref{sws_flags,,the ffmpeg-scaler manual,ffmpeg-scaler} for the
-complete list of values. If not explictly specified the filter applies
+complete list of values. If not explicitly specified the filter applies
 the default flags.
 
 @item size, s
@@ -7797,7 +7797,7 @@ Produce 8x8 PNG tiles of all keyframes (@option{-skip_frame nokey}) in a movie:
 ffmpeg -skip_frame nokey -i file.avi -vf 'scale=128:72,tile=8x8' -an -vsync 0 keyframes%03d.png
 @end example
 The @option{-vsync 0} is necessary to prevent @command{ffmpeg} from
-duplicating each output frame to accomodate the originally detected frame
+duplicating each output frame to accommodate the originally detected frame
 rate.
 
 @item
@@ -8394,7 +8394,7 @@ Set dithering to reduce the circular banding effects. Default is @code{1}
 (enabled).
 
 @item aspect
-Set vignette aspect. This setting allows to adjust the shape of the vignette.
+Set vignette aspect. This setting allows one to adjust the shape of the vignette.
 Setting this value to the SAR of the input will make a rectangular vignetting
 following the dimensions of the video.
 
@@ -8903,7 +8903,7 @@ horizontally, vertically, or diagonally adjacent.
 
 At each interaction the grid evolves according to the adopted rule,
 which specifies the number of neighbor alive cells which will make a
-cell stay alive or born. The @option{rule} option allows to specify
+cell stay alive or born. The @option{rule} option allows one to specify
 the rule to adopt.
 
 This source accepts the following options:
@@ -10323,7 +10323,7 @@ Note that when the movie is looped the source timestamps are not
 changed, so it will generate non monotonically increasing timestamps.
 @end table
 
-This filter allows to overlay a second video on top of main input of
+This filter allows one to overlay a second video on top of main input of
 a filtergraph as shown in this graph:
 @example
 input -----------> deltapts0 --> overlay --> output
@@ -162,6 +162,27 @@ libzvbi is licensed under the GNU General Public License Version 2 or later
 you must upgrade FFmpeg's license to GPL in order to use it.
 @end float
 
+@section AviSynth
+
+FFmpeg can read AviSynth scripts as input. To enable support, pass
+@code{--enable-avisynth} to configure. The correct headers are
+included in compat/avisynth/, which allows the user to enable support
+without needing to search for these headers themselves.
+
+For Windows, supported AviSynth variants are
+@url{http://avisynth.nl, AviSynth 2.5 or 2.6} for 32-bit builds and
+@url{http://avs-plus.net, AviSynth+ 0.1} for 32-bit and 64-bit builds.
+
+For Linux and OS X, the supported AviSynth variant is
+@url{https://github.com/avxsynth/avxsynth, AvxSynth}.
+
+@float NOTE
+AviSynth and AvxSynth are loaded dynamically. Distributors can build FFmpeg
+with @code{--enable-avisynth}, and the binaries will work regardless of the
+end user having AviSynth or AvxSynth installed - they'll only need to be
+installed to use AviSynth scripts (obviously).
+@end float
+
 
 @chapter Supported File Formats, Codecs or Features
 
@@ -299,7 +299,7 @@ the current branch history.
 git commit --amend
 @end example
 
-allows to amend the last commit details quickly.
+allows one to amend the last commit details quickly.
 
 @example
 git rebase -i origin/master
@@ -409,7 +409,7 @@ OpenAL is part of Core Audio, the official Mac OS X Audio interface.
 See @url{http://developer.apple.com/technologies/mac/audio-and-video.html}
 @end table
 
-This device allows to capture from an audio input device handled
+This device allows one to capture from an audio input device handled
 through OpenAL.
 
 You need to specify the name of the device to capture in the provided
@@ -617,7 +617,7 @@ Select the pixel format (only valid for raw video input).
 
 @item input_format
 Set the preferred pixel format (for raw video) or a codec name.
-This option allows to select the input format, when several are
+This option allows one to select the input format, when several are
 available.
 
 @item framerate
@@ -678,7 +678,7 @@ other filename will be interpreted as device number 0.
 
 X11 video input device.
 
-This device allows to capture a region of an X11 display.
+This device allows one to capture a region of an X11 display.
 
 The filename passed as input has the syntax:
 @example
@@ -758,7 +758,7 @@ The segment muxer supports the following options:
 @table @option
 @item reference_stream @var{specifier}
 Set the reference stream, as specified by the string @var{specifier}.
-If @var{specifier} is set to @code{auto}, the reference is choosen
+If @var{specifier} is set to @code{auto}, the reference is chosen
 automatically. Otherwise it must be a stream specifier (see the ``Stream
 specifiers'' chapter in the ffmpeg manual) which specifies the
 reference stream. The default value is @code{auto}.
@@ -42,7 +42,7 @@ ffmpeg -i INPUT -f alsa hw:1,7
 
 CACA output device.
 
-This output device allows to show a video stream in CACA window.
+This output device allows one to show a video stream in CACA window.
 Only one CACA window is allowed per application, so you can
 have only one instance of this output device in an application.
 
@@ -216,7 +216,7 @@ OpenGL output device.
 
 To enable this output device you need to configure FFmpeg with @code{--enable-opengl}.
 
-Device allows to render to OpenGL context.
+This output device allows one to render to OpenGL context.
 Context may be provided by application or default SDL window is created.
 
 When device renders to external context, application must implement handlers for following messages:
@@ -302,7 +302,7 @@ ffmpeg -i INPUT -f pulse "stream name"
 
 SDL (Simple DirectMedia Layer) output device.
 
-This output device allows to show a video stream in an SDL
+This output device allows one to show a video stream in an SDL
 window. Only one SDL window is allowed per application, so you can
 have only one instance of this output device in an application.
 
@@ -361,7 +361,7 @@ sndio audio output device.
 
 XV (XVideo) output device.
 
-This output device allows to show a video stream in a X Window System
+This output device allows one to show a video stream in a X Window System
 window.
 
 @subsection Options
@@ -51,8 +51,9 @@ The toolchain provided with Xcode is sufficient to build the basic
 unacelerated code.
 
 Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
+@url{https://github.com/FFmpeg/gas-preprocessor} or
 @url{http://github.com/yuvi/gas-preprocessor} to build the optimized
-assembler functions. Just download the Perl script and put it somewhere
+assembler functions. Put the Perl script somewhere
 in your PATH, FFmpeg's configure will pick it up automatically.
 
 Mac OS X on amd64 and x86 requires @command{yasm} to build most of the
@@ -213,7 +213,7 @@ m3u8 files.
 
 HTTP (Hyper Text Transfer Protocol).
 
-This protocol accepts the following options.
+This protocol accepts the following options:
 
 @table @option
 @item seekable
@@ -223,32 +223,33 @@ if set to -1 it will try to autodetect if it is seekable. Default
 value is -1.
 
 @item chunked_post
-If set to 1 use chunked transfer-encoding for posts, default is 1.
+If set to 1 use chunked Transfer-Encoding for posts, default is 1.
 
+@item content_type
+Set a specific content type for the POST messages.
+
 @item headers
 Set custom HTTP headers, can override built in default headers. The
 value must be a string encoding the headers.
 
-@item content_type
-Force a content type.
-
-@item user-agent
-Override User-Agent header. If not specified the protocol will use a
-string describing the libavformat build.
-
 @item multiple_requests
-Use persistent connections if set to 1. By default it is 0.
+Use persistent connections if set to 1, default is 0.
 
 @item post_data
 Set custom HTTP post data.
 
+@item user-agent
+@item user_agent
+Override the User-Agent header. If not specified the protocol will use a
+string describing the libavformat build. ("Lavf/<version>")
+
 @item timeout
 Set timeout of socket I/O operations used by the underlying low level
 operation. By default it is set to -1, which means that the timeout is
 not specified.
 
 @item mime_type
-Set MIME type.
+Export the MIME type.
 
 @item icy
 If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
@@ -257,17 +258,25 @@ the @option{icy_metadata_headers} and @option{icy_metadata_packet} options.
 The default is 0.
 
 @item icy_metadata_headers
-If the server supports ICY metadata, this contains the ICY specific HTTP reply
-headers, separated with newline characters.
+If the server supports ICY metadata, this contains the ICY-specific HTTP reply
+headers, separated by newline characters.
 
 @item icy_metadata_packet
 If the server supports ICY metadata, and @option{icy} was set to 1, this
-contains the last non-empty metadata packet sent by the server.
+contains the last non-empty metadata packet sent by the server. It should be
+polled in regular intervals by applications interested in mid-stream metadata
+updates.
 
 @item cookies
 Set the cookies to be sent in future requests. The format of each cookie is the
 same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
 delimited by a newline character.
+
+@item offset
+Set initial byte offset.
+
+@item end_offset
+Try to limit the request to bytes preceding this offset.
 @end table
 
 @subsection HTTP Cookies
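The HTTP options documented in the hunks above (user_agent, icy, content_type, offset, end_offset) are normally handed to the protocol through an options dictionary when opening the input. A minimal sketch of that usage, assuming libavformat is available; the URL and the "MyPlayer/1.0" string are placeholders, not values from the patch:

```c
#include <libavformat/avformat.h>

/* Sketch only: pass HTTP protocol options via an AVDictionary.
 * Call av_register_all()/avformat_network_init() once at program start. */
int open_http_input(AVFormatContext **fmt_ctx, const char *url)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "user_agent", "MyPlayer/1.0", 0); /* override the User-Agent header */
    av_dict_set(&opts, "icy", "1", 0);                   /* request ICY (SHOUTcast) metadata */

    ret = avformat_open_input(fmt_ctx, url, NULL, &opts);
    av_dict_free(&opts);                                 /* drop any unconsumed options */
    return ret;
}
```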
@@ -951,7 +960,7 @@ used as master salt.
 
 @section tcp
 
-Trasmission Control Protocol.
+Transmission Control Protocol.
 
 The required syntax for a TCP url is:
 @example
@@ -1057,7 +1066,7 @@ udp://@var{hostname}:@var{port}[?@var{options}]
 @var{options} contains a list of &-separated options of the form @var{key}=@var{val}.
 
 In case threading is enabled on the system, a circular buffer is used
-to store the incoming data, which allows to reduce loss of data due to
+to store the incoming data, which allows one to reduce loss of data due to
 UDP socket buffer overruns. The @var{fifo_size} and
 @var{overrun_nonfatal} options are related to this buffer.
 
@@ -35,7 +35,7 @@ Select nearest neighbor rescaling algorithm.
 @item area
 Select averaging area rescaling algorithm.
 
-@item bicubiclin
+@item bicublin
 Select bicubic scaling algorithm for the luma component, bilinear for
 chroma components.
 
@@ -327,10 +327,11 @@ die "No filename or title\n" unless defined $fn && defined $tl;
 $chapters{NAME} = "$fn \- $tl\n";
 $chapters{FOOTNOTES} .= "=back\n" if exists $chapters{FOOTNOTES};
 
+# always use utf8
+print "=encoding utf8\n\n";
+
 unshift @chapters_sequence, "NAME";
 for $chapter (@chapters_sequence) {
-    # always use utf8
-    print "=encoding utf8\n";
     if (exists $chapters{$chapter}) {
         $head = uc($chapter);
         print "=head1 $head\n\n";
@@ -1056,7 +1056,7 @@ which can be obtained with @code{ffmpeg -opencl_bench} or @code{av_opencl_get_de
 @item device_idx
 Select the index of the device used to run OpenCL code.
 
-The specifed index must be one of the indexes in the device list which
+The specified index must be one of the indexes in the device list which
 can be obtained with @code{ffmpeg -opencl_bench} or @code{av_opencl_get_device_list()}.
 
 @end table
ffmpeg.c (4 changed lines)
@@ -323,7 +323,7 @@ sigterm_handler(int sig)
     received_nb_signals++;
     term_exit();
     if(received_nb_signals > 3)
-        exit_program(123);
+        exit(123);
 }
 
 void term_init(void)
@@ -514,6 +514,8 @@ static void ffmpeg_cleanup(int ret)
     if (received_sigterm) {
         av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
                (int) received_sigterm);
+    } else if (ret) {
+        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
     }
     term_exit();
 }
@@ -44,12 +44,15 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFo
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
     int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
     enum AVPixelFormat best= AV_PIX_FMT_NONE;
+    const enum AVPixelFormat mjpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
+    const enum AVPixelFormat ljpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
+                                                 AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
+
     if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
         if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
-            p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
+            p = mjpeg_formats;
         } else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
-            p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
-                                               AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
+            p =ljpeg_formats;
         }
     }
     for (; *p != AV_PIX_FMT_NONE; p++) {
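The hunk above replaces compound literals assigned inside nested if blocks with arrays declared at function scope. The underlying C rule is that a compound literal has automatic storage tied to its enclosing block, so a pointer kept past that block dangles; a minimal illustrative sketch, with made-up identifiers rather than FFmpeg code:

```c
#include <stdio.h>

/* Illustrative only: why hoisting the format lists out of the if blocks is safer.
 * A compound literal created inside a block has automatic storage tied to that
 * block (C99 6.5.2.5), so using a pointer to it after the block is undefined. */
int main(void)
{
    const int formats[] = { 1, 2, 3, -1 };  /* function-scope array: alive until return */
    const int *p = NULL;
    int pick = 1;

    if (pick) {
        /* p = (const int[]){ 1, 2, 3, -1 };    would dangle once this block ends */
        p = formats;                         /* safe: storage outlives the block */
    }

    for (; p && *p != -1; p++)               /* mirrors the for (; *p != ...; p++) loop above */
        printf("%d\n", *p);
    return 0;
}
```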
@@ -1821,7 +1821,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
 
     /* subtitles: pick first */
     MATCH_PER_TYPE_OPT(codec_names, str, subtitle_codec_name, oc, "s");
-    if (!o->subtitle_disable && (oc->oformat->subtitle_codec != AV_CODEC_ID_NONE || subtitle_codec_name)) {
+    if (!o->subtitle_disable && (avcodec_find_encoder(oc->oformat->subtitle_codec) || subtitle_codec_name)) {
         for (i = 0; i < nb_input_streams; i++)
             if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
                 new_subtitle_stream(o, oc, i);
ffprobe.c (25 changed lines)
@@ -191,6 +191,7 @@ static const char unit_hertz_str[] = "Hz" ;
 static const char unit_byte_str[] = "byte" ;
 static const char unit_bit_per_second_str[] = "bit/s";
 
+static int nb_streams;
 static uint64_t *nb_streams_packets;
 static uint64_t *nb_streams_frames;
 static int *selected_streams;
@@ -246,6 +247,7 @@ static char *value_string(char *buf, int buf_size, struct unit_value uv)
             vald /= pow(10, index * 3);
             prefix_string = decimal_unit_prefixes[index];
         }
+        vali = vald;
     }
 
     if (show_float || (use_value_prefix && vald != (long long int)vald))
@@ -336,7 +338,7 @@ static const AVOption writer_options[] = {
     { "replace", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = WRITER_STRING_VALIDATION_REPLACE}, .unit = "sv" },
     { "fail", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = WRITER_STRING_VALIDATION_FAIL}, .unit = "sv" },
     { "string_validation_replacement", "set string validation replacement string", OFFSET(string_validation_replacement), AV_OPT_TYPE_STRING, {.str=""}},
-    { "svr", "set string validation replacement string", OFFSET(string_validation_replacement), AV_OPT_TYPE_STRING, {.str=""}},
+    { "svr", "set string validation replacement string", OFFSET(string_validation_replacement), AV_OPT_TYPE_STRING, {.str="\xEF\xBF\xBD"}},
     { NULL }
 };
 
@@ -1631,6 +1633,14 @@ static void writer_register_all(void)
 #define print_section_header(s) writer_print_section_header(w, s)
 #define print_section_footer(s) writer_print_section_footer(w, s)
 
+#define REALLOCZ_ARRAY_STREAM(ptr, cur_n, new_n) \
+{ \
+    ret = av_reallocp_array(&(ptr), (new_n), sizeof(*(ptr))); \
+    if (ret < 0) \
+        goto end; \
+    memset( (ptr) + (cur_n), 0, ((new_n) - (cur_n)) * sizeof(*(ptr)) ); \
+}
+
 static inline int show_tags(WriterContext *w, AVDictionary *tags, int section_id)
 {
     AVDictionaryEntry *tag = NULL;
@@ -1892,6 +1902,12 @@ static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx,
         goto end;
     }
     while (!av_read_frame(fmt_ctx, &pkt)) {
+        if (fmt_ctx->nb_streams > nb_streams) {
+            REALLOCZ_ARRAY_STREAM(nb_streams_frames, nb_streams, fmt_ctx->nb_streams);
+            REALLOCZ_ARRAY_STREAM(nb_streams_packets, nb_streams, fmt_ctx->nb_streams);
+            REALLOCZ_ARRAY_STREAM(selected_streams, nb_streams, fmt_ctx->nb_streams);
+            nb_streams = fmt_ctx->nb_streams;
+        }
         if (selected_streams[pkt.stream_index]) {
             AVRational tb = fmt_ctx->streams[pkt.stream_index]->time_base;
 
@@ -2366,9 +2382,10 @@ static int probe_file(WriterContext *wctx, const char *filename)
 
 #define CHECK_END if (ret < 0) goto end
 
-    nb_streams_frames = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_frames));
-    nb_streams_packets = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_packets));
-    selected_streams = av_calloc(fmt_ctx->nb_streams, sizeof(*selected_streams));
+    nb_streams = fmt_ctx->nb_streams;
+    REALLOCZ_ARRAY_STREAM(nb_streams_frames,0,fmt_ctx->nb_streams);
+    REALLOCZ_ARRAY_STREAM(nb_streams_packets,0,fmt_ctx->nb_streams);
+    REALLOCZ_ARRAY_STREAM(selected_streams,0,fmt_ctx->nb_streams);
 
     for (i = 0; i < fmt_ctx->nb_streams; i++) {
         if (stream_specifier) {
@@ -119,7 +119,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
         }
     }
 
-    if ((avctx->bits_per_coded_sample & 0x1f) <= 8) {
+    if (avctx->bits_per_coded_sample <= 8) {
         const uint8_t *pal = av_packet_get_side_data(avpkt,
                                                      AV_PKT_DATA_PALETTE,
                                                      NULL);
@@ -26,7 +26,6 @@ OBJS = allcodecs.o \
        options.o \
        parser.o \
        raw.o \
-       rawdec.o \
        resample.o \
        resample2.o \
        utils.o \
@@ -81,7 +81,7 @@ enum BandType {
     INTENSITY_BT = 15, ///< Scalefactor data are intensity stereo positions.
 };
 
-#define IS_CODEBOOK_UNSIGNED(x) ((x - 1) & 10)
+#define IS_CODEBOOK_UNSIGNED(x) (((x) - 1) & 10)
 
 enum ChannelPosition {
     AAC_CHANNEL_OFF = 0,
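The IS_CODEBOOK_UNSIGNED change above only adds parentheses around the macro parameter. Without them, an argument containing a lower-precedence operator binds to the subtraction inside the expansion. A small sketch of the difference; the argument expression is made up for illustration:

```c
#include <stdio.h>

#define IS_CODEBOOK_UNSIGNED_OLD(x) ((x - 1) & 10)
#define IS_CODEBOOK_UNSIGNED_NEW(x) (((x) - 1) & 10)

int main(void)
{
    int bt = 12;

    /* Old expansion: ((bt >> 1 - 1) & 10) parses as (bt >> (1 - 1)) & 10 = 12 & 10. */
    printf("old: %d\n", IS_CODEBOOK_UNSIGNED_OLD(bt >> 1)); /* prints 8 */
    /* New expansion: (((bt >> 1) - 1) & 10) = (6 - 1) & 10 = 5 & 10. */
    printf("new: %d\n", IS_CODEBOOK_UNSIGNED_NEW(bt >> 1)); /* prints 0 */
    return 0;
}
```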
@@ -471,9 +471,11 @@ static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_
  * @param[out] coded_samples set to the number of samples as coded in the
  *                           packet, or 0 if the codec does not encode the
  *                           number of samples in each frame.
+ * @param[out] approx_nb_samples set to non-zero if the number of samples
+ *                               returned is an approximation.
  */
 static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
-                          int buf_size, int *coded_samples)
+                          int buf_size, int *coded_samples, int *approx_nb_samples)
 {
     ADPCMDecodeContext *s = avctx->priv_data;
     int nb_samples = 0;
@@ -482,6 +484,7 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
     int header_size;
 
     *coded_samples = 0;
+    *approx_nb_samples = 0;
 
     if(ch <= 0)
         return 0;
@@ -552,10 +555,12 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
     case AV_CODEC_ID_ADPCM_EA_R2:
         header_size = 4 + 5 * ch;
         *coded_samples = bytestream2_get_le32(gb);
+        *approx_nb_samples = 1;
         break;
     case AV_CODEC_ID_ADPCM_EA_R3:
         header_size = 4 + 5 * ch;
         *coded_samples = bytestream2_get_be32(gb);
+        *approx_nb_samples = 1;
         break;
     }
     *coded_samples -= *coded_samples % 28;
@@ -663,11 +668,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
     int16_t **samples_p;
     int st; /* stereo */
     int count1, count2;
-    int nb_samples, coded_samples, ret;
+    int nb_samples, coded_samples, approx_nb_samples, ret;
     GetByteContext gb;
 
     bytestream2_init(&gb, buf, buf_size);
-    nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples);
+    nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
     if (nb_samples <= 0) {
         av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
         return AVERROR_INVALIDDATA;
@@ -683,7 +688,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
     /* use coded_samples when applicable */
     /* it is always <= nb_samples, so the output buffer will be large enough */
     if (coded_samples) {
-        if (coded_samples != nb_samples)
+        if (!approx_nb_samples && coded_samples != nb_samples)
             av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
         frame->nb_samples = nb_samples = coded_samples;
     }
@@ -917,6 +922,9 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
             *samples++ = c->status[0].predictor + c->status[1].predictor;
             *samples++ = c->status[0].predictor - c->status[1].predictor;
         }
+
+        if ((bytestream2_tell(&gb) & 1))
+            bytestream2_skip(&gb, 1);
         break;
     }
     case AV_CODEC_ID_ADPCM_IMA_ISS:
@@ -144,10 +144,11 @@ function ff_put_pixels8_y2_armv6, export=1
         eor r7, r5, r7
         uadd8 r10, r10, r6
         and r7, r7, r12
-        ldr_pre r6, r1, r2
+        ldrc_pre ne, r6, r1, r2
         uadd8 r11, r11, r7
         strd_post r8, r9, r0, r2
-        ldr r7, [r1, #4]
+        it ne
+        ldrne r7, [r1, #4]
         strd_post r10, r11, r0, r2
         bne 1b
 
@@ -196,9 +197,10 @@ function ff_put_pixels8_y2_no_rnd_armv6, export=1
         uhadd8 r9, r5, r7
         ldr r5, [r1, #4]
         uhadd8 r12, r4, r6
-        ldr_pre r6, r1, r2
+        ldrc_pre ne, r6, r1, r2
         uhadd8 r14, r5, r7
-        ldr r7, [r1, #4]
+        it ne
+        ldrne r7, [r1, #4]
         stm r0, {r8,r9}
         add r0, r0, r2
         stm r0, {r12,r14}
@@ -26,7 +26,7 @@
|
|||||||
|
|
||||||
void ff_vp3_idct_put_neon(uint8_t *dest, int line_size, int16_t *data);
|
void ff_vp3_idct_put_neon(uint8_t *dest, int line_size, int16_t *data);
|
||||||
void ff_vp3_idct_add_neon(uint8_t *dest, int line_size, int16_t *data);
|
void ff_vp3_idct_add_neon(uint8_t *dest, int line_size, int16_t *data);
|
||||||
void ff_vp3_idct_dc_add_neon(uint8_t *dest, int line_size, const int16_t *data);
|
void ff_vp3_idct_dc_add_neon(uint8_t *dest, int line_size, int16_t *data);
|
||||||
|
|
||||||
void ff_vp3_v_loop_filter_neon(uint8_t *, int, int *);
|
void ff_vp3_v_loop_filter_neon(uint8_t *, int, int *);
|
||||||
void ff_vp3_h_loop_filter_neon(uint8_t *, int, int *);
|
void ff_vp3_h_loop_filter_neon(uint8_t *, int, int *);
|
||||||
|
@@ -1204,7 +1204,7 @@ function ff_put_vp8_\name\size\()_\hv\()_armv6, export=1
|
|||||||
mov r4, #\size
|
mov r4, #\size
|
||||||
stm r12, {r4, r5}
|
stm r12, {r4, r5}
|
||||||
orr r12, r6, r7
|
orr r12, r6, r7
|
||||||
b vp8_put_\name\()_\hv\()_armv6 + 4
|
b bl_put_\name\()_\hv\()_armv6
|
||||||
endfunc
|
endfunc
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
@@ -1300,6 +1300,7 @@ vp8_mc_hv bilin, 4, h, v, 2
|
|||||||
|
|
||||||
function vp8_put_epel_h6_armv6
|
function vp8_put_epel_h6_armv6
|
||||||
push {r1, r4-r11, lr}
|
push {r1, r4-r11, lr}
|
||||||
|
bl_put_epel_h6_armv6:
|
||||||
sub r2, r2, #2
|
sub r2, r2, #2
|
||||||
movrel lr, sixtap_filters_13245600 - 16
|
movrel lr, sixtap_filters_13245600 - 16
|
||||||
add lr, lr, r12, lsl #3
|
add lr, lr, r12, lsl #3
|
||||||
@@ -1358,6 +1359,7 @@ endfunc
|
|||||||
|
|
||||||
function vp8_put_epel_v6_armv6
|
function vp8_put_epel_v6_armv6
|
||||||
push {r1, r4-r11, lr}
|
push {r1, r4-r11, lr}
|
||||||
|
bl_put_epel_v6_armv6:
|
||||||
movrel lr, sixtap_filters_13245600 - 16
|
movrel lr, sixtap_filters_13245600 - 16
|
||||||
add lr, lr, r12, lsl #3
|
add lr, lr, r12, lsl #3
|
||||||
str r3, [sp, #48]
|
str r3, [sp, #48]
|
||||||
@@ -1437,6 +1439,7 @@ endfunc
|
|||||||
|
|
||||||
function vp8_put_epel_h4_armv6
|
function vp8_put_epel_h4_armv6
|
||||||
push {r1, r4-r11, lr}
|
push {r1, r4-r11, lr}
|
||||||
|
bl_put_epel_h4_armv6:
|
||||||
subs r2, r2, #1
|
subs r2, r2, #1
|
||||||
movrel lr, fourtap_filters_1324 - 4
|
movrel lr, fourtap_filters_1324 - 4
|
||||||
add lr, lr, r12, lsl #2
|
add lr, lr, r12, lsl #2
|
||||||
@@ -1483,6 +1486,7 @@ endfunc
|
|||||||
|
|
||||||
function vp8_put_epel_v4_armv6
|
function vp8_put_epel_v4_armv6
|
||||||
push {r1, r4-r11, lr}
|
push {r1, r4-r11, lr}
|
||||||
|
bl_put_epel_v4_armv6:
|
||||||
movrel lr, fourtap_filters_1324 - 4
|
movrel lr, fourtap_filters_1324 - 4
|
||||||
add lr, lr, r12, lsl #2
|
add lr, lr, r12, lsl #2
|
||||||
ldm lr, {r5, r6}
|
ldm lr, {r5, r6}
|
||||||
@@ -1544,6 +1548,7 @@ endfunc
|
|||||||
|
|
||||||
function vp8_put_bilin_h_armv6
|
function vp8_put_bilin_h_armv6
|
||||||
push {r1, r4-r11, lr}
|
push {r1, r4-r11, lr}
|
||||||
|
bl_put_bilin_h_armv6:
|
||||||
rsb r5, r12, r12, lsl #16
|
rsb r5, r12, r12, lsl #16
|
||||||
ldr r12, [sp, #44]
|
ldr r12, [sp, #44]
|
||||||
sub r3, r3, r4
|
sub r3, r3, r4
|
||||||
@@ -1589,6 +1594,7 @@ endfunc
|
|||||||
|
|
||||||
function vp8_put_bilin_v_armv6
|
function vp8_put_bilin_v_armv6
|
||||||
push {r1, r4-r11, lr}
|
push {r1, r4-r11, lr}
|
||||||
|
bl_put_bilin_v_armv6:
|
||||||
rsb r5, r12, r12, lsl #16
|
rsb r5, r12, r12, lsl #16
|
||||||
ldr r12, [sp, #44]
|
ldr r12, [sp, #44]
|
||||||
add r5, r5, #8
|
add r5, r5, #8
|
||||||
|
@@ -1370,7 +1370,7 @@ static int dca_subsubframe(DCAContext *s, int base_channel, int block_index)
* Decode VQ encoded high frequencies
*/
if (s->subband_activity[k] > s->vq_start_subband[k]) {
- if (!s->debug_flag & 0x01) {
+ if (!(s->debug_flag & 0x01)) {
av_log(s->avctx, AV_LOG_DEBUG,
"Stream with high frequencies VQ coding\n");
s->debug_flag |= 0x01;
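The `!s->debug_flag & 0x01` fix above is a pure precedence repair: logical NOT binds tighter than bitwise AND, so the unparenthesized form tests `(!s->debug_flag) & 0x01` and is only true when the whole flag word is zero. A minimal, self-contained illustration of the two readings (the variable names here are hypothetical, not taken from the decoder):

```c
#include <stdio.h>

int main(void)
{
    unsigned flags = 0x02;          /* bit 0 clear, bit 1 set */

    /* parses as (!flags) & 0x01, i.e. true only when flags == 0 */
    if (!flags & 0x01)
        puts("unparenthesized test fired");

    /* really asks: is bit 0 unset? */
    if (!(flags & 0x01))
        puts("bit 0 is not set");   /* printed for flags == 0x02 */

    return 0;
}
```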
@@ -2173,7 +2173,7 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
if (s->core_downmix && (s->core_downmix_amode == DCA_STEREO ||
s->core_downmix_amode == DCA_STEREO_TOTAL)) {
int sign, code;
- for (i = 0; i < s->prim_channels + !!s->lfe; i++) {
+ for (i = 0; i < num_core_channels + !!s->lfe; i++) {
sign = s->core_downmix_codes[i][0] & 0x100 ? 1 : -1;
code = s->core_downmix_codes[i][0] & 0x0FF;
s->downmix_coef[i][0] = (!code ? 0.0f :

@@ -2191,19 +2191,19 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
"Invalid channel mode %d\n", am);
return AVERROR_INVALIDDATA;
}
- if (s->prim_channels + !!s->lfe >
+ if (num_core_channels + !!s->lfe >
FF_ARRAY_ELEMS(dca_default_coeffs[0])) {
avpriv_request_sample(s->avctx, "Downmixing %d channels",
s->prim_channels + !!s->lfe);
return AVERROR_PATCHWELCOME;
}
- for (i = 0; i < s->prim_channels + !!s->lfe; i++) {
+ for (i = 0; i < num_core_channels + !!s->lfe; i++) {
s->downmix_coef[i][0] = dca_default_coeffs[am][i][0];
s->downmix_coef[i][1] = dca_default_coeffs[am][i][1];
}
}
av_dlog(s->avctx, "Stereo downmix coeffs:\n");
- for (i = 0; i < s->prim_channels + !!s->lfe; i++) {
+ for (i = 0; i < num_core_channels + !!s->lfe; i++) {
av_dlog(s->avctx, "L, input channel %d = %f\n", i,
s->downmix_coef[i][0]);
av_dlog(s->avctx, "R, input channel %d = %f\n", i,

@@ -2329,6 +2329,17 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
{ /* xxx should also do MA extensions */
if (s->amode < 16) {
avctx->channel_layout = dca_core_channel_layout[s->amode];

+ if (s->prim_channels + !!s->lfe > 2 &&
+ avctx->request_channel_layout == AV_CH_LAYOUT_STEREO) {
+ /*
+ * Neither the core's auxiliary data nor our default tables contain
+ * downmix coefficients for the additional channel coded in the XCh
+ * extension, so when we're doing a Stereo downmix, don't decode it.
+ */
+ s->xch_disable = 1;
+ }

#if FF_API_REQUEST_CHANNELS
FF_DISABLE_DEPRECATION_WARNINGS
if (s->xch_present && !s->xch_disable &&

@@ -2366,7 +2377,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
return AVERROR_INVALIDDATA;
}

- if (s->prim_channels + !!s->lfe > 2 &&
+ if (num_core_channels + !!s->lfe > 2 &&
avctx->request_channel_layout == AV_CH_LAYOUT_STEREO) {
channels = 2;
s->output = s->prim_channels == 2 ? s->amode : DCA_STEREO;

@@ -2415,7 +2426,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
* masks in some sense -- unfortunately some channels could overlap */
if (av_popcount(channel_mask) != av_popcount(channel_layout)) {
av_log(avctx, AV_LOG_DEBUG,
- "DTS-XXCH: Inconsistant avcodec/dts channel layouts\n");
+ "DTS-XXCH: Inconsistent avcodec/dts channel layouts\n");
return AVERROR_INVALIDDATA;
}

@@ -2433,6 +2444,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
s->xxch_order_tab[j++] = posn;
}
}

}

s->lfe_index = av_popcount(channel_layout & (AV_CH_LOW_FREQUENCY-1));
@@ -28,6 +28,7 @@
#ifndef AVCODEC_DIRAC_ARITH_H
#define AVCODEC_DIRAC_ARITH_H

+ #include "libavutil/x86/asm.h"
#include "bytestream.h"
#include "get_bits.h"

@@ -134,7 +135,7 @@ static inline int dirac_get_arith_bit(DiracArith *c, int ctx)

range_times_prob = (c->range * prob_zero) >> 16;

- #if HAVE_FAST_CMOV && HAVE_INLINE_ASM
+ #if HAVE_FAST_CMOV && HAVE_INLINE_ASM && HAVE_6REGS
low -= range_times_prob << 16;
range -= range_times_prob;
bit = 0;

@@ -201,6 +201,7 @@ typedef struct DiracContext {

uint16_t *mctmp; /* buffer holding the MC data multipled by OBMC weights */
uint8_t *mcscratch;
+ int buffer_stride;

DECLARE_ALIGNED(16, uint8_t, obmc_weight)[3][MAX_BLOCKSIZE*MAX_BLOCKSIZE];

@@ -343,22 +344,44 @@ static int alloc_sequence_buffers(DiracContext *s)
return AVERROR(ENOMEM);
}

- w = s->source.width;
- h = s->source.height;

/* fixme: allocate using real stride here */
- s->sbsplit = av_malloc(sbwidth * sbheight);
- s->blmotion = av_malloc(sbwidth * sbheight * 16 * sizeof(*s->blmotion));
- s->edge_emu_buffer_base = av_malloc((w+64)*MAX_BLOCKSIZE);
- s->mctmp = av_malloc((w+64+MAX_BLOCKSIZE) * (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
- s->mcscratch = av_malloc((w+64)*MAX_BLOCKSIZE);
- if (!s->sbsplit || !s->blmotion || !s->mctmp || !s->mcscratch)
+ s->sbsplit = av_malloc_array(sbwidth, sbheight);
+ s->blmotion = av_malloc_array(sbwidth, sbheight * 16 * sizeof(*s->blmotion));
+ if (!s->sbsplit || !s->blmotion)
return AVERROR(ENOMEM);
return 0;
}

+ static int alloc_buffers(DiracContext *s, int stride)
+ {
+ int w = s->source.width;
+ int h = s->source.height;
+
+ av_assert0(stride >= w);
+ stride += 64;
+
+ if (s->buffer_stride >= stride)
+ return 0;
+ s->buffer_stride = 0;
+
+ av_freep(&s->edge_emu_buffer_base);
+ memset(s->edge_emu_buffer, 0, sizeof(s->edge_emu_buffer));
+ av_freep(&s->mctmp);
+ av_freep(&s->mcscratch);
+
+ s->edge_emu_buffer_base = av_malloc_array(stride, MAX_BLOCKSIZE);
+
+ s->mctmp = av_malloc_array((stride+MAX_BLOCKSIZE), (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
+ s->mcscratch = av_malloc_array(stride, MAX_BLOCKSIZE);
+
+ if (!s->edge_emu_buffer_base || !s->mctmp || !s->mcscratch)
+ return AVERROR(ENOMEM);
+
+ s->buffer_stride = stride;
+ return 0;
+ }

static void free_sequence_buffers(DiracContext *s)
{
int i, j, k;

@@ -382,6 +405,7 @@ static void free_sequence_buffers(DiracContext *s)
av_freep(&s->plane[i].idwt_tmp);
}

+ s->buffer_stride = 0;
av_freep(&s->sbsplit);
av_freep(&s->blmotion);
av_freep(&s->edge_emu_buffer_base);

@@ -1355,8 +1379,8 @@ static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5],
motion_y >>= s->chroma_y_shift;
}

- mx = motion_x & ~(-1 << s->mv_precision);
- my = motion_y & ~(-1 << s->mv_precision);
+ mx = motion_x & ~(-1U << s->mv_precision);
+ my = motion_y & ~(-1U << s->mv_precision);
motion_x >>= s->mv_precision;
motion_y >>= s->mv_precision;
/* normalize subpel coordinates to epel */

@@ -1854,6 +1878,9 @@ static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int
s->plane[1].stride = pic->avframe->linesize[1];
s->plane[2].stride = pic->avframe->linesize[2];

+ if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0)
+ return AVERROR(ENOMEM);

/* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
if (dirac_decode_picture_header(s))
return -1;
@@ -166,6 +166,10 @@ static int fic_decode_frame(AVCodecContext *avctx, void *data,
if (memcmp(src, fic_header, 7))
av_log(avctx, AV_LOG_WARNING, "Invalid FIC Header.\n");

+ /* Is it a skip frame? */
+ if (src[17])
+ goto skip;
+
nslices = src[13];
if (!nslices) {
av_log(avctx, AV_LOG_ERROR, "Zero slices found.\n");

@@ -242,10 +246,11 @@ static int fic_decode_frame(AVCodecContext *avctx, void *data,
ctx->slice_data[slice].y_off = y_off;
}

- if (ret = avctx->execute(avctx, fic_decode_slice, ctx->slice_data,
- NULL, nslices, sizeof(ctx->slice_data[0])) < 0)
+ if ((ret = avctx->execute(avctx, fic_decode_slice, ctx->slice_data,
+ NULL, nslices, sizeof(ctx->slice_data[0]))) < 0)
return ret;

+ skip:
*got_frame = 1;
if ((ret = av_frame_ref(data, ctx->frame)) < 0)
return ret;
@@ -691,6 +691,7 @@ static int g2m_decode_frame(AVCodecContext *avctx, void *data,
}
switch (chunk_type) {
case DISPLAY_INFO:
+ got_header =
c->got_header = 0;
if (chunk_size < 21) {
av_log(avctx, AV_LOG_ERROR, "Invalid display info size %d\n",

@@ -717,7 +718,8 @@ static int g2m_decode_frame(AVCodecContext *avctx, void *data,
av_log(avctx, AV_LOG_ERROR,
"Unknown compression method %d\n",
c->compression);
- return AVERROR_PATCHWELCOME;
+ ret = AVERROR_PATCHWELCOME;
+ goto header_fail;
}
c->tile_width = bytestream2_get_be32(&bc);
c->tile_height = bytestream2_get_be32(&bc);

@@ -737,7 +739,8 @@ static int g2m_decode_frame(AVCodecContext *avctx, void *data,
(chunk_size - 21) < 16 ) {
av_log(avctx, AV_LOG_ERROR,
"Display info: missing bitmasks!\n");
- return AVERROR_INVALIDDATA;
+ ret = AVERROR_INVALIDDATA;
+ goto header_fail;
}
r_mask = bytestream2_get_be32(&bc);
g_mask = bytestream2_get_be32(&bc);

@@ -746,11 +749,13 @@ static int g2m_decode_frame(AVCodecContext *avctx, void *data,
av_log(avctx, AV_LOG_ERROR,
"Invalid or unsupported bitmasks: R=%X, G=%X, B=%X\n",
r_mask, g_mask, b_mask);
- return AVERROR_PATCHWELCOME;
+ ret = AVERROR_PATCHWELCOME;
+ goto header_fail;
}
} else {
avpriv_request_sample(avctx, "bpp=%d", c->bpp);
- return AVERROR_PATCHWELCOME;
+ ret = AVERROR_PATCHWELCOME;
+ goto header_fail;
}
if (g2m_init_buffers(c)) {
ret = AVERROR(ENOMEM);
@@ -2285,7 +2285,8 @@ static int pack_bitstream(G723_1_Context *p, unsigned char *frame, int size)
if (p->cur_rate == RATE_6300) {
info_bits = 0;
put_bits(&pb, 2, info_bits);
- }
+ }else
+ av_assert0(0);

put_bits(&pb, 8, p->lsp_index[2]);
put_bits(&pb, 8, p->lsp_index[1]);
@@ -58,7 +58,7 @@ int main(void)
}
}

- #define EXTEND(i) (i << 3 | i & 7)
+ #define EXTEND(i) ((i) << 3 | (i) & 7)
init_put_bits(&pb, temp, SIZE);
for (i = 0; i < COUNT; i++)
set_ue_golomb(&pb, EXTEND(i));
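The `EXTEND` change above, like the `IVI_*`, `COMPENSATE`, `SOP_*` and `MKVAL` hunks later in this series, wraps macro parameters in parentheses so that operator-bearing arguments expand with the intended precedence. A small stand-alone sketch of the failure mode (the macro names here are illustrative, not the library's):

```c
#include <stdio.h>

#define EXTEND_BAD(i)  (i << 3 | i & 7)       /* parameter used unparenthesized */
#define EXTEND_GOOD(i) ((i) << 3 | (i) & 7)   /* parameter parenthesized at each use */

int main(void)
{
    /* With the argument "1 | 2" the expansions differ:
     *   EXTEND_BAD(1 | 2)  -> 1 | (2 << 3) | 1 | (2 & 7)     = 19
     *   EXTEND_GOOD(1 | 2) -> ((1 | 2) << 3) | ((1 | 2) & 7) = 27
     */
    printf("%d %d\n", EXTEND_BAD(1 | 2), EXTEND_GOOD(1 | 2));
    return 0;
}
```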
@@ -214,6 +214,18 @@ static inline int get_se_golomb(GetBitContext *gb)
}
}

+ static inline int get_se_golomb_long(GetBitContext *gb)
+ {
+ unsigned int buf = get_ue_golomb_long(gb);
+
+ if (buf & 1)
+ buf = (buf + 1) >> 1;
+ else
+ buf = -(buf >> 1);
+
+ return buf;
+ }
+
static inline int svq3_get_se_golomb(GetBitContext *gb)
{
unsigned int buf;
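The `get_se_golomb_long()` helper added above applies the usual signed Exp-Golomb mapping (0, 1, -1, 2, -2, ...) to a value already read by `get_ue_golomb_long()`. Just that mapping, pulled out of the bitstream reader as a hedged stand-alone sketch:

```c
#include <stdio.h>

/* Same arithmetic as the hunk above: odd codes map to positive values,
 * even codes to negative ones, and 0 stays 0. */
static int se_from_ue(unsigned int buf)
{
    if (buf & 1)
        return (int)((buf + 1) >> 1);
    return -(int)(buf >> 1);
}

int main(void)
{
    for (unsigned int ue = 0; ue < 7; ue++)
        printf("ue=%u -> se=%d\n", ue, se_from_ue(ue));
    /* prints 0, 1, -1, 2, -2, 3, -3 */
    return 0;
}
```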
@@ -1813,6 +1813,7 @@ static int decode_update_thread_context(AVCodecContext *dst,
memset(&h->mb, 0, sizeof(h->mb));
memset(&h->mb_luma_dc, 0, sizeof(h->mb_luma_dc));
memset(&h->mb_padding, 0, sizeof(h->mb_padding));
+ memset(&h->cur_pic, 0, sizeof(h->cur_pic));

h->avctx = dst;
h->DPB = NULL;

@@ -3444,6 +3445,17 @@ int ff_set_ref_count(H264Context *h)
return 0;
}

+ static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
+ {
+ switch (a) {
+ case AV_PIX_FMT_YUVJ420P: return AV_PIX_FMT_YUV420P;
+ case AV_PIX_FMT_YUVJ422P: return AV_PIX_FMT_YUV422P;
+ case AV_PIX_FMT_YUVJ444P: return AV_PIX_FMT_YUV444P;
+ default:
+ return a;
+ }
+ }
+
/**
* Decode a slice header.
* This will (re)intialize the decoder and call h264_frame_start() as needed.

@@ -3573,7 +3585,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|| h->mb_width != h->sps.mb_width
|| h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
));
- if (h0->avctx->pix_fmt != get_pixel_format(h0, 0))
+ if (non_j_pixfmt(h0->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h0, 0)))
must_reinit = 1;

h->mb_width = h->sps.mb_width;
@@ -61,10 +61,10 @@
#define MAX_SLICES 16

#ifdef ALLOW_INTERLACE
- #define MB_MBAFF(h) h->mb_mbaff
- #define MB_FIELD(h) h->mb_field_decoding_flag
- #define FRAME_MBAFF(h) h->mb_aff_frame
- #define FIELD_PICTURE(h) (h->picture_structure != PICT_FRAME)
+ #define MB_MBAFF(h) (h)->mb_mbaff
+ #define MB_FIELD(h) (h)->mb_field_decoding_flag
+ #define FRAME_MBAFF(h) (h)->mb_aff_frame
+ #define FIELD_PICTURE(h) ((h)->picture_structure != PICT_FRAME)
#define LEFT_MBS 2
#define LTOP 0
#define LBOT 1

@@ -84,12 +84,12 @@
#define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))

#ifndef CABAC
- #define CABAC(h) h->pps.cabac
+ #define CABAC(h) (h)->pps.cabac
#endif

- #define CHROMA(h) (h->sps.chroma_format_idc)
- #define CHROMA422(h) (h->sps.chroma_format_idc == 2)
- #define CHROMA444(h) (h->sps.chroma_format_idc == 3)
+ #define CHROMA(h) ((h)->sps.chroma_format_idc)
+ #define CHROMA422(h) ((h)->sps.chroma_format_idc == 2)
+ #define CHROMA444(h) ((h)->sps.chroma_format_idc == 3)

#define EXTENDED_SAR 255
@@ -175,7 +175,7 @@ static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
goto fail;

/* prepend only to the first type 5 NAL unit of an IDR picture */
- if (ctx->first_idr && unit_type == 5) {
+ if (ctx->first_idr && (unit_type == 5 || unit_type == 7 || unit_type == 8)) {
if ((ret=alloc_and_copy(poutbuf, poutbuf_size,
avctx->extradata, avctx->extradata_size,
buf, nal_size)) < 0)
@@ -283,7 +283,7 @@ static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
static int set_sps(HEVCContext *s, const HEVCSPS *sps)
{
int ret;
- unsigned num = 0, den = 0;
+ unsigned int num = 0, den = 0;

pic_arrays_free(s);
ret = pic_arrays_init(s, sps);

@@ -81,10 +81,10 @@
#define SAMPLE_CTB(tab, x, y) ((tab)[(y) * min_cb_width + (x)])
#define SAMPLE_CBF(tab, x, y) ((tab)[((y) & ((1<<log2_trafo_size)-1)) * MAX_CU_SIZE + ((x) & ((1<<log2_trafo_size)-1))])

- #define IS_IDR(s) (s->nal_unit_type == NAL_IDR_W_RADL || s->nal_unit_type == NAL_IDR_N_LP)
- #define IS_BLA(s) (s->nal_unit_type == NAL_BLA_W_RADL || s->nal_unit_type == NAL_BLA_W_LP || \
- s->nal_unit_type == NAL_BLA_N_LP)
- #define IS_IRAP(s) (s->nal_unit_type >= 16 && s->nal_unit_type <= 23)
+ #define IS_IDR(s) ((s)->nal_unit_type == NAL_IDR_W_RADL || (s)->nal_unit_type == NAL_IDR_N_LP)
+ #define IS_BLA(s) ((s)->nal_unit_type == NAL_BLA_W_RADL || (s)->nal_unit_type == NAL_BLA_W_LP || \
+ (s)->nal_unit_type == NAL_BLA_N_LP)
+ #define IS_IRAP(s) ((s)->nal_unit_type >= 16 && (s)->nal_unit_type <= 23)

/**
* Table 7-3: NAL unit type codes

@@ -461,7 +461,7 @@ typedef struct HEVCSPS {
} HEVCSPS;

typedef struct HEVCPPS {
- unsigned sps_id; ///< seq_parameter_set_id
+ unsigned int sps_id; ///< seq_parameter_set_id

uint8_t sign_data_hiding_flag;

@@ -611,8 +611,8 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
{
const AVPixFmtDescriptor *desc;
GetBitContext *gb = &s->HEVClc->gb;
int ret = 0;
- int sps_id = 0;
+ unsigned int sps_id = 0;
int log2_diff_max_min_transform_block_size;
int bit_depth_chroma, start, vui_present, sublayer_ordering_info;
int i;

@@ -993,8 +993,8 @@ int ff_hevc_decode_nal_pps(HEVCContext *s)
int pic_area_in_ctbs, pic_area_in_min_cbs, pic_area_in_min_tbs;
int log2_diff_ctb_min_tb_size;
int i, j, x, y, ctb_addr_rs, tile_id;
int ret = 0;
- int pps_id = 0;
+ unsigned int pps_id = 0;

AVBufferRef *pps_buf;
HEVCPPS *pps = av_mallocz(sizeof(*pps));
@@ -35,6 +35,12 @@

#define FF_SANE_NB_CHANNELS 63U

+ #if HAVE_NEON || ARCH_PPC || HAVE_MMX
+ # define STRIDE_ALIGN 16
+ #else
+ # define STRIDE_ALIGN 8
+ #endif
+
typedef struct FramePool {
/**
* Pools for each data plane. For audio all the planes have the same size,
@@ -235,15 +235,15 @@ void ff_ivi_recompose_haar(const IVIPlaneDesc *plane, uint8_t *dst,

/** butterfly operation for the inverse Haar transform */
#define IVI_HAAR_BFLY(s1, s2, o1, o2, t) \
- t = (s1 - s2) >> 1;\
- o1 = (s1 + s2) >> 1;\
- o2 = t;\
+ t = ((s1) - (s2)) >> 1;\
+ o1 = ((s1) + (s2)) >> 1;\
+ o2 = (t);\

/** inverse 8-point Haar transform */
#define INV_HAAR8(s1, s5, s3, s7, s2, s4, s6, s8,\
d1, d2, d3, d4, d5, d6, d7, d8,\
t0, t1, t2, t3, t4, t5, t6, t7, t8) {\
- t1 = s1 << 1; t5 = s5 << 1;\
+ t1 = (s1) << 1; t5 = (s5) << 1;\
IVI_HAAR_BFLY(t1, t5, t1, t5, t0); IVI_HAAR_BFLY(t1, s3, t1, t3, t0);\
IVI_HAAR_BFLY(t5, s7, t5, t7, t0); IVI_HAAR_BFLY(t1, s2, t1, t2, t0);\
IVI_HAAR_BFLY(t3, s4, t3, t4, t0); IVI_HAAR_BFLY(t5, s6, t5, t6, t0);\

@@ -485,21 +485,21 @@ void ff_ivi_dc_haar_2d(const int32_t *in, int16_t *out, uint32_t pitch,

/** butterfly operation for the inverse slant transform */
#define IVI_SLANT_BFLY(s1, s2, o1, o2, t) \
- t = s1 - s2;\
- o1 = s1 + s2;\
- o2 = t;\
+ t = (s1) - (s2);\
+ o1 = (s1) + (s2);\
+ o2 = (t);\

/** This is a reflection a,b = 1/2, 5/4 for the inverse slant transform */
#define IVI_IREFLECT(s1, s2, o1, o2, t) \
- t = ((s1 + s2*2 + 2) >> 2) + s1;\
- o2 = ((s1*2 - s2 + 2) >> 2) - s2;\
- o1 = t;\
+ t = (((s1) + (s2)*2 + 2) >> 2) + (s1);\
+ o2 = (((s1)*2 - (s2) + 2) >> 2) - (s2);\
+ o1 = (t);\

/** This is a reflection a,b = 1/2, 7/8 for the inverse slant transform */
#define IVI_SLANT_PART4(s1, s2, o1, o2, t) \
- t = s2 + ((s1*4 - s2 + 4) >> 3);\
- o2 = s1 + ((-s1 - s2*4 + 4) >> 3);\
- o1 = t;\
+ t = (s2) + (((s1)*4 - (s2) + 4) >> 3);\
+ o2 = (s1) + ((-(s1) - (s2)*4 + 4) >> 3);\
+ o1 = (t);\

/** inverse slant8 transform */
#define IVI_INV_SLANT8(s1, s4, s8, s5, s2, s6, s3, s7,\

@@ -557,7 +557,7 @@ void ff_ivi_inverse_slant_8x8(const int32_t *in, int16_t *out, uint32_t pitch, c
}
#undef COMPENSATE

- #define COMPENSATE(x) ((x + 1)>>1)
+ #define COMPENSATE(x) (((x) + 1)>>1)
src = tmp;
for (i = 0; i < 8; i++) {
if (!src[0] && !src[1] && !src[2] && !src[3] && !src[4] && !src[5] && !src[6] && !src[7]) {

@@ -597,7 +597,7 @@ void ff_ivi_inverse_slant_4x4(const int32_t *in, int16_t *out, uint32_t pitch, c
}
#undef COMPENSATE

- #define COMPENSATE(x) ((x + 1)>>1)
+ #define COMPENSATE(x) (((x) + 1)>>1)
src = tmp;
for (i = 0; i < 4; i++) {
if (!src[0] && !src[1] && !src[2] && !src[3]) {

@@ -631,7 +631,7 @@ void ff_ivi_row_slant8(const int32_t *in, int16_t *out, uint32_t pitch, const ui
int i;
int t0, t1, t2, t3, t4, t5, t6, t7, t8;

- #define COMPENSATE(x) ((x + 1)>>1)
+ #define COMPENSATE(x) (((x) + 1)>>1)
for (i = 0; i < 8; i++) {
if (!in[0] && !in[1] && !in[2] && !in[3] && !in[4] && !in[5] && !in[6] && !in[7]) {
memset(out, 0, 8*sizeof(out[0]));

@@ -673,7 +673,7 @@ void ff_ivi_col_slant8(const int32_t *in, int16_t *out, uint32_t pitch, const ui
row4 = pitch << 2;
row8 = pitch << 3;

- #define COMPENSATE(x) ((x + 1)>>1)
+ #define COMPENSATE(x) (((x) + 1)>>1)
for (i = 0; i < 8; i++) {
if (flags[i]) {
IVI_INV_SLANT8(in[0], in[8], in[16], in[24], in[32], in[40], in[48], in[56],

@@ -710,7 +710,7 @@ void ff_ivi_row_slant4(const int32_t *in, int16_t *out, uint32_t pitch, const ui
int i;
int t0, t1, t2, t3, t4;

- #define COMPENSATE(x) ((x + 1)>>1)
+ #define COMPENSATE(x) (((x) + 1)>>1)
for (i = 0; i < 4; i++) {
if (!in[0] && !in[1] && !in[2] && !in[3]) {
memset(out, 0, 4*sizeof(out[0]));

@@ -732,7 +732,7 @@ void ff_ivi_col_slant4(const int32_t *in, int16_t *out, uint32_t pitch, const ui

row2 = pitch << 1;

- #define COMPENSATE(x) ((x + 1)>>1)
+ #define COMPENSATE(x) (((x) + 1)>>1)
for (i = 0; i < 4; i++) {
if (flags[i]) {
IVI_INV_SLANT4(in[0], in[4], in[8], in[12],
@@ -349,7 +349,8 @@ static int libvorbis_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
avctx->delay = duration;
av_assert0(!s->afq.remaining_delay);
s->afq.frames->duration += duration;
- s->afq.frames->pts -= duration;
+ if (s->afq.frames->pts != AV_NOPTS_VALUE)
+ s->afq.frames->pts -= duration;
s->afq.remaining_samples += duration;
}
ff_af_queue_remove(&s->afq, duration, &avpkt->pts, &avpkt->duration);
@@ -391,19 +391,6 @@ static av_cold int X264_init(AVCodecContext *avctx)

OPT_STR("level", x4->level);

- if(x4->x264opts){
- const char *p= x4->x264opts;
- while(p){
- char param[256]={0}, val[256]={0};
- if(sscanf(p, "%255[^:=]=%255[^:]", param, val) == 1){
- OPT_STR(param, "1");
- }else
- OPT_STR(param, val);
- p= strchr(p, ':');
- p+=!!p;
- }
- }
-
if (avctx->i_quant_factor > 0)
x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor);

@@ -589,6 +576,19 @@ static av_cold int X264_init(AVCodecContext *avctx)
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER)
x4->params.b_repeat_headers = 0;

+ if(x4->x264opts){
+ const char *p= x4->x264opts;
+ while(p){
+ char param[256]={0}, val[256]={0};
+ if(sscanf(p, "%255[^:=]=%255[^:]", param, val) == 1){
+ OPT_STR(param, "1");
+ }else
+ OPT_STR(param, val);
+ p= strchr(p, ':');
+ p+=!!p;
+ }
+ }
+
if (x4->x264_params) {
AVDictionary *dict = NULL;
AVDictionaryEntry *en = NULL;
@@ -81,6 +81,7 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
libx265Context *ctx = avctx->priv_data;
x265_nal *nal;
uint8_t *buf;
+ char sar[10];
int sar_num, sar_den;
int nnal;
int ret;

@@ -121,11 +122,11 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
av_reduce(&sar_num, &sar_den,
avctx->sample_aspect_ratio.num,
avctx->sample_aspect_ratio.den, 4096);
- ctx->params->bEnableVuiParametersPresentFlag = 1;
- ctx->params->bEnableAspectRatioIdc = 1;
- ctx->params->aspectRatioIdc = 255;
- ctx->params->sarWidth = sar_num;
- ctx->params->sarHeight = sar_den;
+ snprintf(sar, sizeof(sar), "%d:%d", sar_num, sar_den);
+ if (x265_param_parse(ctx->params, "sar", sar) == X265_PARAM_BAD_VALUE) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid SAR: %d:%d.\n", sar_num, sar_den);
+ return AVERROR_INVALIDDATA;
+ }

if (x265_max_bit_depth == 8)
ctx->params->internalBitDepth = 8;

@@ -190,7 +191,7 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
for (i = 0; i < nnal; i++)
ctx->header_size += nal[i].sizeBytes;

- ctx->header = av_malloc(ctx->header_size);
+ ctx->header = av_malloc(ctx->header_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!ctx->header) {
av_log(avctx, AV_LOG_ERROR,
"Cannot allocate HEVC header of size %d.\n", ctx->header_size);

@@ -204,6 +205,13 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
buf += nal[i].sizeBytes;
}

+ if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+ avctx->extradata_size = ctx->header_size;
+ avctx->extradata = ctx->header;
+ ctx->header_size = 0;
+ ctx->header = NULL;
+ }
+
return 0;
}
@@ -83,6 +83,17 @@ static void build_basic_mjpeg_vlc(MJpegDecodeContext *s)
avpriv_mjpeg_val_ac_chrominance, 251, 0, 0);
}

+ static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
+ {
+ s->buggy_avid = 1;
+ if (len > 14 && buf[12] == 1) /* 1 - NTSC */
+ s->interlace_polarity = 1;
+ if (len > 14 && buf[12] == 2) /* 2 - PAL */
+ s->interlace_polarity = 0;
+ if (s->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
+ }
+
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
{
MJpegDecodeContext *s = avctx->priv_data;

@@ -120,7 +131,17 @@ av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
s->interlace_polarity = 1; /* bottom field first */
av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
+ } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
+ if (avctx->codec_tag == AV_RL32("MJPG"))
+ s->interlace_polarity = 1;
}

+ if ( avctx->extradata_size > 8
+ && AV_RL32(avctx->extradata) == 0x2C
+ && AV_RL32(avctx->extradata+4) == 0x18) {
+ parse_avid(s, avctx->extradata, avctx->extradata_size);
+ }
+
if (avctx->codec->id == AV_CODEC_ID_AMV)
s->flipped = 1;
@@ -1230,7 +1251,7 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
}

if (!Al) {
- s->coefs_finished[c] |= (1LL << (se + 1)) - (1LL << ss);
+ s->coefs_finished[c] |= (2LL << se) - (1LL << ss);
last_scan = !~s->coefs_finished[c];
}
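The progressive-scan hunk above swaps `(1LL << (se + 1)) - (1LL << ss)` for the algebraically equal `(2LL << se) - (1LL << ss)`; both build a mask covering bits ss through se, but the second form never shifts by se + 1, which would be an out-of-range shift once se reaches 63. A small check with unsigned 64-bit arithmetic (the variable names are hypothetical):

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    int ss = 1, se = 5;

    /* bits ss..se inclusive: 0b111110 = 62 */
    uint64_t mask = (2ULL << se) - (1ULL << ss);
    printf("mask = %" PRIu64 "\n", mask);

    /* se = 63: 2ULL << 63 wraps to 0 in unsigned arithmetic, so the
     * subtraction still yields bits 1..63, whereas 1ULL << 64 would be
     * an invalid shift. */
    se = 63;
    printf("top mask = 0x%" PRIx64 "\n", (2ULL << se) - (1ULL << ss));
    return 0;
}
```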
@@ -1694,9 +1715,7 @@ static int mjpeg_decode_com(MJpegDecodeContext *s)

/* buggy avid, it puts EOI only at every 10th frame */
if (!strncmp(cbuf, "AVID", 4)) {
- s->buggy_avid = 1;
- if (len > 14 && cbuf[12] == 1) /* 1 - NTSC, 2 - PAL */
- s->interlace_polarity = 1;
+ parse_avid(s, cbuf, len);
} else if (!strcmp(cbuf, "CS=ITU601"))
s->cs_itu601 = 1;
else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32)) ||
@@ -487,7 +487,7 @@ static void encode_block(MpegEncContext *s, int16_t *block, int n)
put_bits(&s->pb, huff_size_ac[0], huff_code_ac[0]);
}

- void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[6][64])
+ void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
{
int i;
if (s->chroma_format == CHROMA_444) {

@@ -61,6 +61,6 @@ void ff_mjpeg_encode_stuffing(MpegEncContext *s);
void ff_mjpeg_init_hvsample(AVCodecContext *avctx, int hsample[3], int vsample[3]);
void ff_mjpeg_encode_dc(PutBitContext *pb, int val,
uint8_t *huff_size, uint16_t *huff_code);
- void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[6][64]);
+ void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64]);

#endif /* AVCODEC_MJPEGENC_H */
@@ -872,7 +872,7 @@ static int read_decoding_params(MLPDecodeContext *m, GetBitContext *gbp,
return 0;
}

- #define MSB_MASK(bits) (-1u << bits)
+ #define MSB_MASK(bits) (-1u << (bits))

/** Generate PCM samples using the prediction filters and residual values
* read from the data stream, and update the filter state. */
@@ -1774,7 +1774,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
return -1;
}

- if (!avctx->hwaccel) {
+ if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
for(i=0; i<avctx->height; i++)
memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i,
0x80, avctx->width);

@@ -99,6 +99,7 @@ struct MpegEncContext;
*/
typedef struct Picture{
struct AVFrame f;
+ uint8_t avframe_padding[1024]; // hack to allow linking to a avutil with larger AVFrame
ThreadFrame tf;

AVBufferRef *qscale_table_buf;
@@ -195,8 +196,8 @@ typedef struct Picture{
int mbaff; ///< h264 1 -> MBAFF frame 0-> not MBAFF
int field_picture; ///< whether or not the picture was encoded in separate fields

- int mb_var_sum; ///< sum of MB variance for current frame
- int mc_mb_var_sum; ///< motion compensated MB variance for current frame
+ int64_t mb_var_sum; ///< sum of MB variance for current frame
+ int64_t mc_mb_var_sum; ///< motion compensated MB variance for current frame

int b_frame_score;
int needs_realloc; ///< Picture needs to be reallocated (eg due to a frame size change)

@@ -251,8 +252,8 @@ typedef struct MotionEstContext{
int stride;
int uvstride;
/* temp variables for picture complexity calculation */
- int mc_mb_var_sum_temp;
- int mb_var_sum_temp;
+ int64_t mc_mb_var_sum_temp;
+ int64_t mb_var_sum_temp;
int scene_change_score;
/* cmp, chroma_cmp;*/
op_pixels_func (*hpel_put)[4];
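The widening of the variance accumulators above (`mb_var_sum`, `mc_mb_var_sum` and their helpers) guards against 32-bit overflow when per-macroblock sums are added up over very large frames. A rough, assumed worst-case estimate of why `int` is not enough:

```c
#include <limits.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical worst case: an 8192x4320 frame of 16x16 macroblocks */
    long long mbs        = (8192 / 16) * (4320LL / 16); /* 138240 blocks */
    long long per_mb_max = 255LL * 255 * 16 * 16;       /* max sum of squared 8-bit deviations */
    long long frame_sum  = mbs * per_mb_max;

    /* ~2.3e12, far beyond INT_MAX (~2.1e9), so the sum needs 64 bits */
    printf("%lld vs %d\n", frame_sum, INT_MAX);
    return 0;
}
```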
@@ -1039,6 +1039,10 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
direct = 0;
if ((s->width & 15) || (s->height & 15))
direct = 0;
+ if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
+ direct = 0;
+ if (s->linesize & (STRIDE_ALIGN-1))
+ direct = 0;

av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
pic_arg->linesize[1], s->linesize, s->uvlinesize);
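The `STRIDE_ALIGN` checks added above use the standard power-of-two alignment test: a pointer (or stride) is aligned when its low bits are zero. Sketched in isolation, with an assumed 16-byte alignment requirement:

```c
#include <stdint.h>
#include <stdio.h>

#define ALIGN 16  /* stand-in for STRIDE_ALIGN; must be a power of two */

static int is_aligned(const void *p)
{
    /* the low log2(ALIGN) bits of the address must all be zero */
    return ((uintptr_t)p & (ALIGN - 1)) == 0;
}

int main(void)
{
    _Alignas(16) unsigned char buf[64];
    printf("%d %d\n", is_aligned(buf), is_aligned(buf + 3));  /* 1 0 */
    return 0;
}
```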
@@ -3439,7 +3443,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
if(s->msmpeg4_version >= 3)
s->no_rounding=1;
- av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
+ av_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
}
@@ -55,7 +55,7 @@ static av_cold int msrle_decode_init(AVCodecContext *avctx)

s->avctx = avctx;

- switch (avctx->bits_per_coded_sample & 0x1f) {
+ switch (avctx->bits_per_coded_sample) {
case 1:
avctx->pix_fmt = AV_PIX_FMT_MONOWHITE;
break;

@@ -116,6 +116,9 @@ static int msrle_decode_frame(AVCodecContext *avctx,
uint8_t *buf = avpkt->data + (avctx->height-1)*istride;
int i, j;

+ if (linesize < 0)
+ return linesize;
+
for (i = 0; i < avctx->height; i++) {
if (avctx->bits_per_coded_sample == 4) {
for (j = 0; j < avctx->width - 1; j += 2) {
@@ -84,8 +84,8 @@ void ff_mss34_gen_quant_mat(uint16_t *qmat, int quality, int luma)
blk[6 * step] = (-(t3 + t7) + t8 + tA) >> shift; \
blk[7 * step] = (-(t1 + t6) + t9 + tB) >> shift; \

- #define SOP_ROW(a) ((a) << 16) + 0x2000
- #define SOP_COL(a) ((a + 32) << 16)
+ #define SOP_ROW(a) (((a) << 16) + 0x2000)
+ #define SOP_COL(a) (((a) + 32) << 16)

void ff_mss34_dct_put(uint8_t *dst, int stride, int *block)
{

@@ -364,7 +364,7 @@ static int get_value_cached(GetBitContext *gb, int vec_pos, uint8_t *vec,
return prev[component];
}

- #define MKVAL(vals) (vals[0] | (vals[1] << 3) | (vals[2] << 6))
+ #define MKVAL(vals) ((vals)[0] | ((vals)[1] << 3) | ((vals)[2] << 6))

/* Image mode - the hardest to comprehend MSS4 coding mode.
*
@@ -36,12 +36,20 @@

.macro extfunc name
.global X(\name)
+ #if _CALL_ELF == 2
+ .text
+ X(\name):
+ addis %r2, %r12, .TOC.-X(\name)@ha
+ addi %r2, %r2, .TOC.-X(\name)@l
+ .localentry X(\name), .-X(\name)
+ #else
.section .opd, "aw"
X(\name):
.quad L(\name), .TOC.@tocbase, 0
.previous
.type X(\name), STT_FUNC
L(\name):
+ #endif
.endm

.macro movrel rd, sym, gp
@@ -317,6 +317,7 @@ static int submit_packet(PerThreadContext *p, AVPacket *avpkt)
FrameThreadContext *fctx = p->parent;
PerThreadContext *prev_thread = fctx->prev_thread;
const AVCodec *codec = p->avctx->codec;
+ int ret;

if (!avpkt->size && !(codec->capabilities & CODEC_CAP_DELAY)) return 0;

@@ -340,6 +341,7 @@ static int submit_packet(PerThreadContext *p, AVPacket *avpkt)
}
}

+ av_packet_free_side_data(&p->avpkt);
av_buffer_unref(&p->avpkt.buf);
p->avpkt = *avpkt;
if (avpkt->buf)

@@ -354,6 +356,10 @@ static int submit_packet(PerThreadContext *p, AVPacket *avpkt)
memcpy(p->buf, avpkt->data, avpkt->size);
memset(p->buf + avpkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
}
+ if ((ret = av_copy_packet_side_data(&p->avpkt, avpkt)) < 0) {
+ pthread_mutex_unlock(&p->mutex);
+ return ret;
+ }

p->state = STATE_SETTING_UP;
pthread_cond_signal(&p->input_cond);

@@ -592,6 +598,7 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
pthread_cond_destroy(&p->input_cond);
pthread_cond_destroy(&p->progress_cond);
pthread_cond_destroy(&p->output_cond);
+ av_packet_free_side_data(&p->avpkt);
av_buffer_unref(&p->avpkt.buf);
av_freep(&p->buf);
av_freep(&p->released_buffers);

@@ -727,8 +734,6 @@ void ff_thread_flush(AVCodecContext *avctx)
if (fctx->prev_thread) {
if (fctx->prev_thread != &fctx->threads[0])
update_context_from_thread(fctx->threads[0].avctx, fctx->prev_thread->avctx, 0);
- if (avctx->codec->flush)
- avctx->codec->flush(fctx->threads[0].avctx);
}

fctx->next_decoding = fctx->next_finished = 0;

@@ -741,6 +746,9 @@ void ff_thread_flush(AVCodecContext *avctx)
av_frame_unref(p->frame);

release_delayed_buffers(p);

+ if (avctx->codec->flush)
+ avctx->codec->flush(p->avctx);
}
}
@@ -46,7 +46,7 @@ void ff_write_pass1_stats(MpegEncContext *s)
 {
     snprintf(s->avctx->stats_out, 256,
              "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d "
-             "fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d;\n",
+             "fcode:%d bcode:%d mc-var:%"PRId64" var:%"PRId64" icount:%d skipcount:%d hbits:%d;\n",
              s->current_picture_ptr->f.display_picture_number,
              s->current_picture_ptr->f.coded_picture_number,
              s->pict_type,
@@ -206,7 +206,7 @@ av_cold int ff_rate_control_init(MpegEncContext *s)
             assert(picture_number < rcc->num_entries);
             rce = &rcc->entry[picture_number];

-            e += sscanf(p, " in:%*d out:%*d type:%d q:%f itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d",
+            e += sscanf(p, " in:%*d out:%*d type:%d q:%f itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%"SCNd64" var:%"SCNd64" icount:%d skipcount:%d hbits:%d",
                         &rce->pict_type, &rce->qscale, &rce->i_tex_bits, &rce->p_tex_bits,
                         &rce->mv_bits, &rce->misc_bits,
                         &rce->f_code, &rce->b_code,
@@ -753,7 +753,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
     RateControlEntry local_rce, *rce;
     double bits;
     double rate_factor;
-    int var;
+    int64_t var;
     const int pict_type = s->pict_type;
     Picture * const pic = &s->current_picture;
     emms_c();
@@ -769,8 +769,9 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
     fps = get_fps(s->avctx);
     /* update predictors */
     if (picture_number > 2 && !dry_run) {
-        const int last_var = s->last_pict_type == AV_PICTURE_TYPE_I ? rcc->last_mb_var_sum
-                                                                    : rcc->last_mc_mb_var_sum;
+        const int64_t last_var =
+            s->last_pict_type == AV_PICTURE_TYPE_I ? rcc->last_mb_var_sum
+                                                   : rcc->last_mc_mb_var_sum;
         av_assert1(s->frame_bits >= s->stuffing_bits);
         update_predictor(&rcc->pred[s->last_pict_type],
                          rcc->last_qscale,
@@ -817,7 +818,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
         assert(pict_type == rce->new_pict_type);

        q = rce->new_qscale / br_compensation;
-        av_dlog(s, "%f %f %f last:%d var:%d type:%d//\n", q, rce->new_qscale,
+        av_dlog(s, "%f %f %f last:%d var:%"PRId64" type:%d//\n", q, rce->new_qscale,
                br_compensation, s->frame_bits, var, pict_type);
     } else {
         rce->pict_type =
@@ -879,7 +880,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
     if (s->avctx->debug & FF_DEBUG_RC) {
         av_log(s->avctx, AV_LOG_DEBUG,
                "%c qp:%d<%2.1f<%d %d want:%d total:%d comp:%f st_q:%2.2f "
-               "size:%d var:%d/%d br:%d fps:%d\n",
+               "size:%d var:%"PRId64"/%"PRId64" br:%d fps:%d\n",
                av_get_picture_type_char(pict_type),
                qmin, q, qmax, picture_number,
                (int)wanted_bits / 1000, (int)s->total_bits / 1000,
@@ -49,8 +49,8 @@ typedef struct RateControlEntry{
     uint64_t expected_bits;
     int new_pict_type;
     float new_qscale;
-    int mc_mb_var_sum;
-    int mb_var_sum;
+    int64_t mc_mb_var_sum;
+    int64_t mb_var_sum;
     int i_count;
     int skip_count;
     int f_code;
@@ -71,8 +71,8 @@ typedef struct RateControlContext{
     double pass1_wanted_bits; ///< bits which should have been outputed by the pass1 code (including complexity init)
     double last_qscale;
     double last_qscale_for[5]; ///< last qscale for a specific pict type, used for max_diff & ipb factor stuff
-    int last_mc_mb_var_sum;
-    int last_mb_var_sum;
+    int64_t last_mc_mb_var_sum;
+    int64_t last_mb_var_sum;
     uint64_t i_cplx_sum[5];
     uint64_t p_cplx_sum[5];
     uint64_t mv_bits_sum[5];

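The rate-control hunks above widen several statistics fields from int to int64_t, so the matching printf/scanf conversions move to the <inttypes.h> macros PRId64 and SCNd64. A minimal standalone sketch of that round trip, with illustrative values rather than the encoder's real fields:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    int64_t mc_var = 3000000000LL;   /* would overflow a 32-bit int */
    char stats[64];
    int64_t parsed = 0;

    /* write pass-1 style stats with the 64-bit printf macro */
    snprintf(stats, sizeof(stats), "mc-var:%"PRId64";", mc_var);

    /* read them back with the matching scanf macro */
    sscanf(stats, "mc-var:%"SCNd64";", &parsed);

    printf("%s -> %"PRId64"\n", stats, parsed);
    return 0;
}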
@@ -83,17 +83,6 @@ static const PixelFormatTag pix_fmt_bps_mov[] = {
     { AV_PIX_FMT_NONE, 0 },
 };

-enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags,
-                                       unsigned int fourcc)
-{
-    while (tags->pix_fmt >= 0) {
-        if (tags->fourcc == fourcc)
-            return tags->pix_fmt;
-        tags++;
-    }
-    return AV_PIX_FMT_NONE;
-}
-
 #if LIBAVCODEC_VERSION_MAJOR < 55
 enum AVPixelFormat ff_find_pix_fmt(const PixelFormatTag *tags, unsigned int fourcc)
 {
@@ -109,7 +98,7 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
     if (   avctx->codec_tag == MKTAG('r','a','w',' ')
         || avctx->codec_tag == MKTAG('N','O','1','6'))
         avctx->pix_fmt = avpriv_find_pix_fmt(pix_fmt_bps_mov,
-                                             avctx->bits_per_coded_sample & 0x1f);
+                                             avctx->bits_per_coded_sample);
     else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
         avctx->pix_fmt = avpriv_find_pix_fmt(pix_fmt_bps_avi,
                                              avctx->bits_per_coded_sample);
@@ -135,7 +124,7 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
         memset(context->palette->data, 0, AVPALETTE_SIZE);
     }

-    if (((avctx->bits_per_coded_sample & 0x1f) == 4 || (avctx->bits_per_coded_sample & 0x1f) == 2) &&
+    if ((avctx->bits_per_coded_sample == 4 || avctx->bits_per_coded_sample == 2) &&
         avctx->pix_fmt == AV_PIX_FMT_PAL8 &&
         (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' '))) {
         context->is_2_4_bpp = 1;
@@ -209,14 +198,14 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
         int i;
         uint8_t *dst = frame->buf[0]->data;
         buf_size = context->frame_size - AVPALETTE_SIZE;
-        if ((avctx->bits_per_coded_sample & 0x1f) == 4) {
+        if (avctx->bits_per_coded_sample == 4) {
             for (i = 0; 2 * i + 1 < buf_size && i<avpkt->size; i++) {
                 dst[2 * i + 0] = buf[i] >> 4;
                 dst[2 * i + 1] = buf[i] & 15;
             }
             linesize_align = 8;
         } else {
-            av_assert0((avctx->bits_per_coded_sample & 0x1f) == 2);
+            av_assert0(avctx->bits_per_coded_sample == 2);
             for (i = 0; 4 * i + 3 < buf_size && i<avpkt->size; i++) {
                 dst[4 * i + 0] = buf[i] >> 6;
                 dst[4 * i + 1] = buf[i] >> 4 & 3;

@@ -730,7 +730,10 @@ static int rv10_decode_frame(AVCodecContext *avctx,
             offset + FFMAX(size, size2) > buf_size)
             return AVERROR_INVALIDDATA;

-        if (rv10_decode_packet(avctx, buf + offset, size, size2) > 8 * size)
+        if ((ret = rv10_decode_packet(avctx, buf + offset, size, size2)) < 0)
+            return ret;
+
+        if (ret > 8 * size)
             i++;
     }

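The rv10 hunk stores the decoder's return value before testing it, so a negative error code is propagated instead of only being compared against 8 * size. A generic sketch of the pattern, with decode_packet() as a stand-in rather than the real function:

#include <stdio.h>

/* stand-in for a decoder call returning bytes consumed or a negative error */
static int decode_packet(int size) { return size > 0 ? size : -1; }

static int process(int size)
{
    int ret;

    /* capture the result once and check the error path first ... */
    if ((ret = decode_packet(size)) < 0)
        return ret;               /* propagate the error */

    /* ... then reuse the same value for the success-path comparison */
    if (ret > 8 * size)
        printf("suspiciously large packet\n");
    return 0;
}

int main(void) { return process(16) < 0; }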
@@ -1502,8 +1502,8 @@ static int ratecontrol_1pass(SnowContext *s, AVFrame *pict)
     }

     /* ugly, ratecontrol just takes a sqrt again */
-    coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
     av_assert0(coef_sum < INT_MAX);
+    coef_sum = (uint64_t)coef_sum * coef_sum >> 16;

     if(pict->pict_type == AV_PICTURE_TYPE_I){
         s->m.current_picture.mb_var_sum= coef_sum;

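The snow hunk reorders the assertion so the operand is validated before the widened squaring, rather than checking the already-squared value. A small standalone sketch of the reordered sequence, using an illustrative value rather than the encoder's state:

#include <assert.h>
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t coef_sum = 70000;    /* illustrative value */

    /* validate the operand first ... */
    assert(coef_sum < INT_MAX);

    /* ... then perform the widened multiply, as in the reordered hunk */
    coef_sum = (uint64_t)coef_sum * coef_sum >> 16;

    printf("%"PRIu64"\n", coef_sum);
    return 0;
}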
@@ -1187,7 +1187,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
     if (h->pict_type != AV_PICTURE_TYPE_I) {
         if (!s->last_pic->f.data[0]) {
             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
-            av_frame_unref(s->last_pic);
+            av_frame_unref(&s->last_pic->f);
             ret = get_buffer(avctx, s->last_pic);
             if (ret < 0)
                 return ret;
@@ -1200,7 +1200,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,

         if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
             av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
-            av_frame_unref(s->next_pic);
+            av_frame_unref(&s->next_pic->f);
             ret = get_buffer(avctx, s->next_pic);
             if (ret < 0)
                 return ret;

@@ -46,6 +46,7 @@
 #include "thread.h"
 #include "frame_thread_encoder.h"
 #include "internal.h"
+#include "raw.h"
 #include "bytestream.h"
 #include "version.h"
 #include <stdlib.h>
@@ -274,12 +275,6 @@ int ff_side_data_update_matrix_encoding(AVFrame *frame,
     return 0;
 }

-#if HAVE_NEON || ARCH_PPC || HAVE_MMX
-#   define STRIDE_ALIGN 16
-#else
-#   define STRIDE_ALIGN 8
-#endif
-
 void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
                                int linesize_align[AV_NUM_DATA_POINTERS])
 {
@@ -813,6 +808,7 @@ int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
 typedef struct CompatReleaseBufPriv {
     AVCodecContext avctx;
     AVFrame frame;
+    uint8_t avframe_padding[1024]; // hack to allow linking to a avutil with larger AVFrame
 } CompatReleaseBufPriv;

 static void compat_free_buffer(void *opaque, uint8_t *data)
@@ -1076,6 +1072,17 @@ int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2,
     return 0;
 }

+enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags,
+                                       unsigned int fourcc)
+{
+    while (tags->pix_fmt >= 0) {
+        if (tags->fourcc == fourcc)
+            return tags->pix_fmt;
+        tags++;
+    }
+    return AV_PIX_FMT_NONE;
+}
+
 static int is_hwaccel_pix_fmt(enum AVPixelFormat pix_fmt)
 {
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
@@ -1621,7 +1628,7 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
                                               const AVFrame *frame,
                                               int *got_packet_ptr)
 {
-    AVFrame tmp;
+    AVFrame *extended_frame = NULL;
     AVFrame *padded_frame = NULL;
     int ret;
     AVPacket user_pkt = *avpkt;
@@ -1646,9 +1653,13 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
         }
         av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");

-        tmp = *frame;
-        tmp.extended_data = tmp.data;
-        frame = &tmp;
+        extended_frame = av_frame_alloc();
+        if (!extended_frame)
+            return AVERROR(ENOMEM);
+
+        memcpy(extended_frame, frame, sizeof(AVFrame));
+        extended_frame->extended_data = extended_frame->data;
+        frame = extended_frame;
     }

     /* check for valid frame size */
@@ -1656,14 +1667,15 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
     if (avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
         if (frame->nb_samples > avctx->frame_size) {
             av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
-            return AVERROR(EINVAL);
+            ret = AVERROR(EINVAL);
+            goto end;
         }
     } else if (!(avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
         if (frame->nb_samples < avctx->frame_size &&
             !avctx->internal->last_audio_frame) {
             ret = pad_last_frame(avctx, &padded_frame, frame);
             if (ret < 0)
-                return ret;
+                goto end;

             frame = padded_frame;
             avctx->internal->last_audio_frame = 1;
@@ -1735,6 +1747,7 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,

 end:
     av_frame_free(&padded_frame);
+    av_free(extended_frame);

     return ret;
 }

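The avcodec_encode_audio2() changes above replace several early returns with ret = ...; goto end; so that the newly heap-allocated extended_frame (and the padded frame) are always released on a single exit path. A stripped-down sketch of that single-exit error-handling pattern, using placeholder error codes and a plain malloc instead of the libavcodec helpers:

#include <stdlib.h>

#define ERR_NOMEM (-12)   /* placeholder, not the libavcodec value */
#define ERR_INVAL (-22)   /* placeholder, not the libavcodec value */

static int encode_with_cleanup(int nb_samples, int frame_size)
{
    int ret = 0;
    void *extended_frame = malloc(64);   /* stands in for the copied frame */

    if (!extended_frame)
        return ERR_NOMEM;                /* nothing allocated yet, plain return is safe */

    if (nb_samples > frame_size) {
        ret = ERR_INVAL;                 /* record the error ... */
        goto end;                        /* ... and fall through to the cleanup */
    }

    /* ... real encoding work would happen here ... */

end:
    free(extended_frame);                /* the one place that releases resources */
    return ret;
}

int main(void) { return encode_with_cleanup(1024, 1152) != 0; }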
@@ -1912,9 +1912,10 @@ static void vc1_interp_mc(VC1Context *v)
     uvmx = (mx + ((mx & 3) == 3)) >> 1;
     uvmy = (my + ((my & 3) == 3)) >> 1;
     if (v->field_mode) {
-        if (v->cur_field_type != v->ref_field_type[1])
+        if (v->cur_field_type != v->ref_field_type[1]) {
             my   = my   - 2 + 4 * v->cur_field_type;
             uvmy = uvmy - 2 + 4 * v->cur_field_type;
+        }
     }
     if (v->fastuvmc) {
         uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));

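The vc1_interp_mc() hunk adds braces because the conditional now has to cover both the my and uvmy updates; without them only the first statement stays under the if. A minimal reproduction of the pitfall with plain ints in place of the motion-vector fields:

#include <stdio.h>

int main(void)
{
    int cur_field_type = 1, ref_field_type = 1;
    int my = 0, uvmy = 0;

    if (cur_field_type != ref_field_type) {  /* braces keep both updates conditional */
        my   = my   - 2 + 4 * cur_field_type;
        uvmy = uvmy - 2 + 4 * cur_field_type;
    }
    /* without the braces, the uvmy line would run unconditionally */

    printf("my=%d uvmy=%d\n", my, uvmy);
    return 0;
}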
@@ -85,6 +85,11 @@ int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num)

     ++p;

+    for (i = p; (bits[i] == 0) && (i < num); ++i)
+        ;
+    if (i == num)
+        return 0;
+
     for (; p < num; ++p) {
         if (bits[p] > 32)
             return 1;
@@ -151,7 +151,7 @@ typedef struct vorbis_context_s {
     uint8_t mode_count;
     vorbis_mode *modes;
     uint8_t mode_number; // mode number for the current packet
-    uint8_t previous_window;
+    int8_t previous_window;
     float *channel_residues;
     float *saved;
 } vorbis_context;
@@ -701,8 +701,7 @@ static int vorbis_parse_setup_hdr_residues(vorbis_context *vc)
         res_setup->partition_size = get_bits(gb, 24) + 1;
         /* Validations to prevent a buffer overflow later. */
         if (res_setup->begin>res_setup->end ||
-            res_setup->end > (res_setup->type == 2 ? vc->audio_channels : 1) * vc->blocksize[1] / 2 ||
-            (res_setup->end-res_setup->begin) / res_setup->partition_size > V_MAX_PARTITIONS) {
+            (res_setup->end-res_setup->begin) / res_setup->partition_size > FFMIN(V_MAX_PARTITIONS, 65535)) {
             av_log(vc->avctx, AV_LOG_ERROR,
                    "partition out of bounds: type, begin, end, size, blocksize: %"PRIu16", %"PRIu32", %"PRIu32", %u, %"PRIu32"\n",
                    res_setup->type, res_setup->begin, res_setup->end,
@@ -989,7 +988,7 @@ static int vorbis_parse_id_hdr(vorbis_context *vc)
     if (!vc->channel_residues || !vc->saved)
         return AVERROR(ENOMEM);

-    vc->previous_window = 0;
+    vc->previous_window = -1;

     ff_mdct_init(&vc->mdct[0], bl0, 1, -1.0);
     ff_mdct_init(&vc->mdct[1], bl1, 1, -1.0);
@@ -1372,6 +1371,7 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
     unsigned pass, ch_used, i, j, k, l;
     unsigned max_output = (ch - 1) * vlen;
     int ptns_to_read = vr->ptns_to_read;
+    int libvorbis_bug = 0;

     if (vr_type == 2) {
         for (j = 1; j < ch; ++j)
@@ -1386,8 +1386,13 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
     }

     if (max_output > ch_left * vlen) {
-        av_log(vc->avctx, AV_LOG_ERROR, "Insufficient output buffer\n");
-        return AVERROR_INVALIDDATA;
+        if (max_output <= ch_left * vlen + vr->partition_size*ch_used/ch) {
+            ptns_to_read--;
+            libvorbis_bug = 1;
+        } else {
+            av_log(vc->avctx, AV_LOG_ERROR, "Insufficient output buffer\n");
+            return AVERROR_INVALIDDATA;
+        }
     }

     av_dlog(NULL, " residue type 0/1/2 decode begin, ch: %d cpc %d \n", ch, c_p_c);
@@ -1496,6 +1501,14 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
                 voffset += vr->partition_size;
             }
         }
+        if (libvorbis_bug && !pass) {
+            for (j = 0; j < ch_used; ++j) {
+                if (!do_not_decode[j]) {
+                    get_vlc2(&vc->gb, vc->codebooks[vr->classbook].vlc.table,
+                             vc->codebooks[vr->classbook].nb_bits, 3);
+                }
+            }
+        }
     }
     return 0;
 }
@@ -1548,7 +1561,7 @@ static int vorbis_parse_audio_packet(vorbis_context *vc, float **floor_ptr)
 {
     GetBitContext *gb = &vc->gb;
     FFTContext *mdct;
-    unsigned previous_window = vc->previous_window;
+    int previous_window = vc->previous_window;
     unsigned mode_number, blockflag, blocksize;
     int i, j;
     uint8_t no_residue[255];
@@ -1581,9 +1594,11 @@ static int vorbis_parse_audio_packet(vorbis_context *vc, float **floor_ptr)
     blocksize = vc->blocksize[blockflag];
     vlen = blocksize / 2;
     if (blockflag) {
-        previous_window = get_bits(gb, 1);
-        skip_bits1(gb); // next_window
-    }
+        int code = get_bits(gb, 2);
+        if (previous_window < 0)
+            previous_window = code>>1;
+    } else if (previous_window < 0)
+        previous_window = 0;

     memset(ch_res_ptr, 0, sizeof(float) * vc->audio_channels * vlen); //FIXME can this be removed ?
     for (i = 0; i < vc->audio_channels; ++i)
@@ -1812,7 +1827,7 @@ static av_cold void vorbis_decode_flush(AVCodecContext *avctx)
         memset(vc->saved, 0, (vc->blocksize[1] / 4) * vc->audio_channels *
                              sizeof(*vc->saved));
     }
-    vc->previous_window = 0;
+    vc->previous_window = -1;
 }

 AVCodec ff_vorbis_decoder = {

@@ -386,9 +386,9 @@ int ff_wma_end(AVCodecContext *avctx)
     }
     for (i = 0; i < 2; i++) {
         ff_free_vlc(&s->coef_vlc[i]);
-        av_free(s->run_table[i]);
-        av_free(s->level_table[i]);
-        av_free(s->int_table[i]);
+        av_freep(&s->run_table[i]);
+        av_freep(&s->level_table[i]);
+        av_freep(&s->int_table[i]);
     }

     return 0;

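Switching ff_wma_end() from av_free() to av_freep() clears each table pointer as it is released, so a later double free or stale read hits a NULL pointer instead of freed memory. A tiny standalone analogue of the freep idiom (not libavutil's implementation):

#include <stdlib.h>

/* minimal analogue of av_freep(): free the pointee and null the pointer */
static void freep(void *arg)
{
    void **ptr = (void **)arg;
    free(*ptr);
    *ptr = NULL;
}

int main(void)
{
    int *run_table = malloc(16 * sizeof(*run_table));

    freep(&run_table);   /* run_table is NULL afterwards */
    freep(&run_table);   /* calling it again is a harmless free(NULL) */
    return 0;
}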
@@ -343,7 +343,7 @@ DECLARE_ASM_CONST(16, int32_t, walkenIdctRounders)[] = {
     "movdqa %%xmm6, 4*16("dct") \n\t" \
     "movdqa "SREG2", 7*16("dct") \n\t"

-inline void ff_idct_xvid_sse2(short *block)
+av_extern_inline void ff_idct_xvid_sse2(short *block)
 {
     __asm__ volatile(
     "movq "MANGLE(m127)", %%mm0 \n\t"
@@ -216,7 +216,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
     "psubusw "MM"1, "MM"4 \n\t"
     "packuswb "MM"4, "MM"4 \n\t"
 #if COMPILE_TEMPLATE_SSE2
-    "packuswb "MM"4, "MM"4 \n\t"
+    "packsswb "MM"4, "MM"4 \n\t"
 #endif
     "movd "MM"4, %0 \n\t" // *overflow
     : "=g" (*overflow)

@@ -102,7 +102,7 @@ static int iec61883_callback(unsigned char *data, int length,
     DVPacket *packet;
     int ret;

-#ifdef THREADS
+#if THREADS
     pthread_mutex_lock(&dv->mutex);
 #endif

@@ -139,7 +139,7 @@ static int iec61883_callback(unsigned char *data, int length,
     ret = 0;

 exit:
-#ifdef THREADS
+#if THREADS
     pthread_cond_broadcast(&dv->cond);
     pthread_mutex_unlock(&dv->mutex);
 #endif
@@ -151,7 +151,7 @@ static void *iec61883_receive_task(void *opaque)
     struct iec61883_data *dv = (struct iec61883_data *)opaque;
     int result;

-#ifdef THREADS
+#if THREADS
     while (dv->thread_loop)
 #endif
     {
@@ -168,7 +168,7 @@ static void *iec61883_receive_task(void *opaque)
         raw1394_loop_iterate(dv->raw1394);
     } else if (dv->receiving) {
         av_log(NULL, AV_LOG_ERROR, "No more input data available\n");
-#ifdef THREADS
+#if THREADS
         pthread_mutex_lock(&dv->mutex);
         dv->eof = 1;
         pthread_cond_broadcast(&dv->cond);
@@ -413,7 +413,7 @@ static int iec61883_read_packet(AVFormatContext *context, AVPacket *pkt)
      * Try to parse frames from queue
      */

-#ifdef THREADS
+#if THREADS
     pthread_mutex_lock(&dv->mutex);
     while ((size = dv->parse_queue(dv, pkt)) == -1)
         if (!dv->eof)

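The iec61883 hunks replace #ifdef THREADS with #if THREADS: configure-style builds define the macro to 0 or 1, and #ifdef would keep the pthread code compiled in even when it is defined as 0. A compact illustration of the difference:

#include <stdio.h>

#define THREADS 0   /* defined, but disabled */

int main(void)
{
#ifdef THREADS
    puts("#ifdef: taken even though THREADS is 0");
#endif

#if THREADS
    puts("#if: only taken when THREADS expands to nonzero");
#endif
    return 0;
}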
@@ -1204,6 +1204,8 @@ static int opengl_draw(AVFormatContext *h, void *input, int repaint, int is_pkt)
     glClear(GL_COLOR_BUFFER_BIT);

     if (!repaint) {
+        if (is_pkt)
+            glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
         LOAD_TEXTURE_DATA(0, 0)
         if (desc->flags & AV_PIX_FMT_FLAG_PLANAR) {
             LOAD_TEXTURE_DATA(1, 1)

@@ -130,8 +130,15 @@ static int xv_write_header(AVFormatContext *s)
     xv->image_width = encctx->width;
     xv->image_height = encctx->height;
     if (!xv->window_width && !xv->window_height) {
+        AVRational sar = encctx->sample_aspect_ratio;
         xv->window_width = encctx->width;
         xv->window_height = encctx->height;
+        if (sar.num) {
+            if (sar.num > sar.den)
+                xv->window_width = av_rescale(xv->window_width, sar.num, sar.den);
+            if (sar.num < sar.den)
+                xv->window_height = av_rescale(xv->window_height, sar.den, sar.num);
+        }
     }
     xv->window = XCreateSimpleWindow(xv->display, DefaultRootWindow(xv->display),
                                      xv->window_x, xv->window_y,

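The xv output-device hunk sizes the default window from the stream's sample aspect ratio so anamorphic video is not displayed squeezed. A plain-C sketch of the same scaling rule, with an example 16:15 sample aspect ratio standing in for whatever the stream carries:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int width = 720, height = 576;
    int sar_num = 16, sar_den = 15;     /* example sample aspect ratio */
    int64_t window_w = width, window_h = height;

    if (sar_num) {
        if (sar_num > sar_den)          /* wide samples: stretch horizontally */
            window_w = window_w * sar_num / sar_den;
        if (sar_num < sar_den)          /* tall samples: stretch vertically */
            window_h = window_h * sar_den / sar_num;
    }

    printf("window: %dx%d -> %lldx%lld\n", width, height,
           (long long)window_w, (long long)window_h);
    return 0;
}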
@@ -1058,11 +1058,11 @@ static int push_samples(ATempoContext *atempo,
                                   outlink->time_base);

     ret = ff_filter_frame(outlink, atempo->dst_buffer);
-    if (ret < 0)
-        return ret;
     atempo->dst_buffer = NULL;
     atempo->dst = NULL;
     atempo->dst_end = NULL;
+    if (ret < 0)
+        return ret;

     atempo->nsamples_out += n_out;
     return 0;

@@ -184,7 +184,7 @@ static av_cold int channelmap_init(AVFilterContext *ctx)
                 s->map[i].out_channel_idx = i;
                 break;
             case MAP_ONE_STR:
-                if (!get_channel(&mapping, &in_ch, separator)) {
+                if (get_channel(&mapping, &in_ch, separator) < 0) {
                     av_log(ctx, AV_LOG_ERROR, err);
                     return AVERROR(EINVAL);
                 }

@@ -389,6 +389,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
         }
     }

+    emms_c();
+
     if (buf != out_buf)
         av_frame_free(&buf);

@@ -279,7 +279,7 @@ static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
             p2 += 8 * linesize;
         }
         emms_c();
-        mafd = nb_sad ? sad / nb_sad : 0;
+        mafd = nb_sad ? (double)sad / nb_sad : 0;
         diff = fabs(mafd - select->prev_mafd);
         ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
         select->prev_mafd = mafd;

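In get_scene_score() both the accumulated SAD and the block count are integers, so sad / nb_sad truncates before it ever reaches the double on the left-hand side; the added cast keeps the fractional part. A two-line demonstration:

#include <stdio.h>

int main(void)
{
    long long sad = 7;
    int nb_sad = 2;

    double truncated = sad / nb_sad;           /* integer division first: 3.0 */
    double exact     = (double)sad / nb_sad;   /* promoted division: 3.5 */

    printf("%f vs %f\n", truncated, exact);
    return 0;
}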
@@ -38,7 +38,7 @@ static void print_formats(AVFilterContext *filter_ctx)
             for (j = 0; j < fmts->nb_formats; j++) \
                 if(av_get_pix_fmt_name(fmts->formats[j])) \
                     printf(#INOUT "PUT[%d] %s: fmt:%s\n", \
-                           i, filter_ctx->filter->inout##puts[i].name, \
+                           i, filter_ctx->inout##put_pads[i].name, \
                            av_get_pix_fmt_name(fmts->formats[j])); \
         } else if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_AUDIO) { \
             AVFilterFormats *fmts; \
@@ -47,7 +47,7 @@ static void print_formats(AVFilterContext *filter_ctx)
             fmts = filter_ctx->inout##puts[i]->outin##_formats; \
             for (j = 0; j < fmts->nb_formats; j++) \
                 printf(#INOUT "PUT[%d] %s: fmt:%s\n", \
-                       i, filter_ctx->filter->inout##puts[i].name, \
+                       i, filter_ctx->inout##put_pads[i].name, \
                        av_get_sample_fmt_name(fmts->formats[j])); \
 \
             layouts = filter_ctx->inout##puts[i]->outin##_channel_layouts; \
@@ -56,7 +56,7 @@ static void print_formats(AVFilterContext *filter_ctx)
                 av_get_channel_layout_string(buf, sizeof(buf), -1, \
                                              layouts->channel_layouts[j]); \
                 printf(#INOUT "PUT[%d] %s: chlayout:%s\n", \
-                       i, filter_ctx->filter->inout##puts[i].name, buf); \
+                       i, filter_ctx->inout##put_pads[i].name, buf); \
             } \
         } \
     } \
@@ -113,12 +113,12 @@ int main(int argc, char **argv)
     /* create a link for each of the input pads */
     for (i = 0; i < filter_ctx->nb_inputs; i++) {
         AVFilterLink *link = av_mallocz(sizeof(AVFilterLink));
-        link->type = filter_ctx->filter->inputs[i].type;
+        link->type = filter_ctx->input_pads[i].type;
         filter_ctx->inputs[i] = link;
     }
     for (i = 0; i < filter_ctx->nb_outputs; i++) {
         AVFilterLink *link = av_mallocz(sizeof(AVFilterLink));
-        link->type = filter_ctx->filter->outputs[i].type;
+        link->type = filter_ctx->output_pads[i].type;
         filter_ctx->outputs[i] = link;
     }

@@ -31,9 +31,10 @@ static int print_link_prop(AVBPrint *buf, AVFilterLink *link)
 {
     char *format;
     char layout[64];
+    AVBPrint dummy_buffer = { 0 };

     if (!buf)
-        buf = &(AVBPrint){ 0 }; /* dummy buffer */
+        buf = &dummy_buffer;
     switch (link->type) {
     case AVMEDIA_TYPE_VIDEO:
         format = av_x_if_null(av_get_pix_fmt_name(link->format), "?");

@@ -306,8 +306,8 @@ static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2,
         //av_log(NULL, AV_LOG_ERROR, "\n");
     }

-    p_x = (center_x - width / 2);
-    p_y = (center_y - height / 2);
+    p_x = (center_x - width / 2.0);
+    p_y = (center_y - height / 2.0);
     t->vector.x += (cos(t->angle)-1)*p_x - sin(t->angle)*p_y;
     t->vector.y += sin(t->angle)*p_x + (cos(t->angle)-1)*p_y;

@@ -81,7 +81,7 @@ static void *load_sym(AVFilterContext *ctx, const char *sym_name)
     Frei0rContext *s = ctx->priv;
     void *sym = dlsym(s->dl_handle, sym_name);
     if (!sym)
-        av_log(ctx, AV_LOG_ERROR, "Could not find symbol '%s' in loaded module\n", sym_name);
+        av_log(ctx, AV_LOG_ERROR, "Could not find symbol '%s' in loaded module.\n", sym_name);
     return sym;
 }

@@ -129,7 +129,7 @@ static int set_param(AVFilterContext *ctx, f0r_param_info_t info, int index, cha
     return 0;

 fail:
-    av_log(ctx, AV_LOG_ERROR, "Invalid value '%s' for parameter '%s'\n",
+    av_log(ctx, AV_LOG_ERROR, "Invalid value '%s' for parameter '%s'.\n",
            param, info.name);
     return AVERROR(EINVAL);
 }
@@ -202,11 +202,11 @@ static int set_params(AVFilterContext *ctx, const char *params)
         default: /* F0R_PARAM_STRING */
             v = s;
             s->get_param_value(s->instance, v, i);
-            av_log(ctx, AV_LOG_DEBUG, "'%s'\n", s);
+            av_log(ctx, AV_LOG_DEBUG, "'%s'", s);
             break;
         }
 #endif
-        av_log(ctx, AV_LOG_VERBOSE, "\n");
+        av_log(ctx, AV_LOG_VERBOSE, ".\n");
     }

     return 0;
@@ -217,7 +217,7 @@ static int load_path(AVFilterContext *ctx, void **handle_ptr, const char *prefix
     char *path = av_asprintf("%s%s%s", prefix, name, SLIBSUF);
     if (!path)
         return AVERROR(ENOMEM);
-    av_log(ctx, AV_LOG_DEBUG, "Looking for frei0r effect in '%s'\n", path);
+    av_log(ctx, AV_LOG_DEBUG, "Looking for frei0r effect in '%s'.\n", path);
     *handle_ptr = dlopen(path, RTLD_NOW|RTLD_LOCAL);
     av_free(path);
     return 0;
@@ -288,7 +288,7 @@ static av_cold int frei0r_init(AVFilterContext *ctx,
         return ret;
     }
     if (!s->dl_handle) {
-        av_log(ctx, AV_LOG_ERROR, "Could not find module '%s'\n", dl_name);
+        av_log(ctx, AV_LOG_ERROR, "Could not find module '%s'.\n", dl_name);
         return AVERROR(EINVAL);
     }

@@ -304,7 +304,7 @@ static av_cold int frei0r_init(AVFilterContext *ctx,
         return AVERROR(EINVAL);

     if (f0r_init() < 0) {
-        av_log(ctx, AV_LOG_ERROR, "Could not init the frei0r module\n");
+        av_log(ctx, AV_LOG_ERROR, "Could not init the frei0r module.\n");
         return AVERROR(EINVAL);
     }

@@ -312,7 +312,7 @@ static av_cold int frei0r_init(AVFilterContext *ctx,
     pi = &s->plugin_info;
     if (pi->plugin_type != type) {
         av_log(ctx, AV_LOG_ERROR,
-               "Invalid type '%s' for the plugin\n",
+               "Invalid type '%s' for this plugin\n",
               pi->plugin_type == F0R_PLUGIN_TYPE_FILTER ? "filter" :
               pi->plugin_type == F0R_PLUGIN_TYPE_SOURCE ? "source" :
               pi->plugin_type == F0R_PLUGIN_TYPE_MIXER2 ? "mixer2" :
@@ -359,7 +359,7 @@ static int config_input_props(AVFilterLink *inlink)
     if (s->destruct && s->instance)
         s->destruct(s->instance);
     if (!(s->instance = s->construct(inlink->w, inlink->h))) {
-        av_log(ctx, AV_LOG_ERROR, "Impossible to load frei0r instance\n");
+        av_log(ctx, AV_LOG_ERROR, "Impossible to load frei0r instance.\n");
         return AVERROR(EINVAL);
     }

@@ -476,7 +476,11 @@ static int source_config_props(AVFilterLink *outlink)
     if (s->destruct && s->instance)
         s->destruct(s->instance);
     if (!(s->instance = s->construct(outlink->w, outlink->h))) {
-        av_log(ctx, AV_LOG_ERROR, "Impossible to load frei0r instance\n");
+        av_log(ctx, AV_LOG_ERROR, "Impossible to load frei0r instance.\n");
+        return AVERROR(EINVAL);
+    }
+    if (!s->params) {
+        av_log(ctx, AV_LOG_ERROR, "frei0r filter parameters not set.\n");
         return AVERROR(EINVAL);
     }

@@ -147,6 +147,7 @@ static void denoise_depth(HQDN3DContext *s,
     else
         denoise_temporal(src, dst, frame_ant,
                          w, h, sstride, dstride, temporal, depth);
+    emms_c();
 }

 #define denoise(...) \

@@ -126,20 +126,21 @@ static int alloc_metrics(PullupContext *s, PullupField *f)
     return 0;
 }

-static void free_field_queue(PullupField *head, PullupField **last)
+static void free_field_queue(PullupField *head)
 {
     PullupField *f = head;
-    while (f) {
+    do {
+        PullupField *next;
+        if (!f)
+            break;
         av_free(f->diffs);
         av_free(f->combs);
         av_free(f->vars);
-        if (f == *last) {
-            av_freep(last);
-            break;
-        }
-        f = f->next;
-        av_freep(&f->prev);
-    };
+        next = f->next;
+        memset(f, 0, sizeof(*f));
+        av_free(f);
+        f = next;
+    } while (f != head);
 }

 static PullupField *make_field_queue(PullupContext *s, int len)
@@ -158,14 +159,14 @@ static PullupField *make_field_queue(PullupContext *s, int len)
     for (; len > 0; len--) {
         f->next = av_mallocz(sizeof(*f->next));
         if (!f->next) {
-            free_field_queue(head, &f);
+            free_field_queue(head);
             return NULL;
         }

         f->next->prev = f;
         f = f->next;
         if (alloc_metrics(s, f) < 0) {
-            free_field_queue(head, &f);
+            free_field_queue(head);
             return NULL;
         }
     }
@@ -255,6 +256,8 @@ static int alloc_buffer(PullupContext *s, PullupBuffer *b)
     for (i = 0; i < s->nb_planes; i++) {
         b->planes[i] = av_malloc(s->planeheight[i] * s->planewidth[i]);
     }
+    if (s->nb_planes == 1)
+        b->planes[1] = av_malloc(4*256);

     return 0;
 }
@@ -736,7 +739,8 @@ static av_cold void uninit(AVFilterContext *ctx)
     PullupContext *s = ctx->priv;
     int i;

-    free_field_queue(s->head, &s->last);
+    free_field_queue(s->head);
+    s->last = NULL;

     for (i = 0; i < FF_ARRAY_ELEMS(s->buffers); i++) {
         av_freep(&s->buffers[i].planes[0]);

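The pullup changes drop the extra last pointer argument and let free_field_queue() walk the (possibly circular) field list itself, saving each node's next link before the node is freed. A self-contained sketch of that traversal on a plain singly linked ring, not the PullupField struct:

#include <stdlib.h>

typedef struct Node {
    struct Node *next;
    int *metrics;                 /* per-node allocation, like f->diffs etc. */
} Node;

/* free a circular list starting at head; save next before freeing each node */
static void free_queue(Node *head)
{
    Node *f = head;
    do {
        Node *next;
        if (!f)
            break;                /* queue may be empty or not fully linked */
        free(f->metrics);
        next = f->next;           /* grab the link before the node vanishes */
        free(f);
        f = next;
    } while (f != head);          /* stop once we are back at the start */
}

int main(void)
{
    Node *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));
    a->next = b;
    b->next = a;                  /* two-element ring */
    free_queue(a);
    return 0;
}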
@@ -50,7 +50,7 @@ OBJS-$(CONFIG_RTPDEC) += rdt.o \
                          rtpdec_xiph.o \
                          srtp.o
 OBJS-$(CONFIG_RTPENC_CHAIN) += rtpenc_chain.o rtp.o
-OBJS-$(CONFIG_SHARED) += log2_tab.o
+OBJS-$(CONFIG_SHARED) += log2_tab.o golomb_tab.o

 # muxers/demuxers
 OBJS-$(CONFIG_A64_MUXER) += a64.o rawenc.o
@@ -84,7 +84,7 @@ OBJS-$(CONFIG_AST_MUXER) += ast.o astenc.o
 OBJS-$(CONFIG_AU_DEMUXER) += au.o pcm.o
 OBJS-$(CONFIG_AU_MUXER) += au.o rawenc.o
 OBJS-$(CONFIG_AVI_DEMUXER) += avidec.o
-OBJS-$(CONFIG_AVI_MUXER) += avienc.o
+OBJS-$(CONFIG_AVI_MUXER) += avienc.o avlanguage.o
 OBJS-$(CONFIG_AVISYNTH) += avisynth.o
 OBJS-$(CONFIG_AVM2_MUXER) += swfenc.o swf.o
 OBJS-$(CONFIG_AVR_DEMUXER) += avr.o pcm.o
@@ -203,7 +203,7 @@ OBJS-$(CONFIG_M4V_MUXER) += rawenc.o
 OBJS-$(CONFIG_MATROSKA_DEMUXER) += matroskadec.o matroska.o \
                                    isom.o rmsipr.o
 OBJS-$(CONFIG_MATROSKA_MUXER) += matroskaenc.o matroska.o \
-                                 isom.o avc.o \
+                                 isom.o avc.o hevc.o \
                                  flacenc_header.o avlanguage.o wv.o
 OBJS-$(CONFIG_MD5_MUXER) += md5enc.o
 OBJS-$(CONFIG_MGSTS_DEMUXER) += mgsts.o
@@ -217,7 +217,7 @@ OBJS-$(CONFIG_MM_DEMUXER) += mm.o
 OBJS-$(CONFIG_MMF_DEMUXER) += mmf.o
 OBJS-$(CONFIG_MMF_MUXER) += mmf.o rawenc.o
 OBJS-$(CONFIG_MOV_DEMUXER) += mov.o isom.o mov_chan.o
-OBJS-$(CONFIG_MOV_MUXER) += movenc.o isom.o avc.o \
+OBJS-$(CONFIG_MOV_MUXER) += movenc.o isom.o avc.o hevc.o \
                             movenchint.o mov_chan.o rtp.o
 OBJS-$(CONFIG_MP2_MUXER) += mp3enc.o rawenc.o id3v2enc.o
 OBJS-$(CONFIG_MP3_DEMUXER) += mp3dec.o

@@ -237,7 +237,7 @@ static int aiff_read_header(AVFormatContext *s)
             break;
         case MKTAG('I', 'D', '3', ' '):
             position = avio_tell(pb);
-            ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
+            ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta, size);
             if (id3v2_extra_meta)
                 if ((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0) {
                     ff_id3v2_free_extra_meta(&id3v2_extra_meta);
@@ -266,7 +266,7 @@ static void get_id3_tag(AVFormatContext *s, int len)
 {
     ID3v2ExtraMeta *id3v2_extra_meta = NULL;

-    ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
+    ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta, len);
     if (id3v2_extra_meta)
         ff_id3v2_parse_apic(s, &id3v2_extra_meta);
     ff_id3v2_free_extra_meta(&id3v2_extra_meta);

@@ -611,6 +611,7 @@ static int asf_write_header(AVFormatContext *s)
     ASFContext *asf = s->priv_data;

     s->packet_size = PACKET_SIZE;
+    s->max_interleave_delta = 0;
     asf->nb_packets = 0;

     asf->index_ptr = av_malloc(sizeof(ASFIndex) * ASF_INDEX_BLOCK);

@@ -844,6 +844,12 @@ typedef struct AVStream {
     double (*duration_error)[2][MAX_STD_TIMEBASES];
     int64_t codec_info_duration;
     int64_t codec_info_duration_fields;
+
+    /**
+     * 0  -> decoder has not been searched for yet.
+     * >0 -> decoder found
+     * <0 -> decoder with codec_id == -found_decoder has not been found
+     */
     int found_decoder;

     int64_t last_duration;
@@ -988,6 +994,13 @@ typedef struct AVStream {
     int64_t pts_reorder_error[MAX_REORDER_DELAY+1];
     uint8_t pts_reorder_error_count[MAX_REORDER_DELAY+1];

+    /**
+     * Internal data to analyze DTS and detect faulty mpeg streams
+     */
+    int64_t last_dts_for_order_check;
+    uint8_t dts_ordered;
+    uint8_t dts_misordered;
+
 } AVStream;

 AVRational av_stream_get_r_frame_rate(const AVStream *s);

@@ -864,7 +864,11 @@ fail:
     if (!avi->index_loaded && pb->seekable)
         avi_load_index(s);
     avi->index_loaded |= 1;
-    avi->non_interleaved |= guess_ni_flag(s) | (s->flags & AVFMT_FLAG_SORT_DTS);
+
+    if ((ret = guess_ni_flag(s)) < 0)
+        return ret;
+
+    avi->non_interleaved |= ret | (s->flags & AVFMT_FLAG_SORT_DTS);

     dict_entry = av_dict_get(s->metadata, "ISFT", NULL, 0);
     if (dict_entry && !strcmp(dict_entry->value, "PotEncoder"))
@@ -1315,7 +1319,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
             AVIndexEntry *e;
             int index;

-            index = av_index_search_timestamp(st, ast->frame_offset, 0);
+            index = av_index_search_timestamp(st, ast->frame_offset, AVSEEK_FLAG_ANY);
             e = &st->index_entries[index];

             if (index >= 0 && e->timestamp == ast->frame_offset) {
@@ -1455,14 +1459,69 @@ static int avi_read_idx1(AVFormatContext *s, int size)
     return 0;
 }

+/* Scan the index and consider any file with streams more than
+ * 2 seconds or 64MB apart non-interleaved. */
+static int check_stream_max_drift(AVFormatContext *s)
+{
+    int64_t min_pos, pos;
+    int i;
+    int *idx = av_mallocz_array(s->nb_streams, sizeof(*idx));
+    if (!idx)
+        return AVERROR(ENOMEM);
+    for (min_pos = pos = 0; min_pos != INT64_MAX; pos = min_pos + 1LU) {
+        int64_t max_dts = INT64_MIN / 2;
+        int64_t min_dts = INT64_MAX / 2;
+        int64_t max_buffer = 0;
+
+        min_pos = INT64_MAX;
+
+        for (i = 0; i < s->nb_streams; i++) {
+            AVStream *st = s->streams[i];
+            AVIStream *ast = st->priv_data;
+            int n = st->nb_index_entries;
+            while (idx[i] < n && st->index_entries[idx[i]].pos < pos)
+                idx[i]++;
+            if (idx[i] < n) {
+                int64_t dts;
+                dts = av_rescale_q(st->index_entries[idx[i]].timestamp /
+                                   FFMAX(ast->sample_size, 1),
+                                   st->time_base, AV_TIME_BASE_Q);
+                min_dts = FFMIN(min_dts, dts);
+                min_pos = FFMIN(min_pos, st->index_entries[idx[i]].pos);
+            }
+        }
+        for (i = 0; i < s->nb_streams; i++) {
+            AVStream *st = s->streams[i];
+            AVIStream *ast = st->priv_data;
+
+            if (idx[i] && min_dts != INT64_MAX / 2) {
+                int64_t dts;
+                dts = av_rescale_q(st->index_entries[idx[i] - 1].timestamp /
+                                   FFMAX(ast->sample_size, 1),
+                                   st->time_base, AV_TIME_BASE_Q);
+                max_dts = FFMAX(max_dts, dts);
+                max_buffer = FFMAX(max_buffer,
+                                   av_rescale(dts - min_dts,
+                                              st->codec->bit_rate,
+                                              AV_TIME_BASE));
+            }
+        }
+        if (max_dts - min_dts > 2 * AV_TIME_BASE ||
+            max_buffer > 1024 * 1024 * 8 * 8) {
+            av_free(idx);
+            return 1;
+        }
+    }
+    av_free(idx);
+    return 0;
+}
+
 static int guess_ni_flag(AVFormatContext *s)
 {
     int i;
     int64_t last_start = 0;
     int64_t first_end = INT64_MAX;
     int64_t oldpos = avio_tell(s->pb);
-    int *idx;
-    int64_t min_pos, pos;

     for (i = 0; i < s->nb_streams; i++) {
         AVStream *st = s->streams[i];
@@ -1486,35 +1545,11 @@ static int guess_ni_flag(AVFormatContext *s)
             first_end = st->index_entries[n - 1].pos;
     }
     avio_seek(s->pb, oldpos, SEEK_SET);
+
     if (last_start > first_end)
         return 1;
-    idx= av_calloc(s->nb_streams, sizeof(*idx));
-    if (!idx)
-        return 0;
-    for (min_pos=pos=0; min_pos!=INT64_MAX; pos= min_pos+1LU) {
-        int64_t max_dts = INT64_MIN/2, min_dts= INT64_MAX/2;
-        min_pos = INT64_MAX;
-
-        for (i=0; i<s->nb_streams; i++) {
-            AVStream *st = s->streams[i];
-            AVIStream *ast = st->priv_data;
-            int n= st->nb_index_entries;
-            while (idx[i]<n && st->index_entries[idx[i]].pos < pos)
-                idx[i]++;
-            if (idx[i] < n) {
-                min_dts = FFMIN(min_dts, av_rescale_q(st->index_entries[idx[i]].timestamp/FFMAX(ast->sample_size, 1), st->time_base, AV_TIME_BASE_Q));
-                min_pos = FFMIN(min_pos, st->index_entries[idx[i]].pos);
-            }
-            if (idx[i])
-                max_dts = FFMAX(max_dts, av_rescale_q(st->index_entries[idx[i]-1].timestamp/FFMAX(ast->sample_size, 1), st->time_base, AV_TIME_BASE_Q));
-        }
-        if (max_dts - min_dts > 2*AV_TIME_BASE) {
-            av_free(idx);
-            return 1;
-        }
-    }
-    av_free(idx);
-    return 0;
+
+    return check_stream_max_drift(s);
 }

 static int avi_load_index(AVFormatContext *s)
Some files were not shown because too many files have changed in this diff.