Compare commits
346 Commits
release/2. ... n2.2.3
Commit SHA1s:
f406bf3fa9 54bec22a6e e1b2c93a23 e529ff52a0 fb7e76d1cf 96047b3150 2545defeac 2d97ad38ed
f6c628f029 3f743e3e4c f2dbd64bde 7bce659e18 cb9379065f 4261778dbd ba88a6e4e4 e0407a7bf7
d575984dfc 9ea1e82d68 5cd2cdf33f a5a6f6fec3 88544e8ceb f183eaa3ad d773d7775a f15f4cefd7
0ec75a04e5 34fb994d93 acafd1814e bb01956d67 bc0c49b83e 0008a87cb1 2aa6592338 2b14d98086
a05f86ec10 9bdb254b98 358d1f6e01 ea28034f5d 6452b31599 a14969253a fab9a7be76 3a67865963
3fb754712c e5294f407a ed1ad2f5eb e780c3daaf 7f954ca502 8e9e57ed0c 1bd6372cd3 40ffa99dfa
ff79f6b35a e2a83d72da 6c3985713b 7d97cc8d87 a56a9e65c6 428b629eb2 194d12345d b3d8276d2d
c2eb668617 c9c223ba00 9d0ff6436e 02b7b125b5 5643668308 2d18e7f3ef f1b5830182 c588316555
afd1f61944 16a9c5ea9e 83fb31a76d fc5b32877a 10e023c4fa 27a3a59428 8ab849cddc 5191b00155
1131e7a1a4 26becbcd2a 366cdd3548 e3b08b3ad4 fa16440659 c36fd16aaa 8c5897632a b34fce9c54
742f9aa879 79041d92ee 82cebc0e05 09abca6802 43d64829e6 4a479fd3e6 4f41717d01 6896dcbf5f
14404170b9 e9e42beed2 abd6decd55 0385c824f1 c4e764aa69 9d02e38d3f 30cf47c6f0 b45cd17d29
26b6d70c72 32919db4fb 56f44c26f0 fe87a40de6 0f6e309b97 96e13c9897 e72c0a0466 dfddefa13a
ce94955b3c dde95268cc d20ac551a8 352b0969e2 b479b42b26 36cab9c408 34592d04fb 544accc895
f41622ecb4 fc8eb4c1f9 02bae9f013 5cb2a1c3f0 a221c9bd76 b2583c2b62 bd553941ae e0aa76d38a
a014b9614e 31c21d2f69 3cd1c8653b 194485cfba 81cfe39113 ef0c503d37 1103aec1df b40ab81d1f
314f055c29 2c566744c4 adad1ba5d8 a80a7131d1 3ab63abbd4 d2c76782e0 4dc8b4d7d0 9ff0467566
4407b38b28 8caaf260a6 e1f51bbd1f 4b7c149306 e8919d6522 b017785fa5 01507eb1f8 938ff93710
0c88d539f8 e39a992bd1 72a12f61ef 30ae080e9d dca463b728 25b462cab9 7c6a8afa7e b052525f9b
90c7bfb9be 7bdd348e58 af3d003658 c00beff5e0 57a43142ba 99905118a8 dcf560204c 5b0e2eb041
d461e077a5 9a884b7b97 9abe0bfb7f 13682b48e9 f25e6e0c25 80239a8bb1 26bbc1c242 efe259a27e
49f11e12d5 d130fae519 dde996bf99 cad2958fd7 29d61d73b1 9a6a710998 daaef403d1 207f5a138a
e9c8a9aaa6 7b7d8b8794 90d6b563fe dce2f820e9 4aab3f868f 48609236da d8fe695779 0f42e06651
230c4c6ad9 f4489c9558 0e5d9fe2a7 9ae2aaea50 2513314912 e727cbf0be bcc25353cf 6a10263f16
bcc6429c01 bdb219435e a7338ae8ac 6776c2c04f facd3dbc6e 8796c3b7d3 7430f3064f eac281b06c
b5210f4eae 10379d50be cd874cf8e6 82a3e469c6 f859fed03d 991e6fa35b 09dca51066 40de74d0eb
e2811c2ede 25d14b716a f1de93dec3 738d68de85 00ecce5c8b 6ba07e9948 125bea15d1 70e3cc282b
242df26b44 46c2dba20e 3caa6a5a57 bf08665e2e c4f5f4dbd3 29df24252a b920c1d5ad 2b9ee7d5b9
f800cacada 5227eac5b0 bb40f8f5e2 ad8bf22086 7f8804296d f67e75b5dc 35e63f35b0 3bfb7a2537
4a1e7a6fb7 ff1e982205 bb116e6ba3 ebe356bf1c 30099413ec 186e0ff067 2642ad9f55 95ddd2227b
3faebed6fa 3aee1fa5b6 89a9c84ebb 0adde39e04 03ae616b19 830c3058ff b12c5cbbb2 82c96b5ad8
3e4b957847 cbabbe8220 80122a3af3 a475755b3a 66030e8133 46f8d838b3 bc3648d4b4 27e6b4a3ff
b82860caa7 3d05625136 ddd3301bad 123981930f 3171e2360a 3533a850e7 6d56bc9a6d 2c5e1d0933
b37b83214a 6d7ab09788 227cfc1f10 416847d195 bd4ad1a1d5 6230de03aa 45acc228a6 d37fac6dbb
7940306a47 eabefe83f4 eaa79b79b2 c761379825 ea3309eba7 1c1e252cd1 ca2c9d6b9b fa6b99d351
d79cb6947e 5aa4b29bbe e4cbd0d6e5 0ede7b5344 5b933be089 f2693e98b4 c3861e14ce daa5a988e2
db67b7c31b a643a47d41 23af29e882 7d995cd1b8 72a58c0772 d525423006 4b476e6aa4 124c78fd44
a1ab3300c8 1af235f6b3 82031e41f8 222e7549a7 eb2244ece9 b967c10029 7ff4cd2acc c4149c4d54
8ad2f45964 5df52b0131 596d3e20ae 00d5ff6431 437179e9c8 031d3b66c2 b76871d870 15ae305007
3c72204ae0 ba21499648 7933039ade 4015829acc 39dc4a6bb3 a6a2d8eb8f 58556826a8 bc2c9a479a
9cc22be032 33e1bca651 9841617b7f 2897481f64 646c564de5 cd6281abef 697be8173b 1853d8bb7a
1779cd7695 bb4820727f affc7687d3 3569470693 1d1df82093 de187e3e9e 7754d48381 63169474b3
b3f106cb1f 9b6ccf0f24 298d66c8de 4be1b68d52 92edc13d69 c9f015f1c6 db6b2ca0b3 3503ec8461
ecc5e42d92 f87ce262f6

.gitignore (vendored): 5 lines changed

@@ -15,7 +15,6 @@
*.pdb
*.so
*.so.*
*.swp
*.ver
*-example
*-test
@@ -37,7 +36,7 @@
/doc/avoptions_format.texi
/doc/doxy/html/
/doc/examples/avio_reading
/doc/examples/decoding_encoding
/doc/examples/avcodec
/doc/examples/demuxing_decoding
/doc/examples/filter_audio
/doc/examples/filtering_audio
@@ -49,7 +48,6 @@
/doc/examples/resampling_audio
/doc/examples/scaling_video
/doc/examples/transcode_aac
/doc/examples/transcoding
/doc/fate.txt
/doc/print_options
/lcov/
@@ -82,5 +80,4 @@
/tools/qt-faststart
/tools/trasher
/tools/seek_print
/tools/uncoded_frame
/tools/zmqsend

Changelog: 68 lines changed

@@ -1,67 +1,6 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version 2.3.3:
- h264: fix grayscale only decoding with weighted prediction
- mjpegdec: support AV_PIX_FMT_YUV420P16 with upscale_h
- proresenc_ks: fix buffer overflow
- matroskadec: fix crash

version 2.3.2:
- snow: fix null pointer dereference
- huffyucdec: fix overread
- vc1dec: fix crash
- iff: fix out of array access
- matroskaenc: fix assertion failure
- cdgraphics: fix infinite loop
- dvdsub_parser: fix infinite loop
- mpeg12dec: support decoding some broken files
- v4l2enc: fix crash
- h264_parser: fix handling huge resolutions
- h264_mp4toannexb_bsf: multiple bugfixes

version 2.3.1:
- public AVDCT API/ABI for DCT functions
- g2meet: allow size changes within original sizes
- dv: improved error resilience, fixing Ticket2340 and Ticket2341

version 2.3:
- AC3 fixed-point decoding
- shuffleplanes filter
- subfile protocol
- Phantom Cine demuxer
- replaygain data export
- VP7 video decoder
- Alias PIX image encoder and decoder
- Improvements to the BRender PIX image decoder
- Improvements to the XBM decoder
- QTKit input device
- improvements to OpenEXR image decoder
- support decoding 16-bit RLE SGI images
- GDI screen grabbing for Windows
- alternative rendition support for HTTP Live Streaming
- AVFoundation input device
- Direct Stream Digital (DSD) decoder
- Magic Lantern Video (MLV) demuxer
- On2 AVC (Audio for Video) decoder
- support for decoding through DXVA2 in ffmpeg
- libbs2b-based stereo-to-binaural audio filter
- libx264 reference frames count limiting depending on level
- native Opus decoder
- display matrix export and rotation api
- WebVTT encoder
- showcqt multimedia filter
- zoompan filter
- signalstats filter
- hqx filter (hq2x, hq3x, hq4x)
- flanger filter
- Image format auto-detection
- LRC demuxer and muxer
- Samba protocol (via libsmbclient)
- WebM DASH Manifest muxer
- libfribidi support in drawtext


version 2.2:

- HNM version 4 demuxer and video decoder
@@ -277,7 +216,7 @@ version 1.1:
- JSON captions for TED talks decoding support
- SOX Resampler support in libswresample
- aselect filter
- SGI RLE 8-bit / Silicon Graphics RLE 8-bit video decoder
- SGI RLE 8-bit decoder
- Silicon Graphics Motion Video Compressor 1 & 2 decoder
- Silicon Graphics Movie demuxer
- apad filter
@@ -321,9 +260,7 @@ version 1.0:
- RTMPE protocol support
- RTMPTE protocol support
- showwaves and showspectrum filter
- LucasArts SMUSH SANM playback support
- LucasArts SMUSH VIMA audio decoder (ADPCM)
- LucasArts SMUSH demuxer
- LucasArts SMUSH playback support
- SAMI, RealText and SubViewer demuxers and decoders
- Heart Of Darkness PAF playback support
- iec61883 device
@@ -447,7 +384,6 @@ version 0.10:
- ffwavesynth decoder
- aviocat tool
- ffeval tool
- support encoding and decoding 4-channel SGI images


version 0.9:

INSTALL (new file): 15 lines

@@ -0,0 +1,15 @@

1) Type './configure' to create the configuration. A list of configure
options is printed by running 'configure --help'.

'configure' can be launched from a directory different from the FFmpeg
sources to build the objects out of tree. To do this, use an absolute
path when launching 'configure', e.g. '/ffmpegdir/ffmpeg/configure'.

2) Then type 'make' to build FFmpeg. GNU Make 3.81 or later is required.

3) Type 'make install' to install all binaries and libraries you built.

NOTICE

- Non system dependencies (e.g. libx264, libvpx) are disabled by default.

INSTALL.md: 17 lines changed

@@ -1,17 +0,0 @@
#Installing FFmpeg:

1. Type `./configure` to create the configuration. A list of configure
options is printed by running `configure --help`.

`configure` can be launched from a directory different from the FFmpeg
sources to build the objects out of tree. To do this, use an absolute
path when launching `configure`, e.g. `/ffmpegdir/ffmpeg/configure`.

2. Then type `make` to build FFmpeg. GNU Make 3.81 or later is required.

3. Type `make install` to install all binaries and libraries you built.

NOTICE
------

- Non system dependencies (e.g. libx264, libvpx) are disabled by default.

@@ -1,4 +1,4 @@
#FFmpeg:
FFmpeg:

Most files in FFmpeg are under the GNU Lesser General Public License version 2.1
or later (LGPL v2.1+). Read the file COPYING.LGPLv2.1 for details. Some other
@@ -10,7 +10,7 @@ version 2 or later (GPL v2+). See the file COPYING.GPLv2 for details. None of
these parts are used by default, you have to explicitly pass --enable-gpl to
configure to activate them. In this case, FFmpeg's license changes to GPL v2+.

Specifically, the GPL parts of FFmpeg are:
Specifically, the GPL parts of FFmpeg are

- libpostproc
- libmpcodecs

MAINTAINERS: 54 lines changed

@@ -14,6 +14,7 @@ patches and related discussions.
Project Leader
==============

Michael Niedermayer
final design decisions


@@ -43,8 +44,8 @@ Miscellaneous Areas
===================

documentation Stefano Sabatini, Mike Melanson, Timothy Gu
build system (configure, makefiles) Diego Biurrun, Mans Rullgard
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser
build system (configure,Makefiles) Diego Biurrun, Mans Rullgard
project server Árpád Gereöffy, Michael Niedermayer, Reimar Döffinger, Alexander Strasser
presets Robert Swain
metadata subsystem Aurelien Jacobs
release management Michael Niedermayer
@@ -53,10 +54,8 @@ release management Michael Niedermayer
Communication
=============

website Robert Swain
fate.ffmpeg.org Timothy Gu
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos, Lou Logan
mailing lists Michael Niedermayer, Baptiste Coudurier, Lou Logan
website Robert Swain, Lou Logan
mailinglists Michael Niedermayer, Baptiste Coudurier, Lou Logan
Google+ Paul B Mahol, Michael Niedermayer, Alexander Strasser
Twitter Lou Logan
Launchpad Timothy Gu
@@ -74,7 +73,6 @@ Other:
bprint Nicolas George
bswap.h
des Reimar Doeffinger
dynarray.h Nicolas George
eval.c, eval.h Michael Niedermayer
float_dsp Loren Merritt
hash Reimar Doeffinger
@@ -131,7 +129,6 @@ Generic Parts:
tableprint.c, tableprint.h Reimar Doeffinger
fixed point FFT:
fft* Zeljko Lukac
Text Subtitles Clément Bœsch

Codecs:
4xm.c Michael Niedermayer
@@ -165,13 +162,11 @@ Codecs:
dnxhd* Baptiste Coudurier
dpcm.c Mike Melanson
dv.c Roman Shaposhnik
dvbsubdec.c Anshul Maheshwari
dxa.c Kostya Shishkov
eacmv*, eaidct*, eat* Peter Ross
exif.c, exif.h Thilo Borgmann
ffv1* Michael Niedermayer
ffv1.c Michael Niedermayer
ffwavesynth.c Nicolas George
fic.c Derek Buitenhuis
flac* Justin Ruggles
flashsv* Benjamin Larsson
flicvideo.c Mike Melanson
@@ -181,7 +176,7 @@ Codecs:
h261* Michael Niedermayer
h263* Michael Niedermayer
h264* Loren Merritt, Michael Niedermayer
huffyuv* Michael Niedermayer, Christophe Gisquet
huffyuv.c Michael Niedermayer
idcinvideo.c Mike Melanson
imc* Benjamin Larsson
indeo2* Kostya Shishkov
@@ -241,12 +236,12 @@ Codecs:
rtjpeg.c, rtjpeg.h Reimar Doeffinger
rv10.c Michael Niedermayer
rv3* Kostya Shishkov
rv4* Kostya Shishkov, Christophe Gisquet
rv4* Kostya Shishkov
s3tc* Ivo van Poorten
smacker.c Kostya Shishkov
smc.c Mike Melanson
smvjpegdec.c Ash Hughes
snow* Michael Niedermayer, Loren Merritt
snow.c Michael Niedermayer, Loren Merritt
sonic.c Alex Beregszaszi
srt* Aurelien Jacobs
sunrast.c Ivo van Poorten
@@ -265,13 +260,13 @@ Codecs:
v410*.c Derek Buitenhuis
vb.c Kostya Shishkov
vble.c Derek Buitenhuis
vc1* Kostya Shishkov, Christophe Gisquet
vc1* Kostya Shishkov
vcr1.c Michael Niedermayer
vda_h264_dec.c Xidorn Quan
vima.c Paul B Mahol
vmnc.c Kostya Shishkov
vorbisdec.c Denes Balatoni, David Conrad
vorbisenc.c Oded Shimon
vorbis_dec.c Denes Balatoni, David Conrad
vorbis_enc.c Oded Shimon
vp3* Mike Melanson
vp5 Aurelien Jacobs
vp6 Aurelien Jacobs
@@ -307,20 +302,16 @@ libavdevice
libavdevice/avdevice.h


avfoundation.m Thilo Borgmann
dshow.c Roger Pack (CC rogerdpack@gmail.com)
dshow.c Roger Pack
fbdev_enc.c Lukasz Marek
gdigrab.c Roger Pack (CC rogerdpack@gmail.com)
iec61883.c Georg Lippitsch
lavfi Stefano Sabatini
libdc1394.c Roman Shaposhnik
opengl_enc.c Lukasz Marek
pulse_audio_enc.c Lukasz Marek
qtkit.m Thilo Borgmann
sdl Stefano Sabatini
v4l2.c Luca Abeni
vfwcap.c Ramiro Polla
xv.c Lukasz Marek

libavfilter
===========
@@ -343,7 +334,6 @@ Filters:
af_ladspa.c Paul B Mahol
af_pan.c Nicolas George
avf_avectorscope.c Paul B Mahol
avf_showcqt.c Muhammad Faiz
vf_blend.c Paul B Mahol
vf_colorbalance.c Paul B Mahol
vf_dejudder.c Nicholas Robbins
@@ -351,7 +341,6 @@ Filters:
vf_drawbox.c/drawgrid Andrey Utkin
vf_extractplanes.c Paul B Mahol
vf_histogram.c Paul B Mahol
vf_hqx.c Clément Bœsch
vf_il.c Paul B Mahol
vf_mergeplanes.c Paul B Mahol
vf_psnr.c Paul B Mahol
@@ -422,7 +411,6 @@ Muxers/Demuxers:
matroska.c Aurelien Jacobs
matroskadec.c Aurelien Jacobs
matroskaenc.c David Conrad
matroska subtitles (matroskaenc.c) John Peebles
metadata* Aurelien Jacobs
mgsts.c Paul B Mahol
microdvd* Aurelien Jacobs
@@ -432,15 +420,14 @@ Muxers/Demuxers:
mpc.c Kostya Shishkov
mpeg.c Michael Niedermayer
mpegenc.c Michael Niedermayer
mpegts.c Marton Balint
mpegtsenc.c Baptiste Coudurier
mpegts* Baptiste Coudurier
msnwc_tcp.c Ramiro Polla
mtv.c Reynaldo H. Verdejo Pinochet
mxf* Baptiste Coudurier
mxfdec.c Tomas Härdin
nistspheredec.c Paul B Mahol
nsvdec.c Francois Revol
nut* Michael Niedermayer
nut.c Michael Niedermayer
nuv.c Reimar Doeffinger
oggdec.c, oggdec.h David Conrad
oggenc.c Baptiste Coudurier
@@ -475,7 +462,6 @@ Muxers/Demuxers:
voc.c Aurelien Jacobs
wav.c Michael Niedermayer
wc3movie.c Mike Melanson
webm dash (matroskaenc.c) Vignesh Venkatasubramanian
webvtt* Matthew J Heaney
westwood.c Mike Melanson
wtv.c Peter Ross
@@ -517,8 +503,6 @@ Amiga / PowerPC Colin Ward
Linux / PowerPC Luca Barbato
Windows MinGW Alex Beregszaszi, Ramiro Polla
Windows Cygwin Victor Paesa
Windows MSVC Matthew Oliver
Windows ICL Matthew Oliver
ADI/Blackfin DSP Marc Hoffman
Sparc Roman Shaposhnik
x86 Michael Niedermayer
@@ -527,8 +511,8 @@ x86 Michael Niedermayer
Releases
========

2.3 Michael Niedermayer
2.2 Michael Niedermayer
2.1 Michael Niedermayer
1.2 Michael Niedermayer

If you want to maintain an older release, please contact us
@@ -545,7 +529,7 @@ Attila Kinali 11F0 F9A6 A1D2 11F6 C745 D10C 6520 BCDD F2DF E765
Baptiste Coudurier 8D77 134D 20CC 9220 201F C5DB 0AC9 325C 5C1A BAAA
Ben Littler 3EE3 3723 E560 3214 A8CD 4DEB 2CDB FCE7 768C 8D2C
Benoit Fouet B22A 4F4F 43EF 636B BB66 FCDC 0023 AE1E 2985 49C8
Clément Bœsch 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9
Bœsch Clément 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9
Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
Diego Biurrun 8227 1E31 B6D9 4994 7427 E220 9CAE D6CC 4757 FCC5
FFmpeg release signing key FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8
@@ -560,14 +544,12 @@ Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
Peter Ross A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
Reimar Doeffinger C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
Reimar Döffinger C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
Reinhard Tartler 9300 5DC2 7E87 6C37 ED7B CA9A 9808 3544 9453 48A4
Reynaldo H. Verdejo Pinochet 6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A
Robert Swain EE7A 56EA 4A81 A7B5 2001 A521 67FA 362D A2FC 3E71
Sascha Sommer 38A0 F88B 868E 9D3A 97D4 D6A0 E823 706F 1E07 0D3C
Stefano Sabatini 0D0B AD6B 5330 BBAD D3D6 6A0C 719C 2839 FC43 2D5F
Stephan Hilb 4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
Tiancheng "Timothy" Gu 9456 AFC0 814A 8139 E994 8351 7FE6 B095 B582 B0D4
Tim Nicholson 38CF DB09 3ED0 F607 8B67 6CED 0C0B FC44 8B0B FC83
Tomas Härdin A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
Wei Gao 4269 7741 857A 0E60 9EC5 08D2 4744 4EFA 62C1 87B9

Makefile: 16 lines changed

@@ -4,7 +4,6 @@ include config.mak
vpath %.c $(SRC_PATH)
vpath %.cpp $(SRC_PATH)
vpath %.h $(SRC_PATH)
vpath %.m $(SRC_PATH)
vpath %.S $(SRC_PATH)
vpath %.asm $(SRC_PATH)
vpath %.rc $(SRC_PATH)
@@ -30,22 +29,18 @@ $(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog)-$(CONFIG_OPENCL) += cmdutils_o

OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_vda.o

TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
TOOLS = qt-faststart trasher uncoded_frame
TOOLS-$(CONFIG_ZLIB) += cws2fws

# $(FFLIBS-yes) needs to be in linking order
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
FFLIBS-$(CONFIG_AVFILTER) += avfilter
FFLIBS-$(CONFIG_AVFORMAT) += avformat
FFLIBS-$(CONFIG_AVCODEC) += avcodec
FFLIBS-$(CONFIG_AVRESAMPLE) += avresample
FFLIBS-$(CONFIG_AVCODEC) += avcodec
FFLIBS-$(CONFIG_POSTPROC) += postproc
FFLIBS-$(CONFIG_SWRESAMPLE) += swresample
FFLIBS-$(CONFIG_SWRESAMPLE)+= swresample
FFLIBS-$(CONFIG_SWSCALE) += swscale

FFLIBS := avutil

@@ -77,8 +72,9 @@ config.h: .config

SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
ALTIVEC-OBJS MMX-OBJS YASM-OBJS \
ARMV5TE-OBJS ARMV6-OBJS VFP-OBJS NEON-OBJS \
ALTIVEC-OBJS VIS-OBJS \
MMX-OBJS YASM-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
OBJS SLIBOBJS HOSTOBJS TESTOBJS

@@ -110,7 +106,7 @@ endef

$(foreach P,$(PROGS),$(eval $(call DOPROG,$(P:$(PROGSSUF)$(EXESUF)=))))

ffprobe.o cmdutils.o libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h
ffprobe.o cmdutils.o : libavutil/ffversion.h

$(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
$(CP) $< $@

@@ -1,11 +1,10 @@
FFmpeg README
=============
-------------

1) Documentation
----------------

* Read the documentation in the doc/ directory in git.

You can also view it online at http://ffmpeg.org/documentation.html

2) Licensing

RELEASE_NOTES: 177 lines changed

@@ -1,177 +0,0 @@
RELEASE NOTES for FFmpeg 2.3 "Mandelbrot"

The FFmpeg Project proudly presents FFmpeg 2.3 "Mandelbrot", a major
release with all the great features committed during the three-month period
since the release of FFmpeg 2.2.

In this release, there are lots of internal overhauls that make FFmpeg a
more accessible project for new developers. Many important new
optimizations and features like QTKit and AVFoundation input devices are
committed. Contributions done by Libav such as a new native Opus decoder
are also merged.

Because of the increasing difficulty to maintain and lack of maintainers,
we are very sorry to say that we have removed all Blackfin and SPARC
architecture assembly optimizations with the cleanups done. If you are
interested in maintaining optimization for these two architecture, feel
free to contact us and we will restore the code!

Oh, and since this release, this modern-looking release note is provided in
addition to the old-style Changelog file, to make it easier for you to
focus on the most important features in this release.

Enjoy!

* API Information

FFmpeg 2.3 is completely source-compatible to the FFmpeg 2.2 series. There
are however some API deprecations that you need to take care of. Use `git
diff n2.2 n2.3 doc/APIchanges` to show the list of added and deprecated
APIs. FFmpeg 2.3 includes the following library versions:

• libavutil 52.92.100
• libavcodec 55.69.100
• libavformat 55.48.100
• libavdevice 55.13.102
• libavfilter 4.11.100
• libswscale 2. 6.100
• libswresample 0.19.100
• libpostproc 52. 3.100

Please refer to the doc/APIChanges file for more information.

New Optimization

We are excited to announce that we have committed new x86 assembly
optimization for HEVC, and FFmpeg's audio resampler libswresample. ARM
users will get a boost in MLP/TrueHD decoding thanks to new optimization.
Decoding Huffyuv also got a major boost from optimization on the C code.

Of special interest for Microsoft Visual Studio users, we have also
converted some preexisting x86 assembly to NASM/Yasm format compatible
with MSVC setup, especially in the area of audio resampling.

Another major feature in this release is the introduction of AArch64
(ARMv8) assembly optimization. AArch64 is another name for the first
64-bit ARM architecture, used by Apple A7 SoC inside iPhone 5S. Some
32-bit ARM assembly has already been ported to AArch64, but more work is
underway.

Native Opus decoder

Opus is an open audio format jointly developed by Xiph.Org, Mozilla,
Skype/Microsoft, and Broadcom. It combines the features of the Skype Cilk
speech codec and the Xiph.Org CELT music codec into one low-latency
codec. Decoding Opus is already possible since FFmpeg 1.0 using the
libopus library, but the new Opus native decoder brings a higher level of
stability and speed.

QTKit and AVFoundation

For OS X users, the new QTKit and AVFoundation devices allow you to use
the integrated camera on Macs. AVFoundation is a newer API only available
on OS X 10.7 "Lion" or newer. For users with older OS X systems, the
QTKit device using the older OS X API is for you.

API Additions

In this release, stream side data are introduced as AVStream.side_data as
a way to store miscellaneous stream-wide information. The format is
similar to the previously anonymous structure AVPacket.side_data (now
named as AVPacketSideData). With this change, audio ReplayGain
information and video rotation matrix are now exported through this API,
if available in the demuxer.
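
As an illustration of the side data API described in the paragraph above, here is a minimal C sketch of reading a stream-level display matrix; it assumes an AVStream obtained from an already-opened demuxer, and av_display_rotation_get() from libavutil/display.h is used only for illustration and is not part of this diff.

```c
#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/display.h>

/* Print the rotation a demuxer exported as stream-level side data, if any. */
static void print_rotation(AVStream *st)
{
    int size = 0;
    uint8_t *sd = av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, &size);
    if (sd && size >= (int)(9 * sizeof(int32_t)))
        printf("stream %d rotation: %f degrees\n", st->index,
               av_display_rotation_get((const int32_t *)sd));
}
```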

We also have improved libswresample's Doxygen API documentation, so new
developers wishing to use FFmpeg's excellent libraries can get started
more easily and faster.

Last But Not Least

Other interesting new features including hqx video filter, a pixel art
scaling filter; a fixed-point AC-3 decoder contributed by Imagination
Technologies; an On2 TrueMotion VP7 video decoder; an HTML5 WebVTT
subtitle encoder that allows creation of WebVTT from any text-based
subtitles; and an 1-bit Direct Stream Digital audio decoder.

★ List of New Features

Command line tools

• Support for decoding through DXVA2 in ffmpeg

libavcodec

• AC3 fixed-point decoding
• VP7 video decoder
• Alias PIX image encoder and decoder
• Improvements to the BRender PIX image decoder
• Improvements to the XBM decoder
• Improvements to OpenEXR image decoder
• Support decoding 16-bit RLE SGI images
• Direct Stream Digital (DSD) decoder
• On2 AVC (Audio for Video) decoder
• Native Opus decoder
• WebVTT encoder

libavdevice

• QTKit input device
• GDI screen grabbing for Windows
• AVFoundation input device

libavformat

• subfile protocol
• Phantom Cine demuxer
• Alternative rendition support for HTTP Live Streaming
• Magic Lantern Video (MLV) demuxer
• Image format auto-detection
• LRC lyric file demuxer and muxer
• Samba protocol (via libsmbclient)
• WebM DASH Manifest muxer

libavfilter

• shuffleplanes filter
• libbs2b-based stereo-to-binaural audio filter
• showcqt multimedia filter
• zoompan filter
• signalstats filter
• hqx filter (hq2x, hq3x, hq4x)
• flanger filter
• libfribidi support in drawtext

⚠ Behaviour changes

• libx264 reference frames count is now limited depending on level chosen
• Because of the new image format auto-detection feature, you don't need to
specify image format when decoding an image with no extension.

arch.mak: 3 lines changed

@@ -1,6 +1,5 @@
OBJS-$(HAVE_ARMV5TE) += $(ARMV5TE-OBJS) $(ARMV5TE-OBJS-yes)
OBJS-$(HAVE_ARMV6) += $(ARMV6-OBJS) $(ARMV6-OBJS-yes)
OBJS-$(HAVE_ARMV8) += $(ARMV8-OBJS) $(ARMV8-OBJS-yes)
OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)

@@ -11,5 +10,7 @@ OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS) $(MIPSDSPR2-OBJS-yes)

OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)

OBJS-$(HAVE_VIS) += $(VIS-OBJS) $(VIS-OBJS-yes)

OBJS-$(HAVE_MMX) += $(MMX-OBJS) $(MMX-OBJS-yes)
OBJS-$(HAVE_YASM) += $(YASM-OBJS) $(YASM-OBJS-yes)

cmdutils.c: 73 lines changed

@@ -66,7 +66,6 @@ AVDictionary *swr_opts;
AVDictionary *format_opts, *codec_opts, *resample_opts;

static FILE *report_file;
static int report_file_level = AV_LOG_DEBUG;
int hide_banner = 0;

void init_opts(void)
@@ -105,10 +104,8 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
av_log_default_callback(ptr, level, fmt, vl);
av_log_format_line(ptr, level, fmt, vl2, line, sizeof(line), &print_prefix);
va_end(vl2);
if (report_file_level >= level) {
fputs(line, report_file);
fflush(report_file);
}
}

static void (*program_exit)(int ret);
@@ -555,11 +552,6 @@ int opt_default(void *optctx, const char *opt, const char *arg)
}
consumed = 1;
}
#else
if (!consumed && !strcmp(opt, "sws_flags")) {
av_log(NULL, AV_LOG_WARNING, "Ignoring %s %s, due to disabled swscale\n", opt, arg);
consumed = 1;
}
#endif
#if CONFIG_SWRESAMPLE
swr_class = swr_get_class();
@@ -670,7 +662,7 @@ static void init_parse_context(OptionParseContext *octx,
memset(octx, 0, sizeof(*octx));

octx->nb_groups = nb_groups;
octx->groups = av_mallocz_array(octx->nb_groups, sizeof(*octx->groups));
octx->groups = av_mallocz(sizeof(*octx->groups) * octx->nb_groups);
if (!octx->groups)
exit_program(1);

@@ -842,17 +834,10 @@ int opt_loglevel(void *optctx, const char *opt, const char *arg)
};
char *tail;
int level;
int flags;
int i;

flags = av_log_get_flags();
tail = strstr(arg, "repeat");
if (tail)
flags &= ~AV_LOG_SKIP_REPEATED;
else
flags |= AV_LOG_SKIP_REPEATED;

av_log_set_flags(flags);
av_log_set_flags(tail ? 0 : AV_LOG_SKIP_REPEATED);
if (tail == arg)
arg += 6 + (arg[6]=='+');
if(tail && !*arg)
@@ -934,13 +919,6 @@ static int init_report(const char *env)
av_free(filename_template);
filename_template = val;
val = NULL;
} else if (!strcmp(key, "level")) {
char *tail;
report_file_level = strtol(val, &tail, 10);
if (*tail) {
av_log(NULL, AV_LOG_FATAL, "Invalid report file level\n");
exit_program(1);
}
} else {
av_log(NULL, AV_LOG_ERROR, "Unknown key '%s' in FFREPORT\n", key);
}
@@ -1120,7 +1098,7 @@ void show_banner(int argc, char **argv, const OptionDef *options)
int show_version(void *optctx, const char *opt, const char *arg)
{
av_log_set_callback(log_callback_help);
print_program_info (SHOW_COPYRIGHT, AV_LOG_INFO);
print_program_info (0 , AV_LOG_INFO);
print_all_libs_info(SHOW_VERSION, AV_LOG_INFO);

return 0;
@@ -1208,29 +1186,16 @@ int show_license(void *optctx, const char *opt, const char *arg)
return 0;
}

static int is_device(const AVClass *avclass)
{
if (!avclass)
return 0;
return avclass->category == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_OUTPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_INPUT;
}

static int show_formats_devices(void *optctx, const char *opt, const char *arg, int device_only)
int show_formats(void *optctx, const char *opt, const char *arg)
{
AVInputFormat *ifmt = NULL;
AVOutputFormat *ofmt = NULL;
const char *last_name;
int is_dev;

printf("%s\n"
printf("File formats:\n"
" D. = Demuxing supported\n"
" .E = Muxing supported\n"
" --\n", device_only ? "Devices:" : "File formats:");
" --\n");
last_name = "000";
for (;;) {
int decode = 0;
@@ -1239,9 +1204,6 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
const char *long_name = NULL;

while ((ofmt = av_oformat_next(ofmt))) {
is_dev = is_device(ofmt->priv_class);
if (!is_dev && device_only)
continue;
if ((name == NULL || strcmp(ofmt->name, name) < 0) &&
strcmp(ofmt->name, last_name) > 0) {
name = ofmt->name;
@@ -1250,9 +1212,6 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
}
}
while ((ifmt = av_iformat_next(ifmt))) {
is_dev = is_device(ifmt->priv_class);
if (!is_dev && device_only)
continue;
if ((name == NULL || strcmp(ifmt->name, name) < 0) &&
strcmp(ifmt->name, last_name) > 0) {
name = ifmt->name;
@@ -1275,16 +1234,6 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
return 0;
}

int show_formats(void *optctx, const char *opt, const char *arg)
{
return show_formats_devices(optctx, opt, arg, 0);
}

int show_devices(void *optctx, const char *opt, const char *arg)
{
return show_formats_devices(optctx, opt, arg, 1);
}

#define PRINT_CODEC_SUPPORTED(codec, field, type, list_name, term, get_name) \
if (codec->field) { \
const type *p = codec->field; \
@@ -1429,9 +1378,6 @@ int show_codecs(void *optctx, const char *opt, const char *arg)
const AVCodecDescriptor *desc = codecs[i];
const AVCodec *codec = NULL;

if (strstr(desc->name, "_deprecated"))
continue;

printf(" ");
printf(avcodec_find_decoder(desc->id) ? "D" : ".");
printf(avcodec_find_encoder(desc->id) ? "E" : ".");
@@ -1857,7 +1803,7 @@ int read_yesno(void)

int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
{
int64_t ret;
int ret;
FILE *f = av_fopen_utf8(filename, "rb");

if (!f) {
@@ -1996,8 +1942,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
}

if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
!codec ||
(codec->priv_class &&
(codec && codec->priv_class &&
av_opt_find(&codec->priv_class, t->key, NULL, flags,
AV_OPT_SEARCH_FAKE_OBJ)))
av_dict_set(&ret, t->key, t->value, 0);
@@ -2020,7 +1965,7 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,

if (!s->nb_streams)
return NULL;
opts = av_mallocz_array(s->nb_streams, sizeof(*opts));
opts = av_mallocz(s->nb_streams * sizeof(*opts));
if (!opts) {
av_log(NULL, AV_LOG_ERROR,
"Could not alloc memory for stream options.\n");
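
The hunks above repeatedly swap between plain av_mallocz()/av_malloc() calls with a hand-written multiplication and the *_array() helpers. A minimal sketch of the overflow-checked variant follows; alloc_stream_opts() is a hypothetical helper written for illustration and is not part of cmdutils.c.

```c
#include <libavutil/mem.h>
#include <libavutil/dict.h>

/* av_mallocz_array() checks that nmemb * size does not overflow before
 * allocating, unlike av_mallocz(nmemb * size) with a raw multiplication. */
static AVDictionary **alloc_stream_opts(unsigned nb_streams)
{
    return av_mallocz_array(nb_streams, sizeof(AVDictionary *));
}
```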
cmdutils.h: 14 lines changed

@@ -24,13 +24,12 @@

#include <stdint.h>

#include "config.h"
#include "libavcodec/avcodec.h"
#include "libavfilter/avfilter.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"

#ifdef _WIN32
#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

@@ -59,7 +58,7 @@ void register_exit(void (*cb)(int ret));
/**
* Wraps exit with a program-specific cleanup routine.
*/
void exit_program(int ret) av_noreturn;
void exit_program(int ret);

/**
* Initialize the cmdutils option system, in particular
@@ -431,17 +430,10 @@ int show_license(void *optctx, const char *opt, const char *arg);

/**
* Print a listing containing all the formats supported by the
* program (including devices).
* This option processing function does not utilize the arguments.
*/
int show_formats(void *optctx, const char *opt, const char *arg);

/**
* Print a listing containing all the devices supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_devices(void *optctx, const char *opt, const char *arg);
int show_formats(void *optctx, const char *opt, const char *arg);

/**
* Print a listing containing all the codecs supported by the

@@ -6,7 +6,6 @@
{ "version" , OPT_EXIT, {.func_arg = show_version}, "show version" },
{ "buildconf" , OPT_EXIT, {.func_arg = show_buildconf}, "show build configuration" },
{ "formats" , OPT_EXIT, {.func_arg = show_formats }, "show available formats" },
{ "devices" , OPT_EXIT, {.func_arg = show_devices }, "show available devices" },
{ "codecs" , OPT_EXIT, {.func_arg = show_codecs }, "show available codecs" },
{ "decoders" , OPT_EXIT, {.func_arg = show_decoders }, "show available decoders" },
{ "encoders" , OPT_EXIT, {.func_arg = show_encoders }, "show available encoders" },

@@ -181,12 +181,12 @@ static int64_t run_opencl_bench(AVOpenCLExternalEnv *ext_opencl_env)
OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_int), &width);
OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_int), &height);

start = av_gettime_relative();
start = av_gettime();
for (i = 0; i < OPENCL_NB_ITER; i++)
OCLCHECK(clEnqueueNDRangeKernel, ext_opencl_env->command_queue, kernel, 2, NULL,
global_work_size_2d, local_work_size_2d, 0, NULL, NULL);
clFinish(ext_opencl_env->command_queue);
ret = (av_gettime_relative() - start)/OPENCL_NB_ITER;
ret = (av_gettime() - start)/OPENCL_NB_ITER;
end:
if (kernel)
clReleaseKernel(kernel);
@@ -224,7 +224,7 @@ int opt_opencl_bench(void *optctx, const char *opt, const char *arg)
av_log(NULL, AV_LOG_ERROR, "No OpenCL device detected!\n");
return AVERROR(EINVAL);
}
if (!(devices = av_malloc_array(nb_devices, sizeof(OpenCLDeviceBenchmark)))) {
if (!(devices = av_malloc(sizeof(OpenCLDeviceBenchmark) * nb_devices))) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate buffer\n");
return AVERROR(ENOMEM);
}
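
The benchmark hunk above switches between av_gettime() and the monotonic av_gettime_relative(). A small sketch of the same timing pattern, assuming a hypothetical do_one_iteration() stands in for the benchmarked work:

```c
#include <stdint.h>
#include <libavutil/time.h>

void do_one_iteration(void);  /* hypothetical workload, declared only for this sketch */

/* Average per-iteration wall time in microseconds, using the monotonic clock. */
int64_t average_iteration_us(int iterations)
{
    int64_t start = av_gettime_relative();
    for (int i = 0; i < iterations; i++)
        do_one_iteration();
    return (av_gettime_relative() - start) / iterations;
}
```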
@@ -51,9 +51,6 @@ COMPILE_HOSTC = $(call COMPILE,HOSTCC)
%.o: %.cpp
$(COMPILE_CXX)

%.o: %.m
$(COMPILE_C)

%.s: %.c
$(CC) $(CPPFLAGS) $(CFLAGS) -S -o $@ $<

@@ -93,7 +90,7 @@ include $(SRC_PATH)/arch.mak

OBJS += $(OBJS-yes)
SLIBOBJS += $(SLIBOBJS-yes)
FFLIBS := $($(NAME)_FFLIBS) $(FFLIBS-yes) $(FFLIBS)
FFLIBS := $(FFLIBS-yes) $(FFLIBS)
TESTPROGS += $(TESTPROGS-yes)

LDLIBS = $(FFLIBS:%=%$(BUILDSUF))

@@ -1,35 +0,0 @@
/*
* Work around broken floating point limits on some systems.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include_next <float.h>

#ifdef FLT_MAX
#undef FLT_MAX
#define FLT_MAX 3.40282346638528859812e+38F

#undef FLT_MIN
#define FLT_MIN 1.17549435082228750797e-38F

#undef DBL_MAX
#define DBL_MAX ((double)1.79769313486231570815e+308L)

#undef DBL_MIN
#define DBL_MIN ((double)2.22507385850720138309e-308L)
#endif

@@ -1,22 +0,0 @@
/*
* Work around broken floating point limits on some systems.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include_next <limits.h>
#include <float.h>

@@ -134,29 +134,28 @@ typedef struct win32_cond_t {
volatile int is_broadcast;
} win32_cond_t;

static int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
static void pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
{
win32_cond_t *win32_cond = NULL;
if (cond_init) {
cond_init(cond);
return 0;
return;
}

/* non native condition variables */
win32_cond = av_mallocz(sizeof(win32_cond_t));
if (!win32_cond)
return ENOMEM;
return;
cond->ptr = win32_cond;
win32_cond->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL);
if (!win32_cond->semaphore)
return ENOMEM;
return;
win32_cond->waiters_done = CreateEvent(NULL, TRUE, FALSE, NULL);
if (!win32_cond->waiters_done)
return ENOMEM;
return;

pthread_mutex_init(&win32_cond->mtx_waiter_count, NULL);
pthread_mutex_init(&win32_cond->mtx_broadcast, NULL);
return 0;
}

static void pthread_cond_destroy(pthread_cond_t *cond)

doc/APIchanges: 246 lines changed

@@ -15,208 +15,23 @@ libavutil: 2012-10-22

API changes, most recent first:

2014-07-14 - 62227a7 - lavf 55.47.100 - avformat.h
Add av_stream_get_parser()

2014-07-09 - c67690f / a54f03b - lavu 52.92.100 / 53.18.0 - display.h
Add av_display_matrix_flip() to flip the transformation matrix.

2014-07-09 - 1b58f13 / f6ee61f - lavc 55.69.100 / 55.56.0 - dv_profile.h
Add a public API for DV profile handling.

2014-06-20 - 0dceefc / 9e500ef - lavu 52.90.100 / 53.17.0 - imgutils.h
Add av_image_check_sar().

2014-06-20 - 4a99333 / 874390e - lavc 55.68.100 / 55.55.0 - avcodec.h
Add av_packet_rescale_ts() to simplify timestamp conversion.
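
A minimal sketch of the new helper from the entry above, assuming a packet read from one stream is being written to another stream with a different time base (the surrounding remux loop is omitted):

```c
#include <libavformat/avformat.h>

/* Convert pkt's pts, dts and duration from the input stream's time base
 * to the output stream's time base before writing the packet out. */
static void rescale_for_output(AVPacket *pkt, const AVStream *in, const AVStream *out)
{
    av_packet_rescale_ts(pkt, in->time_base, out->time_base);
}
```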

2014-06-18 - ac293b6 / 194be1f - lavf 55.44.100 / 55.20.0 - avformat.h
The proper way for providing a hint about the desired timebase to the muxers
is now setting AVStream.time_base, instead of AVStream.codec.time_base as was
done previously. The old method is now deprecated.

2014-06-11 - 67d29da - lavc 55.66.101 - avcodec.h
Increase FF_INPUT_BUFFER_PADDING_SIZE to 32 due to some corner cases needing
it

2014-06-10 - xxxxxxx - lavf 55.43.100 - avformat.h
New field int64_t max_analyze_duration2 instead of deprecated
int max_analyze_duration.

2014-05-30 - 00759d7 - lavu 52.89.100 - opt.h
Add av_opt_copy()

2014-06-01 - 03bb99a / 0957b27 - lavc 55.66.100 / 55.54.0 - avcodec.h
Add AVCodecContext.side_data_only_packets to allow encoders to output packets
with only side data. This option may become mandatory in the future, so all
users are recommended to update their code and enable this option.

2014-06-01 - 6e8e9f1 / 8c02adc - lavu 52.88.100 / 53.16.0 - frame.h, pixfmt.h
Move all color-related enums (AVColorPrimaries, AVColorSpace, AVColorRange,
AVColorTransferCharacteristic, and AVChromaLocation) inside lavu.
And add AVFrame fields for them.

2014-05-29 - bdb2e80 / b2d4565 - lavr 1.3.0 - avresample.h
Add avresample_max_output_samples

2014-05-24 - d858ee7 / 6d21259 - lavf 55.42.100 / 55.19.0 - avformat.h
Add strict_std_compliance and related AVOptions to support experimental
muxing.

2014-05-26 - xxxxxxx - lavu 52.87.100 - threadmessage.h
Add thread message queue API.

2014-05-26 - c37d179 - lavf 55.41.100 - avformat.h
Add format_probesize to AVFormatContext.

2014-05-19 - 7d25af1 / c23c96b - lavf 55.39.100 / 55.18.0 - avformat.h
Add av_stream_get_side_data() to access stream-level side data
in the same way as av_packet_get_side_data().

2014-05-xx - xxxxxxx - lavu 52.86.100 - fifo.h
Add av_fifo_alloc_array() function.

2014-05-19 - ef1d4ee / bddd8cb - lavu 52.85.100 / 53.15.0 - frame.h, display.h
Add AV_FRAME_DATA_DISPLAYMATRIX for exporting frame-level
spatial rendering on video frames for proper display.

2014-05-xx - xxxxxxx - lavc 55.64.100 / 55.53.0 - avcodec.h
Add AV_PKT_DATA_DISPLAYMATRIX for exporting packet-level
spatial rendering on video frames for proper display.

2014-05-19 - 999a99c / a312f71 - lavf 55.38.101 / 55.17.1 - avformat.h
Deprecate AVStream.pts and the AVFrac struct, which was its only use case.
See use av_stream_get_end_pts()

2014-05-18 - 68c0518 / fd05602 - lavc 55.63.100 / 55.52.0 - avcodec.h
Add avcodec_free_context(). From now on it should be used for freeing
AVCodecContext.
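
A short sketch of the allocation/teardown pair implied by the entry above; error handling is reduced to the minimum and the configuration step is elided:

```c
#include <libavcodec/avcodec.h>

static int try_codec(const AVCodec *codec)
{
    AVCodecContext *ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);
    /* ... configure and use ctx ... */
    avcodec_free_context(&ctx);   /* frees the context and sets ctx to NULL */
    return 0;
}
```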
2014-05-17 - 0eec06e - lavu 52.84.100 - time.h
|
||||
Add av_gettime_relative() av_gettime_relative_is_monotonic()
|
||||
|
||||
2014-05-15 - eacf7d6 / 0c1959b - lavf 55.38.100 / 55.17.0 - avformat.h
|
||||
Add AVMFT_FLAG_BITEXACT flag. Muxers now use it instead of checking
|
||||
CODEC_FLAG_BITEXACT on the first stream.
|
||||
|
||||
2014-05-15 - 96cb4c8 - lswr 0.19.100 - swresample.h
|
||||
Add swr_close()
|
||||
|
||||
2014-05-11 - 14aef38 / 66e6c8a - lavu 52.83.100 / 53.14.0 - pixfmt.h
|
||||
Add AV_PIX_FMT_VDA for new-style VDA acceleration.
|
||||
|
||||
2014-05-xx - xxxxxxx - lavu 52.82.100 - fifo.h
|
||||
Add av_fifo_freep() function.
|
||||
|
||||
2014-05-02 - ba52fb11 - lavu 52.81.100 - opt.h
|
||||
Add av_opt_set_dict2() function.
|
||||
|
||||
2014-05-01 - e77b985 / a2941c8 - lavc 55.60.103 / 55.50.3 - avcodec.h
|
||||
Deprecate CODEC_FLAG_MV0. It is replaced by the flag "mv0" in the
|
||||
"mpv_flags" private option of the mpegvideo encoders.
|
||||
|
||||
2014-05-01 - e40ae8c / 6484149 - lavc 55.60.102 / 55.50.2 - avcodec.h
|
||||
Deprecate CODEC_FLAG_GMC. It is replaced by the "gmc" private option of the
|
||||
libxvid encoder.
|
||||
|
||||
2014-05-01 - 1851643 / b2c3171 - lavc 55.60.101 / 55.50.1 - avcodec.h
|
||||
Deprecate CODEC_FLAG_NORMALIZE_AQP. It is replaced by the flag "naq" in the
|
||||
"mpv_flags" private option of the mpegvideo encoders.
|
||||
|
||||
2014-05-01 - cac07d0 / 5fcceda - avcodec.h
|
||||
Deprecate CODEC_FLAG_INPUT_PRESERVED. Its functionality is replaced by passing
|
||||
reference-counted frames to encoders.
|
||||
|
||||
2014-04-29 - 1bf6396 - lavc 55.60.100 - avcodec.h
|
||||
Add AVCodecDescriptor.mime_types field.
|
||||
|
||||
2014-04-29 - xxxxxxx - lavu 52.80.0 - hash.h
|
||||
Add av_hash_final_bin(), av_hash_final_hex() and av_hash_final_b64().
|
||||
|
||||
2014-03-07 - 8b2a130 - lavc 55.50.0 / 55.53.100 - dxva2.h
|
||||
Add FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO for old Intel GPUs.
|
||||
|
||||
2014-04-22 - 502512e /dac7e8a - lavu 53.13.0 / 52.78.100 - avutil.h
|
||||
Add av_get_time_base_q().
|
||||
|
||||
2014-04-17 - a8d01a7 / 0983d48 - lavu 53.12.0 / 52.77.100 - crc.h
|
||||
Add AV_CRC_16_ANSI_LE crc variant.
|
||||
|
||||
2014-04-XX - xxxxxxx - lavf xx.xx.1xx - avformat.h
|
||||
Add av_format_inject_global_side_data()
|
||||
|
||||
2014-04-12 - 4f698be - lavu 52.76.100 - log.h
|
||||
Add av_log_get_flags()
|
||||
|
||||
2014-04-11 - 6db42a2b - lavd 55.12.100 - avdevice.h
|
||||
Add avdevice_capabilities_create() function.
|
||||
Add avdevice_capabilities_free() function.
|
||||
|
||||
2014-04-07 - 0a1cc04 / 8b17243 - lavu 52.75.100 / 53.11.0 - pixfmt.h
|
||||
Add AV_PIX_FMT_YVYU422 pixel format.
|
||||
|
||||
2014-04-04 - c1d0536 / 8542f9c - lavu 52.74.100 / 53.10.0 - replaygain.h
|
||||
Full scale for peak values is now 100000 (instead of UINT32_MAX) and values
|
||||
may overflow.
|
||||
|
||||
2014-04-03 - c16e006 / 7763118 - lavu 52.73.100 / 53.9.0 - log.h
|
||||
Add AV_LOG(c) macro to have 256 color debug messages.
|
||||
|
||||
2014-04-03 - eaed4da9 - lavu 52.72.100 - opt.h
|
||||
Add AV_OPT_MULTI_COMPONENT_RANGE define to allow return
|
||||
multi-component option ranges.
|
||||
|
||||
2014-03-29 - cd50a44b - lavu 52.70.100 - mem.h
|
||||
Add av_dynarray_add_nofree() function.
|
||||
|
||||
2014-02-24 - 3e1f241 / d161ae0 - lavu 52.69.100 / 53.8.0 - frame.h
|
||||
Add av_frame_remove_side_data() for removing a single side data
|
||||
instance from a frame.
|
||||
|
||||
2014-03-24 - 83e8978 / 5a7e35d - lavu 52.68.100 / 53.7.0 - frame.h, replaygain.h
|
||||
Add AV_FRAME_DATA_REPLAYGAIN for exporting replaygain tags.
|
||||
Add a new header replaygain.h with the AVReplayGain struct.
|
||||
|
||||
2014-03-24 - 83e8978 / 5a7e35d - lavc 55.54.100 / 55.36.0 - avcodec.h
|
||||
Add AV_PKT_DATA_REPLAYGAIN for exporting replaygain tags.
|
||||
|
||||
2014-03-24 - 595ba3b / 25b3258 - lavf 55.35.100 / 55.13.0 - avformat.h
|
||||
Add AVStream.side_data and AVStream.nb_side_data for exporting stream-global
|
||||
side data (e.g. replaygain tags, video rotation)
|
||||
|
||||
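A sketch of enumerating the new stream-global side data (assumes an AVStream obtained from an opened AVFormatContext):

/* List the side data entries exported on a stream. */
#include <stdio.h>
#include <libavformat/avformat.h>

static void dump_stream_side_data(const AVStream *st)
{
    int i;

    for (i = 0; i < st->nb_side_data; i++) {
        const AVPacketSideData *sd = &st->side_data[i];
        printf("side data type %d, %d bytes\n", (int)sd->type, sd->size);
    }
}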
2014-03-24 - bd34e26 / 0e2c3ee - lavc 55.53.100 / 55.35.0 - avcodec.h
|
||||
Give the name AVPacketSideData to the previously anonymous struct used for
|
||||
AVPacket.side_data.
|
||||
|
||||
2014-03-18 - 37c07d4 - lsws 2.5.102
|
||||
2014-03-18 - e9c8a9a - lsws 2.5.102
|
||||
Make gray16 full-scale.
|
||||
|
||||
2014-03-16 - 6b1ca17 / 1481d24 - lavu 52.67.100 / 53.6.0 - pixfmt.h
|
||||
Add RGBA64_LIBAV pixel format and variants for compatibility
|
||||
|
||||
2014-03-11 - 3f3229c - lavf 55.34.101 - avformat.h
|
||||
Set AVFormatContext.start_time_realtime when demuxing.
|
||||
|
||||
2014-03-03 - 06fed440 - lavd 55.11.100 - avdevice.h
|
||||
Add av_input_audio_device_next().
|
||||
Add av_input_video_device_next().
|
||||
Add av_output_audio_device_next().
|
||||
Add av_output_video_device_next().
|
||||
|
||||
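A minimal sketch iterating the newly added device lists (only input video devices are shown; the other three iterators work the same way):

/* Print the names of all available input video devices. */
#include <stdio.h>
#include <libavdevice/avdevice.h>

int main(void)
{
    AVInputFormat *fmt = NULL;

    avdevice_register_all();
    while ((fmt = av_input_video_device_next(fmt)))
        printf("input video device: %s\n", fmt->name);
    return 0;
}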
2014-02-24 - fff5262 / 1155fd0 - lavu 52.66.100 / 53.5.0 - frame.h
|
||||
2014-xx-xx - xxxxxxx - lavu 53.05.0 - frame.h
|
||||
Add av_frame_copy() for copying the frame data.
|
||||
|
||||
2014-02-24 - a66be60 - lswr 0.18.100 - swresample.h
|
||||
2014-02-xx - xxxxxxx - lswr 0.18.100 - swresample.h
|
||||
Add swr_is_initialized() for checking whether a resample context is initialized.
|
||||
|
||||
2014-02-22 - 5367c0b / 7e86c27 - lavr 1.2.0 - avresample.h
|
||||
2014-02-xx - xxxxxxx - lavr 1.2.0 - avresample.h
|
||||
Add avresample_is_open() for checking whether a resample context is open.
|
||||
|
||||
2014-02-19 - 6a24d77 / c3ecd96 - lavu 52.65.100 / 53.4.0 - opt.h
|
||||
2014-xx-xx - xxxxxxx - lavu 53.04.0 - opt.h
|
||||
Add AV_OPT_FLAG_EXPORT and AV_OPT_FLAG_READONLY to mark options meant (only)
|
||||
for reading.
|
||||
|
||||
2014-02-19 - f4c8d00 / 6bb8720 - lavu 52.64.101 / 53.3.1 - opt.h
|
||||
2014-xx-xx - xxxxxxx - lavu 53.03.01 - opt.h
|
||||
Deprecate unused AV_OPT_FLAG_METADATA.
|
||||
|
||||
2014-02-xx - xxxxxxx - lavd 55.10.100 - avdevice.h
|
||||
@@ -226,7 +41,7 @@ API changes, most recent first:
|
||||
Add avio_find_protocol_name() to find out the name of the protocol that would
|
||||
be selected for a given URL.
|
||||
|
||||
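A minimal sketch (the URL is illustrative):

/* Ask which protocol would be selected for a given URL. */
#include <stdio.h>
#include <libavformat/avformat.h>

int main(void)
{
    const char *name;

    av_register_all();  /* also registers the built-in protocols */
    name = avio_find_protocol_name("https://example.com/stream.m3u8");
    printf("protocol: %s\n", name ? name : "(none)");
    return 0;
}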
2014-02-15 - a2bc6c1 / c98f316 - lavu 52.64.100 / 53.3.0 - frame.h
|
||||
2014-02-xx - xxxxxxx - lavu 53.3.0 - frame.h
|
||||
Add AV_FRAME_DATA_DOWNMIX_INFO value to the AVFrameSideDataType enum and
|
||||
downmix_info.h API, which identify downmix-related metadata.
|
||||
|
||||
@@ -237,7 +52,7 @@ API changes, most recent first:
|
||||
Add AVFormatContext.max_interleave_delta for controlling amount of buffering
|
||||
when interleaving.
|
||||
|
||||
2014-02-02 - 5871ee5 - lavf 55.29.100 - avformat.h
|
||||
2014-02-02 - xxxxxxx - lavf 55.29.100 - avformat.h
|
||||
Add output_ts_offset muxing option to AVFormatContext.
|
||||
|
||||
2014-01-27 - 102bd64 - lavd 55.7.100 - avdevice.h
|
||||
@@ -257,7 +72,7 @@ API changes, most recent first:
|
||||
(i.e. as if the CODEC_FLAG_EMU_EDGE flag was always on). Deprecate
|
||||
CODEC_FLAG_EMU_EDGE and avcodec_get_edge_width().
|
||||
|
||||
2014-01-19 - 1a193c4 - lavf 55.25.100 - avformat.h
|
||||
2014-01-19 - xxxxxxx - lavf 55.25.100 - avformat.h
|
||||
Add avformat_get_mov_video_tags() and avformat_get_mov_audio_tags().
|
||||
|
||||
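A small sketch of looking up a MOV/MP4 tag through the new accessors (av_codec_get_tag expects a NULL-terminated list of tag tables):

/* Return the MOV tag registered for a given codec id, or 0 if none. */
#include <libavformat/avformat.h>

static unsigned int mov_video_tag_for(enum AVCodecID id)
{
    const struct AVCodecTag *tables[] = { avformat_get_mov_video_tags(), NULL };

    return av_codec_get_tag(tables, id);
}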
2014-01-19 - xxxxxxx - lavu 52.63.100 - rational.h
|
||||
@@ -270,16 +85,16 @@ API changes, most recent first:
|
||||
2014-01-05 - 751385f / 5c437fb - lavu 52.61.100 / 53.1.0 - channel_layout.h
|
||||
Add values for various Dolby flags to the AVMatrixEncoding enum.
|
||||
|
||||
2014-01-04 - b317f94 - lavu 52.60.100 - mathematics.h
|
||||
2014-01-04 - xxxxxxx - lavu 52.60.100 - mathematics.h
|
||||
Add av_add_stable() function.
|
||||
|
||||
2013-12-22 - 911676c - lavu 52.59.100 - avstring.h
|
||||
2013-12-22 - xxxxxxx - lavu 52.59.100 - avstring.h
|
||||
Add av_strnlen() function.
|
||||
|
||||
2013-12-09 - 64f73ac - lavu 52.57.100 - opencl.h
|
||||
2013-12-xx - xxxxxxx - lavu 52.57.100 - opencl.h
|
||||
Add av_opencl_benchmark() function.
|
||||
|
||||
2013-11-30 - 82b2e9c - lavu 52.56.100 - ffversion.h
|
||||
2013-11-xx - xxxxxxx - lavu 52.56.100 - ffversion.h
|
||||
Moves version.h to libavutil/ffversion.h.
|
||||
Install ffversion.h and make it public.
|
||||
|
||||
@@ -296,13 +111,13 @@ API changes, most recent first:
|
||||
Add AV_FRAME_DATA_A53_CC value to the AVFrameSideDataType enum, which
|
||||
identifies ATSC A53 Part 4 Closed Captions data.
|
||||
|
||||
2013-11-22 - 6859065 - lavu 52.54.100 - avstring.h
|
||||
2013-11-XX - xxxxxxx - lavu 52.54.100 - avstring.h
|
||||
Add av_utf8_decode() function.
|
||||
|
||||
2013-11-22 - fb7d70c - lavc 55.44.100 - avcodec.h
|
||||
Add HEVC profiles
|
||||
|
||||
2013-11-20 - c28b61c - lavc 55.44.100 - avcodec.h
|
||||
2013-11-xx - xxxxxxx - lavc 55.44.100 - avcodec.h
|
||||
Add av_packet_{un,}pack_dictionary()
|
||||
Add AV_PKT_METADATA_UPDATE side data type, used to transmit key/value
|
||||
strings between a stream and the application.
|
||||
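A round-trip sketch of the new helpers (the key/value pair is illustrative):

/* Pack an AVDictionary into a side-data blob and unpack it again. */
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>
#include <libavutil/mem.h>

static int metadata_roundtrip(void)
{
    AVDictionary *in = NULL, *out = NULL;
    uint8_t *buf;
    int size = 0, ret;

    av_dict_set(&in, "title", "example", 0);

    buf = av_packet_pack_dictionary(in, &size);          /* serialize key/value pairs */
    if (!buf) {
        av_dict_free(&in);
        return AVERROR(ENOMEM);
    }

    ret = av_packet_unpack_dictionary(buf, size, &out);  /* rebuild a dictionary */

    av_free(buf);
    av_dict_free(&in);
    av_dict_free(&out);
    return ret;
}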
@@ -314,7 +129,7 @@ API changes, most recent first:
|
||||
Deprecate AVCodecContext.error_rate, it is replaced by the 'error_rate'
|
||||
private option of the mpegvideo encoder family.
|
||||
|
||||
2013-11-14 - 31c09b7 / 728c465 - lavc 55.42.100 / 55.26.0 - vdpau.h
|
||||
2013-11-14 - 31c09b7 / 728c465 - lavc 55.26.0 - vdpau.h
|
||||
Add av_vdpau_get_profile().
|
||||
Add av_vdpau_alloc_context(). This function must from now on be
|
||||
used for allocating AVVDPAUContext.
|
||||
@@ -324,29 +139,29 @@ API changes, most recent first:
|
||||
Add ITU-R BT.2020 and other not yet included values to color primaries,
|
||||
transfer characteristics and colorspaces.
|
||||
|
||||
2013-11-04 - 85cabf1 - lavu 52.50.100 - avutil.h
|
||||
2013-11-04 - xxxxxxx - lavu 52.50.100 - avutil.h
|
||||
Add av_fopen_utf8()
|
||||
|
||||
2013-10-31 - 78265fc / 28096e0 - lavu 52.49.100 / 52.17.0 - frame.h
|
||||
Add AVFrame.flags and AV_FRAME_FLAG_CORRUPT.
|
||||
|
||||
2013-10-27 - dbe6f9f - lavc 55.39.100 - avcodec.h
|
||||
2013-10-27 - xxxxxxx - lavc 55.39.100 - avcodec.h
|
||||
Add CODEC_CAP_DELAY support to avcodec_decode_subtitle2.
|
||||
|
||||
2013-10-27 - d61617a - lavu 52.48.100 - parseutils.h
|
||||
2013-10-27 - xxxxxxx - lavu 52.48.100 - parseutils.h
|
||||
Add av_get_known_color_name().
|
||||
|
||||
2013-10-17 - 8696e51 - lavu 52.47.100 - opt.h
|
||||
2013-10-17 - xxxxxxx - lavu 52.47.100 - opt.h
|
||||
Add AV_OPT_TYPE_CHANNEL_LAYOUT and channel layout option handlers
|
||||
av_opt_get_channel_layout() and av_opt_set_channel_layout().
|
||||
|
||||
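A minimal sketch of the new handlers (the option name "channel_layout" and the target object, e.g. a resampler or filter context exposing such an option, are assumptions):

/* Set a channel-layout typed option on an object with AVOption support. */
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>

static int set_stereo_layout(void *obj)
{
    return av_opt_set_channel_layout(obj, "channel_layout",
                                     AV_CH_LAYOUT_STEREO, AV_OPT_SEARCH_CHILDREN);
}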
2013-10-06 - ccf96f8 -libswscale 2.5.101 - options.c
|
||||
2013-10-xx - xxxxxxx -libswscale 2.5.101 - options.c
|
||||
Change default scaler to bicubic
|
||||
|
||||
2013-10-03 - e57dba0 - lavc 55.34.100 - avcodec.h
|
||||
2013-10-03 - xxxxxxx - lavc 55.34.100 - avcodec.h
|
||||
Add av_codec_get_max_lowres()
|
||||
|
||||
2013-10-02 - 5082fcc - lavf 55.19.100 - avformat.h
|
||||
2013-10-02 - xxxxxxx - lavf 55.19.100 - avformat.h
|
||||
Add audio/video/subtitle AVCodec fields to AVFormatContext to force specific
|
||||
decoders
|
||||
|
||||
@@ -364,7 +179,7 @@ API changes, most recent first:
|
||||
2013-09-04 - 3e1f507 - lavc 55.31.101 - avcodec.h
|
||||
avcodec_close() argument can be NULL.
|
||||
|
||||
2013-09-04 - 36cd017a - lavf 55.16.101 - avformat.h
|
||||
2013-09-04 - 36cd017 - lavf 55.16.101 - avformat.h
|
||||
avformat_close_input() argument can be NULL or point to NULL.
|
||||
|
||||
2013-08-29 - e31db62 - lavf 55.15.100 - avformat.h
|
||||
@@ -373,10 +188,10 @@ API changes, most recent first:
|
||||
2013-08-15 - 1e0e193 - lsws 2.5.100 -
|
||||
Add a sws_dither AVOption, allowing the dither algorithm to be selected
|
||||
|
||||
2013-08-11 - d404fe35 - lavc 55.27.100 - vdpau.h
|
||||
2013-08-xx - xxxxxxx - lavc 55.27.100 - vdpau.h
|
||||
Add a render2 alternative to the render callback function.
|
||||
|
||||
2013-08-11 - af05edc - lavc 55.26.100 - vdpau.h
|
||||
2013-08-xx - xxxxxxx - lavc 55.26.100 - vdpau.h
|
||||
Add allocation function for AVVDPAUContext, allowing
|
||||
to extend it in the future without breaking ABI/API.
|
||||
|
||||
@@ -386,7 +201,7 @@ API changes, most recent first:
|
||||
|
||||
2013-08-05 - 9547e3e / f824535 - lavc 55.22.100 / 55.13.0 - avcodec.h
|
||||
Deprecate the bitstream-related members from struct AVVDPAUContext.
|
||||
The bitstream buffers no longer need to be explicitly freed.
|
||||
The bistream buffers no longer need to be explicitly freed.
|
||||
|
||||
2013-08-05 - 3b805dc / 549294f - lavc 55.21.100 / 55.12.0 - avcodec.h
|
||||
Deprecate the CODEC_CAP_HWACCEL_VDPAU codec capability. Use CODEC_CAP_HWACCEL
|
||||
@@ -977,9 +792,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
|
||||
Add avformat_close_input().
|
||||
Deprecate av_close_input_file() and av_close_input_stream().
|
||||
|
||||
2011-12-09 - c59b80c / b2890f5 - lavu 51.32.0 / 51.20.0 - audioconvert.h
|
||||
Expand the channel layout list.
|
||||
|
||||
2011-12-02 - e4de716 / 0eea212 - lavc 53.40.0 / 53.25.0
|
||||
Add nb_samples and extended_data fields to AVFrame.
|
||||
Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
|
||||
@@ -993,10 +805,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
|
||||
Change AVCodecContext.error[4] to [8] at next major bump.
|
||||
Add AV_NUM_DATA_POINTERS to simplify the bump transition.
|
||||
|
||||
2011-11-24 - lavu 51.29.0 / 51.19.0
|
||||
92afb43 / bd97b2e - add planar RGB pixel formats
|
||||
92afb43 / 6b0768e - add PIX_FMT_PLANAR and PIX_FMT_RGB pixel descriptions
|
||||
|
||||
2011-11-23 - 8e576d5 / bbb46f3 - lavu 51.27.0 / 51.18.0
|
||||
Add av_samples_get_buffer_size(), av_samples_fill_arrays(), and
|
||||
av_samples_alloc(), to samplefmt.h.
|
||||
|
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
|
||||
# This could be handy for archiving the generated documentation or
|
||||
# if some version control system is used.
|
||||
|
||||
PROJECT_NUMBER = 2.3.6
|
||||
PROJECT_NUMBER = 2.2.3
|
||||
|
||||
# With the PROJECT_LOGO tag one can specify a logo or icon that is included
|
||||
# in the documentation. The maximum height of the logo should not exceed 55
|
||||
@@ -759,7 +759,7 @@ ALPHABETICAL_INDEX = YES
|
||||
# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
|
||||
# in which this list will be split (can be a number in the range [1..20])
|
||||
|
||||
COLS_IN_ALPHA_INDEX = 5
|
||||
COLS_IN_ALPHA_INDEX = 2
|
||||
|
||||
# In case all classes in a project start with a common prefix, all
|
||||
# classes will be put under the same header in the alphabetical index.
|
||||
@@ -1056,7 +1056,7 @@ FORMULA_TRANSPARENT = YES
|
||||
# typically be disabled. For large projects the javascript based search engine
|
||||
# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
|
||||
|
||||
SEARCHENGINE = YES
|
||||
SEARCHENGINE = NO
|
||||
|
||||
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
|
||||
# implemented using a PHP enabled web server instead of at the web client
|
||||
@@ -1359,8 +1359,6 @@ PREDEFINED = "__attribute__(x)=" \
|
||||
"DECLARE_ALIGNED(a,t,n)=t n" \
|
||||
"offsetof(x,y)=0x42" \
|
||||
av_alloc_size \
|
||||
AV_GCC_VERSION_AT_LEAST(x,y)=1 \
|
||||
__GNUC__=1 \
|
||||
|
||||
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
|
||||
# this tag can be used to specify a list of macro names that should be expanded.
|
||||
|
@@ -48,7 +48,6 @@ DOC_EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
|
||||
DOC_EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
|
||||
DOC_EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
|
||||
DOC_EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE) += transcode_aac
|
||||
DOC_EXAMPLES-$(CONFIG_TRANSCODING_EXAMPLE) += transcoding
|
||||
ALL_DOC_EXAMPLES_LIST = $(DOC_EXAMPLES-) $(DOC_EXAMPLES-yes)
|
||||
|
||||
DOC_EXAMPLES := $(DOC_EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)$(EXESUF))
|
||||
@@ -111,9 +110,8 @@ OBJDIRS += doc/examples
|
||||
|
||||
DOXY_INPUT = $(addprefix $(SRC_PATH)/, $(INSTHEADERS) $(DOC_EXAMPLES:%$(EXESUF)=%.c) $(LIB_EXAMPLES:%$(EXESUF)=%.c))
|
||||
|
||||
doc/doxy/html: TAG = DOXY
|
||||
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT)
|
||||
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $< $(DOXYGEN) $(DOXY_INPUT)
|
||||
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(DOXY_INPUT)
|
||||
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $< $(DOXY_INPUT)
|
||||
|
||||
install-doc: install-html install-man
|
||||
|
||||
|
doc/RELEASE_NOTES (new file, 16 lines added)
@@ -0,0 +1,16 @@
|
||||
Release Notes
|
||||
=============
|
||||
|
||||
* 2.2 "Muybridge" March, 2014
|
||||
|
||||
|
||||
General notes
|
||||
-------------
|
||||
See the Changelog file for a list of significant changes. Note, there
|
||||
are many more new features and bugfixes than what's listed there.
|
||||
|
||||
Bugreports against FFmpeg git master or the most recent FFmpeg release are
|
||||
accepted. If you are experiencing issues with any formally released version of
|
||||
FFmpeg, please try git master to check if the issue still exists. If it does,
|
||||
make your report against the development code following the usual bug reporting
|
||||
guidelines.
|
@@ -74,18 +74,7 @@ format with @command{ffmpeg}, you can use the command:
|
||||
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
|
||||
@end example
|
||||
|
||||
@section imxdump
|
||||
|
||||
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
|
||||
Pro decoder. This filter only applies to the mpeg2video codec, and is
|
||||
likely not needed for Final Cut Pro 7 and newer with the appropriate
|
||||
@option{-tag:v}.
|
||||
|
||||
For example, to remux 30 MB/sec NTSC IMX to MOV:
|
||||
|
||||
@example
|
||||
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
|
||||
@end example
|
||||
@section imx_dump_header
|
||||
|
||||
@section mjpeg2jpeg
|
||||
|
||||
@@ -132,13 +121,6 @@ ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
|
||||
|
||||
@section noise
|
||||
|
||||
Damages the contents of packets without damaging the container. Can be
|
||||
used for fuzzing or testing error resilience/concealment.
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf noise output.mkv
|
||||
@end example
|
||||
|
||||
@section remove_extra
|
||||
|
||||
@c man end BITSTREAM FILTERS
|
||||
|
@@ -25,9 +25,6 @@ fate-list
|
||||
install
|
||||
Install headers, libraries and programs.
|
||||
|
||||
examples
|
||||
Build all examples located in doc/examples.
|
||||
|
||||
libavformat/output-example
|
||||
Build the libavformat basic example.
|
||||
|
||||
@@ -37,9 +34,6 @@ libavcodec/api-example
|
||||
libswscale/swscale-test
|
||||
Build the swscale self-test (useful also as example).
|
||||
|
||||
config
|
||||
Reconfigure the project with current configuration.
|
||||
|
||||
|
||||
Useful standard make commands:
|
||||
make -t <target>
|
||||
|
@@ -285,11 +285,6 @@ detect bitstream specification deviations
|
||||
detect improper bitstream length
|
||||
@item explode
|
||||
abort decoding on minor error detection
|
||||
@item ignore_err
|
||||
ignore decoding errors, and continue decoding.
|
||||
This is useful if you want to analyze the content of a video and thus want
|
||||
everything to be decoded no matter what. This option will not result in a video
|
||||
that is pleasing to watch in case of errors.
|
||||
@item careful
|
||||
consider things that violate the spec and have not been seen in the wild as errors
|
||||
@item compliant
|
||||
@@ -394,9 +389,6 @@ Possible values:
|
||||
|
||||
@item simplemmx
|
||||
|
||||
@item simpleauto
|
||||
Automatically pick an IDCT compatible with the simple one
|
||||
|
||||
@item arm
|
||||
|
||||
@item altivec
|
||||
@@ -432,8 +424,6 @@ Possible values:
|
||||
iterative motion vector (MV) search (slow)
|
||||
@item deblock
|
||||
use strong deblock filter for damaged MBs
|
||||
@item favor_inter
|
||||
favor predicting from the previous frame instead of the current
|
||||
@end table
|
||||
|
||||
@item bits_per_coded_sample @var{integer}
|
||||
@@ -889,7 +879,7 @@ Set frame skip factor.
|
||||
Set frame skip exponent.
|
||||
Negative values behave identically to the corresponding positive ones, except
|
||||
that the score is normalized.
|
||||
Positive values exist primarily for compatibility reasons and are not so useful.
|
||||
Positive values exist primarly for compatibility reasons and are not so useful.
|
||||
|
||||
@item skipcmp @var{integer} (@emph{encoding,video})
|
||||
Set frame skip compare function.
|
||||
@@ -1035,26 +1025,15 @@ Set the log level offset.
|
||||
Number of slices, used in parallelized encoding.
|
||||
|
||||
@item thread_type @var{flags} (@emph{decoding/encoding,video})
|
||||
Select which multithreading methods to use.
|
||||
|
||||
Use of @samp{frame} will increase decoding delay by one frame per
|
||||
thread, so clients which cannot provide future frames should not use
|
||||
it.
|
||||
Select multithreading type.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item slice
|
||||
Decode more than one part of a single frame at once.
|
||||
|
||||
Multithreading using slices works only when the video was encoded with
|
||||
slices.
|
||||
|
||||
@item frame
|
||||
Decode more than one frame at once.
|
||||
|
||||
@end table
|
||||
|
||||
Default value is @samp{slice+frame}.
|
||||
|
||||
@item audio_service_type @var{integer} (@emph{encoding,audio})
|
||||
Set audio service type.
|
||||
|
||||
|
@@ -74,7 +74,7 @@ following directive is recognized:
|
||||
Path to a file to read; special characters and spaces must be escaped with
|
||||
backslash or single quotes.
|
||||
|
||||
All subsequent file-related directives apply to that file.
|
||||
All subsequent directives apply to that file.
|
||||
|
||||
@item @code{ffconcat version 1.0}
|
||||
Identify the script type and version. It also sets the @option{safe} option
|
||||
@@ -92,22 +92,6 @@ file is not available or accurate.
|
||||
If the duration is set for all files, then it is possible to seek in the
|
||||
whole concatenated video.
|
||||
|
||||
@item @code{stream}
|
||||
Introduce a stream in the virtual file.
|
||||
All subsequent stream-related directives apply to the last introduced
|
||||
stream.
|
||||
Some stream properties must be set in order to allow identifying the
|
||||
matching streams in the subfiles.
|
||||
If no streams are defined in the script, the streams from the first file are
|
||||
copied.
|
||||
|
||||
@item @code{exact_stream_id @var{id}}
|
||||
Set the id of the stream.
|
||||
If this directive is given, the stream with the corresponding id in the
|
||||
subfiles will be used.
|
||||
This is especially useful for MPEG-PS (VOB) files, where the order of the
|
||||
streams is not reliable.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Options
|
||||
@@ -128,14 +112,6 @@ If set to 0, any file name is accepted.
|
||||
The default is -1, it is equivalent to 1 if the format was automatically
|
||||
probed and 0 otherwise.
|
||||
|
||||
@item auto_convert
|
||||
If set to 1, try to perform automatic conversions on packet data to make the
|
||||
streams concatenable.
|
||||
|
||||
Currently, the only conversion is adding the h264_mp4toannexb bitstream
|
||||
filter to H.264 streams in MP4 format. This is necessary in particular if
|
||||
there are resolution changes.
|
||||
|
||||
@end table
|
||||
|
||||
@section flv
|
||||
@@ -273,8 +249,6 @@ is 5.
|
||||
If set to 1, will set frame timestamp to modification time of image file. Note
|
||||
that monotonicity of timestamps is not provided: images go in the same order as
|
||||
without this option. Default value is 0.
|
||||
If set to 2, will set frame timestamp to the modification time of the image file in
|
||||
nanosecond precision.
|
||||
@item video_size
|
||||
Set the video size of the images to read. If not specified the video
|
||||
size is guessed from the first image file in the sequence.
|
||||
|
@@ -2,11 +2,10 @@
|
||||
|
||||
SRC_PATH="${1}"
|
||||
DOXYFILE="${2}"
|
||||
DOXYGEN="${3}"
|
||||
|
||||
shift 3
|
||||
shift 2
|
||||
|
||||
$DOXYGEN - <<EOF
|
||||
doxygen - <<EOF
|
||||
@INCLUDE = ${DOXYFILE}
|
||||
INPUT = $@
|
||||
EXAMPLE_PATH = ${SRC_PATH}/doc/examples
|
||||
|
@@ -80,7 +80,7 @@ thresholds with quantizer steps to find the appropriate quantization with
|
||||
distortion below threshold band by band.
|
||||
|
||||
The quality of this method is comparable to the two loop searching method
|
||||
described below, but somewhat a little better and slower.
|
||||
descibed below, but somewhat a little better and slower.
|
||||
|
||||
@item anmr
|
||||
Average noise to mask ratio (ANMR) trellis-based solution.
|
||||
@@ -1566,34 +1566,25 @@ kilobits/s.
|
||||
|
||||
@item g (@emph{keyint})
|
||||
|
||||
@item qmin (@emph{qpmin})
|
||||
Minimum quantizer scale.
|
||||
|
||||
@item qmax (@emph{qpmax})
|
||||
Maximum quantizer scale.
|
||||
|
||||
@item qmin (@emph{qpmin})
|
||||
|
||||
@item qdiff (@emph{qpstep})
|
||||
Maximum difference between quantizer scales.
|
||||
|
||||
@item qblur (@emph{qblur})
|
||||
Quantizer curve blur
|
||||
|
||||
@item qcomp (@emph{qcomp})
|
||||
Quantizer curve compression factor
|
||||
|
||||
@item refs (@emph{ref})
|
||||
Number of reference frames each P-frame can use. The range is from @var{0-16}.
|
||||
|
||||
@item sc_threshold (@emph{scenecut})
|
||||
Sets the threshold for the scene change detection.
|
||||
|
||||
@item trellis (@emph{trellis})
|
||||
Performs Trellis quantization to increase efficiency. Enabled by default.
|
||||
|
||||
@item nr (@emph{nr})
|
||||
|
||||
@item me_range (@emph{merange})
|
||||
Maximum range of the motion search in pixels.
|
||||
|
||||
@item me_method (@emph{me})
|
||||
Set motion estimation method. Possible values in the decreasing order
|
||||
@@ -1615,13 +1606,10 @@ Hadamard exhaustive search (slowest).
|
||||
@end table
|
||||
|
||||
@item subq (@emph{subme})
|
||||
Sub-pixel motion estimation method.
|
||||
|
||||
@item b_strategy (@emph{b-adapt})
|
||||
Adaptive B-frame placement decision algorithm. Use only on first-pass.
|
||||
|
||||
@item keyint_min (@emph{min-keyint})
|
||||
Minimum GOP size.
|
||||
|
||||
@item coder
|
||||
Set entropy encoder. Possible values:
|
||||
@@ -1648,7 +1636,6 @@ Ignore chroma in motion estimation. It generates the same effect as
|
||||
@end table
|
||||
|
||||
@item threads (@emph{threads})
|
||||
Number of encoding threads.
|
||||
|
||||
@item thread_type
|
||||
Set multithreading technique. Possible values:
|
||||
@@ -2042,30 +2029,6 @@ fastest.
|
||||
|
||||
@end table
|
||||
|
||||
@section mpeg2
|
||||
|
||||
MPEG-2 video encoder.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item seq_disp_ext @var{integer}
|
||||
Specifies if the encoder should write a sequence_display_extension to the
|
||||
output.
|
||||
@table @option
|
||||
@item -1
|
||||
@itemx auto
|
||||
Decide automatically to write it or not (this is the default) by checking if
|
||||
the data to be written is different from the default or unspecified values.
|
||||
@item 0
|
||||
@itemx never
|
||||
Never write it.
|
||||
@item 1
|
||||
@itemx always
|
||||
Always write it.
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@section png
|
||||
|
||||
PNG image encoder.
|
||||
@@ -2147,27 +2110,3 @@ For the fastest encoding speed set the @option{qscale} parameter (4 is the
|
||||
recommended value) and do not set a size constraint.
|
||||
|
||||
@c man end VIDEO ENCODERS
|
||||
|
||||
@chapter Subtitles Encoders
|
||||
@c man begin SUBTITLES ENCODERS
|
||||
|
||||
@section dvdsub
|
||||
|
||||
This codec encodes the bitmap subtitle format that is used in DVDs.
|
||||
Typically they are stored in VOBSUB file pairs (*.idx + *.sub),
|
||||
and they can also be used in Matroska files.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item even_rows_fix
|
||||
When set to 1, enable a work-around that makes the number of pixel rows
|
||||
even in all subtitles. This fixes a problem with some players that
|
||||
cut off the bottom row if the number is odd. The work-around just adds
|
||||
a fully transparent row if needed. The overhead is low, typically
|
||||
one byte per subtitle on average.
|
||||
|
||||
By default, this work-around is disabled.
|
||||
@end table
|
||||
|
||||
@c man end SUBTITLES ENCODERS
|
||||
|
@@ -12,7 +12,7 @@ CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
|
||||
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
|
||||
EXAMPLES= avio_reading \
|
||||
decoding_encoding \
|
||||
avcodec \
|
||||
demuxing_decoding \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
@@ -22,7 +22,6 @@ EXAMPLES= avio_reading \
|
||||
resampling_audio \
|
||||
scaling_video \
|
||||
transcode_aac \
|
||||
transcoding \
|
||||
|
||||
OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
|
||||
|
@@ -24,7 +24,7 @@
|
||||
* @file
|
||||
* libavcodec API use example.
|
||||
*
|
||||
* @example decoding_encoding.c
|
||||
* @example avcodec.c
|
||||
* Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
|
||||
* not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
|
||||
* format handling
|
||||
@@ -375,13 +375,7 @@ static void video_encode_example(const char *filename, int codec_id)
|
||||
c->height = 288;
|
||||
/* frames per second */
|
||||
c->time_base = (AVRational){1,25};
|
||||
/* emit one intra frame every ten frames
|
||||
* check frame pict_type before passing frame
|
||||
* to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
|
||||
* then gop_size is ignored and the output of encoder
|
||||
will always be an I-frame irrespective of gop_size
|
||||
*/
|
||||
c->gop_size = 10;
|
||||
c->gop_size = 10; /* emit one intra frame every ten frames */
|
||||
c->max_b_frames = 1;
|
||||
c->pix_fmt = AV_PIX_FMT_YUV420P;
|
||||
|
||||
@@ -640,7 +634,7 @@ int main(int argc, char **argv)
|
||||
"This program generates a synthetic stream and encodes it to a file\n"
|
||||
"named test.h264, test.mp2 or test.mpg depending on output_type.\n"
|
||||
"The encoded stream is then decoded and written to a raw data output.\n"
|
||||
"output_type must be chosen between 'h264', 'mp2', 'mpg'.\n",
|
||||
"output_type must be choosen between 'h264', 'mp2', 'mpg'.\n",
|
||||
argv[0]);
|
||||
return 1;
|
||||
}
|
@@ -279,7 +279,7 @@ int main (int argc, char **argv)
|
||||
audio_dec_ctx = audio_stream->codec;
|
||||
audio_dst_file = fopen(audio_dst_filename, "wb");
|
||||
if (!audio_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
|
||||
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
@@ -34,8 +34,6 @@
|
||||
#include <string.h>
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/avassert.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
@@ -43,26 +41,13 @@
|
||||
#include <libswscale/swscale.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
static int audio_is_eof, video_is_eof;
|
||||
|
||||
#define STREAM_DURATION 10.0
|
||||
#define STREAM_FRAME_RATE 25 /* 25 images/s */
|
||||
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
|
||||
|
||||
#define SCALE_FLAGS SWS_BICUBIC
|
||||
|
||||
// a wrapper around a single output AVStream
|
||||
typedef struct OutputStream {
|
||||
AVStream *st;
|
||||
|
||||
/* pts of the next frame that will be generated */
|
||||
int64_t next_pts;
|
||||
|
||||
AVFrame *frame;
|
||||
AVFrame *tmp_frame;
|
||||
|
||||
float t, tincr, tincr2;
|
||||
|
||||
struct SwsContext *sws_ctx;
|
||||
} OutputStream;
|
||||
static int sws_flags = SWS_BICUBIC;
|
||||
|
||||
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
{
|
||||
@@ -78,7 +63,9 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
|
||||
{
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, *time_base, st->time_base);
|
||||
pkt->pts = av_rescale_q_rnd(pkt->pts, *time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
pkt->dts = av_rescale_q_rnd(pkt->dts, *time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
pkt->duration = av_rescale_q(pkt->duration, *time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
@@ -87,11 +74,11 @@ static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AV
|
||||
}
|
||||
|
||||
/* Add an output stream. */
|
||||
static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
AVCodec **codec,
|
||||
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVStream *st;
|
||||
|
||||
/* find the encoder */
|
||||
*codec = avcodec_find_encoder(codec_id);
|
||||
@@ -101,13 +88,13 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->st = avformat_new_stream(oc, *codec);
|
||||
if (!ost->st) {
|
||||
st = avformat_new_stream(oc, *codec);
|
||||
if (!st) {
|
||||
fprintf(stderr, "Could not allocate stream\n");
|
||||
exit(1);
|
||||
}
|
||||
ost->st->id = oc->nb_streams-1;
|
||||
c = ost->st->codec;
|
||||
st->id = oc->nb_streams-1;
|
||||
c = st->codec;
|
||||
|
||||
switch ((*codec)->type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
@@ -116,7 +103,6 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
c->channels = 2;
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
@@ -153,21 +139,41 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
/* Some formats want stream headers to be separate. */
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
return st;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* audio output */
|
||||
|
||||
static float t, tincr, tincr2;
|
||||
|
||||
AVFrame *audio_frame;
|
||||
static uint8_t **src_samples_data;
|
||||
static int src_samples_linesize;
|
||||
static int src_nb_samples;
|
||||
|
||||
static int max_dst_nb_samples;
|
||||
uint8_t **dst_samples_data;
|
||||
int dst_samples_linesize;
|
||||
int dst_samples_size;
|
||||
int samples_count;
|
||||
|
||||
struct SwrContext *swr_ctx = NULL;
|
||||
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int ret;
|
||||
|
||||
c = ost->st->codec;
|
||||
c = st->codec;
|
||||
|
||||
/* allocate and init a re-usable frame */
|
||||
audio_frame = av_frame_alloc();
|
||||
if (!audio_frame) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* open it */
|
||||
ret = avcodec_open2(c, codec, NULL);
|
||||
@@ -177,32 +183,25 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
|
||||
}
|
||||
|
||||
/* init signal generator */
|
||||
ost->t = 0;
|
||||
ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
|
||||
t = 0;
|
||||
tincr = 2 * M_PI * 110.0 / c->sample_rate;
|
||||
/* increment frequency by 110 Hz per second */
|
||||
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
ost->frame = av_frame_alloc();
|
||||
if (!ost->frame)
|
||||
src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
|
||||
10000 : c->frame_size;
|
||||
|
||||
ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels,
|
||||
src_nb_samples, AV_SAMPLE_FMT_S16, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate source samples\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->frame->sample_rate = c->sample_rate;
|
||||
ost->frame->format = AV_SAMPLE_FMT_S16;
|
||||
ost->frame->channel_layout = c->channel_layout;
|
||||
|
||||
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
ost->frame->nb_samples = 10000;
|
||||
else
|
||||
ost->frame->nb_samples = c->frame_size;
|
||||
|
||||
ost->tmp_frame = av_frame_alloc();
|
||||
if (!ost->frame)
|
||||
exit(1);
|
||||
|
||||
ost->tmp_frame->sample_rate = c->sample_rate;
|
||||
ost->tmp_frame->format = c->sample_fmt;
|
||||
ost->tmp_frame->channel_layout = c->channel_layout;
|
||||
ost->tmp_frame->nb_samples = ost->frame->nb_samples;
|
||||
/* compute the number of converted samples: buffering is avoided
|
||||
* ensuring that the output buffer will contain at least all the
|
||||
* converted input samples */
|
||||
max_dst_nb_samples = src_nb_samples;
|
||||
|
||||
/* create resampler context */
|
||||
if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
|
||||
@@ -225,145 +224,127 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
ret = av_frame_get_buffer(ost->frame, 0);
|
||||
ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
|
||||
max_dst_nb_samples, c->sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate an audio frame.\n");
|
||||
fprintf(stderr, "Could not allocate destination samples\n");
|
||||
exit(1);
|
||||
}
|
||||
ret = av_frame_get_buffer(ost->tmp_frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate an audio frame.\n");
|
||||
exit(1);
|
||||
} else {
|
||||
dst_samples_data = src_samples_data;
|
||||
}
|
||||
dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples,
|
||||
c->sample_fmt, 0);
|
||||
}
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
* 'nb_channels' channels. */
|
||||
static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
|
||||
{
|
||||
int j, i, v, ret;
|
||||
int16_t *q = (int16_t*)ost->frame->data[0];
|
||||
int j, i, v;
|
||||
int16_t *q;
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(ost->frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
for (j = 0; j < ost->frame->nb_samples; j++) {
|
||||
v = (int)(sin(ost->t) * 10000);
|
||||
for (i = 0; i < ost->st->codec->channels; i++)
|
||||
q = samples;
|
||||
for (j = 0; j < frame_size; j++) {
|
||||
v = (int)(sin(t) * 10000);
|
||||
for (i = 0; i < nb_channels; i++)
|
||||
*q++ = v;
|
||||
ost->t += ost->tincr;
|
||||
ost->tincr += ost->tincr2;
|
||||
t += tincr;
|
||||
tincr += tincr2;
|
||||
}
|
||||
|
||||
ost->frame->pts = ost->next_pts;
|
||||
ost->next_pts += ost->frame->nb_samples;
|
||||
|
||||
return ost->frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one audio frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
static void write_audio_frame(AVFormatContext *oc, AVStream *st, int flush)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVPacket pkt = { 0 }; // data and size must be 0;
|
||||
AVFrame *frame;
|
||||
int ret;
|
||||
int got_packet;
|
||||
int dst_nb_samples;
|
||||
int got_packet, ret, dst_nb_samples;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
c = ost->st->codec;
|
||||
c = st->codec;
|
||||
|
||||
frame = get_audio_frame(ost);
|
||||
if (!flush) {
|
||||
get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);
|
||||
|
||||
if (frame) {
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
if (swr_ctx) {
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
if (dst_nb_samples > max_dst_nb_samples) {
|
||||
av_free(dst_samples_data[0]);
|
||||
ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
|
||||
dst_nb_samples, c->sample_fmt, 0);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
max_dst_nb_samples = dst_nb_samples;
|
||||
dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
|
||||
c->sample_fmt, 0);
|
||||
}
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(swr_ctx,
|
||||
ost->tmp_frame->data, dst_nb_samples,
|
||||
(const uint8_t **)frame->data, frame->nb_samples);
|
||||
dst_samples_data, dst_nb_samples,
|
||||
(const uint8_t **)src_samples_data, src_nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
exit(1);
|
||||
}
|
||||
frame = ost->tmp_frame;
|
||||
} else {
|
||||
dst_nb_samples = frame->nb_samples;
|
||||
dst_nb_samples = src_nb_samples;
|
||||
}
|
||||
|
||||
frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base);
|
||||
audio_frame->nb_samples = dst_nb_samples;
|
||||
audio_frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base);
|
||||
avcodec_fill_audio_frame(audio_frame, c->channels, c->sample_fmt,
|
||||
dst_samples_data[0], dst_samples_size, 0);
|
||||
samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
ret = avcodec_encode_audio2(c, &pkt, flush ? NULL : audio_frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
if (!got_packet) {
|
||||
if (flush)
|
||||
audio_is_eof = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
ret = write_frame(oc, &c->time_base, st, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing audio frame: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
static void close_audio(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
avcodec_close(st->codec);
|
||||
if (dst_samples_data != src_samples_data) {
|
||||
av_free(dst_samples_data[0]);
|
||||
av_free(dst_samples_data);
|
||||
}
|
||||
av_free(src_samples_data[0]);
|
||||
av_free(src_samples_data);
|
||||
av_frame_free(&audio_frame);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* video output */
|
||||
|
||||
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
AVFrame *picture;
|
||||
int ret;
|
||||
static AVFrame *frame;
|
||||
static AVPicture src_picture, dst_picture;
|
||||
static int frame_count;
|
||||
|
||||
picture = av_frame_alloc();
|
||||
if (!picture)
|
||||
return NULL;
|
||||
|
||||
picture->format = pix_fmt;
|
||||
picture->width = width;
|
||||
picture->height = height;
|
||||
|
||||
/* allocate the buffers for the frame data */
|
||||
ret = av_frame_get_buffer(picture, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate frame data.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return picture;
|
||||
}
|
||||
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = ost->st->codec;
|
||||
AVCodecContext *c = st->codec;
|
||||
|
||||
/* open the codec */
|
||||
ret = avcodec_open2(c, codec, NULL);
|
||||
@@ -373,38 +354,43 @@ static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
|
||||
}
|
||||
|
||||
/* allocate and init a re-usable frame */
|
||||
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
|
||||
if (!ost->frame) {
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
frame->format = c->pix_fmt;
|
||||
frame->width = c->width;
|
||||
frame->height = c->height;
|
||||
|
||||
/* Allocate the encoded raw picture. */
|
||||
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* If the output format is not YUV420P, then a temporary YUV420P
|
||||
* picture is needed too. It is then converted to the required
|
||||
* output format. */
|
||||
ost->tmp_frame = NULL;
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (!ost->tmp_frame) {
|
||||
fprintf(stderr, "Could not allocate temporary picture\n");
|
||||
ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate temporary picture: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* copy data and linesize picture pointers to frame */
|
||||
*((AVPicture *)frame) = dst_picture;
|
||||
}
|
||||
|
||||
/* Prepare a dummy image. */
|
||||
static void fill_yuv_image(AVFrame *pict, int frame_index,
|
||||
static void fill_yuv_image(AVPicture *pict, int frame_index,
|
||||
int width, int height)
|
||||
{
|
||||
int x, y, i, ret;
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(pict);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
int x, y, i;
|
||||
|
||||
i = frame_index;
|
||||
|
||||
@@ -422,89 +408,65 @@ static void fill_yuv_image(AVFrame *pict, int frame_index,
|
||||
}
|
||||
}
|
||||
|
||||
static AVFrame *get_video_frame(OutputStream *ost)
|
||||
static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush)
|
||||
{
|
||||
AVCodecContext *c = ost->st->codec;
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
int ret;
|
||||
static struct SwsContext *sws_ctx;
|
||||
AVCodecContext *c = st->codec;
|
||||
|
||||
if (!flush) {
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
/* as we only generate a YUV420P picture, we must convert it
|
||||
* to the codec pixel format if needed */
|
||||
if (!ost->sws_ctx) {
|
||||
ost->sws_ctx = sws_getContext(c->width, c->height,
|
||||
AV_PIX_FMT_YUV420P,
|
||||
c->width, c->height,
|
||||
c->pix_fmt,
|
||||
SCALE_FLAGS, NULL, NULL, NULL);
|
||||
if (!ost->sws_ctx) {
|
||||
if (!sws_ctx) {
|
||||
sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
|
||||
c->width, c->height, c->pix_fmt,
|
||||
sws_flags, NULL, NULL, NULL);
|
||||
if (!sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Could not initialize the conversion context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
|
||||
sws_scale(ost->sws_ctx,
|
||||
(const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
|
||||
0, c->height, ost->frame->data, ost->frame->linesize);
|
||||
fill_yuv_image(&src_picture, frame_count, c->width, c->height);
|
||||
sws_scale(sws_ctx,
|
||||
(const uint8_t * const *)src_picture.data, src_picture.linesize,
|
||||
0, c->height, dst_picture.data, dst_picture.linesize);
|
||||
} else {
|
||||
fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
|
||||
fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
|
||||
}
|
||||
}
|
||||
|
||||
ost->frame->pts = ost->next_pts++;
|
||||
|
||||
return ost->frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one video frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c;
|
||||
AVFrame *frame;
|
||||
int got_packet = 0;
|
||||
|
||||
c = ost->st->codec;
|
||||
|
||||
frame = get_video_frame(ost);
|
||||
|
||||
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
|
||||
/* a hack to avoid data copy with some raw video muxers */
|
||||
if (oc->oformat->flags & AVFMT_RAWPICTURE && !flush) {
|
||||
/* Raw video case - directly store the picture in the packet */
|
||||
AVPacket pkt;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
if (!frame)
|
||||
return 1;
|
||||
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = ost->st->index;
|
||||
pkt.data = (uint8_t *)frame;
|
||||
pkt.stream_index = st->index;
|
||||
pkt.data = dst_picture.data[0];
|
||||
pkt.size = sizeof(AVPicture);
|
||||
|
||||
pkt.pts = pkt.dts = frame->pts;
|
||||
av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
|
||||
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
} else {
|
||||
AVPacket pkt = { 0 };
|
||||
int got_packet;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
|
||||
frame->pts = frame_count;
|
||||
ret = avcodec_encode_video2(c, &pkt, flush ? NULL : frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
/* If size is zero, it means the image was buffered. */
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
ret = write_frame(oc, &c->time_base, st, &pkt);
|
||||
} else {
|
||||
if (flush)
|
||||
video_is_eof = 1;
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
@@ -513,16 +475,15 @@ static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
frame_count++;
|
||||
}
|
||||
|
||||
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
static void close_video(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
avcodec_close(ost->st->codec);
|
||||
av_frame_free(&ost->frame);
|
||||
av_frame_free(&ost->tmp_frame);
|
||||
sws_freeContext(ost->sws_ctx);
|
||||
avcodec_close(st->codec);
|
||||
av_free(src_picture.data[0]);
|
||||
av_free(dst_picture.data[0]);
|
||||
av_frame_free(&frame);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
@@ -530,14 +491,13 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
OutputStream video_st = { 0 }, audio_st = { 0 };
|
||||
const char *filename;
|
||||
AVOutputFormat *fmt;
|
||||
AVFormatContext *oc;
|
||||
AVStream *audio_st, *video_st;
|
||||
AVCodec *audio_codec, *video_codec;
|
||||
int ret;
|
||||
int have_video = 0, have_audio = 0;
|
||||
int encode_video = 0, encode_audio = 0;
|
||||
double audio_time, video_time;
|
||||
int flush, ret;
|
||||
|
||||
/* Initialize libavcodec, and register all codecs and formats. */
|
||||
av_register_all();
|
||||
@@ -568,24 +528,20 @@ int main(int argc, char **argv)
|
||||
|
||||
/* Add the audio and video streams using the default format codecs
|
||||
* and initialize the codecs. */
|
||||
if (fmt->video_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&video_st, oc, &video_codec, fmt->video_codec);
|
||||
have_video = 1;
|
||||
encode_video = 1;
|
||||
}
|
||||
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
|
||||
have_audio = 1;
|
||||
encode_audio = 1;
|
||||
}
|
||||
video_st = NULL;
|
||||
audio_st = NULL;
|
||||
|
||||
if (fmt->video_codec != AV_CODEC_ID_NONE)
|
||||
video_st = add_stream(oc, &video_codec, fmt->video_codec);
|
||||
if (fmt->audio_codec != AV_CODEC_ID_NONE)
|
||||
audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
|
||||
|
||||
/* Now that all the parameters are set, we can open the audio and
|
||||
* video codecs and allocate the necessary encode buffers. */
|
||||
if (have_video)
|
||||
open_video(oc, video_codec, &video_st);
|
||||
|
||||
if (have_audio)
|
||||
open_audio(oc, audio_codec, &audio_st);
|
||||
if (video_st)
|
||||
open_video(oc, video_codec, video_st);
|
||||
if (audio_st)
|
||||
open_audio(oc, audio_codec, audio_st);
|
||||
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
@@ -607,14 +563,23 @@ int main(int argc, char **argv)
|
||||
return 1;
|
||||
}
|
||||
|
||||
while (encode_video || encode_audio) {
|
||||
/* select the stream to encode */
|
||||
if (encode_video &&
|
||||
(!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
|
||||
audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
|
||||
encode_video = !write_video_frame(oc, &video_st);
|
||||
} else {
|
||||
encode_audio = !write_audio_frame(oc, &audio_st);
|
||||
flush = 0;
|
||||
while ((video_st && !video_is_eof) || (audio_st && !audio_is_eof)) {
|
||||
/* Compute current audio and video time. */
|
||||
audio_time = (audio_st && !audio_is_eof) ? audio_st->pts.val * av_q2d(audio_st->time_base) : INFINITY;
|
||||
video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;
|
||||
|
||||
if (!flush &&
|
||||
(!audio_st || audio_time >= STREAM_DURATION) &&
|
||||
(!video_st || video_time >= STREAM_DURATION)) {
|
||||
flush = 1;
|
||||
}
|
||||
|
||||
/* write interleaved audio and video frames */
|
||||
if (audio_st && !audio_is_eof && audio_time <= video_time) {
|
||||
write_audio_frame(oc, audio_st, flush);
|
||||
} else if (video_st && !video_is_eof && video_time < audio_time) {
|
||||
write_video_frame(oc, video_st, flush);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -625,10 +590,10 @@ int main(int argc, char **argv)
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
if (have_video)
|
||||
close_stream(oc, &video_st);
|
||||
if (have_audio)
|
||||
close_stream(oc, &audio_st);
|
||||
if (video_st)
|
||||
close_video(oc, video_st);
|
||||
if (audio_st)
|
||||
close_audio(oc, audio_st);
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
/* Close the output file. */
|
||||
|
@@ -99,7 +99,6 @@ int main(int argc, char **argv)
|
||||
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
|
||||
goto end;
|
||||
}
|
||||
out_stream->codec->codec_tag = 0;
|
||||
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
}
|
||||
|
@@ -168,7 +168,7 @@ int main(int argc, char **argv)
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
|
||||
src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
if (dst_nb_samples > max_dst_nb_samples) {
|
||||
av_freep(&dst_data[0]);
|
||||
av_free(dst_data[0]);
|
||||
ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 1);
|
||||
if (ret < 0)
|
||||
|
@@ -1,601 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2014 Andrey Utkin
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for demuxing, decoding, filtering, encoding and muxing
|
||||
* @example transcoding.c
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/pixdesc.h>
|
||||
|
||||
static AVFormatContext *ifmt_ctx;
|
||||
static AVFormatContext *ofmt_ctx;
|
||||
typedef struct FilteringContext {
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
} FilteringContext;
|
||||
static FilteringContext *filter_ctx;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
unsigned int i;
|
||||
|
||||
ifmt_ctx = NULL;
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
AVStream *stream;
|
||||
AVCodecContext *codec_ctx;
|
||||
stream = ifmt_ctx->streams[i];
|
||||
codec_ctx = stream->codec;
|
||||
/* Reencode video & audio and remux subtitles etc. */
|
||||
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|
||||
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
/* Open decoder */
|
||||
ret = avcodec_open2(codec_ctx,
|
||||
avcodec_find_decoder(codec_ctx->codec_id), NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
av_dump_format(ifmt_ctx, 0, filename, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int open_output_file(const char *filename)
|
||||
{
|
||||
AVStream *out_stream;
|
||||
AVStream *in_stream;
|
||||
AVCodecContext *dec_ctx, *enc_ctx;
|
||||
AVCodec *encoder;
|
||||
int ret;
|
||||
unsigned int i;
|
||||
|
||||
ofmt_ctx = NULL;
|
||||
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
|
||||
if (!ofmt_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
out_stream = avformat_new_stream(ofmt_ctx, NULL);
|
||||
if (!out_stream) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
|
||||
in_stream = ifmt_ctx->streams[i];
|
||||
dec_ctx = in_stream->codec;
|
||||
enc_ctx = out_stream->codec;
|
||||
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|
||||
|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
/* in this example, we choose transcoding to same codec */
|
||||
encoder = avcodec_find_encoder(dec_ctx->codec_id);
|
||||
if (!encoder) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* In this example, we transcode to same properties (picture size,
|
||||
* sample rate etc.). These properties can be changed for output
|
||||
* streams easily using filters */
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
enc_ctx->height = dec_ctx->height;
|
||||
enc_ctx->width = dec_ctx->width;
|
||||
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
|
||||
/* take first format from list of supported formats */
|
||||
enc_ctx->pix_fmt = encoder->pix_fmts[0];
|
||||
/* video time_base can be set to whatever is handy and supported by encoder */
|
||||
enc_ctx->time_base = dec_ctx->time_base;
|
||||
} else {
|
||||
enc_ctx->sample_rate = dec_ctx->sample_rate;
|
||||
enc_ctx->channel_layout = dec_ctx->channel_layout;
|
||||
enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
|
||||
/* take first format from list of supported formats */
|
||||
enc_ctx->sample_fmt = encoder->sample_fmts[0];
|
||||
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
|
||||
}
|
||||
|
||||
/* Third parameter can be used to pass settings to encoder */
|
||||
ret = avcodec_open2(enc_ctx, encoder, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
|
||||
return ret;
|
||||
}
|
||||
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else {
|
||||
/* if this stream must be remuxed */
|
||||
ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
|
||||
ifmt_ctx->streams[i]->codec);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
}
|
||||
av_dump_format(ofmt_ctx, 0, filename, 1);
|
||||
|
||||
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
/* init muxer, write output file header */
|
||||
ret = avformat_write_header(ofmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
||||
AVCodecContext *enc_ctx, const char *filter_spec)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
AVFilter *buffersrc = NULL;
|
||||
AVFilter *buffersink = NULL;
|
||||
AVFilterContext *buffersrc_ctx = NULL;
|
||||
AVFilterContext *buffersink_ctx = NULL;
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
AVFilterGraph *filter_graph = avfilter_graph_alloc();
|
||||
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
buffersrc = avfilter_get_by_name("buffer");
|
||||
buffersink = avfilter_get_by_name("buffersink");
|
||||
if (!buffersrc || !buffersink) {
|
||||
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num,
|
||||
dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
|
||||
(uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
|
||||
goto end;
|
||||
}
|
||||
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
buffersrc = avfilter_get_by_name("abuffer");
|
||||
buffersink = avfilter_get_by_name("abuffersink");
|
||||
if (!buffersrc || !buffersink) {
|
||||
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout =
|
||||
av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt),
|
||||
dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
|
||||
(uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
|
||||
(uint8_t*)&enc_ctx->channel_layout,
|
||||
sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
|
||||
(uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
goto end;
|
||||
}
|
||||
} else {
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if (!outputs->name || !inputs->name) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
/* Fill FilteringContext */
|
||||
fctx->buffersrc_ctx = buffersrc_ctx;
|
||||
fctx->buffersink_ctx = buffersink_ctx;
|
||||
fctx->filter_graph = filter_graph;
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int init_filters(void)
|
||||
{
|
||||
const char *filter_spec;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
|
||||
if (!filter_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
filter_ctx[i].buffersrc_ctx = NULL;
|
||||
filter_ctx[i].buffersink_ctx = NULL;
|
||||
filter_ctx[i].filter_graph = NULL;
|
||||
if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
|
||||
|| ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
|
||||
continue;
|
||||
|
||||
|
||||
if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
|
||||
filter_spec = "null"; /* passthrough (dummy) filter for video */
|
||||
else
|
||||
filter_spec = "anull"; /* passthrough (dummy) filter for audio */
|
||||
ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
|
||||
ofmt_ctx->streams[i]->codec, filter_spec);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
|
||||
int ret;
|
||||
int got_frame_local;
|
||||
AVPacket enc_pkt;
|
||||
int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
|
||||
(ifmt_ctx->streams[stream_index]->codec->codec_type ==
|
||||
AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
|
||||
|
||||
if (!got_frame)
|
||||
got_frame = &got_frame_local;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
|
||||
/* encode filtered frame */
|
||||
enc_pkt.data = NULL;
|
||||
enc_pkt.size = 0;
|
||||
av_init_packet(&enc_pkt);
|
||||
ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
|
||||
filt_frame, got_frame);
|
||||
av_frame_free(&filt_frame);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (!(*got_frame))
|
||||
return 0;
|
||||
|
||||
/* prepare packet for muxing */
|
||||
enc_pkt.stream_index = stream_index;
|
||||
enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts,
|
||||
ofmt_ctx->streams[stream_index]->codec->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts,
|
||||
ofmt_ctx->streams[stream_index]->codec->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
enc_pkt.duration = av_rescale_q(enc_pkt.duration,
|
||||
ofmt_ctx->streams[stream_index]->codec->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base);
|
||||
|
||||
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
|
||||
/* mux encoded frame */
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
|
||||
{
|
||||
int ret;
|
||||
AVFrame *filt_frame;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
|
||||
/* push the decoded frame into the filtergraph */
|
||||
ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
|
||||
frame, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
filt_frame = av_frame_alloc();
|
||||
if (!filt_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
break;
|
||||
}
|
||||
av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
|
||||
ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
|
||||
filt_frame);
|
||||
if (ret < 0) {
|
||||
/* if no more frames for output - returns AVERROR(EAGAIN)
|
||||
* if flushed and no more frames for output - returns AVERROR_EOF
|
||||
* rewrite retcode to 0 to show it as normal procedure completion
|
||||
*/
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
ret = 0;
|
||||
av_frame_free(&filt_frame);
|
||||
break;
|
||||
}
|
||||
|
||||
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
|
||||
ret = encode_write_frame(filt_frame, stream_index, NULL);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int flush_encoder(unsigned int stream_index)
|
||||
{
|
||||
int ret;
|
||||
int got_frame;
|
||||
|
||||
if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
|
||||
CODEC_CAP_DELAY))
|
||||
return 0;
|
||||
|
||||
while (1) {
|
||||
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
|
||||
ret = encode_write_frame(NULL, stream_index, &got_frame);
|
||||
if (ret < 0)
|
||||
break;
|
||||
if (!got_frame)
|
||||
return 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet = { .data = NULL, .size = 0 };
|
||||
AVFrame *frame = NULL;
|
||||
enum AVMediaType type;
|
||||
unsigned int stream_index;
|
||||
unsigned int i;
|
||||
int got_frame;
|
||||
int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
|
||||
|
||||
if (argc != 3) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
av_register_all();
|
||||
avfilter_register_all();
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = open_output_file(argv[2])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters()) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
stream_index = packet.stream_index;
|
||||
type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
|
||||
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
|
||||
stream_index);
|
||||
|
||||
if (filter_ctx[stream_index].filter_graph) {
|
||||
av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
break;
|
||||
}
|
||||
packet.dts = av_rescale_q_rnd(packet.dts,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ifmt_ctx->streams[stream_index]->codec->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
packet.pts = av_rescale_q_rnd(packet.pts,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ifmt_ctx->streams[stream_index]->codec->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
|
||||
avcodec_decode_audio4;
|
||||
ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
|
||||
&got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_frame_free(&frame);
|
||||
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
|
||||
break;
|
||||
}
|
||||
|
||||
if (got_frame) {
|
||||
frame->pts = av_frame_get_best_effort_timestamp(frame);
|
||||
ret = filter_encode_write_frame(frame, stream_index);
|
||||
av_frame_free(&frame);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
} else {
|
||||
av_frame_free(&frame);
|
||||
}
|
||||
} else {
|
||||
/* remux this frame without reencoding */
|
||||
packet.dts = av_rescale_q_rnd(packet.dts,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
packet.pts = av_rescale_q_rnd(packet.pts,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &packet);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
}
|
||||
av_free_packet(&packet);
|
||||
}
|
||||
|
||||
/* flush filters and encoders */
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
/* flush filter */
|
||||
if (!filter_ctx[i].filter_graph)
|
||||
continue;
|
||||
ret = filter_encode_write_frame(NULL, i);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* flush encoder */
|
||||
ret = flush_encoder(i);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
av_write_trailer(ofmt_ctx);
|
||||
end:
|
||||
av_free_packet(&packet);
|
||||
av_frame_free(&frame);
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
avcodec_close(ifmt_ctx->streams[i]->codec);
|
||||
if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
|
||||
avcodec_close(ofmt_ctx->streams[i]->codec);
|
||||
if (filter_ctx && filter_ctx[i].filter_graph)
|
||||
avfilter_graph_free(&filter_ctx[i].filter_graph);
|
||||
}
|
||||
av_free(filter_ctx);
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
|
||||
avio_close(ofmt_ctx->pb);
|
||||
avformat_free_context(ofmt_ctx);
|
||||
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
|
||||
|
||||
return ret ? 1 : 0;
|
||||
}
|
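To try the example above it can be built against the FFmpeg development libraries. A minimal sketch, assuming pkg-config can locate them (extra platform flags such as -lm may also be needed):

    gcc -o transcoding transcoding.c $(pkg-config --cflags --libs libavfilter libavformat libavcodec libavutil)
    ./transcoding input.mp4 output.mp4

Both file names here are placeholders; any container and codec combination supported by the local build can be used.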
@@ -392,7 +392,7 @@ VOB and a few other formats do not have a global header that describes
|
||||
everything present in the file. Instead, applications are supposed to scan
|
||||
the file to see what it contains. Since VOB files are frequently large, only
|
||||
the beginning is scanned. If the subtitles happen only later in the file,
|
||||
they will not be initially detected.
|
||||
|
||||
Some applications, including the @code{ffmpeg} command-line tool, can only
|
||||
work with streams that were detected during the initial scan; streams that
|
||||
|
@@ -90,8 +90,7 @@ the following diagram:
|
||||
| |
|
||||
| decoded |
|
||||
| frames |
|
||||
|_________|
|
||||
________ ______________ |
|
||||
________ ______________ |_________|
|
||||
| | | | |
|
||||
| output | <-------- | encoded data | <----+
|
||||
| file | muxer | packets | encoder
|
||||
@@ -124,16 +123,11 @@ the same type. In the above diagram they can be represented by simply inserting
|
||||
an additional step between decoding and encoding:
|
||||
|
||||
@example
|
||||
_________ ______________
|
||||
| | | |
|
||||
| decoded | | encoded data |
|
||||
| frames |\ _ | packets |
|
||||
|_________| \ /||______________|
|
||||
\ __________ /
|
||||
simple _\|| | / encoder
|
||||
filtergraph | filtered |/
|
||||
| frames |
|
||||
|__________|
|
||||
_________ __________ ______________
|
||||
| | simple | | | |
|
||||
| decoded | fltrgrph | filtered | encoder | encoded data |
|
||||
| frames | ----------> | frames | ---------> | packets |
|
||||
|_________| |__________| |______________|
|
||||
|
||||
@end example
|
||||
|
||||
@@ -272,13 +266,8 @@ ffmpeg -i INPUT -map 0 -c copy -c:v:1 libx264 -c:a:137 libvorbis OUTPUT
|
||||
will copy all the streams except the second video, which will be encoded with
|
||||
libx264, and the 138th audio, which will be encoded with libvorbis.
|
||||
|
||||
@item -t @var{duration} (@emph{input/output})
When used as an input option (before @code{-i}), limit the @var{duration} of
data read from the input file.

When used as an output option (before an output filename), stop writing the
output after its duration reaches @var{duration}.

@var{duration} may be a number in seconds, or in @code{hh:mm:ss[.xxx]} form.

-to and -t are mutually exclusive and -t has priority.
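For instance (illustrative file names), reading at most 30 seconds from the input while keeping only the first 10 seconds of output could look like:
@example
ffmpeg -t 30 -i input.mkv -t 10 output.mp4
@end example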
|
||||
@@ -534,7 +523,7 @@ filter the stream.
|
||||
This is an alias for @code{-filter:v}, see the @ref{filter_option,,-filter option}.
|
||||
@end table
|
||||
|
||||
@section Advanced Video options
|
||||
|
||||
@table @option
|
||||
@item -pix_fmt[:@var{stream_specifier}] @var{format} (@emph{input/output,per-stream})
|
||||
@@ -648,14 +637,8 @@ Do not use any hardware acceleration (the default).
|
||||
@item auto
|
||||
Automatically select the hardware acceleration method.
|
||||
|
||||
@item vda
|
||||
Use Apple VDA hardware acceleration.
|
||||
|
||||
@item vdpau
|
||||
Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
|
||||
|
||||
@item dxva2
|
||||
Use DXVA2 (DirectX Video Acceleration) hardware acceleration.
|
||||
@end table
|
||||
|
||||
This option has no effect if the selected hwaccel is not available or not
|
||||
@@ -678,10 +661,6 @@ method chosen.
|
||||
@item vdpau
|
||||
For VDPAU, this option specifies the X11 display/screen to use. If this option
|
||||
is not specified, the value of the @var{DISPLAY} environment variable is used
|
||||
|
||||
@item dxva2
|
||||
For DXVA2, this option should contain the number of the display adapter to use.
|
||||
If this option is not specified, the default adapter is used.
|
||||
@end table
|
||||
@end table
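A sketch of how this could be used; the option name @option{-hwaccel_device} and the display value are assumptions for illustration, not taken from this hunk:
@example
ffmpeg -hwaccel vdpau -hwaccel_device :0.0 -i input.mkv output.mkv
@end example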
|
||||
|
||||
@@ -717,7 +696,7 @@ filter the stream.
|
||||
This is an alias for @code{-filter:a}, see the @ref{filter_option,,-filter option}.
|
||||
@end table
|
||||
|
||||
@section Advanced Audio options
|
||||
|
||||
@table @option
|
||||
@item -atag @var{fourcc/tag} (@emph{output})
|
||||
@@ -732,7 +711,7 @@ stereo but not 6 channels as 5.1. The default is to always try to guess. Use
|
||||
0 to disable all guessing.
|
||||
@end table
|
||||
|
||||
@section Subtitle options
|
||||
|
||||
@table @option
|
||||
@item -scodec @var{codec} (@emph{input/output})
|
||||
@@ -743,7 +722,7 @@ Disable subtitle recording.
|
||||
Deprecated, see -bsf
|
||||
@end table
|
||||
|
||||
@section Advanced Subtitle options
|
||||
|
||||
@table @option
|
||||
|
||||
@@ -1146,30 +1125,6 @@ requested by @command{ffserver}.
|
||||
The option is intended for cases where features are needed that cannot be
|
||||
specified to @command{ffserver} but can be to @command{ffmpeg}.
|
||||
|
||||
@item -discard (@emph{input})
|
||||
Allows discarding specific streams or frames of streams at the demuxer.
|
||||
Not all demuxers support this.
|
||||
|
||||
@table @option
|
||||
@item none
|
||||
Discard no frame.
|
||||
|
||||
@item default
|
||||
Default, which discards no frames.
|
||||
|
||||
@item noref
|
||||
Discard all non-reference frames.
|
||||
|
||||
@item bidir
|
||||
Discard all bidirectional frames.
|
||||
|
||||
@item nokey
|
||||
Discard all frames except keyframes.
|
||||
|
||||
@item all
|
||||
Discard all frames.
|
||||
@end table
|
||||
|
||||
@end table
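As an illustration (hypothetical file names; whether frames can actually be dropped depends on the demuxer), discarding non-reference frames while remuxing could look like:
@example
ffmpeg -discard noref -i input.ts -c copy output.ts
@end example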
|
||||
|
||||
As a special exception, you can use a bitmap subtitle stream as input: it
|
||||
|
@@ -84,9 +84,6 @@ output. In the filtergraph, the input is associated to the label
|
||||
ffmpeg-filters manual for more information about the filtergraph
|
||||
syntax.
|
||||
|
||||
You can specify this parameter multiple times and cycle through the specified
|
||||
filtergraphs along with the show modes by pressing the key @key{w}.
|
||||
|
||||
@item -af @var{filtergraph}
|
||||
@var{filtergraph} is a description of the filtergraph to apply to
|
||||
the input audio.
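For example (hypothetical input file), a video filter and an audio filter can be combined in one invocation:
@example
ffplay -vf hflip -af "volume=0.5" input.mkv
@end example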
|
||||
@@ -162,10 +159,6 @@ Force a specific video decoder.
|
||||
|
||||
@item -scodec @var{codec_name}
|
||||
Force a specific subtitle decoder.
|
||||
|
||||
@item -autorotate
|
||||
Automatically rotate the video according to presentation metadata. Set by
|
||||
default, use -noautorotate to disable.
|
||||
@end table
|
||||
|
||||
@section While playing
|
||||
@@ -181,7 +174,7 @@ Toggle full screen.
|
||||
Pause.
|
||||
|
||||
@item a
|
||||
Cycle audio channel in the current program.
|
||||
|
||||
@item v
|
||||
Cycle video channel.
|
||||
@@ -193,7 +186,7 @@ Cycle subtitle channel in the current program.
|
||||
Cycle program.
|
||||
|
||||
@item w
|
||||
Cycle video filters or show modes.
|
||||
|
||||
@item s
|
||||
Step to the next frame.
|
||||
|
@@ -8,15 +8,15 @@
|
||||
|
||||
<xsd:complexType name="ffprobeType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
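<!-- Illustrative only (not part of the schema): output conforming to ffprobeType
     can be requested with a command such as
         ffprobe -of xml -show_format -show_streams input.mkv
     where the input file name is a placeholder. -->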
|
||||
|
||||
@@ -53,10 +53,6 @@
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
@@ -151,8 +147,6 @@
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int"/>
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
@@ -171,7 +165,6 @@
|
||||
<xsd:attribute name="duration_ts" type="xsd:long"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="max_bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_packets" type="xsd:int"/>
|
||||
@@ -226,7 +219,8 @@
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
|
@@ -44,8 +44,8 @@ streams of this type.
|
||||
If @var{stream_index} is given, then it matches the stream with number @var{stream_index}
|
||||
in the program with the id @var{program_id}. Otherwise, it matches all streams in the
|
||||
program.
|
||||
@item #@var{stream_id} or i:@var{stream_id}
Match the stream by stream id (e.g. PID in MPEG-TS container).
|
||||
@end table
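For example (the PID value is a placeholder), copying only the stream with a given MPEG-TS PID might look like:
@example
ffmpeg -i input.ts -map 0:i:0x101 -c copy out.ts
@end example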
|
||||
|
||||
@section Generic options
|
||||
@@ -189,8 +189,6 @@ following option is recognized:
|
||||
set the file name to use for the report; @code{%p} is expanded to the name
|
||||
of the program, @code{%t} is expanded to a timestamp, @code{%%} is expanded
|
||||
to a plain @code{%}
|
||||
@item level
|
||||
set the log level
|
||||
@end table
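As an illustration (file name pattern and level value are examples only; 40 corresponds to verbose logging):
@example
FFREPORT=file=ffreport-%p-%t.log:level=40 ffmpeg -i input.mkv output.mp4
@end example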
|
||||
|
||||
Errors in parsing the environment variable are not fatal, and will not
|
||||
|
doc/filters.texi: 1468 changed lines (file diff suppressed because it is too large)
@@ -245,7 +245,6 @@ library:
|
||||
@tab Multimedia format used by Delphine Software games.
|
||||
@item CD+G @tab @tab X
|
||||
@tab Video format used by CD+G karaoke disks
|
||||
@item Phantom Cine @tab @tab X
|
||||
@item Commodore CDXL @tab @tab X
|
||||
@tab Amiga CD video format
|
||||
@item Core Audio Format @tab X @tab X
|
||||
@@ -259,7 +258,6 @@ library:
|
||||
@item Deluxe Paint Animation @tab @tab X
|
||||
@item DFA @tab @tab X
|
||||
@tab This format is used in Chronomaster game
|
||||
@item DSD Stream File (DSF) @tab @tab X
|
||||
@item DV video @tab X @tab X
|
||||
@item DXA @tab @tab X
|
||||
@tab This format is used in the non-Windows version of the Feeble Files
|
||||
@@ -310,11 +308,9 @@ library:
|
||||
@tab Used by Linux Media Labs MPEG-4 PCI boards
|
||||
@item LOAS @tab @tab X
|
||||
@tab contains LATM multiplexed AAC audio
|
||||
@item LRC @tab X @tab X
|
||||
@item LVF @tab @tab X
|
||||
@item LXF @tab @tab X
|
||||
@tab VR native stream format, used by Leitch/Harris' video servers.
|
||||
@item Magic Lantern Video (MLV) @tab @tab X
|
||||
@item Matroska @tab X @tab X
|
||||
@item Matroska audio @tab X @tab
|
||||
@item FFmpeg metadata @tab X @tab X
|
||||
@@ -492,13 +488,11 @@ following image formats are supported:
|
||||
@item Name @tab Encoding @tab Decoding @tab Comments
|
||||
@item .Y.U.V @tab X @tab X
|
||||
@tab one raw file per component
|
||||
@item Alias PIX @tab X @tab X
|
||||
@tab Alias/Wavefront PIX image format
|
||||
@item animated GIF @tab X @tab X
|
||||
@item BMP @tab X @tab X
|
||||
@tab Microsoft BMP image
|
||||
@item BRender PIX @tab @tab X
|
||||
@tab Argonaut BRender 3D engine image format.
|
||||
@item PIX @tab @tab X
|
||||
@tab PIX is an image format used in the Argonaut BRender engine.
|
||||
@item DPX @tab X @tab X
|
||||
@tab Digital Picture Exchange
|
||||
@item EXR @tab @tab X
|
||||
@@ -686,8 +680,8 @@ following image formats are supported:
|
||||
@item LCL (LossLess Codec Library) MSZH @tab @tab X
|
||||
@item LCL (LossLess Codec Library) ZLIB @tab E @tab E
|
||||
@item LOCO @tab @tab X
|
||||
@item LucasArts SANM/Smush @tab @tab X
|
||||
@tab Used in LucasArts games / SMUSH animations.
|
||||
@item LucasArts Smush @tab @tab X
|
||||
@tab Used in LucasArts games.
|
||||
@item lossless MJPEG @tab X @tab X
|
||||
@item Microsoft ATC Screen @tab @tab X
|
||||
@tab Also known as Microsoft Screen 3.
|
||||
@@ -722,8 +716,6 @@ following image formats are supported:
|
||||
@tab fourcc: VP50
|
||||
@item On2 VP6 @tab @tab X
|
||||
@tab fourcc: VP60,VP61,VP62
|
||||
@item On2 VP7 @tab @tab X
|
||||
@tab fourcc: VP70,VP71
|
||||
@item VP8 @tab E @tab X
|
||||
@tab fourcc: VP80, encoding supported through external library libvpx
|
||||
@item VP9 @tab E @tab X
|
||||
@@ -753,11 +745,11 @@ following image formats are supported:
|
||||
@tab Texture dictionaries used by the Renderware Engine.
|
||||
@item RL2 video @tab @tab X
|
||||
@tab used in some games by Entertainment Software Partners
|
||||
@item SGI RLE 8-bit @tab @tab X
|
||||
@item Sierra VMD video @tab @tab X
|
||||
@tab Used in Sierra VMD files.
|
||||
@item Silicon Graphics Motion Video Compressor 1 (MVC1) @tab @tab X
|
||||
@item Silicon Graphics Motion Video Compressor 2 (MVC2) @tab @tab X
|
||||
@item Silicon Graphics RLE 8-bit video @tab @tab X
|
||||
@item Smacker video @tab @tab X
|
||||
@tab Video encoding used in Smacker.
|
||||
@item SMPTE VC-1 @tab @tab X
|
||||
@@ -823,7 +815,7 @@ following image formats are supported:
|
||||
@tab encoding supported through external library libaacplus
|
||||
@item AAC @tab E @tab X
|
||||
@tab encoding supported through external library libfaac and libvo-aacenc
|
||||
@item AC-3 @tab IX @tab IX
|
||||
@item AC-3 @tab IX @tab X
|
||||
@item ADPCM 4X Movie @tab @tab X
|
||||
@item ADPCM CDROM XA @tab @tab X
|
||||
@item ADPCM Creative Technology @tab @tab X
|
||||
@@ -867,8 +859,6 @@ following image formats are supported:
|
||||
@item ADPCM Sound Blaster Pro 2-bit @tab @tab X
|
||||
@item ADPCM Sound Blaster Pro 2.6-bit @tab @tab X
|
||||
@item ADPCM Sound Blaster Pro 4-bit @tab @tab X
|
||||
@item ADPCM VIMA
|
||||
@tab Used in LucasArts SMUSH animations.
|
||||
@item ADPCM Westwood Studios IMA @tab @tab X
|
||||
@tab Used in Westwood Studios games like Command and Conquer.
|
||||
@item ADPCM Yamaha @tab X @tab X
|
||||
@@ -901,10 +891,6 @@ following image formats are supported:
|
||||
@item DPCM Sol @tab @tab X
|
||||
@item DPCM Xan @tab @tab X
|
||||
@tab Used in Origin's Wing Commander IV AVI files.
|
||||
@item DSD (Direct Stream Digitial), least significant bit first @tab @tab X
|
||||
@item DSD (Direct Stream Digitial), most significant bit first @tab @tab X
|
||||
@item DSD (Direct Stream Digitial), least significant bit first, planar @tab @tab X
|
||||
@item DSD (Direct Stream Digitial), most significant bit first, planar @tab @tab X
|
||||
@item DSP Group TrueSpeech @tab @tab X
|
||||
@item DV audio @tab @tab X
|
||||
@item Enhanced AC-3 @tab X @tab X
|
||||
@@ -927,14 +913,13 @@ following image formats are supported:
|
||||
@item Monkey's Audio @tab @tab X
|
||||
@item MP1 (MPEG audio layer 1) @tab @tab IX
|
||||
@item MP2 (MPEG audio layer 2) @tab IX @tab IX
|
||||
@tab encoding supported also through external library TwoLAME
|
||||
@tab libtwolame can be used alternatively for encoding.
|
||||
@item MP3 (MPEG audio layer 3) @tab E @tab IX
|
||||
@tab encoding supported through external library LAME, ADU MP3 and MP3onMP4 also supported
|
||||
@item MPEG-4 Audio Lossless Coding (ALS) @tab @tab X
|
||||
@item Musepack SV7 @tab @tab X
|
||||
@item Musepack SV8 @tab @tab X
|
||||
@item Nellymoser Asao @tab X @tab X
|
||||
@item On2 AVC (Audio for Video Codec) @tab @tab X
|
||||
@item Opus @tab E @tab E
|
||||
@tab supported through external library libopus
|
||||
@item PCM A-law @tab X @tab X
|
||||
@@ -1037,7 +1022,7 @@ performance on systems without hardware floating point support).
|
||||
@item TED Talks captions @tab @tab X @tab @tab X
|
||||
@item VobSub (IDX+SUB) @tab @tab X @tab @tab X
|
||||
@item VPlayer @tab @tab X @tab @tab X
|
||||
@item WebVTT @tab X @tab X @tab X @tab X
|
||||
@item WebVTT @tab X @tab X @tab @tab X
|
||||
@item XSUB @tab @tab @tab X @tab X
|
||||
@end multitable
|
||||
|
||||
@@ -1065,7 +1050,6 @@ performance on systems without hardware floating point support).
|
||||
@item RTMPTE @tab X
|
||||
@item RTMPTS @tab X
|
||||
@item RTP @tab X
|
||||
@item SAMBA @tab E
|
||||
@item SCTP @tab X
|
||||
@item SFTP @tab E
|
||||
@item TCP @tab X
|
||||
@@ -1099,7 +1083,6 @@ performance on systems without hardware floating point support).
|
||||
@item Video4Linux2 @tab X @tab X
|
||||
@item VfW capture @tab X @tab
|
||||
@item X11 grabbing @tab X @tab
|
||||
@item Win32 grabbing @tab X @tab
|
||||
@end multitable
|
||||
|
||||
@code{X} means that input/output is supported.
|
||||
|
doc/indevs.texi: 156 changed lines
@@ -13,8 +13,8 @@ You can disable all the input devices using the configure option
|
||||
option "--enable-indev=@var{INDEV}", or you can disable a particular
|
||||
input device using the option "--disable-indev=@var{INDEV}".
|
||||
|
||||
The option "-devices" of the ff* tools will display the list of
|
||||
supported input devices.
|
||||
|
||||
A description of the currently available input devices follows.
|
||||
|
||||
@@ -51,41 +51,6 @@ ffmpeg -f alsa -i hw:0 alsaout.wav
|
||||
For more information see:
|
||||
@url{http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html}
|
||||
|
||||
@section avfoundation
|
||||
|
||||
AVFoundation input device.
|
||||
|
||||
AVFoundation is the currently recommended framework by Apple for stream grabbing on OSX >= 10.7 as well as on iOS.
|
||||
The older QTKit framework has been marked deprecated since OSX version 10.7.
|
||||
|
||||
The filename passed as input is parsed to contain either a device name or index.
|
||||
The device index can also be given by using -video_device_index.
|
||||
A given device index will override any given device name.
|
||||
If the desired device consists of numbers only, use -video_device_index to identify it.
|
||||
The default device will be chosen if an empty string or the device name "default" is given.
|
||||
The available devices can be enumerated by using -list_devices.
|
||||
The pixel format can be set using -pixel_format.
|
||||
Available formats:
|
||||
monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
|
||||
bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
|
||||
yuv420p, nv12, yuyv422, gray
|
||||
|
||||
@example
|
||||
ffmpeg -f avfoundation -i "0" out.mpg
|
||||
@end example
|
||||
|
||||
@example
|
||||
ffmpeg -f avfoundation -video_device_index 0 -i "" out.mpg
|
||||
@end example
|
||||
|
||||
@example
|
||||
ffmpeg -f avfoundation -pixel_format bgr0 -i "default" out.mpg
|
||||
@end example
|
||||
|
||||
@example
|
||||
ffmpeg -f avfoundation -list_devices true -i ""
|
||||
@end example
|
||||
|
||||
@section bktr
|
||||
|
||||
BSD video input device.
|
||||
@@ -227,81 +192,6 @@ ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
|
||||
|
||||
See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).
|
||||
|
||||
@section gdigrab
|
||||
|
||||
Win32 GDI-based screen capture device.
|
||||
|
||||
This device allows you to capture a region of the display on Windows.
|
||||
|
||||
There are two options for the input filename:
|
||||
@example
|
||||
desktop
|
||||
@end example
|
||||
or
|
||||
@example
|
||||
title=@var{window_title}
|
||||
@end example
|
||||
|
||||
The first option will capture the entire desktop, or a fixed region of the
|
||||
desktop. The second option will instead capture the contents of a single
|
||||
window, regardless of its position on the screen.
|
||||
|
||||
For example, to grab the entire desktop using @command{ffmpeg}:
|
||||
@example
|
||||
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
|
||||
@end example
|
||||
|
||||
Grab a 640x480 region at position @code{10,20}:
|
||||
@example
|
||||
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
|
||||
@end example
|
||||
|
||||
Grab the contents of the window named "Calculator"
|
||||
@example
|
||||
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
|
||||
@end example
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item draw_mouse
|
||||
Specify whether to draw the mouse pointer. Use the value @code{0} to
|
||||
not draw the pointer. Default value is @code{1}.
|
||||
|
||||
@item framerate
|
||||
Set the grabbing frame rate. Default value is @code{ntsc},
|
||||
corresponding to a frame rate of @code{30000/1001}.
|
||||
|
||||
@item show_region
|
||||
Show grabbed region on screen.
|
||||
|
||||
If @var{show_region} is specified with @code{1}, then the grabbing
|
||||
region will be indicated on screen. With this option, it is easy to
|
||||
know what is being grabbed if only a portion of the screen is grabbed.
|
||||
|
||||
Note that @var{show_region} is incompatible with grabbing the contents
|
||||
of a single window.
|
||||
|
||||
For example:
|
||||
@example
|
||||
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
|
||||
@end example
|
||||
|
||||
@item video_size
|
||||
Set the video frame size. The default is to capture the full screen if @file{desktop} is selected, or the full window size if @file{title=@var{window_title}} is selected.
|
||||
|
||||
@item offset_x
|
||||
When capturing a region with @var{video_size}, set the distance from the left edge of the screen or desktop.
|
||||
|
||||
Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative @var{offset_x} value to move the region to that monitor.
|
||||
|
||||
@item offset_y
|
||||
When capturing a region with @var{video_size}, set the distance from the top edge of the screen or desktop.
|
||||
|
||||
Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative @var{offset_y} value to move the region to that monitor.
|
||||
|
||||
@end table
|
||||
|
||||
@section iec61883
|
||||
|
||||
FireWire DV/HDV input device using libiec61883.
|
||||
@@ -483,21 +373,6 @@ ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
|
||||
|
||||
@end itemize
|
||||
|
||||
@section libcdio
|
||||
|
||||
Audio-CD input device based on cdio.
|
||||
|
||||
To enable this input device during configuration you need libcdio
|
||||
installed on your system.
|
||||
|
||||
This device allows playing and grabbing from an Audio-CD.
|
||||
|
||||
For example to copy with @command{ffmpeg} the entire Audio-CD in /dev/sr0,
|
||||
you may run the command:
|
||||
@example
|
||||
ffmpeg -f libcdio -i /dev/sr0 cd.wav
|
||||
@end example
|
||||
|
||||
@section libdc1394
|
||||
|
||||
IIDC1394 input device, based on libdc1394 and libraw1394.
|
||||
@@ -656,33 +531,6 @@ Record a stream from default device:
|
||||
ffmpeg -f pulse -i default /tmp/pulse.wav
|
||||
@end example
|
||||
|
||||
@section qtkit
|
||||
|
||||
QTKit input device.
|
||||
|
||||
The filename passed as input is parsed to contain either a device name or index.
|
||||
The device index can also be given by using -video_device_index.
|
||||
A given device index will override any given device name.
|
||||
If the desired device consists of numbers only, use -video_device_index to identify it.
|
||||
The default device will be chosen if an empty string or the device name "default" is given.
|
||||
The available devices can be enumerated by using -list_devices.
|
||||
|
||||
@example
|
||||
ffmpeg -f qtkit -i "0" out.mpg
|
||||
@end example
|
||||
|
||||
@example
|
||||
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
|
||||
@end example
|
||||
|
||||
@example
|
||||
ffmpeg -f qtkit -i "default" out.mpg
|
||||
@end example
|
||||
|
||||
@example
|
||||
ffmpeg -f qtkit -list_devices true -i ""
|
||||
@end example
|
||||
|
||||
@section sndio
|
||||
|
||||
sndio input device.
|
||||
|
doc/muxers.texi: 111 changed lines
@@ -233,10 +233,6 @@ to @var{wrap}.
|
||||
Start the playlist sequence number from @var{number}. Default value is
|
||||
0.
|
||||
|
||||
@item hls_base_url @var{baseurl}
|
||||
Append @var{baseurl} to every entry in the playlist.
|
||||
Useful to generate playlists with absolute paths.
|
||||
|
||||
Note that the playlist sequence number must be unique for each segment
|
||||
and it is not to be confused with the segment filename sequence number
|
||||
which can be cyclic, for example if the @option{wrap} option is
|
||||
@@ -637,10 +633,7 @@ Set the first PID for data packets (default 0x0100, max 0x0f00).
|
||||
@item -mpegts_m2ts_mode @var{number}
|
||||
Enable m2ts mode if set to 1. Default value is -1 which disables m2ts mode.
|
||||
@item -muxrate @var{number}
|
||||
Set a constant muxrate (default VBR).
|
||||
@item -pcr_period @var{number}
Override the default PCR retransmission time (default 20ms), ignored
if variable muxrate is selected.
|
||||
@item -pes_payload_size @var{number}
|
||||
Set minimum PES packet payload in bytes.
|
||||
@item -mpegts_flags @var{flags}
|
||||
@@ -709,30 +702,6 @@ Alternatively you can write the command as:
|
||||
ffmpeg -benchmark -i INPUT -f null -
|
||||
@end example
|
||||
|
||||
@section nut
|
||||
|
||||
@table @option
|
||||
@item -syncpoints @var{flags}
|
||||
Change the syncpoint usage in nut:
|
||||
@table @option
|
||||
@item @var{default} use the normal low-overhead seeking aids.
|
||||
@item @var{none} do not use the syncpoints at all, reducing the overhead but making the stream non-seekable;
|
||||
Use of this option is not recommended, as the resulting files are very damage
|
||||
sensitive and seeking is not possible. Also in general the overhead from
|
||||
syncpoints is negligible. Note, -@code{write_index} 0 can be used to disable
|
||||
all growing data tables, allowing one to mux endless streams with limited memory
and without these disadvantages.
|
||||
@item @var{timestamped} extend the syncpoint with a wallclock field.
|
||||
@end table
|
||||
The @var{none} and @var{timestamped} flags are experimental.
|
||||
@item -write_index @var{bool}
|
||||
Write index at the end, the default is to write an index.
|
||||
@end table
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT -f_strict experimental -syncpoints none - | processor
|
||||
@end example
|
||||
|
||||
@section ogg
|
||||
|
||||
Ogg container muxer.
|
||||
@@ -814,21 +783,17 @@ Allow caching (only affects M3U8 list files).
|
||||
Allow live-friendly file generation.
|
||||
@end table
|
||||
|
||||
@item segment_list_type @var{type}
|
||||
Select the listing format.
|
||||
@table @option
|
||||
@item @var{flat} use a simple flat list of entries.
|
||||
@item @var{hls} use a m3u8-like structure.
|
||||
@end table
|
||||
|
||||
@item segment_list_size @var{size}
|
||||
Update the list file so that it contains at most the last @var{size}
|
||||
segments. If 0 the list file will contain all the segments. Default
|
||||
value is 0.
|
||||
|
||||
@item segment_list_entry_prefix @var{prefix}
|
||||
Prepend @var{prefix} to each entry. Useful to generate absolute paths.
|
||||
By default no prefix is applied.
|
||||
Set @var{prefix} to prepend to the name of each entry filename. By
|
||||
default no prefix is applied.
|
||||
|
||||
@item segment_list_type @var{type}
|
||||
Specify the format for the segment list file.
|
||||
|
||||
The following values are recognized:
|
||||
@table @samp
|
||||
@@ -879,16 +844,6 @@ Note that splitting may not be accurate, unless you force the
|
||||
reference stream key-frames at the given time. See the introductory
|
||||
notice and the examples below.
|
||||
|
||||
@item segment_atclocktime @var{1|0}
|
||||
If set to "1" split at regular clock time intervals starting from 00:00
|
||||
o'clock. The @var{time} value specified in @option{segment_time} is
|
||||
used for setting the length of the splitting interval.
|
||||
|
||||
For example with @option{segment_time} set to "900" this makes it possible
|
||||
to create files at 12:00 o'clock, 12:15, 12:30, etc.
|
||||
|
||||
Default value is "0".
|
||||
|
||||
@item segment_time_delta @var{delta}
|
||||
Specify the accuracy time when selecting the start time for a
|
||||
segment, expressed as a duration specification. Default value is "0".
|
||||
@@ -994,28 +949,6 @@ ffmpeg -re -i in.mkv -codec copy -map 0 -f segment -segment_list playlist.m3u8 \
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section smoothstreaming
|
||||
|
||||
Smooth Streaming muxer generates a set of files (Manifest, chunks) suitable for serving with conventional web server.
|
||||
|
||||
@table @option
|
||||
@item window_size
|
||||
Specify the number of fragments kept in the manifest. Default 0 (keep all).
|
||||
|
||||
@item extra_window_size
|
||||
Specify the number of fragments kept outside of the manifest before removing from disk. Default 5.
|
||||
|
||||
@item lookahead_count
|
||||
Specify the number of lookahead fragments. Default 2.
|
||||
|
||||
@item min_frag_duration
|
||||
Specify the minimum fragment duration (in microseconds). Default 5000000.
|
||||
|
||||
@item remove_at_exit
|
||||
Specify whether to remove all fragments when finished. Default 0 (do not remove).
|
||||
|
||||
@end table
|
||||
|
||||
@section tee
|
||||
|
||||
The tee muxer can be used to write the same data to several files or any
|
||||
@@ -1053,7 +986,7 @@ It is possible to specify to which streams a given bitstream filter
|
||||
applies, by appending a stream specifier to the option separated by
|
||||
@code{/}. @var{spec} must be a stream specifier (see @ref{Format
|
||||
stream specifiers}). If the stream specifier is not specified, the
|
||||
bitstream filters will be applied to all streams in the output.
|
||||
bistream filters will be applied to all streams in the output.
|
||||
|
||||
Several bitstream filters can be specified, separated by ",".
|
||||
|
||||
@@ -1100,34 +1033,4 @@ Note: some codecs may need different options depending on the output format;
|
||||
the auto-detection of this can not work with the tee muxer. The main example
|
||||
is the @option{global_header} flag.
|
||||
|
||||
@section webm_dash_manifest
|
||||
|
||||
WebM DASH Manifest muxer.
|
||||
|
||||
This muxer implements the WebM DASH Manifest specification to generate the DASH manifest XML.
|
||||
|
||||
@subsection Options
|
||||
|
||||
This muxer supports the following options:
|
||||
|
||||
@table @option
|
||||
@item adaptation_sets
|
||||
This option has the following syntax: "id=x,streams=a,b,c id=y,streams=d,e" where x and y are the
|
||||
unique identifiers of the adaptation sets and a,b,c,d and e are the indices of the corresponding
|
||||
audio and video streams. Any number of adaptation sets can be added using this option.
|
||||
@end table
|
||||
|
||||
@subsection Example
|
||||
@example
|
||||
ffmpeg -f webm_dash_manifest -i video1.webm \
|
||||
-f webm_dash_manifest -i video2.webm \
|
||||
-f webm_dash_manifest -i audio1.webm \
|
||||
-f webm_dash_manifest -i audio2.webm \
|
||||
-map 0 -map 1 -map 2 -map 3 \
|
||||
-c copy \
|
||||
-f webm_dash_manifest \
|
||||
-adaptation_sets "id=0,streams=0,1 id=1,streams=2,3" \
|
||||
manifest.xml
|
||||
@end example
|
||||
|
||||
@c man end MUXERS
|
||||
|
doc/nut.texi: 21 changed lines
@@ -21,27 +21,6 @@ The official nut specification is at svn://svn.mplayerhq.hu/nut
|
||||
In case of any differences between this text and the official specification,
|
||||
the official specification shall prevail.
|
||||
|
||||
@chapter Modes
|
||||
NUT has some variants signaled by using the flags field in its main header.
|
||||
|
||||
@multitable @columnfractions .4 .4
|
||||
@item BROADCAST @tab Extend the syncpoint to report the sender wallclock
|
||||
@item PIPE @tab Omit completely the syncpoint
|
||||
@end multitable
|
||||
|
||||
@section BROADCAST
|
||||
|
||||
The BROADCAST variant provides a secondary time reference to facilitate
|
||||
detecting endpoint latency and network delays.
|
||||
It assumes all the endpoint clocks are synchronized.
|
||||
To be used in real-time scenarios.
|
||||
|
||||
@section PIPE
|
||||
|
||||
The PIPE variant assumes NUT is used as non-seekable intermediate container,
|
||||
by not using syncpoints it removes unneeded overhead and reduces the overall
|
||||
memory usage.
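A sketch of this use case (the consumer command is a placeholder), combined with the muxer-level @option{-syncpoints none} flag described in the muxers documentation:
@example
ffmpeg -i input.mkv -f_strict experimental -syncpoints none -f nut - | consumer
@end example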
|
||||
|
||||
@chapter Container-specific codec tags
|
||||
|
||||
@section Generic raw YUVA formats
|
||||
|
@@ -79,6 +79,9 @@ qpel{8,16}_mc??_old_c / *pixels{8,16}_l4
|
||||
Just used to work around a bug in an old libavcodec encoder version.
|
||||
Don't optimize them.
|
||||
|
||||
tpel_mc_func {put,avg}_tpel_pixels_tab
|
||||
Used only for SVQ3, so only optimize them if you need fast SVQ3 decoding.
|
||||
|
||||
add_bytes/diff_bytes
|
||||
For huffyuv only, optimize if you want a faster ffhuffyuv codec.
|
||||
|
||||
@@ -136,6 +139,9 @@ dct_unquantize_mpeg2
|
||||
dct_unquantize_h263
|
||||
Used in MPEG-4/H.263 en/decoding.
|
||||
|
||||
FIXME remaining functions?
|
||||
BTW, most of these functions are in dsputil.c/.h, some are in mpegvideo.c/.h.
|
||||
|
||||
|
||||
|
||||
Alignment:
|
||||
@@ -262,6 +268,17 @@ CELL/SPU:
|
||||
http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/30B3520C93F437AB87257060006FFE5E/$file/Language_Extensions_for_CBEA_2.4.pdf
|
||||
http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/9F820A5FFA3ECE8C8725716A0062585F/$file/CBE_Handbook_v1.1_24APR2007_pub.pdf
|
||||
|
||||
SPARC-specific:
|
||||
---------------
|
||||
SPARC Joint Programming Specification (JPS1): Commonality
|
||||
http://www.fujitsu.com/downloads/PRMPWR/JPS1-R1.0.4-Common-pub.pdf
|
||||
|
||||
UltraSPARC III Processor User's Manual (contains instruction timings)
|
||||
http://www.sun.com/processors/manuals/USIIIv2.pdf
|
||||
|
||||
VIS Whitepaper (contains optimization guidelines)
|
||||
http://www.sun.com/processors/vis/download/vis/vis_whitepaper.pdf
|
||||
|
||||
GCC asm links:
|
||||
--------------
|
||||
official doc but quite ugly
|
||||
|
@@ -13,8 +13,8 @@ You can disable all the output devices using the configure option
|
||||
option "--enable-outdev=@var{OUTDEV}", or you can disable a particular
|
||||
output device using the option "--disable-outdev=@var{OUTDEV}".
|
||||
|
||||
The option "-devices" of the ff* tools will display the list of
|
||||
enabled output devices.
|
||||
|
||||
A description of the currently available output devices follows.
|
||||
|
||||
@@ -220,11 +220,11 @@ This output device allows one to render to OpenGL context.
|
||||
Context may be provided by application or default SDL window is created.
|
||||
|
||||
When device renders to external context, application must implement handlers for following messages:
|
||||
@code{AV_DEV_TO_APP_CREATE_WINDOW_BUFFER} - create OpenGL context on current thread.
|
||||
@code{AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER} - make OpenGL context current.
|
||||
@code{AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER} - swap buffers.
|
||||
@code{AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER} - destroy OpenGL context.
|
||||
Application is also required to inform a device about current resolution by sending @code{AV_APP_TO_DEV_WINDOW_SIZE} message.
|
||||
@code{AV_CTL_MESSAGE_CREATE_WINDOW_BUFFER} - create OpenGL context on current thread.
|
||||
@code{AV_CTL_MESSAGE_PREPARE_WINDOW_BUFFER} - make OpenGL context current.
|
||||
@code{AV_CTL_MESSAGE_DISPLAY_WINDOW_BUFFER} - swap buffers.
|
||||
@code{AV_CTL_MESSAGE_DESTROY_WINDOW_BUFFER} - destroy OpenGL context.
|
||||
Application is also required to inform a device about current resolution by sending @code{AV_DEVICE_WINDOW_RESIZED} message.
|
||||
|
||||
@subsection Options
|
||||
@table @option
|
||||
@@ -237,10 +237,6 @@ Application must provide OpenGL context and both @code{window_size_cb} and @code
|
||||
@item window_title
|
||||
Set the SDL window title, if not specified default to the filename specified for the output device.
|
||||
Ignored when @option{no_window} is set.
|
||||
@item window_size
|
||||
Set preferred window size, can be a string of the form widthxheight or a video size abbreviation.
|
||||
If not specified it defaults to the size of the input video, downscaled according to the aspect ratio.
|
||||
Mostly usable when @option{no_window} is not set.
|
||||
|
||||
@end table
|
||||
|
||||
@@ -294,20 +290,6 @@ When both options are provided then the highest value is used
|
||||
are set to 0 (which is default), the device will use the default
|
||||
PulseAudio duration value. By default PulseAudio set buffer duration
|
||||
to around 2 seconds.
|
||||
|
||||
@item prebuf
|
||||
Specify pre-buffering size in bytes. The server does not start with
|
||||
playback before at least @option{prebuf} bytes are available in the
|
||||
buffer. By default this option is initialized to the same value as
|
||||
@option{buffer_size} or @option{buffer_duration} (whichever is bigger).
|
||||
|
||||
@item minreq
|
||||
Specify minimum request size in bytes. The server does not request less
|
||||
than @option{minreq} bytes from the client, instead waits until the buffer
|
||||
is free enough to request more bytes at once. It is recommended to not set
|
||||
this option, which will initialize this to a value that is deemed sensible
|
||||
by the server.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -406,26 +388,19 @@ For example, @code{dual-headed:0.1} would specify screen 1 of display
|
||||
Check the X11 specification for more detailed information about the
|
||||
display name format.
|
||||
|
||||
@item window_id
|
||||
When set to non-zero value then device doesn't create new window,
|
||||
but uses existing one with provided @var{window_id}. By default
|
||||
this options is set to zero and device creates its own window.
|
||||
|
||||
@item window_size
|
||||
Set the created window size, can be a string of the form
|
||||
@var{width}x@var{height} or a video size abbreviation. If not
|
||||
specified it defaults to the size of the input video.
|
||||
Ignored when @var{window_id} is set.
|
||||
|
||||
@item window_x
|
||||
@item window_y
|
||||
Set the X and Y window offsets for the created window. They are both
|
||||
set to 0 by default. The values may be ignored by the window manager.
|
||||
Ignored when @var{window_id} is set.
|
||||
|
||||
@item window_title
|
||||
Set the window title, if not specified default to the filename
|
||||
specified for the output device. Ignored when @var{window_id} is set.
|
||||
specified for the output device.
|
||||
@end table
|
||||
|
||||
For more information about XVideo see @url{http://www.x.org/}.
|
||||
|
@@ -24,17 +24,6 @@ If not, then you should install a different compiler that has no
|
||||
hard-coded path to gas. In the worst case pass @code{--disable-asm}
|
||||
to configure.
|
||||
|
||||
@section Advanced linking configuration
|
||||
|
||||
If you compiled FFmpeg libraries statically and you want to use them to
|
||||
build your own shared library, you may need to force PIC support (with
|
||||
@code{--enable-pic} during FFmpeg configure) and add the following option
|
||||
to your project LDFLAGS:
|
||||
|
||||
@example
|
||||
-Wl,-Bsymbolic
|
||||
@end example
|
||||
|
||||
@section BSD
|
||||
|
||||
BSD make will not build FFmpeg; you need to install and use GNU Make
|
||||
@@ -63,14 +52,14 @@ unacelerated code.
|
||||
|
||||
Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
|
||||
@url{https://github.com/FFmpeg/gas-preprocessor} or
|
||||
@url{https://github.com/yuvi/gas-preprocessor} (currently outdated) to build the optimized
|
||||
assembly functions. Put the Perl script somewhere
|
||||
@url{http://github.com/yuvi/gas-preprocessor} to build the optimized
|
||||
assembler functions. Put the Perl script somewhere
|
||||
in your PATH, FFmpeg's configure will pick it up automatically.
|
||||
|
||||
Mac OS X on amd64 and x86 requires @command{yasm} to build most of the
|
||||
optimized assembly functions. @uref{http://www.finkproject.org/, Fink},
|
||||
optimized assembler functions. @uref{http://www.finkproject.org/, Fink},
|
||||
@uref{http://www.gentoo.org/proj/en/gentoo-alt/prefix/bootstrap-macos.xml, Gentoo Prefix},
|
||||
@uref{https://mxcl.github.com/homebrew/, Homebrew}
|
||||
@uref{http://mxcl.github.com/homebrew/, Homebrew}
|
||||
or @uref{http://www.macports.org, MacPorts} can easily provide it.
|
||||
|
||||
|
||||
|
@@ -166,7 +166,7 @@ This protocol accepts the following options.
|
||||
|
||||
@table @option
|
||||
@item timeout
|
||||
Set timeout in microseconds of socket I/O operations used by the underlying low level
|
||||
Set timeout of socket I/O operations used by the underlying low level
|
||||
operation. By default it is set to -1, which means that the timeout is
|
||||
not specified.
|
||||
|
||||
@@ -244,7 +244,7 @@ Override the User-Agent header. If not specified the protocol will use a
|
||||
string describing the libavformat build. ("Lavf/<version>")
|
||||
|
||||
@item timeout
|
||||
Set timeout in microseconds of socket I/O operations used by the underlying low level
|
||||
Set timeout of socket I/O operations used by the underlying low level
|
||||
operation. By default it is set to -1, which means that the timeout is
|
||||
not specified.
|
||||
|
||||
@@ -537,35 +537,6 @@ The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
|
||||
for streaming multimedia content within HTTPS requests to traverse
|
||||
firewalls.
|
||||
|
||||
@section libsmbclient
|
||||
|
||||
libsmbclient permits one to manipulate CIFS/SMB network resources.
|
||||
|
||||
The following syntax is required.
|
||||
|
||||
@example
|
||||
smb://[[domain:]user[:password@@]]server[/share[/path[/file]]]
|
||||
@end example
|
||||
|
||||
This protocol accepts the following options.
|
||||
|
||||
@table @option
|
||||
@item timeout
|
||||
Set timeout in milliseconds of socket I/O operations used by the underlying
|
||||
low level operation. By default it is set to -1, which means that the timeout
|
||||
is not specified.
|
||||
|
||||
@item truncate
|
||||
Truncate existing files on write, if set to 1. A value of 0 prevents
|
||||
truncating. Default value is 1.
|
||||
|
||||
@item workgroup
|
||||
Set the workgroup used for making connections. By default workgroup is not specified.
|
||||
|
||||
@end table
|
||||
|
||||
For more information see: @url{http://www.samba.org/}.
|
||||
|
||||
@section libssh
|
||||
|
||||
Secure File Transfer Protocol via libssh
|
||||
@@ -720,7 +691,7 @@ data transferred over RDT).
|
||||
|
||||
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
|
||||
supporting it (currently Darwin Streaming Server and Mischa Spiegelmock's
|
||||
@uref{https://github.com/revmischa/rtsp-server, RTSP server}).
|
||||
@uref{http://github.com/revmischa/rtsp-server, RTSP server}).
|
||||
|
||||
The required syntax for a RTSP url is:
|
||||
@example
|
||||
@@ -739,7 +710,7 @@ Do not start playing the stream immediately if set to 1. Default value
|
||||
is 0.
|
||||
|
||||
@item rtsp_transport
|
||||
Set RTSP transport protocols.
|
||||
Set RTSP trasport protocols.
|
||||
|
||||
It accepts the following values:
|
||||
@table @samp
|
||||
@@ -771,8 +742,6 @@ The following values are accepted:
|
||||
Accept packets only from negotiated peer address and port.
|
||||
@item listen
|
||||
Act as a server, listening for an incoming connection.
|
||||
@item prefer_tcp
|
||||
Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
|
||||
@end table
|
||||
|
||||
Default value is @samp{none}.
|
||||
@@ -798,17 +767,17 @@ Set maximum local UDP port. Default value is 65000.
|
||||
@item timeout
|
||||
Set maximum timeout (in seconds) to wait for incoming connections.
|
||||
|
||||
A value of -1 means infinite (default). This option implies the
|
||||
A value of -1 mean infinite (default). This option implies the
|
||||
@option{rtsp_flags} set to @samp{listen}.
|
||||
|
||||
@item reorder_queue_size
|
||||
Set number of packets to buffer for handling of reordered packets.
|
||||
|
||||
@item stimeout
|
||||
Set socket TCP I/O timeout in microseconds.
|
||||
Set socket TCP I/O timeout in micro seconds.
|
||||
|
||||
@item user-agent
|
||||
Override User-Agent header. If not specified, it defaults to the
|
||||
Override User-Agent header. If not specified, it default to the
|
||||
libavformat identifier string.
|
||||
@end table
|
||||
|
||||
@@ -989,30 +958,6 @@ this binary block are used as master key, the following 14 bytes are
|
||||
used as master salt.
|
||||
@end table
|
||||
|
||||
@section subfile
|
||||
|
||||
Virtually extract a segment of a file or another stream.
|
||||
The underlying stream must be seekable.
|
||||
|
||||
Accepted options:
|
||||
@table @option
|
||||
@item start
|
||||
Start offset of the extracted segment, in bytes.
|
||||
@item end
|
||||
End offset of the extracted segment, in bytes.
|
||||
@end table
|
||||
|
||||
Examples:
|
||||
|
||||
Extract a chapter from a DVD VOB file (start and end sectors obtained
|
||||
externally and multiplied by 2048):
|
||||
@example
|
||||
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
|
||||
@end example
|
||||
|
||||
Play an AVI file directly from a TAR archive:
|
||||
@example
subfile,,start,183241728,end,366490624,,:archive.tar
@end example
|
||||
|
||||
@section tcp
|
||||
|
||||
Transmission Control Protocol.
|
||||
@@ -1129,9 +1074,8 @@ The list of supported options follows.
|
||||
|
||||
@table @option
|
||||
@item buffer_size=@var{size}
|
||||
Set the UDP maximum socket buffer size in bytes. This is used to set either
|
||||
the receive or send buffer size, depending on what the socket is used for.
|
||||
Default is 64KB. See also @var{fifo_size}.
|
||||
Set the UDP socket buffer size in bytes. This is used both for the
|
||||
receiving and the sending buffer size.
|
||||
|
||||
@item localport=@var{port}
|
||||
Override the local UDP port to bind with.
|
||||
@@ -1182,12 +1126,6 @@ Set raise error timeout, expressed in microseconds.
|
||||
|
||||
This option is only relevant in read mode: if no data arrived in more
|
||||
than this time interval, raise error.
|
||||
|
||||
@item broadcast=@var{1|0}
|
||||
Explicitly allow or disallow UDP broadcasting.
|
||||
|
||||
Note that broadcasting may not work properly on networks having
|
||||
a broadcast storm protection.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
@@ -112,14 +112,6 @@ bayer dither
|
||||
|
||||
@item ed
|
||||
error diffusion dither
|
||||
|
||||
@item a_dither
|
||||
arithmetic dither, based on addition
|
||||
|
||||
@item x_dither
|
||||
arithmetic dither, based on xor (more random/less apparent patterning than
|
||||
a_dither).
|
||||
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
@@ -282,14 +282,6 @@ INF: while(<$inf>) {
|
||||
$_ = "\n=over 4\n";
|
||||
};
|
||||
|
||||
/^\@(multitable)\s+{.*/ and do {
|
||||
push @endwstack, $endw;
|
||||
push @icstack, $ic;
|
||||
$endw = $1;
|
||||
$ic = "";
|
||||
$_ = "\n=over 4\n";
|
||||
};
|
||||
|
||||
/^\@((?:small)?example|display)/ and do {
|
||||
push @endwstack, $endw;
|
||||
$endw = $1;
|
||||
@@ -306,10 +298,10 @@ INF: while(<$inf>) {
|
||||
|
||||
/^\@tab\s+(.*\S)\s*$/ and $endw eq "multitable" and do {
|
||||
my $columns = $1;
|
||||
$columns =~ s/\@tab//;
|
||||
$columns =~ s/\@tab/ : /;
|
||||
|
||||
$_ = $columns;
|
||||
$chapter =~ s/$//;
|
||||
$_ = " : ". $columns;
|
||||
$chapter =~ s/\n+\s+$//;
|
||||
};
|
||||
|
||||
/^\@itemx?\s*(.+)?$/ and do {
|
||||
|
@@ -858,7 +858,7 @@ Return 1 if @var{x} is lesser than or equal to @var{y}, 0 otherwise.
|
||||
Return the maximum between @var{x} and @var{y}.
|
||||
|
||||
@item min(x, y)
|
||||
Return the minimum between @var{x} and @var{y}.
|
||||
Return the maximum between @var{x} and @var{y}.
|
||||
|
||||
@item mod(x, y)
|
||||
Compute the remainder of division of @var{x} by @var{y}.
|
||||
@@ -1031,7 +1031,7 @@ indication of the corresponding powers of 10 and of 2.
|
||||
10^24 / 2^70
|
||||
@end table
|
||||
|
||||
@c man end EXPRESSION EVALUATION
|
||||
@c man end
|
||||
|
||||
@chapter OpenCL Options
|
||||
@c man begin OPENCL OPTIONS
|
||||
|
@@ -1,424 +0,0 @@
|
||||
This document is a tutorial/initiation for writing simple filters in
|
||||
libavfilter.
|
||||
|
||||
Foreword: just like everything else in FFmpeg, libavfilter is monolithic, which
|
||||
means that it is highly recommended that you submit your filters to the FFmpeg
|
||||
development mailing-list and make sure it is applied. Otherwise, your filter is
|
||||
likely to have a very short lifetime due to more or less regular internal API
|
||||
changes, and a limited distribution, review, and testing.
|
||||
|
||||
Bootstrap
|
||||
=========
|
||||
|
||||
Let's say you want to write a new simple video filter called "foobar" which
|
||||
takes one frame in input, changes the pixels in whatever fashion you fancy, and
|
||||
outputs the modified frame. The simplest way of doing this is to take a
|
||||
similar filter. We'll pick edgedetect, but any other should do. You can look
|
||||
for others using the `./ffmpeg -v 0 -filters|grep ' V->V '` command.
|
||||
|
||||
- cp libavfilter/vf_{edgedetect,foobar}.c
|
||||
- sed -i s/edgedetect/foobar/g -i libavfilter/vf_foobar.c
|
||||
- sed -i s/EdgeDetect/Foobar/g -i libavfilter/vf_foobar.c
|
||||
- edit libavfilter/Makefile, and add an entry for "foobar" following the
|
||||
pattern of the other filters.
|
||||
- edit libavfilter/allfilters.c, and add an entry for "foobar" following the
|
||||
pattern of the other filters.
|
||||
- ./configure ...
|
||||
- make -j<whatever> ffmpeg
|
||||
- ./ffmpeg -i tests/lena.pnm -vf foobar foobar.png
|
||||
|
||||
If everything went right, you should get a foobar.png with Lena edge-detected.
|
||||
|
||||
That's it, your new playground is ready.
|
||||
|
||||
Some little details about what's going on:
|
||||
libavfilter/allfilters.c:avfilter_register_all() is called at runtime to create
|
||||
a list of the available filters, but it's important to know that this file is
|
||||
also parsed by the configure script, which in turn will define variables for
|
||||
the build system and the C code:
|
||||
|
||||
--- after running configure ---
|
||||
|
||||
$ grep FOOBAR config.mak
|
||||
CONFIG_FOOBAR_FILTER=yes
|
||||
$ grep FOOBAR config.h
|
||||
#define CONFIG_FOOBAR_FILTER 1
|
||||
|
||||
CONFIG_FOOBAR_FILTER=yes from the config.mak is later used to enable the filter in
|
||||
libavfilter/Makefile and CONFIG_FOOBAR_FILTER=1 from the config.h will be used
|
||||
for registering the filter in libavfilter/allfilters.c.
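
As a rough sketch (exact names depend on your tree, so check the existing
entries), the two additions mentioned above for a hypothetical foobar filter
usually look like this:

    # libavfilter/Makefile
    OBJS-$(CONFIG_FOOBAR_FILTER) += vf_foobar.o

    /* libavfilter/allfilters.c */
    REGISTER_FILTER(FOOBAR, foobar, vf);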
|
||||
|
||||
Filter code layout
|
||||
==================
|
||||
|
||||
You now need some theory about the general code layout of a filter. Open your
|
||||
libavfilter/vf_foobar.c. This section will detail the important parts of the
|
||||
code you need to understand before messing with it.
|
||||
|
||||
Copyright
|
||||
---------
|
||||
|
||||
First chunk is the copyright. Most filters are LGPL, and we are assuming
|
||||
vf_foobar is as well. We are also assuming vf_foobar is not an edge detector
|
||||
filter, so you can update the boilerplate with your credits.
|
||||
|
||||
Doxy
|
||||
----
|
||||
|
||||
Next chunk is the Doxygen about the file. See http://ffmpeg.org/doxygen/trunk/.
|
||||
Detail here what the filter is, does, and add some references if you feel like
|
||||
it.
|
||||
|
||||
Context
|
||||
-------
|
||||
|
||||
Skip the headers and scroll down to the definition of FoobarContext. This is
|
||||
your local state context. It is already filled with 0 when you get it so do not
|
||||
worry about uninitialized reads into this context. This is where you put all the
|
||||
"global" information you need, typically the variable storing the user options.
|
||||
You'll notice the first field "const AVClass *class"; it's the only field you
|
||||
need to keep, assuming you have a context. There is some magic you don't care
|
||||
about around this field, just let it be (in first position) for now.
|
||||
|
||||
Options
|
||||
-------
|
||||
|
||||
Then comes the options array. This is what will define the user accessible
|
||||
options. For example, -vf foobar=mode=colormix:high=0.4:low=0.1. Most options
|
||||
have the following pattern:
|
||||
name, description, offset, type, default value, minimum value, maximum value, flags
|
||||
|
||||
- name is the option name, keep it simple, lowercase
|
||||
- descriptions are short, in lowercase, without a period, and describe what they
|
||||
do, for example "set the foo of the bar"
|
||||
- offset is the offset of the field in your local context, see the OFFSET()
|
||||
macro; the option parser will use that information to fill the fields
|
||||
according to the user input
|
||||
- type is any of AV_OPT_TYPE_* defined in libavutil/opt.h
|
||||
- default value is a union where you pick the appropriate type; "{.dbl=0.3}",
|
||||
"{.i64=0x234}", "{.str=NULL}", ...
|
||||
- min and max values define the range of available values, inclusive
|
||||
- flags are AVOption generic flags. See AV_OPT_FLAG_* definitions
|
||||
|
||||
When in doubt, just look at the other AVOption definitions all around the codebase,
|
||||
there are tons of examples.
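
To make the pattern concrete, here is a minimal sketch of what the foobar
options array could look like, assuming FoobarContext has hypothetical
double fields named low and high (offsetof() comes from <stddef.h>):

    #define OFFSET(x) offsetof(FoobarContext, x)
    #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

    static const AVOption foobar_options[] = {
        { "low",  "set the low threshold",  OFFSET(low),  AV_OPT_TYPE_DOUBLE, {.dbl=0.1}, 0, 1, FLAGS },
        { "high", "set the high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=0.4}, 0, 1, FLAGS },
        { NULL }
    };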
|
||||
|
||||
Class
|
||||
-----
|
||||
|
||||
AVFILTER_DEFINE_CLASS(foobar) will define a unique foobar_class with some kind
|
||||
of signature referencing the options, etc. which will be referenced in the
|
||||
definition of the AVFilter.
|
||||
|
||||
Filter definition
|
||||
-----------------
|
||||
|
||||
At the end of the file, you will find foobar_inputs, foobar_outputs and
|
||||
the AVFilter ff_vf_foobar. Don't forget to update the AVFilter.description with
|
||||
a description of what the filter does, starting with a capitalized letter and
|
||||
ending with a period. You'd better drop the AVFilter.flags entry for now, and
|
||||
re-add it later depending on the capabilities of your filter.
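
Assuming the callbacks described below exist, the end of the file could look
roughly like this (field names follow the libavfilter API at the time of
writing; treat this as a sketch and compare with edgedetect for the
authoritative layout):

    static const AVFilterPad foobar_inputs[] = {
        {
            .name         = "default",
            .type         = AVMEDIA_TYPE_VIDEO,
            .config_props = config_props,
            .filter_frame = filter_frame,
        },
        { NULL }
    };

    static const AVFilterPad foobar_outputs[] = {
        {
            .name = "default",
            .type = AVMEDIA_TYPE_VIDEO,
        },
        { NULL }
    };

    AVFilter ff_vf_foobar = {
        .name          = "foobar",
        .description   = NULL_IF_CONFIG_SMALL("Apply a hypothetical foobar effect."),
        .priv_size     = sizeof(FoobarContext),
        .init          = init,
        .uninit        = uninit,
        .query_formats = query_formats,
        .inputs        = foobar_inputs,
        .outputs       = foobar_outputs,
        .priv_class    = &foobar_class,
    };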
|
||||
|
||||
Callbacks
|
||||
---------
|
||||
|
||||
Let's now study the common callbacks. Before going into details, note that all
|
||||
these callbacks are explained in detail in libavfilter/avfilter.h, so in
|
||||
doubt, refer to the doxy in that file.
|
||||
|
||||
init()
|
||||
~~~~~~
|
||||
|
||||
The first one to be called is init(). It's flagged as cold because it is not called
|
||||
often. Look for "cold" on
|
||||
http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html for more
|
||||
information.
|
||||
|
||||
As the name suggests, init() is where you eventually initialize and allocate
|
||||
your buffers, pre-compute your data, etc. Note that at this point, your local
|
||||
context already has the user options initialized, but you still haven't any
|
||||
clue about the kind of data input you will get, so this function is often
|
||||
mainly used to sanitize the user options.
|
||||
|
||||
Some init()s will also define the number of inputs or outputs dynamically
|
||||
according to the user options. A good example of this is the split filter, but
|
||||
we won't cover this here since vf_foobar is just a simple 1:1 filter.
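
A minimal sketch of such an init(), assuming the hypothetical low/high
options from the previous section, could be:

    static av_cold int init(AVFilterContext *ctx)
    {
        FoobarContext *s = ctx->priv;

        /* the user options are already parsed into the context here */
        if (s->low > s->high) {
            av_log(ctx, AV_LOG_ERROR, "low must not be greater than high\n");
            return AVERROR(EINVAL);
        }
        return 0;
    }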
|
||||
|
||||
uninit()
|
||||
~~~~~~~~
|
||||
|
||||
Similarly, there is the uninit() callback, doing what the name suggests. Free
|
||||
everything you allocated here.
|
||||
|
||||
query_formats()
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
This follows init() and is used for the format negotiation, basically
|
||||
where you say what pixel format(s) (gray, rgb 32, yuv 4:2:0, ...) you accept
|
||||
for your inputs, and what you can output. All pixel formats are defined in
|
||||
libavutil/pixfmt.h. If you don't change the pixel format between the input and
|
||||
the output, you just have to define a pixel formats array and call
|
||||
ff_set_common_formats(). For more complex negotiation, you can refer to other
|
||||
filters such as vf_scale.
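
For a filter that keeps the same pixel format on input and output, a minimal
query_formats() following that pattern could look like this (the single gray8
format is just a placeholder):

    static int query_formats(AVFilterContext *ctx)
    {
        static const enum AVPixelFormat pix_fmts[] = {
            AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
        };
        ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
        return 0;
    }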
|
||||
|
||||
config_props()
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
This callback is not necessary, but you will probably have one or more
|
||||
config_props() anyway. It's not a callback for the filter itself but for its
|
||||
inputs or outputs (they're called "pads" - AVFilterPad - in libavfilter's
|
||||
lexicon).
|
||||
|
||||
Inside the input config_props(), you are at a point where you know which pixel
|
||||
format has been picked after query_formats(), and more information such as the
|
||||
video width and height (inlink->{w,h}). So if you need to update your internal
|
||||
context state depending on your input you can do it here. In edgedetect you can
|
||||
see that this callback is used to allocate buffers depending on this
|
||||
information. They will be destroyed in uninit().
|
||||
|
||||
Inside the output config_props(), you can define what you want to change in the
|
||||
output. Typically, if your filter is going to double the size of the video, you
|
||||
will update outlink->w and outlink->h.
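
As an illustration only, an input config_props() allocating a scratch buffer
sized from the negotiated dimensions (assuming a hypothetical tmpbuf field in
FoobarContext) might be:

    static int config_props(AVFilterLink *inlink)
    {
        AVFilterContext *ctx = inlink->dst;
        FoobarContext *s = ctx->priv;

        /* one byte per pixel of the first plane, just as an example */
        s->tmpbuf = av_malloc(inlink->w * inlink->h);
        if (!s->tmpbuf)
            return AVERROR(ENOMEM);
        return 0;
    }

The matching av_freep(&s->tmpbuf) would then go in uninit().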
|
||||
|
||||
filter_frame()
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
This is the callback you have been waiting for since the beginning: it is where you
|
||||
process the received frames. Along with the frame, you get the input link from
|
||||
which the frame comes.
|
||||
|
||||
static int filter_frame(AVFilterLink *inlink, AVFrame *in) { ... }
|
||||
|
||||
You can get the filter context through that input link:
|
||||
|
||||
AVFilterContext *ctx = inlink->dst;
|
||||
|
||||
Then access your internal state context:
|
||||
|
||||
FoobarContext *foobar = ctx->priv;
|
||||
|
||||
And also the output link where you will send your frame when you are done:
|
||||
|
||||
AVFilterLink *outlink = ctx->outputs[0];
|
||||
|
||||
Here, we are picking the first output. You can have several, but in our case we
|
||||
only have one since we are in a 1:1 input-output situation.
|
||||
|
||||
If you want to define a simple pass-through filter, you can just do:
|
||||
|
||||
return ff_filter_frame(outlink, in);
|
||||
|
||||
But of course, you probably want to change the data of that frame.
|
||||
|
||||
This can be done by accessing frame->data[] and frame->linesize[]. Important
|
||||
note here: the width does NOT match the linesize. The linesize is always
|
||||
greater than or equal to the width. The padding created should not be changed or
|
||||
even read. Typically, keep in mind that a previous filter in your chain might
|
||||
have altered the frame dimension but not the linesize. Imagine a crop filter
|
||||
that halves the video size: the linesizes won't be changed, just the width.
|
||||
|
||||
<-------------- linesize ------------------------>
|
||||
+-------------------------------+----------------+ ^
|
||||
| | | |
|
||||
| | | |
|
||||
| picture | padding | | height
|
||||
| | | |
|
||||
| | | |
|
||||
+-------------------------------+----------------+ v
|
||||
<----------- width ------------->
|
||||
|
||||
Before modifying the "in" frame, you have to make sure it is writable, or get a
|
||||
new one. Multiple scenarios are possible here depending on the kind of
|
||||
processing you are doing.
|
||||
|
||||
Let's say you want to change one pixel depending on multiple pixels (typically
|
||||
the surrounding ones) of the input. In that case, you can't do an in-place
|
||||
processing of the input so you will need to allocate a new frame, with the same
|
||||
properties as the input one, and send that new frame to the next filter:
|
||||
|
||||
AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
|
||||
if (!out) {
|
||||
av_frame_free(&in);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
av_frame_copy_props(out, in);
|
||||
|
||||
// out->data[...] = foobar(in->data[...])
|
||||
|
||||
av_frame_free(&in);
|
||||
return ff_filter_frame(outlink, out);
|
||||
|
||||
In-place processing
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
If you can just alter the input frame, you probably just want to do that
|
||||
instead:
|
||||
|
||||
av_frame_make_writable(in);
|
||||
// in->data[...] = foobar(in->data[...])
|
||||
return ff_filter_frame(outlink, in);
|
||||
|
||||
You may wonder why a frame might not be writable. The answer is that for
|
||||
example a previous filter might still own the frame data: imagine a filter
|
||||
prior to yours in the filtergraph that needs to cache the frame. You must not
|
||||
alter that frame, otherwise it will make that previous filter buggy. This is
|
||||
where av_frame_make_writable() helps (it won't have any effect if the frame
|
||||
already is writable).
|
||||
|
||||
The problem with using av_frame_make_writable() is that in the worst case it
|
||||
will copy the whole input frame before you change it all over again with your
|
||||
filter: if the frame is not writable, av_frame_make_writable() will allocate
|
||||
new buffers, and copy the input frame data. You don't want that, and you can
|
||||
avoid it by just allocating a new buffer if necessary, and process from in to
|
||||
out in your filter, saving the memcpy. Generally, this is done following this
|
||||
scheme:
|
||||
|
||||
int direct = 0;
|
||||
AVFrame *out;
|
||||
|
||||
if (av_frame_is_writable(in)) {
|
||||
direct = 1;
|
||||
out = in;
|
||||
} else {
|
||||
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
|
||||
if (!out) {
|
||||
av_frame_free(&in);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
av_frame_copy_props(out, in);
|
||||
}
|
||||
|
||||
// out->data[...] = foobar(in->data[...])
|
||||
|
||||
if (!direct)
|
||||
av_frame_free(&in);
|
||||
return ff_filter_frame(outlink, out);
|
||||
|
||||
Of course, this will only work if you can do in-place processing. To test if
|
||||
your filter handles the permissions well, you can use the perms filter. For
|
||||
example with:
|
||||
|
||||
-vf perms=random,foobar
|
||||
|
||||
Make sure no automatic pixel conversion is inserted between perms and foobar,
|
||||
otherwise the frame permissions might change again and the test will be
|
||||
meaningless: add av_log(0,0,"direct=%d\n",direct) in your code to check that.
|
||||
You can avoid the issue with something like:
|
||||
|
||||
-vf format=rgb24,perms=random,foobar
|
||||
|
||||
...assuming your filter accepts rgb24 of course. This will make sure the
|
||||
necessary conversion is inserted before the perms filter.
|
||||
|
||||
Timeline
|
||||
~~~~~~~~
|
||||
|
||||
Adding timeline support
|
||||
(http://ffmpeg.org/ffmpeg-filters.html#Timeline-editing) is often an easy
|
||||
feature to add. In the most simple case, you just have to add
|
||||
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC to the AVFilter.flags. You can typically
|
||||
do this when your filter does not need to save the previous context frames, or
|
||||
basically if your filter just alters whatever goes in and doesn't need
|
||||
previous/future information. See for instance commit 86cb986ce that adds
|
||||
timeline support to the fieldorder filter.
|
||||
|
||||
In some cases, you might need to reset your context somehow. This is handled by
|
||||
the AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL flag which is used if the filter
|
||||
must not process the frames but still wants to keep track of the frames going
|
||||
through (to keep them in cache for when it's enabled again). See for example
|
||||
commit 69d72140a that adds timeline support to the phase filter.
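
In the simple case this is literally a one-line change in the filter
definition sketched earlier, for example:

    AVFilter ff_vf_foobar = {
        /* ... */
        .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
    };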
|
||||
|
||||
Threading
|
||||
~~~~~~~~~
|
||||
|
||||
libavfilter does not yet support frame threading, but you can add slice
|
||||
threading to your filters.
|
||||
|
||||
Let's say the foobar filter has the following frame processing function:
|
||||
|
||||
dst = out->data[0];
|
||||
src = in ->data[0];
|
||||
|
||||
for (y = 0; y < inlink->h; y++) {
|
||||
for (x = 0; x < inlink->w; x++)
|
||||
dst[x] = foobar(src[x]);
|
||||
dst += out->linesize[0];
|
||||
src += in ->linesize[0];
|
||||
}
|
||||
|
||||
The first thing is to make this function work on slices. The new code will
|
||||
look like this:
|
||||
|
||||
for (y = slice_start; y < slice_end; y++) {
|
||||
for (x = 0; x < inlink->w; x++)
|
||||
dst[x] = foobar(src[x]);
|
||||
dst += out->linesize[0];
|
||||
src += in ->linesize[0];
|
||||
}
|
||||
|
||||
The source and destination pointers, and slice_start/slice_end will be defined
|
||||
according to the number of jobs. Generally, it looks like this:
|
||||
|
||||
const int slice_start = (in->height * jobnr ) / nb_jobs;
|
||||
const int slice_end = (in->height * (jobnr+1)) / nb_jobs;
|
||||
uint8_t *dst = out->data[0] + slice_start * out->linesize[0];
|
||||
const uint8_t *src = in->data[0] + slice_start * in->linesize[0];
|
||||
|
||||
This new code will be isolated in a new filter_slice():
|
||||
|
||||
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) { ... }
|
||||
|
||||
Note that we need our input and output frame to define slice_{start,end} and
|
||||
dst/src, which are not available in that callback. They will be transmitted
|
||||
through the opaque void *arg. You have to define a structure which contains
|
||||
everything you need:
|
||||
|
||||
typedef struct ThreadData {
|
||||
AVFrame *in, *out;
|
||||
} ThreadData;
|
||||
|
||||
If you need more information from your local context, put it here.
|
||||
|
||||
In your filter_slice function, you access it like this:
|
||||
|
||||
const ThreadData *td = arg;
|
||||
|
||||
Then in your filter_frame() callback, you need to call the threading
|
||||
distributor with something like this:
|
||||
|
||||
ThreadData td;
|
||||
|
||||
// ...
|
||||
|
||||
td.in = in;
|
||||
td.out = out;
|
||||
ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));
|
||||
|
||||
// ...
|
||||
|
||||
return ff_filter_frame(outlink, out);
|
||||
|
||||
Last step is to add AVFILTER_FLAG_SLICE_THREADS flag to AVFilter.flags.
|
||||
|
||||
For more examples of slice threading additions, you can try running git log -p
|
||||
--grep 'slice threading' libavfilter/
|
||||
|
||||
Finalization
|
||||
~~~~~~~~~~~~
|
||||
|
||||
When your awesome filter is finished, you have a few more steps before you're
|
||||
done:
|
||||
|
||||
- write its documentation in doc/filters.texi, and test the output with make
|
||||
doc/ffmpeg-filters.html.
|
||||
- add a FATE test, generally by adding an entry in
|
||||
tests/fate/filter-video.mak, and run make fate-filter-foobar GEN=1 to
|
||||
generate the data.
|
||||
- add an entry in the Changelog
|
||||
- edit libavfilter/version.h and increase LIBAVFILTER_VERSION_MINOR by one
|
||||
(and reset LIBAVFILTER_VERSION_MICRO to 100)
|
||||
- git add ... && git commit -m "avfilter: add foobar filter." && git format-patch -1
|
||||
|
||||
When all of this is done, you can submit your patch to the ffmpeg-devel
|
||||
mailing-list for review. If you need any help, feel free to come on our IRC
|
||||
channel, #ffmpeg-devel on irc.freenode.net.
|
39
ffmpeg.h
@@ -44,7 +44,6 @@
|
||||
#include "libavutil/fifo.h"
|
||||
#include "libavutil/pixfmt.h"
|
||||
#include "libavutil/rational.h"
|
||||
#include "libavutil/threadmessage.h"
|
||||
|
||||
#include "libswresample/swresample.h"
|
||||
|
||||
@@ -61,8 +60,6 @@ enum HWAccelID {
|
||||
HWACCEL_NONE = 0,
|
||||
HWACCEL_AUTO,
|
||||
HWACCEL_VDPAU,
|
||||
HWACCEL_DXVA2,
|
||||
HWACCEL_VDA,
|
||||
};
|
||||
|
||||
typedef struct HWAccel {
|
||||
@@ -204,8 +201,6 @@ typedef struct OptionsContext {
|
||||
int nb_guess_layout_max;
|
||||
SpecifierOpt *apad;
|
||||
int nb_apad;
|
||||
SpecifierOpt *discard;
|
||||
int nb_discard;
|
||||
} OptionsContext;
|
||||
|
||||
typedef struct InputFilter {
|
||||
@@ -242,9 +237,7 @@ typedef struct InputStream {
|
||||
int file_index;
|
||||
AVStream *st;
|
||||
int discard; /* true if stream data should be discarded */
|
||||
int user_set_discard;
|
||||
int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
|
||||
AVCodecContext *dec_ctx;
|
||||
AVCodec *dec;
|
||||
AVFrame *decoded_frame;
|
||||
AVFrame *filter_frame; /* a ref of decoded_frame, to be sent to filters */
|
||||
@@ -264,7 +257,7 @@ typedef struct InputStream {
|
||||
double ts_scale;
|
||||
int saw_first_ts;
|
||||
int showed_multi_packet_warning;
|
||||
AVDictionary *decoder_opts;
|
||||
AVDictionary *opts;
|
||||
AVRational framerate; /* framerate forced with -r */
|
||||
int top_field_first;
|
||||
int guess_layout_max;
|
||||
@@ -313,15 +306,6 @@ typedef struct InputStream {
|
||||
int (*hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame);
|
||||
enum AVPixelFormat hwaccel_pix_fmt;
|
||||
enum AVPixelFormat hwaccel_retrieved_pix_fmt;
|
||||
|
||||
/* stats */
|
||||
// combined size of all the packets read
|
||||
uint64_t data_size;
|
||||
/* number of packets successfully read for this stream */
|
||||
uint64_t nb_packets;
|
||||
// number of frames/samples retrieved from the decoder
|
||||
uint64_t frames_decoded;
|
||||
uint64_t samples_decoded;
|
||||
} InputStream;
|
||||
|
||||
typedef struct InputFile {
|
||||
@@ -341,10 +325,13 @@ typedef struct InputFile {
|
||||
int accurate_seek;
|
||||
|
||||
#if HAVE_PTHREADS
|
||||
AVThreadMessageQueue *in_thread_queue;
|
||||
pthread_t thread; /* thread reading from this file */
|
||||
int non_blocking; /* reading packets from the thread should not block */
|
||||
int finished; /* the thread has exited */
|
||||
int joined; /* the thread has been joined */
|
||||
pthread_mutex_t fifo_lock; /* lock for access to fifo */
|
||||
pthread_cond_t fifo_cond; /* the main thread will signal on this cond after reading from fifo */
|
||||
AVFifoBuffer *fifo; /* demuxed packets are stored here; freed by the main thread */
|
||||
#endif
|
||||
} InputFile;
|
||||
|
||||
@@ -381,7 +368,6 @@ typedef struct OutputStream {
|
||||
/* dts of the last packet sent to the muxer */
|
||||
int64_t last_mux_dts;
|
||||
AVBitStreamFilterContext *bitstream_filters;
|
||||
AVCodecContext *enc_ctx;
|
||||
AVCodec *enc;
|
||||
int64_t max_frames;
|
||||
AVFrame *filtered_frame;
|
||||
@@ -414,7 +400,7 @@ typedef struct OutputStream {
|
||||
char *filters_script; ///< filtergraph script associated to the -filter_script option
|
||||
|
||||
int64_t sws_flags;
|
||||
AVDictionary *encoder_opts;
|
||||
AVDictionary *opts;
|
||||
AVDictionary *swr_opts;
|
||||
AVDictionary *resample_opts;
|
||||
char *apad;
|
||||
@@ -428,15 +414,6 @@ typedef struct OutputStream {
|
||||
int keep_pix_fmt;
|
||||
|
||||
AVCodecParserContext *parser;
|
||||
|
||||
/* stats */
|
||||
// combined size of all the packets written
|
||||
uint64_t data_size;
|
||||
// number of packets send to the muxer
|
||||
uint64_t packets_written;
|
||||
// number of frames/samples sent to the encoder
|
||||
uint64_t frames_encoded;
|
||||
uint64_t samples_encoded;
|
||||
} OutputStream;
|
||||
|
||||
typedef struct OutputFile {
|
||||
@@ -506,7 +483,7 @@ void assert_avoptions(AVDictionary *m);
|
||||
|
||||
int guess_input_channel_layout(InputStream *ist);
|
||||
|
||||
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *avctx, AVCodec *codec, enum AVPixelFormat target);
|
||||
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFormat target);
|
||||
void choose_sample_fmt(AVStream *st, AVCodec *codec);
|
||||
|
||||
int configure_filtergraph(FilterGraph *fg);
|
||||
@@ -517,7 +494,5 @@ FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost);
|
||||
int ffmpeg_parse_options(int argc, char **argv);
|
||||
|
||||
int vdpau_init(AVCodecContext *s);
|
||||
int dxva2_init(AVCodecContext *s);
|
||||
int vda_init(AVCodecContext *s);
|
||||
|
||||
#endif /* FFMPEG_H */
|
||||
|
620
ffmpeg_dxva2.c
@@ -1,620 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <windows.h>
|
||||
|
||||
#ifdef _WIN32_WINNT
|
||||
#undef _WIN32_WINNT
|
||||
#endif
|
||||
#define _WIN32_WINNT 0x0600
|
||||
#define DXVA2API_USE_BITFIELDS
|
||||
#define COBJMACROS
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include <d3d9.h>
|
||||
#include <dxva2api.h>
|
||||
|
||||
#include "ffmpeg.h"
|
||||
|
||||
#include "libavcodec/dxva2.h"
|
||||
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/buffer.h"
|
||||
#include "libavutil/frame.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavutil/pixfmt.h"
|
||||
|
||||
/* define all the GUIDs used directly here,
|
||||
to avoid problems with inconsistent dxva2api.h versions in mingw-w64 and different MSVC version */
|
||||
#include <initguid.h>
|
||||
DEFINE_GUID(IID_IDirectXVideoDecoderService, 0xfc51a551,0xd5e7,0x11d9,0xaf,0x55,0x00,0x05,0x4e,0x43,0xff,0x02);
|
||||
|
||||
DEFINE_GUID(DXVA2_ModeMPEG2_VLD, 0xee27417f, 0x5e28,0x4e65,0xbe,0xea,0x1d,0x26,0xb5,0x08,0xad,0xc9);
|
||||
DEFINE_GUID(DXVA2_ModeMPEG2and1_VLD, 0x86695f12, 0x340e,0x4f04,0x9f,0xd3,0x92,0x53,0xdd,0x32,0x74,0x60);
|
||||
DEFINE_GUID(DXVA2_ModeH264_E, 0x1b81be68, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
|
||||
DEFINE_GUID(DXVA2_ModeH264_F, 0x1b81be69, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
|
||||
DEFINE_GUID(DXVADDI_Intel_ModeH264_E, 0x604F8E68, 0x4951,0x4C54,0x88,0xFE,0xAB,0xD2,0x5C,0x15,0xB3,0xD6);
|
||||
DEFINE_GUID(DXVA2_ModeVC1_D, 0x1b81beA3, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
|
||||
DEFINE_GUID(DXVA2_ModeVC1_D2010, 0x1b81beA4, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
|
||||
DEFINE_GUID(DXVA2_NoEncrypt, 0x1b81beD0, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
|
||||
DEFINE_GUID(GUID_NULL, 0x00000000, 0x0000,0x0000,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00);
|
||||
|
||||
typedef IDirect3D9* WINAPI pDirect3DCreate9(UINT);
|
||||
typedef HRESULT WINAPI pCreateDeviceManager9(UINT *, IDirect3DDeviceManager9 **);
|
||||
|
||||
typedef struct dxva2_mode {
|
||||
const GUID *guid;
|
||||
enum AVCodecID codec;
|
||||
} dxva2_mode;
|
||||
|
||||
static const dxva2_mode dxva2_modes[] = {
|
||||
/* MPEG-2 */
|
||||
{ &DXVA2_ModeMPEG2_VLD, AV_CODEC_ID_MPEG2VIDEO },
|
||||
{ &DXVA2_ModeMPEG2and1_VLD, AV_CODEC_ID_MPEG2VIDEO },
|
||||
|
||||
/* H.264 */
|
||||
{ &DXVA2_ModeH264_F, AV_CODEC_ID_H264 },
|
||||
{ &DXVA2_ModeH264_E, AV_CODEC_ID_H264 },
|
||||
/* Intel specific H.264 mode */
|
||||
{ &DXVADDI_Intel_ModeH264_E, AV_CODEC_ID_H264 },
|
||||
|
||||
/* VC-1 / WMV3 */
|
||||
{ &DXVA2_ModeVC1_D2010, AV_CODEC_ID_VC1 },
|
||||
{ &DXVA2_ModeVC1_D2010, AV_CODEC_ID_WMV3 },
|
||||
{ &DXVA2_ModeVC1_D, AV_CODEC_ID_VC1 },
|
||||
{ &DXVA2_ModeVC1_D, AV_CODEC_ID_WMV3 },
|
||||
|
||||
{ NULL, 0 },
|
||||
};
|
||||
|
||||
typedef struct surface_info {
|
||||
int used;
|
||||
uint64_t age;
|
||||
} surface_info;
|
||||
|
||||
typedef struct DXVA2Context {
|
||||
HMODULE d3dlib;
|
||||
HMODULE dxva2lib;
|
||||
|
||||
HANDLE deviceHandle;
|
||||
|
||||
IDirect3D9 *d3d9;
|
||||
IDirect3DDevice9 *d3d9device;
|
||||
IDirect3DDeviceManager9 *d3d9devmgr;
|
||||
IDirectXVideoDecoderService *decoder_service;
|
||||
IDirectXVideoDecoder *decoder;
|
||||
|
||||
GUID decoder_guid;
|
||||
DXVA2_ConfigPictureDecode decoder_config;
|
||||
|
||||
LPDIRECT3DSURFACE9 *surfaces;
|
||||
surface_info *surface_infos;
|
||||
uint32_t num_surfaces;
|
||||
uint64_t surface_age;
|
||||
|
||||
AVFrame *tmp_frame;
|
||||
} DXVA2Context;
|
||||
|
||||
typedef struct DXVA2SurfaceWrapper {
|
||||
DXVA2Context *ctx;
|
||||
LPDIRECT3DSURFACE9 surface;
|
||||
IDirectXVideoDecoder *decoder;
|
||||
} DXVA2SurfaceWrapper;
|
||||
|
||||
static void dxva2_destroy_decoder(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
DXVA2Context *ctx = ist->hwaccel_ctx;
|
||||
int i;
|
||||
|
||||
if (ctx->surfaces) {
|
||||
for (i = 0; i < ctx->num_surfaces; i++) {
|
||||
if (ctx->surfaces[i])
|
||||
IDirect3DSurface9_Release(ctx->surfaces[i]);
|
||||
}
|
||||
}
|
||||
av_freep(&ctx->surfaces);
|
||||
av_freep(&ctx->surface_infos);
|
||||
ctx->num_surfaces = 0;
|
||||
ctx->surface_age = 0;
|
||||
|
||||
if (ctx->decoder) {
|
||||
IDirectXVideoDecoder_Release(ctx->decoder);
|
||||
ctx->decoder = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void dxva2_uninit(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
DXVA2Context *ctx = ist->hwaccel_ctx;
|
||||
|
||||
ist->hwaccel_uninit = NULL;
|
||||
ist->hwaccel_get_buffer = NULL;
|
||||
ist->hwaccel_retrieve_data = NULL;
|
||||
|
||||
if (ctx->decoder)
|
||||
dxva2_destroy_decoder(s);
|
||||
|
||||
if (ctx->decoder_service)
|
||||
IDirectXVideoDecoderService_Release(ctx->decoder_service);
|
||||
|
||||
if (ctx->d3d9devmgr && ctx->deviceHandle != INVALID_HANDLE_VALUE)
|
||||
IDirect3DDeviceManager9_CloseDeviceHandle(ctx->d3d9devmgr, ctx->deviceHandle);
|
||||
|
||||
if (ctx->d3d9devmgr)
|
||||
IDirect3DDeviceManager9_Release(ctx->d3d9devmgr);
|
||||
|
||||
if (ctx->d3d9device)
|
||||
IDirect3DDevice9_Release(ctx->d3d9device);
|
||||
|
||||
if (ctx->d3d9)
|
||||
IDirect3D9_Release(ctx->d3d9);
|
||||
|
||||
if (ctx->d3dlib)
|
||||
FreeLibrary(ctx->d3dlib);
|
||||
|
||||
if (ctx->dxva2lib)
|
||||
FreeLibrary(ctx->dxva2lib);
|
||||
|
||||
av_frame_free(&ctx->tmp_frame);
|
||||
|
||||
av_freep(&ist->hwaccel_ctx);
|
||||
av_freep(&s->hwaccel_context);
|
||||
}
|
||||
|
||||
static void dxva2_release_buffer(void *opaque, uint8_t *data)
|
||||
{
|
||||
DXVA2SurfaceWrapper *w = opaque;
|
||||
DXVA2Context *ctx = w->ctx;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ctx->num_surfaces; i++) {
|
||||
if (ctx->surfaces[i] == w->surface) {
|
||||
ctx->surface_infos[i].used = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
IDirect3DSurface9_Release(w->surface);
|
||||
IDirectXVideoDecoder_Release(w->decoder);
|
||||
av_free(w);
|
||||
}
|
||||
|
||||
static int dxva2_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
DXVA2Context *ctx = ist->hwaccel_ctx;
|
||||
int i, old_unused = -1;
|
||||
LPDIRECT3DSURFACE9 surface;
|
||||
DXVA2SurfaceWrapper *w = NULL;
|
||||
|
||||
av_assert0(frame->format == AV_PIX_FMT_DXVA2_VLD);
|
||||
|
||||
for (i = 0; i < ctx->num_surfaces; i++) {
|
||||
surface_info *info = &ctx->surface_infos[i];
|
||||
if (!info->used && (old_unused == -1 || info->age < ctx->surface_infos[old_unused].age))
|
||||
old_unused = i;
|
||||
}
|
||||
if (old_unused == -1) {
|
||||
av_log(NULL, AV_LOG_ERROR, "No free DXVA2 surface!\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
i = old_unused;
|
||||
|
||||
surface = ctx->surfaces[i];
|
||||
|
||||
w = av_mallocz(sizeof(*w));
|
||||
if (!w)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
frame->buf[0] = av_buffer_create((uint8_t*)surface, 0,
|
||||
dxva2_release_buffer, w,
|
||||
AV_BUFFER_FLAG_READONLY);
|
||||
if (!frame->buf[0]) {
|
||||
av_free(w);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
w->ctx = ctx;
|
||||
w->surface = surface;
|
||||
IDirect3DSurface9_AddRef(w->surface);
|
||||
w->decoder = ctx->decoder;
|
||||
IDirectXVideoDecoder_AddRef(w->decoder);
|
||||
|
||||
ctx->surface_infos[i].used = 1;
|
||||
ctx->surface_infos[i].age = ctx->surface_age++;
|
||||
|
||||
frame->data[3] = (uint8_t *)surface;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dxva2_retrieve_data(AVCodecContext *s, AVFrame *frame)
|
||||
{
|
||||
LPDIRECT3DSURFACE9 surface = (LPDIRECT3DSURFACE9)frame->data[3];
|
||||
InputStream *ist = s->opaque;
|
||||
DXVA2Context *ctx = ist->hwaccel_ctx;
|
||||
D3DSURFACE_DESC surfaceDesc;
|
||||
D3DLOCKED_RECT LockedRect;
|
||||
HRESULT hr;
|
||||
int ret;
|
||||
|
||||
IDirect3DSurface9_GetDesc(surface, &surfaceDesc);
|
||||
|
||||
ctx->tmp_frame->width = frame->width;
|
||||
ctx->tmp_frame->height = frame->height;
|
||||
ctx->tmp_frame->format = AV_PIX_FMT_NV12;
|
||||
|
||||
ret = av_frame_get_buffer(ctx->tmp_frame, 32);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
hr = IDirect3DSurface9_LockRect(surface, &LockedRect, NULL, D3DLOCK_READONLY);
|
||||
if (FAILED(hr)) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Unable to lock DXVA2 surface\n");
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
|
||||
av_image_copy_plane(ctx->tmp_frame->data[0], ctx->tmp_frame->linesize[0],
|
||||
(uint8_t*)LockedRect.pBits,
|
||||
LockedRect.Pitch, frame->width, frame->height);
|
||||
|
||||
av_image_copy_plane(ctx->tmp_frame->data[1], ctx->tmp_frame->linesize[1],
|
||||
(uint8_t*)LockedRect.pBits + LockedRect.Pitch * surfaceDesc.Height,
|
||||
LockedRect.Pitch, frame->width, frame->height / 2);
|
||||
|
||||
IDirect3DSurface9_UnlockRect(surface);
|
||||
|
||||
ret = av_frame_copy_props(ctx->tmp_frame, frame);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
av_frame_unref(frame);
|
||||
av_frame_move_ref(frame, ctx->tmp_frame);
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
av_frame_unref(ctx->tmp_frame);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dxva2_alloc(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
|
||||
DXVA2Context *ctx;
|
||||
pDirect3DCreate9 *createD3D = NULL;
|
||||
pCreateDeviceManager9 *createDeviceManager = NULL;
|
||||
HRESULT hr;
|
||||
D3DPRESENT_PARAMETERS d3dpp = {0};
|
||||
D3DDISPLAYMODE d3ddm;
|
||||
unsigned resetToken = 0;
|
||||
UINT adapter = D3DADAPTER_DEFAULT;
|
||||
|
||||
ctx = av_mallocz(sizeof(*ctx));
|
||||
if (!ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ctx->deviceHandle = INVALID_HANDLE_VALUE;
|
||||
|
||||
ist->hwaccel_ctx = ctx;
|
||||
ist->hwaccel_uninit = dxva2_uninit;
|
||||
ist->hwaccel_get_buffer = dxva2_get_buffer;
|
||||
ist->hwaccel_retrieve_data = dxva2_retrieve_data;
|
||||
|
||||
ctx->d3dlib = LoadLibrary("d3d9.dll");
|
||||
if (!ctx->d3dlib) {
|
||||
av_log(NULL, loglevel, "Failed to load D3D9 library\n");
|
||||
goto fail;
|
||||
}
|
||||
ctx->dxva2lib = LoadLibrary("dxva2.dll");
|
||||
if (!ctx->dxva2lib) {
|
||||
av_log(NULL, loglevel, "Failed to load DXVA2 library\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
createD3D = (pDirect3DCreate9 *)GetProcAddress(ctx->d3dlib, "Direct3DCreate9");
|
||||
if (!createD3D) {
|
||||
av_log(NULL, loglevel, "Failed to locate Direct3DCreate9\n");
|
||||
goto fail;
|
||||
}
|
||||
createDeviceManager = (pCreateDeviceManager9 *)GetProcAddress(ctx->dxva2lib, "DXVA2CreateDirect3DDeviceManager9");
|
||||
if (!createDeviceManager) {
|
||||
av_log(NULL, loglevel, "Failed to locate DXVA2CreateDirect3DDeviceManager9\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ctx->d3d9 = createD3D(D3D_SDK_VERSION);
|
||||
if (!ctx->d3d9) {
|
||||
av_log(NULL, loglevel, "Failed to create IDirect3D object\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (ist->hwaccel_device) {
|
||||
adapter = atoi(ist->hwaccel_device);
|
||||
av_log(NULL, AV_LOG_INFO, "Using HWAccel device %d\n", adapter);
|
||||
}
|
||||
|
||||
IDirect3D9_GetAdapterDisplayMode(ctx->d3d9, adapter, &d3ddm);
|
||||
d3dpp.Windowed = TRUE;
|
||||
d3dpp.BackBufferWidth = 640;
|
||||
d3dpp.BackBufferHeight = 480;
|
||||
d3dpp.BackBufferCount = 0;
|
||||
d3dpp.BackBufferFormat = d3ddm.Format;
|
||||
d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
|
||||
d3dpp.Flags = D3DPRESENTFLAG_VIDEO;
|
||||
|
||||
hr = IDirect3D9_CreateDevice(ctx->d3d9, adapter, D3DDEVTYPE_HAL, GetShellWindow(),
|
||||
D3DCREATE_SOFTWARE_VERTEXPROCESSING | D3DCREATE_MULTITHREADED | D3DCREATE_FPU_PRESERVE,
|
||||
&d3dpp, &ctx->d3d9device);
|
||||
if (FAILED(hr)) {
|
||||
av_log(NULL, loglevel, "Failed to create Direct3D device\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
hr = createDeviceManager(&resetToken, &ctx->d3d9devmgr);
|
||||
if (FAILED(hr)) {
|
||||
av_log(NULL, loglevel, "Failed to create Direct3D device manager\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
hr = IDirect3DDeviceManager9_ResetDevice(ctx->d3d9devmgr, ctx->d3d9device, resetToken);
|
||||
if (FAILED(hr)) {
|
||||
av_log(NULL, loglevel, "Failed to bind Direct3D device to device manager\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
hr = IDirect3DDeviceManager9_OpenDeviceHandle(ctx->d3d9devmgr, &ctx->deviceHandle);
|
||||
if (FAILED(hr)) {
|
||||
av_log(NULL, loglevel, "Failed to open device handle\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
hr = IDirect3DDeviceManager9_GetVideoService(ctx->d3d9devmgr, ctx->deviceHandle, &IID_IDirectXVideoDecoderService, (void **)&ctx->decoder_service);
|
||||
if (FAILED(hr)) {
|
||||
av_log(NULL, loglevel, "Failed to create IDirectXVideoDecoderService\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ctx->tmp_frame = av_frame_alloc();
|
||||
if (!ctx->tmp_frame)
|
||||
goto fail;
|
||||
|
||||
s->hwaccel_context = av_mallocz(sizeof(struct dxva_context));
|
||||
if (!s->hwaccel_context)
|
||||
goto fail;
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
dxva2_uninit(s);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
static int dxva2_get_decoder_configuration(AVCodecContext *s, const GUID *device_guid,
|
||||
const DXVA2_VideoDesc *desc,
|
||||
DXVA2_ConfigPictureDecode *config)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
|
||||
DXVA2Context *ctx = ist->hwaccel_ctx;
|
||||
unsigned cfg_count = 0, best_score = 0;
|
||||
DXVA2_ConfigPictureDecode *cfg_list = NULL;
|
||||
DXVA2_ConfigPictureDecode best_cfg = {{0}};
|
||||
HRESULT hr;
|
||||
int i;
|
||||
|
||||
hr = IDirectXVideoDecoderService_GetDecoderConfigurations(ctx->decoder_service, device_guid, desc, NULL, &cfg_count, &cfg_list);
|
||||
if (FAILED(hr)) {
|
||||
av_log(NULL, loglevel, "Unable to retrieve decoder configurations\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
for (i = 0; i < cfg_count; i++) {
|
||||
DXVA2_ConfigPictureDecode *cfg = &cfg_list[i];
|
||||
|
||||
unsigned score;
|
||||
if (cfg->ConfigBitstreamRaw == 1)
|
||||
score = 1;
|
||||
else if (s->codec_id == AV_CODEC_ID_H264 && cfg->ConfigBitstreamRaw == 2)
|
||||
score = 2;
|
||||
else
|
||||
continue;
|
||||
if (IsEqualGUID(&cfg->guidConfigBitstreamEncryption, &DXVA2_NoEncrypt))
|
||||
score += 16;
|
||||
if (score > best_score) {
|
||||
best_score = score;
|
||||
best_cfg = *cfg;
|
||||
}
|
||||
}
|
||||
CoTaskMemFree(cfg_list);
|
||||
|
||||
if (!best_score) {
|
||||
av_log(NULL, loglevel, "No valid decoder configuration available\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
*config = best_cfg;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dxva2_create_decoder(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
|
||||
DXVA2Context *ctx = ist->hwaccel_ctx;
|
||||
struct dxva_context *dxva_ctx = s->hwaccel_context;
|
||||
GUID *guid_list = NULL;
|
||||
unsigned guid_count = 0, i, j;
|
||||
GUID device_guid = GUID_NULL;
|
||||
D3DFORMAT target_format = 0;
|
||||
DXVA2_VideoDesc desc = { 0 };
|
||||
DXVA2_ConfigPictureDecode config;
|
||||
HRESULT hr;
|
||||
int surface_alignment;
|
||||
int ret;
|
||||
|
||||
hr = IDirectXVideoDecoderService_GetDecoderDeviceGuids(ctx->decoder_service, &guid_count, &guid_list);
|
||||
if (FAILED(hr)) {
|
||||
av_log(NULL, loglevel, "Failed to retrieve decoder device GUIDs\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (i = 0; dxva2_modes[i].guid; i++) {
|
||||
D3DFORMAT *target_list = NULL;
|
||||
unsigned target_count = 0;
|
||||
const dxva2_mode *mode = &dxva2_modes[i];
|
||||
if (mode->codec != s->codec_id)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < guid_count; j++) {
|
||||
if (IsEqualGUID(mode->guid, &guid_list[j]))
|
||||
break;
|
||||
}
|
||||
if (j == guid_count)
|
||||
continue;
|
||||
|
||||
hr = IDirectXVideoDecoderService_GetDecoderRenderTargets(ctx->decoder_service, mode->guid, &target_count, &target_list);
|
||||
if (FAILED(hr)) {
|
||||
continue;
|
||||
}
|
||||
for (j = 0; j < target_count; j++) {
|
||||
const D3DFORMAT format = target_list[j];
|
||||
if (format == MKTAG('N','V','1','2')) {
|
||||
target_format = format;
|
||||
break;
|
||||
}
|
||||
}
|
||||
CoTaskMemFree(target_list);
|
||||
if (target_format) {
|
||||
device_guid = *mode->guid;
|
||||
break;
|
||||
}
|
||||
}
|
||||
CoTaskMemFree(guid_list);
|
||||
|
||||
if (IsEqualGUID(&device_guid, &GUID_NULL)) {
|
||||
av_log(NULL, loglevel, "No decoder device for codec found\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
desc.SampleWidth = s->coded_width;
|
||||
desc.SampleHeight = s->coded_height;
|
||||
desc.Format = target_format;
|
||||
|
||||
ret = dxva2_get_decoder_configuration(s, &device_guid, &desc, &config);
|
||||
if (ret < 0) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* decoding MPEG-2 requires additional alignment on some Intel GPUs,
|
||||
but it causes issues for H.264 on certain AMD GPUs..... */
|
||||
if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO)
|
||||
surface_alignment = 32;
|
||||
else
|
||||
surface_alignment = 16;
|
||||
|
||||
/* 4 base work surfaces */
|
||||
ctx->num_surfaces = 4;
|
||||
|
||||
/* add surfaces based on number of possible refs */
|
||||
if (s->codec_id == AV_CODEC_ID_H264)
|
||||
ctx->num_surfaces += 16;
|
||||
else
|
||||
ctx->num_surfaces += 2;
|
||||
|
||||
/* add extra surfaces for frame threading */
|
||||
if (s->active_thread_type & FF_THREAD_FRAME)
|
||||
ctx->num_surfaces += s->thread_count;
|
||||
|
||||
ctx->surfaces = av_mallocz(ctx->num_surfaces * sizeof(*ctx->surfaces));
|
||||
ctx->surface_infos = av_mallocz(ctx->num_surfaces * sizeof(*ctx->surface_infos));
|
||||
|
||||
if (!ctx->surfaces || !ctx->surface_infos) {
|
||||
av_log(NULL, loglevel, "Unable to allocate surface arrays\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
hr = IDirectXVideoDecoderService_CreateSurface(ctx->decoder_service,
|
||||
FFALIGN(s->coded_width, surface_alignment),
|
||||
FFALIGN(s->coded_height, surface_alignment),
|
||||
ctx->num_surfaces - 1,
|
||||
target_format, D3DPOOL_DEFAULT, 0,
|
||||
DXVA2_VideoDecoderRenderTarget,
|
||||
ctx->surfaces, NULL);
|
||||
if (FAILED(hr)) {
|
||||
av_log(NULL, loglevel, "Failed to create %d video surfaces\n", ctx->num_surfaces);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
hr = IDirectXVideoDecoderService_CreateVideoDecoder(ctx->decoder_service, &device_guid,
|
||||
&desc, &config, ctx->surfaces,
|
||||
ctx->num_surfaces, &ctx->decoder);
|
||||
if (FAILED(hr)) {
|
||||
av_log(NULL, loglevel, "Failed to create DXVA2 video decoder\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ctx->decoder_guid = device_guid;
|
||||
ctx->decoder_config = config;
|
||||
|
||||
dxva_ctx->cfg = &ctx->decoder_config;
|
||||
dxva_ctx->decoder = ctx->decoder;
|
||||
dxva_ctx->surface = ctx->surfaces;
|
||||
dxva_ctx->surface_count = ctx->num_surfaces;
|
||||
|
||||
if (IsEqualGUID(&ctx->decoder_guid, &DXVADDI_Intel_ModeH264_E))
|
||||
dxva_ctx->workaround |= FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO;
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
dxva2_destroy_decoder(s);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
int dxva2_init(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
|
||||
DXVA2Context *ctx;
|
||||
int ret;
|
||||
|
||||
if (!ist->hwaccel_ctx) {
|
||||
ret = dxva2_alloc(s);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
ctx = ist->hwaccel_ctx;
|
||||
|
||||
if (s->codec_id == AV_CODEC_ID_H264 &&
|
||||
(s->profile & ~FF_PROFILE_H264_CONSTRAINED) > FF_PROFILE_H264_HIGH) {
|
||||
av_log(NULL, loglevel, "Unsupported H.264 profile for DXVA2 HWAccel: %d\n", s->profile);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
if (ctx->decoder)
|
||||
dxva2_destroy_decoder(s);
|
||||
|
||||
ret = dxva2_create_decoder(s);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, loglevel, "Error creating the DXVA2 decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
@@ -37,23 +37,21 @@
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavutil/samplefmt.h"
|
||||
|
||||
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCodec *codec, enum AVPixelFormat target)
|
||||
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFormat target)
|
||||
{
|
||||
if (codec && codec->pix_fmts) {
|
||||
const enum AVPixelFormat *p = codec->pix_fmts;
|
||||
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
|
||||
int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
|
||||
enum AVPixelFormat best= AV_PIX_FMT_NONE;
|
||||
static const enum AVPixelFormat mjpeg_formats[] =
|
||||
{ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
|
||||
static const enum AVPixelFormat ljpeg_formats[] =
|
||||
{ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
|
||||
const enum AVPixelFormat mjpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
|
||||
const enum AVPixelFormat ljpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
|
||||
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
|
||||
|
||||
if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
|
||||
if (enc_ctx->codec_id == AV_CODEC_ID_MJPEG) {
|
||||
if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
|
||||
if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
|
||||
p = mjpeg_formats;
|
||||
} else if (enc_ctx->codec_id == AV_CODEC_ID_LJPEG) {
|
||||
} else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
|
||||
p =ljpeg_formats;
|
||||
}
|
||||
}
|
||||
@@ -99,21 +97,21 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec)
|
||||
|
||||
static char *choose_pix_fmts(OutputStream *ost)
|
||||
{
|
||||
AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
|
||||
AVDictionaryEntry *strict_dict = av_dict_get(ost->opts, "strict", NULL, 0);
|
||||
if (strict_dict)
|
||||
// used by choose_pixel_fmt() and below
|
||||
av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);
|
||||
av_opt_set(ost->st->codec, "strict", strict_dict->value, 0);
|
||||
|
||||
if (ost->keep_pix_fmt) {
|
||||
if (ost->filter)
|
||||
avfilter_graph_set_auto_convert(ost->filter->graph->graph,
|
||||
AVFILTER_AUTO_CONVERT_NONE);
|
||||
if (ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
|
||||
if (ost->st->codec->pix_fmt == AV_PIX_FMT_NONE)
|
||||
return NULL;
|
||||
return av_strdup(av_get_pix_fmt_name(ost->enc_ctx->pix_fmt));
|
||||
return av_strdup(av_get_pix_fmt_name(ost->st->codec->pix_fmt));
|
||||
}
|
||||
if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
|
||||
return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt)));
|
||||
if (ost->st->codec->pix_fmt != AV_PIX_FMT_NONE) {
|
||||
return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc, ost->st->codec->pix_fmt)));
|
||||
} else if (ost->enc && ost->enc->pix_fmts) {
|
||||
const enum AVPixelFormat *p;
|
||||
AVIOContext *s = NULL;
|
||||
@@ -124,10 +122,10 @@ static char *choose_pix_fmts(OutputStream *ost)
|
||||
exit_program(1);
|
||||
|
||||
p = ost->enc->pix_fmts;
|
||||
if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
|
||||
if (ost->enc_ctx->codec_id == AV_CODEC_ID_MJPEG) {
|
||||
if (ost->st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
|
||||
if (ost->st->codec->codec_id == AV_CODEC_ID_MJPEG) {
|
||||
p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
|
||||
} else if (ost->enc_ctx->codec_id == AV_CODEC_ID_LJPEG) {
|
||||
} else if (ost->st->codec->codec_id == AV_CODEC_ID_LJPEG) {
|
||||
p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
|
||||
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
|
||||
}
|
||||
@@ -149,8 +147,8 @@ static char *choose_pix_fmts(OutputStream *ost)
#define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name) \
static char *choose_ ## var ## s(OutputStream *ost) \
{ \
    if (ost->enc_ctx->var != none) { \
        get_name(ost->enc_ctx->var); \
    if (ost->st->codec->var != none) { \
        get_name(ost->st->codec->var); \
        return av_strdup(name); \
    } else if (ost->enc && ost->enc->supported_list) { \
        const type *p; \
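The DEF_CHOOSE_FORMAT macro in the hunk above relies on `##` token pasting to stamp out one choose_<var>s() helper per format family. As an illustration of that mechanism only, here is a minimal, self-contained sketch with stand-in types; DEF_CHOOSE and choose_sample_rates below are hypothetical names, not the real ffmpeg macro or its invocations.

```c
#include <stdio.h>

/* Stand-in for the ## pasting pattern used by DEF_CHOOSE_FORMAT: each
 * invocation generates one choose_<var>s() function at compile time. */
#define DEF_CHOOSE(type, var, none, fallback)                       \
    static type choose_ ## var ## s(type configured)                \
    {                                                               \
        /* use the configured value if set, otherwise fall back */  \
        return configured != (none) ? configured : (fallback);     \
    }

DEF_CHOOSE(int, sample_rate, 0, 44100)   /* defines choose_sample_rates() */

int main(void)
{
    printf("%d\n", choose_sample_rates(0));      /* prints 44100 (fallback) */
    printf("%d\n", choose_sample_rates(48000));  /* prints 48000 (configured) */
    return 0;
}
```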
@@ -262,7 +260,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
|
||||
/* find the first unused stream of corresponding type */
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
ist = input_streams[i];
|
||||
if (ist->dec_ctx->codec_type == type && ist->discard)
|
||||
if (ist->st->codec->codec_type == type && ist->discard)
|
||||
break;
|
||||
}
|
||||
if (i == nb_input_streams) {
|
||||
@@ -344,7 +342,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
|
||||
char *pix_fmts;
|
||||
OutputStream *ost = ofilter->ost;
|
||||
OutputFile *of = output_files[ost->file_index];
|
||||
AVCodecContext *codec = ost->enc_ctx;
|
||||
AVCodecContext *codec = ost->st->codec;
|
||||
AVFilterContext *last_filter = out->filter_ctx;
|
||||
int pad_idx = out->pad_idx;
|
||||
int ret;
|
||||
@@ -434,7 +432,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
|
||||
{
|
||||
OutputStream *ost = ofilter->ost;
|
||||
OutputFile *of = output_files[ost->file_index];
|
||||
AVCodecContext *codec = ost->enc_ctx;
|
||||
AVCodecContext *codec = ost->st->codec;
|
||||
AVFilterContext *last_filter = out->filter_ctx;
|
||||
int pad_idx = out->pad_idx;
|
||||
char *sample_fmts, *sample_rates, *channel_layouts;
|
||||
@@ -595,8 +593,8 @@ static int sub2video_prepare(InputStream *ist)
|
||||
/* Compute the size of the canvas for the subtitles stream.
|
||||
If the subtitles codec has set a size, use it. Otherwise use the
|
||||
maximum dimensions of the video streams in the same file. */
|
||||
w = ist->dec_ctx->width;
|
||||
h = ist->dec_ctx->height;
|
||||
w = ist->st->codec->width;
|
||||
h = ist->st->codec->height;
|
||||
if (!(w && h)) {
|
||||
for (i = 0; i < avf->nb_streams; i++) {
|
||||
if (avf->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
@@ -610,12 +608,12 @@ static int sub2video_prepare(InputStream *ist)
|
||||
}
|
||||
av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
|
||||
}
|
||||
ist->sub2video.w = ist->dec_ctx->width = ist->resample_width = w;
|
||||
ist->sub2video.h = ist->dec_ctx->height = ist->resample_height = h;
|
||||
ist->sub2video.w = ist->st->codec->width = ist->resample_width = w;
|
||||
ist->sub2video.h = ist->st->codec->height = ist->resample_height = h;
|
||||
|
||||
/* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
|
||||
palettes for all rectangles are identical or compatible */
|
||||
ist->resample_pix_fmt = ist->dec_ctx->pix_fmt = AV_PIX_FMT_RGB32;
|
||||
ist->resample_pix_fmt = ist->st->codec->pix_fmt = AV_PIX_FMT_RGB32;
|
||||
|
||||
ist->sub2video.frame = av_frame_alloc();
|
||||
if (!ist->sub2video.frame)
|
||||
@@ -638,7 +636,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
char name[255];
|
||||
int ret, pad_idx = 0;
|
||||
|
||||
if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
@@ -646,7 +644,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
if (!fr.num)
|
||||
fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);
|
||||
|
||||
if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
|
||||
if (ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
|
||||
ret = sub2video_prepare(ist);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
@@ -654,7 +652,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
|
||||
sar = ist->st->sample_aspect_ratio.num ?
|
||||
ist->st->sample_aspect_ratio :
|
||||
ist->dec_ctx->sample_aspect_ratio;
|
||||
ist->st->codec->sample_aspect_ratio;
|
||||
if(!sar.den)
|
||||
sar = (AVRational){0,1};
|
||||
av_bprint_init(&args, 0, 1);
|
||||
@@ -664,7 +662,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
ist->resample_height,
|
||||
ist->hwaccel_retrieve_data ? ist->hwaccel_retrieved_pix_fmt : ist->resample_pix_fmt,
|
||||
tb.num, tb.den, sar.num, sar.den,
|
||||
SWS_BILINEAR + ((ist->dec_ctx->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
|
||||
SWS_BILINEAR + ((ist->st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
|
||||
if (fr.num && fr.den)
|
||||
av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
|
||||
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
|
||||
@@ -732,21 +730,21 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
char name[255];
|
||||
int ret, pad_idx = 0;
|
||||
|
||||
if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
|
||||
if (ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
|
||||
av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
|
||||
1, ist->dec_ctx->sample_rate,
|
||||
ist->dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(ist->dec_ctx->sample_fmt));
|
||||
if (ist->dec_ctx->channel_layout)
|
||||
1, ist->st->codec->sample_rate,
|
||||
ist->st->codec->sample_rate,
|
||||
av_get_sample_fmt_name(ist->st->codec->sample_fmt));
|
||||
if (ist->st->codec->channel_layout)
|
||||
av_bprintf(&args, ":channel_layout=0x%"PRIx64,
|
||||
ist->dec_ctx->channel_layout);
|
||||
ist->st->codec->channel_layout);
|
||||
else
|
||||
av_bprintf(&args, ":channels=%d", ist->dec_ctx->channels);
|
||||
av_bprintf(&args, ":channels=%d", ist->st->codec->channels);
|
||||
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
|
||||
ist->file_index, ist->st->index);
|
||||
|
||||
@@ -874,7 +872,7 @@ int configure_filtergraph(FilterGraph *fg)
|
||||
args[strlen(args) - 1] = '\0';
|
||||
fg->graph->resample_lavr_opts = av_strdup(args);
|
||||
|
||||
e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
|
||||
e = av_dict_get(ost->opts, "threads", NULL, 0);
|
||||
if (e)
|
||||
av_opt_set(fg->graph, "threads", e->value, 0);
|
||||
}
|
||||
@@ -919,16 +917,6 @@ int configure_filtergraph(FilterGraph *fg)
|
||||
}
|
||||
|
||||
fg->reconfiguration = 1;
|
||||
|
||||
for (i = 0; i < fg->nb_outputs; i++) {
|
||||
OutputStream *ost = fg->outputs[i]->ost;
|
||||
if (ost &&
|
||||
ost->enc->type == AVMEDIA_TYPE_AUDIO &&
|
||||
!(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
|
||||
av_buffersink_set_frame_size(ost->filter->filter,
|
||||
ost->enc_ctx->frame_size);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
140  ffmpeg_opt.c
@@ -66,12 +66,6 @@
const HWAccel hwaccels[] = {
#if HAVE_VDPAU_X11
    { "vdpau", vdpau_init, HWACCEL_VDPAU, AV_PIX_FMT_VDPAU },
#endif
#if HAVE_DXVA2_LIB
    { "dxva2", dxva2_init, HWACCEL_DXVA2, AV_PIX_FMT_DXVA2_VLD },
#endif
#if CONFIG_VDA
    { "vda", vda_init, HWACCEL_VDA, AV_PIX_FMT_VDA },
#endif
    { 0 },
};
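The hwaccels[] table above maps a name to an init callback, an id and a pixel format. How ffmpeg_opt.c consumes the table is not part of this hunk, but a by-name lookup over such a sentinel-terminated array typically looks like the following standalone sketch; HWAccelEntry, hwaccel_table, dummy_init and find_hwaccel are stand-ins, not the real definitions from ffmpeg.h.

```c
#include <stdio.h>
#include <string.h>

/* Stand-in struct mirroring only the fields visible in the initializer above. */
typedef struct HWAccelEntry {
    const char *name;
    int       (*init)(void *avctx);
    int         id;
    int         pix_fmt;
} HWAccelEntry;

static int dummy_init(void *avctx) { (void)avctx; return 0; }

static const HWAccelEntry hwaccel_table[] = {
    { "vdpau", dummy_init, 1, 0 },
    { "dxva2", dummy_init, 2, 0 },
    { 0 },
};

/* Linear search terminated by the { 0 } sentinel entry. */
static const HWAccelEntry *find_hwaccel(const char *name)
{
    for (int i = 0; hwaccel_table[i].name; i++)
        if (!strcmp(hwaccel_table[i].name, name))
            return &hwaccel_table[i];
    return NULL;
}

int main(void)
{
    const HWAccelEntry *h = find_hwaccel("dxva2");
    printf("%s\n", h ? h->name : "not found");   /* prints "dxva2" */
    return 0;
}
```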
@@ -104,6 +98,8 @@ float max_error_rate = 2.0/3;
|
||||
static int intra_only = 0;
|
||||
static int file_overwrite = 0;
|
||||
static int no_file_overwrite = 0;
|
||||
static int video_discard = 0;
|
||||
static int intra_dc_precision = 8;
|
||||
static int do_psnr = 0;
|
||||
static int input_sync;
|
||||
static int override_ffserver = 0;
|
||||
@@ -562,7 +558,7 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
|
||||
* list of input streams. */
|
||||
static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
{
|
||||
int i, ret;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ic->nb_streams; i++) {
|
||||
AVStream *st = ic->streams[i];
|
||||
@@ -571,8 +567,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
char *framerate = NULL, *hwaccel = NULL, *hwaccel_device = NULL;
|
||||
char *codec_tag = NULL;
|
||||
char *next;
|
||||
char *discard_str = NULL;
|
||||
const AVOption *discard_opt = av_opt_find(dec, "skip_frame", NULL, 0, 0);
|
||||
|
||||
if (!ist)
|
||||
exit_program(1);
|
||||
@@ -597,33 +591,13 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
}
|
||||
|
||||
ist->dec = choose_decoder(o, ic, st);
|
||||
ist->decoder_opts = filter_codec_opts(o->g->codec_opts, ist->st->codec->codec_id, ic, st, ist->dec);
|
||||
ist->opts = filter_codec_opts(o->g->codec_opts, ist->st->codec->codec_id, ic, st, ist->dec);
|
||||
|
||||
ist->reinit_filters = -1;
|
||||
MATCH_PER_STREAM_OPT(reinit_filters, i, ist->reinit_filters, ic, st);
|
||||
|
||||
MATCH_PER_STREAM_OPT(discard, str, discard_str, ic, st);
|
||||
ist->user_set_discard = AVDISCARD_NONE;
|
||||
if (discard_str && av_opt_eval_int(dec, discard_opt, discard_str, &ist->user_set_discard) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error parsing discard %s.\n",
|
||||
discard_str);
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE;
|
||||
|
||||
ist->dec_ctx = avcodec_alloc_context3(ist->dec);
|
||||
if (!ist->dec_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error allocating the decoder context.\n");
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
ret = avcodec_copy_context(ist->dec_ctx, dec);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error initializing the decoder context.\n");
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
switch (dec->codec_type) {
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
if(!ist->dec)
|
||||
@@ -632,9 +606,9 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
dec->flags |= CODEC_FLAG_EMU_EDGE;
|
||||
}
|
||||
|
||||
ist->resample_height = ist->dec_ctx->height;
|
||||
ist->resample_width = ist->dec_ctx->width;
|
||||
ist->resample_pix_fmt = ist->dec_ctx->pix_fmt;
|
||||
ist->resample_height = dec->height;
|
||||
ist->resample_width = dec->width;
|
||||
ist->resample_pix_fmt = dec->pix_fmt;
|
||||
|
||||
MATCH_PER_STREAM_OPT(frame_rates, str, framerate, ic, st);
|
||||
if (framerate && av_parse_video_rate(&ist->framerate,
|
||||
@@ -688,10 +662,10 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
MATCH_PER_STREAM_OPT(guess_layout_max, i, ist->guess_layout_max, ic, st);
|
||||
guess_input_channel_layout(ist);
|
||||
|
||||
ist->resample_sample_fmt = ist->dec_ctx->sample_fmt;
|
||||
ist->resample_sample_rate = ist->dec_ctx->sample_rate;
|
||||
ist->resample_channels = ist->dec_ctx->channels;
|
||||
ist->resample_channel_layout = ist->dec_ctx->channel_layout;
|
||||
ist->resample_sample_fmt = dec->sample_fmt;
|
||||
ist->resample_sample_rate = dec->sample_rate;
|
||||
ist->resample_channels = dec->channels;
|
||||
ist->resample_channel_layout = dec->channel_layout;
|
||||
|
||||
break;
|
||||
case AVMEDIA_TYPE_DATA:
|
||||
@@ -887,11 +861,9 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
ret = avformat_find_stream_info(ic, opts);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
|
||||
if (ic->nb_streams == 0) {
|
||||
avformat_close_input(&ic);
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
|
||||
timestamp = (o->start_time == AV_NOPTS_VALUE) ? 0 : o->start_time;
|
||||
/* add the stream start time */
|
||||
@@ -933,7 +905,7 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
unused_opts = strip_specifiers(o->g->codec_opts);
|
||||
for (i = f->ist_index; i < nb_input_streams; i++) {
|
||||
e = NULL;
|
||||
while ((e = av_dict_get(input_streams[i]->decoder_opts, "", e,
|
||||
while ((e = av_dict_get(input_streams[i]->opts, "", e,
|
||||
AV_DICT_IGNORE_SUFFIX)))
|
||||
av_dict_set(&unused_opts, e->key, NULL, 0);
|
||||
}
|
||||
@@ -1075,19 +1047,11 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
ost->st = st;
|
||||
st->codec->codec_type = type;
|
||||
choose_encoder(o, oc, ost);
|
||||
|
||||
ost->enc_ctx = avcodec_alloc_context3(ost->enc);
|
||||
if (!ost->enc_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error allocating the encoding context.\n");
|
||||
exit_program(1);
|
||||
}
|
||||
ost->enc_ctx->codec_type = type;
|
||||
|
||||
if (ost->enc) {
|
||||
AVIOContext *s = NULL;
|
||||
char *buf = NULL, *arg = NULL, *preset = NULL;
|
||||
|
||||
ost->encoder_opts = filter_codec_opts(o->g->codec_opts, ost->enc->id, oc, st, ost->enc);
|
||||
ost->opts = filter_codec_opts(o->g->codec_opts, ost->enc->id, oc, st, ost->enc);
|
||||
|
||||
MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
|
||||
if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
|
||||
@@ -1102,7 +1066,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
exit_program(1);
|
||||
}
|
||||
*arg++ = 0;
|
||||
av_dict_set(&ost->encoder_opts, buf, arg, AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
|
||||
av_free(buf);
|
||||
} while (!s->eof_reached);
|
||||
avio_close(s);
|
||||
@@ -1114,9 +1078,12 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
exit_program(1);
|
||||
}
|
||||
} else {
|
||||
ost->encoder_opts = filter_codec_opts(o->g->codec_opts, AV_CODEC_ID_NONE, oc, st, NULL);
|
||||
ost->opts = filter_codec_opts(o->g->codec_opts, AV_CODEC_ID_NONE, oc, st, NULL);
|
||||
}
|
||||
|
||||
avcodec_get_context_defaults3(st->codec, ost->enc);
|
||||
st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
|
||||
|
||||
ost->max_frames = INT64_MAX;
|
||||
MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
|
||||
for (i = 0; i<o->nb_max_frames; i++) {
|
||||
@@ -1152,17 +1119,17 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
uint32_t tag = strtol(codec_tag, &next, 0);
|
||||
if (*next)
|
||||
tag = AV_RL32(codec_tag);
|
||||
ost->enc_ctx->codec_tag = tag;
|
||||
st->codec->codec_tag = tag;
|
||||
}
|
||||
|
||||
MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
|
||||
if (qscale >= 0) {
|
||||
ost->enc_ctx->flags |= CODEC_FLAG_QSCALE;
|
||||
ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
|
||||
st->codec->flags |= CODEC_FLAG_QSCALE;
|
||||
st->codec->global_quality = FF_QP2LAMBDA * qscale;
|
||||
}
|
||||
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
ost->enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
av_opt_get_int(o->g->sws_opts, "sws_flags", 0, &ost->sws_flags);
|
||||
|
||||
@@ -1176,7 +1143,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
if (source_index >= 0) {
|
||||
ost->sync_ist = input_streams[source_index];
|
||||
input_streams[source_index]->discard = 0;
|
||||
input_streams[source_index]->st->discard = input_streams[source_index]->user_set_discard;
|
||||
input_streams[source_index]->st->discard = AVDISCARD_NONE;
|
||||
}
|
||||
ost->last_mux_dts = AV_NOPTS_VALUE;
|
||||
|
||||
@@ -1272,7 +1239,7 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
|
||||
ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO, source_index);
|
||||
st = ost->st;
|
||||
video_enc = ost->enc_ctx;
|
||||
video_enc = st->codec;
|
||||
|
||||
MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
|
||||
if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
|
||||
@@ -1377,6 +1344,7 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
if (p) p++;
|
||||
}
|
||||
video_enc->rc_override_count = i;
|
||||
video_enc->intra_dc_precision = intra_dc_precision - 8;
|
||||
|
||||
if (do_psnr)
|
||||
video_enc->flags|= CODEC_FLAG_PSNR;
|
||||
@@ -1386,11 +1354,11 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
if (do_pass) {
|
||||
if (do_pass & 1) {
|
||||
video_enc->flags |= CODEC_FLAG_PASS1;
|
||||
av_dict_set(&ost->encoder_opts, "flags", "+pass1", AV_DICT_APPEND);
|
||||
av_dict_set(&ost->opts, "flags", "+pass1", AV_DICT_APPEND);
|
||||
}
|
||||
if (do_pass & 2) {
|
||||
video_enc->flags |= CODEC_FLAG_PASS2;
|
||||
av_dict_set(&ost->encoder_opts, "flags", "+pass2", AV_DICT_APPEND);
|
||||
av_dict_set(&ost->opts, "flags", "+pass2", AV_DICT_APPEND);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1432,7 +1400,7 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO, source_index);
|
||||
st = ost->st;
|
||||
|
||||
audio_enc = ost->enc_ctx;
|
||||
audio_enc = st->codec;
|
||||
audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
|
||||
|
||||
MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st);
|
||||
@@ -1462,21 +1430,10 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
/* check for channel mapping for this audio stream */
|
||||
for (n = 0; n < o->nb_audio_channel_maps; n++) {
|
||||
AudioChannelMap *map = &o->audio_channel_maps[n];
|
||||
if ((map->ofile_idx == -1 || ost->file_index == map->ofile_idx) &&
|
||||
InputStream *ist = input_streams[ost->source_index];
|
||||
if ((map->channel_idx == -1 || (ist->file_index == map->file_idx && ist->st->index == map->stream_idx)) &&
|
||||
(map->ofile_idx == -1 || ost->file_index == map->ofile_idx) &&
|
||||
(map->ostream_idx == -1 || ost->st->index == map->ostream_idx)) {
|
||||
InputStream *ist;
|
||||
|
||||
if (map->channel_idx == -1) {
|
||||
ist = NULL;
|
||||
} else if (ost->source_index < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Cannot determine input stream for channel mapping %d.%d\n",
|
||||
ost->file_index, ost->st->index);
|
||||
continue;
|
||||
} else {
|
||||
ist = input_streams[ost->source_index];
|
||||
}
|
||||
|
||||
if (!ist || (ist->file_index == map->file_idx && ist->st->index == map->stream_idx)) {
|
||||
if (ost->audio_channels_mapped < FF_ARRAY_ELEMS(ost->audio_channels_map))
|
||||
ost->audio_channels_map[ost->audio_channels_mapped++] = map->channel_idx;
|
||||
else
|
||||
@@ -1485,7 +1442,6 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ost->stream_copy)
|
||||
check_streamcopy_filters(o, oc, ost, AVMEDIA_TYPE_AUDIO);
|
||||
@@ -1522,7 +1478,7 @@ static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc,
|
||||
|
||||
ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE, source_index);
|
||||
st = ost->st;
|
||||
subtitle_enc = ost->enc_ctx;
|
||||
subtitle_enc = st->codec;
|
||||
|
||||
subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
|
||||
|
||||
@@ -1640,8 +1596,7 @@ static int read_ffserver_streams(OptionsContext *o, AVFormatContext *s, const ch
|
||||
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && !ost->stream_copy)
|
||||
choose_sample_fmt(st, codec);
|
||||
else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && !ost->stream_copy)
|
||||
choose_pixel_fmt(st, st->codec, codec, st->codec->pix_fmt);
|
||||
avcodec_copy_context(ost->enc_ctx, st->codec);
|
||||
choose_pixel_fmt(st, codec, st->codec->pix_fmt);
|
||||
}
|
||||
|
||||
avformat_close_input(&ic);
|
||||
@@ -1814,7 +1769,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) ost->avfilter = av_strdup("anull");
|
||||
if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) ost->avfilter = av_strdup("null");
|
||||
ist->discard = 0;
|
||||
ist->st->discard = ist->user_set_discard;
|
||||
ist->st->discard = AVDISCARD_NONE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -1828,7 +1783,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
/* pick the "best" stream of each type */
|
||||
|
||||
/* video: highest resolution */
|
||||
if (!o->video_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO) != AV_CODEC_ID_NONE) {
|
||||
if (!o->video_disable && oc->oformat->video_codec != AV_CODEC_ID_NONE) {
|
||||
int area = 0, idx = -1;
|
||||
int qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0);
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
@@ -1850,7 +1805,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
}
|
||||
|
||||
/* audio: most channels */
|
||||
if (!o->audio_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_AUDIO) != AV_CODEC_ID_NONE) {
|
||||
if (!o->audio_disable && oc->oformat->audio_codec != AV_CODEC_ID_NONE) {
|
||||
int channels = 0, idx = -1;
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
ist = input_streams[i];
|
||||
@@ -1959,8 +1914,8 @@ loop_end:
|
||||
ost->stream_copy = 0;
|
||||
ost->attachment_filename = o->attachments[i];
|
||||
ost->finished = 1;
|
||||
ost->enc_ctx->extradata = attachment;
|
||||
ost->enc_ctx->extradata_size = len;
|
||||
ost->st->codec->extradata = attachment;
|
||||
ost->st->codec->extradata_size = len;
|
||||
|
||||
p = strrchr(o->attachments[i], '/');
|
||||
av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
|
||||
@@ -1982,7 +1937,7 @@ loop_end:
|
||||
unused_opts = strip_specifiers(o->g->codec_opts);
|
||||
for (i = of->ost_index; i < nb_output_streams; i++) {
|
||||
e = NULL;
|
||||
while ((e = av_dict_get(output_streams[i]->encoder_opts, "", e,
|
||||
while ((e = av_dict_get(output_streams[i]->opts, "", e,
|
||||
AV_DICT_IGNORE_SUFFIX)))
|
||||
av_dict_set(&unused_opts, e->key, NULL, 0);
|
||||
}
|
||||
@@ -2093,8 +2048,6 @@ loop_end:
|
||||
continue;
|
||||
ist = input_streams[output_streams[i]->source_index];
|
||||
av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
|
||||
if (!output_streams[i]->stream_copy)
|
||||
av_dict_set(&output_streams[i]->st->metadata, "encoder", NULL, 0);
|
||||
}
|
||||
|
||||
/* process manually set metadata */
|
||||
@@ -2398,11 +2351,7 @@ static int opt_old2new(void *optctx, const char *opt, const char *arg)
|
||||
static int opt_bitrate(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
OptionsContext *o = optctx;
|
||||
|
||||
if(!strcmp(opt, "ab")){
|
||||
av_dict_set(&o->g->codec_opts, "b:a", arg, 0);
|
||||
return 0;
|
||||
} else if(!strcmp(opt, "b")){
|
||||
if(!strcmp(opt, "b")){
|
||||
av_log(NULL, AV_LOG_WARNING, "Please use -b:a or -b:v, -b is ambiguous\n");
|
||||
av_dict_set(&o->g->codec_opts, "b:v", arg, 0);
|
||||
return 0;
|
||||
@@ -2866,9 +2815,6 @@ const OptionDef options[] = {
|
||||
"print timestamp debugging info" },
|
||||
{ "max_error_rate", HAS_ARG | OPT_FLOAT, { &max_error_rate },
|
||||
"maximum error rate", "ratio of errors (0.0: no errors, 1.0: 100% errors) above which ffmpeg returns an error instead of success." },
|
||||
{ "discard", OPT_STRING | HAS_ARG | OPT_SPEC |
|
||||
OPT_INPUT, { .off = OFFSET(discard) },
|
||||
"discard", "" },
|
||||
|
||||
/* video options */
|
||||
{ "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
|
||||
@@ -2891,6 +2837,8 @@ const OptionDef options[] = {
|
||||
"deprecated use -g 1" },
|
||||
{ "vn", OPT_VIDEO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(video_disable) },
|
||||
"disable video" },
|
||||
{ "vdt", OPT_VIDEO | OPT_INT | HAS_ARG | OPT_EXPERT , { &video_discard },
|
||||
"discard threshold", "n" },
|
||||
{ "rc_override", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
|
||||
OPT_OUTPUT, { .off = OFFSET(rc_overrides) },
|
||||
"rate control override for specific intervals", "override" },
|
||||
@@ -2930,6 +2878,8 @@ const OptionDef options[] = {
|
||||
{ "top", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_INT| OPT_SPEC |
|
||||
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(top_field_first) },
|
||||
"top=1/bottom=0/auto=-1 field first", "" },
|
||||
{ "dc", OPT_VIDEO | OPT_INT | HAS_ARG | OPT_EXPERT , { &intra_dc_precision },
|
||||
"intra_dc_precision", "precision" },
|
||||
{ "vtag", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
|
||||
OPT_OUTPUT, { .func_arg = opt_old2new },
|
||||
"force video tag/fourcc", "fourcc/tag" },
|
||||
@@ -2944,8 +2894,6 @@ const OptionDef options[] = {
|
||||
{ "force_key_frames", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
|
||||
OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(forced_key_frames) },
|
||||
"force key frames at specified timestamps", "timestamps" },
|
||||
{ "ab", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
|
||||
"audio bitrate (please use -b:a)", "bitrate" },
|
||||
{ "b", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
|
||||
"video bitrate (please use -b:v)", "bitrate" },
|
||||
{ "hwaccel", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
|
||||
|
134  ffmpeg_vda.c
@@ -1,134 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "libavcodec/vda.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
|
||||
#include "ffmpeg.h"
|
||||
|
||||
typedef struct VDAContext {
|
||||
AVFrame *tmp_frame;
|
||||
} VDAContext;
|
||||
|
||||
static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
VDAContext *vda = ist->hwaccel_ctx;
|
||||
CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
|
||||
OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
|
||||
CVReturn err;
|
||||
uint8_t *data[4] = { 0 };
|
||||
int linesize[4] = { 0 };
|
||||
int planes, ret, i;
|
||||
|
||||
av_frame_unref(vda->tmp_frame);
|
||||
|
||||
switch (pixel_format) {
|
||||
case kCVPixelFormatType_420YpCbCr8Planar: vda->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
|
||||
case kCVPixelFormatType_422YpCbCr8: vda->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
|
||||
default:
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"Unsupported pixel format: %u\n", pixel_format);
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
|
||||
vda->tmp_frame->width = frame->width;
|
||||
vda->tmp_frame->height = frame->height;
|
||||
ret = av_frame_get_buffer(vda->tmp_frame, 32);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
|
||||
if (err != kCVReturnSuccess) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
|
||||
if (CVPixelBufferIsPlanar(pixbuf)) {
|
||||
|
||||
planes = CVPixelBufferGetPlaneCount(pixbuf);
|
||||
for (i = 0; i < planes; i++) {
|
||||
data[i] = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
|
||||
linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
|
||||
}
|
||||
} else {
|
||||
data[0] = CVPixelBufferGetBaseAddress(pixbuf);
|
||||
linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
|
||||
}
|
||||
|
||||
av_image_copy(vda->tmp_frame->data, vda->tmp_frame->linesize,
|
||||
data, linesize, vda->tmp_frame->format,
|
||||
frame->width, frame->height);
|
||||
|
||||
ret = av_frame_copy_props(vda->tmp_frame, frame);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
av_frame_unref(frame);
|
||||
av_frame_move_ref(frame, vda->tmp_frame);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vda_uninit(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
VDAContext *vda = ist->hwaccel_ctx;
|
||||
|
||||
ist->hwaccel_uninit = NULL;
|
||||
ist->hwaccel_retrieve_data = NULL;
|
||||
|
||||
av_frame_free(&vda->tmp_frame);
|
||||
|
||||
av_vda_default_free(s);
|
||||
av_freep(&ist->hwaccel_ctx);
|
||||
}
|
||||
|
||||
int vda_init(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
|
||||
VDAContext *vda;
|
||||
int ret;
|
||||
|
||||
vda = av_mallocz(sizeof(*vda));
|
||||
if (!vda)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ist->hwaccel_ctx = vda;
|
||||
ist->hwaccel_uninit = vda_uninit;
|
||||
ist->hwaccel_retrieve_data = vda_retrieve_data;
|
||||
|
||||
vda->tmp_frame = av_frame_alloc();
|
||||
if (!vda->tmp_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = av_vda_default_init(s);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, loglevel, "Error creating VDA decoder.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
vda_uninit(s);
|
||||
return ret;
|
||||
}
|
236  ffplay.c
@@ -67,13 +67,12 @@ const int program_birth_year = 2003;
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_FRAMES 5

/* Minimum SDL audio buffer size, in samples. */
#define SDL_AUDIO_MIN_BUFFER_SIZE 512
/* Calculate actual buffer size keeping in mind not cause too frequent audio callbacks */
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the minimum AV sync threshold */
#define AV_SYNC_THRESHOLD_MIN 0.04
#define AV_SYNC_THRESHOLD_MIN 0.01
/* AV sync correction is done if above the maximum AV sync threshold */
#define AV_SYNC_THRESHOLD_MAX 0.1
/* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
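The SDL_AUDIO_MIN_BUFFER_SIZE / SDL_AUDIO_MAX_CALLBACKS_PER_SEC pair above replaces the fixed SDL_AUDIO_BUFFER_SIZE, and is later combined in audio_open() (see the hunk further down) as FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC)). A small standalone check of what that evaluates to for an assumed 44100 Hz device (an illustrative rate, not fixed by this diff):

```c
#include <stdio.h>
#include <libavutil/common.h>   /* av_log2(), FFMAX() */

int main(void)
{
    int freq           = 44100;  /* assumed device sample rate */
    int min_buffer     = 512;    /* SDL_AUDIO_MIN_BUFFER_SIZE */
    int max_cb_per_sec = 30;     /* SDL_AUDIO_MAX_CALLBACKS_PER_SEC */

    /* 44100 / 30 = 1470 samples; rounded up to the next power of two -> 2048 */
    int samples = FFMAX(min_buffer, 2 << av_log2(freq / max_cb_per_sec));

    printf("%d samples per callback (~%.1f callbacks/s)\n",
           samples, (double)freq / samples);
    return 0;
}
```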
@@ -120,7 +119,7 @@ typedef struct PacketQueue {
|
||||
} PacketQueue;
|
||||
|
||||
#define VIDEO_PICTURE_QUEUE_SIZE 3
|
||||
#define SUBPICTURE_QUEUE_SIZE 16
|
||||
#define SUBPICTURE_QUEUE_SIZE 4
|
||||
|
||||
typedef struct VideoPicture {
|
||||
double pts; // presentation timestamp for this picture
|
||||
@@ -203,7 +202,7 @@ typedef struct VideoState {
|
||||
AVStream *audio_st;
|
||||
PacketQueue audioq;
|
||||
int audio_hw_buf_size;
|
||||
uint8_t silence_buf[SDL_AUDIO_MIN_BUFFER_SIZE];
|
||||
uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
|
||||
uint8_t *audio_buf;
|
||||
uint8_t *audio_buf1;
|
||||
unsigned int audio_buf_size; /* in bytes */
|
||||
@@ -256,7 +255,7 @@ typedef struct VideoState {
|
||||
int64_t video_current_pos; // current displayed file pos
|
||||
double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
|
||||
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
|
||||
int pictq_size, pictq_rindex, pictq_windex, pictq_rindex_shown;
|
||||
int pictq_size, pictq_rindex, pictq_windex;
|
||||
SDL_mutex *pictq_mutex;
|
||||
SDL_cond *pictq_cond;
|
||||
#if !CONFIG_AVFILTER
|
||||
@@ -269,7 +268,6 @@ typedef struct VideoState {
|
||||
int step;
|
||||
|
||||
#if CONFIG_AVFILTER
|
||||
int vfilter_idx;
|
||||
AVFilterContext *in_video_filter; // the first filter in the video chain
|
||||
AVFilterContext *out_video_filter; // the last filter in the video chain
|
||||
AVFilterContext *in_audio_filter; // the first filter in the audio chain
|
||||
@@ -310,6 +308,7 @@ static int workaround_bugs = 1;
|
||||
static int fast = 0;
|
||||
static int genpts = 0;
|
||||
static int lowres = 0;
|
||||
static int error_concealment = 3;
|
||||
static int decoder_reorder_pts = -1;
|
||||
static int autoexit;
|
||||
static int exit_on_keydown;
|
||||
@@ -325,11 +324,9 @@ double rdftspeed = 0.02;
|
||||
static int64_t cursor_last_shown;
|
||||
static int cursor_hidden = 0;
|
||||
#if CONFIG_AVFILTER
|
||||
static const char **vfilters_list = NULL;
|
||||
static int nb_vfilters = 0;
|
||||
static char *vfilters = NULL;
|
||||
static char *afilters = NULL;
|
||||
#endif
|
||||
static int autorotate = 1;
|
||||
|
||||
/* current context */
|
||||
static int is_full_screen;
|
||||
@@ -342,15 +339,6 @@ static AVPacket flush_pkt;
|
||||
|
||||
static SDL_Surface *screen;
|
||||
|
||||
#if CONFIG_AVFILTER
|
||||
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
GROW_ARRAY(vfilters_list, nb_vfilters);
|
||||
vfilters_list[nb_vfilters - 1] = arg;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline
|
||||
int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
|
||||
enum AVSampleFormat fmt2, int64_t channel_count2)
|
||||
@@ -808,21 +796,19 @@ static void free_subpicture(SubPicture *sp)
    avsubtitle_free(&sp->sub);
}

static void calculate_display_rect(SDL_Rect *rect,
                                   int scr_xleft, int scr_ytop, int scr_width, int scr_height,
                                   int pic_width, int pic_height, AVRational pic_sar)
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
{
    float aspect_ratio;
    int width, height, x, y;

    if (pic_sar.num == 0)
    if (vp->sar.num == 0)
        aspect_ratio = 0;
    else
        aspect_ratio = av_q2d(pic_sar);
        aspect_ratio = av_q2d(vp->sar);

    if (aspect_ratio <= 0.0)
        aspect_ratio = 1.0;
    aspect_ratio *= (float)pic_width / (float)pic_height;
    aspect_ratio *= (float)vp->width / (float)vp->height;

    /* XXX: we suppose the screen has a 1.0 pixel ratio */
    height = scr_height;
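As a quick sanity check of the aspect-ratio math in calculate_display_rect() above (the pic_sar and vp->sar variants compute the same value), here is a standalone example using an assumed 720x576 picture with a 16/15 sample aspect ratio; those numbers are illustrative and not taken from this diff.

```c
#include <stdio.h>

int main(void)
{
    int   pic_width = 720, pic_height = 576;   /* assumed picture size */
    float sar = 16.0f / 15.0f;                 /* assumed av_q2d(pic_sar) */

    float aspect_ratio = sar;
    if (aspect_ratio <= 0.0f)
        aspect_ratio = 1.0f;                   /* unknown SAR -> treat as square pixels */
    aspect_ratio *= (float)pic_width / (float)pic_height;

    printf("display aspect ratio: %.4f (4:3 = %.4f)\n", aspect_ratio, 4.0f / 3.0f);
    return 0;
}
```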
@@ -847,7 +833,7 @@ static void video_image_display(VideoState *is)
|
||||
SDL_Rect rect;
|
||||
int i;
|
||||
|
||||
vp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown) % VIDEO_PICTURE_QUEUE_SIZE];
|
||||
vp = &is->pictq[is->pictq_rindex];
|
||||
if (vp->bmp) {
|
||||
if (is->subtitle_st) {
|
||||
if (is->subpq_size > 0) {
|
||||
@@ -873,7 +859,7 @@ static void video_image_display(VideoState *is)
|
||||
}
|
||||
}
|
||||
|
||||
calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
|
||||
calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
|
||||
|
||||
SDL_DisplayYUVOverlay(vp->bmp, &rect);
|
||||
|
||||
@@ -913,7 +899,7 @@ static void video_audio_display(VideoState *s)
|
||||
/* to be more precise, we take into account the time spent since
|
||||
the last buffer computation */
|
||||
if (audio_callback_time) {
|
||||
time_diff = av_gettime_relative() - audio_callback_time;
|
||||
time_diff = av_gettime() - audio_callback_time;
|
||||
delay -= (time_diff * s->audio_tgt.freq) / 1000000;
|
||||
}
|
||||
|
||||
@@ -1065,7 +1051,7 @@ static void do_exit(VideoState *is)
|
||||
av_lockmgr_register(NULL);
|
||||
uninit_opts();
|
||||
#if CONFIG_AVFILTER
|
||||
av_freep(&vfilters_list);
|
||||
av_freep(&vfilters);
|
||||
#endif
|
||||
avformat_network_deinit();
|
||||
if (show_status)
|
||||
@@ -1080,10 +1066,10 @@ static void sigterm_handler(int sig)
|
||||
exit(123);
|
||||
}
|
||||
|
||||
static void set_default_window_size(int width, int height, AVRational sar)
|
||||
static void set_default_window_size(VideoPicture *vp)
|
||||
{
|
||||
SDL_Rect rect;
|
||||
calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
|
||||
calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
|
||||
default_width = rect.w;
|
||||
default_height = rect.h;
|
||||
}
|
||||
@@ -1097,7 +1083,7 @@ static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp
|
||||
else flags |= SDL_RESIZABLE;
|
||||
|
||||
if (vp && vp->width)
|
||||
set_default_window_size(vp->width, vp->height, vp->sar);
|
||||
set_default_window_size(vp);
|
||||
|
||||
if (is_full_screen && fs_screen_width) {
|
||||
w = fs_screen_width;
|
||||
@@ -1146,7 +1132,7 @@ static double get_clock(Clock *c)
|
||||
if (c->paused) {
|
||||
return c->pts;
|
||||
} else {
|
||||
double time = av_gettime_relative() / 1000000.0;
|
||||
double time = av_gettime() / 1000000.0;
|
||||
return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
|
||||
}
|
||||
}
|
||||
@@ -1161,7 +1147,7 @@ static void set_clock_at(Clock *c, double pts, int serial, double time)
|
||||
|
||||
static void set_clock(Clock *c, double pts, int serial)
|
||||
{
|
||||
double time = av_gettime_relative() / 1000000.0;
|
||||
double time = av_gettime() / 1000000.0;
|
||||
set_clock_at(c, pts, serial, time);
|
||||
}
|
||||
|
||||
@@ -1254,7 +1240,7 @@ static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_by
|
||||
static void stream_toggle_pause(VideoState *is)
|
||||
{
|
||||
if (is->paused) {
|
||||
is->frame_timer += av_gettime_relative() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
|
||||
is->frame_timer += av_gettime() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
|
||||
if (is->read_pause_return != AVERROR(ENOSYS)) {
|
||||
is->vidclk.paused = 0;
|
||||
}
|
||||
@@ -1320,23 +1306,7 @@ static double vp_duration(VideoState *is, VideoPicture *vp, VideoPicture *nextvp
|
||||
}
|
||||
}
|
||||
|
||||
/* return the number of undisplayed pictures in the queue */
|
||||
static int pictq_nb_remaining(VideoState *is) {
|
||||
return is->pictq_size - is->pictq_rindex_shown;
|
||||
}
|
||||
|
||||
/* jump back to the previous picture if available by resetting rindex_shown */
|
||||
static int pictq_prev_picture(VideoState *is) {
|
||||
int ret = is->pictq_rindex_shown;
|
||||
is->pictq_rindex_shown = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void pictq_next_picture(VideoState *is) {
|
||||
if (!is->pictq_rindex_shown) {
|
||||
is->pictq_rindex_shown = 1;
|
||||
return;
|
||||
}
|
||||
/* update queue size and signal for next picture */
|
||||
if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
|
||||
is->pictq_rindex = 0;
|
||||
@@ -1347,6 +1317,25 @@ static void pictq_next_picture(VideoState *is) {
|
||||
SDL_UnlockMutex(is->pictq_mutex);
|
||||
}
|
||||
|
||||
static int pictq_prev_picture(VideoState *is) {
|
||||
VideoPicture *prevvp;
|
||||
int ret = 0;
|
||||
/* update queue size and signal for the previous picture */
|
||||
prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
|
||||
if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
|
||||
SDL_LockMutex(is->pictq_mutex);
|
||||
if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE) {
|
||||
if (--is->pictq_rindex == -1)
|
||||
is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
|
||||
is->pictq_size++;
|
||||
ret = 1;
|
||||
}
|
||||
SDL_CondSignal(is->pictq_cond);
|
||||
SDL_UnlockMutex(is->pictq_mutex);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
|
||||
/* update current video pts */
|
||||
set_clock(&is->vidclk, pts, serial);
|
||||
@@ -1366,7 +1355,7 @@ static void video_refresh(void *opaque, double *remaining_time)
|
||||
check_external_clock_speed(is);
|
||||
|
||||
if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
|
||||
time = av_gettime_relative() / 1000000.0;
|
||||
time = av_gettime() / 1000000.0;
|
||||
if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
|
||||
video_display(is);
|
||||
is->last_vis_time = time;
|
||||
@@ -1379,15 +1368,15 @@ static void video_refresh(void *opaque, double *remaining_time)
|
||||
if (is->force_refresh)
|
||||
redisplay = pictq_prev_picture(is);
|
||||
retry:
|
||||
if (pictq_nb_remaining(is) == 0) {
|
||||
if (is->pictq_size == 0) {
|
||||
// nothing to do, no picture to display in the queue
|
||||
} else {
|
||||
double last_duration, duration, delay;
|
||||
VideoPicture *vp, *lastvp;
|
||||
|
||||
/* dequeue the picture */
|
||||
lastvp = &is->pictq[is->pictq_rindex];
|
||||
vp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown) % VIDEO_PICTURE_QUEUE_SIZE];
|
||||
vp = &is->pictq[is->pictq_rindex];
|
||||
lastvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
|
||||
|
||||
if (vp->serial != is->videoq.serial) {
|
||||
pictq_next_picture(is);
|
||||
@@ -1397,7 +1386,7 @@ retry:
|
||||
}
|
||||
|
||||
if (lastvp->serial != vp->serial && !redisplay)
|
||||
is->frame_timer = av_gettime_relative() / 1000000.0;
|
||||
is->frame_timer = av_gettime() / 1000000.0;
|
||||
|
||||
if (is->paused)
|
||||
goto display;
|
||||
@@ -1409,7 +1398,7 @@ retry:
|
||||
else
|
||||
delay = compute_target_delay(last_duration, is);
|
||||
|
||||
time= av_gettime_relative()/1000000.0;
|
||||
time= av_gettime()/1000000.0;
|
||||
if (time < is->frame_timer + delay && !redisplay) {
|
||||
*remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
|
||||
return;
|
||||
@@ -1424,8 +1413,8 @@ retry:
|
||||
update_video_pts(is, vp->pts, vp->pos, vp->serial);
|
||||
SDL_UnlockMutex(is->pictq_mutex);
|
||||
|
||||
if (pictq_nb_remaining(is) > 1) {
|
||||
VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown + 1) % VIDEO_PICTURE_QUEUE_SIZE];
|
||||
if (is->pictq_size > 1) {
|
||||
VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
|
||||
duration = vp_duration(is, vp, nextvp);
|
||||
if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
|
||||
if (!redisplay)
|
||||
@@ -1483,7 +1472,7 @@ display:
|
||||
int aqsize, vqsize, sqsize;
|
||||
double av_diff;
|
||||
|
||||
cur_time = av_gettime_relative();
|
||||
cur_time = av_gettime();
|
||||
if (!last_time || (cur_time - last_time) >= 30000) {
|
||||
aqsize = 0;
|
||||
vqsize = 0;
|
||||
@@ -1581,7 +1570,8 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double
|
||||
/* wait until we have space to put a new picture */
|
||||
SDL_LockMutex(is->pictq_mutex);
|
||||
|
||||
while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
|
||||
/* keep the last already displayed picture in the queue */
|
||||
while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 &&
|
||||
!is->videoq.abort_request) {
|
||||
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
|
||||
}
|
||||
@@ -1783,7 +1773,7 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
||||
char sws_flags_str[128];
|
||||
char buffersrc_args[256];
|
||||
int ret;
|
||||
AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
|
||||
AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
|
||||
AVCodecContext *codec = is->video_st->codec;
|
||||
AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
|
||||
|
||||
@@ -1814,49 +1804,16 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
||||
if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
|
||||
goto fail;
|
||||
|
||||
last_filter = filt_out;
|
||||
|
||||
/* Note: this macro adds a filter before the lastly added filter, so the
|
||||
* processing order of the filters is in reverse */
|
||||
#define INSERT_FILT(name, arg) do { \
|
||||
AVFilterContext *filt_ctx; \
|
||||
\
|
||||
ret = avfilter_graph_create_filter(&filt_ctx, \
|
||||
avfilter_get_by_name(name), \
|
||||
"ffplay_" name, arg, NULL, graph); \
|
||||
if (ret < 0) \
|
||||
goto fail; \
|
||||
\
|
||||
ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
|
||||
if (ret < 0) \
|
||||
goto fail; \
|
||||
\
|
||||
last_filter = filt_ctx; \
|
||||
} while (0)
|
||||
|
||||
/* SDL YUV code is not handling odd width/height for some driver
|
||||
* combinations, therefore we crop the picture to an even width/height. */
|
||||
INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
|
||||
if ((ret = avfilter_graph_create_filter(&filt_crop,
|
||||
avfilter_get_by_name("crop"),
|
||||
"ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
|
||||
goto fail;
|
||||
if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
|
||||
goto fail;
|
||||
|
||||
if (autorotate) {
|
||||
AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
|
||||
if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
|
||||
if (!strcmp(rotate_tag->value, "90")) {
|
||||
INSERT_FILT("transpose", "clock");
|
||||
} else if (!strcmp(rotate_tag->value, "180")) {
|
||||
INSERT_FILT("hflip", NULL);
|
||||
INSERT_FILT("vflip", NULL);
|
||||
} else if (!strcmp(rotate_tag->value, "270")) {
|
||||
INSERT_FILT("transpose", "cclock");
|
||||
} else {
|
||||
char rotate_buf[64];
|
||||
snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
|
||||
INSERT_FILT("rotate", rotate_buf);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
|
||||
if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
|
||||
goto fail;
|
||||
|
||||
is->in_video_filter = filt_src;
|
||||
@@ -1962,7 +1919,6 @@ static int video_thread(void *arg)
|
||||
int last_h = 0;
|
||||
enum AVPixelFormat last_format = -2;
|
||||
int last_serial = -1;
|
||||
int last_vfilter_idx = 0;
|
||||
#endif
|
||||
|
||||
for (;;) {
|
||||
@@ -1981,8 +1937,7 @@ static int video_thread(void *arg)
|
||||
if ( last_w != frame->width
|
||||
|| last_h != frame->height
|
||||
|| last_format != frame->format
|
||||
|| last_serial != serial
|
||||
|| last_vfilter_idx != is->vfilter_idx) {
|
||||
|| last_serial != serial) {
|
||||
av_log(NULL, AV_LOG_DEBUG,
|
||||
"Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
|
||||
last_w, last_h,
|
||||
@@ -1991,7 +1946,7 @@ static int video_thread(void *arg)
|
||||
(const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
|
||||
avfilter_graph_free(&graph);
|
||||
graph = avfilter_graph_alloc();
|
||||
if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
|
||||
if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
|
||||
SDL_Event event;
|
||||
event.type = FF_QUIT_EVENT;
|
||||
event.user.data1 = is;
|
||||
@@ -2004,7 +1959,6 @@ static int video_thread(void *arg)
|
||||
last_h = frame->height;
|
||||
last_format = frame->format;
|
||||
last_serial = serial;
|
||||
last_vfilter_idx = is->vfilter_idx;
|
||||
frame_rate = filt_out->inputs[0]->frame_rate;
|
||||
}
|
||||
|
||||
@@ -2013,7 +1967,7 @@ static int video_thread(void *arg)
|
||||
goto the_end;
|
||||
|
||||
while (ret >= 0) {
|
||||
is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
|
||||
is->frame_last_returned_time = av_gettime() / 1000000.0;
|
||||
|
||||
ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
|
||||
if (ret < 0) {
|
||||
@@ -2023,7 +1977,7 @@ static int video_thread(void *arg)
|
||||
break;
|
||||
}
|
||||
|
||||
is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
|
||||
is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
|
||||
if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
|
||||
is->frame_last_filter_delay = 0;
|
||||
tb = filt_out->inputs[0]->time_base;
|
||||
@@ -2424,7 +2378,7 @@ static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
|
||||
VideoState *is = opaque;
|
||||
int audio_size, len1;
|
||||
|
||||
audio_callback_time = av_gettime_relative();
|
||||
audio_callback_time = av_gettime();
|
||||
|
||||
while (len > 0) {
|
||||
if (is->audio_buf_index >= is->audio_buf_size) {
|
||||
@@ -2461,8 +2415,6 @@ static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb
|
||||
SDL_AudioSpec wanted_spec, spec;
|
||||
const char *env;
|
||||
static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
|
||||
static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
|
||||
int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
|
||||
|
||||
env = SDL_getenv("SDL_AUDIO_CHANNELS");
|
||||
if (env) {
|
||||
@@ -2473,33 +2425,25 @@ static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb
|
||||
wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
|
||||
wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
|
||||
}
|
||||
wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
|
||||
wanted_spec.channels = wanted_nb_channels;
|
||||
wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
|
||||
wanted_spec.freq = wanted_sample_rate;
|
||||
if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
|
||||
return -1;
|
||||
}
|
||||
while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
|
||||
next_sample_rate_idx--;
|
||||
wanted_spec.format = AUDIO_S16SYS;
|
||||
wanted_spec.silence = 0;
|
||||
wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
|
||||
wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
|
||||
wanted_spec.callback = sdl_audio_callback;
|
||||
wanted_spec.userdata = opaque;
|
||||
while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
|
||||
av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
|
||||
wanted_spec.channels, wanted_spec.freq, SDL_GetError());
|
||||
av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
|
||||
wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
|
||||
if (!wanted_spec.channels) {
|
||||
wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
|
||||
wanted_spec.channels = wanted_nb_channels;
|
||||
if (!wanted_spec.freq) {
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"No more combinations to try, audio open failed\n");
|
||||
"No more channel combinations to try, audio open failed\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
|
||||
}
|
||||
if (spec.format != AUDIO_S16SYS) {
|
||||
@@ -2572,6 +2516,7 @@ static int stream_component_open(VideoState *is, int stream_index)
|
||||
stream_lowres = av_codec_get_max_lowres(codec);
|
||||
}
|
||||
av_codec_set_lowres(avctx, stream_lowres);
|
||||
avctx->error_concealment = error_concealment;
|
||||
|
||||
if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
|
||||
if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
|
||||
@@ -2629,7 +2574,7 @@ static int stream_component_open(VideoState *is, int stream_index)
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
        is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
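The two audio_diff_threshold lines above differ only by the 2.0 factor, i.e. one hardware buffer of slack versus two. For an assumed 44100 Hz, stereo, 16-bit output with a 2048-sample SDL buffer — illustrative values, not taken from this diff — the thresholds work out as follows:

```c
#include <stdio.h>

int main(void)
{
    int samples       = 2048;                 /* assumed SDL hardware buffer, in samples */
    int channels      = 2, bytes_per_sample = 2;
    int hw_buf_size   = samples * channels * bytes_per_sample;   /* 8192 bytes */
    int bytes_per_sec = 44100 * channels * bytes_per_sample;     /* 176400 bytes/s */

    printf("1 buffer:  %.3f s\n", (double)hw_buf_size / bytes_per_sec);   /* ~0.046 s */
    printf("2 buffers: %.3f s\n", 2.0 * hw_buf_size / bytes_per_sec);     /* ~0.093 s */
    return 0;
}
```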
@@ -2807,8 +2752,6 @@ static int read_thread(void *arg)
|
||||
if (genpts)
|
||||
ic->flags |= AVFMT_FLAG_GENPTS;
|
||||
|
||||
av_format_inject_global_side_data(ic);
|
||||
|
||||
opts = setup_find_stream_info_opts(ic, codec_opts);
|
||||
orig_nb_streams = ic->nb_streams;
|
||||
|
||||
@@ -2879,9 +2822,9 @@ static int read_thread(void *arg)
|
||||
if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
|
||||
AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
|
||||
AVCodecContext *avctx = st->codec;
|
||||
AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
|
||||
if (avctx->width)
|
||||
set_default_window_size(avctx->width, avctx->height, sar);
|
||||
VideoPicture vp = {.width = avctx->width, .height = avctx->height, .sar = av_guess_sample_aspect_ratio(ic, st, NULL)};
|
||||
if (vp.width)
|
||||
set_default_window_size(&vp);
|
||||
}
|
||||
|
||||
/* open the streams */
|
||||
@@ -2992,7 +2935,7 @@ static int read_thread(void *arg)
|
||||
}
|
||||
if (!is->paused &&
|
||||
(!is->audio_st || is->audio_finished == is->audioq.serial) &&
|
||||
(!is->video_st || (is->video_finished == is->videoq.serial && pictq_nb_remaining(is) == 0))) {
|
||||
(!is->video_st || (is->video_finished == is->videoq.serial && is->pictq_size == 0))) {
|
||||
if (loop != 1 && (!loop || --loop)) {
|
||||
stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
|
||||
} else if (autoexit) {
|
||||
@@ -3177,11 +3120,6 @@ static void stream_cycle_channel(VideoState *is, int codec_type)
|
||||
the_end:
|
||||
if (p && stream_index != -1)
|
||||
stream_index = p->stream_index[stream_index];
|
||||
av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
|
||||
av_get_media_type_string(codec_type),
|
||||
old_index,
|
||||
stream_index);
|
||||
|
||||
stream_component_close(is, old_index);
|
||||
stream_component_open(is, stream_index);
|
||||
}
|
||||
@@ -3219,7 +3157,7 @@ static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
|
||||
double remaining_time = 0.0;
|
||||
SDL_PumpEvents();
|
||||
while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
|
||||
if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
|
||||
if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
|
||||
SDL_ShowCursor(0);
|
||||
cursor_hidden = 1;
|
||||
}
|
||||
@@ -3305,17 +3243,7 @@ static void event_loop(VideoState *cur_stream)
|
||||
stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
|
||||
break;
|
||||
case SDLK_w:
|
||||
#if CONFIG_AVFILTER
|
||||
if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
|
||||
if (++cur_stream->vfilter_idx >= nb_vfilters)
|
||||
cur_stream->vfilter_idx = 0;
|
||||
} else {
|
||||
cur_stream->vfilter_idx = 0;
|
||||
toggle_audio_display(cur_stream);
|
||||
}
|
||||
#else
|
||||
toggle_audio_display(cur_stream);
|
||||
#endif
|
||||
break;
|
||||
case SDLK_PAGEUP:
|
||||
if (cur_stream->ic->nb_chapters <= 1) {
|
||||
@@ -3383,7 +3311,7 @@ static void event_loop(VideoState *cur_stream)
|
||||
SDL_ShowCursor(1);
|
||||
cursor_hidden = 0;
|
||||
}
|
||||
cursor_last_shown = av_gettime_relative();
|
||||
cursor_last_shown = av_gettime();
|
||||
if (event.type == SDL_MOUSEBUTTONDOWN) {
|
||||
x = event.button.x;
|
||||
} else {
|
||||
@@ -3571,6 +3499,7 @@ static const OptionDef options[] = {
    { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
    { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
@@ -3580,7 +3509,7 @@ static const OptionDef options[] = {
    { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
    { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
    { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
    { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
@@ -3591,7 +3520,6 @@ static const OptionDef options[] = {
    { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
    { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
    { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
    { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
    { NULL, },
};
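The options[] hunk above also changes how -vf is stored: one side keeps a single OPT_STRING destination (&vfilters), the other routes each argument through a .func_arg callback (opt_add_vfilter) so repeated -vf options accumulate into the nb_vfilters list that the 'w' key cycles through. A rough sketch of such a callback, assuming libavutil's av_dynarray_add(); the globals are hypothetical stand-ins, not the tool's actual variables:

    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    static const char **vfilters_list;  /* hypothetical list of filtergraph strings */
    static int          nb_vfilters;

    /* Called once per -vf occurrence; each call appends instead of overwriting. */
    static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
    {
        av_dynarray_add(&vfilters_list, &nb_vfilters, (void *)arg);
        if (!vfilters_list)
            return AVERROR(ENOMEM);  /* av_dynarray_add() frees and NULLs the table on failure */
        return 0;
    }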
@@ -3624,7 +3552,7 @@ void show_help_default(const char *opt, const char *arg)
           "v cycle video channel\n"
           "t cycle subtitle channel in the current program\n"
           "c cycle program\n"
           "w cycle video filters or show modes\n"
           "w show audio waves\n"
           "s activate frame-step mode\n"
           "left/right seek backward/forward 10 seconds\n"
           "down/up seek backward/forward 1 minute\n"
@@ -3697,7 +3625,7 @@ int main(int argc, char **argv)
        flags &= ~SDL_INIT_AUDIO;
    if (display_disable)
        SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
#if !defined(_WIN32) && !defined(__APPLE__)
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
18 ffprobe.c
@@ -1991,7 +1991,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
    const char *s;
    AVRational sar, dar;
    AVBPrint pbuf;
    const AVCodecDescriptor *cd;
    int ret = 0;

    av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
@@ -2009,12 +2008,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
            if (dec->long_name) print_str ("codec_long_name", dec->long_name);
            else print_str_opt("codec_long_name", "unknown");
        }
    } else if ((cd = avcodec_descriptor_get(stream->codec->codec_id))) {
        print_str_opt("codec_name", cd->name);
        if (!do_bitexact) {
            print_str_opt("codec_long_name",
                          cd->long_name ? cd->long_name : "unknown");
        }
    } else {
        print_str_opt("codec_name", "unknown");
        if (!do_bitexact) {
@@ -2058,13 +2051,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
        if (s) print_str ("pix_fmt", s);
        else print_str_opt("pix_fmt", "unknown");
        print_int("level", dec_ctx->level);
        if (dec_ctx->color_range != AVCOL_RANGE_UNSPECIFIED)
            print_str ("color_range", dec_ctx->color_range == AVCOL_RANGE_MPEG ? "tv": "pc");
        else
            print_str_opt("color_range", "N/A");
        s = av_get_colorspace_name(dec_ctx->colorspace);
        if (s) print_str ("color_space", s);
        else print_str_opt("color_space", "unknown");
        if (dec_ctx->timecode_frame_start >= 0) {
            char tcbuf[AV_TIMECODE_STR_SIZE];
            av_timecode_make_mpeg_tc_string(tcbuf, dec_ctx->timecode_frame_start);
@@ -2129,8 +2115,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
        print_time("duration", stream->duration, &stream->time_base);
    if (dec_ctx->bit_rate > 0) print_val ("bit_rate", dec_ctx->bit_rate, unit_bit_per_second_str);
    else print_str_opt("bit_rate", "N/A");
    if (dec_ctx->rc_max_rate > 0) print_val ("max_bit_rate", dec_ctx->rc_max_rate, unit_bit_per_second_str);
    else print_str_opt("max_bit_rate", "N/A");
    if (stream->nb_frames) print_fmt ("nb_frames", "%"PRId64, stream->nb_frames);
    else print_str_opt("nb_frames", "N/A");
    if (nb_streams_frames[stream_idx]) print_fmt ("nb_read_frames", "%"PRIu64, nb_streams_frames[stream_idx]);
@@ -2749,7 +2733,7 @@ static int parse_read_intervals(const char *intervals_spec)
            n++;
    n++;

    read_intervals = av_malloc_array(n, sizeof(*read_intervals));
    read_intervals = av_malloc(n * sizeof(*read_intervals));
    if (!read_intervals) {
        ret = AVERROR(ENOMEM);
        goto end;
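The parse_read_intervals hunk just above is one instance of a pattern that recurs throughout this comparison (see also a64multi_encode_init and psy_3gpp_init further down): plain av_malloc(n * sizeof(elem)) on one side, av_malloc_array(n, sizeof(elem)) on the other. A minimal sketch of the difference, assuming only libavutil's allocation API; the Interval type is a stand-in, not ffprobe's real structure:

    #include <stdint.h>
    #include "libavutil/mem.h"

    typedef struct Interval { int64_t start, end; } Interval;  /* stand-in element type */

    static Interval *alloc_intervals(size_t n)
    {
        /* av_malloc_array() returns NULL when n * sizeof(Interval) would overflow
         * size_t, while av_malloc(n * sizeof(Interval)) would quietly wrap and
         * hand back a buffer that is too small. */
        return av_malloc_array(n, sizeof(Interval));
    }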
119 ffserver.c
@@ -38,7 +38,6 @@
#include "libavformat/rtpdec.h"
#include "libavformat/rtpproto.h"
#include "libavformat/rtsp.h"
#include "libavformat/rtspcodes.h"
#include "libavformat/avio_internal.h"
#include "libavformat/internal.h"
#include "libavformat/url.h"
@@ -56,9 +55,7 @@
#include "libavutil/time.h"

#include <stdarg.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <sys/ioctl.h>
#if HAVE_POLL_H
@@ -225,7 +222,7 @@ typedef struct FFStream {
    IPAddressACL *acl;
    char dynamic_acl[1024];
    int nb_streams;
    int prebuffer; /* Number of milliseconds early to start */
    int prebuffer; /* Number of millseconds early to start */
    int64_t max_time; /* Number of milliseconds to run */
    int send_on_key;
    AVStream *streams[MAX_STREAMS];
@@ -558,8 +555,7 @@ static int socket_open_listen(struct sockaddr_in *my_addr)
    }

    tmp = 1;
    if (setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp)))
        av_log(NULL, AV_LOG_WARNING, "setsockopt SO_REUSEADDR failed\n");
    setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp));

    my_addr->sin_family = AF_INET;
    if (bind (server_fd, (struct sockaddr *) my_addr, sizeof (*my_addr)) < 0) {
@@ -575,9 +571,7 @@ static int socket_open_listen(struct sockaddr_in *my_addr)
        closesocket(server_fd);
        return -1;
    }

    if (ff_socket_nonblock(server_fd, 1) < 0)
        av_log(NULL, AV_LOG_WARNING, "ff_socket_nonblock failed\n");
    ff_socket_nonblock(server_fd, 1);

    return server_fd;
}
@@ -653,24 +647,18 @@ static int http_server(void)

    if (my_http_addr.sin_port) {
        server_fd = socket_open_listen(&my_http_addr);
        if (server_fd < 0) {
            av_free(poll_table);
        if (server_fd < 0)
            return -1;
        }
    }

    if (my_rtsp_addr.sin_port) {
        rtsp_server_fd = socket_open_listen(&my_rtsp_addr);
        if (rtsp_server_fd < 0) {
            av_free(poll_table);
            closesocket(server_fd);
        if (rtsp_server_fd < 0)
            return -1;
        }
    }

    if (!rtsp_server_fd && !server_fd) {
        http_log("HTTP and RTSP disabled.\n");
        av_free(poll_table);
        return -1;
    }

@@ -749,10 +737,8 @@ static int http_server(void)
    do {
        ret = poll(poll_table, poll_entry - poll_table, delay);
        if (ret < 0 && ff_neterrno() != AVERROR(EAGAIN) &&
            ff_neterrno() != AVERROR(EINTR)) {
            av_free(poll_table);
            ff_neterrno() != AVERROR(EINTR))
            return -1;
        }
    } while (ret < 0);

    cur_time = av_gettime() / 1000;
@@ -815,8 +801,7 @@ static void http_send_too_busy_reply(int fd)
                  "</body></html>\r\n",
                  nb_connections, nb_max_connections);
    av_assert0(len < sizeof(buffer));
    if (send(fd, buffer, len, 0) < len)
        av_log(NULL, AV_LOG_WARNING, "Could not send too-busy reply, send() failed\n");
    send(fd, buffer, len, 0);
}
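Several of the ffserver hunks above follow one pattern: calls whose results were previously discarded (setsockopt(), ff_socket_nonblock(), send(), and later lseek() and getsockname()) are wrapped in a check that logs a warning but otherwise continues. A minimal sketch of that warn-and-continue style with a plain POSIX socket call, assuming only libavutil's logging; the helper name is invented for the example:

    #include <sys/socket.h>
    #include "libavutil/log.h"

    /* Enable address reuse on a listening socket; failure is logged but not
     * treated as fatal, matching the style used in the hunks above. */
    static void enable_reuseaddr(int fd)
    {
        int tmp = 1;

        if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp)) < 0)
            av_log(NULL, AV_LOG_WARNING, "setsockopt(SO_REUSEADDR) failed\n");
    }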
@@ -834,8 +819,7 @@ static void new_connection(int server_fd, int is_rtsp)
|
||||
http_log("error during accept %s\n", strerror(errno));
|
||||
return;
|
||||
}
|
||||
if (ff_socket_nonblock(fd, 1) < 0)
|
||||
av_log(NULL, AV_LOG_WARNING, "ff_socket_nonblock failed\n");
|
||||
ff_socket_nonblock(fd, 1);
|
||||
|
||||
if (nb_connections >= nb_max_connections) {
|
||||
http_send_too_busy_reply(fd);
|
||||
@@ -1340,7 +1324,7 @@ static void parse_acl_row(FFStream *stream, FFStream* feed, IPAddressACL *ext_ac
|
||||
get_arg(arg, sizeof(arg), &p);
|
||||
|
||||
if (resolve_host(&acl.first, arg) != 0) {
|
||||
fprintf(stderr, "%s:%d: ACL refers to invalid host or IP address '%s'\n",
|
||||
fprintf(stderr, "%s:%d: ACL refers to invalid host or ip address '%s'\n",
|
||||
filename, line_num, arg);
|
||||
errors++;
|
||||
} else
|
||||
@@ -1350,7 +1334,7 @@ static void parse_acl_row(FFStream *stream, FFStream* feed, IPAddressACL *ext_ac
|
||||
|
||||
if (arg[0]) {
|
||||
if (resolve_host(&acl.last, arg) != 0) {
|
||||
fprintf(stderr, "%s:%d: ACL refers to invalid host or IP address '%s'\n",
|
||||
fprintf(stderr, "%s:%d: ACL refers to invalid host or ip address '%s'\n",
|
||||
filename, line_num, arg);
|
||||
errors++;
|
||||
}
|
||||
@@ -1380,8 +1364,7 @@ static void parse_acl_row(FFStream *stream, FFStream* feed, IPAddressACL *ext_ac
|
||||
naclp = &(*naclp)->next;
|
||||
|
||||
*naclp = nacl;
|
||||
} else
|
||||
av_free(nacl);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1742,7 +1725,7 @@ static int http_parse_request(HTTPContext *c)
|
||||
*p = '\0';
|
||||
snprintf(q, c->buffer_size,
|
||||
"HTTP/1.0 200 RTSP Redirect follows\r\n"
|
||||
/* XXX: incorrect MIME type ? */
|
||||
/* XXX: incorrect mime type ? */
|
||||
"Content-type: application/x-rtsp\r\n"
|
||||
"\r\n"
|
||||
"rtsp://%s:%d/%s\r\n", hostname, ntohs(my_rtsp_addr.sin_port), filename);
|
||||
@@ -1763,10 +1746,7 @@ static int http_parse_request(HTTPContext *c)
|
||||
q += strlen(q);
|
||||
|
||||
len = sizeof(my_addr);
|
||||
|
||||
/* XXX: Should probably fail? */
|
||||
if (getsockname(c->fd, (struct sockaddr *)&my_addr, &len))
|
||||
http_log("getsockname() failed\n");
|
||||
getsockname(c->fd, (struct sockaddr *)&my_addr, &len);
|
||||
|
||||
/* XXX: should use a dynamic buffer */
|
||||
sdp_data_size = prepare_sdp_description(stream,
|
||||
@@ -1806,7 +1786,7 @@ static int http_parse_request(HTTPContext *c)
|
||||
/* if post, it means a feed is being sent */
|
||||
if (!stream->is_feed) {
|
||||
/* However it might be a status report from WMP! Let us log the
|
||||
* data as it might come handy one day. */
|
||||
* data as it might come in handy one day. */
|
||||
const char *logline = 0;
|
||||
int client_id = 0;
|
||||
|
||||
@@ -2328,7 +2308,7 @@ static int http_prepare_data(HTTPContext *c)
|
||||
c->fmt_ctx.pb->seekable = 0;
|
||||
|
||||
/*
|
||||
* HACK to avoid MPEG-PS muxer to spit many underflow errors
|
||||
* HACK to avoid mpeg ps muxer to spit many underflow errors
|
||||
* Default value from FFmpeg
|
||||
* Try to set it using configuration option
|
||||
*/
|
||||
@@ -2759,11 +2739,8 @@ static int http_receive_data(HTTPContext *c)
|
||||
/* a packet has been received : write it in the store, except
|
||||
if header */
|
||||
if (c->data_count > FFM_PACKET_SIZE) {
|
||||
/* XXX: use llseek or url_seek
|
||||
* XXX: Should probably fail? */
|
||||
if (lseek(c->feed_fd, feed->feed_write_index, SEEK_SET) == -1)
|
||||
http_log("Seek to %"PRId64" failed\n", feed->feed_write_index);
|
||||
|
||||
/* XXX: use llseek or url_seek */
|
||||
lseek(c->feed_fd, feed->feed_write_index, SEEK_SET);
|
||||
if (write(c->feed_fd, c->buffer, FFM_PACKET_SIZE) < 0) {
|
||||
http_log("Error writing to feed file: %s\n", strerror(errno));
|
||||
goto fail;
|
||||
@@ -2859,9 +2836,44 @@ static void rtsp_reply_header(HTTPContext *c, enum RTSPStatusCode error_number)
|
||||
struct tm *tm;
|
||||
char buf2[32];
|
||||
|
||||
str = RTSP_STATUS_CODE2STRING(error_number);
|
||||
if (!str)
|
||||
switch(error_number) {
|
||||
case RTSP_STATUS_OK:
|
||||
str = "OK";
|
||||
break;
|
||||
case RTSP_STATUS_METHOD:
|
||||
str = "Method Not Allowed";
|
||||
break;
|
||||
case RTSP_STATUS_BANDWIDTH:
|
||||
str = "Not Enough Bandwidth";
|
||||
break;
|
||||
case RTSP_STATUS_SESSION:
|
||||
str = "Session Not Found";
|
||||
break;
|
||||
case RTSP_STATUS_STATE:
|
||||
str = "Method Not Valid in This State";
|
||||
break;
|
||||
case RTSP_STATUS_AGGREGATE:
|
||||
str = "Aggregate operation not allowed";
|
||||
break;
|
||||
case RTSP_STATUS_ONLY_AGGREGATE:
|
||||
str = "Only aggregate operation allowed";
|
||||
break;
|
||||
case RTSP_STATUS_TRANSPORT:
|
||||
str = "Unsupported transport";
|
||||
break;
|
||||
case RTSP_STATUS_INTERNAL:
|
||||
str = "Internal Server Error";
|
||||
break;
|
||||
case RTSP_STATUS_SERVICE:
|
||||
str = "Service Unavailable";
|
||||
break;
|
||||
case RTSP_STATUS_VERSION:
|
||||
str = "RTSP Version not supported";
|
||||
break;
|
||||
default:
|
||||
str = "Unknown Error";
|
||||
break;
|
||||
}
|
||||
|
||||
avio_printf(c->pb, "RTSP/1.0 %d %s\r\n", error_number, str);
|
||||
avio_printf(c->pb, "CSeq: %d\r\n", c->seq);
|
||||
@@ -2977,8 +2989,6 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
|
||||
AVDictionaryEntry *entry = av_dict_get(stream->metadata, "title", NULL, 0);
|
||||
int i;
|
||||
|
||||
*pbuffer = NULL;
|
||||
|
||||
avc = avformat_alloc_context();
|
||||
if (avc == NULL || !rtp_format) {
|
||||
return -1;
|
||||
@@ -3015,7 +3025,7 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
|
||||
av_free(avc);
|
||||
av_free(avs);
|
||||
|
||||
return *pbuffer ? strlen(*pbuffer) : AVERROR(ENOMEM);
|
||||
return strlen(*pbuffer);
|
||||
}
|
||||
|
||||
static void rtsp_cmd_options(HTTPContext *c, const char *url)
|
||||
@@ -3037,7 +3047,7 @@ static void rtsp_cmd_describe(HTTPContext *c, const char *url)
|
||||
socklen_t len;
|
||||
struct sockaddr_in my_addr;
|
||||
|
||||
/* find which URL is asked */
|
||||
/* find which url is asked */
|
||||
av_url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
|
||||
path = path1;
|
||||
if (*path == '/')
|
||||
@@ -3055,7 +3065,7 @@ static void rtsp_cmd_describe(HTTPContext *c, const char *url)
|
||||
return;
|
||||
|
||||
found:
|
||||
/* prepare the media description in SDP format */
|
||||
/* prepare the media description in sdp format */
|
||||
|
||||
/* get the host IP */
|
||||
len = sizeof(my_addr);
|
||||
@@ -3114,7 +3124,7 @@ static void rtsp_cmd_setup(HTTPContext *c, const char *url,
|
||||
struct sockaddr_in dest_addr;
|
||||
RTSPActionServerSetup setup;
|
||||
|
||||
/* find which URL is asked */
|
||||
/* find which url is asked */
|
||||
av_url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
|
||||
path = path1;
|
||||
if (*path == '/')
|
||||
@@ -3156,7 +3166,7 @@ static void rtsp_cmd_setup(HTTPContext *c, const char *url,
|
||||
random0, random1);
|
||||
}
|
||||
|
||||
/* find RTP session, and create it if none found */
|
||||
/* find rtp session, and create it if none found */
|
||||
rtp_c = find_rtp_session(h->session_id);
|
||||
if (!rtp_c) {
|
||||
/* always prefer UDP */
|
||||
@@ -3245,7 +3255,7 @@ static void rtsp_cmd_setup(HTTPContext *c, const char *url,
|
||||
}
|
||||
|
||||
|
||||
/* find an RTP connection by using the session ID. Check consistency
|
||||
/* find an rtp connection by using the session ID. Check consistency
|
||||
with filename */
|
||||
static HTTPContext *find_rtp_session_with_url(const char *url,
|
||||
const char *session_id)
|
||||
@@ -3260,7 +3270,7 @@ static HTTPContext *find_rtp_session_with_url(const char *url,
|
||||
if (!rtp_c)
|
||||
return NULL;
|
||||
|
||||
/* find which URL is asked */
|
||||
/* find which url is asked */
|
||||
av_url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
|
||||
path = path1;
|
||||
if (*path == '/')
|
||||
@@ -3491,7 +3501,6 @@ static int rtp_new_av_stream(HTTPContext *c,
|
||||
fail:
|
||||
if (h)
|
||||
ffurl_close(h);
|
||||
av_free(st);
|
||||
av_free(ctx);
|
||||
return -1;
|
||||
}
|
||||
@@ -3590,7 +3599,7 @@ static void remove_stream(FFStream *stream)
|
||||
}
|
||||
}
|
||||
|
||||
/* specific MPEG4 handling : we extract the raw parameters */
|
||||
/* specific mpeg4 handling : we extract the raw parameters */
|
||||
static void extract_mpeg4_header(AVFormatContext *infile)
|
||||
{
|
||||
int mpeg4_count, i, size;
|
||||
@@ -3815,7 +3824,7 @@ static void build_feed_streams(void)
|
||||
http_log("Container doesn't support the required parameters\n");
|
||||
exit(1);
|
||||
}
|
||||
/* XXX: need better API */
|
||||
/* XXX: need better api */
|
||||
av_freep(&s->priv_data);
|
||||
avio_close(s->pb);
|
||||
s->streams = NULL;
|
||||
@@ -4312,7 +4321,7 @@ static int parse_ffconfig(const char *filename)
|
||||
stream->fmt = NULL;
|
||||
} else {
|
||||
stream->stream_type = STREAM_TYPE_LIVE;
|
||||
/* JPEG cannot be used here, so use single frame MJPEG */
|
||||
/* jpeg cannot be used here, so use single frame jpeg */
|
||||
if (!strcmp(arg, "jpeg"))
|
||||
strcpy(arg, "mjpeg");
|
||||
stream->fmt = ffserver_guess_format(arg, NULL, NULL);
|
||||
|
@@ -24,16 +24,13 @@
|
||||
* 4XM codec.
|
||||
*/
|
||||
|
||||
#include <inttypes.h>
|
||||
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/frame.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "avcodec.h"
|
||||
#include "blockdsp.h"
|
||||
#include "bswapdsp.h"
|
||||
#include "bytestream.h"
|
||||
#include "dsputil.h"
|
||||
#include "get_bits.h"
|
||||
#include "internal.h"
|
||||
|
||||
@@ -134,8 +131,7 @@ typedef struct CFrameBuffer {
|
||||
|
||||
typedef struct FourXContext {
|
||||
AVCodecContext *avctx;
|
||||
BlockDSPContext bdsp;
|
||||
BswapDSPContext bbdsp;
|
||||
DSPContext dsp;
|
||||
uint16_t *frame_buffer;
|
||||
uint16_t *last_frame_buffer;
|
||||
GetBitContext pre_gb; ///< ac/dc prefix
|
||||
@@ -460,7 +456,7 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
|
||||
bitstream_size);
|
||||
if (!f->bitstream_buffer)
|
||||
return AVERROR(ENOMEM);
|
||||
f->bbdsp.bswap_buf(f->bitstream_buffer, (const uint32_t *) (buf + extra),
|
||||
f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)(buf + extra),
|
||||
bitstream_size / 4);
|
||||
init_get_bits(&f->gb, f->bitstream_buffer, 8 * bitstream_size);
|
||||
|
||||
@@ -594,7 +590,7 @@ static int decode_i_mb(FourXContext *f)
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
f->bdsp.clear_blocks(f->block[0]);
|
||||
f->dsp.clear_blocks(f->block[0]);
|
||||
|
||||
for (i = 0; i < 6; i++)
|
||||
if ((ret = decode_i_block(f, f->block[i])) < 0)
|
||||
@@ -799,7 +795,7 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
|
||||
prestream_size);
|
||||
if (!f->bitstream_buffer)
|
||||
return AVERROR(ENOMEM);
|
||||
f->bbdsp.bswap_buf(f->bitstream_buffer, (const uint32_t *) prestream,
|
||||
f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)prestream,
|
||||
prestream_size / 4);
|
||||
init_get_bits(&f->pre_gb, f->bitstream_buffer, 8 * prestream_size);
|
||||
|
||||
@@ -835,7 +831,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
||||
av_assert0(avctx->width % 16 == 0 && avctx->height % 16 == 0);
|
||||
|
||||
if (buf_size < AV_RL32(buf + 4) + 8) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %"PRIu32"\n",
|
||||
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n",
|
||||
buf_size, AV_RL32(buf + 4));
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
@@ -1000,8 +996,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
}
|
||||
|
||||
f->version = AV_RL32(avctx->extradata) >> 16;
|
||||
ff_blockdsp_init(&f->bdsp, avctx);
|
||||
ff_bswapdsp_init(&f->bbdsp);
|
||||
ff_dsputil_init(&f->dsp, avctx);
|
||||
f->avctx = avctx;
|
||||
init_vlcs(f);
|
||||
|
||||
|
@@ -150,7 +150,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
c->planemap[0] = 0; // 1st plane is palette indexes
|
||||
break;
|
||||
case 24:
|
||||
avctx->pix_fmt = ff_get_format(avctx, pixfmt_rgb24);
|
||||
avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
|
||||
c->planes = 3;
|
||||
c->planemap[0] = 2; // 1st plane is red
|
||||
c->planemap[1] = 1; // 2nd plane is green
|
||||
|
@@ -1,10 +1,10 @@
|
||||
include $(SUBDIR)../config.mak
|
||||
|
||||
NAME = avcodec
|
||||
FFLIBS = avutil
|
||||
|
||||
HEADERS = avcodec.h \
|
||||
avfft.h \
|
||||
dv_profile.h \
|
||||
dxva2.h \
|
||||
old_codec_ids.h \
|
||||
vaapi.h \
|
||||
@@ -15,13 +15,11 @@ HEADERS = avcodec.h \
|
||||
|
||||
OBJS = allcodecs.o \
|
||||
audioconvert.o \
|
||||
avdct.o \
|
||||
avpacket.o \
|
||||
avpicture.o \
|
||||
bitstream.o \
|
||||
bitstream_filter.o \
|
||||
codec_desc.o \
|
||||
dv_profile.o \
|
||||
fmtconvert.o \
|
||||
imgconvert.o \
|
||||
mathtables.o \
|
||||
@@ -36,18 +34,14 @@ OBJS = allcodecs.o \
|
||||
OBJS-$(CONFIG_AANDCTTABLES) += aandcttab.o
|
||||
OBJS-$(CONFIG_AC3DSP) += ac3dsp.o
|
||||
OBJS-$(CONFIG_AUDIO_FRAME_QUEUE) += audio_frame_queue.o
|
||||
OBJS-$(CONFIG_AUDIODSP) += audiodsp.o
|
||||
OBJS-$(CONFIG_BLOCKDSP) += blockdsp.o
|
||||
OBJS-$(CONFIG_BSWAPDSP) += bswapdsp.o
|
||||
OBJS-$(CONFIG_CABAC) += cabac.o
|
||||
OBJS-$(CONFIG_CRYSTALHD) += crystalhd.o
|
||||
OBJS-$(CONFIG_DCT) += dct.o dct32_fixed.o dct32_float.o
|
||||
OBJS-$(CONFIG_DSPUTIL) += dsputil.o
|
||||
OBJS-$(CONFIG_DXVA2) += dxva2.o
|
||||
OBJS-$(CONFIG_DSPUTIL) += dsputil.o faanidct.o \
|
||||
simple_idct.o jrevdct.o
|
||||
OBJS-$(CONFIG_ENCODERS) += faandct.o jfdctfst.o jfdctint.o
|
||||
OBJS-$(CONFIG_ERROR_RESILIENCE) += error_resilience.o
|
||||
OBJS-$(CONFIG_EXIF) += exif.o tiff_common.o
|
||||
OBJS-$(CONFIG_FDCTDSP) += fdctdsp.o faandct.o \
|
||||
jfdctfst.o jfdctint.o
|
||||
FFT-OBJS-$(CONFIG_HARDCODED_TABLES) += cos_tables.o cos_fixed_tables.o
|
||||
OBJS-$(CONFIG_FFT) += avfft.o fft_fixed.o fft_float.o \
|
||||
fft_fixed_32.o fft_init_table.o \
|
||||
@@ -55,44 +49,32 @@ OBJS-$(CONFIG_FFT) += avfft.o fft_fixed.o fft_float.o \
|
||||
OBJS-$(CONFIG_GOLOMB) += golomb.o
|
||||
OBJS-$(CONFIG_H263DSP) += h263dsp.o
|
||||
OBJS-$(CONFIG_H264CHROMA) += h264chroma.o
|
||||
OBJS-$(CONFIG_H264DSP) += h264dsp.o h264idct.o startcode.o
|
||||
OBJS-$(CONFIG_H264DSP) += h264dsp.o h264idct.o
|
||||
OBJS-$(CONFIG_H264PRED) += h264pred.o
|
||||
OBJS-$(CONFIG_H264QPEL) += h264qpel.o
|
||||
OBJS-$(CONFIG_HPELDSP) += hpeldsp.o
|
||||
OBJS-$(CONFIG_HUFFMAN) += huffman.o
|
||||
OBJS-$(CONFIG_HUFFYUVDSP) += huffyuvdsp.o
|
||||
OBJS-$(CONFIG_HUFFYUVENCDSP) += huffyuvencdsp.o
|
||||
OBJS-$(CONFIG_IDCTDSP) += idctdsp.o faanidct.o \
|
||||
simple_idct.o jrevdct.o
|
||||
OBJS-$(CONFIG_INTRAX8) += intrax8.o intrax8dsp.o
|
||||
OBJS-$(CONFIG_LIBXVID) += libxvid_rc.o
|
||||
OBJS-$(CONFIG_LLAUDDSP) += lossless_audiodsp.o
|
||||
OBJS-$(CONFIG_LLVIDDSP) += lossless_videodsp.o
|
||||
OBJS-$(CONFIG_LPC) += lpc.o
|
||||
OBJS-$(CONFIG_LSP) += lsp.o
|
||||
OBJS-$(CONFIG_MDCT) += mdct_fixed.o mdct_float.o mdct_fixed_32.o
|
||||
OBJS-$(CONFIG_MPEG_ER) += mpeg_er.o
|
||||
OBJS-$(CONFIG_MPEGAUDIO) += mpegaudio.o mpegaudiodata.o \
|
||||
mpegaudiodecheader.o
|
||||
OBJS-$(CONFIG_MPEGAUDIODSP) += mpegaudiodsp.o \
|
||||
mpegaudiodsp_data.o \
|
||||
mpegaudiodsp_fixed.o \
|
||||
mpegaudiodsp_float.o
|
||||
OBJS-$(CONFIG_MPEGVIDEO) += mpegvideo.o mpegvideodsp.o \
|
||||
mpegvideo_motion.o mpegutils.o
|
||||
OBJS-$(CONFIG_MPEGVIDEO) += mpegvideo.o mpegvideo_motion.o
|
||||
OBJS-$(CONFIG_MPEGVIDEOENC) += mpegvideo_enc.o mpeg12data.o \
|
||||
motion_est.o ratecontrol.o \
|
||||
mpegvideoencdsp.o
|
||||
OBJS-$(CONFIG_PIXBLOCKDSP) += pixblockdsp.o
|
||||
OBJS-$(CONFIG_QPELDSP) += qpeldsp.o
|
||||
motion_est.o ratecontrol.o
|
||||
OBJS-$(CONFIG_RANGECODER) += rangecoder.o
|
||||
RDFT-OBJS-$(CONFIG_HARDCODED_TABLES) += sin_tables.o
|
||||
OBJS-$(CONFIG_RDFT) += rdft.o $(RDFT-OBJS-yes)
|
||||
OBJS-$(CONFIG_SHARED) += log2_tab.o
|
||||
OBJS-$(CONFIG_SINEWIN) += sinewin.o
|
||||
OBJS-$(CONFIG_TPELDSP) += tpeldsp.o
|
||||
OBJS-$(CONFIG_VAAPI) += vaapi.o
|
||||
OBJS-$(CONFIG_VDA) += vda.o
|
||||
OBJS-$(CONFIG_VDPAU) += vdpau.o
|
||||
OBJS-$(CONFIG_VIDEODSP) += videodsp.o
|
||||
OBJS-$(CONFIG_VP3DSP) += vp3dsp.o
|
||||
@@ -109,16 +91,13 @@ OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o \
|
||||
psymodel.o iirfilter.o \
|
||||
mpeg4audio.o kbdwin.o
|
||||
OBJS-$(CONFIG_AASC_DECODER) += aasc.o msrledec.o
|
||||
OBJS-$(CONFIG_AC3_DECODER) += ac3dec_float.o ac3dec_data.o ac3.o kbdwin.o
|
||||
OBJS-$(CONFIG_AC3_FIXED_DECODER) += ac3dec_fixed.o ac3dec_data.o ac3.o kbdwin.o
|
||||
OBJS-$(CONFIG_AC3_DECODER) += ac3dec.o ac3dec_data.o ac3.o kbdwin.o
|
||||
OBJS-$(CONFIG_AC3_ENCODER) += ac3enc_float.o ac3enc.o ac3tab.o \
|
||||
ac3.o kbdwin.o
|
||||
OBJS-$(CONFIG_AC3_FIXED_ENCODER) += ac3enc_fixed.o ac3enc.o ac3tab.o ac3.o
|
||||
OBJS-$(CONFIG_AIC_DECODER) += aic.o
|
||||
OBJS-$(CONFIG_ALAC_DECODER) += alac.o alac_data.o
|
||||
OBJS-$(CONFIG_ALAC_ENCODER) += alacenc.o alac_data.o
|
||||
OBJS-$(CONFIG_ALIAS_PIX_DECODER) += aliaspixdec.o
|
||||
OBJS-$(CONFIG_ALIAS_PIX_ENCODER) += aliaspixenc.o
|
||||
OBJS-$(CONFIG_ALS_DECODER) += alsdec.o bgmc.o mpeg4audio.o
|
||||
OBJS-$(CONFIG_AMRNB_DECODER) += amrnbdec.o celp_filters.o \
|
||||
celp_math.o acelp_filters.o \
|
||||
@@ -128,7 +107,8 @@ OBJS-$(CONFIG_AMRWB_DECODER) += amrwbdec.o celp_filters.o \
|
||||
celp_math.o acelp_filters.o \
|
||||
acelp_vectors.o \
|
||||
acelp_pitch_delay.o
|
||||
OBJS-$(CONFIG_AMV_ENCODER) += mjpegenc.o mjpeg.o mjpegenc_common.o \
|
||||
OBJS-$(CONFIG_AMV_DECODER) += sp5xdec.o mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_AMV_ENCODER) += mjpegenc.o mjpeg.o \
|
||||
mpegvideo_enc.o motion_est.o \
|
||||
ratecontrol.o mpeg12data.o \
|
||||
mpegvideo.o
|
||||
@@ -165,9 +145,9 @@ OBJS-$(CONFIG_BINKAUDIO_RDFT_DECODER) += binkaudio.o wma.o wma_common.o
|
||||
OBJS-$(CONFIG_BINTEXT_DECODER) += bintext.o cga_data.o
|
||||
OBJS-$(CONFIG_BMP_DECODER) += bmp.o msrledec.o
|
||||
OBJS-$(CONFIG_BMP_ENCODER) += bmpenc.o
|
||||
OBJS-$(CONFIG_BMV_AUDIO_DECODER) += bmvaudio.o
|
||||
OBJS-$(CONFIG_BMV_VIDEO_DECODER) += bmvvideo.o
|
||||
OBJS-$(CONFIG_BRENDER_PIX_DECODER) += brenderpix.o
|
||||
OBJS-$(CONFIG_BMV_VIDEO_DECODER) += bmv.o
|
||||
OBJS-$(CONFIG_BMV_AUDIO_DECODER) += bmv.o
|
||||
OBJS-$(CONFIG_BRENDER_PIX_DECODER) += brender_pix.o
|
||||
OBJS-$(CONFIG_C93_DECODER) += c93.o
|
||||
OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdec.o cavsdsp.o \
|
||||
cavsdata.o mpeg12data.o
|
||||
@@ -175,8 +155,8 @@ OBJS-$(CONFIG_CDGRAPHICS_DECODER) += cdgraphics.o
|
||||
OBJS-$(CONFIG_CDXL_DECODER) += cdxl.o
|
||||
OBJS-$(CONFIG_CINEPAK_DECODER) += cinepak.o
|
||||
OBJS-$(CONFIG_CINEPAK_ENCODER) += cinepakenc.o elbg.o
|
||||
OBJS-$(CONFIG_CLJR_DECODER) += cljrdec.o
|
||||
OBJS-$(CONFIG_CLJR_ENCODER) += cljrenc.o
|
||||
OBJS-$(CONFIG_CLJR_DECODER) += cljr.o
|
||||
OBJS-$(CONFIG_CLJR_ENCODER) += cljr.o
|
||||
OBJS-$(CONFIG_CLLC_DECODER) += cllc.o
|
||||
OBJS-$(CONFIG_COOK_DECODER) += cook.o
|
||||
OBJS-$(CONFIG_COMFORTNOISE_DECODER) += cngdec.o celp_filters.o
|
||||
@@ -194,18 +174,14 @@ OBJS-$(CONFIG_DNXHD_DECODER) += dnxhddec.o dnxhddata.o
|
||||
OBJS-$(CONFIG_DNXHD_ENCODER) += dnxhdenc.o dnxhddata.o
|
||||
OBJS-$(CONFIG_DPX_DECODER) += dpx.o
|
||||
OBJS-$(CONFIG_DPX_ENCODER) += dpxenc.o
|
||||
OBJS-$(CONFIG_DSD_LSBF_DECODER) += dsddec.o
|
||||
OBJS-$(CONFIG_DSD_MSBF_DECODER) += dsddec.o
|
||||
OBJS-$(CONFIG_DSD_LSBF_PLANAR_DECODER) += dsddec.o
|
||||
OBJS-$(CONFIG_DSD_MSBF_PLANAR_DECODER) += dsddec.o
|
||||
OBJS-$(CONFIG_DSICINAUDIO_DECODER) += dsicinaudio.o
|
||||
OBJS-$(CONFIG_DSICINVIDEO_DECODER) += dsicinvideo.o
|
||||
OBJS-$(CONFIG_DSICINAUDIO_DECODER) += dsicinav.o
|
||||
OBJS-$(CONFIG_DSICINVIDEO_DECODER) += dsicinav.o
|
||||
OBJS-$(CONFIG_DVBSUB_DECODER) += dvbsubdec.o
|
||||
OBJS-$(CONFIG_DVBSUB_ENCODER) += dvbsub.o
|
||||
OBJS-$(CONFIG_DVDSUB_DECODER) += dvdsubdec.o
|
||||
OBJS-$(CONFIG_DVDSUB_ENCODER) += dvdsubenc.o
|
||||
OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o
|
||||
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dvenc.o dv.o dvdata.o
|
||||
OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o dv_profile.o
|
||||
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dvenc.o dv.o dvdata.o dv_profile.o
|
||||
OBJS-$(CONFIG_DXA_DECODER) += dxa.o
|
||||
OBJS-$(CONFIG_DXTORY_DECODER) += dxtory.o
|
||||
OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o
|
||||
@@ -215,7 +191,8 @@ OBJS-$(CONFIG_EAMAD_DECODER) += eamad.o eaidct.o mpeg12.o \
|
||||
mpeg12data.o
|
||||
OBJS-$(CONFIG_EATGQ_DECODER) += eatgq.o eaidct.o
|
||||
OBJS-$(CONFIG_EATGV_DECODER) += eatgv.o
|
||||
OBJS-$(CONFIG_EATQI_DECODER) += eatqi.o eaidct.o
|
||||
OBJS-$(CONFIG_EATQI_DECODER) += eatqi.o eaidct.o mpeg12dec.o \
|
||||
mpeg12.o mpeg12data.o
|
||||
OBJS-$(CONFIG_EIGHTBPS_DECODER) += 8bps.o
|
||||
OBJS-$(CONFIG_EIGHTSVX_EXP_DECODER) += 8svx.o
|
||||
OBJS-$(CONFIG_EIGHTSVX_FIB_DECODER) += 8svx.o
|
||||
@@ -225,6 +202,8 @@ OBJS-$(CONFIG_EVRC_DECODER) += evrcdec.o acelp_vectors.o lsp.o
|
||||
OBJS-$(CONFIG_EXR_DECODER) += exr.o
|
||||
OBJS-$(CONFIG_FFV1_DECODER) += ffv1dec.o ffv1.o
|
||||
OBJS-$(CONFIG_FFV1_ENCODER) += ffv1enc.o ffv1.o
|
||||
OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o huffyuvdec.o
|
||||
OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o huffyuvenc.o
|
||||
OBJS-$(CONFIG_FFWAVESYNTH_DECODER) += ffwavesynth.o
|
||||
OBJS-$(CONFIG_FIC_DECODER) += fic.o
|
||||
OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o flacdsp.o
|
||||
@@ -253,17 +232,18 @@ OBJS-$(CONFIG_H263_DECODER) += h263dec.o h263.o ituh263dec.o \
|
||||
intelh263dec.o
|
||||
OBJS-$(CONFIG_H263_ENCODER) += mpeg4videoenc.o mpeg4video.o \
|
||||
h263.o ituh263enc.o flvenc.o
|
||||
OBJS-$(CONFIG_H264_DECODER) += h264.o h264_cabac.o h264_cavlc.o \
|
||||
h264_direct.o h264_loopfilter.o \
|
||||
h264_mb.o h264_picture.o h264_ps.o \
|
||||
h264_refs.o h264_sei.o h264_slice.o
|
||||
OBJS-$(CONFIG_H264_DECODER) += h264.o \
|
||||
h264_loopfilter.o h264_direct.o \
|
||||
cabac.o h264_sei.o h264_ps.o \
|
||||
h264_refs.o h264_cavlc.o h264_cabac.o
|
||||
OBJS-$(CONFIG_H264_VDA_DECODER) += vda_h264_dec.o
|
||||
OBJS-$(CONFIG_HEVC_DECODER) += hevc.o hevc_mvs.o hevc_ps.o hevc_sei.o \
|
||||
hevc_cabac.o hevc_refs.o hevcpred.o \
|
||||
hevcdsp.o hevc_filter.o
|
||||
hevcdsp.o hevc_filter.o cabac.o
|
||||
OBJS-$(CONFIG_HNM4_VIDEO_DECODER) += hnm4video.o
|
||||
OBJS-$(CONFIG_HUFFYUV_DECODER) += huffyuv.o huffyuvdec.o
|
||||
OBJS-$(CONFIG_HUFFYUV_ENCODER) += huffyuv.o huffyuvenc.o
|
||||
OBJS-$(CONFIG_IAC_DECODER) += imc.o
|
||||
OBJS-$(CONFIG_IDCIN_DECODER) += idcinvideo.o
|
||||
OBJS-$(CONFIG_IDF_DECODER) += bintext.o cga_data.o
|
||||
OBJS-$(CONFIG_IFF_BYTERUN1_DECODER) += iff.o
|
||||
@@ -280,13 +260,14 @@ OBJS-$(CONFIG_JPEG2000_ENCODER) += j2kenc.o mqcenc.o mqc.o jpeg2000.o \
|
||||
jpeg2000dwt.o
|
||||
OBJS-$(CONFIG_JPEG2000_DECODER) += jpeg2000dec.o jpeg2000.o \
|
||||
jpeg2000dwt.o mqcdec.o mqc.o
|
||||
OBJS-$(CONFIG_JPEGLS_DECODER) += jpeglsdec.o jpegls.o
|
||||
OBJS-$(CONFIG_JPEGLS_DECODER) += jpeglsdec.o jpegls.o \
|
||||
mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_JPEGLS_ENCODER) += jpeglsenc.o jpegls.o
|
||||
OBJS-$(CONFIG_JV_DECODER) += jvdec.o
|
||||
OBJS-$(CONFIG_KGV1_DECODER) += kgv1dec.o
|
||||
OBJS-$(CONFIG_KMVC_DECODER) += kmvc.o
|
||||
OBJS-$(CONFIG_LAGARITH_DECODER) += lagarith.o lagarithrac.o
|
||||
OBJS-$(CONFIG_LJPEG_ENCODER) += ljpegenc.o mjpeg.o mjpegenc_common.o
|
||||
OBJS-$(CONFIG_LJPEG_ENCODER) += ljpegenc.o mjpegenc.o mjpeg.o
|
||||
OBJS-$(CONFIG_LOCO_DECODER) += loco.o
|
||||
OBJS-$(CONFIG_MACE3_DECODER) += mace.o
|
||||
OBJS-$(CONFIG_MACE6_DECODER) += mace.o
|
||||
@@ -296,8 +277,8 @@ OBJS-$(CONFIG_METASOUND_DECODER) += metasound.o metasound_data.o \
|
||||
OBJS-$(CONFIG_MICRODVD_DECODER) += microdvddec.o ass.o
|
||||
OBJS-$(CONFIG_MIMIC_DECODER) += mimic.o
|
||||
OBJS-$(CONFIG_MJPEG_DECODER) += mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_MJPEG_ENCODER) += mjpegenc.o mjpeg.o mjpegenc_common.o
|
||||
OBJS-$(CONFIG_MJPEGB_DECODER) += mjpegbdec.o
|
||||
OBJS-$(CONFIG_MJPEG_ENCODER) += mjpegenc.o mjpeg.o
|
||||
OBJS-$(CONFIG_MJPEGB_DECODER) += mjpegbdec.o mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_MLP_DECODER) += mlpdec.o mlpdsp.o
|
||||
OBJS-$(CONFIG_MMVIDEO_DECODER) += mmvideo.o
|
||||
OBJS-$(CONFIG_MOTIONPIXELS_DECODER) += motionpixels.o
|
||||
@@ -341,16 +322,12 @@ OBJS-$(CONFIG_MSZH_DECODER) += lcldec.o
|
||||
OBJS-$(CONFIG_MTS2_DECODER) += mss4.o mss34dsp.o
|
||||
OBJS-$(CONFIG_MVC1_DECODER) += mvcdec.o
|
||||
OBJS-$(CONFIG_MVC2_DECODER) += mvcdec.o
|
||||
OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o
|
||||
OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
|
||||
OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
|
||||
OBJS-$(CONFIG_ON2AVC_DECODER) += on2avc.o on2avcdata.o
|
||||
OBJS-$(CONFIG_OPUS_DECODER) += opusdec.o opus.o opus_celt.o \
|
||||
opus_imdct.o opus_silk.o \
|
||||
vorbis_data.o
|
||||
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += pafaudio.o
|
||||
OBJS-$(CONFIG_PAF_VIDEO_DECODER) += pafvideo.o
|
||||
OBJS-$(CONFIG_PAF_VIDEO_DECODER) += paf.o
|
||||
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += paf.o
|
||||
OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
|
||||
OBJS-$(CONFIG_PAM_ENCODER) += pamenc.o
|
||||
OBJS-$(CONFIG_PBM_DECODER) += pnmdec.o pnm.o
|
||||
@@ -428,7 +405,7 @@ OBJS-$(CONFIG_SOL_DPCM_DECODER) += dpcm.o
|
||||
OBJS-$(CONFIG_SONIC_DECODER) += sonic.o
|
||||
OBJS-$(CONFIG_SONIC_ENCODER) += sonic.o
|
||||
OBJS-$(CONFIG_SONIC_LS_ENCODER) += sonic.o
|
||||
OBJS-$(CONFIG_SP5X_DECODER) += sp5xdec.o
|
||||
OBJS-$(CONFIG_SP5X_DECODER) += sp5xdec.o mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_SRT_DECODER) += srtdec.o ass.o
|
||||
OBJS-$(CONFIG_SRT_ENCODER) += srtenc.o ass_split.o
|
||||
OBJS-$(CONFIG_SUBRIP_DECODER) += srtdec.o ass.o
|
||||
@@ -440,13 +417,17 @@ OBJS-$(CONFIG_SUNRAST_ENCODER) += sunrastenc.o
|
||||
OBJS-$(CONFIG_SVQ1_DECODER) += svq1dec.o svq1.o svq13.o h263.o
|
||||
OBJS-$(CONFIG_SVQ1_ENCODER) += svq1enc.o svq1.o \
|
||||
h263.o ituh263enc.o
|
||||
OBJS-$(CONFIG_SVQ3_DECODER) += svq3.o svq13.o mpegutils.o
|
||||
OBJS-$(CONFIG_SVQ3_DECODER) += svq3.o svq13.o h263.o h264.o \
|
||||
h264_loopfilter.o h264_direct.o \
|
||||
h264_sei.o h264_ps.o h264_refs.o \
|
||||
h264_cavlc.o h264_cabac.o cabac.o
|
||||
OBJS-$(CONFIG_TEXT_DECODER) += textdec.o ass.o
|
||||
OBJS-$(CONFIG_TAK_DECODER) += takdec.o tak.o
|
||||
OBJS-$(CONFIG_TARGA_DECODER) += targa.o
|
||||
OBJS-$(CONFIG_TARGA_ENCODER) += targaenc.o rle.o
|
||||
OBJS-$(CONFIG_TARGA_Y216_DECODER) += targa_y216dec.o
|
||||
OBJS-$(CONFIG_THEORA_DECODER) += xiph.o
|
||||
OBJS-$(CONFIG_THP_DECODER) += mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_TIERTEXSEQVIDEO_DECODER) += tiertexseqv.o
|
||||
OBJS-$(CONFIG_TIFF_DECODER) += tiff.o lzw.o faxcompr.o tiff_data.o tiff_common.o
|
||||
OBJS-$(CONFIG_TIFF_ENCODER) += tiffenc.o rle.o lzwenc.o tiff_data.o
|
||||
@@ -477,10 +458,10 @@ OBJS-$(CONFIG_VB_DECODER) += vb.o
|
||||
OBJS-$(CONFIG_VBLE_DECODER) += vble.o
|
||||
OBJS-$(CONFIG_VC1_DECODER) += vc1dec.o vc1.o vc1data.o vc1dsp.o \
|
||||
msmpeg4dec.o msmpeg4.o msmpeg4data.o \
|
||||
wmv2dsp.o startcode.o
|
||||
wmv2dsp.o
|
||||
OBJS-$(CONFIG_VCR1_DECODER) += vcr1.o
|
||||
OBJS-$(CONFIG_VMDAUDIO_DECODER) += vmdaudio.o
|
||||
OBJS-$(CONFIG_VMDVIDEO_DECODER) += vmdvideo.o
|
||||
OBJS-$(CONFIG_VMDAUDIO_DECODER) += vmdav.o
|
||||
OBJS-$(CONFIG_VMDVIDEO_DECODER) += vmdav.o
|
||||
OBJS-$(CONFIG_VMNC_DECODER) += vmnc.o
|
||||
OBJS-$(CONFIG_VORBIS_DECODER) += vorbisdec.o vorbisdsp.o vorbis.o \
|
||||
vorbis_data.o xiph.o
|
||||
@@ -491,7 +472,6 @@ OBJS-$(CONFIG_VP5_DECODER) += vp5.o vp56.o vp56data.o vp56dsp.o \
|
||||
vp56rac.o
|
||||
OBJS-$(CONFIG_VP6_DECODER) += vp6.o vp56.o vp56data.o vp56dsp.o \
|
||||
vp6dsp.o vp56rac.o
|
||||
OBJS-$(CONFIG_VP7_DECODER) += vp8.o vp8dsp.o vp56rac.o
|
||||
OBJS-$(CONFIG_VP8_DECODER) += vp8.o vp8dsp.o vp56rac.o
|
||||
OBJS-$(CONFIG_VP9_DECODER) += vp9.o vp9dsp.o vp56rac.o
|
||||
OBJS-$(CONFIG_VPLAYER_DECODER) += textdec.o ass.o
|
||||
@@ -501,7 +481,6 @@ OBJS-$(CONFIG_WAVPACK_ENCODER) += wavpackenc.o
|
||||
OBJS-$(CONFIG_WEBP_DECODER) += vp8.o vp8dsp.o vp56rac.o
|
||||
OBJS-$(CONFIG_WEBP_DECODER) += webp.o exif.o tiff_common.o
|
||||
OBJS-$(CONFIG_WEBVTT_DECODER) += webvttdec.o
|
||||
OBJS-$(CONFIG_WEBVTT_ENCODER) += webvttenc.o ass_split.o
|
||||
OBJS-$(CONFIG_WMALOSSLESS_DECODER) += wmalosslessdec.o wma_common.o
|
||||
OBJS-$(CONFIG_WMAPRO_DECODER) += wmaprodec.o wma.o wma_common.o
|
||||
OBJS-$(CONFIG_WMAV1_DECODER) += wmadec.o wma.o wma_common.o aactab.o
|
||||
@@ -512,7 +491,6 @@ OBJS-$(CONFIG_WMAVOICE_DECODER) += wmavoice.o \
|
||||
celp_filters.o \
|
||||
acelp_vectors.o acelp_filters.o
|
||||
OBJS-$(CONFIG_WMV1_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o
|
||||
OBJS-$(CONFIG_WMV1_ENCODER) += msmpeg4enc.o
|
||||
OBJS-$(CONFIG_WMV2_DECODER) += wmv2dec.o wmv2.o wmv2dsp.o \
|
||||
msmpeg4dec.o msmpeg4.o msmpeg4data.o
|
||||
OBJS-$(CONFIG_WMV2_ENCODER) += wmv2enc.o wmv2.o wmv2dsp.o \
|
||||
@@ -641,7 +619,6 @@ OBJS-$(CONFIG_ADPCM_SBPRO_4_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_SWF_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_SWF_ENCODER) += adpcmenc.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_THP_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_VIMA_DECODER) += vima.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_XA_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcmenc.o adpcm_data.o
|
||||
@@ -668,8 +645,11 @@ OBJS-$(CONFIG_VC1_VDPAU_HWACCEL) += vdpau_vc1.o
|
||||
|
||||
# libavformat dependencies
|
||||
OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o
|
||||
OBJS-$(CONFIG_ADX_DEMUXER) += adx.o
|
||||
OBJS-$(CONFIG_CAF_DEMUXER) += mpeg4audio.o mpegaudiodata.o \
|
||||
ac3tab.o
|
||||
OBJS-$(CONFIG_DV_DEMUXER) += dv_profile.o
|
||||
OBJS-$(CONFIG_DV_MUXER) += dv_profile.o
|
||||
OBJS-$(CONFIG_FLAC_DEMUXER) += flac.o flacdata.o vorbis_data.o \
|
||||
vorbis_parser.o xiph.o
|
||||
OBJS-$(CONFIG_FLAC_MUXER) += flac.o flacdata.o vorbis_data.o
|
||||
@@ -680,8 +660,7 @@ OBJS-$(CONFIG_ISMV_MUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_LATM_MUXER) += mpeg4audio.o
|
||||
OBJS-$(CONFIG_MATROSKA_AUDIO_MUXER) += xiph.o mpeg4audio.o vorbis_data.o \
|
||||
flac.o flacdata.o
|
||||
OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o mpegaudiodata.o \
|
||||
vorbis_parser.o xiph.o
|
||||
OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_MATROSKA_MUXER) += mpeg4audio.o mpegaudiodata.o \
|
||||
flac.o flacdata.o vorbis_data.o xiph.o
|
||||
OBJS-$(CONFIG_MP2_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
|
||||
@@ -692,7 +671,6 @@ OBJS-$(CONFIG_MPEGTS_MUXER) += mpeg4audio.o
|
||||
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_MXF_MUXER) += dnxhddata.o
|
||||
OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
|
||||
OBJS-$(CONFIG_OGA_MUXER) += xiph.o flac.o flacdata.o
|
||||
OBJS-$(CONFIG_OGG_DEMUXER) += xiph.o flac.o flacdata.o \
|
||||
mpeg12data.o vorbis_parser.o \
|
||||
dirac.o vorbis_data.o
|
||||
@@ -784,7 +762,11 @@ OBJS-$(CONFIG_FLAC_PARSER) += flac_parser.o flacdata.o flac.o \
|
||||
OBJS-$(CONFIG_GSM_PARSER) += gsm_parser.o
|
||||
OBJS-$(CONFIG_H261_PARSER) += h261_parser.o
|
||||
OBJS-$(CONFIG_H263_PARSER) += h263_parser.o
|
||||
OBJS-$(CONFIG_H264_PARSER) += h264_parser.o
|
||||
OBJS-$(CONFIG_H264_PARSER) += h264_parser.o h264.o \
|
||||
cabac.o \
|
||||
h264_refs.o h264_sei.o h264_direct.o \
|
||||
h264_loopfilter.o h264_cabac.o \
|
||||
h264_cavlc.o h264_ps.o
|
||||
OBJS-$(CONFIG_HEVC_PARSER) += hevc_parser.o
|
||||
OBJS-$(CONFIG_MJPEG_PARSER) += mjpeg_parser.o
|
||||
OBJS-$(CONFIG_MLP_PARSER) += mlp_parser.o mlp.o
|
||||
@@ -796,15 +778,14 @@ OBJS-$(CONFIG_MPEGAUDIO_PARSER) += mpegaudio_parser.o \
|
||||
mpegaudiodecheader.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_MPEGVIDEO_PARSER) += mpegvideo_parser.o \
|
||||
mpeg12.o mpeg12data.o
|
||||
OBJS-$(CONFIG_OPUS_PARSER) += opus_parser.o opus.o vorbis_data.o
|
||||
OBJS-$(CONFIG_PNG_PARSER) += png_parser.o
|
||||
OBJS-$(CONFIG_PNM_PARSER) += pnm_parser.o pnm.o
|
||||
OBJS-$(CONFIG_RV30_PARSER) += rv34_parser.o
|
||||
OBJS-$(CONFIG_RV40_PARSER) += rv34_parser.o
|
||||
OBJS-$(CONFIG_TAK_PARSER) += tak_parser.o tak.o
|
||||
OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o vc1.o vc1data.o vc1dsp.o \
|
||||
OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o vc1.o vc1data.o \
|
||||
msmpeg4.o msmpeg4data.o mpeg4video.o \
|
||||
h263.o startcode.o
|
||||
h263.o
|
||||
OBJS-$(CONFIG_VORBIS_PARSER) += vorbis_parser.o xiph.o
|
||||
OBJS-$(CONFIG_VP3_PARSER) += vp3_parser.o
|
||||
OBJS-$(CONFIG_VP8_PARSER) += vp8_parser.o
|
||||
@@ -839,7 +820,6 @@ SKIPHEADERS += %_tablegen.h \
|
||||
%_tables.h \
|
||||
aac_tablegen_decl.h \
|
||||
fft-internal.h \
|
||||
libutvideo.h \
|
||||
old_codec_ids.h \
|
||||
tableprint.h \
|
||||
$(ARCH)/vp56_arith.h \
|
||||
@@ -849,7 +829,7 @@ SKIPHEADERS-$(CONFIG_LIBSCHROEDINGER) += libschroedinger.h
|
||||
SKIPHEADERS-$(CONFIG_LIBUTVIDEO) += libutvideo.h
|
||||
SKIPHEADERS-$(CONFIG_XVMC) += xvmc.h
|
||||
SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_internal.h
|
||||
SKIPHEADERS-$(CONFIG_VDA) += vda.h vda_internal.h
|
||||
SKIPHEADERS-$(CONFIG_VDA) += vda.h
|
||||
SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h vdpau_internal.h
|
||||
|
||||
TESTPROGS = cabac \
|
||||
@@ -872,7 +852,6 @@ HOSTPROGS = aac_tablegen \
|
||||
aacps_tablegen \
|
||||
cbrt_tablegen \
|
||||
cos_tablegen \
|
||||
dsd_tablegen \
|
||||
dv_tablegen \
|
||||
motionpixels_tablegen \
|
||||
mpegaudio_tablegen \
|
||||
@@ -897,7 +876,7 @@ else
|
||||
$(SUBDIR)%_tablegen$(HOSTEXESUF): HOSTCFLAGS += -DCONFIG_SMALL=0
|
||||
endif
|
||||
|
||||
GEN_HEADERS = cbrt_tables.h aacps_tables.h aac_tables.h dsd_tables.h dv_tables.h \
|
||||
GEN_HEADERS = cbrt_tables.h aacps_tables.h aac_tables.h dv_tables.h \
|
||||
sinewin_tables.h mpegaudio_tables.h motionpixels_tables.h \
|
||||
pcm_tables.h qdm2_tables.h
|
||||
GEN_HEADERS := $(addprefix $(SUBDIR), $(GEN_HEADERS))
|
||||
@@ -909,7 +888,6 @@ ifdef CONFIG_HARDCODED_TABLES
|
||||
$(SUBDIR)aacdec.o: $(SUBDIR)cbrt_tables.h
|
||||
$(SUBDIR)aacps.o: $(SUBDIR)aacps_tables.h
|
||||
$(SUBDIR)aactab.o: $(SUBDIR)aac_tables.h
|
||||
$(SUBDIR)dsddec.o: $(SUBDIR)dsd_tables.h
|
||||
$(SUBDIR)dvenc.o: $(SUBDIR)dv_tables.h
|
||||
$(SUBDIR)sinewin.o: $(SUBDIR)sinewin_tables.h
|
||||
$(SUBDIR)mpegaudiodec_fixed.o: $(SUBDIR)mpegaudio_tables.h
|
||||
|
@@ -220,9 +220,9 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
|
||||
a64_palette[mc_colors[a]][2] * 0.11;
|
||||
}
|
||||
|
||||
if (!(c->mc_meta_charset = av_malloc_array(c->mc_lifetime, 32000 * sizeof(int))) ||
|
||||
if (!(c->mc_meta_charset = av_malloc(32000 * c->mc_lifetime * sizeof(int))) ||
|
||||
!(c->mc_best_cb = av_malloc(CHARSET_CHARS * 32 * sizeof(int))) ||
|
||||
!(c->mc_charmap = av_mallocz_array(c->mc_lifetime, 1000 * sizeof(int))) ||
|
||||
!(c->mc_charmap = av_mallocz(1000 * c->mc_lifetime * sizeof(int))) ||
|
||||
!(c->mc_colram = av_mallocz(CHARSET_CHARS * sizeof(uint8_t))) ||
|
||||
!(c->mc_charset = av_malloc(0x800 * (INTERLACED+1) * sizeof(uint8_t)))) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Failed to allocate buffer memory.\n");
|
||||
|
@@ -34,7 +34,7 @@ static int aac_sync(uint64_t state, AACAC3ParseContext *hdr_info,
|
||||
int size;
|
||||
union {
|
||||
uint64_t u64;
|
||||
uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
uint8_t u8[8];
|
||||
} tmp;
|
||||
|
||||
tmp.u64 = av_be2ne64(state);
|
||||
|
@@ -31,7 +31,7 @@ int avpriv_aac_parse_header(GetBitContext *gbc, AACADTSHeaderInfo *hdr)
|
||||
int size, rdb, ch, sr;
|
||||
int aot, crc_abs;
|
||||
|
||||
if (get_bits(gbc, 12) != 0xfff)
|
||||
if(get_bits(gbc, 12) != 0xfff)
|
||||
return AAC_AC3_PARSE_ERROR_SYNC;
|
||||
|
||||
skip_bits1(gbc); /* id */
|
||||
@@ -39,7 +39,7 @@ int avpriv_aac_parse_header(GetBitContext *gbc, AACADTSHeaderInfo *hdr)
|
||||
crc_abs = get_bits1(gbc); /* protection_absent */
|
||||
aot = get_bits(gbc, 2); /* profile_objecttype */
|
||||
sr = get_bits(gbc, 4); /* sample_frequency_index */
|
||||
if (!avpriv_mpeg4audio_sample_rates[sr])
|
||||
if(!avpriv_mpeg4audio_sample_rates[sr])
|
||||
return AAC_AC3_PARSE_ERROR_SAMPLE_RATE;
|
||||
skip_bits1(gbc); /* private_bit */
|
||||
ch = get_bits(gbc, 3); /* channel_configuration */
|
||||
@@ -51,7 +51,7 @@ int avpriv_aac_parse_header(GetBitContext *gbc, AACADTSHeaderInfo *hdr)
|
||||
skip_bits1(gbc); /* copyright_identification_bit */
|
||||
skip_bits1(gbc); /* copyright_identification_start */
|
||||
size = get_bits(gbc, 13); /* aac_frame_length */
|
||||
if (size < AAC_ADTS_HEADER_SIZE)
|
||||
if(size < AAC_ADTS_HEADER_SIZE)
|
||||
return AAC_AC3_PARSE_ERROR_FRAME_SIZE;
|
||||
|
||||
skip_bits(gbc, 11); /* adts_buffer_fullness */
|
||||
|
@@ -1946,7 +1946,7 @@ static int decode_ics(AACContext *ac, SingleChannelElement *sce,
|
||||
avpriv_request_sample(ac->avctx, "SSR");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
// I see no textual basis in the spec for this occurring after SSR gain
|
||||
// I see no textual basis in the spec for this occuring after SSR gain
|
||||
// control, but this is what both reference and real implmentations do
|
||||
if (tns->present && er_syntax)
|
||||
if (decode_tns(ac, tns, gb, ics) < 0)
|
||||
@@ -2631,7 +2631,7 @@ static void apply_dependent_coupling(AACContext *ac,
|
||||
const float gain = cce->coup.gain[index][idx];
|
||||
for (group = 0; group < ics->group_len[g]; group++) {
|
||||
for (k = offsets[i]; k < offsets[i + 1]; k++) {
|
||||
// FIXME: SIMDify
|
||||
// XXX dsputil-ize
|
||||
dest[group * 128 + k] += gain * src[group * 128 + k];
|
||||
}
|
||||
}
|
||||
|
@@ -769,7 +769,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
|
||||
if (HAVE_MIPSDSPR1)
|
||||
ff_aac_coder_init_mips(s);
|
||||
|
||||
s->lambda = avctx->global_quality > 0 ? avctx->global_quality : 120;
|
||||
s->lambda = avctx->global_quality ? avctx->global_quality : 120;
|
||||
|
||||
ff_aac_tableinit();
|
||||
|
||||
|
@@ -354,7 +354,7 @@ static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
|
||||
}
|
||||
}
|
||||
|
||||
pctx->ch = av_mallocz_array(ctx->avctx->channels, sizeof(AacPsyChannel));
|
||||
pctx->ch = av_mallocz(sizeof(AacPsyChannel) * ctx->avctx->channels);
|
||||
|
||||
lame_window_init(pctx, ctx->avctx);
|
||||
|
||||
|
@@ -18,7 +18,7 @@
|
||||
|
||||
/**
|
||||
* @file
|
||||
* AAN (Arai, Agui and Nakajima) (I)DCT tables
|
||||
* AAN (Arai Agui Aakajima) (I)DCT tables
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
|
@@ -18,7 +18,7 @@
|
||||
|
||||
/**
|
||||
* @file
|
||||
* AAN (Arai, Agui and Nakajima) (I)DCT tables
|
||||
* AAN (Arai Agui Nakajima) (I)DCT tables
|
||||
*/
|
||||
|
||||
#ifndef AVCODEC_AANDCTTAB_H
|
||||
|
@@ -1,28 +1,14 @@
|
||||
OBJS-$(CONFIG_FFT) += aarch64/fft_init_aarch64.o
|
||||
OBJS-$(CONFIG_H264CHROMA) += aarch64/h264chroma_init_aarch64.o
|
||||
OBJS-$(CONFIG_H264DSP) += aarch64/h264dsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_H264QPEL) += aarch64/h264qpel_init_aarch64.o
|
||||
OBJS-$(CONFIG_HPELDSP) += aarch64/hpeldsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_MPEGAUDIODSP) += aarch64/mpegaudiodsp_init.o
|
||||
OBJS-$(CONFIG_NEON_CLOBBER_TEST) += aarch64/neontest.o
|
||||
OBJS-$(CONFIG_VIDEODSP) += aarch64/videodsp_init.o
|
||||
|
||||
OBJS-$(CONFIG_OPUS_DECODER) += aarch64/opus_imdct_init.o
|
||||
OBJS-$(CONFIG_RV40_DECODER) += aarch64/rv40dsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_VC1_DECODER) += aarch64/vc1dsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_VORBIS_DECODER) += aarch64/vorbisdsp_init.o
|
||||
|
||||
ARMV8-OBJS-$(CONFIG_VIDEODSP) += aarch64/videodsp.o
|
||||
|
||||
NEON-OBJS-$(CONFIG_FFT) += aarch64/fft_neon.o
|
||||
NEON-OBJS-$(CONFIG_H264CHROMA) += aarch64/h264cmc_neon.o
|
||||
NEON-OBJS-$(CONFIG_H264DSP) += aarch64/h264dsp_neon.o \
|
||||
aarch64/h264idct_neon.o
|
||||
NEON-OBJS-$(CONFIG_H264QPEL) += aarch64/h264qpel_neon.o \
|
||||
aarch64/hpeldsp_neon.o
|
||||
NEON-OBJS-$(CONFIG_HPELDSP) += aarch64/hpeldsp_neon.o
|
||||
NEON-OBJS-$(CONFIG_MPEGAUDIODSP) += aarch64/mpegaudiodsp_neon.o
|
||||
NEON-OBJS-$(CONFIG_MDCT) += aarch64/mdct_neon.o
|
||||
|
||||
NEON-OBJS-$(CONFIG_OPUS_DECODER) += aarch64/opus_imdct_neon.o
|
||||
NEON-OBJS-$(CONFIG_VORBIS_DECODER) += aarch64/vorbisdsp_neon.o
|
||||
|
@@ -1,30 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVCODEC_AARCH64_ASM_OFFSETS_H
|
||||
#define AVCODEC_AARCH64_ASM_OFFSETS_H
|
||||
|
||||
/* CeltIMDCTContext */
|
||||
#define CELT_EXPTAB 0x20
|
||||
#define CELT_FFT_N 0x00
|
||||
#define CELT_LEN2 0x04
|
||||
#define CELT_LEN4 (CELT_LEN2 + 0x4) // loaded as pair
|
||||
#define CELT_TMP 0x10
|
||||
#define CELT_TWIDDLE (CELT_TMP + 0x8) // loaded as pair
|
||||
|
||||
#endif /* AVCODEC_AARCH64_ASM_OFFSETS_H */
|
@@ -1,104 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVCODEC_AARCH64_CABAC_H
|
||||
#define AVCODEC_AARCH64_CABAC_H
|
||||
|
||||
#include "config.h"
|
||||
#if HAVE_INLINE_ASM
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/internal.h"
|
||||
#include "libavcodec/cabac.h"
|
||||
|
||||
#define get_cabac_inline get_cabac_inline_aarch64
|
||||
static av_always_inline int get_cabac_inline_aarch64(CABACContext *c,
|
||||
uint8_t *const state)
|
||||
{
|
||||
int bit;
|
||||
void *reg_a, *reg_b, *reg_c, *tmp;
|
||||
|
||||
__asm__ volatile(
|
||||
"ldrb %w[bit] , [%[state]] \n\t"
|
||||
"add %[r_b] , %[tables] , %[lps_off] \n\t"
|
||||
"mov %w[tmp] , %w[range] \n\t"
|
||||
"and %w[range] , %w[range] , #0xC0 \n\t"
|
||||
"lsl %w[r_c] , %w[range] , #1 \n\t"
|
||||
"add %[r_b] , %[r_b] , %w[bit], UXTW \n\t"
|
||||
"ldrb %w[range] , [%[r_b], %w[r_c], SXTW] \n\t"
|
||||
"sub %w[r_c] , %w[tmp] , %w[range] \n\t"
|
||||
"lsl %w[tmp] , %w[r_c] , #17 \n\t"
|
||||
"cmp %w[tmp] , %w[low] \n\t"
|
||||
"csel %w[tmp] , %w[tmp] , wzr , cc \n\t"
|
||||
"csel %w[range] , %w[r_c] , %w[range], gt \n\t"
|
||||
"cinv %w[bit] , %w[bit] , cc \n\t"
|
||||
"sub %w[low] , %w[low] , %w[tmp] \n\t"
|
||||
"add %[r_b] , %[tables] , %[norm_off] \n\t"
|
||||
"add %[r_a] , %[tables] , %[mlps_off] \n\t"
|
||||
"ldrb %w[tmp] , [%[r_b], %w[range], SXTW] \n\t"
|
||||
"ldrb %w[r_a] , [%[r_a], %w[bit], SXTW] \n\t"
|
||||
"lsl %w[low] , %w[low] , %w[tmp] \n\t"
|
||||
"lsl %w[range] , %w[range] , %w[tmp] \n\t"
|
||||
"uxth %w[r_c] , %w[low] \n\t"
|
||||
"strb %w[r_a] , [%[state]] \n\t"
|
||||
"cbnz %w[r_c] , 2f \n\t"
|
||||
"ldr %[r_c] , [%[c], %[byte]] \n\t"
|
||||
"ldr %[r_a] , [%[c], %[end]] \n\t"
|
||||
"ldrh %w[tmp] , [%[r_c]] \n\t"
|
||||
"cmp %[r_c] , %[r_a] \n\t"
|
||||
"b.ge 1f \n\t"
|
||||
"add %[r_a] , %[r_c] , #2 \n\t"
|
||||
"str %[r_a] , [%[c], %[byte]] \n\t"
|
||||
"1: \n\t"
|
||||
"sub %w[r_c] , %w[low] , #1 \n\t"
|
||||
"eor %w[r_c] , %w[r_c] , %w[low] \n\t"
|
||||
"rev %w[tmp] , %w[tmp] \n\t"
|
||||
"lsr %w[r_c] , %w[r_c] , #15 \n\t"
|
||||
"lsr %w[tmp] , %w[tmp] , #15 \n\t"
|
||||
"ldrb %w[r_c] , [%[r_b], %w[r_c], SXTW] \n\t"
|
||||
"mov %w[r_b] , #0xFFFF \n\t"
|
||||
"mov %w[r_a] , #7 \n\t"
|
||||
"sub %w[tmp] , %w[tmp] , %w[r_b] \n\t"
|
||||
"sub %w[r_c] , %w[r_a] , %w[r_c] \n\t"
|
||||
"lsl %w[tmp] , %w[tmp] , %w[r_c] \n\t"
|
||||
"add %w[low] , %w[low] , %w[tmp] \n\t"
|
||||
"2: \n\t"
|
||||
: [bit]"=&r"(bit),
|
||||
[low]"+&r"(c->low),
|
||||
[range]"+&r"(c->range),
|
||||
[r_a]"=&r"(reg_a),
|
||||
[r_b]"=&r"(reg_b),
|
||||
[r_c]"=&r"(reg_c),
|
||||
[tmp]"=&r"(tmp)
|
||||
: [c]"r"(c),
|
||||
[state]"r"(state),
|
||||
[tables]"r"(ff_h264_cabac_tables),
|
||||
[byte]"i"(offsetof(CABACContext, bytestream)),
|
||||
[end]"i"(offsetof(CABACContext, bytestream_end)),
|
||||
[norm_off]"I"(H264_NORM_SHIFT_OFFSET),
|
||||
[lps_off]"I"(H264_LPS_RANGE_OFFSET),
|
||||
[mlps_off]"I"(H264_MLPS_STATE_OFFSET + 128)
|
||||
: "memory", "cc"
|
||||
);
|
||||
|
||||
return bit & 1;
|
||||
}
|
||||
|
||||
#endif /* HAVE_INLINE_ASM */
|
||||
|
||||
#endif /* AVCODEC_AARCH64_CABAC_H */
|
@@ -1,47 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include "libavutil/aarch64/cpu.h"
|
||||
#include "libavcodec/fft.h"
|
||||
|
||||
void ff_fft_permute_neon(FFTContext *s, FFTComplex *z);
|
||||
void ff_fft_calc_neon(FFTContext *s, FFTComplex *z);
|
||||
|
||||
void ff_imdct_calc_neon(FFTContext *s, FFTSample *output, const FFTSample *input);
|
||||
void ff_imdct_half_neon(FFTContext *s, FFTSample *output, const FFTSample *input);
|
||||
void ff_mdct_calc_neon(FFTContext *s, FFTSample *output, const FFTSample *input);
|
||||
|
||||
av_cold void ff_fft_init_aarch64(FFTContext *s)
|
||||
{
|
||||
int cpu_flags = av_get_cpu_flags();
|
||||
|
||||
if (have_neon(cpu_flags)) {
|
||||
s->fft_permute = ff_fft_permute_neon;
|
||||
s->fft_calc = ff_fft_calc_neon;
|
||||
#if CONFIG_MDCT
|
||||
s->imdct_calc = ff_imdct_calc_neon;
|
||||
s->imdct_half = ff_imdct_half_neon;
|
||||
s->mdct_calc = ff_mdct_calc_neon;
|
||||
s->mdct_permutation = FF_MDCT_PERM_INTERLEAVE;
|
||||
#endif
|
||||
}
|
||||
}
|
@@ -1,442 +0,0 @@
|
||||
/*
|
||||
* ARM NEON optimised FFT
|
||||
*
|
||||
* Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
|
||||
* Copyright (c) 2009 Naotoshi Nojiri
|
||||
* Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
|
||||
*
|
||||
* This algorithm (though not any of the implementation details) is
|
||||
* based on libdjbfft by D. J. Bernstein.
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/aarch64/asm.S"
|
||||
|
||||
#define M_SQRT1_2 0.70710678118654752440
|
||||
|
||||
.macro transpose d0, d1, s0, s1
|
||||
trn1 \d0, \s0, \s1
|
||||
trn2 \d1, \s0, \s1
|
||||
.endm
|
||||
|
||||
|
||||
function fft4_neon
|
||||
ld1 {v0.2s,v1.2s,v2.2s,v3.2s}, [x0]
|
||||
|
||||
fadd v4.2s, v0.2s, v1.2s // r0+r1,i0+i1
|
||||
fsub v6.2s, v0.2s, v1.2s // r0-r1,i0-i1
|
||||
|
||||
ext v16.8b, v2.8b, v3.8b, #4
|
||||
ext v17.8b, v3.8b, v2.8b, #4
|
||||
|
||||
fadd v5.2s, v2.2s, v3.2s // i2+i3,r2+r3
|
||||
fsub v7.2s, v16.2s, v17.2s // r3-r2,i2-i3
|
||||
|
||||
fadd v0.2s, v4.2s, v5.2s
|
||||
fsub v2.2s, v4.2s, v5.2s
|
||||
fadd v1.2s, v6.2s, v7.2s
|
||||
fsub v3.2s, v6.2s, v7.2s
|
||||
|
||||
st1 {v0.2s,v1.2s,v2.2s,v3.2s}, [x0]
|
||||
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function fft8_neon
|
||||
mov x1, x0
|
||||
ld1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x0], #32
|
||||
ld1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0]
|
||||
ext v22.8b, v2.8b, v3.8b, #4
|
||||
ext v23.8b, v3.8b, v2.8b, #4
|
||||
fadd v4.2s, v16.2s, v17.2s // r4+r5,i4+i5
|
||||
fadd v5.2s, v18.2s, v19.2s // r6+r7,i6+i7
|
||||
fsub v17.2s, v16.2s, v17.2s // r4-r5,i4-i5
|
||||
fsub v19.2s, v18.2s, v19.2s // r6-r7,i6-i7
|
||||
rev64 v27.2s, v28.2s // ???
|
||||
fadd v20.2s, v0.2s, v1.2s // r0+r1,i0+i1
|
||||
fadd v21.2s, v2.2s, v3.2s // r2+r3,i2+i3
|
||||
fmul v26.2s, v17.2s, v28.2s // -a2r*w,a2i*w
|
||||
ext v6.8b, v4.8b, v5.8b, #4
|
||||
ext v7.8b, v5.8b, v4.8b, #4
|
||||
fmul v27.2s, v19.2s, v27.2s // a3r*w,-a3i*w
|
||||
fsub v23.2s, v22.2s, v23.2s // i2-i3,r3-r2
|
||||
fsub v22.2s, v0.2s, v1.2s // r0-r1,i0-i1
|
||||
fmul v24.2s, v17.2s, v28.s[1] // a2r*w,a2i*w
|
||||
fmul v25.2s, v19.2s, v28.s[1] // a3r*w,a3i*w
|
||||
fadd v0.2s, v20.2s, v21.2s
|
||||
fsub v2.2s, v20.2s, v21.2s
|
||||
fadd v1.2s, v22.2s, v23.2s
|
||||
rev64 v26.2s, v26.2s
|
||||
rev64 v27.2s, v27.2s
|
||||
fsub v3.2s, v22.2s, v23.2s
|
||||
fsub v6.2s, v6.2s, v7.2s
|
||||
fadd v24.2s, v24.2s, v26.2s // a2r+a2i,a2i-a2r t1,t2
|
||||
fadd v25.2s, v25.2s, v27.2s // a3r-a3i,a3i+a3r t5,t6
|
||||
fadd v7.2s, v4.2s, v5.2s
|
||||
fsub v18.2s, v2.2s, v6.2s
|
||||
ext v26.8b, v24.8b, v25.8b, #4
|
||||
ext v27.8b, v25.8b, v24.8b, #4
|
||||
fadd v2.2s, v2.2s, v6.2s
|
||||
fsub v16.2s, v0.2s, v7.2s
|
||||
fadd v5.2s, v25.2s, v24.2s
|
||||
fsub v4.2s, v26.2s, v27.2s
|
||||
fadd v0.2s, v0.2s, v7.2s
|
||||
fsub v17.2s, v1.2s, v5.2s
|
||||
fsub v19.2s, v3.2s, v4.2s
|
||||
fadd v3.2s, v3.2s, v4.2s
|
||||
fadd v1.2s, v1.2s, v5.2s
|
||||
|
||||
st1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0]
|
||||
st1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x1]
|
||||
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function fft16_neon
|
||||
mov x1, x0
|
||||
ld1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x0], #32
|
||||
ld1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0], #32
|
||||
ext v22.8b, v2.8b, v3.8b, #4
|
||||
ext v23.8b, v3.8b, v2.8b, #4
|
||||
fadd v4.2s, v16.2s, v17.2s // r4+r5,i4+i5
|
||||
fadd v5.2s, v18.2s, v19.2s // r6+r7,i6+i7
|
||||
fsub v17.2s, v16.2s, v17.2s // r4-r5,i4-i5
|
||||
fsub v19.2s, v18.2s, v19.2s // r6-r7,i6-i7
|
||||
rev64 v27.2s, v28.2s // ???
|
||||
fadd v20.2s, v0.2s, v1.2s // r0+r1,i0+i1
|
||||
fadd v21.2s, v2.2s, v3.2s // r2+r3,i2+i3
|
||||
fmul v26.2s, v17.2s, v28.2s // -a2r*w,a2i*w
|
||||
ext v6.8b, v4.8b, v5.8b, #4
|
||||
ext v7.8b, v5.8b, v4.8b, #4
|
||||
fmul v27.2s, v19.2s, v27.2s // a3r*w,-a3i*w
|
||||
fsub v23.2s, v22.2s, v23.2s // i2-i3,r3-r2
|
||||
fsub v22.2s, v0.2s, v1.2s // r0-r1,i0-i1
|
||||
fmul v24.2s, v17.2s, v28.s[1] // a2r*w,a2i*w
|
||||
fmul v25.2s, v19.2s, v28.s[1] // a3r*w,a3i*w
|
||||
fadd v0.2s, v20.2s, v21.2s
|
||||
fsub v2.2s, v20.2s, v21.2s
|
||||
fadd v1.2s, v22.2s, v23.2s
|
||||
rev64 v26.2s, v26.2s
|
||||
rev64 v27.2s, v27.2s
|
||||
fsub v3.2s, v22.2s, v23.2s
|
||||
fsub v6.2s, v6.2s, v7.2s
|
||||
fadd v24.2s, v24.2s, v26.2s // a2r+a2i,a2i-a2r t1,t2
|
||||
fadd v25.2s, v25.2s, v27.2s // a3r-a3i,a3i+a3r t5,t6
|
||||
fadd v7.2s, v4.2s, v5.2s
|
||||
fsub v18.2s, v2.2s, v6.2s
|
||||
ld1 {v20.4s,v21.4s}, [x0], #32
|
||||
ld1 {v22.4s,v23.4s}, [x0], #32
|
||||
ext v26.8b, v24.8b, v25.8b, #4
|
||||
ext v27.8b, v25.8b, v24.8b, #4
|
||||
fadd v2.2s, v2.2s, v6.2s
|
||||
fsub v16.2s, v0.2s, v7.2s
|
||||
fadd v5.2s, v25.2s, v24.2s
|
||||
fsub v4.2s, v26.2s, v27.2s
|
||||
transpose v24.2d, v25.2d, v20.2d, v22.2d
|
||||
transpose v26.2d, v27.2d, v21.2d, v23.2d
|
||||
fadd v0.2s, v0.2s, v7.2s
|
||||
fsub v17.2s, v1.2s, v5.2s
|
||||
fsub v19.2s, v3.2s, v4.2s
|
||||
fadd v3.2s, v3.2s, v4.2s
|
||||
fadd v1.2s, v1.2s, v5.2s
|
||||
ext v20.16b, v21.16b, v21.16b, #4
|
||||
ext v21.16b, v23.16b, v23.16b, #4
|
||||
|
||||
zip1 v0.2d, v0.2d, v1.2d // {z[0], z[1]}
|
||||
zip1 v1.2d, v2.2d, v3.2d // {z[2], z[3]}
|
||||
zip1 v2.2d, v16.2d, v17.2d // {z[o1], z[o1+1]}
|
||||
zip1 v3.2d, v18.2d, v19.2d // {z[o1+2],z[o1+3]}
|
||||
|
||||
// 2 x fft4
|
||||
transpose v22.2d, v23.2d, v20.2d, v21.2d
|
||||
|
||||
fadd v4.4s, v24.4s, v25.4s
|
||||
fadd v5.4s, v26.4s, v27.4s
|
||||
fsub v6.4s, v24.4s, v25.4s
|
||||
fsub v7.4s, v22.4s, v23.4s
|
||||
|
||||
ld1 {v23.4s}, [x14]
|
||||
|
||||
fadd v24.4s, v4.4s, v5.4s // {z[o2+0],z[o2+1]}
|
||||
fsub v26.4s, v4.4s, v5.4s // {z[o2+2],z[o2+3]}
|
||||
fadd v25.4s, v6.4s, v7.4s // {z[o3+0],z[o3+1]}
|
||||
fsub v27.4s, v6.4s, v7.4s // {z[o3+2],z[o3+3]}
|
||||
|
||||
//fft_pass_neon_16
|
||||
rev64 v7.4s, v25.4s
|
||||
fmul v25.4s, v25.4s, v23.s[1]
|
||||
fmul v7.4s, v7.4s, v29.4s
|
||||
fmla v25.4s, v7.4s, v23.s[3] // {t1a,t2a,t5a,t6a}
|
||||
|
||||
zip1 v20.4s, v24.4s, v25.4s
|
||||
zip2 v21.4s, v24.4s, v25.4s
|
||||
fneg v22.4s, v20.4s
|
||||
fadd v4.4s, v21.4s, v20.4s
|
||||
fsub v6.4s, v20.4s, v21.4s // just the second half
|
||||
fadd v5.4s, v21.4s, v22.4s // just the first half
|
||||
|
||||
tbl v4.16b, {v4.16b}, v30.16b // trans4_float
|
||||
tbl v5.16b, {v5.16b,v6.16b}, v31.16b // trans8_float
|
||||
|
||||
fsub v20.4s, v0.4s, v4.4s // {z[o2],z[o2+1]}
|
||||
fadd v16.4s, v0.4s, v4.4s // {z[0], z[1]}
|
||||
fsub v22.4s, v2.4s, v5.4s // {z[o3],z[o3+1]}
|
||||
fadd v18.4s, v2.4s, v5.4s // {z[o1],z[o1+1]}
|
||||
|
||||
//second half
|
||||
rev64 v6.4s, v26.4s
|
||||
fmul v26.4s, v26.4s, v23.s[2]
|
||||
rev64 v7.4s, v27.4s
|
||||
fmul v27.4s, v27.4s, v23.s[3]
|
||||
fmul v6.4s, v6.4s, v29.4s
|
||||
fmul v7.4s, v7.4s, v29.4s
|
||||
fmla v26.4s, v6.4s, v23.s[2] // {t1,t2,t5,t6}
|
||||
fmla v27.4s, v7.4s, v23.s[1] // {t1a,t2a,t5a,t6a}
|
||||
|
||||
zip1 v24.4s, v26.4s, v27.4s
|
||||
zip2 v25.4s, v26.4s, v27.4s
|
||||
fneg v26.4s, v24.4s
|
||||
fadd v4.4s, v25.4s, v24.4s
|
||||
fsub v6.4s, v24.4s, v25.4s // just the second half
|
||||
fadd v5.4s, v25.4s, v26.4s // just the first half
|
||||
|
||||
tbl v4.16b, {v4.16b}, v30.16b // trans4_float
|
||||
tbl v5.16b, {v5.16b,v6.16b}, v31.16b // trans8_float
|
||||
|
||||
fadd v17.4s, v1.4s, v4.4s // {z[2], z[3]}
|
||||
fsub v21.4s, v1.4s, v4.4s // {z[o2+2],z[o2+3]}
|
||||
fadd v19.4s, v3.4s, v5.4s // {z[o1+2],z[o1+3]}
|
||||
fsub v23.4s, v3.4s, v5.4s // {z[o3+2],z[o3+3]}
|
||||
|
||||
st1 {v16.4s,v17.4s}, [x1], #32
|
||||
st1 {v18.4s,v19.4s}, [x1], #32
|
||||
st1 {v20.4s,v21.4s}, [x1], #32
|
||||
st1 {v22.4s,v23.4s}, [x1], #32
|
||||
|
||||
ret
|
||||
endfunc
|
||||
|
||||
|
||||
const trans4_float, align=4
|
||||
.byte 0, 1, 2, 3
|
||||
.byte 8, 9, 10, 11
|
||||
.byte 4, 5, 6, 7
|
||||
.byte 12, 13, 14, 15
|
||||
endconst
|
||||
|
||||
const trans8_float, align=4
|
||||
.byte 24, 25, 26, 27
|
||||
.byte 0, 1, 2, 3
|
||||
.byte 28, 29, 30, 31
|
||||
.byte 4, 5, 6, 7
|
||||
endconst
|
||||
|
||||
function fft_pass_neon
|
||||
sub x6, x2, #1 // n - 1, loop counter
|
||||
lsl x5, x2, #3 // 2 * n * sizeof FFTSample
|
||||
lsl x1, x2, #4 // 2 * n * sizeof FFTComplex
|
||||
add x5, x4, x5 // wim
|
||||
add x3, x1, x2, lsl #5 // 4 * n * sizeof FFTComplex
|
||||
add x2, x0, x2, lsl #5 // &z[o2]
|
||||
add x3, x0, x3 // &z[o3]
|
||||
add x1, x0, x1 // &z[o1]
|
||||
ld1 {v20.4s},[x2] // {z[o2],z[o2+1]}
|
||||
ld1 {v22.4s},[x3] // {z[o3],z[o3+1]}
|
||||
ld1 {v4.2s}, [x4], #8 // {wre[0],wre[1]}
|
||||
trn2 v25.2d, v20.2d, v22.2d
|
||||
sub x5, x5, #4 // wim--
|
||||
trn1 v24.2d, v20.2d, v22.2d
|
||||
ld1 {v5.s}[0], [x5], x7 // d5[0] = wim[-1]
|
||||
rev64 v7.4s, v25.4s
|
||||
fmul v25.4s, v25.4s, v4.s[1]
|
||||
ld1 {v16.4s}, [x0] // {z[0],z[1]}
|
||||
fmul v7.4s, v7.4s, v29.4s
|
||||
ld1 {v17.4s}, [x1] // {z[o1],z[o1+1]}
|
||||
prfm pldl1keep, [x2, #16]
|
||||
prfm pldl1keep, [x3, #16]
|
||||
fmla v25.4s, v7.4s, v5.s[0] // {t1a,t2a,t5a,t6a}
|
||||
prfm pldl1keep, [x0, #16]
|
||||
prfm pldl1keep, [x1, #16]
|
||||
|
||||
zip1 v20.4s, v24.4s, v25.4s
|
||||
zip2 v21.4s, v24.4s, v25.4s
|
||||
fneg v22.4s, v20.4s
|
||||
fadd v4.4s, v21.4s, v20.4s
|
||||
fsub v6.4s, v20.4s, v21.4s // just the second half
|
||||
fadd v5.4s, v21.4s, v22.4s // just the first half
|
||||
|
||||
tbl v4.16b, {v4.16b}, v30.16b // trans4_float
|
||||
tbl v5.16b, {v5.16b,v6.16b}, v31.16b // trans8_float
|
||||
|
||||
fadd v20.4s, v16.4s, v4.4s
|
||||
fsub v22.4s, v16.4s, v4.4s
|
||||
fadd v21.4s, v17.4s, v5.4s
|
||||
st1 {v20.4s}, [x0], #16 // {z[0], z[1]}
|
||||
fsub v23.4s, v17.4s, v5.4s
|
||||
|
||||
st1 {v21.4s}, [x1], #16 // {z[o1],z[o1+1]}
|
||||
st1 {v22.4s}, [x2], #16 // {z[o2],z[o2+1]}
|
||||
st1 {v23.4s}, [x3], #16 // {z[o3],z[o3+1]}
|
||||
1:
|
||||
ld1 {v20.4s},[x2] // {z[o2],z[o2+1]}
|
||||
ld1 {v22.4s},[x3] // {z[o3],z[o3+1]}
|
||||
ld1 {v4.2s}, [x4], #8 // {wre[0],wre[1]}
|
||||
transpose v26.2d, v27.2d, v20.2d, v22.2d
|
||||
ld1 {v5.2s}, [x5], x7 // {wim[-1],wim[0]}
|
||||
rev64 v6.4s, v26.4s
|
||||
fmul v26.4s, v26.4s, v4.s[0]
|
||||
rev64 v7.4s, v27.4s
|
||||
fmul v27.4s, v27.4s, v4.s[1]
|
||||
fmul v6.4s, v6.4s, v29.4s
|
||||
fmul v7.4s, v7.4s, v29.4s
|
||||
ld1 {v16.4s},[x0] // {z[0],z[1]}
|
||||
fmla v26.4s, v6.4s, v5.s[1] // {t1,t2,t5,t6}
|
||||
fmla v27.4s, v7.4s, v5.s[0] // {t1a,t2a,t5a,t6a}
|
||||
ld1 {v17.4s},[x1] // {z[o1],z[o1+1]}
|
||||
|
||||
subs x6, x6, #1 // n--
|
||||
|
||||
zip1 v20.4s, v26.4s, v27.4s
|
||||
zip2 v21.4s, v26.4s, v27.4s
|
||||
fneg v22.4s, v20.4s
|
||||
fadd v4.4s, v21.4s, v20.4s
|
||||
fsub v6.4s, v20.4s, v21.4s // just the second half
|
||||
fadd v5.4s, v21.4s, v22.4s // just the first half
|
||||
|
||||
tbl v4.16b, {v4.16b}, v30.16b // trans4_float
|
||||
tbl v5.16b, {v5.16b,v6.16b}, v31.16b // trans8_float
|
||||
|
||||
fadd v20.4s, v16.4s, v4.4s
|
||||
fsub v22.4s, v16.4s, v4.4s
|
||||
fadd v21.4s, v17.4s, v5.4s
|
||||
st1 {v20.4s}, [x0], #16 // {z[0], z[1]}
|
||||
fsub v23.4s, v17.4s, v5.4s
|
||||
|
||||
st1 {v21.4s}, [x1], #16 // {z[o1],z[o1+1]}
|
||||
st1 {v22.4s}, [x2], #16 // {z[o2],z[o2+1]}
|
||||
st1 {v23.4s}, [x3], #16 // {z[o3],z[o3+1]}
|
||||
b.ne 1b
|
||||
|
||||
ret
|
||||
endfunc
|
||||
|
||||
.macro def_fft n, n2, n4
|
||||
function fft\n\()_neon align=6
|
||||
sub sp, sp, #16
|
||||
stp x28, x30, [sp]
|
||||
add x28, x0, #\n4*2*8
|
||||
bl fft\n2\()_neon
|
||||
mov x0, x28
|
||||
bl fft\n4\()_neon
|
||||
add x0, x28, #\n4*1*8
|
||||
bl fft\n4\()_neon
|
||||
sub x0, x28, #\n4*2*8
|
||||
ldp x28, x30, [sp], #16
|
||||
movrel x4, X(ff_cos_\n)
|
||||
mov x2, #\n4>>1
|
||||
b fft_pass_neon
|
||||
endfunc
|
||||
.endm
|
||||
|
||||
def_fft 32, 16, 8
|
||||
def_fft 64, 32, 16
|
||||
def_fft 128, 64, 32
|
||||
def_fft 256, 128, 64
|
||||
def_fft 512, 256, 128
|
||||
def_fft 1024, 512, 256
|
||||
def_fft 2048, 1024, 512
|
||||
def_fft 4096, 2048, 1024
|
||||
def_fft 8192, 4096, 2048
|
||||
def_fft 16384, 8192, 4096
|
||||
def_fft 32768, 16384, 8192
|
||||
def_fft 65536, 32768, 16384
|
||||
|
||||
function ff_fft_calc_neon, export=1
|
||||
prfm pldl1keep, [x1]
|
||||
movrel x10, trans4_float
|
||||
ldr w2, [x0]
|
||||
movrel x11, trans8_float
|
||||
sub w2, w2, #2
|
||||
movrel x3, fft_tab_neon
|
||||
ld1 {v30.16b}, [x10]
|
||||
mov x7, #-8
|
||||
movrel x12, pmmp
|
||||
ldr x3, [x3, x2, lsl #3]
|
||||
movrel x13, mppm
|
||||
movrel x14, X(ff_cos_16)
|
||||
ld1 {v31.16b}, [x11]
|
||||
mov x0, x1
|
||||
ld1 {v29.4s}, [x12] // pmmp
|
||||
ld1 {v28.4s}, [x13]
|
||||
br x3
|
||||
endfunc
|
||||
|
||||
function ff_fft_permute_neon, export=1
|
||||
mov x6, #1
|
||||
ldr w2, [x0] // nbits
|
||||
ldr x3, [x0, #16] // tmp_buf
|
||||
ldr x0, [x0, #8] // revtab
|
||||
lsl x6, x6, x2
|
||||
mov x2, x6
|
||||
1:
|
||||
ld1 {v0.2s,v1.2s}, [x1], #16
|
||||
ldr w4, [x0], #4
|
||||
uxth w5, w4
|
||||
lsr w4, w4, #16
|
||||
add x5, x3, x5, lsl #3
|
||||
add x4, x3, x4, lsl #3
|
||||
st1 {v0.2s}, [x5]
|
||||
st1 {v1.2s}, [x4]
|
||||
subs x6, x6, #2
|
||||
b.gt 1b
|
||||
|
||||
sub x1, x1, x2, lsl #3
|
||||
1:
|
||||
ld1 {v0.4s,v1.4s}, [x3], #32
|
||||
st1 {v0.4s,v1.4s}, [x1], #32
|
||||
subs x2, x2, #4
|
||||
b.gt 1b
|
||||
|
||||
ret
|
||||
endfunc
|
||||
|
||||
const fft_tab_neon
|
||||
.quad fft4_neon
|
||||
.quad fft8_neon
|
||||
.quad fft16_neon
|
||||
.quad fft32_neon
|
||||
.quad fft64_neon
|
||||
.quad fft128_neon
|
||||
.quad fft256_neon
|
||||
.quad fft512_neon
|
||||
.quad fft1024_neon
|
||||
.quad fft2048_neon
|
||||
.quad fft4096_neon
|
||||
.quad fft8192_neon
|
||||
.quad fft16384_neon
|
||||
.quad fft32768_neon
|
||||
.quad fft65536_neon
|
||||
endconst
|
||||
|
||||
const pmmp, align=4
|
||||
.float +1.0, -1.0, -1.0, +1.0
|
||||
endconst
|
||||
|
||||
const mppm, align=4
|
||||
.float -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
|
||||
endconst
|
@@ -95,10 +95,9 @@ function ff_\type\()_\codec\()_chroma_mc8_neon, export=1
        b.gt            1b
        ret

2:      adds            w12, w12, w6
2:      tst             w6, w6
        add             w12, w12, w6
        dup             v0.8B, w4
        b.eq            5f
        tst             w6, w6
        dup             v1.8B, w12
        b.eq            4f

@@ -162,33 +161,6 @@ function ff_\type\()_\codec\()_chroma_mc8_neon, export=1
        st1             {v17.8B}, [x0], x2
        b.gt            4b
        ret

5:      ld1             {v4.8B}, [x1], x2
        ld1             {v5.8B}, [x1], x2
        prfm            pldl1strm, [x1]
        subs            w3, w3, #2
        umull           v16.8H, v4.8B, v0.8B
        umull           v17.8H, v5.8B, v0.8B
        prfm            pldl1strm, [x1, x2]
  .ifc \codec,h264
        rshrn           v16.8B, v16.8H, #6
        rshrn           v17.8B, v17.8H, #6
  .else
        add             v16.8H, v16.8H, v22.8H
        add             v17.8H, v17.8H, v22.8H
        shrn            v16.8B, v16.8H, #6
        shrn            v17.8B, v17.8H, #6
  .endif
  .ifc \type,avg
        ld1             {v20.8B}, [x8], x2
        ld1             {v21.8B}, [x8], x2
        urhadd          v16.8B, v16.8B, v20.8B
        urhadd          v17.8B, v17.8B, v21.8B
  .endif
        st1             {v16.8B}, [x0], x2
        st1             {v17.8B}, [x0], x2
        b.gt            5b
        ret
endfunc
.endm

@@ -266,10 +238,9 @@ function ff_\type\()_\codec\()_chroma_mc4_neon, export=1
        b.gt            1b
        ret

2:      adds            w12, w12, w6
2:      tst             w6, w6
        add             w12, w12, w6
        dup             v30.8B, w4
        b.eq            5f
        tst             w6, w6
        dup             v31.8B, w12
        trn1            v0.2S, v30.2S, v31.2S
        trn2            v1.2S, v30.2S, v31.2S
@@ -332,28 +303,6 @@ function ff_\type\()_\codec\()_chroma_mc4_neon, export=1
        st1             {v16.S}[1], [x0], x2
        b.gt            4b
        ret

5:      ld1             {v4.S}[0], [x1], x2
        ld1             {v4.S}[1], [x1], x2
        umull           v18.8H, v4.8B, v30.8B
        subs            w3, w3, #2
        prfm            pldl1strm, [x1]
  .ifc \codec,h264
        rshrn           v16.8B, v18.8H, #6
  .else
        add             v18.8H, v18.8H, v22.8H
        shrn            v16.8B, v18.8H, #6
  .endif
  .ifc \type,avg
        ld1             {v20.S}[0], [x8], x2
        ld1             {v20.S}[1], [x8], x2
        urhadd          v16.8B, v16.8B, v20.8B
  .endif
        prfm            pldl1strm, [x1]
        st1             {v16.S}[0], [x0], x2
        st1             {v16.S}[1], [x0], x2
        b.gt            5b
        ret
endfunc
.endm

@@ -1,323 +0,0 @@
|
||||
/*
|
||||
* AArch64 NEON optimised MDCT
|
||||
* Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
|
||||
* Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/aarch64/asm.S"
|
||||
|
||||
function ff_imdct_half_neon, export=1
|
||||
sub sp, sp, #32
|
||||
stp x19, x20, [sp]
|
||||
str x30, [sp, #16]
|
||||
mov x12, #1
|
||||
ldr w14, [x0, #28] // mdct_bits
|
||||
ldr x4, [x0, #32] // tcos
|
||||
ldr x3, [x0, #8] // revtab
|
||||
lsl x12, x12, x14 // n = 1 << nbits
|
||||
lsr x14, x12, #2 // n4 = n >> 2
|
||||
add x7, x2, x12, lsl #1
|
||||
mov x12, #-16
|
||||
sub x7, x7, #16
|
||||
|
||||
ld2 {v16.2s,v17.2s}, [x7], x12 // d16=x,n1 d17=x,n0
|
||||
ld2 {v0.2s,v1.2s}, [x2], #16 // d0 =m0,x d1 =m1,x
|
||||
rev64 v17.2s, v17.2s
|
||||
ld2 {v2.2s,v3.2s}, [x4], #16 // d2=c0,c1 d3=s0,s2
|
||||
fmul v6.2s, v17.2s, v2.2s
|
||||
fmul v7.2s, v0.2s, v2.2s
|
||||
1:
|
||||
subs x14, x14, #2
|
||||
ldr w6, [x3], #4
|
||||
fmul v4.2s, v0.2s, v3.2s
|
||||
fmul v5.2s, v17.2s, v3.2s
|
||||
fsub v4.2s, v6.2s, v4.2s
|
||||
fadd v5.2s, v5.2s, v7.2s
|
||||
ubfm x8, x6, #16, #31
|
||||
ubfm x6, x6, #0, #15
|
||||
add x8, x1, x8, lsl #3
|
||||
add x6, x1, x6, lsl #3
|
||||
b.eq 2f
|
||||
ld2 {v16.2s,v17.2s}, [x7], x12
|
||||
ld2 {v0.2s,v1.2s}, [x2], #16
|
||||
rev64 v17.2s, v17.2s
|
||||
ld2 {v2.2s,v3.2s}, [x4], #16 // d2=c0,c1 d3=s0,s2
|
||||
fmul v6.2s, v17.2s, v2.2s
|
||||
fmul v7.2s, v0.2s, v2.2s
|
||||
st2 {v4.s,v5.s}[0], [x6]
|
||||
st2 {v4.s,v5.s}[1], [x8]
|
||||
b 1b
|
||||
2:
|
||||
st2 {v4.s,v5.s}[0], [x6]
|
||||
st2 {v4.s,v5.s}[1], [x8]
|
||||
|
||||
mov x19, x0
|
||||
mov x20, x1
|
||||
bl X(ff_fft_calc_neon)
|
||||
|
||||
mov x12, #1
|
||||
ldr w14, [x19, #28] // mdct_bits
|
||||
ldr x4, [x19, #32] // tcos
|
||||
lsl x12, x12, x14 // n = 1 << nbits
|
||||
lsr x14, x12, #3 // n8 = n >> 3
|
||||
|
||||
add x4, x4, x14, lsl #3
|
||||
add x6, x20, x14, lsl #3
|
||||
sub x1, x4, #16
|
||||
sub x3, x6, #16
|
||||
|
||||
mov x7, #-16
|
||||
mov x8, x6
|
||||
mov x0, x3
|
||||
|
||||
ld2 {v0.2s,v1.2s}, [x3], x7 // d0 =i1,r1 d1 =i0,r0
|
||||
ld2 {v20.2s,v21.2s},[x6], #16 // d20=i2,r2 d21=i3,r3
|
||||
ld2 {v16.2s,v17.2s},[x1], x7 // d16=c1,c0 d18=s1,s0
|
||||
3:
|
||||
subs x14, x14, #2
|
||||
fmul v7.2s, v0.2s, v17.2s
|
||||
ld2 {v18.2s,v19.2s},[x4], #16 // d17=c2,c3 d19=s2,s3
|
||||
fmul v4.2s, v1.2s, v17.2s
|
||||
fmul v6.2s, v21.2s, v19.2s
|
||||
fmul v5.2s, v20.2s, v19.2s
|
||||
fmul v22.2s, v1.2s, v16.2s
|
||||
fmul v23.2s, v21.2s, v18.2s
|
||||
fmul v24.2s, v0.2s, v16.2s
|
||||
fmul v25.2s, v20.2s, v18.2s
|
||||
fadd v7.2s, v7.2s, v22.2s
|
||||
fadd v5.2s, v5.2s, v23.2s
|
||||
fsub v4.2s, v4.2s, v24.2s
|
||||
fsub v6.2s, v6.2s, v25.2s
|
||||
b.eq 4f
|
||||
ld2 {v0.2s,v1.2s}, [x3], x7
|
||||
ld2 {v20.2s,v21.2s},[x6], #16
|
||||
ld2 {v16.2s,v17.2s},[x1], x7 // d16=c1,c0 d18=s1,s0
|
||||
rev64 v5.2s, v5.2s
|
||||
rev64 v7.2s, v7.2s
|
||||
st2 {v4.2s,v5.2s}, [x0], x7
|
||||
st2 {v6.2s,v7.2s}, [x8], #16
|
||||
b 3b
|
||||
4:
|
||||
rev64 v5.2s, v5.2s
|
||||
rev64 v7.2s, v7.2s
|
||||
st2 {v4.2s,v5.2s}, [x0]
|
||||
st2 {v6.2s,v7.2s}, [x8]
|
||||
|
||||
ldp x19, x20, [sp]
|
||||
ldr x30, [sp, #16]
|
||||
add sp, sp, #32
|
||||
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function ff_imdct_calc_neon, export=1
|
||||
sub sp, sp, #32
|
||||
stp x19, x20, [sp]
|
||||
str x30, [sp, #16]
|
||||
ldr w3, [x0, #28] // mdct_bits
|
||||
mov x19, #1
|
||||
mov x20, x1
|
||||
lsl x19, x19, x3
|
||||
add x1, x1, x19
|
||||
|
||||
bl X(ff_imdct_half_neon)
|
||||
|
||||
add x0, x20, x19, lsl #2
|
||||
add x1, x20, x19, lsl #1
|
||||
sub x0, x0, #8
|
||||
sub x2, x1, #16
|
||||
mov x3, #-16
|
||||
mov x6, #-8
|
||||
1:
|
||||
ld1 {v0.4s}, [x2], x3
|
||||
prfum pldl1keep, [x0, #-16]
|
||||
rev64 v0.4s, v0.4s
|
||||
ld1 {v2.2s,v3.2s}, [x1], #16
|
||||
fneg v4.4s, v0.4s
|
||||
prfum pldl1keep, [x2, #-16]
|
||||
rev64 v2.2s, v2.2s
|
||||
rev64 v3.2s, v3.2s
|
||||
ext v4.16b, v4.16b, v4.16b, #8
|
||||
st1 {v2.2s}, [x0], x6
|
||||
st1 {v3.2s}, [x0], x6
|
||||
st1 {v4.4s}, [x20], #16
|
||||
subs x19, x19, #16
|
||||
b.gt 1b
|
||||
|
||||
ldp x19, x20, [sp], #16
|
||||
ldr x30, [sp], #16
|
||||
|
||||
ret
|
||||
endfunc
|
||||
|
||||
|
||||
function ff_mdct_calc_neon, export=1
|
||||
sub sp, sp, #32
|
||||
stp x19, x20, [sp]
|
||||
str x30, [sp, #16]
|
||||
|
||||
mov x12, #1
|
||||
ldr w14, [x0, #28] // mdct_bits
|
||||
ldr x4, [x0, #32] // tcos
|
||||
ldr x3, [x0, #8] // revtab
|
||||
lsl x14, x12, x14 // n = 1 << nbits
|
||||
add x7, x2, x14 // in4u
|
||||
sub x9, x7, #16 // in4d
|
||||
add x2, x7, x14, lsl #1 // in3u
|
||||
add x8, x9, x14, lsl #1 // in3d
|
||||
add x5, x4, x14, lsl #1
|
||||
sub x5, x5, #16
|
||||
sub x3, x3, #4
|
||||
mov x12, #-16
|
||||
lsr x13, x14, #1
|
||||
|
||||
ld2 {v16.2s,v17.2s}, [x9], x12 // in0u0,in0u1 in4d1,in4d0
|
||||
ld2 {v18.2s,v19.2s}, [x8], x12 // in2u0,in2u1 in3d1,in3d0
|
||||
ld2 {v0.2s, v1.2s}, [x7], #16 // in4u0,in4u1 in2d1,in2d0
|
||||
rev64 v17.2s, v17.2s // in4d0,in4d1 in3d0,in3d1
|
||||
rev64 v19.2s, v19.2s // in4d0,in4d1 in3d0,in3d1
|
||||
ld2 {v2.2s, v3.2s}, [x2], #16 // in3u0,in3u1 in1d1,in1d0
|
||||
fsub v0.2s, v17.2s, v0.2s // in4d-in4u I
|
||||
ld2 {v20.2s,v21.2s}, [x4], #16 // c0,c1 s0,s1
|
||||
rev64 v1.2s, v1.2s // in2d0,in2d1 in1d0,in1d1
|
||||
rev64 v3.2s, v3.2s // in2d0,in2d1 in1d0,in1d1
|
||||
ld2 {v30.2s,v31.2s}, [x5], x12 // c2,c3 s2,s3
|
||||
fadd v2.2s, v2.2s, v19.2s // in3u+in3d -R
|
||||
fsub v16.2s, v16.2s, v1.2s // in0u-in2d R
|
||||
fadd v18.2s, v18.2s, v3.2s // in2u+in1d -I
|
||||
1:
|
||||
fmul v7.2s, v0.2s, v21.2s // I*s
|
||||
ldr w10, [x3, x13]
|
||||
fmul v6.2s, v2.2s, v20.2s // -R*c
|
||||
ldr w6, [x3, #4]!
|
||||
fmul v4.2s, v2.2s, v21.2s // -R*s
|
||||
fmul v5.2s, v0.2s, v20.2s // I*c
|
||||
fmul v24.2s, v16.2s, v30.2s // R*c
|
||||
fmul v25.2s, v18.2s, v31.2s // -I*s
|
||||
fmul v22.2s, v16.2s, v31.2s // R*s
|
||||
fmul v23.2s, v18.2s, v30.2s // I*c
|
||||
subs x14, x14, #16
|
||||
subs x13, x13, #8
|
||||
fsub v6.2s, v6.2s, v7.2s // -R*c-I*s
|
||||
fadd v7.2s, v4.2s, v5.2s // -R*s+I*c
|
||||
fsub v24.2s, v25.2s, v24.2s // I*s-R*c
|
||||
fadd v25.2s, v22.2s, v23.2s // R*s-I*c
|
||||
b.eq 1f
|
||||
mov x12, #-16
|
||||
ld2 {v16.2s,v17.2s}, [x9], x12 // in0u0,in0u1 in4d1,in4d0
|
||||
ld2 {v18.2s,v19.2s}, [x8], x12 // in2u0,in2u1 in3d1,in3d0
|
||||
fneg v7.2s, v7.2s // R*s-I*c
|
||||
ld2 {v0.2s, v1.2s}, [x7], #16 // in4u0,in4u1 in2d1,in2d0
|
||||
rev64 v17.2s, v17.2s // in4d0,in4d1 in3d0,in3d1
|
||||
rev64 v19.2s, v19.2s // in4d0,in4d1 in3d0,in3d1
|
||||
ld2 {v2.2s, v3.2s}, [x2], #16 // in3u0,in3u1 in1d1,in1d0
|
||||
fsub v0.2s, v17.2s, v0.2s // in4d-in4u I
|
||||
ld2 {v20.2s,v21.2s}, [x4], #16 // c0,c1 s0,s1
|
||||
rev64 v1.2s, v1.2s // in2d0,in2d1 in1d0,in1d1
|
||||
rev64 v3.2s, v3.2s // in2d0,in2d1 in1d0,in1d1
|
||||
ld2 {v30.2s,v31.2s}, [x5], x12 // c2,c3 s2,s3
|
||||
fadd v2.2s, v2.2s, v19.2s // in3u+in3d -R
|
||||
fsub v16.2s, v16.2s, v1.2s // in0u-in2d R
|
||||
fadd v18.2s, v18.2s, v3.2s // in2u+in1d -I
|
||||
ubfm x12, x6, #16, #31
|
||||
ubfm x6, x6, #0, #15
|
||||
add x12, x1, x12, lsl #3
|
||||
add x6, x1, x6, lsl #3
|
||||
st2 {v6.s,v7.s}[0], [x6]
|
||||
st2 {v6.s,v7.s}[1], [x12]
|
||||
ubfm x6, x10, #16, #31
|
||||
ubfm x10, x10, #0, #15
|
||||
add x6 , x1, x6, lsl #3
|
||||
add x10, x1, x10, lsl #3
|
||||
st2 {v24.s,v25.s}[0], [x10]
|
||||
st2 {v24.s,v25.s}[1], [x6]
|
||||
b 1b
|
||||
1:
|
||||
fneg v7.2s, v7.2s // R*s-I*c
|
||||
ubfm x12, x6, #16, #31
|
||||
ubfm x6, x6, #0, #15
|
||||
add x12, x1, x12, lsl #3
|
||||
add x6, x1, x6, lsl #3
|
||||
st2 {v6.s,v7.s}[0], [x6]
|
||||
st2 {v6.s,v7.s}[1], [x12]
|
||||
ubfm x6, x10, #16, #31
|
||||
ubfm x10, x10, #0, #15
|
||||
add x6 , x1, x6, lsl #3
|
||||
add x10, x1, x10, lsl #3
|
||||
st2 {v24.s,v25.s}[0], [x10]
|
||||
st2 {v24.s,v25.s}[1], [x6]
|
||||
|
||||
mov x19, x0
|
||||
mov x20, x1
|
||||
bl X(ff_fft_calc_neon)
|
||||
|
||||
mov x12, #1
|
||||
ldr w14, [x19, #28] // mdct_bits
|
||||
ldr x4, [x19, #32] // tcos
|
||||
lsl x12, x12, x14 // n = 1 << nbits
|
||||
lsr x14, x12, #3 // n8 = n >> 3
|
||||
|
||||
add x4, x4, x14, lsl #3
|
||||
add x6, x20, x14, lsl #3
|
||||
sub x1, x4, #16
|
||||
sub x3, x6, #16
|
||||
|
||||
mov x7, #-16
|
||||
mov x8, x6
|
||||
mov x0, x3
|
||||
|
||||
ld2 {v0.2s,v1.2s}, [x3], x7 // d0 =r1,i1 d1 =r0,i0
|
||||
ld2 {v20.2s,v21.2s}, [x6], #16 // d20=r2,i2 d21=r3,i3
|
||||
ld2 {v16.2s,v17.2s}, [x1], x7 // c1,c0 s1,s0
|
||||
1:
|
||||
subs x14, x14, #2
|
||||
fmul v7.2s, v0.2s, v17.2s // r1*s1,r0*s0
|
||||
ld2 {v18.2s,v19.2s}, [x4], #16 // c2,c3 s2,s3
|
||||
fmul v4.2s, v1.2s, v17.2s // i1*s1,i0*s0
|
||||
fmul v6.2s, v21.2s, v19.2s // i2*s2,i3*s3
|
||||
fmul v5.2s, v20.2s, v19.2s // r2*s2,r3*s3
|
||||
fmul v24.2s, v0.2s, v16.2s // r1*c1,r0*c0
|
||||
fmul v25.2s, v20.2s, v18.2s // r2*c2,r3*c3
|
||||
fmul v22.2s, v21.2s, v18.2s // i2*c2,i3*c3
|
||||
fmul v23.2s, v1.2s, v16.2s // i1*c1,i0*c0
|
||||
fadd v4.2s, v4.2s, v24.2s // i1*s1+r1*c1,i0*s0+r0*c0
|
||||
fadd v6.2s, v6.2s, v25.2s // i2*s2+r2*c2,i3*s3+r3*c3
|
||||
fsub v5.2s, v22.2s, v5.2s // i2*c2-r2*s2,i3*c3-r3*s3
|
||||
fsub v7.2s, v23.2s, v7.2s // i1*c1-r1*s1,i0*c0-r0*s0
|
||||
fneg v4.2s, v4.2s
|
||||
fneg v6.2s, v6.2s
|
||||
b.eq 1f
|
||||
ld2 {v0.2s, v1.2s}, [x3], x7
|
||||
ld2 {v20.2s,v21.2s}, [x6], #16
|
||||
ld2 {v16.2s,v17.2s}, [x1], x7 // c1,c0 s1,s0
|
||||
rev64 v5.2s, v5.2s
|
||||
rev64 v7.2s, v7.2s
|
||||
st2 {v4.2s,v5.2s}, [x0], x7
|
||||
st2 {v6.2s,v7.2s}, [x8], #16
|
||||
b 1b
|
||||
1:
|
||||
rev64 v5.2s, v5.2s
|
||||
rev64 v7.2s, v7.2s
|
||||
st2 {v4.2s,v5.2s}, [x0]
|
||||
st2 {v6.2s,v7.2s}, [x8]
|
||||
|
||||
ldp x19, x20, [sp], #16
|
||||
ldr x30, [sp], #16
|
||||
ret
|
||||
endfunc
|
@@ -1,226 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/aarch64/asm.S"
|
||||
|
||||
#define FRAC_BITS 23 // fractional bits for sb_samples and dct
|
||||
#define WFRAC_BITS 16 // fractional bits for window
|
||||
#define OUT_SHIFT (WFRAC_BITS + FRAC_BITS - 15)
|
||||
|
||||
const tbl_rev128.s align=4
|
||||
.byte 12, 13, 14, 15
|
||||
.byte 8, 9, 10, 11
|
||||
.byte 4, 5, 6, 7
|
||||
.byte 0, 1, 2, 3
|
||||
endconst
|
||||
|
||||
.macro apply_window type, st
|
||||
function ff_mpadsp_apply_window_\type\()_neon, export=1
|
||||
mov x7, x0
|
||||
sxtw x4, w4 // incr
|
||||
add x8, x0, #512<<2
|
||||
ld1 {v0.4s,v1.4s,v2.4s,v3.4s}, [x7], #64
|
||||
ld1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x7], #64
|
||||
st1 {v0.4s,v1.4s,v2.4s,v3.4s}, [x8], #64
|
||||
st1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x8], #64
|
||||
movrel x15, tbl_rev128.s
|
||||
ld1 {v27.4s}, [x15]
|
||||
.ifc \type, fixed
|
||||
lsl x4, x4, #1
|
||||
.else
|
||||
lsl x4, x4, #2
|
||||
.endif
|
||||
add x10, x0, #45<<2
|
||||
add x0, x0, #16<<2
|
||||
add x1, x1, #16<<2
|
||||
add x5, x3, x4, lsl #5
|
||||
sub x5, x5, x4 // samples2
|
||||
neg x13, x4 // -incr
|
||||
mov x9, #64<<2
|
||||
.ifc \type, fixed
|
||||
ld1r {v16.2s}, [x2] // dither_state
|
||||
sxtl v16.2d, v16.2s
|
||||
movi v29.2d, #0
|
||||
movi v30.2d, #(1<<OUT_SHIFT)-1
|
||||
trn1 v31.2d, v29.2d, v30.2d
|
||||
trn2 v30.2d, v30.2d, v29.2d
|
||||
trn1 v16.2d, v16.2d, v29.2d
|
||||
.else
|
||||
movi v16.4s, #0
|
||||
movi v28.4s, #0
|
||||
.endif
|
||||
mov x14, #4
|
||||
1:
|
||||
mov x8, x0
|
||||
sub x7, x1, #3<<2
|
||||
sub x6, x1, x14, lsl #4
|
||||
add x7, x7, x14, lsl #4
|
||||
add x11, x6, #(32)<<2 // w + 32
|
||||
add x12, x7, #(32)<<2 // w2 + 32
|
||||
mov x15, #8
|
||||
movi v17.2d, #0
|
||||
movi v18.2d, #0
|
||||
movi v19.2d, #0
|
||||
2:
|
||||
subs x15, x15, #1
|
||||
ld1 {v0.4s}, [x8], x9
|
||||
ld1 {v1.4s}, [x10], x9
|
||||
ld1 {v2.4s}, [x6], x9
|
||||
ld1 {v3.4s}, [x7], x9
|
||||
tbl v6.16b, {v0.16b}, v27.16b
|
||||
tbl v7.16b, {v1.16b}, v27.16b
|
||||
ld1 {v4.4s}, [x11], x9
|
||||
ld1 {v5.4s}, [x12], x9
|
||||
MLA v16, v2, v0
|
||||
MLA2 v17, v2, v0
|
||||
MLS v18, v3, v6
|
||||
MLS2 v19, v3, v6
|
||||
MLS v16, v4, v7
|
||||
MLS2 v17, v4, v7
|
||||
MLS v18, v5, v1
|
||||
MLS2 v19, v5, v1
|
||||
b.gt 2b
|
||||
|
||||
cmp x14, #4
|
||||
sub x10, x10, #64<<5 // 64 * 8 * sizeof(int32_t)
|
||||
|
||||
.ifc \type, fixed
|
||||
and v28.16b, v16.16b, v30.16b
|
||||
ext v28.16b, v29.16b, v28.16b, #8
|
||||
|
||||
b.eq 4f
|
||||
round_sample v19, 1, 1
|
||||
4:
|
||||
round_sample v16, 1, 0
|
||||
shrn v16.2s, v16.2d, #OUT_SHIFT
|
||||
round_sample v19, 0, 0
|
||||
shrn v19.2s, v19.2d, #OUT_SHIFT
|
||||
round_sample v17, 0, 1
|
||||
round_sample v18, 1, 1
|
||||
round_sample v17, 1, 0
|
||||
shrn2 v16.4s, v17.2d, #OUT_SHIFT
|
||||
round_sample v18, 0, 0
|
||||
shrn2 v19.4s, v18.2d, #OUT_SHIFT
|
||||
sqxtn v16.4h, v16.4s
|
||||
sqxtn v18.4h, v19.4s
|
||||
.else
|
||||
ext v18.16b, v18.16b, v18.16b, #8
|
||||
.endif
|
||||
|
||||
st1 {v16.\st\()}[0], [x3], x4
|
||||
b.eq 4f
|
||||
st1 {v18.\st\()}[1], [x5], x13
|
||||
4:
|
||||
st1 {v16.\st\()}[1], [x3], x4
|
||||
st1 {v18.\st\()}[0], [x5], x13
|
||||
st1 {v16.\st\()}[2], [x3], x4
|
||||
st1 {v18.\st\()}[3], [x5], x13
|
||||
st1 {v16.\st\()}[3], [x3], x4
|
||||
st1 {v18.\st\()}[2], [x5], x13
|
||||
|
||||
mov v16.16b, v28.16b
|
||||
|
||||
subs x14, x14, #1
|
||||
add x0, x0, #4<<2
|
||||
sub x10, x10, #4<<2
|
||||
b.gt 1b
|
||||
|
||||
// computing samples[16]
|
||||
add x6, x1, #32<<2
|
||||
ld1 {v0.2s}, [x6], x9
|
||||
ld1 {v1.2s}, [x0], x9
|
||||
.rept 3
|
||||
ld1 {v2.2s}, [x6], x9
|
||||
ld1 {v3.2s}, [x0], x9
|
||||
MLS v16, v0, v1
|
||||
ld1 {v0.2s}, [x6], x9
|
||||
ld1 {v1.2s}, [x0], x9
|
||||
MLS v16, v2, v3
|
||||
.endr
|
||||
ld1 {v2.2s}, [x6], x9
|
||||
ld1 {v3.2s}, [x0], x9
|
||||
MLS v16, v0, v1
|
||||
MLS v16, v2, v3
|
||||
|
||||
.ifc \type, fixed
|
||||
and v28.16b, v16.16b, v30.16b
|
||||
shrn v20.2s, v16.2d, #OUT_SHIFT
|
||||
xtn v28.2s, v28.2d
|
||||
sqxtn v20.4h, v20.4s
|
||||
st1 {v28.s}[0], [x2] // save dither_state
|
||||
st1 {v20.h}[0], [x3]
|
||||
.else
|
||||
st1 {v16.s}[0], [x3]
|
||||
.endif
|
||||
|
||||
ret
|
||||
endfunc
|
||||
.purgem round_sample
|
||||
.purgem MLA
|
||||
.purgem MLA2
|
||||
.purgem MLS
|
||||
.purgem MLS2
|
||||
.endm
|
||||
|
||||
|
||||
.macro round_sample r, idx, next
|
||||
add \r\().2d, \r\().2d, v28.2d
|
||||
.if \idx == 0
|
||||
and v28.16b, \r\().16b, v30.16b
|
||||
.else // \idx == 1
|
||||
and v28.16b, \r\().16b, v31.16b
|
||||
.endif
|
||||
.if \idx != \next
|
||||
.if \next == 0
|
||||
ext v28.16b, v28.16b, v29.16b, #8
|
||||
.else
|
||||
ext v28.16b, v29.16b, v28.16b, #8
|
||||
.endif
|
||||
.endif
|
||||
.endm
|
||||
.macro MLA d, s1, s2
|
||||
smlal \d\().2d, \s1\().2s, \s2\().2s
|
||||
.endm
|
||||
.macro MLA2 d, s1, s2
|
||||
smlal2 \d\().2d, \s1\().4s, \s2\().4s
|
||||
.endm
|
||||
.macro MLS d, s1, s2
|
||||
smlsl \d\().2d, \s1\().2s, \s2\().2s
|
||||
.endm
|
||||
.macro MLS2 d, s1, s2
|
||||
smlsl2 \d\().2d, \s1\().4s, \s2\().4s
|
||||
.endm
|
||||
apply_window fixed, h
|
||||
|
||||
|
||||
// nothing to do for round_sample and ML{A,S}2
|
||||
.macro round_sample r, idx, next
|
||||
.endm
|
||||
.macro MLA2 d, s1, s2
|
||||
.endm
|
||||
.macro MLS2 d, s1, s2
|
||||
.endm
|
||||
.macro MLA d, s1, s2
|
||||
fmla \d\().4s, \s1\().4s, \s2\().4s
|
||||
.endm
|
||||
.macro MLS d, s1, s2
|
||||
fmls \d\().4s, \s1\().4s, \s2\().4s
|
||||
.endm
|
||||
apply_window float, s
|
@@ -1,45 +0,0 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>

#include "libavutil/cpu.h"
#include "libavutil/aarch64/cpu.h"
#include "libavutil/internal.h"
#include "libavcodec/opus_imdct.h"

#include "asm-offsets.h"

AV_CHECK_OFFSET(CeltIMDCTContext, exptab,         CELT_EXPTAB);
AV_CHECK_OFFSET(CeltIMDCTContext, fft_n,          CELT_FFT_N);
AV_CHECK_OFFSET(CeltIMDCTContext, len2,           CELT_LEN2);
AV_CHECK_OFFSET(CeltIMDCTContext, len4,           CELT_LEN4);
AV_CHECK_OFFSET(CeltIMDCTContext, tmp,            CELT_TMP);
AV_CHECK_OFFSET(CeltIMDCTContext, twiddle_exptab, CELT_TWIDDLE);

void ff_celt_imdct_half_neon(CeltIMDCTContext *s, float *dst, const float *src,
                             ptrdiff_t stride, float scale);

void ff_celt_imdct_init_aarch64(CeltIMDCTContext *s)
{
    int cpu_flags = av_get_cpu_flags();

    if (have_neon(cpu_flags)) {
        s->imdct_half = ff_celt_imdct_half_neon;
    }
}
@@ -1,647 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/aarch64/asm.S"
|
||||
|
||||
#include "asm-offsets.h"
|
||||
|
||||
.macro shuffle a, b, c, d
|
||||
const shuffle_\a\b\c\d align=4
|
||||
.byte (\a * 4), (\a * 4 + 1), (\a * 4 + 2), (\a * 4 + 3)
|
||||
.byte (\b * 4), (\b * 4 + 1), (\b * 4 + 2), (\b * 4 + 3)
|
||||
.byte (\c * 4), (\c * 4 + 1), (\c * 4 + 2), (\c * 4 + 3)
|
||||
.byte (\d * 4), (\d * 4 + 1), (\d * 4 + 2), (\d * 4 + 3)
|
||||
endconst
|
||||
.endm
|
||||
|
||||
shuffle 0, 2, 1, 3
|
||||
shuffle 1, 0, 3, 2
|
||||
shuffle 2, 3, 0, 1
|
||||
shuffle 3, 1, 2, 0
|
||||
|
||||
|
||||
function fft5_neon
|
||||
lsl x2, x2, #3
|
||||
ld1 {v24.2s}, [x1], x2
|
||||
ld2 {v25.s,v26.s}[0], [x1], x2
|
||||
ld2 {v25.s,v26.s}[1], [x1], x2
|
||||
ld2 {v25.s,v26.s}[2], [x1], x2
|
||||
ld2 {v25.s,v26.s}[3], [x1]
|
||||
dup v6.4s, v24.s[0]
|
||||
dup v7.4s, v24.s[1]
|
||||
|
||||
faddp v0.4s, v25.4s, v26.4s
|
||||
// z[][0], z[][3]
|
||||
fmul v16.4s, v25.4s, v15.s[0] // rr
|
||||
fmul v17.4s, v25.4s, v15.s[1] // ri
|
||||
fmul v18.4s, v26.4s, v15.s[0] // ir
|
||||
fmul v19.4s, v26.4s, v15.s[1] // ii
|
||||
faddp v0.4s, v0.4s, v0.4s
|
||||
// z[][1], z[][2]
|
||||
fmul v20.4s, v25.4s, v15.s[2] // rr
|
||||
fmul v21.4s, v25.4s, v15.s[3] // ri
|
||||
fmul v22.4s, v26.4s, v15.s[2] // ir
|
||||
fmul v23.4s, v26.4s, v15.s[3] // ii
|
||||
fadd v0.2s, v24.2s, v0.2s // out[0]
|
||||
|
||||
// z[0123][0], z[0123][3]
|
||||
fsub v24.4s, v16.4s, v19.4s // (c).re = rr - ii;
|
||||
fadd v27.4s, v16.4s, v19.4s // (d).re = rr + ii;
|
||||
ld1 {v16.16b}, [x11]
|
||||
ld1 {v19.16b}, [x14]
|
||||
fadd v28.4s, v17.4s, v18.4s // (c).im = ri + ir;
|
||||
fsub v31.4s, v18.4s, v17.4s // (d).im = -ri + ir;
|
||||
ld1 {v17.16b}, [x12]
|
||||
// z[0123][1], z[0123][2]
|
||||
fsub v25.4s, v20.4s, v23.4s // (c).re = rr - ii;
|
||||
fadd v26.4s, v20.4s, v23.4s // (d).re = rr + ii;
|
||||
ld1 {v18.16b}, [x13]
|
||||
fadd v29.4s, v21.4s, v22.4s // (c).im = ri + ir;
|
||||
fsub v30.4s, v22.4s, v21.4s // (d).im = -ri + ir;
|
||||
|
||||
//real
|
||||
tbl v20.16b, {v24.16b}, v16.16b
|
||||
tbl v21.16b, {v25.16b}, v17.16b
|
||||
tbl v22.16b, {v26.16b}, v18.16b
|
||||
tbl v23.16b, {v27.16b}, v19.16b
|
||||
//imag
|
||||
tbl v16.16b, {v28.16b}, v16.16b
|
||||
tbl v17.16b, {v29.16b}, v17.16b
|
||||
tbl v18.16b, {v30.16b}, v18.16b
|
||||
tbl v19.16b, {v31.16b}, v19.16b
|
||||
|
||||
fadd v6.4s, v6.4s, v20.4s
|
||||
fadd v22.4s, v22.4s, v23.4s
|
||||
fadd v7.4s, v7.4s, v16.4s
|
||||
fadd v18.4s, v18.4s, v19.4s
|
||||
|
||||
fadd v21.4s, v21.4s, v22.4s
|
||||
fadd v17.4s, v17.4s, v18.4s
|
||||
fadd v6.4s, v6.4s, v21.4s
|
||||
fadd v7.4s, v7.4s, v17.4s
|
||||
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function fft15_neon
|
||||
mov x8, x1
|
||||
mov x9, x30
|
||||
add x2, x3, x3, lsl #1 // 3 * stride
|
||||
|
||||
add x1, x8, x3, lsl #3 // in + 1 * stride
|
||||
bl fft5_neon
|
||||
mov v1.8b, v0.8b
|
||||
mov v2.16b, v6.16b
|
||||
mov v3.16b, v7.16b
|
||||
|
||||
add x1, x8, x3, lsl #4 // in + 2 * stride
|
||||
add x2, x3, x3, lsl #1 // 3 * stride
|
||||
bl fft5_neon
|
||||
zip1 v1.4s, v1.4s, v0.4s
|
||||
mov v4.16b, v6.16b
|
||||
mov v5.16b, v7.16b
|
||||
|
||||
mov x1, x8 // in + 0 * stride
|
||||
add x2, x3, x3, lsl #1 // 3 * stride
|
||||
bl fft5_neon
|
||||
|
||||
faddp v20.4s, v1.4s, v1.4s
|
||||
|
||||
ext v18.16b, v8.16b, v8.16b, #4
|
||||
ext v19.16b, v9.16b, v9.16b, #4
|
||||
mov v16.16b, v6.16b
|
||||
mov v17.16b, v7.16b
|
||||
fadd v20.2s, v20.2s, v0.2s
|
||||
|
||||
uzp1 v18.4s, v18.4s, v10.4s // exp[2,4,6,8].re
|
||||
uzp1 v19.4s, v19.4s, v11.4s // exp[2,4,6,8].im
|
||||
|
||||
st1 {v20.2s}, [x0], #8 // out[0]
|
||||
|
||||
fmla v16.4s, v2.4s, v8.4s
|
||||
fmls v16.4s, v3.4s, v9.4s
|
||||
|
||||
fmla v17.4s, v2.4s, v9.4s
|
||||
fmla v17.4s, v3.4s, v8.4s
|
||||
|
||||
fmla v16.4s, v4.4s, v18.4s
|
||||
fmls v16.4s, v5.4s, v19.4s
|
||||
|
||||
fmla v17.4s, v4.4s, v19.4s
|
||||
fmla v17.4s, v5.4s, v18.4s
|
||||
|
||||
zip1 v18.4s, v16.4s, v17.4s
|
||||
zip2 v19.4s, v16.4s, v17.4s
|
||||
|
||||
rev64 v31.4s, v14.4s
|
||||
trn1 v28.2d, v1.2d, v1.2d
|
||||
trn2 v29.2d, v1.2d, v1.2d
|
||||
zip1 v30.2d, v14.2d, v31.2d
|
||||
zip2 v31.2d, v14.2d, v31.2d
|
||||
|
||||
st1 {v18.4s,v19.4s}, [x0], #32 // out[1-4]
|
||||
|
||||
fmul v16.4s, v28.4s, v30.4s
|
||||
fmul v17.4s, v29.4s, v30.4s
|
||||
fmls v16.4s, v29.4s, v31.4s
|
||||
fmla v17.4s, v28.4s, v31.4s
|
||||
faddp v16.4s, v16.4s, v16.4s
|
||||
faddp v17.4s, v17.4s, v17.4s
|
||||
zip1 v18.2s, v16.2s, v17.2s
|
||||
zip2 v19.2s, v16.2s, v17.2s
|
||||
|
||||
fadd v18.2s, v18.2s, v0.2s
|
||||
fadd v0.2s, v19.2s, v0.2s
|
||||
|
||||
ext v30.16b, v12.16b, v12.16b, #4
|
||||
ext v31.16b, v13.16b, v13.16b, #4
|
||||
mov v16.16b, v6.16b
|
||||
mov v17.16b, v7.16b
|
||||
|
||||
uzp1 v30.4s, v30.4s, v8.4s
|
||||
uzp1 v31.4s, v31.4s, v9.4s
|
||||
|
||||
st1 {v18.2s}, [x0], #8 // out[5]
|
||||
|
||||
fmla v16.4s, v2.4s, v10.4s
|
||||
fmls v16.4s, v3.4s, v11.4s
|
||||
|
||||
fmla v17.4s, v2.4s, v11.4s
|
||||
fmla v17.4s, v3.4s, v10.4s
|
||||
|
||||
fmla v16.4s, v4.4s, v30.4s
|
||||
fmls v16.4s, v5.4s, v31.4s
|
||||
|
||||
fmla v17.4s, v4.4s, v31.4s
|
||||
fmla v17.4s, v5.4s, v30.4s
|
||||
|
||||
zip1 v18.4s, v16.4s, v17.4s
|
||||
zip2 v19.4s, v16.4s, v17.4s
|
||||
|
||||
ext v30.16b, v10.16b, v10.16b, #4
|
||||
ext v31.16b, v11.16b, v11.16b, #4
|
||||
|
||||
fmla v6.4s, v2.4s, v12.4s
|
||||
fmls v6.4s, v3.4s, v13.4s
|
||||
|
||||
st1 {v18.4s,v19.4s}, [x0], #32 // out[6-9]
|
||||
|
||||
uzp1 v30.4s, v30.4s, v12.4s
|
||||
uzp1 v31.4s, v31.4s, v13.4s
|
||||
|
||||
fmla v7.4s, v2.4s, v13.4s
|
||||
fmla v7.4s, v3.4s, v12.4s
|
||||
|
||||
st1 {v0.2s}, [x0], #8 // out[10]
|
||||
|
||||
fmla v6.4s, v4.4s, v30.4s
|
||||
fmls v6.4s, v5.4s, v31.4s
|
||||
|
||||
fmla v7.4s, v4.4s, v31.4s
|
||||
fmla v7.4s, v5.4s, v30.4s
|
||||
|
||||
zip1 v18.4s, v6.4s, v7.4s
|
||||
zip2 v19.4s, v6.4s, v7.4s
|
||||
|
||||
st1 {v18.4s,v19.4s}, [x0], #32 // out[11-14]
|
||||
|
||||
ret x9
|
||||
endfunc
|
||||
|
||||
// x0: out, x1: out+len2, x2: exptab, x3: len2
|
||||
function fft15_pass
|
||||
ands x6, x3, #3
|
||||
mov x4, x0
|
||||
mov x5, x1
|
||||
b.eq 9f
|
||||
ld1 {v0.2s}, [x0], #8
|
||||
ld1 {v1.2s}, [x1], #8
|
||||
sub x3, x3, x6
|
||||
subs x6, x6, #1
|
||||
fadd v2.2s, v0.2s, v1.2s
|
||||
fsub v3.2s, v0.2s, v1.2s
|
||||
add x2, x2, #8
|
||||
st1 {v2.2s}, [x4], #8
|
||||
st1 {v3.2s}, [x5], #8
|
||||
b.eq 9f
|
||||
1:
|
||||
subs x6, x6, #1
|
||||
ldp s4, s5, [x2], #8
|
||||
ldp s2, s3, [x1], #8
|
||||
ldp s0, s1, [x0], #8
|
||||
|
||||
fmul s6, s2, s4
|
||||
fmul s7, s2, s5
|
||||
fmls s6, s3, v5.s[0]
|
||||
fmla s7, s3, v4.s[0]
|
||||
|
||||
fsub s2, s0, s6
|
||||
fsub s3, s1, s7
|
||||
fadd s0, s0, s6
|
||||
fadd s1, s1, s7
|
||||
|
||||
stp s2, s3, [x5], #8
|
||||
stp s0, s1, [x4], #8
|
||||
b.gt 1b
|
||||
9:
|
||||
ld1 {v4.4s,v5.4s}, [x2], #32
|
||||
ld2 {v2.4s,v3.4s}, [x1], #32
|
||||
uzp1 v6.4s, v4.4s, v5.4s
|
||||
uzp2 v7.4s, v4.4s, v5.4s
|
||||
ld2 {v0.4s,v1.4s}, [x0], #32
|
||||
8:
|
||||
subs x3, x3, #8
|
||||
|
||||
fmul v4.4s, v2.4s, v6.4s
|
||||
fmul v5.4s, v2.4s, v7.4s
|
||||
b.lt 4f
|
||||
|
||||
ld1 {v18.4s,v19.4s}, [x2], #32
|
||||
|
||||
fmls v4.4s, v3.4s, v7.4s
|
||||
fmla v5.4s, v3.4s, v6.4s
|
||||
|
||||
ld2 {v22.4s,v23.4s}, [x1], #32
|
||||
|
||||
fsub v2.4s, v0.4s, v4.4s
|
||||
fadd v0.4s, v0.4s, v4.4s
|
||||
fsub v3.4s, v1.4s, v5.4s
|
||||
fadd v1.4s, v1.4s, v5.4s
|
||||
|
||||
uzp1 v16.4s, v18.4s, v19.4s
|
||||
uzp2 v17.4s, v18.4s, v19.4s
|
||||
|
||||
st2 {v2.4s,v3.4s}, [x5], #32
|
||||
st2 {v0.4s,v1.4s}, [x4], #32
|
||||
ld2 {v20.4s,v21.4s}, [x0], #32
|
||||
|
||||
fmul v18.4s, v22.4s, v16.4s
|
||||
fmul v19.4s, v22.4s, v17.4s
|
||||
b.eq 0f
|
||||
|
||||
ld1 {v4.4s,v5.4s}, [x2], #32
|
||||
|
||||
fmls v18.4s, v23.4s, v17.4s
|
||||
fmla v19.4s, v23.4s, v16.4s
|
||||
|
||||
ld2 {v2.4s,v3.4s}, [x1], #32
|
||||
|
||||
fsub v22.4s, v20.4s, v18.4s
|
||||
fadd v20.4s, v20.4s, v18.4s
|
||||
fsub v23.4s, v21.4s, v19.4s
|
||||
fadd v21.4s, v21.4s, v19.4s
|
||||
|
||||
uzp1 v6.4s, v4.4s, v5.4s
|
||||
uzp2 v7.4s, v4.4s, v5.4s
|
||||
|
||||
st2 {v22.4s,v23.4s}, [x5], #32
|
||||
st2 {v20.4s,v21.4s}, [x4], #32
|
||||
ld2 {v0.4s,v1.4s}, [x0], #32
|
||||
|
||||
b 8b
|
||||
4:
|
||||
fmls v4.4s, v3.4s, v7.4s
|
||||
fmla v5.4s, v3.4s, v6.4s
|
||||
|
||||
fsub v2.4s, v0.4s, v4.4s
|
||||
fadd v0.4s, v0.4s, v4.4s
|
||||
fsub v3.4s, v1.4s, v5.4s
|
||||
fadd v1.4s, v1.4s, v5.4s
|
||||
|
||||
st2 {v2.4s,v3.4s}, [x5], #32
|
||||
st2 {v0.4s,v1.4s}, [x4], #32
|
||||
|
||||
ret
|
||||
0:
|
||||
fmls v18.4s, v23.4s, v17.4s
|
||||
fmla v19.4s, v23.4s, v16.4s
|
||||
|
||||
fsub v22.4s, v20.4s, v18.4s
|
||||
fadd v20.4s, v20.4s, v18.4s
|
||||
fsub v23.4s, v21.4s, v19.4s
|
||||
fadd v21.4s, v21.4s, v19.4s
|
||||
|
||||
st2 {v22.4s,v23.4s}, [x5], #32
|
||||
st2 {v20.4s,v21.4s}, [x4], #32
|
||||
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function fft30_neon align=6
|
||||
sub sp, sp, #0x20
|
||||
stp x20, x21, [sp]
|
||||
stp x22, x30, [sp, #0x10]
|
||||
mov x21, x1
|
||||
mov x22, x2
|
||||
mov x20, x4
|
||||
mov x0, x21
|
||||
mov x1, x22
|
||||
lsl x3, x20, #1
|
||||
bl fft15_neon
|
||||
|
||||
add x0, x21, #15*8
|
||||
add x1, x22, x20, lsl #3
|
||||
lsl x3, x20, #1
|
||||
bl fft15_neon
|
||||
|
||||
ldr x2, [x10, #(CELT_EXPTAB + 8)] // s->exptab[1]
|
||||
add x0, x21, #0
|
||||
add x1, x21, #15*8
|
||||
mov x3, #15
|
||||
ldp x20, x21, [sp]
|
||||
ldp x22, x30, [sp, #0x10]
|
||||
add sp, sp, #0x20
|
||||
b fft15_pass
|
||||
endfunc
|
||||
|
||||
.macro def_fft n, n2
|
||||
function fft\n\()_neon align=6
|
||||
sub sp, sp, #0x30
|
||||
stp x20, x21, [sp]
|
||||
stp x22, x30, [sp, #0x10]
|
||||
stp x23, x24, [sp, #0x20]
|
||||
mov x21, x1
|
||||
mov x22, x2
|
||||
mov x23, x3
|
||||
mov x20, x4
|
||||
sub x3, x3, #1
|
||||
lsl x4, x4, #1
|
||||
bl fft\n2\()_neon
|
||||
|
||||
add x1, x21, #(\n2 * 8)
|
||||
add x2, x22, x20, lsl #3
|
||||
sub x3, x23, #1
|
||||
lsl x4, x20, #1
|
||||
bl fft\n2\()_neon
|
||||
|
||||
add x5, x10, #CELT_EXPTAB
|
||||
mov x0, x21
|
||||
ldr x2, [x5, x23, lsl #3] // s->exptab[N]
|
||||
add x1, x21, #(\n2 * 8)
|
||||
mov x3, #\n2
|
||||
ldp x20, x21, [sp]
|
||||
ldp x22, x30, [sp, #0x10]
|
||||
ldp x23, x24, [sp, #0x20]
|
||||
add sp, sp, #0x30
|
||||
b fft15_pass
|
||||
endfunc
|
||||
.endm
|
||||
|
||||
def_fft 60, 30
|
||||
def_fft 120, 60
|
||||
def_fft 240, 120
|
||||
def_fft 480, 240
|
||||
def_fft 960, 480
|
||||
|
||||
function fft_b15_calc_neon
|
||||
sub sp, sp, #0x50
|
||||
ldr x8, [x0, #CELT_EXPTAB] // s->exptab[0]
|
||||
movrel x6, fact5
|
||||
movrel x11, shuffle_0213
|
||||
movrel x12, shuffle_1032
|
||||
movrel x13, shuffle_2301
|
||||
movrel x14, shuffle_3120
|
||||
add x8, x8, #8
|
||||
movrel x5, fft_tab_neon
|
||||
stp x20, x30, [sp]
|
||||
stp d8, d9, [sp, #0x10]
|
||||
stp d10, d11, [sp, #0x20]
|
||||
stp d12, d13, [sp, #0x30]
|
||||
stp d14, d15, [sp, #0x40]
|
||||
ld1 {v15.4s}, [x6]
|
||||
ld1 {v0.4s,v1.4s}, [x8], #32
|
||||
ld1 {v6.2s}, [x8], #8
|
||||
ld1 {v2.4s,v3.4s}, [x8], #32
|
||||
ld1 {v7.2s}, [x8], #8
|
||||
ld1 {v4.4s,v5.4s}, [x8], #32
|
||||
uzp1 v8.4s, v0.4s, v1.4s // exp[ 1 - 4].re
|
||||
uzp2 v9.4s, v0.4s, v1.4s // exp[ 1 - 4].im
|
||||
uzp1 v10.4s, v2.4s, v3.4s // exp[ 6 - 9].re
|
||||
uzp2 v11.4s, v2.4s, v3.4s // exp[ 6 - 9].im
|
||||
uzp1 v12.4s, v4.4s, v5.4s // exp[11 - 14].re
|
||||
uzp2 v13.4s, v4.4s, v5.4s // exp[11 - 14].im
|
||||
zip1 v14.4s, v6.4s, v7.4s // exp[5,10].re/exp[5,10].im
|
||||
add x5, x5, x3, lsl #3
|
||||
ldr x5, [x5]
|
||||
mov x10, x0
|
||||
blr x5
|
||||
ldp x20, x30, [sp]
|
||||
ldp d8, d9, [sp, #0x10]
|
||||
ldp d10, d11, [sp, #0x20]
|
||||
ldp d12, d13, [sp, #0x30]
|
||||
ldp d14, d15, [sp, #0x40]
|
||||
add sp, sp, #0x50
|
||||
ret
|
||||
endfunc
|
||||
|
||||
const fft_tab_neon
|
||||
.quad fft15_neon
|
||||
.quad fft30_neon
|
||||
.quad fft60_neon
|
||||
.quad fft120_neon
|
||||
.quad fft240_neon
|
||||
.quad fft480_neon
|
||||
.quad fft960_neon
|
||||
endconst
|
||||
|
||||
function ff_celt_imdct_half_neon, export=1
|
||||
sub sp, sp, #0x20
|
||||
stp x21, x30, [sp]
|
||||
str s0, [sp, #0x10]
|
||||
|
||||
ldp w5, w6, [x0, #CELT_LEN2] // CELT_LEN4
|
||||
mov x10, x0
|
||||
mov x21, x1
|
||||
sub w5, w5, #1
|
||||
lsl x7, x3, #3 // 2 * stride * sizeof(float)
|
||||
sub x8, xzr, x3, lsl #3 // -2 * stride * sizeof(float)
|
||||
mul x5, x5, x3
|
||||
ldp x9, x10, [x0, #CELT_TMP] // CELT_TWIDDLE
|
||||
ldr w3, [x0, #CELT_FFT_N]
|
||||
add x5, x2, x5, lsl #2
|
||||
mov x11, x9
|
||||
|
||||
sub w6, w6, #4
|
||||
ld1 {v0.s}[0], [x5], x8
|
||||
ld1 {v1.s}[0], [x2], x7
|
||||
ld1 {v4.4s,v5.4s}, [x10], #32
|
||||
ld1 {v0.s}[1], [x5], x8
|
||||
ld1 {v1.s}[1], [x2], x7
|
||||
uzp1 v2.4s, v4.4s, v5.4s
|
||||
ld1 {v0.s}[2], [x5], x8
|
||||
ld1 {v1.s}[2], [x2], x7
|
||||
uzp2 v3.4s, v4.4s, v5.4s
|
||||
ld1 {v0.s}[3], [x5], x8
|
||||
ld1 {v1.s}[3], [x2], x7
|
||||
1:
|
||||
subs w6, w6, #4
|
||||
|
||||
ld1 {v20.s}[0], [x5], x8
|
||||
ld1 {v21.s}[0], [x2], x7
|
||||
ld1 {v4.4s,v5.4s}, [x10], #32
|
||||
|
||||
fmul v6.4s, v0.4s, v2.4s
|
||||
fmul v7.4s, v0.4s, v3.4s
|
||||
|
||||
ld1 {v20.s}[1], [x5], x8
|
||||
ld1 {v21.s}[1], [x2], x7
|
||||
|
||||
fmls v6.4s, v1.4s, v3.4s
|
||||
fmla v7.4s, v1.4s, v2.4s
|
||||
|
||||
ld1 {v20.s}[2], [x5], x8
|
||||
ld1 {v21.s}[2], [x2], x7
|
||||
|
||||
uzp1 v2.4s, v4.4s, v5.4s
|
||||
uzp2 v3.4s, v4.4s, v5.4s
|
||||
ld1 {v20.s}[3], [x5], x8
|
||||
ld1 {v21.s}[3], [x2], x7
|
||||
|
||||
zip1 v4.4s, v6.4s, v7.4s
|
||||
zip2 v5.4s, v6.4s, v7.4s
|
||||
|
||||
fmul v6.4s, v20.4s, v2.4s
|
||||
fmul v7.4s, v20.4s, v3.4s
|
||||
|
||||
st1 {v4.4s,v5.4s}, [x9], #32
|
||||
|
||||
fmls v6.4s, v21.4s, v3.4s
|
||||
fmla v7.4s, v21.4s, v2.4s
|
||||
|
||||
b.eq 3f
|
||||
|
||||
subs w6, w6, #4
|
||||
ld1 {v4.4s,v5.4s}, [x10], #32
|
||||
ld1 {v0.s}[0], [x5], x8
|
||||
ld1 {v1.s}[0], [x2], x7
|
||||
uzp1 v2.4s, v4.4s, v5.4s
|
||||
ld1 {v0.s}[1], [x5], x8
|
||||
ld1 {v1.s}[1], [x2], x7
|
||||
uzp2 v3.4s, v4.4s, v5.4s
|
||||
ld1 {v0.s}[2], [x5], x8
|
||||
ld1 {v1.s}[2], [x2], x7
|
||||
zip1 v4.4s, v6.4s, v7.4s
|
||||
zip2 v5.4s, v6.4s, v7.4s
|
||||
ld1 {v0.s}[3], [x5], x8
|
||||
ld1 {v1.s}[3], [x2], x7
|
||||
|
||||
st1 {v4.4s,v5.4s}, [x9], #32
|
||||
|
||||
b.gt 1b
|
||||
|
||||
fmul v6.4s, v0.4s, v2.4s
|
||||
fmul v7.4s, v0.4s, v3.4s
|
||||
fmls v6.4s, v1.4s, v3.4s
|
||||
fmla v7.4s, v1.4s, v2.4s
|
||||
3:
|
||||
zip1 v4.4s, v6.4s, v7.4s
|
||||
zip2 v5.4s, v6.4s, v7.4s
|
||||
st1 {v4.4s,v5.4s}, [x9], #32
|
||||
|
||||
mov x2, x11
|
||||
mov x4, #1
|
||||
|
||||
bl fft_b15_calc_neon
|
||||
|
||||
ldr w5, [x10, #CELT_LEN4]
|
||||
ldr x6, [x10, #CELT_TWIDDLE]
|
||||
ldr s31, [sp, #0x10]
|
||||
|
||||
add x1, x21, x5, lsl #2
|
||||
add x3, x6, x5, lsl #2
|
||||
sub x0, x1, #16
|
||||
sub x2, x3, #16
|
||||
mov x8, #-16
|
||||
mov x7, #16
|
||||
mov x10, x0
|
||||
mov x11, x1
|
||||
|
||||
sub w5, w5, #4
|
||||
|
||||
ld1 {v0.4s}, [x0], x8
|
||||
ld1 {v1.4s}, [x1], x7
|
||||
ld1 {v2.4s}, [x2], x8
|
||||
ld1 {v3.4s}, [x3], x7
|
||||
|
||||
uzp1 v4.4s, v0.4s, v1.4s // z[-i-2, -i-1, +i, i+1].re
|
||||
uzp2 v6.4s, v0.4s, v1.4s // z[-i-2, -i-1, +i, i+1].im
|
||||
|
||||
uzp1 v5.4s, v2.4s, v3.4s // twiddle_exptab[-i-2, -i-1, +i, i+1].re
uzp2 v7.4s, v2.4s, v3.4s // twiddle_exptab[-i-2, -i-1, +i, i+1].im
|
||||
|
||||
fmul v1.4s, v6.4s, v5.4s
|
||||
fmul v0.4s, v6.4s, v7.4s
|
||||
2:
|
||||
subs w5, w5, #4
|
||||
|
||||
ld1 {v20.4s}, [x0], x8
|
||||
|
||||
fmla v1.4s, v4.4s, v7.4s
|
||||
fmls v0.4s, v4.4s, v5.4s
|
||||
|
||||
ld1 {v21.4s}, [x1], x7
|
||||
|
||||
ext v1.16b, v1.16b, v1.16b, #8
|
||||
fmul v0.4s, v0.4s, v31.s[0]
|
||||
|
||||
ld1 {v2.4s}, [x2], x8
|
||||
|
||||
rev64 v1.4s, v1.4s
|
||||
fmul v1.4s, v1.4s, v31.s[0]
|
||||
|
||||
ld1 {v3.4s}, [x3], x7
|
||||
|
||||
zip1 v5.4s, v0.4s, v1.4s
|
||||
zip2 v7.4s, v0.4s, v1.4s
|
||||
|
||||
uzp1 v4.4s, v20.4s, v21.4s // z[-i-2, -i-1, +i, i+1].re
|
||||
uzp2 v6.4s, v20.4s, v21.4s // z[-i-2, -i-1, +i, i+1].im
|
||||
|
||||
st1 {v5.4s}, [x10], x8
|
||||
st1 {v7.4s}, [x11], x7
|
||||
|
||||
uzp1 v5.4s, v2.4s, v3.4s // twiddle_exptab[-i-2, -i-1, +i, i+1].re
uzp2 v7.4s, v2.4s, v3.4s // twiddle_exptab[-i-2, -i-1, +i, i+1].im
|
||||
|
||||
fmul v1.4s, v6.4s, v5.4s
|
||||
fmul v0.4s, v6.4s, v7.4s
|
||||
b.gt 2b
|
||||
|
||||
fmla v1.4s, v4.4s, v7.4s
|
||||
fmls v0.4s, v4.4s, v5.4s
|
||||
ext v1.16b, v1.16b, v1.16b, #8
|
||||
fmul v0.4s, v0.4s, v31.s[0]
|
||||
rev64 v1.4s, v1.4s
|
||||
fmul v1.4s, v1.4s, v31.s[0]
|
||||
zip1 v5.4s, v0.4s, v1.4s
|
||||
zip2 v7.4s, v0.4s, v1.4s
|
||||
st1 {v5.4s}, [x10], x8
|
||||
st1 {v7.4s}, [x11], x7
|
||||
|
||||
ldp x21, x30, [sp]
|
||||
add sp, sp, #0x20
|
||||
ret
|
||||
endfunc
|
||||
|
||||
// [0] = exp(2 * i * pi / 5), [1] = exp(2 * i * pi * 2 / 5)
|
||||
const fact5 align=4
|
||||
.float 0.30901699437494745, 0.95105651629515353
|
||||
.float -0.80901699437494734, 0.58778525229247325
|
||||
endconst
|
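For reference: as the comment in the hunk above says, the fact5 table holds exp(2*pi*i/5) and exp(2*pi*i*2/5) as (re, im) pairs. A small stand-alone C check (hypothetical, not FFmpeg code) that reproduces the four floats:

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

int main(void)
{
    /* k = 1 ->  0.30901699..., 0.95105651...
       k = 2 -> -0.80901699..., 0.58778525...  (matches fact5 above) */
    for (int k = 1; k <= 2; k++) {
        double a = 2.0 * M_PI * k / 5.0;
        printf("exp(2*pi*i*%d/5) = %.17f %+.17f*i\n", k, cos(a), sin(a));
    }
    return 0;
}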
@@ -1,28 +0,0 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/aarch64/asm.S"

function ff_prefetch_aarch64, export=1
        subs            w2, w2, #2
        prfm            pldl1strm, [x0]
        prfm            pldl1strm, [x0, x1]
        add             x0, x0, x1, lsl #1
        b.gt            X(ff_prefetch_aarch64)
        ret
endfunc
@@ -1,32 +0,0 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/aarch64/cpu.h"
#include "libavcodec/videodsp.h"

void ff_prefetch_aarch64(uint8_t *mem, ptrdiff_t stride, int h);

av_cold void ff_videodsp_init_aarch64(VideoDSPContext *ctx, int bpc)
{
    int cpu_flags = av_get_cpu_flags();

    if (have_armv8(cpu_flags))
        ctx->prefetch = ff_prefetch_aarch64;
}
@@ -1,34 +0,0 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/aarch64/cpu.h"
#include "libavcodec/vorbisdsp.h"

void ff_vorbis_inverse_coupling_neon(float *mag, float *ang,
                                     intptr_t blocksize);

av_cold void ff_vorbisdsp_init_aarch64(VorbisDSPContext *c)
{
    int cpu_flags = av_get_cpu_flags();

    if (have_neon(cpu_flags)) {
        c->vorbis_inverse_coupling = ff_vorbis_inverse_coupling_neon;
    }
}
@@ -1,82 +0,0 @@
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include "libavutil/aarch64/asm.S"

function ff_vorbis_inverse_coupling_neon, export=1
movi v20.4s, #1<<7, lsl #24
subs x2, x2, #4
mov x3, x0
mov x4, x1
b.eq 3f

ld1 {v7.4s}, [x1], #16
ld1 {v6.4s}, [x0], #16
cmle v4.4s, v7.4s, #0
and v5.16b, v6.16b, v20.16b
eor v7.16b, v7.16b, v5.16b
and v2.16b, v7.16b, v4.16b
bic v3.16b, v7.16b, v4.16b
fadd v7.4s, v6.4s, v2.4s
fsub v6.4s, v6.4s, v3.4s
1: ld1 {v1.4s}, [x1], #16
ld1 {v0.4s}, [x0], #16
cmle v4.4s, v1.4s, #0
and v5.16b, v0.16b, v20.16b
eor v1.16b, v1.16b, v5.16b
st1 {v7.4s}, [x3], #16
st1 {v6.4s}, [x4], #16
and v2.16b, v1.16b, v4.16b
bic v3.16b, v1.16b, v4.16b
fadd v1.4s, v0.4s, v2.4s
fsub v0.4s, v0.4s, v3.4s
subs x2, x2, #8
b.le 2f
ld1 {v7.4s}, [x1], #16
ld1 {v6.4s}, [x0], #16
cmle v4.4s, v7.4s, #0
and v5.16b, v6.16b, v20.16b
eor v7.16b, v7.16b, v5.16b
st1 {v1.4s}, [x3], #16
st1 {v0.4s}, [x4], #16
and v2.16b, v7.16b, v4.16b
bic v3.16b, v7.16b, v4.16b
fadd v7.4s, v6.4s, v2.4s
fsub v6.4s, v6.4s, v3.4s
b 1b

2: st1 {v1.4s}, [x3], #16
st1 {v0.4s}, [x4], #16
b.lt ret

3: ld1 {v1.4s}, [x1]
ld1 {v0.4s}, [x0]
cmle v4.4s, v1.4s, #0
and v5.16b, v0.16b, v20.16b
eor v1.16b, v1.16b, v5.16b
and v2.16b, v1.16b, v4.16b
bic v3.16b, v1.16b, v4.16b
fadd v1.4s, v0.4s, v2.4s
fsub v0.4s, v0.4s, v3.4s
st1 {v1.4s}, [x0], #16
st1 {v0.4s}, [x1], #16
ret:
ret
endfunc
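
For readers following the NEON routine above, the scalar loop it vectorizes is sketched below in C. This is reconstructed from memory of FFmpeg's generic vorbisdsp path, so treat it as an illustration of the magnitude/angle recombination rather than the authoritative reference implementation.

    #include <stdint.h>

    /* Scalar sketch of Vorbis inverse coupling: rebuilds the two channels
     * from the magnitude/angle representation, mirroring what the NEON
     * code does four floats at a time. */
    static void vorbis_inverse_coupling_c(float *mag, float *ang, intptr_t blocksize)
    {
        intptr_t i;
        for (i = 0; i < blocksize; i++) {
            if (mag[i] > 0.0f) {
                if (ang[i] > 0.0f) {
                    ang[i] = mag[i] - ang[i];
                } else {
                    float temp = ang[i];
                    ang[i]     = mag[i];
                    mag[i]    += temp;
                }
            } else {
                if (ang[i] > 0.0f) {
                    ang[i] += mag[i];
                } else {
                    float temp = ang[i];
                    ang[i]     = mag[i];
                    mag[i]    -= temp;
                }
            }
        }
    }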
@@ -51,52 +51,6 @@
#define EXP_D25 2
#define EXP_D45 3

#ifndef USE_FIXED
#define USE_FIXED 0
#endif

#if USE_FIXED

#define FFT_FLOAT 0

#define FIXR(a) ((int)((a) * 0 + 0.5))
#define FIXR12(a) ((int)((a) * 4096 + 0.5))
#define FIXR15(a) ((int)((a) * 32768 + 0.5))
#define ROUND15(x) ((x) + 16384) >> 15

#define AC3_RENAME(x) x ## _fixed
#define AC3_NORM(norm) (1<<24)/(norm)
#define AC3_MUL(a,b) ((((int64_t) (a)) * (b))>>12)
#define AC3_RANGE(x) (x)
#define AC3_DYNAMIC_RANGE(x) (x)
#define AC3_SPX_BLEND(x) (x)
#define AC3_DYNAMIC_RANGE1 0

#define INTFLOAT int
#define SHORTFLOAT int16_t

#else /* USE_FIXED */

#define FIXR(x) ((float)(x))
#define FIXR12(x) ((float)(x))
#define FIXR15(x) ((float)(x))
#define ROUND15(x) (x)

#define AC3_RENAME(x) x
#define AC3_NORM(norm) (1.0f/(norm))
#define AC3_MUL(a,b) ((a) * (b))
#define AC3_RANGE(x) (dynamic_range_tab[(x)])
#define AC3_DYNAMIC_RANGE(x) (powf(x, s->drc_scale))
#define AC3_SPX_BLEND(x) (x)* (1.0f/32)
#define AC3_DYNAMIC_RANGE1 1.0f

#define INTFLOAT float
#define SHORTFLOAT float

#endif /* USE_FIXED */

#define AC3_LEVEL(x) ROUND15((x) * FIXR15(0.7071067811865476))

/* pre-defined gain values */
#define LEVEL_PLUS_3DB 1.4142135623730950
#define LEVEL_PLUS_1POINT5DB 1.1892071150027209
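
A small standalone illustration of the fixed-point branch above: FIXR12() quantizes a constant to Q12, and AC3_MUL() keeps the product of two Q12 values in Q12 by shifting the 64-bit intermediate right by 12. The values below are arbitrary examples chosen for the demo, not taken from the codec.

    #include <stdint.h>
    #include <stdio.h>

    /* Q12 helpers, copied from the macros in the hunk above. */
    #define FIXR12(a)    ((int)((a) * 4096 + 0.5))
    #define AC3_MUL(a,b) ((((int64_t)(a)) * (b)) >> 12)

    int main(void)
    {
        int half  = FIXR12(0.5);                 /* 2048 in Q12 */
        int m3db  = FIXR12(0.7071067811865476);  /* ~2896, the -3 dB level */
        /* Product of two Q12 numbers, renormalized back to Q12. */
        printf("0.5 * -3dB = %d (Q12) = %.6f\n",
               (int)AC3_MUL(half, m3db), AC3_MUL(half, m3db) / 4096.0);
        return 0;
    }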
@@ -166,7 +166,7 @@ static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
int err;
union {
uint64_t u64;
uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
uint8_t u8[8];
} tmp = { av_be2ne64(state) };
AC3HeaderInfo hdr, *phdr = &hdr;
GetBitContext gbc;
@@ -33,7 +33,6 @@
#include "libavutil/crc.h"
#include "libavutil/downmix_info.h"
#include "libavutil/opt.h"
#include "bswapdsp.h"
#include "internal.h"
#include "aac_ac3_parser.h"
#include "ac3_parser.h"
@@ -180,22 +179,13 @@ static av_cold int ac3_decode_init(AVCodecContext *avctx)
ac3_tables_init();
ff_mdct_init(&s->imdct_256, 8, 1, 1.0);
ff_mdct_init(&s->imdct_512, 9, 1, 1.0);
AC3_RENAME(ff_kbd_window_init)(s->window, 5.0, 256);
ff_bswapdsp_init(&s->bdsp);

#if (USE_FIXED)
s->fdsp = avpriv_alloc_fixed_dsp(avctx->flags & CODEC_FLAG_BITEXACT);
#else
ff_kbd_window_init(s->window, 5.0, 256);
ff_dsputil_init(&s->dsp, avctx);
avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
#endif

ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT);
ff_fmt_convert_init(&s->fmt_conv, avctx);
av_lfg_init(&s->dith_state, 0);

if (USE_FIXED)
avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
else
avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;

/* allow downmixing to stereo or mono */
@@ -355,45 +345,40 @@ static void set_downmix_coeffs(AC3DecodeContext *s)
float cmix = gain_levels[s-> center_mix_level];
float smix = gain_levels[s->surround_mix_level];
float norm0, norm1;
float downmix_coeffs[AC3_MAX_CHANNELS][2];

for (i = 0; i < s->fbw_channels; i++) {
downmix_coeffs[i][0] = gain_levels[ac3_default_coeffs[s->channel_mode][i][0]];
downmix_coeffs[i][1] = gain_levels[ac3_default_coeffs[s->channel_mode][i][1]];
s->downmix_coeffs[i][0] = gain_levels[ac3_default_coeffs[s->channel_mode][i][0]];
s->downmix_coeffs[i][1] = gain_levels[ac3_default_coeffs[s->channel_mode][i][1]];
}
if (s->channel_mode > 1 && s->channel_mode & 1) {
downmix_coeffs[1][0] = downmix_coeffs[1][1] = cmix;
s->downmix_coeffs[1][0] = s->downmix_coeffs[1][1] = cmix;
}
if (s->channel_mode == AC3_CHMODE_2F1R || s->channel_mode == AC3_CHMODE_3F1R) {
int nf = s->channel_mode - 2;
downmix_coeffs[nf][0] = downmix_coeffs[nf][1] = smix * LEVEL_MINUS_3DB;
s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf][1] = smix * LEVEL_MINUS_3DB;
}
if (s->channel_mode == AC3_CHMODE_2F2R || s->channel_mode == AC3_CHMODE_3F2R) {
int nf = s->channel_mode - 4;
downmix_coeffs[nf][0] = downmix_coeffs[nf+1][1] = smix;
s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf+1][1] = smix;
}

/* renormalize */
norm0 = norm1 = 0.0;
for (i = 0; i < s->fbw_channels; i++) {
norm0 += downmix_coeffs[i][0];
norm1 += downmix_coeffs[i][1];
norm0 += s->downmix_coeffs[i][0];
norm1 += s->downmix_coeffs[i][1];
}
norm0 = 1.0f / norm0;
norm1 = 1.0f / norm1;
for (i = 0; i < s->fbw_channels; i++) {
downmix_coeffs[i][0] *= norm0;
downmix_coeffs[i][1] *= norm1;
s->downmix_coeffs[i][0] *= norm0;
s->downmix_coeffs[i][1] *= norm1;
}

if (s->output_mode == AC3_CHMODE_MONO) {
for (i = 0; i < s->fbw_channels; i++)
downmix_coeffs[i][0] = (downmix_coeffs[i][0] +
downmix_coeffs[i][1]) * LEVEL_MINUS_3DB;
}
for (i = 0; i < s->fbw_channels; i++) {
s->downmix_coeffs[i][0] = FIXR12(downmix_coeffs[i][0]);
s->downmix_coeffs[i][1] = FIXR12(downmix_coeffs[i][1]);
s->downmix_coeffs[i][0] = (s->downmix_coeffs[i][0] +
s->downmix_coeffs[i][1]) * LEVEL_MINUS_3DB;
}
}
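
For reference, a self-contained sketch of the renormalization step in the hunk above: the per-output-channel coefficient sums are inverted and applied so that each downmix column sums to unity. The channel count and coefficient values here are made up for illustration and do not come from the AC-3 tables.

    #include <stdio.h>

    int main(void)
    {
        /* Example 3/2 layout (L, C, R, Ls, Rs) downmixed to stereo. */
        float coeffs[5][2] = {
            { 1.0f,    0.0f    },   /* L  -> left only          */
            { 0.7071f, 0.7071f },   /* C  -> both, centre mix   */
            { 0.0f,    1.0f    },   /* R  -> right only         */
            { 0.7071f, 0.0f    },   /* Ls -> left, surround mix */
            { 0.0f,    0.7071f },   /* Rs -> right, surround mix*/
        };
        float norm0 = 0.0f, norm1 = 0.0f;
        int i;

        /* Sum each output column, then scale so the column sums to 1.0. */
        for (i = 0; i < 5; i++) {
            norm0 += coeffs[i][0];
            norm1 += coeffs[i][1];
        }
        norm0 = 1.0f / norm0;
        norm1 = 1.0f / norm1;
        for (i = 0; i < 5; i++) {
            coeffs[i][0] *= norm0;
            coeffs[i][1] *= norm1;
            printf("ch %d: %.4f %.4f\n", i, coeffs[i][0], coeffs[i][1]);
        }
        return 0;
    }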
@@ -661,30 +646,20 @@ static inline void do_imdct(AC3DecodeContext *s, int channels)
for (ch = 1; ch <= channels; ch++) {
if (s->block_switch[ch]) {
int i;
FFTSample *x = s->tmp_output + 128;
float *x = s->tmp_output + 128;
for (i = 0; i < 128; i++)
x[i] = s->transform_coeffs[ch][2 * i];
s->imdct_256.imdct_half(&s->imdct_256, s->tmp_output, x);
#if USE_FIXED
s->fdsp->vector_fmul_window_scaled(s->outptr[ch - 1], s->delay[ch - 1],
s->tmp_output, s->window, 128, 8);
#else
s->fdsp.vector_fmul_window(s->outptr[ch - 1], s->delay[ch - 1],
s->tmp_output, s->window, 128);
#endif
for (i = 0; i < 128; i++)
x[i] = s->transform_coeffs[ch][2 * i + 1];
s->imdct_256.imdct_half(&s->imdct_256, s->delay[ch - 1], x);
} else {
s->imdct_512.imdct_half(&s->imdct_512, s->tmp_output, s->transform_coeffs[ch]);
#if USE_FIXED
s->fdsp->vector_fmul_window_scaled(s->outptr[ch - 1], s->delay[ch - 1],
s->tmp_output, s->window, 128, 8);
#else
s->fdsp.vector_fmul_window(s->outptr[ch - 1], s->delay[ch - 1],
s->tmp_output, s->window, 128);
#endif
memcpy(s->delay[ch - 1], s->tmp_output + 128, 128 * sizeof(FFTSample));
memcpy(s->delay[ch - 1], s->tmp_output + 128, 128 * sizeof(float));
}
}
}
@@ -819,13 +794,13 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
if (get_bits1(gbc)) {
/* Allow asymmetric application of DRC when drc_scale > 1.
Amplification of quiet sounds is enhanced */
INTFLOAT range = AC3_RANGE(get_bits(gbc, 8));
float range = dynamic_range_tab[get_bits(gbc, 8)];
if (range > 1.0 || s->drc_scale <= 1.0)
s->dynamic_range[i] = AC3_DYNAMIC_RANGE(range);
s->dynamic_range[i] = powf(range, s->drc_scale);
else
s->dynamic_range[i] = range;
} else if (blk == 0) {
s->dynamic_range[i] = AC3_DYNAMIC_RANGE1;
s->dynamic_range[i] = 1.0f;
}
} while (i--);
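
A hedged illustration of the asymmetric DRC rule in the hunk above: when drc_scale is greater than 1, gain cuts (range below 1) are applied at full strength while boosts are still raised to the drc_scale power. The table lookup is replaced by hard-coded range values here purely for the demo.

    #include <math.h>
    #include <stdio.h>

    static float apply_drc(float range, float drc_scale)
    {
        /* Mirrors the condition in decode_audio_block(): scale the gain
         * only when it is a boost (> 1.0) or when drc_scale is not
         * amplifying (<= 1.0); otherwise pass the cut through unchanged. */
        if (range > 1.0f || drc_scale <= 1.0f)
            return powf(range, drc_scale);
        return range;
    }

    int main(void)
    {
        printf("%f\n", apply_drc(0.5f, 2.0f));  /* cut kept as-is: 0.5   */
        printf("%f\n", apply_drc(2.0f, 2.0f));  /* boost amplified: 4.0  */
        printf("%f\n", apply_drc(0.5f, 0.5f));  /* scaled cut: ~0.707    */
        return 0;
    }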
@@ -851,9 +826,6 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
if (start_subband > 7)
start_subband += start_subband - 7;
end_subband = get_bits(gbc, 3) + 5;
#if USE_FIXED
s->spx_dst_end_freq = end_freq_inv_tab[end_subband];
#endif
if (end_subband > 7)
end_subband += end_subband - 7;
dst_start_freq = dst_start_freq * 12 + 25;
@@ -874,7 +846,6 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)

s->spx_dst_start_freq = dst_start_freq;
s->spx_src_start_freq = src_start_freq;
if (!USE_FIXED)
s->spx_dst_end_freq = dst_end_freq;

decode_band_structure(gbc, blk, s->eac3, 0,
@@ -895,40 +866,18 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
for (ch = 1; ch <= fbw_channels; ch++) {
if (s->channel_uses_spx[ch]) {
if (s->first_spx_coords[ch] || get_bits1(gbc)) {
INTFLOAT spx_blend;
float spx_blend;
int bin, master_spx_coord;

s->first_spx_coords[ch] = 0;
spx_blend = AC3_SPX_BLEND(get_bits(gbc, 5));
spx_blend = get_bits(gbc, 5) * (1.0f/32);
master_spx_coord = get_bits(gbc, 2) * 3;

bin = s->spx_src_start_freq;
for (bnd = 0; bnd < s->num_spx_bands; bnd++) {
int bandsize;
int spx_coord_exp, spx_coord_mant;
INTFLOAT nratio, sblend, nblend;
#if USE_FIXED
int64_t accu;
/* calculate blending factors */
bandsize = s->spx_band_sizes[bnd];
accu = (int64_t)((bin << 23) + (bandsize << 22)) * s->spx_dst_end_freq;
nratio = (int)(accu >> 32);
nratio -= spx_blend << 18;

if (nratio < 0) {
nblend = 0;
sblend = 0x800000;
} else if (nratio > 0x7fffff) {
nblend = 0x800000;
sblend = 0;
} else {
nblend = fixed_sqrt(nratio, 23);
accu = (int64_t)nblend * 1859775393;
nblend = (int)((accu + (1<<29)) >> 30);
sblend = fixed_sqrt(0x800000 - nratio, 23);
}
#else
float spx_coord;
float nratio, sblend, nblend, spx_coord;

/* calculate blending factors */
bandsize = s->spx_band_sizes[bnd];
@@ -937,7 +886,6 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
nblend = sqrtf(3.0f * nratio); // noise is scaled by sqrt(3)
// to give unity variance
sblend = sqrtf(1.0f - nratio);
#endif
bin += bandsize;

/* decode spx coordinates */
@@ -946,18 +894,11 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
if (spx_coord_exp == 15) spx_coord_mant <<= 1;
else spx_coord_mant += 4;
spx_coord_mant <<= (25 - spx_coord_exp - master_spx_coord);
spx_coord = spx_coord_mant * (1.0f / (1 << 23));

/* multiply noise and signal blending factors by spx coordinate */
#if USE_FIXED
accu = (int64_t)nblend * spx_coord_mant;
s->spx_noise_blend[ch][bnd] = (int)((accu + (1<<22)) >> 23);
accu = (int64_t)sblend * spx_coord_mant;
s->spx_signal_blend[ch][bnd] = (int)((accu + (1<<22)) >> 23);
#else
spx_coord = spx_coord_mant * (1.0f / (1 << 23));
s->spx_noise_blend [ch][bnd] = nblend * spx_coord;
s->spx_signal_blend[ch][bnd] = sblend * spx_coord;
#endif
}
}
} else {
@@ -1314,19 +1255,14 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)

/* apply scaling to coefficients (headroom, dynrng) */
for (ch = 1; ch <= s->channels; ch++) {
INTFLOAT gain;
if(s->channel_mode == AC3_CHMODE_DUALMONO) {
gain = s->dynamic_range[2-ch];
float gain = 1.0 / 4194304.0f;
if (s->channel_mode == AC3_CHMODE_DUALMONO) {
gain *= s->dynamic_range[2 - ch];
} else {
gain = s->dynamic_range[0];
gain *= s->dynamic_range[0];
}
#if USE_FIXED
scale_coefs(s->transform_coeffs[ch], s->fixed_coeffs[ch], gain, 256);
#else
gain *= 1.0 / 4194304.0f;
s->fmt_conv.int32_to_float_fmul_scalar(s->transform_coeffs[ch],
s->fixed_coeffs[ch], gain, 256);
#endif
}

/* apply spectral extension to high frequency bins */
@@ -1351,24 +1287,19 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
do_imdct(s, s->channels);

if (downmix_output) {
#if USE_FIXED
ac3_downmix_c_fixed16(s->outptr, s->downmix_coeffs,
s->out_channels, s->fbw_channels, 256);
#else
s->ac3dsp.downmix(s->outptr, s->downmix_coeffs,
s->out_channels, s->fbw_channels, 256);
#endif
}
} else {
if (downmix_output) {
s->ac3dsp.AC3_RENAME(downmix)(s->xcfptr + 1, s->downmix_coeffs,
s->ac3dsp.downmix(s->xcfptr + 1, s->downmix_coeffs,
s->out_channels, s->fbw_channels, 256);
}

if (downmix_output && !s->downmixed) {
s->downmixed = 1;
s->ac3dsp.AC3_RENAME(downmix)(s->dlyptr, s->downmix_coeffs,
s->out_channels, s->fbw_channels, 128);
s->ac3dsp.downmix(s->dlyptr, s->downmix_coeffs, s->out_channels,
s->fbw_channels, 128);
}

do_imdct(s, s->out_channels);
@@ -1389,7 +1320,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
AC3DecodeContext *s = avctx->priv_data;
int blk, ch, err, ret;
const uint8_t *channel_map;
const SHORTFLOAT *output[AC3_MAX_CHANNELS];
const float *output[AC3_MAX_CHANNELS];
enum AVMatrixEncoding matrix_encoding;
AVDownmixInfo *downmix_info;

@@ -1398,8 +1329,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
if (buf_size >= 2 && AV_RB16(buf) == 0x770B) {
// seems to be byte-swapped AC-3
int cnt = FFMIN(buf_size, AC3_FRAME_BUFFER_SIZE) >> 1;
s->bdsp.bswap16_buf((uint16_t *) s->input_buffer,
(const uint16_t *) buf, cnt);
s->dsp.bswap16_buf((uint16_t *)s->input_buffer, (const uint16_t *)buf, cnt);
} else
memcpy(s->input_buffer, buf, FFMIN(buf_size, AC3_FRAME_BUFFER_SIZE));
buf = s->input_buffer;
@@ -1517,7 +1447,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
}
for (ch = 0; ch < s->channels; ch++) {
if (ch < s->out_channels)
s->outptr[channel_map[ch]] = (SHORTFLOAT *)frame->data[ch];
s->outptr[channel_map[ch]] = (float *)frame->data[ch];
}
for (blk = 0; blk < s->num_blocks; blk++) {
if (!err && decode_audio_block(s, blk)) {
@@ -1526,7 +1456,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
}
if (err)
for (ch = 0; ch < s->out_channels; ch++)
memcpy(((SHORTFLOAT*)frame->data[ch]) + AC3_BLOCK_SIZE*blk, output[ch], AC3_BLOCK_SIZE*sizeof(SHORTFLOAT));
memcpy(((float*)frame->data[ch]) + AC3_BLOCK_SIZE*blk, output[ch], sizeof(**output) * AC3_BLOCK_SIZE);
for (ch = 0; ch < s->out_channels; ch++)
output[ch] = s->outptr[channel_map[ch]];
for (ch = 0; ch < s->out_channels; ch++) {
@@ -1539,7 +1469,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,

/* keep last block for error concealment in next frame */
for (ch = 0; ch < s->out_channels; ch++)
memcpy(s->output[ch], output[ch], AC3_BLOCK_SIZE*sizeof(SHORTFLOAT));
memcpy(s->output[ch], output[ch], sizeof(**output) * AC3_BLOCK_SIZE);

/*
* AVMatrixEncoding
@@ -1610,12 +1540,66 @@ static av_cold int ac3_decode_end(AVCodecContext *avctx)
AC3DecodeContext *s = avctx->priv_data;
ff_mdct_end(&s->imdct_512);
ff_mdct_end(&s->imdct_256);
#if (USE_FIXED)
av_freep(&s->fdsp);
#endif

return 0;
}

#define OFFSET(x) offsetof(AC3DecodeContext, x)
#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM)
static const AVOption options[] = {
{ "drc_scale", "percentage of dynamic range compression to apply", OFFSET(drc_scale), AV_OPT_TYPE_FLOAT, {.dbl = 1.0}, 0.0, 6.0, PAR },

{"dmix_mode", "Preferred Stereo Downmix Mode", OFFSET(preferred_stereo_downmix), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 2, 0, "dmix_mode"},
{"ltrt_cmixlev", "Lt/Rt Center Mix Level", OFFSET(ltrt_center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
{"ltrt_surmixlev", "Lt/Rt Surround Mix Level", OFFSET(ltrt_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
{"loro_cmixlev", "Lo/Ro Center Mix Level", OFFSET(loro_center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
{"loro_surmixlev", "Lo/Ro Surround Mix Level", OFFSET(loro_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},

{ NULL},
};

static const AVClass ac3_decoder_class = {
.class_name = "AC3 decoder",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_ac3_decoder = {
.name = "ac3",
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_AC3,
.priv_data_size = sizeof (AC3DecodeContext),
.init = ac3_decode_init,
.close = ac3_decode_end,
.decode = ac3_decode_frame,
.capabilities = CODEC_CAP_DR1,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
.priv_class = &ac3_decoder_class,
};

#if CONFIG_EAC3_DECODER
static const AVClass eac3_decoder_class = {
.class_name = "E-AC3 decoder",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_eac3_decoder = {
.name = "eac3",
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_EAC3,
.priv_data_size = sizeof (AC3DecodeContext),
.init = ac3_decode_init,
.close = ac3_decode_end,
.decode = ac3_decode_frame,
.capabilities = CODEC_CAP_DR1,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
.priv_class = &eac3_decoder_class,
};
#endif
@@ -51,12 +51,11 @@
#define AVCODEC_AC3DEC_H

#include "libavutil/float_dsp.h"
#include "libavutil/fixed_dsp.h"
#include "libavutil/lfg.h"
#include "ac3.h"
#include "ac3dsp.h"
#include "bswapdsp.h"
#include "get_bits.h"
#include "dsputil.h"
#include "fft.h"
#include "fmtconvert.h"

@@ -139,8 +138,8 @@ typedef struct AC3DecodeContext {
int num_spx_bands; ///< number of spx bands (nspxbnds)
uint8_t spx_band_sizes[SPX_MAX_BANDS]; ///< number of bins in each spx band
uint8_t first_spx_coords[AC3_MAX_CHANNELS]; ///< first spx coordinates states (firstspxcos)
INTFLOAT spx_noise_blend[AC3_MAX_CHANNELS][SPX_MAX_BANDS]; ///< spx noise blending factor (nblendfact)
INTFLOAT spx_signal_blend[AC3_MAX_CHANNELS][SPX_MAX_BANDS];///< spx signal blending factor (sblendfact)
float spx_noise_blend[AC3_MAX_CHANNELS][SPX_MAX_BANDS]; ///< spx noise blending factor (nblendfact)
float spx_signal_blend[AC3_MAX_CHANNELS][SPX_MAX_BANDS];///< spx signal blending factor (sblendfact)
///@}

///@name Adaptive hybrid transform
@@ -152,15 +151,15 @@ typedef struct AC3DecodeContext {
int fbw_channels; ///< number of full-bandwidth channels
int channels; ///< number of total channels
int lfe_ch; ///< index of LFE channel
SHORTFLOAT downmix_coeffs[AC3_MAX_CHANNELS][2]; ///< stereo downmix coefficients
float downmix_coeffs[AC3_MAX_CHANNELS][2]; ///< stereo downmix coefficients
int downmixed; ///< indicates if coeffs are currently downmixed
int output_mode; ///< output channel configuration
int out_channels; ///< number of output channels
///@}

///@name Dynamic range
INTFLOAT dynamic_range[2]; ///< dynamic range
INTFLOAT drc_scale; ///< percentage of dynamic range compression to be applied
float dynamic_range[2]; ///< dynamic range
float drc_scale; ///< percentage of dynamic range compression to be applied
///@}

///@name Bandwidth
@@ -207,27 +206,23 @@ typedef struct AC3DecodeContext {
///@}

///@name Optimization
BswapDSPContext bdsp;
#if USE_FIXED
AVFixedDSPContext *fdsp;
#else
DSPContext dsp; ///< for optimization
AVFloatDSPContext fdsp;
#endif
AC3DSPContext ac3dsp;
FmtConvertContext fmt_conv; ///< optimized conversion functions
///@}

SHORTFLOAT *outptr[AC3_MAX_CHANNELS];
INTFLOAT *xcfptr[AC3_MAX_CHANNELS];
INTFLOAT *dlyptr[AC3_MAX_CHANNELS];
float *outptr[AC3_MAX_CHANNELS];
float *xcfptr[AC3_MAX_CHANNELS];
float *dlyptr[AC3_MAX_CHANNELS];

///@name Aligned arrays
DECLARE_ALIGNED(16, int, fixed_coeffs)[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< fixed-point transform coefficients
DECLARE_ALIGNED(32, INTFLOAT, transform_coeffs)[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< transform coefficients
DECLARE_ALIGNED(32, INTFLOAT, delay)[AC3_MAX_CHANNELS][AC3_BLOCK_SIZE]; ///< delay - added to the next block
DECLARE_ALIGNED(32, INTFLOAT, window)[AC3_BLOCK_SIZE]; ///< window coefficients
DECLARE_ALIGNED(32, INTFLOAT, tmp_output)[AC3_BLOCK_SIZE]; ///< temporary storage for output before windowing
DECLARE_ALIGNED(32, SHORTFLOAT, output)[AC3_MAX_CHANNELS][AC3_BLOCK_SIZE]; ///< output after imdct transform and windowing
DECLARE_ALIGNED(16, int32_t, fixed_coeffs)[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< fixed-point transform coefficients
DECLARE_ALIGNED(32, float, transform_coeffs)[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< transform coefficients
DECLARE_ALIGNED(32, float, delay)[AC3_MAX_CHANNELS][AC3_BLOCK_SIZE]; ///< delay - added to the next block
DECLARE_ALIGNED(32, float, window)[AC3_BLOCK_SIZE]; ///< window coefficients
DECLARE_ALIGNED(32, float, tmp_output)[AC3_BLOCK_SIZE]; ///< temporary storage for output before windowing
DECLARE_ALIGNED(32, float, output)[AC3_MAX_CHANNELS][AC3_BLOCK_SIZE]; ///< output after imdct transform and windowing
DECLARE_ALIGNED(32, uint8_t, input_buffer)[AC3_FRAME_BUFFER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE]; ///< temp buffer to prevent overread
///@}
} AC3DecodeContext;
@@ -1,176 +0,0 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Stanislav Ocovaj (socovaj@mips.com)
*
* AC3 fixed-point decoder for MIPS platforms
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#define FFT_FLOAT 0
#define USE_FIXED 1
#define FFT_FIXED_32 1
#include "ac3dec.h"

/**
* Table for center mix levels
* reference: Section 5.4.2.4 cmixlev
*/
static const uint8_t center_levels[4] = { 4, 5, 6, 5 };

/**
* Table for surround mix levels
* reference: Section 5.4.2.5 surmixlev
*/
static const uint8_t surround_levels[4] = { 4, 6, 7, 6 };

int end_freq_inv_tab[8] =
{
50529027, 44278013, 39403370, 32292987, 27356480, 23729101, 20951060, 18755316
};

static void scale_coefs (
int32_t *dst,
const int32_t *src,
int dynrng,
int len)
{
int i, shift, round;
int16_t mul;
int temp, temp1, temp2, temp3, temp4, temp5, temp6, temp7;

mul = (dynrng & 0x1f) + 0x20;
shift = 4 - ((dynrng << 24) >> 29);
round = 1 << (shift-1);
for (i=0; i<len; i+=8) {

temp = src[i] * mul;
temp1 = src[i+1] * mul;
temp = temp + round;
temp2 = src[i+2] * mul;

temp1 = temp1 + round;
dst[i] = temp >> shift;
temp3 = src[i+3] * mul;
temp2 = temp2 + round;

dst[i+1] = temp1 >> shift;
temp4 = src[i + 4] * mul;
temp3 = temp3 + round;
dst[i+2] = temp2 >> shift;

temp5 = src[i+5] * mul;
temp4 = temp4 + round;
dst[i+3] = temp3 >> shift;
temp6 = src[i+6] * mul;

dst[i+4] = temp4 >> shift;
temp5 = temp5 + round;
temp7 = src[i+7] * mul;
temp6 = temp6 + round;

dst[i+5] = temp5 >> shift;
temp7 = temp7 + round;
dst[i+6] = temp6 >> shift;
dst[i+7] = temp7 >> shift;

}
}

/**
* Downmix samples from original signal to stereo or mono (this is for 16-bit samples
* and fixed point decoder - original (for 32-bit samples) is in ac3dsp.c).
*/
static void ac3_downmix_c_fixed16(int16_t **samples, int16_t (*matrix)[2],
int out_ch, int in_ch, int len)
{
int i, j;
int v0, v1;
if (out_ch == 2) {
for (i = 0; i < len; i++) {
v0 = v1 = 0;
for (j = 0; j < in_ch; j++) {
v0 += samples[j][i] * matrix[j][0];
v1 += samples[j][i] * matrix[j][1];
}
samples[0][i] = (v0+2048)>>12;
samples[1][i] = (v1+2048)>>12;
}
} else if (out_ch == 1) {
for (i = 0; i < len; i++) {
v0 = 0;
for (j = 0; j < in_ch; j++)
v0 += samples[j][i] * matrix[j][0];
samples[0][i] = (v0+2048)>>12;
}
}
}

#include "ac3dec.c"

static const AVOption options[] = {
{ NULL},
};

static const AVClass ac3_decoder_class = {
.class_name = "Fixed-Point AC-3 Decoder",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_ac3_fixed_decoder = {
.name = "ac3_fixed",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_AC3,
.priv_data_size = sizeof (AC3DecodeContext),
.init = ac3_decode_init,
.close = ac3_decode_end,
.decode = ac3_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_NONE },
.priv_class = &ac3_decoder_class,
};
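
As a cross-check on scale_coefs() above, here is a scalar sketch of the same mantissa/exponent decoding of the dynrng gain, without the manual unrolling. Only the arithmetic is taken from the function; the array contents and the dynrng value are made up for the demo.

    #include <stdint.h>
    #include <stdio.h>

    /* Reference (non-unrolled) version of the gain applied by scale_coefs(). */
    static void scale_coefs_ref(int32_t *dst, const int32_t *src, int dynrng, int len)
    {
        int mul   = (dynrng & 0x1f) + 0x20;     /* 5-bit mantissa plus implied leading 1 */
        int shift = 4 - ((dynrng << 24) >> 29); /* signed 3-bit exponent, shift is 1..8  */
        int round = 1 << (shift - 1);
        int i;

        for (i = 0; i < len; i++)
            dst[i] = (src[i] * mul + round) >> shift;
    }

    int main(void)
    {
        int32_t src[4] = { 4096, -4096, 12345, -12345 };
        int32_t dst[4];
        int i;

        scale_coefs_ref(dst, src, 0x1f, 4);   /* arbitrary dynrng value */
        for (i = 0; i < 4; i++)
            printf("%d -> %d\n", src[i], dst[i]);
        return 0;
    }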
@@ -1,89 +0,0 @@
/*
* AC-3 Audio Decoder
* This code was developed as part of Google Summer of Code 2006.
* E-AC-3 support was added as part of Google Summer of Code 2007.
*
* Copyright (c) 2006 Kartikey Mahendra BHATT (bhattkm at gmail dot com)
* Copyright (c) 2007-2008 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com>
* Copyright (c) 2007 Justin Ruggles <justin.ruggles@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

/**
* Upmix delay samples from stereo to original channel layout.
*/
#include "ac3dec.h"
#include "ac3dec.c"

static const AVOption options[] = {
{ "drc_scale", "percentage of dynamic range compression to apply", OFFSET(drc_scale), AV_OPT_TYPE_FLOAT, {.dbl = 1.0}, 0.0, 6.0, PAR },

{"dmix_mode", "Preferred Stereo Downmix Mode", OFFSET(preferred_stereo_downmix), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 2, 0, "dmix_mode"},
{"ltrt_cmixlev", "Lt/Rt Center Mix Level", OFFSET(ltrt_center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
{"ltrt_surmixlev", "Lt/Rt Surround Mix Level", OFFSET(ltrt_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
{"loro_cmixlev", "Lo/Ro Center Mix Level", OFFSET(loro_center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
{"loro_surmixlev", "Lo/Ro Surround Mix Level", OFFSET(loro_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},

{ NULL},
};

static const AVClass ac3_decoder_class = {
.class_name = "AC3 decoder",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_ac3_decoder = {
.name = "ac3",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_AC3,
.priv_data_size = sizeof (AC3DecodeContext),
.init = ac3_decode_init,
.close = ac3_decode_end,
.decode = ac3_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
.priv_class = &ac3_decoder_class,
};

#if CONFIG_EAC3_DECODER
static const AVClass eac3_decoder_class = {
.class_name = "E-AC3 decoder",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_eac3_decoder = {
.name = "eac3",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_EAC3,
.priv_data_size = sizeof (AC3DecodeContext),
.init = ac3_decode_init,
.close = ac3_decode_end,
.decode = ac3_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
.priv_class = &eac3_decoder_class,
};
#endif
@@ -1,5 +1,5 @@
/*
* AC-3 DSP functions
* AC-3 DSP utils
* Copyright (c) 2011 Justin Ruggles
*
* This file is part of FFmpeg.
@@ -239,31 +239,6 @@ static void ac3_downmix_c(float **samples, float (*matrix)[2],
}
}

static void ac3_downmix_c_fixed(int32_t **samples, int16_t (*matrix)[2],
int out_ch, int in_ch, int len)
{
int i, j;
int64_t v0, v1;
if (out_ch == 2) {
for (i = 0; i < len; i++) {
v0 = v1 = 0;
for (j = 0; j < in_ch; j++) {
v0 += (int64_t)samples[j][i] * matrix[j][0];
v1 += (int64_t)samples[j][i] * matrix[j][1];
}
samples[0][i] = (v0+2048)>>12;
samples[1][i] = (v1+2048)>>12;
}
} else if (out_ch == 1) {
for (i = 0; i < len; i++) {
v0 = 0;
for (j = 0; j < in_ch; j++)
v0 += (int64_t)samples[j][i] * matrix[j][0];
samples[0][i] = (v0+2048)>>12;
}
}
}

static void apply_window_int16_c(int16_t *output, const int16_t *input,
const int16_t *window, unsigned int len)
{
@@ -291,7 +266,6 @@ av_cold void ff_ac3dsp_init(AC3DSPContext *c, int bit_exact)
c->sum_square_butterfly_int32 = ac3_sum_square_butterfly_int32_c;
c->sum_square_butterfly_float = ac3_sum_square_butterfly_float_c;
c->downmix = ac3_downmix_c;
c->downmix_fixed = ac3_downmix_c_fixed;
c->apply_window_int16 = apply_window_int16_c;

if (ARCH_ARM)
@@ -1,5 +1,5 @@
/*
* AC-3 DSP functions
* AC-3 DSP utils
* Copyright (c) 2011 Justin Ruggles
*
* This file is part of FFmpeg.
@@ -135,9 +135,6 @@ typedef struct AC3DSPContext {
void (*downmix)(float **samples, float (*matrix)[2], int out_ch,
int in_ch, int len);

void (*downmix_fixed)(int32_t **samples, int16_t (*matrix)[2], int out_ch,
int in_ch, int len);

/**
* Apply symmetric window in 16-bit fixed-point.
* @param output destination array
@@ -37,7 +37,6 @@
#include "libavutil/opt.h"
#include "avcodec.h"
#include "put_bits.h"
#include "audiodsp.h"
#include "ac3dsp.h"
#include "ac3.h"
#include "fft.h"
@@ -1382,7 +1381,7 @@ static void ac3_output_frame_header(AC3EncodeContext *s)
*/
static void output_audio_block(AC3EncodeContext *s, int blk)
{
int ch, i, baie, bnd, got_cpl, av_uninit(ch0);
int ch, i, baie, bnd, got_cpl, ch0;
AC3Block *block = &s->blocks[blk];

/* block switching */
@@ -2247,7 +2246,7 @@ static av_cold int validate_options(AC3EncodeContext *s)
*/
static av_cold void set_bandwidth(AC3EncodeContext *s)
{
int blk, ch, av_uninit(cpl_start);
int blk, ch, cpl_start;

if (s->cutoff) {
/* calculate bandwidth based on user-specified cutoff frequency */
@@ -2479,8 +2478,8 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx)
if (ret)
goto init_fail;

ff_audiodsp_init(&s->adsp);
ff_dsputil_init(&s->dsp, avctx);
avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT);

dprint_options(s);
@@ -31,15 +31,12 @@
#include <stdint.h>

#include "libavutil/float_dsp.h"

#include "ac3.h"
#include "ac3dsp.h"
#include "avcodec.h"
#include "dsputil.h"
#include "fft.h"
#include "mathops.h"
#include "put_bits.h"
#include "audiodsp.h"
#include "fft.h"

#ifndef CONFIG_AC3ENC_FLOAT
#define CONFIG_AC3ENC_FLOAT 0
@@ -163,7 +160,6 @@ typedef struct AC3EncodeContext {
AVCodecContext *avctx; ///< parent AVCodecContext
PutBitContext pb; ///< bitstream writer context
DSPContext dsp;
AudioDSPContext adsp;
AVFloatDSPContext fdsp;
AC3DSPContext ac3dsp; ///< AC-3 optimized functions
FFTContext mdct; ///< FFT context for MDCT calculation
@@ -268,7 +264,6 @@ typedef struct AC3EncodeContext {
extern const uint64_t ff_ac3_channel_layouts[19];

int ff_ac3_encode_init(AVCodecContext *avctx);
int ff_ac3_float_encode_init(AVCodecContext *avctx);

int ff_ac3_encode_close(AVCodecContext *avctx);
Some files were not shown because too many files have changed in this diff.