Compare commits
293 commits: release/0. ... n0.10.5
Commit SHA1s:
50032a75d6 eed53a38c9 501e60dcf5 d36c706b86 fcb8bbf264 38c5e8fec5 1301942248 e2c7b37fd2
7f90fe1b4b 2cf6afffe5 50e6e494c9 0f54c97f58 a1f678f7ca 94905d2af6 b04fbd2cd2 f7b045db09
de1591b167 c7b73724c7 2fb4be9a99 e1608014c5 8c0c0e9eb3 997e7692d8 944b6a801e ddd9483a10
9c13d232a4 c4926cba15 321bbb6f49 81476cf693 3c69368e6b fcf09ebff5 d6c73986cc aefa2bf70a
ece27b09d6 479856a3b2 fc0d962919 0452ebfd4b 9e9e6bbe7b 3e4eea6c32 cc0fec8393 fa67ad85ac
0adc452146 7df0e309fd a4b329d622 eefd6bbee9 ce39a84a7d 514f3e7c02 4dfea3e9f0 f9ee7d13e8
ec27262c4d d34e9e61dd c38d3e1a39 5872580e65 4713234518 5836110018 3fab87edc9 b1f9ff45d4
96acb0a4eb df93682e64 22285aba13 097ad61100 c785a7058a 6736de0ce6 fe8508b948 0d40fbaef0
a4846943a3 bf2534a5e2 1ca4e70b6c 25a2802239 581a830829 43e5fda45c a638e10ba0 d5207e2af8
9ea94c44b1 aaa6a66677 7240cc3f8b 7fe4c8cb76 746f1594d7 0e4bb0530f 994c0efcc7 cf5e119d4a
1ee1e9e43f 15e9aee544 e8050f313e be424d86a8 a08cb950b2 46f8bbfc6d 562c6a7bf1 e711ccee4d
d6372e80fe 29d91e9161 583f57f04a f8f6c14f54 9e24f2a1f0 e788c6e9cb 2e681cf50f 9ddd3abe78
86bd0244ec 15de658c04 19d3f7d8ac c21b858b27 0b9bb581fd 105601c151 3a4949aa50 ec554ee747
bf3998d71e 87208b8fc4 265a628f16 a854d00acd d076d0febd a56eaa024f fdc6f6507c 976d173606
989431c02f f9bdc93723 e687d77d15 abfafb6c81 f139838d64 0a224ab102 d39b183d8d dc8054128a
001f4c7dc6 313ddbfe48 7f5bd6c72b 0be85fd80f 9f253ebb41 6242dae507 1749b0d74d 568e9062bd
5dbc75870f c91a14638e c00c380724 43625c5128 5effcfa767 1ee0cd1ad7 b594732475 ce15406e78
c9e95636a8 6e5c07f4c8 c999a8ed65 4d343a6f47 a81a6d9c80 48f0eeb2e5 d26e47bf6c 568a474a08
9a66cdbc16 ddb1149e25 f6778f58d4 e4e4d92641 de0ff4ce69 6548cb2578 f6257cf4b7 a15adb18fa
666bd5848a d94256d36c 7bb97a61df c65eadee5d a43f4bd601 8f881885c2 26521d87ba e1a4143793
b9482a6efd 88c3cc019c 9980e4df3b d4f2786cda 2744fdbd9e 1fcc2c6091 74871ac70a 9cb7f6e54a
ed6aaf579d e1b4614ab4 c3bf08d04c 12247a13e0 7503861b42 9def2f200e 7b676935ee 9550c63196
4a15240a27 a47b96bdd3 fb049da952 4a325ddeae 48ac765efe 522645e38f e891ee4bf6 ef673211e7
eaeaeb265f db315c796d 035dd77cbb e3743869e9 ce14f00dea 627f4621f5 3e8434bcea efd30c4d95
d7fddc97d4 feed0c6b6a d0e53ecff7 1ca84aa162 d5f2382d03 416849f2e0 dd37038ac7 e410dd1792
ffdc41f039 ca7e97bdcf 4ae138cb12 003f7e3dd0 85eb76a23f 5186984ee9 b5331b979b 11f3173e1b
cd17195d1c 1128b10247 6a073aa7a7 073891e875 2e341bc99a b7c8fff803 3f7e90cf0c 78d4f8cc56
de2656ec25 9686a2c2cf b863979c0f fecd7468fc 19da1a39e8 7e88df99e1 7f3f85544c 750f5baf30
a63f3f714c 1dd1ee00d5 4493af756b e904e9b720 5f896773e0 b2dcac7141 40ccc81146 1c63d61372
2ad77c60ef a1556d37b8 083a8a0037 71a939fee4 9dbd437da2 2510e1476e 0f839cff6b abe3572878
0d30e2c6f2 a0473085f3 e537dc230b 19f4943d12 bf6d1a1ca7 424b6edd19 4f48417fe7 8e3dc37bc0
0312969b9e 62beae313a 8011a29fa8 fe710f2074 bba43a1ea0 f947e965be 5c365dc979 95a9d44dc3
27558bd87e 5ab9294a8d cfd7d166e2 5bcd47cf63 0c60d5c59f cd9bdc6395 b68470707b 7046ae5593
d19e3e19d6 04597e2595 d16653c3d4 183e0eb5b9 be0b3137d0 683213230e ad0ee682b3 ba418ad400
6dcbbdc011 e43bd4fa58 25b4ed053f e1f2a6a32b 6fc3287b9c f43b6e2b1e 697a45d861 4c7879775e
a2c8db1b79 fc89f15497 e364f50718 571a4cf273 bafd38a352 350d06d63f 9f82cbf7c1 dcde8e1c90
569cb94869 0df7d7482c b2f27d2926 7e16636995 83d78fece0
.gitignore (vendored): 26 lines changed
@@ -1,17 +1,9 @@
.config
.version
*.a
*.o
*.d
*.def
*.dll
*.exe
*.ho
*.lib
*.pc
*.so
*.so.*
*.ver
*-example
*-test
*_g
@@ -30,19 +22,20 @@ ffplay
ffprobe
ffserver
avconv
doc/avoptions_codec.texi
doc/avoptions_format.texi
doc/print_options
doc/examples/decoding_encoding
doc/examples/filtering_audio
doc/examples/filtering_video
doc/examples/metadata
doc/examples/muxing
libavcodec/*_tablegen
libavcodec/*_tables.c
libavcodec/*_tables.h
libavcodec/codec_names.h
libavcodec/libavcodec*
libavcore/libavcore*
libavdevice/libavdevice*
libavfilter/libavfilter*
libavformat/libavformat*
libavutil/avconfig.h
libavutil/libavutil*
libpostproc/libpostproc*
libswresample/libswresample*
libswscale/libswscale*
tests/audiogen
tests/base64
tests/data
@@ -53,7 +46,6 @@ tests/vsynth1
tests/vsynth2
tools/aviocat
tools/cws2fws
tools/ffeval
tools/graph2dot
tools/ismindex
tools/lavfi-showfiltfmts

@@ -500,3 +500,5 @@ necessary. Here is a sample; alter the names:
Ty Coon, President of Vice

That's all there is to it!
Changelog: 99 lines changed
@@ -3,53 +3,68 @@ releases are sorted from youngest to oldest.

version next:

version 0.11.2:
version 0.10.5:

- Several bugs and crashes have been fixed as well as build problems
with recent mingw64


version 0.11:
Fixes:CVE-2012-2772, CVE-2012-2774, CVE-2012-2775, CVE-2012-2776, CVE-2012-2777,
CVE-2012-2779, CVE-2012-2782, CVE-2012-2783, CVE-2012-2784, CVE-2012-2785,
CVE-2012-2786, CVE-2012-2787, CVE-2012-2788, CVE-2012-2789, CVE-2012-2790,
CVE-2012-2791, CVE-2012-2792, CVE-2012-2793, CVE-2012-2794, CVE-2012-2795,
CVE-2012-2796, CVE-2012-2797, CVE-2012-2798, CVE-2012-2799, CVE-2012-2800,
CVE-2012-2801, CVE-2012-2802, CVE-2012-2803, CVE-2012-2804,
- v408 Quicktime and Microsoft AYUV Uncompressed 4:4:4:4 encoder and decoder
- setfield filter
- CDXL demuxer and decoder
- Apple ProRes encoder
- ffprobe -count_packets and -count_frames options
- Sun Rasterfile Encoder
- ID3v2 attached pictures reading and writing
- WMA Lossless decoder
- bluray protocol
- blackdetect filter
- libutvideo encoder wrapper (--enable-libutvideo)
version 0.10.4:

- Several bugs and crashes have been fixed
Note, CVE-2012-0851 and CVE-2011-3937 have been fixed in previous releases

version 0.10.3:

- Security fixes in the 4xm demuxer, avi demuxer, cook decoder,
mm demuxer, mpegvideo decoder, vqavideo decoder (CVE-2012-0947) and
xmv demuxer.

- Several bugs and crashes have been fixed in the following codecs: AAC,
APE, H.263, H.264, Indeo 4, Mimic, MJPEG, Motion Pixels Video, RAW,
TTA, VC1, VQA, WMA Voice, vqavideo.

- Several bugs and crashes have been fixed in the following formats:
ASF, ID3v2, MOV, xWMA

- This release additionally updates the following codecs to the
bytestream2 API, and therefore benefit from additional overflow
checks: truemotion2, utvideo, vqavideo


version 0.10.1
- Several security fixes, many bugfixes affecting many formats and
codecs, the list below is not complete.

- swapuv filter
- bbox filter
- XBM encoder and decoder
- RealAudio Lossless decoder
- ZeroCodec decoder
- tile video filter
- Metal Gear Solid: The Twin Snakes demuxer
- OpenEXR image decoder
- removelogo filter
- drop support for ffmpeg without libavfilter
- drawtext video filter: fontconfig support
- ffmpeg -benchmark_all option
- super2xsai filter ported from libmpcodecs
- add libavresample audio conversion library for compatibility
- MicroDVD decoder
- Avid Meridien (AVUI) encoder and decoder
- accept + prefix to -pix_fmt option to disable automatic conversions.
- complete audio filtering in libavfilter and ffmpeg
- add fps filter
- audio split filter
- vorbis parser
- png parser
- audio mix filter

- Several bugs and crashes have been fixed in the following codecs: AAC,
AC-3, ADPCM, AMR (both NB and WB), ATRAC3, CAVC, Cook, camstudio, DCA,
DPCM, DSI CIN, DV, EA TGQ, FLAC, fraps, G.722 (both encoder and
decoder), H.264, huvffyuv, BB JV decoder, Indeo 3, KGV1, LCL, the
libx264 wrapper, MJPEG, mp3on4, Musepack, MPEG1/2, PNG, QDM2, Qt RLE,
ROQ, RV10, RV30/RV34/RV40, shorten, smacker, subrip, SVQ3, TIFF,
Truemotion2, TTA, VC1, VMware Screen codec, Vorbis, VP5, VP6, WMA,
Westwood SNDx, XXAN.

- This release additionally updates the following codecs to the
bytestream2 API, and therefore benefit from additional overflow
checks: XXAN, ALG MM, TQG, SMC, Qt SMC, ROQ, PNG

- Several bugs and crashes have been fixed in the following formats:
AIFF, ASF, DV, Matroska, NSV, MOV, MPEG-TS, Smacker, Sony OpenMG, RM,
SWF.

- Libswscale has an potential overflow for large image size fixed.

- The following APIs have been added:

avcodec_is_open()
avformat_get_riff_video_tags()
avformat_get_riff_audio_tags()

Please see the file doc/APIchanges and the Doxygen documentation for
further information.


version 0.10:
@@ -793,7 +808,7 @@ version 0.4.5:
- MPEG-4 vol header fixes (Jonathan Marsden <snmjbm at pacbell.net>)
- ARM optimizations (Lionel Ulmer <lionel.ulmer at free.fr>).
- Windows porting of file converter
- added MJPEG raw format (input/output)
- added MJPEG raw format (input/ouput)
- added JPEG image format support (input/output)
Doxyfile: 2 lines changed
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 0.11.5
PROJECT_NUMBER = 0.10.5

# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
LICENSE: 8 lines changed
@@ -20,12 +20,8 @@ Specifically, the GPL parts of FFmpeg are

There are a handful of files under other licensing terms, namely:

* The files libavcodec/jfdctfst.c, libavcodec/jfdctint_template.c and
libavcodec/jrevdct.c are taken from libjpeg, see the top of the files for
licensing details. Specifically note that you must credit the IJG in the
documentation accompanying your program if you only distribute executables.
You must also indicate any changes including additions and deletions to
those three files in the documentation.
* The files libavcodec/jfdctfst.c, libavcodec/jfdctint.c, libavcodec/jrevdct.c
are taken from libjpeg, see the top of the files for licensing details.

Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
the configure parameter --enable-version3 will activate this licensing option
MAINTAINERS: 13 lines changed
@@ -4,7 +4,7 @@ FFmpeg maintainers
Below is a list of the people maintaining different parts of the
FFmpeg code.

Please try to keep entries where you are the maintainer up to date!
Please try to keep entries where you are the maintainer upto date!

Names in () mean that the maintainer currently has no time to maintain the code.
A CC after the name means that the maintainer prefers to be CC-ed on patches
@@ -14,6 +14,7 @@ and related discussions.
Project Leader
==============

Michael Niedermayer
final design decisions


@@ -158,7 +159,6 @@ Codecs:
indeo5* Kostya Shishkov
interplayvideo.c Mike Melanson
ivi* Kostya Shishkov
jacosub* Clément Bœsch
jpeg_ls.c Kostya Shishkov
jvdec.c Peter Ross
kmvc.c Kostya Shishkov
@@ -171,7 +171,6 @@ Codecs:
libschroedinger* David Conrad
libspeexdec.c Justin Ruggles
libtheoraenc.c David Conrad
libutvideo* Derek Buitenhuis
libvorbis.c David Conrad
libxavs.c Stefan Gehrer
libx264.c Mans Rullgard, Jason Garrett-Glaser
@@ -244,7 +243,6 @@ Codecs:
xan.c Mike Melanson
xl.c Kostya Shishkov
xvmc.c Ivan Kalvachev
zerocodec.c Derek Buitenhuis
zmbv* Kostya Shishkov

Hardware acceleration:
@@ -315,7 +313,6 @@ Muxers/Demuxers:
ipmovie.c Mike Melanson
img2.c Michael Niedermayer
iss.c Stefan Gehrer
jacosub* Clément Bœsch
jvdec.c Peter Ross
libmodplug.c Clément Bœsch
libnut.c Oded Shimon
@@ -373,7 +370,6 @@ Muxers/Demuxers:
wv.c Kostya Shishkov

Protocols:
bluray.c Petri Hintukainen
http.c Ronald S. Bultje
mms*.c Ronald S. Bultje
udp.c Luca Abeni
@@ -399,11 +395,8 @@ x86 Michael Niedermayer
Releases
========

1.2 Michael Niedermayer
1.1 Michael Niedermayer
1.0 Michael Niedermayer
0.9 Michael Niedermayer

If you want to maintain an older release, please contact us


GnuPG Fingerprints of maintainers and contributors
Makefile: 21 lines changed
@@ -19,7 +19,7 @@ PROGS := $(PROGS-yes:%=%$(EXESUF))
INSTPROGS = $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))
OBJS = $(PROGS-yes:%=%.o) cmdutils.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr base64
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
HOSTPROGS := $(TESTTOOLS:%=tests/%)
TOOLS = qt-faststart trasher
TOOLS-$(CONFIG_ZLIB) += cws2fws

@@ -31,7 +31,6 @@ ALLMANPAGES = $(BASENAMES:%=%.1)
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
FFLIBS-$(CONFIG_AVFILTER) += avfilter
FFLIBS-$(CONFIG_AVFORMAT) += avformat
FFLIBS-$(CONFIG_AVRESAMPLE) += avresample
FFLIBS-$(CONFIG_AVCODEC) += avcodec
FFLIBS-$(CONFIG_POSTPROC) += postproc
FFLIBS-$(CONFIG_SWRESAMPLE)+= swresample
@@ -40,7 +39,6 @@ FFLIBS-$(CONFIG_SWSCALE) += swscale
FFLIBS := avutil

DATA_FILES := $(wildcard $(SRC_PATH)/presets/*.ffpreset) $(SRC_PATH)/doc/ffprobe.xsd
EXAMPLES_FILES := $(wildcard $(SRC_PATH)/doc/examples/*.c) $(SRC_PATH)/doc/examples/Makefile

SKIPHEADERS = cmdutils_common_opts.h

@@ -66,11 +64,9 @@ config.h: .config
@-printf '\nWARNING: $(?F) newer than config.h, rerun configure\n\n'
@-tput sgr0 2>/dev/null

SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
ALTIVEC-OBJS ARMV5TE-OBJS ARMV6-OBJS ARMVFP-OBJS MMI-OBJS \
MMX-OBJS NEON-OBJS VIS-OBJS YASM-OBJS \
OBJS TESTOBJS
SUBDIR_VARS := OBJS FFLIBS CLEANFILES DIRS TESTPROGS EXAMPLES SKIPHEADERS \
ALTIVEC-OBJS MMX-OBJS NEON-OBJS X86-OBJS YASM-OBJS-FFT YASM-OBJS \
HOSTPROGS BUILT_HEADERS TESTOBJS ARCH_HEADERS ARMV6-OBJS TOOLS

define RESET
$(1) :=
@@ -126,10 +122,9 @@ install-progs: install-progs-yes $(PROGS)
$(Q)mkdir -p "$(BINDIR)"
$(INSTALL) -c -m 755 $(INSTPROGS) "$(BINDIR)"

install-data: $(DATA_FILES) $(EXAMPLES_FILES)
$(Q)mkdir -p "$(DATADIR)/examples"
install-data: $(DATA_FILES)
$(Q)mkdir -p "$(DATADIR)"
$(INSTALL) -m 644 $(DATA_FILES) "$(DATADIR)"
$(INSTALL) -m 644 $(EXAMPLES_FILES) "$(DATADIR)/examples"

uninstall: uninstall-libs uninstall-headers uninstall-progs uninstall-data

@@ -163,8 +158,6 @@ coverage-html: coverage.info
$(Q)genhtml -o $@ $<
$(Q)touch $@

check: all alltools checkheaders examples testprogs fate

include $(SRC_PATH)/doc/Makefile
include $(SRC_PATH)/tests/Makefile

@@ -179,5 +172,5 @@ $(sort $(OBJDIRS)):
# so this saves some time on slow systems.
.SUFFIXES:

.PHONY: all all-yes alltools check *clean config install*
.PHONY: all all-yes alltools *clean config examples install*
.PHONY: testprogs uninstall*
README: 8 lines changed
@@ -4,15 +4,9 @@ FFmpeg README
1) Documentation
----------------

* Read the documentation in the doc/ directory in git.
You can also view it online at http://ffmpeg.org/documentation.html
* Read the documentation in the doc/ directory.

2) Licensing
------------

* See the LICENSE file.

3) Build and Install
--------------------

* See the INSTALL file.
arch.mak: 13 lines changed
@@ -1,13 +0,0 @@
OBJS-$(HAVE_ARMV5TE) += $(ARMV5TE-OBJS) $(ARMV5TE-OBJS-yes)
OBJS-$(HAVE_ARMV6) += $(ARMV6-OBJS) $(ARMV6-OBJS-yes)
OBJS-$(HAVE_ARMVFP) += $(ARMVFP-OBJS) $(ARMVFP-OBJS-yes)
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)

OBJS-$(HAVE_MMI) += $(MMI-OBJS) $(MMI-OBJS-yes)

OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)

OBJS-$(HAVE_VIS) += $(VIS-OBJS) $(VIS-OBJS-yes)

OBJS-$(HAVE_MMX) += $(MMX-OBJS) $(MMX-OBJS-yes)
OBJS-$(HAVE_YASM) += $(YASM-OBJS) $(YASM-OBJS-yes)
cmdutils.c: 85 lines changed
@@ -32,13 +32,11 @@
#include "libavformat/avformat.h"
#include "libavfilter/avfilter.h"
#include "libavdevice/avdevice.h"
#include "libavresample/avresample.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#if CONFIG_POSTPROC
#include "libpostproc/postprocess.h"
#endif
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
@@ -56,10 +54,9 @@
#endif

struct SwsContext *sws_opts;
SwrContext *swr_opts;
AVDictionary *format_opts, *codec_opts;

const int this_year = 2014;
const int this_year = 2012;

static FILE *report_file;

@@ -69,7 +66,6 @@ void init_opts(void)
sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC,
NULL, NULL, NULL);
#endif
swr_opts = swr_alloc();
}

void uninit_opts(void)
@@ -78,7 +74,6 @@ void uninit_opts(void)
sws_freeContext(sws_opts);
sws_opts = NULL;
#endif
swr_free(&swr_opts);
av_dict_free(&format_opts);
av_dict_free(&codec_opts);
}
@@ -255,12 +250,14 @@ int parse_option(void *optctx, const char *opt, const char *arg,
if (!po->name && opt[0] == 'n' && opt[1] == 'o') {
/* handle 'no' bool option */
po = find_option(options, opt + 2);
if ((po->name && (po->flags & OPT_BOOL)))
bool_val = 0;
if (!(po->name && (po->flags & OPT_BOOL)))
goto unknown_opt;
bool_val = 0;
}
if (!po->name)
po = find_option(options, "default");
if (!po->name) {
unknown_opt:
av_log(NULL, AV_LOG_ERROR, "Unrecognized option '%s'\n", opt);
return AVERROR(EINVAL);
}
@@ -345,8 +342,11 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options
}
}

int locate_option(int argc, char **argv, const OptionDef *options,
const char *optname)
/*
* Return index of option opt in argv or 0 if not found.
*/
static int locate_option(int argc, char **argv, const OptionDef *options,
const char *optname)
{
const OptionDef *po;
int i;
@@ -420,10 +420,10 @@ void parse_loglevel(int argc, char **argv, const OptionDef *options)
#define FLAGS(o) ((o)->type == AV_OPT_TYPE_FLAGS) ? AV_DICT_APPEND : 0
int opt_default(const char *opt, const char *arg)
{
const AVOption *oc, *of, *os, *oswr = NULL;
const AVOption *oc, *of, *os;
char opt_stripped[128];
const char *p;
const AVClass *cc = avcodec_get_class(), *fc = avformat_get_class(), *sc, *swr_class;
const AVClass *cc = avcodec_get_class(), *fc = avformat_get_class(), *sc;

if (!(p = strchr(opt, ':')))
p = opt + strlen(opt);
@@ -449,17 +449,8 @@ int opt_default(const char *opt, const char *arg)
}
}
#endif
swr_class = swr_get_class();
if (!oc && !of && !os && (oswr = av_opt_find(&swr_class, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
int ret = av_opt_set(swr_opts, opt, arg, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error setting option %s.\n", opt);
return ret;
}
}

if (oc || of || os || oswr)
if (oc || of || os)
return 0;
av_log(NULL, AV_LOG_ERROR, "Unrecognized option '%s'\n", opt);
return AVERROR_OPTION_NOT_FOUND;
@@ -546,18 +537,6 @@ int opt_max_alloc(const char *opt, const char *arg)
return 0;
}

int opt_cpuflags(const char *opt, const char *arg)
{
int ret;
unsigned flags = av_get_cpu_flags();

if ((ret = av_parse_cpu_caps(&flags, arg)) < 0)
return ret;

av_force_cpu_flags(flags);
return 0;
}

int opt_codec_debug(const char *opt, const char *arg)
{
av_log_set_level(AV_LOG_DEBUG);
@@ -599,8 +578,7 @@ static int warned_cfg = 0;
const char *indent = flags & INDENT? " " : ""; \
if (flags & SHOW_VERSION) { \
unsigned int version = libname##_version(); \
av_log(NULL, level, \
"%slib%-11s %2d.%3d.%3d / %2d.%3d.%3d\n", \
av_log(NULL, level, "%slib%-11s %2d.%3d.%3d / %2d.%3d.%3d\n",\
indent, #libname, \
LIB##LIBNAME##_VERSION_MAJOR, \
LIB##LIBNAME##_VERSION_MINOR, \
@@ -629,7 +607,6 @@ static void print_all_libs_info(int flags, int level)
PRINT_LIB_INFO(avformat, AVFORMAT, flags, level);
PRINT_LIB_INFO(avdevice, AVDEVICE, flags, level);
PRINT_LIB_INFO(avfilter, AVFILTER, flags, level);
// PRINT_LIB_INFO(avresample, AVRESAMPLE, flags, level);
PRINT_LIB_INFO(swscale, SWSCALE, flags, level);
PRINT_LIB_INFO(swresample,SWRESAMPLE, flags, level);
#if CONFIG_POSTPROC
@@ -827,9 +804,9 @@ int opt_codecs(const char *opt, const char *arg)
decode = encode = cap = 0;
}
if (p2 && strcmp(p->name, p2->name) == 0) {
if (av_codec_is_decoder(p))
if (p->decode)
decode = 1;
if (av_codec_is_encoder(p))
if (p->encode || p->encode2)
encode = 1;
cap |= p->capabilities;
}
@@ -875,16 +852,20 @@ int opt_bsfs(const char *opt, const char *arg)

int opt_protocols(const char *opt, const char *arg)
{
void *opaque = NULL;
const char *name;
URLProtocol *up=NULL;

printf("Supported file protocols:\n"
"Input:\n");
while ((name = avio_enum_protocols(&opaque, 0)))
printf("%s\n", name);
printf("Output:\n");
while ((name = avio_enum_protocols(&opaque, 1)))
printf("%s\n", name);
"I.. = Input supported\n"
".O. = Output supported\n"
"..S = Seek supported\n"
"FLAGS NAME\n"
"----- \n");
while((up = av_protocol_next(up)))
printf("%c%c%c %s\n",
up->url_read ? 'I' : '.',
up->url_write ? 'O' : '.',
up->url_seek ? 'S' : '.',
up->name);
return 0;
}

@@ -1052,7 +1033,7 @@ FILE *get_preset_file(char *filename, size_t filename_size,
if (!f && codec_name) {
snprintf(filename, filename_size,
"%s%s/%s-%s.ffpreset",
base[i], i != 1 ? "" : "/.ffmpeg", codec_name,
base[i], i != 1 ? "" : "/.ffmpeg", codec_name,
preset_name);
f = fopen(filename, "r");
}
@@ -1076,7 +1057,7 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
case 'd': type = AVMEDIA_TYPE_DATA; break;
case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
default: av_assert0(0);
default: abort(); // never reached, silence warning
}
if (type != st->codec->codec_type)
return 0;
@@ -1109,12 +1090,6 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
return 1;
}
return 0;
} else if (*spec == '#') {
int sid;
char *endptr;
sid = strtol(spec + 1, &endptr, 0);
if (!*endptr)
return st->id == sid;
} else if (!*spec) /* empty specifier, matches everything */
return 1;
cmdutils.h: 11 lines changed
@@ -51,7 +51,6 @@ extern const int this_year;
extern AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB];
extern AVFormatContext *avformat_opts;
extern struct SwsContext *sws_opts;
extern struct SwrContext *swr_opts;
extern AVDictionary *format_opts, *codec_opts;

/**
@@ -86,8 +85,6 @@ int opt_report(const char *opt);

int opt_max_alloc(const char *opt, const char *arg);

int opt_cpuflags(const char *opt, const char *arg);

int opt_codec_debug(const char *opt, const char *arg);

/**
@@ -207,12 +204,6 @@ int parse_option(void *optctx, const char *opt, const char *arg,
*/
void parse_loglevel(int argc, char **argv, const OptionDef *options);

/**
* Return index of option opt in argv or 0 if not found.
*/
int locate_option(int argc, char **argv, const OptionDef *options,
const char *optname);

/**
* Check if the given stream matches a stream specifier.
*
@@ -358,7 +349,7 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size);
* at configuration time or in a "ffpresets" folder along the executable
* on win32, in that order. If no such file is found and
* codec_name is defined, then search for a file named
* codec_name-preset_name.avpreset in the above-mentioned directories.
* codec_name-preset_name.ffpreset in the above-mentioned directories.
*
* @param filename buffer where the name of the found filename is written
* @param filename_size size in bytes of the filename buffer
@@ -14,7 +14,5 @@
{ "loglevel", HAS_ARG, {(void*)opt_loglevel}, "set libav* logging level", "loglevel" },
{ "v", HAS_ARG, {(void*)opt_loglevel}, "set libav* logging level", "loglevel" },
{ "debug", HAS_ARG, {(void*)opt_codec_debug}, "set debug flags", "flags" },
{ "fdebug", HAS_ARG, {(void*)opt_codec_debug}, "set debug flags", "flags" },
{ "report", 0, {(void*)opt_report}, "generate a report" },
{ "max_alloc", HAS_ARG, {(void*)opt_max_alloc}, "set maximum size of a single allocated block", "bytes" },
{ "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "force specific cpu flags", "flags" },
@@ -20,7 +20,7 @@ $(foreach VAR,$(SILENT),$(eval override $(VAR) = @$($(VAR))))
$(eval INSTALL = @$(call ECHO,INSTALL,$$(^:$(SRC_DIR)/%=%)); $(INSTALL))
endif

ALLFFLIBS = avcodec avdevice avfilter avformat avresample avutil postproc swscale swresample
ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale swresample

# NASM requires -I path terminated with /
IFLAGS := -I. -I$(SRC_PATH)/
@@ -73,7 +73,7 @@ COMPILE_S = $(call COMPILE,AS)
$(OBJS):
endif

include $(SRC_PATH)/arch.mak
OBJS-$(HAVE_MMX) += $(MMX-OBJS-yes)

OBJS += $(OBJS-yes)
FFLIBS := $(FFLIBS-yes) $(FFLIBS)
@@ -115,6 +115,6 @@ OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOSTOBJS) $(TESTOBJS))

CLEANSUFFIXES = *.d *.o *~ *.ho *.map *.ver *.gcno *.gcda
DISTCLEANSUFFIXES = *.pc
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a *.exp

-include $(wildcard $(OBJS:.o=.d) $(TESTOBJS:.o=.d))
configure (vendored): 378 lines changed
@@ -59,21 +59,8 @@ cat <<EOF
|
||||
Usage: configure [options]
|
||||
Options: [defaults in brackets after descriptions]
|
||||
|
||||
Help options:
|
||||
--help print this message
|
||||
--list-decoders show all available decoders
|
||||
--list-encoders show all available encoders
|
||||
--list-hwaccels show all available hardware accelerators
|
||||
--list-demuxers show all available demuxers
|
||||
--list-muxers show all available muxers
|
||||
--list-parsers show all available parsers
|
||||
--list-protocols show all available protocols
|
||||
--list-bsfs show all available bitstream filters
|
||||
--list-indevs show all available input devices
|
||||
--list-outdevs show all available output devices
|
||||
--list-filters show all available filters
|
||||
|
||||
Standard options:
|
||||
--help print this message
|
||||
--logfile=FILE log tests and output to FILE [config.log]
|
||||
--disable-logging do not log configure debug information
|
||||
--prefix=PREFIX install in PREFIX [$prefix]
|
||||
@@ -84,22 +71,14 @@ Standard options:
|
||||
--incdir=DIR install includes in DIR [PREFIX/include]
|
||||
--mandir=DIR install man page in DIR [PREFIX/share/man]
|
||||
|
||||
Licensing options:
|
||||
Configuration options:
|
||||
--disable-static do not build static libraries [no]
|
||||
--enable-shared build shared libraries [no]
|
||||
--enable-gpl allow use of GPL code, the resulting libs
|
||||
and binaries will be under GPL [no]
|
||||
--enable-version3 upgrade (L)GPL to version 3 [no]
|
||||
--enable-nonfree allow use of nonfree code, the resulting libs
|
||||
and binaries will be unredistributable [no]
|
||||
|
||||
Configuration options:
|
||||
--disable-static do not build static libraries [no]
|
||||
--enable-shared build shared libraries [no]
|
||||
--enable-small optimize for size instead of speed
|
||||
--enable-runtime-cpudetect detect cpu capabilities at runtime (bigger binary)
|
||||
--enable-gray enable full grayscale support (slower color)
|
||||
--disable-swscale-alpha disable alpha channel support in swscale
|
||||
|
||||
Component options:
|
||||
--disable-doc do not build documentation
|
||||
--disable-ffmpeg disable ffmpeg build
|
||||
--disable-ffplay disable ffplay build
|
||||
@@ -112,22 +91,34 @@ Component options:
|
||||
--disable-swscale disable libswscale build
|
||||
--disable-postproc disable libpostproc build
|
||||
--disable-avfilter disable video filter support [no]
|
||||
--enable-avresample enable libavresample build [no]
|
||||
--disable-pthreads disable pthreads [auto]
|
||||
--disable-w32threads disable Win32 threads [auto]
|
||||
--disable-os2threads disable OS/2 threads [auto]
|
||||
--enable-x11grab enable X11 grabbing [no]
|
||||
--disable-network disable network support [no]
|
||||
--enable-gray enable full grayscale support (slower color)
|
||||
--disable-swscale-alpha disable alpha channel support in swscale
|
||||
--disable-fastdiv disable table-based division
|
||||
--enable-small optimize for size instead of speed
|
||||
--disable-aandct disable AAN DCT code
|
||||
--disable-dct disable DCT code
|
||||
--disable-fft disable FFT code
|
||||
--disable-golomb disable Golomb code
|
||||
--disable-huffman disable Huffman code
|
||||
--disable-lpc disable LPC code
|
||||
--disable-mdct disable MDCT code
|
||||
--disable-rdft disable RDFT code
|
||||
--disable-fft disable FFT code
|
||||
--enable-dxva2 enable DXVA2 code
|
||||
--enable-vaapi enable VAAPI code [autodetect]
|
||||
--enable-vda enable VDA code [autodetect]
|
||||
--enable-vda enable VDA code [autodetect]
|
||||
--enable-vdpau enable VDPAU code [autodetect]
|
||||
|
||||
Individual component options:
|
||||
--disable-dxva2 disable DXVA2 code
|
||||
--disable-vda disable VDA code
|
||||
--enable-runtime-cpudetect detect cpu capabilities at runtime (bigger binary)
|
||||
--enable-hardcoded-tables use hardcoded tables instead of runtime generation
|
||||
--disable-safe-bitstream-reader
|
||||
disable buffer boundary checking in bitreaders
|
||||
(faster, but may crash)
|
||||
--enable-memalign-hack emulate memalign, interferes with memory debuggers
|
||||
--disable-everything disable all components listed below
|
||||
--disable-encoder=NAME disable encoder NAME
|
||||
--enable-encoder=NAME enable encoder NAME
|
||||
@@ -153,26 +144,33 @@ Individual component options:
|
||||
--enable-protocol=NAME enable protocol NAME
|
||||
--disable-protocol=NAME disable protocol NAME
|
||||
--disable-protocols disable all protocols
|
||||
--enable-indev=NAME enable input device NAME
|
||||
--disable-indev=NAME disable input device NAME
|
||||
--disable-indevs disable input devices
|
||||
--enable-outdev=NAME enable output device NAME
|
||||
--disable-outdev=NAME disable output device NAME
|
||||
--disable-indevs disable input devices
|
||||
--disable-outdevs disable output devices
|
||||
--disable-devices disable all devices
|
||||
--enable-filter=NAME enable filter NAME
|
||||
--disable-filter=NAME disable filter NAME
|
||||
--disable-filters disable all filters
|
||||
--list-decoders show all available decoders
|
||||
--list-encoders show all available encoders
|
||||
--list-hwaccels show all available hardware accelerators
|
||||
--list-muxers show all available muxers
|
||||
--list-demuxers show all available demuxers
|
||||
--list-parsers show all available parsers
|
||||
--list-protocols show all available protocols
|
||||
--list-bsfs show all available bitstream filters
|
||||
--list-indevs show all available input devices
|
||||
--list-outdevs show all available output devices
|
||||
--list-filters show all available filters
|
||||
|
||||
External library support:
|
||||
--enable-avisynth enable reading of AVISynth script files [no]
|
||||
--enable-bzlib enable bzlib [autodetect]
|
||||
--enable-fontconfig enable fontconfig
|
||||
--enable-frei0r enable frei0r video filtering
|
||||
--enable-gnutls enable gnutls [no]
|
||||
--enable-libaacplus enable AAC+ encoding via libaacplus [no]
|
||||
--enable-libass enable libass subtitles rendering [no]
|
||||
--enable-libbluray enable BluRay reading using libbluray [no]
|
||||
--enable-libcelt enable CELT decoding via libcelt [no]
|
||||
--enable-libopencore-amrnb enable AMR-NB de/encoding via libopencore-amrnb [no]
|
||||
--enable-libopencore-amrwb enable AMR-WB decoding via libopencore-amrwb [no]
|
||||
@@ -180,6 +178,7 @@ External library support:
|
||||
--enable-libcdio enable audio CD grabbing with libcdio
|
||||
--enable-libdc1394 enable IIDC-1394 grabbing using libdc1394
|
||||
and libraw1394 [no]
|
||||
--enable-libdirac enable Dirac support via libdirac [no]
|
||||
--enable-libfaac enable FAAC support via libfaac [no]
|
||||
--enable-libfreetype enable libfreetype [no]
|
||||
--enable-libgsm enable GSM support via libgsm [no]
|
||||
@@ -194,11 +193,11 @@ External library support:
|
||||
--enable-libspeex enable Speex support via libspeex [no]
|
||||
--enable-libstagefright-h264 enable H.264 decoding via libstagefright [no]
|
||||
--enable-libtheora enable Theora encoding via libtheora [no]
|
||||
--enable-libutvideo enable Ut Video encoding and decoding via libutvideo [no]
|
||||
--enable-libutvideo enable Ut Video decoding via libutvideo [no]
|
||||
--enable-libv4l2 enable libv4l2/v4l-utils [no]
|
||||
--enable-libvo-aacenc enable AAC encoding via libvo-aacenc [no]
|
||||
--enable-libvo-amrwbenc enable AMR-WB encoding via libvo-amrwbenc [no]
|
||||
--enable-libvorbis enable Vorbis en/decoding via libvorbis,
|
||||
--enable-libvorbis enable Vorbis encoding via libvorbis,
|
||||
native implementation exists [no]
|
||||
--enable-libvpx enable VP8 support via libvpx [no]
|
||||
--enable-libx264 enable H.264 encoding via x264 [no]
|
||||
@@ -206,6 +205,7 @@ External library support:
|
||||
--enable-libxvid enable Xvid encoding via xvidcore,
|
||||
native MPEG-4/Xvid encoder exists [no]
|
||||
--enable-openal enable OpenAL 1.1 capture support [no]
|
||||
--enable-mlib enable Sun medialib [no]
|
||||
--enable-openssl enable openssl [no]
|
||||
--enable-zlib enable zlib [autodetect]
|
||||
|
||||
@@ -233,24 +233,11 @@ Advanced options (experts only):
|
||||
--extra-ldflags=ELDFLAGS add ELDFLAGS to LDFLAGS [$LDFLAGS]
|
||||
--extra-libs=ELIBS add ELIBS [$ELIBS]
|
||||
--extra-version=STRING version string suffix []
|
||||
--optflags override optimization-related compiler flags
|
||||
--build-suffix=SUFFIX library name suffix []
|
||||
--malloc-prefix=PREFIX prefix malloc and related names with PREFIX
|
||||
--progs-suffix=SUFFIX program name suffix []
|
||||
--arch=ARCH select architecture [$arch]
|
||||
--cpu=CPU select the minimum required CPU (affects
|
||||
instruction selection, may crash on older CPUs)
|
||||
--enable-pic build position-independent code
|
||||
--enable-sram allow use of on-chip SRAM
|
||||
--disable-symver disable symbol versioning
|
||||
--disable-fastdiv disable table-based division
|
||||
--enable-hardcoded-tables use hardcoded tables instead of runtime generation
|
||||
--disable-safe-bitstream-reader
|
||||
disable buffer boundary checking in bitreaders
|
||||
(faster, but may crash)
|
||||
--enable-memalign-hack emulate memalign, interferes with memory debuggers
|
||||
|
||||
Optimization options (experts only):
|
||||
--disable-asm disable all assembler optimizations
|
||||
--disable-altivec disable AltiVec optimizations
|
||||
--disable-amd3dnow disable 3DNow! optimizations
|
||||
@@ -264,10 +251,16 @@ Optimization options (experts only):
|
||||
--disable-armv6 disable armv6 optimizations
|
||||
--disable-armv6t2 disable armv6t2 optimizations
|
||||
--disable-armvfp disable ARM VFP optimizations
|
||||
--disable-iwmmxt disable iwmmxt optimizations
|
||||
--disable-mmi disable MMI optimizations
|
||||
--disable-neon disable NEON optimizations
|
||||
--disable-vis disable VIS optimizations
|
||||
--disable-yasm disable use of yasm assembler
|
||||
--enable-pic build position-independent code
|
||||
--malloc-prefix=PFX prefix malloc and related names with PFX
|
||||
--enable-sram allow use of on-chip SRAM
|
||||
--disable-symver disable symbol versioning
|
||||
--optflags override optimization-related compiler flags
|
||||
--postproc-version=V build libpostproc version V.
|
||||
Where V can be '$ALT_PP_VER_MAJOR.$ALT_PP_VER_MINOR.$ALT_PP_VER_MICRO' or 'current'. [$postproc_version_default]
|
||||
|
||||
@@ -278,15 +271,11 @@ Developer options (useful when working on FFmpeg itself):
|
||||
--disable-optimizations disable compiler optimizations
|
||||
--enable-extra-warnings enable more compiler warnings
|
||||
--disable-stripping disable stripping of executables and shared libraries
|
||||
--assert-level=level 0(default), 1 or 2, amount of assertion testing,
|
||||
2 causes a slowdown at runtime.
|
||||
--valgrind=VALGRIND run "make fate" tests through valgrind to detect memory
|
||||
leaks and errors, using the specified valgrind binary.
|
||||
Cannot be combined with --target-exec
|
||||
--samples=PATH location of test samples for FATE, if not set use
|
||||
\$FATE_SAMPLES at make invocation time.
|
||||
--enable-xmm-clobber-test check XMM registers for clobbering (Win64-only;
|
||||
should be used only for debugging purposes)
|
||||
|
||||
NOTE: Object files are built at the place where configure is launched.
|
||||
EOF
|
||||
@@ -574,8 +563,7 @@ print_config_mak(){
|
||||
}
|
||||
|
||||
print_config_asm(){
|
||||
enabled $1 && v=1 || v=0
|
||||
echo "%define $2 $v"
|
||||
enabled $1 && echo "%define $2"
|
||||
}
|
||||
|
||||
print_config(){
|
||||
@@ -877,7 +865,6 @@ static void sighandler(int sig){
|
||||
int func(void){
|
||||
$code
|
||||
}
|
||||
int (*func_ptr)(void) = func;
|
||||
int main(void){
|
||||
signal(SIGILL, sighandler);
|
||||
signal(SIGFPE, sighandler);
|
||||
@@ -885,7 +872,7 @@ int main(void){
|
||||
#ifdef SIGBUS
|
||||
signal(SIGBUS, sighandler);
|
||||
#endif
|
||||
return func_ptr();
|
||||
return func();
|
||||
}
|
||||
EOF
|
||||
}
|
||||
@@ -1009,12 +996,15 @@ PROGRAM_LIST="
|
||||
CONFIG_LIST="
|
||||
$COMPONENT_LIST
|
||||
$PROGRAM_LIST
|
||||
avplay
|
||||
avprobe
|
||||
avserver
|
||||
aandct
|
||||
ac3dsp
|
||||
avcodec
|
||||
avdevice
|
||||
avfilter
|
||||
avformat
|
||||
avresample
|
||||
avisynth
|
||||
bzlib
|
||||
crystalhd
|
||||
@@ -1024,18 +1014,22 @@ CONFIG_LIST="
|
||||
dxva2
|
||||
fastdiv
|
||||
fft
|
||||
fontconfig
|
||||
frei0r
|
||||
gnutls
|
||||
golomb
|
||||
gpl
|
||||
gray
|
||||
h264chroma
|
||||
h264dsp
|
||||
h264pred
|
||||
hardcoded_tables
|
||||
huffman
|
||||
libaacplus
|
||||
libass
|
||||
libbluray
|
||||
libcdio
|
||||
libcelt
|
||||
libdc1394
|
||||
libdirac
|
||||
libfaac
|
||||
libfreetype
|
||||
libgsm
|
||||
@@ -1061,9 +1055,11 @@ CONFIG_LIST="
|
||||
libx264
|
||||
libxavs
|
||||
libxvid
|
||||
lpc
|
||||
lsp
|
||||
mdct
|
||||
memalign_hack
|
||||
mlib
|
||||
mpegaudiodsp
|
||||
network
|
||||
nonfree
|
||||
@@ -1088,7 +1084,6 @@ CONFIG_LIST="
|
||||
vda
|
||||
vdpau
|
||||
version3
|
||||
xmm_clobber_test
|
||||
x11grab
|
||||
zlib
|
||||
"
|
||||
@@ -1132,6 +1127,7 @@ ARCH_EXT_LIST='
|
||||
armv6t2
|
||||
armvfp
|
||||
avx
|
||||
iwmmxt
|
||||
mmi
|
||||
mmx
|
||||
mmx2
|
||||
@@ -1161,7 +1157,6 @@ HAVE_LIST="
|
||||
attribute_may_alias
|
||||
attribute_packed
|
||||
cbrtf
|
||||
clock_gettime
|
||||
closesocket
|
||||
cmov
|
||||
dcbzl
|
||||
@@ -1189,7 +1184,6 @@ HAVE_LIST="
|
||||
GetProcessMemoryInfo
|
||||
GetProcessTimes
|
||||
getrusage
|
||||
glob
|
||||
gnu_as
|
||||
ibm_asm
|
||||
inet_aton
|
||||
@@ -1197,8 +1191,6 @@ HAVE_LIST="
|
||||
isatty
|
||||
kbhit
|
||||
ldbrx
|
||||
libdc1394_1
|
||||
libdc1394_2
|
||||
llrint
|
||||
llrintf
|
||||
local_aligned_16
|
||||
@@ -1221,7 +1213,6 @@ HAVE_LIST="
|
||||
PeekNamedPipe
|
||||
poll_h
|
||||
posix_memalign
|
||||
pthread_cancel
|
||||
round
|
||||
roundf
|
||||
sched_getaffinity
|
||||
@@ -1237,7 +1228,6 @@ HAVE_LIST="
|
||||
struct_addrinfo
|
||||
struct_ipv6_mreq
|
||||
struct_rusage_ru_maxrss
|
||||
struct_sctp_event_subscribe
|
||||
struct_sockaddr_in6
|
||||
struct_sockaddr_sa_len
|
||||
struct_sockaddr_storage
|
||||
@@ -1267,17 +1257,9 @@ HAVE_LIST="
|
||||
|
||||
# options emitted with CONFIG_ prefix but not available on command line
|
||||
CONFIG_EXTRA="
|
||||
aandct
|
||||
avutil
|
||||
golomb
|
||||
gplv3
|
||||
h264chroma
|
||||
h264dsp
|
||||
h264pred
|
||||
h264qpel
|
||||
huffman
|
||||
lgplv3
|
||||
lpc
|
||||
"
|
||||
|
||||
CMDLINE_SELECT="
|
||||
@@ -1311,8 +1293,8 @@ CMDLINE_SET="
|
||||
ar
|
||||
arch
|
||||
as
|
||||
assert_level
|
||||
build_suffix
|
||||
progs_suffix
|
||||
cc
|
||||
cpu
|
||||
cross_prefix
|
||||
@@ -1331,8 +1313,6 @@ CMDLINE_SET="
|
||||
nm
|
||||
optflags
|
||||
pkg_config
|
||||
postproc_version
|
||||
progs_suffix
|
||||
samples
|
||||
strip
|
||||
sysinclude
|
||||
@@ -1340,6 +1320,7 @@ CMDLINE_SET="
|
||||
target_exec
|
||||
target_os
|
||||
target_path
|
||||
postproc_version
|
||||
valgrind
|
||||
yasmexe
|
||||
"
|
||||
@@ -1357,6 +1338,7 @@ armv5te_deps="arm"
|
||||
armv6_deps="arm"
|
||||
armv6t2_deps="arm"
|
||||
armvfp_deps="arm"
|
||||
iwmmxt_deps="arm"
|
||||
neon_deps="arm"
|
||||
vfpv3_deps="armvfp"
|
||||
|
||||
@@ -1434,7 +1416,7 @@ h263_vaapi_hwaccel_select="vaapi h263_decoder"
|
||||
h263i_decoder_select="h263_decoder"
|
||||
h263p_encoder_select="h263_encoder"
|
||||
h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser"
|
||||
h264_decoder_select="golomb h264chroma h264dsp h264pred h264qpel"
|
||||
h264_decoder_select="golomb h264chroma h264dsp h264pred"
|
||||
h264_dxva2_hwaccel_deps="dxva2api_h"
|
||||
h264_dxva2_hwaccel_select="dxva2 h264_decoder"
|
||||
h264_vaapi_hwaccel_select="vaapi h264_decoder"
|
||||
@@ -1491,13 +1473,12 @@ png_encoder_select="zlib"
|
||||
qcelp_decoder_select="lsp"
|
||||
qdm2_decoder_select="mdct rdft mpegaudiodsp"
|
||||
ra_144_encoder_select="lpc"
|
||||
ralf_decoder_select="golomb"
|
||||
rv10_decoder_select="h263_decoder"
|
||||
rv10_encoder_select="h263_encoder"
|
||||
rv20_decoder_select="h263_decoder"
|
||||
rv20_encoder_select="h263_encoder"
|
||||
rv30_decoder_select="golomb h264chroma h264pred h264qpel"
|
||||
rv40_decoder_select="golomb h264chroma h264pred h264qpel"
|
||||
rv30_decoder_select="golomb h264chroma h264pred"
|
||||
rv40_decoder_select="golomb h264chroma h264pred"
|
||||
shorten_decoder_select="golomb"
|
||||
sipr_decoder_select="lsp"
|
||||
snow_decoder_select="dwt"
|
||||
@@ -1506,7 +1487,7 @@ sonic_decoder_select="golomb"
|
||||
sonic_encoder_select="golomb"
|
||||
sonic_ls_encoder_select="golomb"
|
||||
svq1_encoder_select="aandct"
|
||||
svq3_decoder_select="golomb h264chroma h264dsp h264pred h264qpel"
|
||||
svq3_decoder_select="golomb h264chroma h264dsp h264pred"
|
||||
svq3_decoder_suggest="zlib"
|
||||
theora_decoder_select="vp3_decoder"
|
||||
tiff_decoder_suggest="zlib"
|
||||
@@ -1515,7 +1496,7 @@ truehd_decoder_select="mlp_decoder"
|
||||
tscc_decoder_select="zlib"
|
||||
twinvq_decoder_select="mdct lsp sinewin"
|
||||
vc1_crystalhd_decoder_select="crystalhd"
|
||||
vc1_decoder_select="h263_decoder h264chroma h264qpel"
|
||||
vc1_decoder_select="h263_decoder h264chroma"
|
||||
vc1_dxva2_hwaccel_deps="dxva2api_h"
|
||||
vc1_dxva2_hwaccel_select="dxva2 vc1_decoder"
|
||||
vc1_vaapi_hwaccel_select="vaapi vc1_decoder"
|
||||
@@ -1526,7 +1507,7 @@ vorbis_encoder_select="mdct"
|
||||
vp6_decoder_select="huffman"
|
||||
vp6a_decoder_select="vp6_decoder"
|
||||
vp6f_decoder_select="vp6_decoder"
|
||||
vp8_decoder_select="h264pred h264qpel"
|
||||
vp8_decoder_select="h264pred"
|
||||
wmapro_decoder_select="mdct sinewin"
|
||||
wmav1_decoder_select="mdct sinewin"
|
||||
wmav1_encoder_select="mdct sinewin"
|
||||
@@ -1543,7 +1524,6 @@ wmv3_dxva2_hwaccel_select="vc1_dxva2_hwaccel"
|
||||
wmv3_vaapi_hwaccel_select="vc1_vaapi_hwaccel"
|
||||
wmv3_vdpau_decoder_select="vc1_vdpau_decoder"
|
||||
wmv3image_decoder_select="wmv3_decoder"
|
||||
zerocodec_decoder_select="zlib"
|
||||
zlib_decoder_select="zlib"
|
||||
zlib_encoder_select="zlib"
|
||||
zmbv_decoder_select="zlib"
|
||||
@@ -1555,11 +1535,13 @@ vda_deps="VideoDecodeAcceleration_VDADecoder_h pthreads"
|
||||
vdpau_deps="vdpau_vdpau_h vdpau_vdpau_x11_h"
|
||||
|
||||
# parsers
|
||||
h264_parser_select="golomb h264dsp h264pred"
|
||||
h264_parser_select="golomb h264chroma h264dsp h264pred"
|
||||
|
||||
# external libraries
|
||||
libaacplus_encoder_deps="libaacplus"
|
||||
libcelt_decoder_deps="libcelt"
|
||||
libdirac_decoder_deps="libdirac !libschroedinger"
|
||||
libdirac_encoder_deps="libdirac"
|
||||
libfaac_encoder_deps="libfaac"
|
||||
libgsm_decoder_deps="libgsm"
|
||||
libgsm_encoder_deps="libgsm"
|
||||
@@ -1580,7 +1562,6 @@ libstagefright_h264_decoder_deps="libstagefright_h264"
|
||||
libtheora_encoder_deps="libtheora"
|
||||
libvo_aacenc_encoder_deps="libvo_aacenc"
|
||||
libvo_amrwbenc_encoder_deps="libvo_amrwbenc"
|
||||
libvorbis_decoder_deps="libvorbis"
|
||||
libvorbis_encoder_deps="libvorbis"
|
||||
libvpx_decoder_deps="libvpx"
|
||||
libvpx_encoder_deps="libvpx"
|
||||
@@ -1588,8 +1569,7 @@ libx264_encoder_deps="libx264"
|
||||
libx264rgb_encoder_deps="libx264"
|
||||
libxavs_encoder_deps="libxavs"
|
||||
libxvid_encoder_deps="libxvid"
|
||||
libutvideo_decoder_deps="libutvideo"
|
||||
libutvideo_encoder_deps="libutvideo"
|
||||
libutvideo_decoder_deps="libutvideo gpl"
|
||||
|
||||
# demuxers / muxers
|
||||
ac3_demuxer_select="ac3_parser"
|
||||
@@ -1647,76 +1627,58 @@ v4l2_indev_deps_any="linux_videodev2_h sys_videoio_h"
|
||||
vfwcap_indev_deps="capCreateCaptureWindow vfwcap_defines"
|
||||
vfwcap_indev_extralibs="-lavicap32"
|
||||
x11_grab_device_indev_deps="x11grab XShmCreateImage"
|
||||
x11_grab_device_indev_extralibs="-lX11 -lXext -lXfixes"
|
||||
|
||||
# protocols
|
||||
bluray_protocol_deps="libbluray"
|
||||
gopher_protocol_deps="network"
|
||||
httpproxy_protocol_deps="network"
|
||||
httpproxy_protocol_select="tcp_protocol"
|
||||
http_protocol_deps="network"
|
||||
http_protocol_select="tcp_protocol"
|
||||
https_protocol_select="tls_protocol"
|
||||
librtmp_protocol_deps="librtmp"
|
||||
librtmpe_protocol_deps="librtmp"
|
||||
librtmps_protocol_deps="librtmp"
|
||||
librtmpt_protocol_deps="librtmp"
|
||||
librtmpte_protocol_deps="librtmp"
|
||||
mmsh_protocol_select="http_protocol"
|
||||
mmst_protocol_deps="network"
|
||||
rtmp_protocol_deps="!librtmp_protocol"
|
||||
rtmp_protocol_select="tcp_protocol"
|
||||
rtp_protocol_select="udp_protocol"
|
||||
sctp_protocol_deps="network struct_sctp_event_subscribe"
|
||||
tcp_protocol_deps="network"
|
||||
tls_protocol_deps_any="openssl gnutls"
|
||||
tls_protocol_select="tcp_protocol"
|
||||
udp_protocol_deps="network"
|
||||
|
||||
# filters
|
||||
aconvert_filter_deps="swresample"
|
||||
amovie_filter_deps="avcodec avformat"
|
||||
aresample_filter_deps="swresample"
|
||||
ass_filter_deps="libass"
|
||||
asyncts_filter_deps="avresample"
|
||||
blackframe_filter_deps="gpl"
|
||||
boxblur_filter_deps="gpl"
|
||||
colormatrix_filter_deps="gpl"
|
||||
cropdetect_filter_deps="gpl"
|
||||
delogo_filter_deps="gpl"
|
||||
deshake_filter_deps="avcodec"
|
||||
drawtext_filter_deps="libfreetype"
|
||||
frei0r_filter_deps="frei0r dlopen"
|
||||
frei0r_filter_extralibs='$ldl'
|
||||
frei0r_src_filter_deps="frei0r dlopen"
|
||||
frei0r_src_filter_extralibs='$ldl'
|
||||
hqdn3d_filter_deps="gpl"
|
||||
movie_filter_deps="avcodec avformat"
|
||||
mp_filter_deps="gpl avcodec swscale postproc"
|
||||
mp_filter_deps="gpl avcodec"
|
||||
mptestsrc_filter_deps="gpl"
|
||||
negate_filter_deps="lut_filter"
|
||||
resample_filter_deps="avresample"
|
||||
ocv_filter_deps="libopencv"
|
||||
pan_filter_deps="swresample"
|
||||
removelogo_filter_deps="avcodec avformat swscale"
|
||||
scale_filter_deps="swscale"
|
||||
super2xsai_filter_deps="gpl"
|
||||
tinterlace_filter_deps="gpl"
|
||||
yadif_filter_deps="gpl"
|
||||
|
||||
# libraries
|
||||
avdevice_deps="avcodec avformat"
|
||||
avfilter_deps="swscale"
|
||||
avformat_deps="avcodec"
|
||||
postproc_deps="gpl"
|
||||
|
||||
# programs
|
||||
ffplay_deps="avcodec avformat swscale swresample sdl"
|
||||
ffplay_deps="avcodec avformat swscale sdl"
|
||||
ffplay_select="buffersink_filter rdft"
|
||||
ffprobe_deps="avcodec avformat"
|
||||
ffserver_deps="avformat ffm_muxer fork rtp_protocol rtsp_demuxer"
|
||||
ffserver_extralibs='$ldl'
|
||||
ffmpeg_deps="avcodec avfilter avformat swscale swresample"
|
||||
ffmpeg_select="buffersink_filter format_filter aformat_filter"
|
||||
ffmpeg_deps="avcodec avformat swscale swresample"
|
||||
ffmpeg_select="buffersink_filter"
|
||||
|
||||
doc_deps="texi2html"
|
||||
|
||||
@@ -1800,11 +1762,11 @@ test_deps _muxer _demuxer \
|
||||
gxf \
|
||||
matroska=mkv \
|
||||
mmf \
|
||||
mov="mov ismv" \
|
||||
mov \
|
||||
pcm_mulaw=mulaw \
|
||||
mxf="mxf mxf_d10" \
|
||||
nut \
|
||||
ogg="ogg ogg_vp3" \
|
||||
ogg \
|
||||
rawvideo=pixfmt \
|
||||
rm \
|
||||
swf \
|
||||
@@ -1813,20 +1775,8 @@ test_deps _muxer _demuxer \
|
||||
wav \
|
||||
yuv4mpegpipe=yuv4mpeg \
|
||||
|
||||
ac3_fixed_test_deps="ac3_fixed_encoder ac3_decoder"
|
||||
colormatrix1_test_deps="colormatrix_filter"
|
||||
colormatrix2_test_deps="colormatrix_filter"
|
||||
flashsv2_test_deps="zlib"
|
||||
ac3_fixed_test_deps="ac3_fixed_encoder ac3_decoder rm_muxer rm_demuxer"
|
||||
mpg_test_deps="mpeg1system_muxer mpegps_demuxer"
|
||||
mpng_test_deps="zlib"
|
||||
pp_test_deps="mp_filter"
|
||||
pp2_test_deps="mp_filter"
|
||||
pp3_test_deps="mp_filter"
|
||||
pp4_test_deps="mp_filter"
|
||||
pp5_test_deps="mp_filter"
|
||||
pp6_test_deps="mp_filter"
|
||||
zlib_test_deps="zlib"
|
||||
zmbv_test_deps="zlib"
|
||||
|
||||
# default parameters
|
||||
|
||||
@@ -1984,12 +1934,11 @@ find_tests(){
|
||||
|
||||
ACODEC_TESTS=$(find_tests acodec)
|
||||
VCODEC_TESTS=$(find_tests vsynth1)
|
||||
LAVF_FATE_TESTS=$(find_tests lavf-fate)
|
||||
LAVF_TESTS=$(find_tests lavf)
|
||||
LAVFI_TESTS=$(find_tests lavfi)
|
||||
SEEK_TESTS=$(find_tests seek seek_)
|
||||
|
||||
ALL_TESTS="$ACODEC_TESTS $VCODEC_TESTS $LAVF_FATE_TESTS $LAVF_TESTS $LAVFI_TESTS $SEEK_TESTS"
|
||||
ALL_TESTS="$ACODEC_TESTS $VCODEC_TESTS $LAVF_TESTS $LAVFI_TESTS $SEEK_TESTS"
|
||||
|
||||
pcm_test_deps=$(map 'echo ${v%_*}_decoder $v' $(filter pcm_* $ENCODER_LIST))
|
||||
|
||||
@@ -2352,24 +2301,6 @@ elif $cc -v 2>&1 | grep -q Open64; then
|
||||
speed_cflags='-O2'
|
||||
size_cflags='-Os'
|
||||
filter_cflags='filter_out -Wdisabled-optimization|-Wtype-limits|-fno-signed-zeros'
|
||||
elif $cc -V 2>&1 | grep -q Portland; then
|
||||
cc_type=pgi
|
||||
cc_version='AV_STRINGIFY(__PGIC__.__PGIC_MINOR__.__PGIC_PATCHLEVEL__)'
|
||||
cc_ident="PGI $($cc -V 2>&1 | awk '/^pgcc/ { print $2; exit }')"
|
||||
opt_common='-alias=ansi -Mlre -Mpre'
|
||||
speed_cflags="-O3 -Mautoinline -Munroll=c:4 $opt_common"
|
||||
size_cflags="-O2 -Munroll=c:1 $opt_common"
|
||||
noopt_cflags="-O1"
|
||||
filter_cflags=pgi_flags
|
||||
pgi_flags(){
|
||||
for flag; do
|
||||
case $flag in
|
||||
-fomit-frame-pointer) echo -Mnoframe ;;
|
||||
-g) echo -gopt ;;
|
||||
*) echo $flag ;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
fi
|
||||
|
||||
test -n "$cc_type" && enable $cc_type ||
|
||||
@@ -2489,12 +2420,9 @@ elif enabled ppc; then
|
||||
74*|ppc74*|powerpc74*)
|
||||
cpuflags="-mcpu=7400 -mpowerpc-gfxopt"
|
||||
;;
|
||||
g5|970|ppc970|powerpc970)
|
||||
g5|970|ppc970|powerpc970|power4*)
|
||||
cpuflags="-mcpu=970 -mpowerpc-gfxopt -mpowerpc64"
|
||||
;;
|
||||
power[3-7]*)
|
||||
cpuflags="-mcpu=$cpu -mpowerpc-gfxopt -mpowerpc64"
|
||||
;;
|
||||
cell)
|
||||
cpuflags="-mcpu=cell"
|
||||
enable ldbrx
|
||||
@@ -2666,7 +2594,7 @@ case $target_os in
|
||||
SHFLAGS='-shared -Wl,-h,$$(@F)'
|
||||
enabled x86 && SHFLAGS="-mimpure-text $SHFLAGS"
|
||||
network_extralibs="-lsocket -lnsl"
|
||||
add_cppflags -D__EXTENSIONS__ -D_XOPEN_SOURCE=600
|
||||
add_cppflags -D__EXTENSIONS__
|
||||
# When using suncc to build, the Solaris linker will mark
|
||||
# an executable with each instruction set encountered by
|
||||
# the Solaris assembler. As our libraries contain their own
|
||||
@@ -2681,6 +2609,7 @@ case $target_os in
|
||||
oss_outdev_extralibs="-lossaudio"
|
||||
;;
|
||||
openbsd)
|
||||
enable malloc_aligned
|
||||
# On OpenBSD 4.5, the compiler does not use PIC unless
|
||||
# explicitly using -fPIC. FFmpeg builds fine without PIC,
|
||||
# however the generated executable will not do anything
|
||||
@@ -2693,18 +2622,21 @@ case $target_os in
|
||||
oss_outdev_extralibs="-lossaudio"
|
||||
;;
|
||||
dragonfly)
|
||||
enable malloc_aligned
|
||||
disable symver
|
||||
;;
|
||||
freebsd)
|
||||
enable malloc_aligned
|
||||
;;
|
||||
bsd/os)
|
||||
add_extralibs -lpoll -lgnugetopt
|
||||
strip="strip -d"
|
||||
;;
|
||||
darwin)
|
||||
enable malloc_aligned
|
||||
gas="gas-preprocessor.pl $cc"
|
||||
enabled ppc && add_asflags -force_cpusubtype_ALL
|
||||
SHFLAGS='-dynamiclib -Wl,-single_module -Wl,-install_name,$(SHLIBDIR)/$(SLIBNAME_WITH_MAJOR),-current_version,$(LIBVERSION),-compatibility_version,$(LIBMAJOR)'
|
||||
SHFLAGS='-dynamiclib -Wl,-single_module -Wl,-install_name,$(SHLIBDIR)/$(SLIBNAME),-current_version,$(LIBVERSION),-compatibility_version,$(LIBMAJOR)'
|
||||
enabled x86_32 && append SHFLAGS -Wl,-read_only_relocs,suppress
|
||||
strip="${strip} -x"
|
||||
add_ldflags -Wl,-dynamic,-search_paths_first
|
||||
@@ -2725,6 +2657,7 @@ case $target_os in
|
||||
fi
|
||||
LIBTARGET=i386
|
||||
if enabled x86_64; then
|
||||
enable malloc_aligned
|
||||
LIBTARGET=x64
|
||||
elif enabled arm; then
|
||||
LIBTARGET=arm-wince
|
||||
@@ -2829,11 +2762,7 @@ case $target_os in
|
||||
;;
|
||||
esac
|
||||
|
||||
esc(){
|
||||
echo "$*" | sed 's/%/%25/g;s/:/%3a/g'
|
||||
}
|
||||
|
||||
echo "config:$arch:$subarch:$cpu:$target_os:$(esc $cc_ident):$(esc $FFMPEG_CONFIGURATION)" >config.fate
|
||||
echo "config:$arch:$subarch:$cpu:$target_os:$cc_ident:$FFMPEG_CONFIGURATION" >config.fate
|
||||
|
||||
check_cpp_condition stdlib.h "defined(__PIC__) || defined(__pic__) || defined(PIC)" && enable pic
|
||||
|
||||
@@ -2854,12 +2783,7 @@ die_license_disabled() {
|
||||
enabled $1 || { enabled $2 && die "$2 is $1 and --enable-$1 is not specified."; }
|
||||
}
|
||||
|
||||
die_license_disabled_gpl() {
|
||||
enabled $1 || { enabled $2 && die "$2 is incompatible with the gpl and --enable-$1 is not specified."; }
|
||||
}
|
||||
|
||||
die_license_disabled gpl libcdio
|
||||
die_license_disabled gpl libutvideo
|
||||
die_license_disabled gpl libx264
|
||||
die_license_disabled gpl libxavs
|
||||
die_license_disabled gpl libxvid
|
||||
@@ -2867,7 +2791,7 @@ die_license_disabled gpl x11grab
|
||||
|
||||
die_license_disabled nonfree libaacplus
|
||||
die_license_disabled nonfree libfaac
|
||||
enabled gpl && die_license_disabled_gpl nonfree openssl
|
||||
die_license_disabled nonfree openssl
|
||||
|
||||
die_license_disabled version3 libopencore_amrnb
|
||||
die_license_disabled version3 libopencore_amrwb
|
||||
@@ -2943,6 +2867,7 @@ EOF
|
||||
enabled armv6 && check_asm armv6 '"sadd16 r0, r0, r0"'
|
||||
enabled armv6t2 && check_asm armv6t2 '"movt r0, #0"'
|
||||
enabled armvfp && check_asm armvfp '"fadds s0, s0, s0"'
|
||||
enabled iwmmxt && check_asm iwmmxt '"wunpckelub wr6, wr4"'
|
||||
enabled neon && check_asm neon '"vadd.i16 q0, q0, q0"'
|
||||
enabled vfpv3 && check_asm vfpv3 '"vmov.f32 s0, #1.0"'
|
||||
|
||||
@@ -3071,7 +2996,6 @@ if enabled network; then
|
||||
check_type netinet/in.h "struct sockaddr_in6"
|
||||
check_type "sys/types.h sys/socket.h" "struct sockaddr_storage"
|
||||
check_struct "sys/types.h sys/socket.h" "struct sockaddr" sa_len
|
||||
check_type netinet/sctp.h "struct sctp_event_subscribe"
|
||||
# Prefer arpa/inet.h over winsock2
|
||||
if check_header arpa/inet.h ; then
|
||||
check_func closesocket
|
||||
@@ -3094,7 +3018,6 @@ fi
|
||||
# Solaris has nanosleep in -lrt, OpenSolaris no longer needs that
|
||||
check_func nanosleep || { check_func nanosleep -lrt && add_extralibs -lrt; }
|
||||
|
||||
check_func clock_gettime || { check_func clock_gettime -lrt && add_extralibs -lrt; }
|
||||
check_func fcntl
|
||||
check_func fork
|
||||
check_func getaddrinfo $network_extralibs
|
||||
@@ -3123,7 +3046,6 @@ check_func_headers windows.h GetProcessAffinityMask
|
||||
check_func_headers windows.h GetProcessTimes
|
||||
check_func_headers windows.h MapViewOfFile
|
||||
check_func_headers windows.h VirtualAlloc
|
||||
check_func_headers glob.h glob
|
||||
|
||||
check_header dlfcn.h
|
||||
check_header dxva.h
|
||||
@@ -3183,10 +3105,6 @@ for thread in $THREADS_LIST; do
|
||||
fi
|
||||
done
|
||||
|
||||
if enabled pthreads; then
|
||||
check_func pthread_cancel
|
||||
fi
|
||||
|
||||
check_lib math.h sin -lm && LIBM="-lm"
|
||||
disabled crystalhd || check_lib libcrystalhd/libcrystalhd_if.h DtsCrystalHDVersion -lcrystalhd || disable crystalhd
|
||||
enabled vaapi && require vaapi va/va.h vaInitialize -lva
|
||||
@@ -3207,15 +3125,17 @@ check_mathfunc truncf
|
||||
|
||||
# these are off by default, so fail if requested and not available
|
||||
enabled avisynth && require2 vfw32 "windows.h vfw.h" AVIFileInit -lavifil32
|
||||
enabled fontconfig && require_pkg_config fontconfig "fontconfig/fontconfig.h" FcInit
|
||||
enabled frei0r && { check_header frei0r.h || die "ERROR: frei0r.h header not found"; }
|
||||
enabled gnutls && require_pkg_config gnutls gnutls/gnutls.h gnutls_global_init
|
||||
enabled libaacplus && require "libaacplus >= 2.0.0" aacplus.h aacplusEncOpen -laacplus
|
||||
enabled libass && require_pkg_config libass ass/ass.h ass_library_init
|
||||
enabled libbluray && require_pkg_config libbluray libbluray/bluray.h bd_open
|
||||
enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 &&
|
||||
{ check_lib celt/celt.h celt_decoder_create_custom -lcelt0 ||
|
||||
die "ERROR: libcelt version must be >= 0.11.0."; }
|
||||
enabled libdc1394 && require_pkg_config libdc1394-2 dc1394/dc1394.h dc1394_new
|
||||
enabled libdirac && require_pkg_config dirac \
|
||||
"libdirac_decoder/dirac_parser.h libdirac_encoder/dirac_encoder.h" \
|
||||
"dirac_decoder_init dirac_encoder_init"
|
||||
enabled libfaac && require2 libfaac "stdint.h faac.h" faacEncGetVersion -lfaac
|
||||
enabled libfreetype && require_pkg_config freetype2 "ft2build.h freetype/freetype.h" FT_Init_FreeType
|
||||
enabled libgsm && require libgsm gsm/gsm.h gsm_create -lgsm
|
||||
@@ -3232,7 +3152,7 @@ enabled libschroedinger && require_pkg_config schroedinger-1.0 schroedinger/schr
|
||||
enabled libspeex && require libspeex speex/speex.h speex_decoder_init -lspeex
|
||||
enabled libstagefright_h264 && require_cpp libstagefright_h264 "binder/ProcessState.h media/stagefright/MetaData.h
|
||||
media/stagefright/MediaBufferGroup.h media/stagefright/MediaDebug.h media/stagefright/MediaDefs.h
|
||||
media/stagefright/OMXClient.h media/stagefright/OMXCodec.h" android::OMXClient -lstagefright -lmedia -lutils -lbinder -lgnustl_static
|
||||
media/stagefright/OMXClient.h media/stagefright/OMXCodec.h" android::OMXClient -lstagefright -lmedia -lutils -lbinder
|
||||
enabled libtheora && require libtheora theora/theoraenc.h th_info_init -ltheoraenc -ltheoradec -logg
|
||||
enabled libutvideo && require_cpp utvideo "stdint.h stdlib.h utvideo/utvideo.h utvideo/Codec.h" 'CCodec*' -lutvideo -lstdc++
|
||||
enabled libv4l2 && require_pkg_config libv4l2 libv4l2.h v4l2_ioctl
|
||||
@@ -3254,22 +3174,14 @@ enabled openal && { { for al_libs in "${OPENAL_LIBS}" "-lopenal" "-lOpenAL32
|
||||
die "ERROR: openal not found"; } &&
|
||||
{ check_cpp_condition "AL/al.h" "defined(AL_VERSION_1_1)" ||
|
||||
die "ERROR: openal version must be 1.1 or compatible"; }
|
||||
enabled mlib && require mediaLib mlib_types.h mlib_VectorSub_S16_U8_Mod -lmlib
|
||||
enabled openssl && { check_lib openssl/ssl.h SSL_library_init -lssl -lcrypto ||
|
||||
check_lib openssl/ssl.h SSL_library_init -lssl32 -leay32 ||
|
||||
check_lib openssl/ssl.h SSL_library_init -lssl -lcrypto -lws2_32 -lgdi32 ||
|
||||
die "ERROR: openssl not found"; }
|
||||
|
||||
# libdc1394 check
|
||||
if enabled libdc1394; then
|
||||
{ check_lib dc1394/dc1394.h dc1394_new -ldc1394 -lraw1394 &&
|
||||
enable libdc1394_2; } ||
|
||||
{ check_lib libdc1394/dc1394_control.h dc1394_create_handle -ldc1394_control -lraw1394 &&
|
||||
enable libdc1394_1; } ||
|
||||
die "ERROR: No version of libdc1394 found "
|
||||
fi
|
||||
|
||||
SDL_CONFIG="${cross_prefix}sdl-config"
|
||||
if check_pkg_config sdl SDL_events.h SDL_PollEvent; then
|
||||
if check_pkg_config sdl SDL_version.h SDL_Linked_Version; then
|
||||
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x010201" $sdl_cflags &&
|
||||
enable sdl &&
|
||||
check_struct SDL.h SDL_VideoInfo current_w $sdl_cflags && enable sdl_video_size
|
||||
@@ -3291,14 +3203,7 @@ makeinfo --version > /dev/null 2>&1 && enable makeinfo || disable makeinfo
|
||||
check_header linux/fb.h
|
||||
check_header linux/videodev.h
|
||||
check_header linux/videodev2.h
|
||||
check_cc <<EOF && enable_safe struct_v4l2_frmivalenum_discrete
|
||||
#include <linux/videodev2.h>
|
||||
int main(void) {
|
||||
struct v4l2_frmsizeenum vfse;
|
||||
vfse.discrete.width = 0;
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
check_struct linux/videodev2.h "struct v4l2_frmivalenum" discrete
|
||||
|
||||
check_header sys/videoio.h
|
||||
|
||||
@@ -3338,10 +3243,13 @@ enabled_any sndio_indev sndio_outdev && check_lib2 sndio.h sio_open -lsndio
|
||||
enabled libcdio &&
|
||||
check_lib2 "cdio/cdda.h cdio/paranoia.h" cdio_cddap_open "-lcdio_paranoia -lcdio_cdda -lcdio"
|
||||
|
||||
enabled x11grab &&
|
||||
require X11 X11/Xlib.h XOpenDisplay -lX11 &&
|
||||
require Xext X11/extensions/XShm.h XShmCreateImage -lXext &&
|
||||
require Xfixes X11/extensions/Xfixes.h XFixesGetCursorImage -lXfixes
|
||||
enabled x11grab &&
|
||||
check_header X11/Xlib.h &&
|
||||
check_header X11/extensions/XShm.h &&
|
||||
check_header X11/extensions/Xfixes.h &&
|
||||
check_func XOpenDisplay -lX11 &&
|
||||
check_func XShmCreateImage -lX11 -lXext &&
|
||||
check_func XFixesGetCursorImage -lX11 -lXext -lXfixes
|
||||
|
||||
if ! disabled vaapi; then
|
||||
check_lib va/va.h vaInitialize -lva && {
|
||||
@@ -3371,6 +3279,7 @@ check_cflags -Wdisabled-optimization
|
||||
check_cflags -Wpointer-arith
|
||||
check_cflags -Wredundant-decls
|
||||
check_cflags -Wno-pointer-sign
|
||||
check_cflags -Wcast-qual
|
||||
check_cflags -Wwrite-strings
|
||||
check_cflags -Wtype-limits
|
||||
check_cflags -Wundef
|
||||
@@ -3381,20 +3290,9 @@ enabled extra_warnings && check_cflags -Winline
|
||||
|
||||
# add some linker flags
|
||||
check_ldflags -Wl,--warn-common
|
||||
check_ldflags -Wl,-rpath-link=libpostproc:libswresample:libswscale:libavfilter:libavdevice:libavformat:libavcodec:libavutil:libavresample
|
||||
check_ldflags -Wl,-rpath-link=libpostproc:libswresample:libswscale:libavfilter:libavdevice:libavformat:libavcodec:libavutil
|
||||
test_ldflags -Wl,-Bsymbolic && append SHFLAGS -Wl,-Bsymbolic
|
||||
|
||||
enabled xmm_clobber_test && \
|
||||
check_ldflags -Wl,--wrap,avcodec_open2 \
|
||||
-Wl,--wrap,avcodec_decode_audio4 \
|
||||
-Wl,--wrap,avcodec_decode_video2 \
|
||||
-Wl,--wrap,avcodec_decode_subtitle2 \
|
||||
-Wl,--wrap,avcodec_encode_audio2 \
|
||||
-Wl,--wrap,avcodec_encode_video \
|
||||
-Wl,--wrap,avcodec_encode_subtitle \
|
||||
-Wl,--wrap,sws_scale || \
|
||||
disable xmm_clobber_test
|
||||
|
||||
echo "X{};" > $TMPV
|
||||
if test_ldflags -Wl,--version-script,$TMPV; then
|
||||
append SHFLAGS '-Wl,--version-script,\$(SUBDIR)lib\$(NAME).ver'
|
||||
@@ -3430,11 +3328,10 @@ if enabled icc; then
|
||||
# -wd: Disable following warnings
|
||||
# 144, 167, 556: -Wno-pointer-sign
|
||||
# 1292: attribute "foo" ignored
|
||||
# 1419: external declaration in primary source file
|
||||
# 10006: ignoring unknown option -fno-signed-zeros
|
||||
# 10148: ignoring unknown option -Wno-parentheses
|
||||
# 10156: ignoring option '-W'; no argument required
|
||||
check_cflags -wd144,167,556,1292,1419,10006,10148,10156
|
||||
check_cflags -wd144,167,556,1292,10006,10148,10156
|
||||
# 11030: Warning unknown option --as-needed
|
||||
# 10156: ignoring option '-export'; no argument required
|
||||
check_ldflags -wd10156,11030
|
||||
@@ -3459,13 +3356,11 @@ elif enabled gcc; then
|
||||
check_cflags -fno-tree-vectorize
|
||||
check_cflags -Werror=implicit-function-declaration
|
||||
check_cflags -Werror=missing-prototypes
|
||||
check_cflags -Werror=return-type
|
||||
elif enabled llvm_gcc; then
|
||||
check_cflags -mllvm -stack-alignment=16
|
||||
elif enabled clang; then
|
||||
check_cflags -mllvm -stack-alignment=16
|
||||
check_cflags -Qunused-arguments
|
||||
check_cflags -Werror=return-type
|
||||
elif enabled armcc; then
|
||||
# 2523: use of inline assembler is deprecated
|
||||
add_cflags -W${armcc_opt},--diag_suppress=2523
|
||||
@@ -3495,36 +3390,9 @@ if test $target_os = "haiku"; then
|
||||
disable posix_memalign
|
||||
fi
|
||||
|
||||
! enabled_any memalign posix_memalign &&
|
||||
! enabled_any memalign posix_memalign malloc_aligned &&
|
||||
enabled_any $need_memalign && enable memalign_hack
|
||||
|
||||
# add_dep lib dep
|
||||
# -> enable ${lib}_deps_${dep}
|
||||
# -> add $dep to ${lib}_deps only once
|
||||
add_dep() {
|
||||
lib=$1
|
||||
dep=$2
|
||||
enabled "${lib}_deps_${dep}" && return 0
|
||||
enable "${lib}_deps_${dep}"
|
||||
prepend "${lib}_deps" $dep
|
||||
}
|
||||
|
||||
# merge deps lib components
|
||||
# merge all ${component}_deps into ${lib}_deps and ${lib}_deps_*
|
||||
merge_deps() {
|
||||
lib=$1
|
||||
shift
|
||||
for comp in $*; do
|
||||
enabled $comp || continue
|
||||
eval "dep=\"\$${comp}_deps\""
|
||||
for d in $dep; do
|
||||
add_dep $lib $d
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
merge_deps libavfilter $FILTER_LIST
|
||||
|
||||
echo "install prefix $prefix"
|
||||
echo "source path $source_path"
|
||||
echo "C compiler $cc"
|
||||
@@ -3559,6 +3427,7 @@ if enabled arm; then
|
||||
echo "ARMv6 enabled ${armv6-no}"
|
||||
echo "ARMv6T2 enabled ${armv6t2-no}"
|
||||
echo "ARM VFP enabled ${armvfp-no}"
|
||||
echo "IWMMXT enabled ${iwmmxt-no}"
|
||||
echo "NEON enabled ${neon-no}"
|
||||
fi
|
||||
if enabled mips; then
|
||||
@@ -3584,6 +3453,7 @@ echo "network support ${network-no}"
|
||||
echo "threading support ${thread_type-no}"
|
||||
echo "safe bitstream reader ${safe_bitstream_reader-no}"
|
||||
echo "SDL support ${sdl-no}"
|
||||
echo "Sun medialib support ${mlib-no}"
|
||||
echo "libdxva2 enabled ${dxva2-no}"
|
||||
echo "libva enabled ${vaapi-no}"
|
||||
echo "libvdpau enabled ${vdpau-no}"
|
||||
@@ -3595,6 +3465,7 @@ echo "libass enabled ${libass-no}"
|
||||
echo "libcdio support ${libcdio-no}"
|
||||
echo "libcelt enabled ${libcelt-no}"
|
||||
echo "libdc1394 support ${libdc1394-no}"
|
||||
echo "libdirac enabled ${libdirac-no}"
|
||||
echo "libfaac enabled ${libfaac-no}"
|
||||
echo "libgsm enabled ${libgsm-no}"
|
||||
echo "libmodplug enabled ${libmodplug-no}"
|
||||
@@ -3753,7 +3624,6 @@ get_version LIBAVCODEC libavcodec/version.h
|
||||
get_version LIBAVDEVICE libavdevice/avdevice.h
|
||||
get_version LIBAVFILTER libavfilter/version.h
|
||||
get_version LIBAVFORMAT libavformat/version.h
|
||||
get_version LIBAVRESAMPLE libavresample/version.h
|
||||
get_version LIBAVUTIL libavutil/avutil.h
|
||||
get_version LIBPOSTPROC libpostproc/postprocess.h
|
||||
get_version LIBSWRESAMPLE libswresample/swresample.h
|
||||
@@ -3775,9 +3645,6 @@ cat > $TMPH <<EOF
|
||||
#define SLIBSUF "$SLIBSUF"
|
||||
EOF
|
||||
|
||||
test -n "$assert_level" &&
|
||||
echo "#define ASSERT_LEVEL $assert_level" >>$TMPH
|
||||
|
||||
test -n "$malloc_prefix" &&
|
||||
echo "#define MALLOC_PREFIX $malloc_prefix" >>$TMPH
|
||||
|
||||
@@ -3804,7 +3671,6 @@ print_config CONFIG_ "$config_files" $CONFIG_LIST \
|
||||
cat >>config.mak <<EOF
|
||||
ACODEC_TESTS=$(print_enabled -n _test $ACODEC_TESTS)
|
||||
VCODEC_TESTS=$(print_enabled -n _test $VCODEC_TESTS)
|
||||
LAVF_FATE_TESTS=$(print_enabled -n _test $LAVF_FATE_TESTS)
|
||||
LAVF_TESTS=$(print_enabled -n _test $LAVF_TESTS)
|
||||
LAVFI_TESTS=$(print_enabled -n _test $LAVFI_TESTS)
|
||||
SEEK_TESTS=$(print_enabled -n _test $SEEK_TESTS)
|
||||
@@ -3882,23 +3748,11 @@ Cflags: -I\${includedir}
|
||||
EOF
|
||||
}
|
||||
|
||||
libavfilter_pc_deps=""
|
||||
enabled libavfilter_deps_avcodec && prepend libavfilter_pc_deps "libavcodec = $LIBAVCODEC_VERSION,"
|
||||
enabled libavfilter_deps_avformat && prepend libavfilter_pc_deps "libavformat = $LIBAVFORMAT_VERSION,"
|
||||
enabled libavfilter_deps_swscale && prepend libavfilter_pc_deps "libswscale = $LIBSWSCALE_VERSION,"
|
||||
enabled libavfilter_deps_swresample && prepend libavfilter_pc_deps "libswresample = $LIBSWRESAMPLE_VERSION,"
|
||||
enabled libavfilter_deps_postproc && prepend libavfilter_pc_deps "libpostproc = $LIBPOSTPROC_VERSION,"
|
||||
libavfilter_pc_deps=${libavfilter_pc_deps%, }
|
||||
|
||||
libavdevice_pc_deps="libavformat = $LIBAVFORMAT_VERSION"
|
||||
enabled lavfi_indev && prepend libavdevice_pc_deps "libavfilter = $LIBAVFILTER_VERSION,"
|
||||
|
||||
pkgconfig_generate libavutil "FFmpeg utility library" "$LIBAVUTIL_VERSION" "$LIBM"
|
||||
pkgconfig_generate libavcodec "FFmpeg codec library" "$LIBAVCODEC_VERSION" "$extralibs" "libavutil = $LIBAVUTIL_VERSION"
|
||||
pkgconfig_generate libavformat "FFmpeg container format library" "$LIBAVFORMAT_VERSION" "$extralibs" "libavcodec = $LIBAVCODEC_VERSION"
|
||||
pkgconfig_generate libavdevice "FFmpeg device handling library" "$LIBAVDEVICE_VERSION" "$extralibs" "$libavdevice_pc_deps"
|
||||
pkgconfig_generate libavfilter "FFmpeg video filtering library" "$LIBAVFILTER_VERSION" "$extralibs" "$libavfilter_pc_deps"
|
||||
pkgconfig_generate libavdevice "FFmpeg device handling library" "$LIBAVDEVICE_VERSION" "$extralibs" "libavformat = $LIBAVFORMAT_VERSION"
|
||||
pkgconfig_generate libavfilter "FFmpeg video filtering library" "$LIBAVFILTER_VERSION" "$extralibs"
|
||||
pkgconfig_generate libpostproc "FFmpeg postprocessing library" "$LIBPOSTPROC_VERSION" "" "libavutil = $LIBAVUTIL_VERSION"
|
||||
pkgconfig_generate libavresample "Libav audio resampling library" "$LIBAVRESAMPLE_VERSION" "$extralibs"
|
||||
pkgconfig_generate libswscale "FFmpeg image rescaling library" "$LIBSWSCALE_VERSION" "$LIBM" "libavutil = $LIBAVUTIL_VERSION"
|
||||
pkgconfig_generate libswresample "FFmpeg audio rescaling library" "$LIBSWRESAMPLE_VERSION" "$LIBM" "libavutil = $LIBAVUTIL_VERSION"
|
||||
|
doc/APIchanges (684 lines changed): file diff suppressed because it is too large.

doc/Makefile (25 lines changed):
@@ -8,7 +8,6 @@ HTMLPAGES = $(PROGS-yes:%=doc/%.html) \
|
||||
doc/git-howto.html \
|
||||
doc/libavfilter.html \
|
||||
doc/platform.html \
|
||||
doc/syntax.html \
|
||||
|
||||
TXTPAGES = doc/fate.txt \
|
||||
|
||||
@@ -29,30 +28,22 @@ doc/%.txt: doc/%.texi
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)makeinfo --force --no-headers -o $@ $< 2>/dev/null
|
||||
|
||||
doc/print_options.o: libavformat/options_table.h libavcodec/options_table.h
|
||||
|
||||
GENTEXI = format codec
|
||||
GENTEXI := $(GENTEXI:%=doc/avoptions_%.texi)
|
||||
|
||||
$(GENTEXI): TAG = GENTEXI
|
||||
$(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF)
|
||||
$(M)doc/print_options $* > $@
|
||||
|
||||
doc/%.html: TAG = HTML
|
||||
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
|
||||
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)texi2html -I doc -monolithic --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
$(M)texi2html -monolithic --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
|
||||
doc/%.pod: TAG = POD
|
||||
doc/%.pod: doc/%.texi $(GENTEXI)
|
||||
doc/%.pod: doc/%.texi
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)$(SRC_PATH)/doc/texi2pod.pl -Idoc $< $@
|
||||
$(M)$(SRC_PATH)/doc/texi2pod.pl $< $@
|
||||
|
||||
doc/%.1: TAG = MAN
|
||||
doc/%.1: doc/%.pod $(GENTEXI)
|
||||
doc/%.1: doc/%.pod
|
||||
$(M)pod2man --section=1 --center=" " --release=" " $< > $@
|
||||
|
||||
$(DOCS): | doc/
|
||||
$(DOCS): | doc
|
||||
OBJDIRS += doc
|
||||
|
||||
install-progs-$(CONFIG_DOC): install-man
|
||||
|
||||
@@ -66,7 +57,7 @@ uninstall-man:
|
||||
$(RM) $(addprefix "$(MANDIR)/man1/",$(ALLMANPAGES))
|
||||
|
||||
clean::
|
||||
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 $(CLEANSUFFIXES:%=doc/%) doc/avoptions_*.texi
|
||||
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 $(CLEANSUFFIXES:%=doc/%)
|
||||
|
||||
-include $(wildcard $(DOCS:%=%.d))
|
||||
|
||||
|
@@ -1,11 +1,13 @@
|
||||
Release Notes
|
||||
=============
|
||||
|
||||
* 0.11 "Happiness" May, 2012
|
||||
* 0.10 "Freedom" January, 2012
|
||||
|
||||
|
||||
General notes
|
||||
-------------
|
||||
This release is binary compatible with 0.8 and 0.9.
|
||||
|
||||
See the Changelog file for a list of significant changes. Note, there
|
||||
are many more new features and bugfixes than what's listed there.
|
||||
|
||||
@@ -14,3 +16,34 @@ accepted. If you are experiencing issues with any formally released version of
|
||||
FFmpeg, please try git master to check if the issue still exists. If it does,
|
||||
make your report against the development code following the usual bug reporting
|
||||
guidelines.
|
||||
|
||||
|
||||
API changes
|
||||
-----------
|
||||
|
||||
A number of additional APIs have been introduced and some existing
|
||||
functions have been deprecated and are scheduled for removal in the next
|
||||
release. Significant API changes include:
|
||||
|
||||
* new audio decoding API which decodes from an AVPacket to an AVFrame and
|
||||
is able to use AVCodecContext.get_buffer() in a similar way to video decoding.
|
||||
|
||||
* new audio encoding API which encodes from an AVFrame to an AVPacket, thus
|
||||
allowing it to properly output timing information and side data.
|
||||
|
||||
Please see the git history and the file doc/APIchanges for details.
|
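The following is only an illustrative sketch of the two new calls, not code from
this release; it assumes an already opened decoder and encoder context and omits
all error handling and muxing.

    #include <libavcodec/avcodec.h>

    /* Decode one packet with the new API, then re-encode the resulting frame. */
    static void transcode_audio_packet(AVCodecContext *dec, AVCodecContext *enc,
                                       AVPacket *in_pkt)
    {
        AVFrame frame;
        AVPacket out_pkt;
        int got_frame = 0, got_packet = 0;

        avcodec_get_frame_defaults(&frame);
        /* new decoding API: AVPacket in, AVFrame out */
        if (avcodec_decode_audio4(dec, &frame, &got_frame, in_pkt) < 0 || !got_frame)
            return;

        av_init_packet(&out_pkt);
        out_pkt.data = NULL;   /* let the encoder allocate the payload */
        out_pkt.size = 0;
        /* new encoding API: AVFrame in, AVPacket out, carrying timing and side data */
        if (avcodec_encode_audio2(enc, &out_pkt, &frame, &got_packet) >= 0 && got_packet) {
            /* hand out_pkt to a muxer here */
            av_free_packet(&out_pkt);
        }
    }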
||||
|
||||
|
||||
Other notable changes
|
||||
---------------------
|
||||
|
||||
Libavcodec and libavformat built as shared libraries now hide non-public
|
||||
symbols. This will break applications using those symbols. Possible solutions
|
||||
are, in order of preference:
|
||||
1) Try finding a way of accomplishing the same with public API.
|
||||
2) If there is no corresponding public API, but you think there should be,
|
||||
post a request on the developer mailing list or IRC channel.
|
||||
3) Finally, if your program needs access to FFmpeg / libavcodec / libavformat
internals for some special reason, then the best solution is to link statically.
|
||||
|
||||
Please see the Changelog file and git history for a more detailed list of changes.
|
||||
|
@@ -41,8 +41,6 @@ streams of this type.
|
||||
@item p:@var{program_id}[:@var{stream_index}]
|
||||
If @var{stream_index} is given, then matches stream number @var{stream_index} in
|
||||
program with id @var{program_id}. Otherwise matches all streams in this program.
|
||||
@item #@var{stream_id}
|
||||
Matches the stream by format-specific ID.
|
||||
@end table
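As an illustration only (the input name and program id below are hypothetical,
and this example is not part of the change shown here), such a specifier can be
given to @option{-map} to copy every stream of one program from an MPEG-TS input:
@example
ffmpeg -i input.ts -map 0:p:101 -c copy output.ts
@end example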
|
||||
@section Generic options
|
||||
|
||||
@@ -136,15 +134,6 @@ It also implies @code{-loglevel verbose}.
|
||||
Note: setting the environment variable @code{FFREPORT} to any value has the
|
||||
same effect.
|
||||
|
||||
@item -cpuflags flags (@emph{global})
|
||||
Allows setting and clearing cpu flags. This option is intended
|
||||
for testing. Do not use it unless you know what you're doing.
|
||||
@example
|
||||
ffmpeg -cpuflags -sse+mmx ...
|
||||
ffmpeg -cpuflags mmx ...
|
||||
ffmpeg -cpuflags 0 ...
|
||||
@end example
|
||||
|
||||
@end table
|
||||
|
||||
@section AVOptions
|
||||
@@ -177,6 +166,3 @@ use @option{-option 0}/@option{-option 1}.
|
||||
|
||||
Note 2: the old undocumented way of specifying per-stream AVOptions by prepending
v/a/s to the option name is now obsolete and will be removed soon.
|
||||
|
||||
@include avoptions_codec.texi
|
||||
@include avoptions_format.texi
|
||||
|
@@ -71,7 +71,7 @@ stream (carrying the AVI1 header ID and lacking a DHT segment) to
|
||||
produce fully qualified JPEG images.
|
||||
|
||||
@example
|
||||
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
|
||||
ffmpeg -i mjpeg-movie.avi -c:v copy -vbsf mjpeg2jpeg frame_%d.jpg
|
||||
exiftran -i -9 frame*.jpg
|
||||
ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
|
||||
@end example
|
||||
|
@@ -60,67 +60,4 @@ This decoder generates wave patterns according to predefined sequences. Its
|
||||
use is purely internal and the format of the data it accepts is not publicly
|
||||
documented.
|
||||
|
||||
@section libcelt
|
||||
|
||||
libcelt decoder wrapper
|
||||
|
||||
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
|
||||
Requires the presence of the libcelt headers and library during configuration.
|
||||
You need to explicitly configure the build with @code{--enable-libcelt}.
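For example, the wrapper is enabled at configure time with (other configure
options omitted):
@example
./configure --enable-libcelt
@end example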
|
||||
|
||||
@section libgsm
|
||||
|
||||
libgsm decoder wrapper
|
||||
|
||||
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
|
||||
the presence of the libgsm headers and library during configuration. You need
|
||||
to explicitly configure the build with @code{--enable-libgsm}.
|
||||
|
||||
This decoder supports both the ordinary GSM and the Microsoft variant.
|
||||
|
||||
@section libilbc
|
||||
|
||||
libilbc decoder wrapper
|
||||
|
||||
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
|
||||
audio codec. Requires the presence of the libilbc headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libilbc}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following option is supported by the libilbc wrapper.
|
||||
|
||||
@table @option
|
||||
@item enhance
|
||||
|
||||
Enable the enhancement of the decoded audio when set to 1. The default
|
||||
value is 0 (disabled).
|
||||
|
||||
@end table
|
||||
|
||||
@section libopencore-amrnb
|
||||
|
||||
libopencore-amrnb decoder wrapper
|
||||
|
||||
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
|
||||
Narrowband audio codec. Using it requires the presence of the
|
||||
libopencore-amrnb headers and library during configuration. You need to
|
||||
explicitly configure the build with @code{--enable-libopencore-amrnb}.
|
||||
|
||||
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
|
||||
without this library.
|
||||
|
||||
@section libopencore-amrwb
|
||||
|
||||
libopencore-amrwb decoder wrapper.
|
||||
|
||||
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
|
||||
Wideband audio codec. Using it requires the presence of the
|
||||
libopencore-amrwb headers and library during configuration. You need to
|
||||
explicitly configure the build with @code{--enable-libopencore-amrwb}.
|
||||
|
||||
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
|
||||
without this library.
|
||||
|
||||
@c man end AUDIO DECODERS
|
||||
|
@@ -14,13 +14,12 @@
|
||||
@section API
|
||||
@itemize @bullet
|
||||
@item libavcodec is the library containing the codecs (both encoding and
|
||||
decoding). Look at @file{doc/examples/decoding_encoding.c} to see how to use
|
||||
it.
|
||||
decoding). Look at @file{libavcodec/apiexample.c} to see how to use it.
|
||||
|
||||
@item libavformat is the library containing the file format handling (mux and
|
||||
demux code for several formats). Look at @file{ffplay.c} to use it in a
|
||||
player. See @file{doc/examples/muxing.c} to use it to generate audio or video
|
||||
streams.
|
||||
player. See @file{libavformat/output-example.c} to use it to generate
|
||||
audio or video streams.
|
||||
|
||||
@end itemize
|
||||
|
||||
@@ -188,8 +187,6 @@ the following snippet into your @file{.vimrc}:
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" allow tabs in Makefiles
|
||||
autocmd FileType make set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
@@ -201,16 +198,10 @@ autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
@example
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode nil)
|
||||
(show-trailing-whitespace t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
(setq c-default-style "k&r")
|
||||
(setq-default c-basic-offset 4)
|
||||
(setq-default indent-tabs-mode nil)
|
||||
(setq-default show-trailing-whitespace t)
|
||||
@end example
|
||||
|
||||
@section Development Policy
|
||||
|
@@ -412,131 +412,6 @@ Selected by Encoder (default)
|
||||
|
||||
@end table
|
||||
|
||||
@section libmp3lame
|
||||
|
||||
LAME (Lame Ain't an MP3 Encoder) MP3 encoder wrapper
|
||||
|
||||
Requires the presence of the libmp3lame headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libmp3lame}.
|
||||
|
||||
@subsection Option Mapping
|
||||
|
||||
The following options are supported by the libmp3lame wrapper,
|
||||
the LAME-equivalent options follow the FFmpeg ones.
|
||||
|
||||
@multitable @columnfractions .2 .2
|
||||
@item FFmpeg @tab LAME
|
||||
@item b @tab b
|
||||
The FFmpeg @code{b} option is expressed in bits/s, the LAME @code{bitrate}
option in kilobits/s.
|
||||
@item q @tab V
|
||||
Quality setting for VBR.
|
||||
@item compression_level @tab q
|
||||
Algorithm quality. Valid options are integers from 0-9.
|
||||
@item reservoir @tab N.A.
|
||||
Enable use of bit reservoir. LAME has this enabled by default.
|
||||
@item joint_stereo @tab -m j
|
||||
Enables the encoder to use (on a frame-by-frame basis) either L/R
stereo or mid/side stereo.
|
||||
@end multitable
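As a usage sketch (file names are hypothetical), a 192 kbits/s constant bitrate
encode and a VBR encode at LAME quality 2 map to:
@example
ffmpeg -i input.wav -c:a libmp3lame -b:a 192k output.mp3
ffmpeg -i input.wav -c:a libmp3lame -q:a 2 output.mp3
@end example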
|
||||
|
||||
@section libopencore-amrnb
|
||||
|
||||
OpenCORE Adaptive Multi-Rate Narrowband encoder.
|
||||
|
||||
Requires the presence of the libopencore-amrnb headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libopencore-amrnb --enable-version3}.
|
||||
|
||||
This is a mono-only encoder. Officially it only supports 8000Hz sample rate,
|
||||
but you can override it by setting @option{strict} to @samp{unofficial} or
|
||||
lower.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item b
|
||||
Set bitrate in bits per second. Only the following bitrates are supported,
|
||||
otherwise libavcodec will round to the nearest valid bitrate.
|
||||
|
||||
@table @option
|
||||
@item 4750
|
||||
@item 5150
|
||||
@item 5900
|
||||
@item 6700
|
||||
@item 7400
|
||||
@item 7950
|
||||
@item 10200
|
||||
@item 12200
|
||||
@end table
|
||||
|
||||
@item dtx
|
||||
Allow discontinuous transmission (generate comfort noise) when set to 1. The
|
||||
default value is 0 (disabled).
|
||||
|
||||
@end table
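A possible invocation (file names hypothetical), resampling to the required
8000 Hz mono and selecting one of the bitrates listed above, is:
@example
ffmpeg -i input.wav -ar 8000 -ac 1 -c:a libopencore_amrnb -b:a 7400 output.amr
@end example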
|
||||
|
||||
@section libvo-aacenc
|
||||
|
||||
VisualOn AAC encoder
|
||||
|
||||
Requires the presence of the libvo-aacenc headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libvo-aacenc --enable-version3}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The VisualOn AAC encoder only supports encoding AAC-LC and up to 2
channels. It is also CBR-only. It is considered to be worse than the
native experimental FFmpeg AAC encoder.
|
||||
|
||||
@table @option
|
||||
|
||||
@item b
|
||||
Bitrate.
|
||||
|
||||
@end table
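For illustration (file names hypothetical), a stereo 128 kbits/s encode with
this wrapper could be requested with:
@example
ffmpeg -i input.wav -c:a libvo_aacenc -b:a 128k output.m4a
@end example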
|
||||
|
||||
@section libvo-amrwbenc
|
||||
|
||||
VisualOn Adaptive Multi-Rate Wideband encoder
|
||||
|
||||
Requires the presence of the libvo-amrwbenc headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libvo-amrwbenc --enable-version3}.
|
||||
|
||||
This is a mono-only encoder. Officially it only supports 16000Hz sample
|
||||
rate, but you can override it by setting @option{strict} to
|
||||
@samp{unofficial} or lower.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item b
|
||||
Set bitrate in bits/s. Only the following bitrates are supported, otherwise
|
||||
libavcodec will round to the nearest valid bitrate.
|
||||
|
||||
@table @samp
|
||||
@item 6600
|
||||
@item 8850
|
||||
@item 12650
|
||||
@item 14250
|
||||
@item 15850
|
||||
@item 18250
|
||||
@item 19850
|
||||
@item 23050
|
||||
@item 23850
|
||||
@end table
|
||||
|
||||
@item dtx
|
||||
Allow discontinuous transmission (generate comfort noise) when set to 1. The
|
||||
default value is 0 (disabled).
|
||||
|
||||
@end table
|
||||
|
||||
@c man end AUDIO ENCODERS
|
||||
|
||||
@chapter Video Encoders
|
||||
@@ -710,116 +585,4 @@ ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
|
||||
For more information about libx264 and the supported options see:
|
||||
@url{http://www.videolan.org/developers/x264.html}
|
||||
|
||||
@section libxvid
|
||||
|
||||
Xvid MPEG-4 Part 2 encoder wrapper.
|
||||
|
||||
This encoder requires the presence of the libxvidcore headers and library
|
||||
during configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libxvid --enable-gpl}.
|
||||
|
||||
The native @code{mpeg4} encoder supports the MPEG-4 Part 2 format, so
|
||||
users can encode to this format without this library.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by the libxvid wrapper. Some of
|
||||
the following options are listed but are not documented, and
|
||||
correspond to shared codec options. The other shared options
|
||||
which are not listed have no effect for the libxvid encoder.
|
||||
|
||||
@table @option
|
||||
@item b
|
||||
|
||||
@item g
|
||||
|
||||
@item qmin
|
||||
|
||||
@item qmax
|
||||
|
||||
@item mpeg_quant
|
||||
|
||||
@item threads
|
||||
|
||||
@item bf
|
||||
|
||||
@item b_qfactor
|
||||
|
||||
@item b_qoffset
|
||||
|
||||
@item flags
|
||||
Set specific encoding flags. Possible values:
|
||||
|
||||
@table @samp
|
||||
|
||||
@item mv4
|
||||
Use four motion vectors per macroblock.
|
||||
|
||||
@item aic
|
||||
Enable high quality AC prediction.
|
||||
|
||||
@item gray
|
||||
Only encode grayscale.
|
||||
|
||||
@item gmc
|
||||
Enable the use of global motion compensation (GMC).
|
||||
|
||||
@item qpel
|
||||
Enable quarter-pixel motion compensation.
|
||||
|
||||
@item cgop
|
||||
Enable closed GOP.
|
||||
|
||||
@item global_header
|
||||
Place global headers in extradata instead of every keyframe.
|
||||
|
||||
@end table
|
||||
|
||||
@item trellis
|
||||
|
||||
@item me_method
|
||||
Set motion estimation method. Possible values in decreasing order of
|
||||
speed and increasing order of quality:
|
||||
|
||||
@table @samp
|
||||
@item zero
|
||||
Use no motion estimation (default).
|
||||
|
||||
@item phods
|
||||
@item x1
|
||||
@item log
|
||||
Enable advanced diamond zonal search for 16x16 blocks and half-pixel
|
||||
refinement for 16x16 blocks. @samp{x1} and @samp{log} are aliases for
|
||||
@samp{phods}.
|
||||
|
||||
@item epzs
|
||||
Enable all of the things described above, plus advanced diamond zonal
|
||||
search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
|
||||
estimation on chroma planes.
|
||||
|
||||
@item full
|
||||
Enable all of the things described above, plus extended 16x16 and 8x8
|
||||
blocks search.
|
||||
@end table
|
||||
|
||||
@item mbd
|
||||
Set macroblock decision algorithm. Possible values in the increasing
|
||||
order of quality:
|
||||
|
||||
@table @samp
|
||||
@item simple
|
||||
Use macroblock comparing function algorithm (default).
|
||||
|
||||
@item bits
|
||||
Enable rate distortion-based half pixel and quarter pixel refinement for
|
||||
16x16 blocks.
|
||||
|
||||
@item rd
|
||||
Enable all of the things described above, plus rate distortion-based
|
||||
half pixel and quarter pixel refinement for 8x8 blocks, and rate
|
||||
distortion-based search using square pattern.
|
||||
@end table
|
||||
|
||||
@end table
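An illustrative sketch combining several of the options above (file names
hypothetical, values arbitrary):
@example
ffmpeg -i input.avi -c:v libxvid -b:v 1200k -flags +mv4+aic -mbd rd -trellis 1 output.avi
@end example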
|
||||
|
||||
@c man end VIDEO ENCODERS
|
||||
|
@@ -106,18 +106,6 @@ the evaluation of @var{y}, return 0 otherwise.
|
||||
@item ifnot(x, y)
|
||||
Evaluate @var{x}, and if the result is zero return the result of the
|
||||
evaluation of @var{y}, return 0 otherwise.
|
||||
|
||||
@item taylor(expr, x) taylor(expr, x, id)
|
||||
Evaluate a Taylor series at x, where expr represents the LD(id)-th
derivative of f(x) at 0. If id is not specified then 0 is assumed.
Note: when you have the derivatives at y instead of 0,
taylor(expr, x-y) can be used.
When the series does not converge the results are undefined.
|
||||
|
||||
@item root(expr, max)
|
||||
Find x where f(x)=0 in the interval 0..max.
f() must be continuous, or the result is undefined.
|
||||
@end table
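Two worked examples (they assume, as the current implementation does, that the
expression passed to root() reads its argument from ld(0)): every derivative of
exp(x) at 0 equals 1, so a constant 1 rebuilds the exponential series, and
cos(x) has its first zero on [0,2] at PI/2:
@example
taylor(1, 1)
root(cos(ld(0)), 2)
@end example
The first expression evaluates to approximately 2.718 (e), the second to
approximately 1.5708 (PI/2).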
|
||||
|
||||
The following constants are available:
|
||||
|
@@ -1,28 +1,17 @@
|
||||
# use pkg-config for getting CFLAGS and LDLIBS
|
||||
FFMPEG_LIBS= libavdevice \
|
||||
libavformat \
|
||||
libavfilter \
|
||||
libavcodec \
|
||||
libavresample \
|
||||
libswresample \
|
||||
libswscale \
|
||||
libavutil \
|
||||
# use pkg-config for getting CFLAGS and LDFLAGS
|
||||
FFMPEG_LIBS=libavdevice libavformat libavfilter libavcodec libswscale libavutil
|
||||
CFLAGS+=$(shell pkg-config --cflags $(FFMPEG_LIBS))
|
||||
LDFLAGS+=$(shell pkg-config --libs $(FFMPEG_LIBS))
|
||||
|
||||
CFLAGS += -Wall -O2 -g
|
||||
CFLAGS += $(shell pkg-config --cflags $(FFMPEG_LIBS))
|
||||
LDLIBS += $(shell pkg-config --libs $(FFMPEG_LIBS))
|
||||
|
||||
EXAMPLES= decoding_encoding \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
metadata \
|
||||
muxing \
|
||||
EXAMPLES=decoding_encoding filtering metadata muxing
|
||||
|
||||
OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
|
||||
# the following examples make explicit use of the math library
|
||||
decoding_encoding: LDLIBS += -lm
|
||||
muxing: LDLIBS += -lm
|
||||
%: %.o
|
||||
$(CC) $< $(LDFLAGS) -o $@
|
||||
|
||||
%.o: %.c
|
||||
$(CC) $< $(CFLAGS) -c -o $@
|
||||
|
||||
.phony: all clean
|
||||
|
||||
|
@@ -29,13 +29,11 @@
|
||||
* format handling
|
||||
*/
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
#include "libavutil/samplefmt.h"
|
||||
|
||||
#define INBUF_SIZE 4096
|
||||
#define AUDIO_INBUF_SIZE 20480
|
||||
@@ -54,7 +52,7 @@ static void audio_encode_example(const char *filename)
|
||||
float t, tincr;
|
||||
uint8_t *outbuf;
|
||||
|
||||
printf("Encode audio file %s\n", filename);
|
||||
printf("Audio encoding\n");
|
||||
|
||||
/* find the MP2 encoder */
|
||||
codec = avcodec_find_encoder(CODEC_ID_MP2);
|
||||
@@ -72,7 +70,7 @@ static void audio_encode_example(const char *filename)
|
||||
c->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
if (avcodec_open(c, codec) < 0) {
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
@@ -125,7 +123,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
|
||||
printf("Decode audio file %s\n", filename);
|
||||
printf("Audio decoding\n");
|
||||
|
||||
/* find the mpeg audio decoder */
|
||||
codec = avcodec_find_decoder(CODEC_ID_MP2);
|
||||
@@ -137,7 +135,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
c = avcodec_alloc_context3(codec);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
if (avcodec_open(c, codec) < 0) {
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
@@ -213,13 +211,12 @@ static void video_encode_example(const char *filename, int codec_id)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int i, out_size, x, y, outbuf_size;
|
||||
int i, out_size, size, x, y, outbuf_size;
|
||||
FILE *f;
|
||||
AVFrame *picture;
|
||||
uint8_t *outbuf;
|
||||
int had_output=0;
|
||||
|
||||
printf("Encode video file %s\n", filename);
|
||||
printf("Video encoding\n");
|
||||
|
||||
/* find the mpeg1 video encoder */
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
@@ -246,7 +243,7 @@ static void video_encode_example(const char *filename, int codec_id)
|
||||
av_opt_set(c->priv_data, "preset", "slow", 0);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
if (avcodec_open(c, codec) < 0) {
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
@@ -258,7 +255,7 @@ static void video_encode_example(const char *filename, int codec_id)
|
||||
}
|
||||
|
||||
/* alloc image and output buffer */
|
||||
outbuf_size = 100000 + 12*c->width*c->height;
|
||||
outbuf_size = 100000;
|
||||
outbuf = malloc(outbuf_size);
|
||||
|
||||
/* the image can be allocated by any means and av_image_alloc() is
|
||||
@@ -287,17 +284,15 @@ static void video_encode_example(const char *filename, int codec_id)
|
||||
|
||||
/* encode the image */
|
||||
out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
|
||||
had_output |= out_size;
|
||||
printf("encoding frame %3d (size=%5d)\n", i, out_size);
|
||||
fwrite(outbuf, 1, out_size, f);
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for(; out_size || !had_output; i++) {
|
||||
for(; out_size; i++) {
|
||||
fflush(stdout);
|
||||
|
||||
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
|
||||
had_output |= out_size;
|
||||
printf("write frame %3d (size=%5d)\n", i, out_size);
|
||||
fwrite(outbuf, 1, out_size, f);
|
||||
}
|
||||
@@ -351,7 +346,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
|
||||
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
|
||||
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
printf("Decode video file %s\n", filename);
|
||||
printf("Video decoding\n");
|
||||
|
||||
/* find the mpeg1 video decoder */
|
||||
codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
|
||||
@@ -371,7 +366,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
|
||||
available in the bitstream. */
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
if (avcodec_open(c, codec) < 0) {
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
@@ -458,6 +453,9 @@ int main(int argc, char **argv)
|
||||
{
|
||||
const char *filename;
|
||||
|
||||
/* must be called before using avcodec lib */
|
||||
avcodec_init();
|
||||
|
||||
/* register all the codecs */
|
||||
avcodec_register_all();
|
||||
|
||||
|
@@ -27,13 +27,11 @@
|
||||
*/
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/vsrc_buffer.h>
|
||||
|
||||
const char *filter_descr = "scale=78:24";
|
||||
|
||||
@@ -47,7 +45,7 @@ static int64_t last_pts = AV_NOPTS_VALUE;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
int ret, i;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
@@ -55,7 +53,7 @@ static int open_input_file(const char *filename)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
if ((ret = av_find_stream_info(fmt_ctx)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
@@ -70,7 +68,7 @@ static int open_input_file(const char *filename)
|
||||
dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
|
||||
|
||||
/* init the video decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
if ((ret = avcodec_open(dec_ctx, dec)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
|
||||
return ret;
|
||||
}
|
||||
@@ -120,13 +118,12 @@ static int init_filters(const char *filters_descr)
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse(filter_graph, filters_descr,
|
||||
if ((ret = avfilter_graph_parse(filter_graph, filter_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
return ret;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void display_picref(AVFilterBufferRef *picref, AVRational time_base)
|
||||
@@ -198,14 +195,15 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
if (got_frame) {
|
||||
frame.pts = av_frame_get_best_effort_timestamp(&frame);
|
||||
|
||||
if (frame.pts == AV_NOPTS_VALUE)
|
||||
frame.pts = frame.pkt_dts == AV_NOPTS_VALUE ?
|
||||
frame.pkt_dts : frame.pkt_pts;
|
||||
/* push the decoded frame into the filtergraph */
|
||||
av_vsrc_buffer_add_frame(buffersrc_ctx, &frame, 0);
|
||||
av_vsrc_buffer_add_frame(buffersrc_ctx, &frame);
|
||||
|
||||
/* pull filtered pictures from the filtergraph */
|
||||
while (avfilter_poll_frame(buffersink_ctx->inputs[0])) {
|
||||
av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0);
|
||||
av_vsink_buffer_get_video_buffer_ref(buffersink_ctx, &picref, 0);
|
||||
if (picref) {
|
||||
display_picref(picref, buffersink_ctx->inputs[0]->time_base);
|
||||
avfilter_unref_buffer(picref);
|
||||
@@ -218,7 +216,7 @@ end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
if (dec_ctx)
|
||||
avcodec_close(dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_close_input_file(fmt_ctx);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
char buf[1024];
|
@@ -1,235 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2012 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for audio decoding and filtering
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
|
||||
const char *filter_descr = "aresample=8000,aconvert=s16:mono";
|
||||
const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int audio_stream_index = -1;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the audio stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
audio_stream_index = ret;
|
||||
dec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
|
||||
|
||||
/* init the audio decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret;
|
||||
AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
|
||||
AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
|
||||
const int64_t *chlayouts = avfilter_all_channel_layouts;
|
||||
AVABufferSinkParams *abuffersink_params;
|
||||
const AVFilterLink *outlink;
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
|
||||
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args), "%d:%d:0x%"PRIx64,
|
||||
dec_ctx->sample_rate, dec_ctx->sample_fmt, dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* buffer audio sink: to terminate the filter chain. */
|
||||
abuffersink_params = av_abuffersink_params_alloc();
|
||||
abuffersink_params->sample_fmts = sample_fmts;
|
||||
abuffersink_params->channel_layouts = chlayouts;
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
|
||||
NULL, abuffersink_params, filter_graph);
|
||||
av_free(abuffersink_params);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
return ret;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
return ret;
|
||||
|
||||
/* Print summary of the sink buffer
|
||||
* Note: args buffer is reused to store channel layout string */
|
||||
outlink = buffersink_ctx->inputs[0];
|
||||
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
|
||||
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
|
||||
(int)outlink->sample_rate,
|
||||
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
|
||||
args);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void print_samplesref(AVFilterBufferRef *samplesref)
|
||||
{
|
||||
const AVFilterBufferRefAudioProps *props = samplesref->audio;
|
||||
const int n = props->nb_samples * av_get_channel_layout_nb_channels(props->channel_layout);
|
||||
const uint16_t *p = (uint16_t*)samplesref->data[0];
|
||||
const uint16_t *p_end = p + n;
|
||||
|
||||
while (p < p_end) {
|
||||
fputc(*p & 0xff, stdout);
|
||||
fputc(*p>>8 & 0xff, stdout);
|
||||
p++;
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet;
|
||||
AVFrame frame;
|
||||
int got_frame;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
avcodec_register_all();
|
||||
av_register_all();
|
||||
avfilter_register_all();
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
AVFilterBufferRef *samplesref;
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet.stream_index == audio_stream_index) {
|
||||
avcodec_get_frame_defaults(&frame);
|
||||
got_frame = 0;
|
||||
ret = avcodec_decode_audio4(dec_ctx, &frame, &got_frame, &packet);
|
||||
av_free_packet(&packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
if (got_frame) {
|
||||
/* push the audio data from decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame(buffersrc_ctx, &frame, 0) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered audio from the filtergraph */
|
||||
while (avfilter_poll_frame(buffersink_ctx->inputs[0])) {
|
||||
av_buffersink_get_buffer_ref(buffersink_ctx, &samplesref, 0);
|
||||
if (samplesref) {
|
||||
print_samplesref(samplesref);
|
||||
avfilter_unref_buffer(samplesref);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
if (dec_ctx)
|
||||
avcodec_close(dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
char buf[1024];
|
||||
av_strerror(ret, buf, sizeof(buf));
|
||||
fprintf(stderr, "Error occurred: %s\n", buf);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
@@ -50,6 +50,6 @@ int main (int argc, char **argv)
|
||||
while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
|
||||
printf("%s=%s\n", tag->key, tag->value);
|
||||
|
||||
avformat_close_input(&fmt_ctx);
|
||||
avformat_free_context(fmt_ctx);
|
||||
return 0;
|
||||
}
|
||||
|
@@ -33,9 +33,9 @@
|
||||
#include <string.h>
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#include "libavutil/mathematics.h"
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libswscale/swscale.h"
|
||||
|
||||
#undef exit
|
||||
|
||||
@@ -43,7 +43,7 @@
|
||||
#define STREAM_DURATION 200.0
|
||||
#define STREAM_FRAME_RATE 25 /* 25 images/s */
|
||||
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
|
||||
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
|
||||
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
|
||||
|
||||
static int sws_flags = SWS_BICUBIC;
|
||||
|
||||
@@ -52,6 +52,8 @@ static int sws_flags = SWS_BICUBIC;
|
||||
|
||||
static float t, tincr, tincr2;
|
||||
static int16_t *samples;
|
||||
static uint8_t *audio_outbuf;
|
||||
static int audio_outbuf_size;
|
||||
static int audio_input_frame_size;
|
||||
|
||||
/*
|
||||
@@ -61,16 +63,8 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVStream *st;
|
||||
AVCodec *codec;
|
||||
|
||||
/* find the audio encoder */
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
st = avformat_new_stream(oc, codec);
|
||||
st = avformat_new_stream(oc, NULL);
|
||||
if (!st) {
|
||||
fprintf(stderr, "Could not alloc stream\n");
|
||||
exit(1);
|
||||
@@ -78,12 +72,14 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
|
||||
st->id = 1;
|
||||
|
||||
c = st->codec;
|
||||
c->codec_id = codec_id;
|
||||
c->codec_type = AVMEDIA_TYPE_AUDIO;
|
||||
|
||||
/* put sample parameters */
|
||||
c->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
c->channels = 2;
|
||||
c->channels = 2;
|
||||
|
||||
// some formats want stream headers to be separate
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
@@ -95,32 +91,54 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
|
||||
static void open_audio(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVCodec *codec;
|
||||
|
||||
c = st->codec;
|
||||
|
||||
/* find the audio encoder */
|
||||
codec = avcodec_find_encoder(c->codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, NULL, NULL) < 0) {
|
||||
if (avcodec_open(c, codec) < 0) {
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* init signal generator */
|
||||
t = 0;
|
||||
t = 0;
|
||||
tincr = 2 * M_PI * 110.0 / c->sample_rate;
|
||||
/* increment frequency by 110 Hz per second */
|
||||
tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
audio_input_frame_size = 10000;
|
||||
else
|
||||
audio_outbuf_size = 10000;
|
||||
audio_outbuf = av_malloc(audio_outbuf_size);
|
||||
|
||||
/* ugly hack for PCM codecs (will be removed ASAP with new PCM
|
||||
support to compute the input frame size in samples */
|
||||
if (c->frame_size <= 1) {
|
||||
audio_input_frame_size = audio_outbuf_size / c->channels;
|
||||
switch(st->codec->codec_id) {
|
||||
case CODEC_ID_PCM_S16LE:
|
||||
case CODEC_ID_PCM_S16BE:
|
||||
case CODEC_ID_PCM_U16LE:
|
||||
case CODEC_ID_PCM_U16BE:
|
||||
audio_input_frame_size >>= 1;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
audio_input_frame_size = c->frame_size;
|
||||
samples = av_malloc(audio_input_frame_size *
|
||||
av_get_bytes_per_sample(c->sample_fmt) *
|
||||
c->channels);
|
||||
}
|
||||
samples = av_malloc(audio_input_frame_size * 2 * c->channels);
|
||||
}
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
* 'nb_channels' channels. */
|
||||
/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
'nb_channels' channels */
|
||||
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
|
||||
{
|
||||
int j, i, v;
|
||||
@@ -129,9 +147,9 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
|
||||
q = samples;
|
||||
for (j = 0; j < frame_size; j++) {
|
||||
v = (int)(sin(t) * 10000);
|
||||
for (i = 0; i < nb_channels; i++)
|
||||
for(i = 0; i < nb_channels; i++)
|
||||
*q++ = v;
|
||||
t += tincr;
|
||||
t += tincr;
|
||||
tincr += tincr2;
|
||||
}
|
||||
}
|
||||
@@ -139,28 +157,22 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
|
||||
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVPacket pkt = { 0 }; // data and size must be 0;
|
||||
AVFrame *frame = avcodec_alloc_frame();
|
||||
int got_packet;
|
||||
|
||||
AVPacket pkt;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
c = st->codec;
|
||||
|
||||
get_audio_frame(samples, audio_input_frame_size, c->channels);
|
||||
frame->nb_samples = audio_input_frame_size;
|
||||
avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
|
||||
(uint8_t *)samples,
|
||||
audio_input_frame_size *
|
||||
av_get_bytes_per_sample(c->sample_fmt) *
|
||||
c->channels, 1);
|
||||
|
||||
avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
if (!got_packet)
|
||||
return;
|
||||
pkt.size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
|
||||
|
||||
if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
|
||||
pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = st->index;
|
||||
pkt.data = audio_outbuf;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
/* write the compressed frame in the media file */
|
||||
if (av_interleaved_write_frame(oc, &pkt) != 0) {
|
||||
fprintf(stderr, "Error while writing audio frame\n");
|
||||
exit(1);
|
||||
@@ -172,6 +184,7 @@ static void close_audio(AVFormatContext *oc, AVStream *st)
|
||||
avcodec_close(st->codec);
|
||||
|
||||
av_free(samples);
|
||||
av_free(audio_outbuf);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
@@ -181,21 +194,14 @@ static AVFrame *picture, *tmp_picture;
|
||||
static uint8_t *video_outbuf;
|
||||
static int frame_count, video_outbuf_size;
|
||||
|
||||
/* Add a video output stream. */
|
||||
/* add a video output stream */
|
||||
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVStream *st;
|
||||
AVCodec *codec;
|
||||
|
||||
/* find the video encoder */
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
st = avformat_new_stream(oc, codec);
|
||||
st = avformat_new_stream(oc, NULL);
|
||||
if (!st) {
|
||||
fprintf(stderr, "Could not alloc stream\n");
|
||||
exit(1);
|
||||
@@ -213,30 +219,30 @@ static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
|
||||
|
||||
c->codec_id = codec_id;
|
||||
|
||||
/* Put sample parameters. */
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 400000;
|
||||
/* Resolution must be a multiple of two. */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* timebase: This is the fundamental unit of time (in seconds) in terms
|
||||
* of which frame timestamps are represented. For fixed-fps content,
|
||||
* timebase should be 1/framerate and timestamp increments should be
|
||||
* identical to 1. */
|
||||
/* resolution must be a multiple of two */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* time base: this is the fundamental unit of time (in seconds) in terms
|
||||
of which frame timestamps are represented. for fixed-fps content,
|
||||
timebase should be 1/framerate and timestamp increments should be
|
||||
identically 1. */
|
||||
c->time_base.den = STREAM_FRAME_RATE;
|
||||
c->time_base.num = 1;
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
|
||||
/* just for testing, we also add B frames */
|
||||
c->max_b_frames = 2;
|
||||
}
|
||||
if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
|
||||
if (c->codec_id == CODEC_ID_MPEG1VIDEO){
|
||||
/* Needed to avoid using macroblocks in which some coeffs overflow.
|
||||
* This does not happen with normal video, it just happens here as
|
||||
* the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision = 2;
|
||||
This does not happen with normal video, it just happens here as
|
||||
the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision=2;
|
||||
}
|
||||
/* Some formats want stream headers to be separate. */
|
||||
// some formats want stream headers to be separate
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
@@ -252,7 +258,7 @@ static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
|
||||
picture = avcodec_alloc_frame();
|
||||
if (!picture)
|
||||
return NULL;
|
||||
size = avpicture_get_size(pix_fmt, width, height);
|
||||
size = avpicture_get_size(pix_fmt, width, height);
|
||||
picture_buf = av_malloc(size);
|
||||
if (!picture_buf) {
|
||||
av_free(picture);
|
||||
@@ -265,38 +271,46 @@ static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
|
||||
|
||||
static void open_video(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c;
|
||||
|
||||
c = st->codec;
|
||||
|
||||
/* find the video encoder */
|
||||
codec = avcodec_find_encoder(c->codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* open the codec */
|
||||
if (avcodec_open2(c, NULL, NULL) < 0) {
|
||||
if (avcodec_open(c, codec) < 0) {
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
video_outbuf = NULL;
|
||||
if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
|
||||
/* Allocate output buffer. */
|
||||
/* XXX: API change will be done. */
|
||||
/* Buffers passed into lav* can be allocated any way you prefer,
|
||||
* as long as they're aligned enough for the architecture, and
|
||||
* they're freed appropriately (such as using av_free for buffers
|
||||
* allocated with av_malloc). */
|
||||
/* allocate output buffer */
|
||||
/* XXX: API change will be done */
|
||||
/* buffers passed into lav* can be allocated any way you prefer,
|
||||
as long as they're aligned enough for the architecture, and
|
||||
they're freed appropriately (such as using av_free for buffers
|
||||
allocated with av_malloc) */
|
||||
video_outbuf_size = 200000;
|
||||
video_outbuf = av_malloc(video_outbuf_size);
|
||||
video_outbuf = av_malloc(video_outbuf_size);
|
||||
}
|
||||
|
||||
/* Allocate the encoded raw picture. */
|
||||
/* allocate the encoded raw picture */
|
||||
picture = alloc_picture(c->pix_fmt, c->width, c->height);
|
||||
if (!picture) {
|
||||
fprintf(stderr, "Could not allocate picture\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* If the output format is not YUV420P, then a temporary YUV420P
|
||||
* picture is needed too. It is then converted to the required
|
||||
* output format. */
|
||||
/* if the output format is not YUV420P, then a temporary YUV420P
|
||||
picture is needed too. It is then converted to the required
|
||||
output format */
|
||||
tmp_picture = NULL;
|
||||
if (c->pix_fmt != PIX_FMT_YUV420P) {
|
||||
tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
|
||||
@@ -307,22 +321,23 @@ static void open_video(AVFormatContext *oc, AVStream *st)
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a dummy image. */
|
||||
static void fill_yuv_image(AVFrame *pict, int frame_index,
|
||||
int width, int height)
|
||||
/* prepare a dummy image */
|
||||
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
|
||||
{
|
||||
int x, y, i;
|
||||
|
||||
i = frame_index;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
for (y = 0; y < height; y++) {
|
||||
for (x = 0; x < width; x++) {
|
||||
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
|
||||
}
|
||||
}
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
for (y = 0; y < height/2; y++) {
|
||||
for (x = 0; x < width/2; x++) {
|
||||
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
|
||||
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
@@ -338,13 +353,13 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
|
||||
c = st->codec;
|
||||
|
||||
if (frame_count >= STREAM_NB_FRAMES) {
|
||||
/* No more frames to compress. The codec has a latency of a few
|
||||
* frames if using B-frames, so we get the last frames by
|
||||
* passing the same picture again. */
|
||||
/* no more frame to compress. The codec has a latency of a few
|
||||
frames if using B frames, so we get the last frames by
|
||||
passing the same picture again */
|
||||
} else {
|
||||
if (c->pix_fmt != PIX_FMT_YUV420P) {
|
||||
/* as we only generate a YUV420P picture, we must convert it
|
||||
* to the codec pixel format if needed */
|
||||
to the codec pixel format if needed */
|
||||
if (img_convert_ctx == NULL) {
|
||||
img_convert_ctx = sws_getContext(c->width, c->height,
|
||||
PIX_FMT_YUV420P,
|
||||
@@ -352,8 +367,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
|
||||
c->pix_fmt,
|
||||
sws_flags, NULL, NULL, NULL);
|
||||
if (img_convert_ctx == NULL) {
|
||||
fprintf(stderr,
|
||||
"Cannot initialize the conversion context\n");
|
||||
fprintf(stderr, "Cannot initialize the conversion context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
@@ -365,38 +379,36 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
|
||||
/* Raw video case - the API will change slightly in the near
|
||||
* future for that. */
|
||||
/* raw video case. The API will change slightly in the near
|
||||
future for that. */
|
||||
AVPacket pkt;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = st->index;
|
||||
pkt.data = (uint8_t *)picture;
|
||||
pkt.size = sizeof(AVPicture);
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = st->index;
|
||||
pkt.data = (uint8_t *)picture;
|
||||
pkt.size = sizeof(AVPicture);
|
||||
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
} else {
|
||||
/* encode the image */
|
||||
out_size = avcodec_encode_video(c, video_outbuf,
|
||||
video_outbuf_size, picture);
|
||||
/* If size is zero, it means the image was buffered. */
|
||||
out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
|
||||
/* if zero size, it means the image was buffered */
|
||||
if (out_size > 0) {
|
||||
AVPacket pkt;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
if (c->coded_frame->pts != AV_NOPTS_VALUE)
|
||||
pkt.pts = av_rescale_q(c->coded_frame->pts,
|
||||
c->time_base, st->time_base);
|
||||
if (c->coded_frame->key_frame)
|
||||
pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
|
||||
if(c->coded_frame->key_frame)
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
|
||||
pkt.stream_index = st->index;
|
||||
pkt.data = video_outbuf;
|
||||
pkt.size = out_size;
|
||||
pkt.data = video_outbuf;
|
||||
pkt.size = out_size;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
/* write the compressed frame in the media file */
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
} else {
|
||||
ret = 0;
|
||||
@@ -433,7 +445,7 @@ int main(int argc, char **argv)
|
||||
double audio_pts, video_pts;
|
||||
int i;
|
||||
|
||||
/* Initialize libavcodec, and register all codecs and formats. */
|
||||
/* initialize libavcodec, and register all codecs and formats */
|
||||
av_register_all();
|
||||
|
||||
if (argc != 2) {
|
||||
@@ -458,8 +470,8 @@ int main(int argc, char **argv)
|
||||
}
|
||||
fmt = oc->oformat;
|
||||
|
||||
/* Add the audio and video streams using the default format codecs
|
||||
* and initialize the codecs. */
|
||||
/* add the audio and video streams using the default format codecs
|
||||
and initialize the codecs */
|
||||
video_st = NULL;
|
||||
audio_st = NULL;
|
||||
if (fmt->video_codec != CODEC_ID_NONE) {
|
||||
@@ -469,15 +481,15 @@ int main(int argc, char **argv)
|
||||
audio_st = add_audio_stream(oc, fmt->audio_codec);
|
||||
}
|
||||
|
||||
/* Now that all the parameters are set, we can open the audio and
|
||||
* video codecs and allocate the necessary encode buffers. */
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
/* now that all the parameters are set, we can open the audio and
|
||||
video codecs and allocate the necessary encode buffers */
|
||||
if (video_st)
|
||||
open_video(oc, video_st);
|
||||
if (audio_st)
|
||||
open_audio(oc, audio_st);
|
||||
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
/* open the output file, if needed */
|
||||
if (!(fmt->flags & AVFMT_NOFILE)) {
|
||||
if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
|
||||
@@ -486,20 +498,18 @@ int main(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the stream header, if any. */
|
||||
avformat_write_header(oc, NULL);
|
||||
|
||||
/* write the stream header, if any */
|
||||
av_write_header(oc);
|
||||
picture->pts = 0;
|
||||
for (;;) {
|
||||
/* Compute current audio and video time. */
|
||||
for(;;) {
|
||||
/* compute current audio and video time */
|
||||
if (audio_st)
|
||||
audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
|
||||
else
|
||||
audio_pts = 0.0;
|
||||
|
||||
if (video_st)
|
||||
video_pts = (double)video_st->pts.val * video_st->time_base.num /
|
||||
video_st->time_base.den;
|
||||
video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
|
||||
else
|
||||
video_pts = 0.0;
|
||||
|
||||
@@ -516,27 +526,28 @@ int main(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the trailer, if any. The trailer must be written before you
|
||||
* close the CodecContexts open when you wrote the header; otherwise
|
||||
* av_write_trailer() may try to use memory that was freed on
|
||||
* av_codec_close(). */
|
||||
/* write the trailer, if any. the trailer must be written
|
||||
* before you close the CodecContexts open when you wrote the
|
||||
* header; otherwise write_trailer may try to use memory that
|
||||
* was freed on av_codec_close() */
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
/* close each codec */
|
||||
if (video_st)
|
||||
close_video(oc, video_st);
|
||||
if (audio_st)
|
||||
close_audio(oc, audio_st);
|
||||
|
||||
/* Free the streams. */
|
||||
for (i = 0; i < oc->nb_streams; i++) {
|
||||
/* free the streams */
|
||||
for(i = 0; i < oc->nb_streams; i++) {
|
||||
av_freep(&oc->streams[i]->codec);
|
||||
av_freep(&oc->streams[i]);
|
||||
}
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
/* Close the output file. */
|
||||
if (!(fmt->flags & AVFMT_NOFILE)) {
|
||||
/* close the output file */
|
||||
avio_close(oc->pb);
|
||||
}
|
||||
|
||||
/* free the stream */
|
||||
av_free(oc);
|
||||
|
@@ -166,11 +166,9 @@ the synchronisation of the samples directory.
@item THREADS
Specify how many threads to use while running regression tests, it is
quite useful to detect thread-related regressions.
@item CPUFLAGS
Specify CPU flags.
@end table

Example:
@example
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
make V=1 SAMPLES=/var/fate/samples THREADS=2 fate
@end example

463 doc/ffmpeg.texi
@@ -143,7 +143,7 @@ Stop writing the output after its duration reaches @var{duration}.
|
||||
@var{duration} may be a number in seconds, or in @code{hh:mm:ss[.xxx]} form.
|
||||
|
||||
@item -fs @var{limit_size} (@emph{output})
|
||||
Set the file size limit, expressed in bytes.
|
||||
Set the file size limit.
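For example, to stop writing the output once it reaches roughly 10 MB (the input name and the exact byte count are only illustrative):
@example
ffmpeg -i input.avi -fs 10000000 output.avi
@end example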
|
||||
|
||||
@item -ss @var{position} (@emph{input/output})
|
||||
When used as an input option (before @code{-i}), seeks in this input file to
|
||||
@@ -164,7 +164,7 @@ streams are delayed by @var{offset} seconds.
|
||||
Set the recording timestamp in the container.
|
||||
The syntax for @var{time} is:
|
||||
@example
|
||||
now|([(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...])|(HHMMSS[.m...]))[Z|z])
|
||||
now|([(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH[:MM[:SS[.m...]]])|(HH[MM[SS[.m...]]]))[Z|z])
|
||||
@end example
|
||||
If the value is "now" it takes the current time.
|
||||
Time is local time unless 'Z' or 'z' is appended, in which case it is
|
||||
@@ -224,23 +224,12 @@ codec-dependent.
|
||||
@var{filter_graph} is a description of the filter graph to apply to
|
||||
the stream. Use @code{-filters} to show all the available filters
|
||||
(including also sources and sinks).
|
||||
|
||||
See also the @option{-filter_complex} option if you want to create filter graphs
|
||||
with multiple inputs and/or outputs.
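For instance, a simple single-input, single-output graph that rescales the video stream (file names are placeholders):
@example
ffmpeg -i input.mkv -filter:v "scale=640:360" output.mkv
@end example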
|
||||
@item -pre[:@var{stream_specifier}] @var{preset_name} (@emph{output,per-stream})
|
||||
Specify the preset for matching stream(s).
|
||||
|
||||
@item -stats (@emph{global})
|
||||
Print encoding progress/statistics. On by default.
|
||||
|
||||
@item -debug_ts (@emph{global})
|
||||
Print timestamp information. It is off by default. This option is
|
||||
mostly useful for testing and debugging purposes, and the output
|
||||
format may change from one version to another, so it should not be
|
||||
employed by portable scripts.
|
||||
|
||||
See also the option @code{-fdebug ts}.
|
||||
|
||||
@item -attach @var{filename} (@emph{output})
|
||||
Add an attachment to the output file. This is supported by a few formats
|
||||
like Matroska for e.g. fonts used in rendering subtitles. Attachments
|
||||
@@ -282,10 +271,70 @@ attachments.
|
||||
@item -vframes @var{number} (@emph{output})
|
||||
Set the number of video frames to record. This is an alias for @code{-frames:v}.
|
||||
@item -r[:@var{stream_specifier}] @var{fps} (@emph{input/output,per-stream})
|
||||
Set frame rate (Hz value, fraction or abbreviation), (default = 25). For output
|
||||
streams implies @code{-vsync cfr}.
|
||||
Set frame rate (Hz value, fraction or abbreviation), (default = 25).
|
||||
@item -s[:@var{stream_specifier}] @var{size} (@emph{input/output,per-stream})
|
||||
Set frame size. The format is @samp{wxh} (default - same as source).
|
||||
The following abbreviations are recognized:
|
||||
@table @samp
|
||||
@item sqcif
|
||||
128x96
|
||||
@item qcif
|
||||
176x144
|
||||
@item cif
|
||||
352x288
|
||||
@item 4cif
|
||||
704x576
|
||||
@item 16cif
|
||||
1408x1152
|
||||
@item qqvga
|
||||
160x120
|
||||
@item qvga
|
||||
320x240
|
||||
@item vga
|
||||
640x480
|
||||
@item svga
|
||||
800x600
|
||||
@item xga
|
||||
1024x768
|
||||
@item uxga
|
||||
1600x1200
|
||||
@item qxga
|
||||
2048x1536
|
||||
@item sxga
|
||||
1280x1024
|
||||
@item qsxga
|
||||
2560x2048
|
||||
@item hsxga
|
||||
5120x4096
|
||||
@item wvga
|
||||
852x480
|
||||
@item wxga
|
||||
1366x768
|
||||
@item wsxga
|
||||
1600x1024
|
||||
@item wuxga
|
||||
1920x1200
|
||||
@item woxga
|
||||
2560x1600
|
||||
@item wqsxga
|
||||
3200x2048
|
||||
@item wquxga
|
||||
3840x2400
|
||||
@item whsxga
|
||||
6400x4096
|
||||
@item whuxga
|
||||
7680x4800
|
||||
@item cga
|
||||
320x200
|
||||
@item ega
|
||||
640x350
|
||||
@item hd480
|
||||
852x480
|
||||
@item hd720
|
||||
1280x720
|
||||
@item hd1080
|
||||
1920x1080
|
||||
@end table
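As an illustration, one of the size abbreviations above can be combined with the frame rate option (file names are placeholders):
@example
ffmpeg -i input.mov -r 25 -s hd720 output.mp4
@end example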
|
||||
|
||||
@item -aspect[:@var{stream_specifier}] @var{aspect} (@emph{output,per-stream})
|
||||
Set the video display aspect ratio specified by @var{aspect}.
|
||||
@@ -312,7 +361,25 @@ pad=width:height:x:y:color instead.
|
||||
|
||||
@item -vn (@emph{output})
|
||||
Disable video recording.
|
||||
|
||||
@item -bt @var{tolerance}
|
||||
Set video bitrate tolerance (in bits, default 4000k).
|
||||
Has a minimum value of: (target_bitrate/target_framerate).
|
||||
In 1-pass mode, bitrate tolerance specifies how far ratecontrol is
|
||||
willing to deviate from the target average bitrate value. This is
|
||||
not related to min/max bitrate. Lowering tolerance too much has
|
||||
an adverse effect on quality.
|
||||
@item -maxrate @var{bitrate}
|
||||
Set max video bitrate (in bit/s).
|
||||
Requires -bufsize to be set.
|
||||
@item -minrate @var{bitrate}
|
||||
Set min video bitrate (in bit/s).
|
||||
Most useful in setting up a CBR encode:
|
||||
@example
|
||||
ffmpeg -i myfile.avi -b:v 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
|
||||
@end example
|
||||
It is of little use elsewise.
|
||||
@item -bufsize @var{size}
|
||||
Set video buffer verifier buffer size (in bits).
|
||||
@item -vcodec @var{codec} (@emph{output})
|
||||
Set the video codec. This is an alias for @code{-codec:v}.
|
||||
@item -same_quant
|
||||
@@ -361,29 +428,202 @@ also sources and sinks). This is an alias for @code{-filter:v}.
|
||||
@item -pix_fmt[:@var{stream_specifier}] @var{format} (@emph{input/output,per-stream})
|
||||
Set pixel format. Use @code{-pix_fmts} to show all the supported
|
||||
pixel formats.
|
||||
If the selected pixel format can not be selected, ffmpeg will print a
|
||||
warning and select the best pixel format supported by the encoder.
|
||||
If @var{pix_fmt} is prefixed by a @code{+}, ffmpeg will exit with an error
|
||||
if the requested pixel format can not be selected, and automatic conversions
|
||||
inside filter graphs are disabled.
|
||||
If @var{pix_fmt} is a single @code{+}, ffmpeg selects the same pixel format
|
||||
as the input (or graph output) and automatic conversions are disabled.
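For instance, to request a pixel format, or to fail hard if it cannot be used, per the @code{+} prefix described above (file names are placeholders):
@example
ffmpeg -i input.mov -pix_fmt yuv420p output.mp4
ffmpeg -i input.mov -pix_fmt +yuv420p output.mp4
@end example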
|
||||
|
||||
@item -sws_flags @var{flags} (@emph{input/output})
|
||||
Set SwScaler flags.
|
||||
@item -g @var{gop_size}
|
||||
Set the group of pictures size.
|
||||
@item -intra
|
||||
deprecated, use -g 1
|
||||
@item -vdt @var{n}
|
||||
Discard threshold.
|
||||
@item -qmin @var{q}
|
||||
minimum video quantizer scale (VBR)
|
||||
@item -qmax @var{q}
|
||||
maximum video quantizer scale (VBR)
|
||||
@item -qdiff @var{q}
|
||||
maximum difference between the quantizer scales (VBR)
|
||||
@item -qblur @var{blur}
|
||||
video quantizer scale blur (VBR) (range 0.0 - 1.0)
|
||||
@item -qcomp @var{compression}
|
||||
video quantizer scale compression (VBR) (default 0.5).
|
||||
Constant of ratecontrol equation. Recommended range for default rc_eq: 0.0-1.0
|
||||
|
||||
@item -lmin @var{lambda}
|
||||
minimum video lagrange factor (VBR)
|
||||
@item -lmax @var{lambda}
|
||||
max video lagrange factor (VBR)
|
||||
@item -mblmin @var{lambda}
|
||||
minimum macroblock quantizer scale (VBR)
|
||||
@item -mblmax @var{lambda}
|
||||
maximum macroblock quantizer scale (VBR)
|
||||
|
||||
These four options (lmin, lmax, mblmin, mblmax) use 'lambda' units,
|
||||
but you may use the QP2LAMBDA constant to easily convert from 'q' units:
|
||||
@example
|
||||
ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
|
||||
@end example
|
||||
|
||||
@item -rc_init_cplx @var{complexity}
|
||||
initial complexity for single pass encoding
|
||||
@item -b_qfactor @var{factor}
|
||||
qp factor between P- and B-frames
|
||||
@item -i_qfactor @var{factor}
|
||||
qp factor between P- and I-frames
|
||||
@item -b_qoffset @var{offset}
|
||||
qp offset between P- and B-frames
|
||||
@item -i_qoffset @var{offset}
|
||||
qp offset between P- and I-frames
|
||||
@item -rc_eq @var{equation}
|
||||
Set rate control equation (see section "Expression Evaluation")
|
||||
(default = @code{tex^qComp}).
|
||||
|
||||
When computing the rate control equation expression, besides the
|
||||
standard functions defined in the section "Expression Evaluation", the
|
||||
following functions are available:
|
||||
@table @var
|
||||
@item bits2qp(bits)
|
||||
@item qp2bits(qp)
|
||||
@end table
|
||||
|
||||
and the following constants are available:
|
||||
@table @var
|
||||
@item iTex
|
||||
@item pTex
|
||||
@item tex
|
||||
@item mv
|
||||
@item fCode
|
||||
@item iCount
|
||||
@item mcVar
|
||||
@item var
|
||||
@item isI
|
||||
@item isP
|
||||
@item isB
|
||||
@item avgQP
|
||||
@item qComp
|
||||
@item avgIITex
|
||||
@item avgPITex
|
||||
@item avgPPTex
|
||||
@item avgBPTex
|
||||
@item avgTex
|
||||
@end table
|
||||
|
||||
@item -rc_override[:@var{stream_specifier}] @var{override} (@emph{output,per-stream})
|
||||
Rate control override for specific intervals, formatted as "int,int,int"
|
||||
list separated with slashes. Two first values are the beginning and
|
||||
end frame numbers, last one is quantizer to use if positive, or quality
|
||||
factor if negative.
|
||||
@item -me_method @var{method}
|
||||
Set motion estimation method to @var{method}.
|
||||
Available methods are (from lowest to best quality):
|
||||
@table @samp
|
||||
@item zero
|
||||
Try just the (0, 0) vector.
|
||||
@item phods
|
||||
@item log
|
||||
@item x1
|
||||
@item hex
|
||||
@item umh
|
||||
@item epzs
|
||||
(default method)
|
||||
@item full
|
||||
exhaustive search (slow and marginally better than epzs)
|
||||
@end table
|
||||
|
||||
@item -dct_algo @var{algo}
|
||||
Set DCT algorithm to @var{algo}. Available values are:
|
||||
@table @samp
|
||||
@item 0
|
||||
FF_DCT_AUTO (default)
|
||||
@item 1
|
||||
FF_DCT_FASTINT
|
||||
@item 2
|
||||
FF_DCT_INT
|
||||
@item 3
|
||||
FF_DCT_MMX
|
||||
@item 4
|
||||
FF_DCT_MLIB
|
||||
@item 5
|
||||
FF_DCT_ALTIVEC
|
||||
@end table
|
||||
|
||||
@item -idct_algo @var{algo}
|
||||
Set IDCT algorithm to @var{algo}. Available values are:
|
||||
@table @samp
|
||||
@item 0
|
||||
FF_IDCT_AUTO (default)
|
||||
@item 1
|
||||
FF_IDCT_INT
|
||||
@item 2
|
||||
FF_IDCT_SIMPLE
|
||||
@item 3
|
||||
FF_IDCT_SIMPLEMMX
|
||||
@item 4
|
||||
FF_IDCT_LIBMPEG2MMX
|
||||
@item 5
|
||||
FF_IDCT_PS2
|
||||
@item 6
|
||||
FF_IDCT_MLIB
|
||||
@item 7
|
||||
FF_IDCT_ARM
|
||||
@item 8
|
||||
FF_IDCT_ALTIVEC
|
||||
@item 9
|
||||
FF_IDCT_SH4
|
||||
@item 10
|
||||
FF_IDCT_SIMPLEARM
|
||||
@end table
|
||||
|
||||
@item -er @var{n}
|
||||
Set error resilience to @var{n}.
|
||||
@table @samp
|
||||
@item 1
|
||||
FF_ER_CAREFUL (default)
|
||||
@item 2
|
||||
FF_ER_COMPLIANT
|
||||
@item 3
|
||||
FF_ER_AGGRESSIVE
|
||||
@item 4
|
||||
FF_ER_VERY_AGGRESSIVE
|
||||
@end table
|
||||
|
||||
@item -ec @var{bit_mask}
|
||||
Set error concealment to @var{bit_mask}. @var{bit_mask} is a bit mask of
|
||||
the following values:
|
||||
@table @samp
|
||||
@item 1
|
||||
FF_EC_GUESS_MVS (default = enabled)
|
||||
@item 2
|
||||
FF_EC_DEBLOCK (default = enabled)
|
||||
@end table
|
||||
|
||||
@item -bf @var{frames}
|
||||
Use 'frames' B-frames (supported for MPEG-1, MPEG-2 and MPEG-4).
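For example, to encode with two B-frames between reference frames (the codec choice and file names are only illustrative):
@example
ffmpeg -i input.avi -c:v mpeg4 -bf 2 output.avi
@end example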
|
||||
@item -mbd @var{mode}
|
||||
macroblock decision
|
||||
@table @samp
|
||||
@item 0
|
||||
FF_MB_DECISION_SIMPLE: Use mb_cmp (cannot change it yet in ffmpeg).
|
||||
@item 1
|
||||
FF_MB_DECISION_BITS: Choose the one which needs the fewest bits.
|
||||
@item 2
|
||||
FF_MB_DECISION_RD: rate distortion
|
||||
@end table
|
||||
|
||||
@item -4mv
|
||||
Use four motion vector by macroblock (MPEG-4 only).
|
||||
@item -part
|
||||
Use data partitioning (MPEG-4 only).
|
||||
@item -bug @var{param}
|
||||
Work around encoder bugs that are not auto-detected.
|
||||
@item -strict @var{strictness}
|
||||
How strictly to follow the standards.
|
||||
@item -aic
|
||||
Enable Advanced intra coding (h263+).
|
||||
@item -umv
|
||||
Enable Unlimited Motion Vector (h263+)
|
||||
|
||||
@item -deinterlace
|
||||
Deinterlace pictures.
|
||||
This option is deprecated since the deinterlacing is very low quality.
|
||||
Use the yadif filter with @code{-filter:v yadif}.
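A sketch of the suggested replacement, assuming an interlaced input file (file names are placeholders):
@example
ffmpeg -i interlaced.mpg -filter:v yadif output.mp4
@end example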
|
||||
@item -ilme
|
||||
Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
|
||||
Use this option if your input file is interlaced and you want
|
||||
@@ -442,11 +682,6 @@ Set the audio codec. This is an alias for @code{-codec:a}.
|
||||
@item -sample_fmt[:@var{stream_specifier}] @var{sample_fmt} (@emph{output,per-stream})
|
||||
Set the audio sample format. Use @code{-sample_fmts} to get a list
|
||||
of supported sample formats.
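For instance (the file names and the chosen sample format are placeholders):
@example
ffmpeg -i input.wav -sample_fmt s16 output.flac
@end example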
|
||||
@item -af @var{filter_graph} (@emph{output})
|
||||
@var{filter_graph} is a description of the filter graph to apply to
|
||||
the input audio.
|
||||
Use the option "-filters" to show all the available filters (including
|
||||
also sources and sinks). This is an alias for @code{-filter:a}.
|
||||
@end table
|
||||
|
||||
@section Advanced Audio options:
|
||||
@@ -454,6 +689,28 @@ also sources and sinks). This is an alias for @code{-filter:a}.
|
||||
@table @option
|
||||
@item -atag @var{fourcc/tag} (@emph{output})
|
||||
Force audio tag/fourcc. This is an alias for @code{-tag:a}.
|
||||
@item -audio_service_type @var{type}
|
||||
Set the type of service that the audio stream contains.
|
||||
@table @option
|
||||
@item ma
|
||||
Main Audio Service (default)
|
||||
@item ef
|
||||
Effects
|
||||
@item vi
|
||||
Visually Impaired
|
||||
@item hi
|
||||
Hearing Impaired
|
||||
@item di
|
||||
Dialogue
|
||||
@item co
|
||||
Commentary
|
||||
@item em
|
||||
Emergency
|
||||
@item vo
|
||||
Voice Over
|
||||
@item ka
|
||||
Karaoke
|
||||
@end table
|
||||
@item -absf @var{bitstream_filter}
|
||||
Deprecated, see -bsf
|
||||
@end table
|
||||
@@ -481,7 +738,7 @@ Synchronize read on input.
|
||||
@section Advanced options
|
||||
|
||||
@table @option
|
||||
@item -map [-]@var{input_file_id}[:@var{stream_specifier}][,@var{sync_file_id}[:@var{stream_specifier}]] | @var{[linklabel]} (@emph{output})
|
||||
@item -map [-]@var{input_file_id}[:@var{stream_specifier}][,@var{sync_file_id}[:@var{stream_specifier}]] (@emph{output})
|
||||
|
||||
Designate one or more input streams as a source for the output file. Each input
|
||||
stream is identified by the input file index @var{input_file_id} and
|
||||
@@ -497,10 +754,6 @@ the source for output stream 1, etc.
|
||||
A @code{-} character before the stream identifier creates a "negative" mapping.
|
||||
It disables matching streams from already created mappings.
|
||||
|
||||
An alternative @var{[linklabel]} form will map outputs from complex filter
|
||||
graphs (see the @option{-filter_complex} option) to the output file.
|
||||
@var{linklabel} must correspond to a defined output link label in the graph.
|
||||
|
||||
For example, to map ALL streams from the first input file to output
|
||||
@example
|
||||
ffmpeg -i INPUT -map 0 output
|
||||
@@ -538,7 +791,7 @@ Note that using this option disables the default mappings for this output file.
|
||||
|
||||
@item -map_channel [@var{input_file_id}.@var{stream_specifier}.@var{channel_id}|-1][:@var{output_file_id}.@var{stream_specifier}]
|
||||
Map an audio channel from a given input to an output. If
|
||||
@var{output_file_id}.@var{stream_specifier} is not set, the audio channel will
|
||||
@var{output_file_id}.@var{stream_specifier} are not set, the audio channel will
|
||||
be mapped on all the audio streams.
|
||||
|
||||
Using "-1" instead of
|
||||
@@ -560,18 +813,18 @@ The order of the "-map_channel" option specifies the order of the channels in
|
||||
the output stream. The output channel layout is guessed from the number of
|
||||
channels mapped (mono if one "-map_channel", stereo if two, etc.). Using "-ac"
|
||||
in combination of "-map_channel" makes the channel gain levels to be updated if
|
||||
input and output channel layouts don't match (for instance two "-map_channel"
|
||||
options and "-ac 6").
|
||||
channel layouts don't match (for instance two "-map_channel" options and "-ac
|
||||
6").
|
||||
|
||||
You can also extract each channel of an input to specific outputs; the following
|
||||
command extracts two channels of the @var{INPUT} audio stream (file 0, stream 0)
|
||||
to the respective @var{OUTPUT_CH0} and @var{OUTPUT_CH1} outputs:
|
||||
You can also extract each channel of an @var{INPUT} to specific outputs; the
|
||||
following command extract each channel of the audio stream (file 0, stream 0)
|
||||
to the respective @var{OUTPUT_CH0} and @var{OUTPUT_CH1}:
|
||||
@example
|
||||
ffmpeg -i INPUT -map_channel 0.0.0 OUTPUT_CH0 -map_channel 0.0.1 OUTPUT_CH1
|
||||
@end example
|
||||
|
||||
The following example splits the channels of a stereo input into two separate
|
||||
streams, which are put into the same output file:
|
||||
The following example split the channels of a stereo input into streams:
|
||||
|
||||
@example
|
||||
ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg
|
||||
@end example
|
||||
@@ -581,20 +834,9 @@ input stream; you can't for example use "-map_channel" to pick multiple input
|
||||
audio channels contained in different streams (from the same or different files)
|
||||
and merge them into a single output stream. It is therefore not currently
|
||||
possible, for example, to turn two separate mono streams into a single stereo
|
||||
stream. However splitting a stereo stream into two single channel mono streams
|
||||
stream. However spliting a stereo stream into two single channel mono streams
|
||||
is possible.
|
||||
|
||||
If you need this feature, a possible workaround is to use the @emph{amerge}
|
||||
filter. For example, if you need to merge a media (here @file{input.mkv}) with 2
|
||||
mono audio streams into one single stereo channel audio stream (and keep the
|
||||
video stream), you can use the following command:
|
||||
@example
|
||||
ffmpeg -i input.mkv -f lavfi -i "
|
||||
amovie=input.mkv:si=1 [a1];
|
||||
amovie=input.mkv:si=2 [a2];
|
||||
[a1][a2] amerge" -c:a pcm_s16le -c:v copy output.mkv
|
||||
@end example
|
||||
|
||||
@item -map_metadata[:@var{metadata_spec_out}] @var{infile}[:@var{metadata_spec_in}] (@emph{output,per-metadata})
|
||||
Set metadata information of the next output file from @var{infile}. Note that
|
||||
those are file indices (zero-based), not filenames.
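For example, to copy the global metadata of the first input file to the output (file names are placeholders):
@example
ffmpeg -i input.mkv -map_metadata 0 output.mp4
@end example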
|
||||
@@ -679,15 +921,14 @@ Show benchmarking information at the end of an encode.
|
||||
Shows CPU time used and maximum memory consumption.
|
||||
Maximum memory consumption is not supported on all systems,
|
||||
it will usually display as 0 if not supported.
|
||||
@item -benchmark_all (@emph{global})
|
||||
Show benchmarking information during the encode.
|
||||
Shows CPU time used in various steps (audio/video encode/decode).
|
||||
@item -timelimit @var{duration} (@emph{global})
|
||||
Exit after ffmpeg has been running for @var{duration} seconds.
|
||||
@item -dump (@emph{global})
|
||||
Dump each input packet to stderr.
|
||||
@item -hex (@emph{global})
|
||||
When dumping packets, also dump the payload.
|
||||
@item -ps @var{size}
|
||||
Set RTP payload size in bytes.
|
||||
@item -re (@emph{input})
|
||||
Read input at native frame rate. Mainly used to simulate a grab device.
|
||||
@item -loop_input
|
||||
@@ -698,10 +939,10 @@ This option is deprecated, use -loop 1.
|
||||
Repeatedly loop output for formats that support looping such as animated GIF
|
||||
(0 will loop the output infinitely).
|
||||
This option is deprecated, use -loop.
|
||||
@item -threads @var{count}
|
||||
Thread count.
|
||||
@item -vsync @var{parameter}
|
||||
Video sync method.
|
||||
For compatibility reasons old values can be specified as numbers.
|
||||
Newly added values will have to be specified as strings always.
|
||||
|
||||
@table @option
|
||||
@item 0, passthrough
|
||||
@@ -712,9 +953,6 @@ constant framerate.
|
||||
@item 2, vfr
|
||||
Frames are passed through with their timestamp or dropped so as to
|
||||
prevent 2 frames from having the same timestamp.
|
||||
@item drop
|
||||
As passthrough but destroys all timestamps, making the muxer generate
|
||||
fresh timestamps based on frame-rate.
|
||||
@item -1, auto
|
||||
Chooses between 1 and 2 depending on muxer capabilities. This is the
|
||||
default method.
|
||||
@@ -729,33 +967,10 @@ Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps
|
||||
the parameter is the maximum samples per second by which the audio is changed.
|
||||
-async 1 is a special case where only the start of the audio stream is corrected
|
||||
without any later correction.
|
||||
This option has been deprecated. Use the @code{asyncts} audio filter instead.
|
||||
@item -copyts
|
||||
Copy timestamps from input to output.
|
||||
@item -copytb @var{mode}
|
||||
Specify how to set the encoder timebase when stream copying. @var{mode} is an
|
||||
integer numeric value, and can assume one of the following values:
|
||||
|
||||
@table @option
|
||||
@item 1
|
||||
Use the demuxer timebase.
|
||||
|
||||
The time base is copied to the output encoder from the corresponding input
|
||||
demuxer. This is sometimes required to avoid non monotonically increasing
|
||||
timestamps when copying video streams with variable frame rate.
|
||||
|
||||
@item 0
|
||||
Use the decoder timebase.
|
||||
|
||||
The time base is copied to the output encoder from the corresponding input
|
||||
decoder.
|
||||
|
||||
@item -1
|
||||
Try to make the choice automatically, in order to generate a sane output.
|
||||
@end table
|
||||
|
||||
Default value is -1.
|
||||
|
||||
@item -copytb
|
||||
Copy input stream time base from input to output when stream copying.
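For example, with the newer form that takes a mode argument, copying the demuxer time base while stream copying (file names are placeholders):
@example
ffmpeg -i input.mkv -c copy -copytb 1 output.mkv
@end example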
|
||||
@item -shortest
|
||||
Finish encoding when the shortest input stream ends.
|
||||
@item -dts_delta_threshold
|
||||
@@ -781,10 +996,10 @@ Set bitstream filters for matching streams. @var{bistream_filters} is
|
||||
a comma-separated list of bitstream filters. Use the @code{-bsfs} option
|
||||
to get the list of bitstream filters.
|
||||
@example
|
||||
ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
|
||||
ffmpeg -i h264.mp4 -c:v copy -vbsf h264_mp4toannexb -an out.h264
|
||||
@end example
|
||||
@example
|
||||
ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
|
||||
ffmpeg -i file.mov -an -vn -sbsf mov2textsub -c:s copy -f rawvideo sub.txt
|
||||
@end example
|
||||
|
||||
@item -tag[:@var{stream_specifier}] @var{codec_tag} (@emph{per-stream})
|
||||
@@ -796,44 +1011,6 @@ Specify Timecode for writing. @var{SEP} is ':' for non drop timecode and ';'
|
||||
@example
|
||||
ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
|
||||
@end example
|
||||
|
||||
@item -filter_complex @var{filtergraph} (@emph{global})
|
||||
Define a complex filter graph, i.e. one with arbitrary number of inputs and/or
|
||||
outputs. For simple graphs -- those with one input and one output of the same
|
||||
type -- see the @option{-filter} options. @var{filtergraph} is a description of
|
||||
the filter graph, as described in @ref{Filtergraph syntax}.
|
||||
|
||||
Input link labels must refer to input streams using the
|
||||
@code{[file_index:stream_specifier]} syntax (i.e. the same as @option{-map}
|
||||
uses). If @var{stream_specifier} matches multiple streams, the first one will be
|
||||
used. An unlabeled input will be connected to the first unused input stream of
|
||||
the matching type.
|
||||
|
||||
Output link labels are referred to with @option{-map}. Unlabeled outputs are
|
||||
added to the first output file.
|
||||
|
||||
For example, to overlay an image over video
|
||||
@example
|
||||
ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
|
||||
'[out]' out.mkv
|
||||
@end example
|
||||
Here @code{[0:v]} refers to the first video stream in the first input file,
|
||||
which is linked to the first (main) input of the overlay filter. Similarly the
|
||||
first video stream in the second input is linked to the second (overlay) input
|
||||
of overlay.
|
||||
|
||||
Assuming there is only one video stream in each input file, we can omit input
|
||||
labels, so the above is equivalent to
|
||||
@example
|
||||
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map
|
||||
'[out]' out.mkv
|
||||
@end example
|
||||
|
||||
Furthermore we can omit the output label and the single output from the filter
|
||||
graph will be added to the output file automatically, so we can simply write
|
||||
@example
|
||||
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
|
||||
@end example
|
||||
@end table
|
||||
|
||||
@section Preset files
|
||||
@@ -895,7 +1072,7 @@ frame rate or decrease the frame size.
|
||||
@item
|
||||
If your computer is not fast enough, you can speed up the
|
||||
compression at the expense of the compression ratio. You can use
|
||||
'-me zero' to speed up motion estimation, and '-g 0' to disable
|
||||
'-me zero' to speed up motion estimation, and '-intra' to disable
|
||||
motion estimation completely (you have only I-frames, which means it
|
||||
is about as good as JPEG compression).
|
||||
|
||||
@@ -1084,18 +1261,6 @@ composed of three digits padded with zeroes to express the sequence
|
||||
number. It is the same syntax supported by the C printf function, but
|
||||
only formats accepting a normal integer are suitable.
|
||||
|
||||
When importing an image sequence, -i also supports expanding shell-like
|
||||
wildcard patterns (globbing) internally. To lower the chance of interfering
|
||||
with your actual file names and the shell's glob expansion, you are required
|
||||
to activate glob meta characters by prefixing them with a single @code{%}
|
||||
character, like in @code{foo-%*.jpeg}, @code{foo-%?%?%?.jpeg} or
|
||||
@code{foo-00%[234%]%*.jpeg}.
|
||||
If your filename actually contains a character sequence of a @code{%} character
|
||||
followed by a glob character, you must double the @code{%} character to escape
|
||||
it. Imagine your files begin with @code{%?-foo-}, then you could use a glob
|
||||
pattern like @code{%%?-foo-%*.jpeg}. For input patterns that could be both a
|
||||
printf or a glob pattern, ffmpeg will assume it is a glob pattern.
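For instance, reading a printf-style sequence and a glob pattern following the rules above (file names are placeholders):
@example
ffmpeg -f image2 -i 'foo-%03d.jpeg' out.avi
ffmpeg -f image2 -i 'foo-%*.jpeg' out.avi
@end example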
|
||||
|
||||
@item
|
||||
You can put many streams of the same type in the output:
|
||||
|
||||
@@ -1106,23 +1271,9 @@ ffmpeg -i test1.avi -i test2.avi -map 0.3 -map 0.2 -map 0.1 -map 0.0 -c copy tes
|
||||
The resulting output file @file{test12.avi} will contain first four streams from
|
||||
the input file in reverse order.
|
||||
|
||||
@item
|
||||
To force CBR video output:
|
||||
@example
|
||||
ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
|
||||
@end example
|
||||
|
||||
@item
|
||||
The four options lmin, lmax, mblmin and mblmax use 'lambda' units,
|
||||
but you may use the QP2LAMBDA constant to easily convert from 'q' units:
|
||||
@example
|
||||
ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
@c man end EXAMPLES
|
||||
|
||||
@include syntax.texi
|
||||
@include eval.texi
|
||||
@include decoders.texi
|
||||
@include encoders.texi
|
||||
|
@@ -29,7 +29,7 @@
|
||||
\ / :
|
||||
+======\======================/======+ ^ :
|
||||
------> 0 | : source_index : st-:--- | : :
|
||||
OutputFile output_files[] / +------------------------------------+ : :
|
||||
OuputFile output_files[] / +------------------------------------+ : :
|
||||
/ 1 | : : : | : :
|
||||
^ +------+------------+-----+ / +------------------------------------+ : :
|
||||
: | : ost_index -:-----:------/ 2 | : : : | : :
|
||||
|
@@ -178,7 +178,6 @@ Seek to percentage in file corresponding to fraction of width.
|
||||
|
||||
@c man end
|
||||
|
||||
@include syntax.texi
|
||||
@include eval.texi
|
||||
@include decoders.texi
|
||||
@include demuxers.texi
|
||||
|
@@ -106,11 +106,6 @@ stream.
|
||||
All the container format information is printed within a section with
|
||||
name "FORMAT".
|
||||
|
||||
@item -show_format_entry @var{name}
|
||||
Like @option{-show_format}, but only prints the specified entry of the
|
||||
container format information, rather than all. This option may be given more
|
||||
than once, then all specified entries will be shown.
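For instance, to print just the duration and bit rate of the container (the input name is a placeholder):
@example
ffprobe -show_format_entry duration -show_format_entry bit_rate input.mp4
@end example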
|
||||
|
||||
@item -show_packets
|
||||
Show information about each packet contained in the input multimedia
|
||||
stream.
|
||||
@@ -132,14 +127,6 @@ multimedia stream.
|
||||
Each media stream information is printed within a dedicated section
|
||||
with name "STREAM".
|
||||
|
||||
@item -count_frames
|
||||
Count the number of frames per stream and report it in the
|
||||
corresponding stream section.
|
||||
|
||||
@item -count_packets
|
||||
Count the number of packets per stream and report it in the
|
||||
corresponding stream section.
|
||||
|
||||
@item -show_private_data, -private
|
||||
Show private data, that is data depending on the format of the
|
||||
particular shown element.
|
||||
@@ -195,22 +182,6 @@ keyN=valN
|
||||
Metadata tags are printed as a line in the corresponding FORMAT or
|
||||
STREAM section, and are prefixed by the string "TAG:".
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by ":".
|
||||
|
||||
A description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item nokey, nk
|
||||
If set to 1 specify not to print the key of each field. Default value
|
||||
is 0.
|
||||
|
||||
@item noprint_wrappers, nw
|
||||
If set to 1 specify not to print the section header and footer.
|
||||
Default value is 0.
|
||||
@end table
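For instance, a hypothetical invocation that drops both the section wrappers and the keys (the input name is a placeholder):
@example
ffprobe -print_format default=nokey=1:noprint_wrappers=1 -show_format input.mp4
@end example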
|
||||
|
||||
@section compact
|
||||
Compact format.
|
||||
|
||||
@@ -294,10 +265,6 @@ XML based format.
|
||||
The XML output is described in the XML schema description file
|
||||
@file{ffprobe.xsd} installed in the FFmpeg datadir.
|
||||
|
||||
An updated version of the schema can be retrieved at the url
|
||||
@url{http://www.ffmpeg.org/schema/ffprobe.xsd}, which redirects to the
|
||||
latest schema committed into the FFmpeg development source code tree.
|
||||
|
||||
Note that the output issued will be compliant to the
|
||||
@file{ffprobe.xsd} schema only when no special global output options
|
||||
(@option{unit}, @option{prefix}, @option{byte_binary_prefix},
|
||||
@@ -324,31 +291,26 @@ This option automatically sets @option{fully_qualified} to 1.
|
||||
|
||||
For more information about the XML format, see
|
||||
@url{http://www.w3.org/XML/}.
|
||||
@c man end WRITERS
|
||||
|
||||
@chapter Timecode
|
||||
@c man begin TIMECODE
|
||||
|
||||
@command{ffprobe} supports Timecode extraction:
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
MPEG1/2 timecode is extracted from the GOP, and is available in the video
|
||||
@item MPEG1/2 timecode is extracted from the GOP, and is available in the video
|
||||
stream details (@option{-show_streams}, see @var{timecode}).
|
||||
|
||||
@item
|
||||
MOV timecode is extracted from tmcd track, so is available in the tmcd
|
||||
@item MOV timecode is extracted from tmcd track, so is available in the tmcd
|
||||
stream metadata (@option{-show_streams}, see @var{TAG:timecode}).
|
||||
|
||||
@item
|
||||
DV and GXF timecodes are available in format metadata
|
||||
@item DV and GXF timecodes are available in format metadata
|
||||
(@option{-show_format}, see @var{TAG:timecode}).
|
||||
|
||||
@end itemize
|
||||
@c man end TIMECODE
|
||||
|
||||
@include syntax.texi
|
||||
@c man end WRITERS
|
||||
|
||||
@include decoders.texi
|
||||
@include demuxers.texi
|
||||
@include protocols.texi
|
||||
|
@@ -110,10 +110,7 @@
      <xsd:attribute name="time_base" type="xsd:string" use="required"/>
      <xsd:attribute name="start_time" type="xsd:float"/>
      <xsd:attribute name="duration" type="xsd:float"/>
      <xsd:attribute name="bit_rate" type="xsd:int"/>
      <xsd:attribute name="nb_frames" type="xsd:int"/>
      <xsd:attribute name="nb_read_frames" type="xsd:int"/>
      <xsd:attribute name="nb_read_packets" type="xsd:int"/>
    </xsd:complexType>

    <xsd:complexType name="formatType">

@@ -373,3 +373,5 @@ ACL allow 192.168.0.0 192.168.255.255
<Redirect index.html>
    URL http://www.ffmpeg.org/
</Redirect>

|
@@ -1,116 +0,0 @@
|
||||
Filter design
|
||||
=============
|
||||
|
||||
This document explains guidelines that should be observed (or ignored with
|
||||
good reason) when writing filters for libavfilter.
|
||||
|
||||
In this document, the word “frame” indicates either a video frame or a group
|
||||
of audio samples, as stored in an AVFilterBuffer structure.
|
||||
|
||||
|
||||
Format negotiation
|
||||
==================
|
||||
|
||||
The query_formats method should set, for each input and each output links,
|
||||
the list supported formats.
|
||||
|
||||
For video links, that means pixel format. For audio links, that means
|
||||
channel layout, and sample format (the sample packing is implied by the
|
||||
sample format).
|
||||
|
||||
The lists are not just lists, they are references to shared objects. When
|
||||
the negotiation mechanism computes the intersection of the formats
|
||||
supported at each ends of a link, all references to both lists are
|
||||
replaced with a reference to the intersection. And when a single format is
|
||||
eventually chosen for a link amongst the remaining list, again, all
|
||||
references to the list are updated.
|
||||
|
||||
That means that if a filter requires that its input and output have the
|
||||
same format amongst a supported list, all it have to do is use a reference
|
||||
to the same list of formats.
|
||||
|
||||
|
||||
Buffer references ownership and permissions
|
||||
===========================================
|
||||
|
||||
TODO
|
||||
|
||||
|
||||
Frame scheduling
|
||||
================
|
||||
|
||||
The purpose of these rules is to ensure that frames flow in the filter
|
||||
graph without getting stuck and accumulating somewhere.
|
||||
|
||||
Simple filters that output one frame for each input frame should not have
|
||||
to worry about it.
|
||||
|
||||
start_frame / filter_samples
|
||||
----------------------------
|
||||
|
||||
These methods are called when a frame is pushed to the filter's input.
|
||||
They can be called at any time except in a reentrant way.
|
||||
|
||||
If the input frame is enough to produce output, then the filter should
|
||||
push the output frames on the output link immediately.
|
||||
|
||||
As an exception to the previous rule, if the input frame is enough to
|
||||
produce several output frames, then the filter needs output only at
|
||||
least one per link. The additional frames can be left buffered in the
|
||||
filter; these buffered frames must be flushed immediately if a new input
|
||||
produces new output.
|
||||
|
||||
(Example: framerate-doubling filter: start_frame must (1) flush the
|
||||
second copy of the previous frame, if it is still there, (2) push the
|
||||
first copy of the incoming frame, (3) keep the second copy for later.)
|
||||
|
||||
If the input frame is not enough to produce output, the filter must not
|
||||
call request_frame to get more. It must just process the frame or queue
|
||||
it. The task of requesting more frames is left to the filter's
|
||||
request_frame method or the application.
|
||||
|
||||
If a filter has several inputs, the filter must be ready for frames
|
||||
arriving randomly on any input. Therefore, any filter with several input
|
||||
will most likely require some kind of queuing mechanism. It is perfectly
|
||||
acceptable to have a limited queue and to drop frames when the inputs
|
||||
are too unbalanced.

request_frame
-------------

This method is called when a frame is wanted on an output.

For an input, it should directly call start_frame or filter_samples on
the corresponding output.

For a filter, if there are queued frames already ready, one of these
frames should be pushed. If not, the filter should request a frame on
one of its inputs, repeatedly until at least one frame has been pushed.

Return values:
if request_frame could produce a frame, it should return 0;
if it could not for temporary reasons, it should return AVERROR(EAGAIN);
if it could not because there are no more frames, it should return
AVERROR_EOF.

The typical implementation of request_frame for a filter with several
inputs will look like this:

    if (frames_queued) {
        push_one_frame();
        return 0;
    }
    while (!frame_pushed) {
        input = input_where_a_frame_is_most_needed();
        ret = avfilter_request_frame(input);
        if (ret == AVERROR_EOF) {
            process_eof_on_input();
        } else if (ret < 0) {
            return ret;
        }
    }
    return 0;

Note that, except for filters that can have queued frames, request_frame
does not push frames: it requests them from its inputs, and as a reaction,
the start_frame / filter_samples method will be called and do the work.
775 doc/filters.texi
File diff suppressed because it is too large
@@ -13,8 +13,7 @@
|
||||
|
||||
FFmpeg can be hooked up with a number of external libraries to add support
|
||||
for more formats. None of them are used by default, their use has to be
|
||||
explicitly requested by passing the appropriate flags to
|
||||
@command{./configure}.
|
||||
explicitly requested by passing the appropriate flags to @file{./configure}.
|
||||
|
||||
@section OpenJPEG
|
||||
|
||||
@@ -144,8 +143,6 @@ library:
|
||||
@tab Multimedia format used by Delphine Software games.
|
||||
@item CD+G @tab @tab X
|
||||
@tab Video format used by CD+G karaoke disks
|
||||
@item Commodore CDXL @tab @tab X
|
||||
@tab Amiga CD video format
|
||||
@item Core Audio Format @tab X @tab X
|
||||
@tab Apple Core Audio Format
|
||||
@item CRC testing format @tab X @tab
|
||||
@@ -211,7 +208,6 @@ library:
|
||||
@item MAXIS XA @tab @tab X
|
||||
@tab Used in Sim City 3000; file extension .xa.
|
||||
@item MD Studio @tab @tab X
|
||||
@item Metal Gear Solid: The Twin Snakes @tab @tab X
|
||||
@item Mobotix .mxg @tab @tab X
|
||||
@item Monkey's Audio @tab @tab X
|
||||
@item Motion Pixels MVI @tab @tab X
|
||||
@@ -353,7 +349,6 @@ library:
|
||||
@item eXtended BINary text (XBIN) @tab @tab X
|
||||
@item YUV4MPEG pipe @tab X @tab X
|
||||
@item Psygnosis YOP @tab @tab X
|
||||
@item ZeroCodec Lossless Video @tab @tab X
|
||||
@end multitable
|
||||
|
||||
@code{X} means that encoding (resp. decoding) is supported.
|
||||
@@ -373,8 +368,6 @@ following image formats are supported:
|
||||
@tab Microsoft BMP image
|
||||
@item DPX @tab X @tab X
|
||||
@tab Digital Picture Exchange
|
||||
@item EXR @tab @tab X
|
||||
@tab OpenEXR
|
||||
@item JPEG @tab X @tab X
|
||||
@tab Progressive JPEG is not supported.
|
||||
@item JPEG 2000 @tab X @tab X
|
||||
@@ -400,14 +393,12 @@ following image formats are supported:
|
||||
@tab V.Flash PTX format
|
||||
@item SGI @tab X @tab X
|
||||
@tab SGI RGB image format
|
||||
@item Sun Rasterfile @tab X @tab X
|
||||
@item Sun Rasterfile @tab @tab X
|
||||
@tab Sun RAS image format
|
||||
@item TIFF @tab X @tab X
|
||||
@tab YUV, JPEG and some extension is not supported yet.
|
||||
@item Truevision Targa @tab X @tab X
|
||||
@tab Targa (.TGA) image format
|
||||
@item XBM @tab X @tab X
|
||||
@tab X BitMap image format
|
||||
@item XWD @tab X @tab X
|
||||
@tab X Window Dump image format
|
||||
@end multitable
|
||||
@@ -453,8 +444,6 @@ following image formats are supported:
|
||||
@tab fourcc: AVrp
|
||||
@item AVS (Audio Video Standard) video @tab @tab X
|
||||
@tab Video encoding used by the Creature Shock game.
|
||||
@item AYUV @tab X @tab X
|
||||
@tab Microsoft uncompressed packed 4:4:4:4
|
||||
@item Beam Software VB @tab @tab X
|
||||
@item Bethesda VID video @tab @tab X
|
||||
@tab Used in some games from Bethesda Softworks.
|
||||
@@ -469,8 +458,6 @@ following image formats are supported:
|
||||
@tab fourcc: CSCD
|
||||
@item CD+G @tab @tab X
|
||||
@tab Video codec for CD+G karaoke disks
|
||||
@item CDXL @tab @tab X
|
||||
@tab Amiga CD video codec
|
||||
@item Chinese AVS video @tab E @tab X
|
||||
@tab AVS1-P2, JiZhun profile, encoding through external library libxavs
|
||||
@item Delphine Software International CIN video @tab @tab X
|
||||
@@ -483,7 +470,7 @@ following image formats are supported:
|
||||
@item DFA @tab @tab X
|
||||
@tab Codec used in Chronomaster game.
|
||||
@item Dirac @tab E @tab X
|
||||
@tab supported through external library libschroedinger
|
||||
@tab supported through external libdirac/libschroedinger libraries
|
||||
@item Deluxe Paint Animation @tab @tab X
|
||||
@item DNxHD @tab X @tab X
|
||||
@tab aka SMPTE VC3
|
||||
@@ -504,13 +491,12 @@ following image formats are supported:
|
||||
@item Escape 124 @tab @tab X
|
||||
@item Escape 130 @tab @tab X
|
||||
@item FFmpeg video codec #1 @tab X @tab X
|
||||
@tab lossless codec (fourcc: FFV1)
|
||||
@tab experimental lossless codec (fourcc: FFV1)
|
||||
@item Flash Screen Video v1 @tab X @tab X
|
||||
@tab fourcc: FSV1
|
||||
@item Flash Screen Video v2 @tab X @tab X
|
||||
@item Flash Video (FLV) @tab X @tab X
|
||||
@tab Sorenson H.263 used in Flash
|
||||
@item Forward Uncompressed @tab @tab X
|
||||
@item Fraps @tab @tab X
|
||||
@item H.261 @tab X @tab X
|
||||
@item H.263 / H.263-1996 @tab X @tab X
|
||||
@@ -626,7 +612,6 @@ following image formats are supported:
|
||||
@item Ut Video @tab @tab X
|
||||
@item v210 QuickTime uncompressed 4:2:2 10-bit @tab X @tab X
|
||||
@item v308 QuickTime uncompressed 4:4:4 @tab X @tab X
|
||||
@item v408 QuickTime uncompressed 4:4:4:4 @tab X @tab X
|
||||
@item v410 QuickTime uncompressed 4:4:4 10-bit @tab X @tab X
|
||||
@item VBLE Lossless Codec @tab @tab X
|
||||
@item VMware Screen Codec / VMware Video @tab @tab X
|
||||
@@ -796,7 +781,6 @@ following image formats are supported:
|
||||
@tab Real 28800 bit/s codec
|
||||
@item RealAudio 3.0 (dnet) @tab IX @tab X
|
||||
@tab Real low bitrate AC-3 codec
|
||||
@item RealAudio Lossless @tab @tab X
|
||||
@item RealAudio SIPR / ACELP.NET @tab @tab X
|
||||
@item Shorten @tab @tab X
|
||||
@item Sierra VMD audio @tab @tab X
|
||||
@@ -819,7 +803,6 @@ following image formats are supported:
|
||||
@item Westwood Audio (SND1) @tab @tab X
|
||||
@item Windows Media Audio 1 @tab X @tab X
|
||||
@item Windows Media Audio 2 @tab X @tab X
|
||||
@item Windows Media Audio Lossless @tab @tab X
|
||||
@item Windows Media Audio Pro @tab @tab X
|
||||
@item Windows Media Audio Voice @tab @tab X
|
||||
@end multitable
|
||||
@@ -838,8 +821,7 @@ performance on systems without hardware floating point support).
|
||||
@item SSA/ASS @tab X @tab X @tab X @tab X
|
||||
@item DVB @tab X @tab X @tab X @tab X
|
||||
@item DVD @tab X @tab X @tab X @tab X
|
||||
@item JACOsub @tab X @tab X @tab @tab X
|
||||
@item MicroDVD @tab X @tab X @tab @tab X
|
||||
@item MicroDVD @tab X @tab X @tab @tab
|
||||
@item PGS @tab @tab @tab @tab X
|
||||
@item SubRip (SRT) @tab X @tab X @tab X @tab X
|
||||
@item XSUB @tab @tab @tab X @tab X
|
||||
@@ -872,12 +854,11 @@ performance on systems without hardware floating point support).
|
||||
@item ALSA @tab X @tab X
|
||||
@item BKTR @tab X @tab
|
||||
@item DV1394 @tab X @tab
|
||||
@item Linux framebuffer @tab X @tab
|
||||
@item JACK @tab X @tab
|
||||
@item LIBCDIO @tab X
|
||||
@item LIBDC1394 @tab X @tab
|
||||
@item OSS @tab X @tab X
|
||||
@item Pulseaudio @tab X @tab
|
||||
@item Video4Linux @tab X @tab
|
||||
@item Video4Linux2 @tab X @tab
|
||||
@item VfW capture @tab X @tab
|
||||
@item X11 grabbing @tab X @tab
|
||||
@@ -893,7 +874,7 @@ performance on systems without hardware floating point support).
|
||||
@item GXF @tab X @tab X
|
||||
@item MOV @tab X @tab
|
||||
@item MPEG1/2 @tab X @tab X
|
||||
@item MXF @tab X @tab X
|
||||
@item MXF @tab @tab X
|
||||
@end multitable
|
||||
|
||||
@bye
|
||||
|
@@ -65,14 +65,6 @@ git clone git@@source.ffmpeg.org:ffmpeg <target>
|
||||
This will put the FFmpeg sources into the directory @var{<target>} and let
|
||||
you push back your changes to the remote repository.
|
||||
|
||||
Make sure that you do not have Windows line endings in your checkouts,
|
||||
otherwise you may experience spurious compilation failures. One way to
|
||||
achieve this is to run
|
||||
|
||||
@example
|
||||
git config --global core.autocrlf false
|
||||
@end example
|
||||
|
||||
|
||||
@section Updating the source tree to the latest revision
|
||||
|
||||
|
@@ -59,7 +59,7 @@ BSD video input device.
|
||||
|
||||
Windows DirectShow input device.
|
||||
|
||||
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
|
||||
DirectShow support is enabled when FFmpeg is built with mingw-w64.
|
||||
Currently only audio and video devices are supported.
|
||||
|
||||
Multiple devices may be opened as separate inputs, but they may also be
|
||||
@@ -504,9 +504,9 @@ command:
|
||||
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
|
||||
@end example
|
||||
|
||||
@section video4linux2
|
||||
@section video4linux and video4linux2
|
||||
|
||||
Video4Linux2 input video device.
|
||||
Video4Linux and Video4Linux2 input video devices.
|
||||
|
||||
The name of the device to grab is a file device node, usually Linux
|
||||
systems tend to automatically create such nodes when the device
|
||||
@@ -514,28 +514,36 @@ systems tend to automatically create such nodes when the device
|
||||
kind @file{/dev/video@var{N}}, where @var{N} is a number associated to
|
||||
the device.
|
||||
|
||||
Video4Linux2 devices usually support a limited set of
|
||||
Video4Linux and Video4Linux2 devices only support a limited set of
|
||||
@var{width}x@var{height} sizes and framerates. You can check which are
|
||||
supported using @command{-list_formats all} for Video4Linux2 devices.
|
||||
supported for example with the command @command{dov4l} for Video4Linux
|
||||
devices and using @command{-list_formats all} for Video4Linux2 devices.
|
||||
|
||||
Some usage examples of the video4linux2 devices with ffmpeg and ffplay:
|
||||
If the size for the device is set to 0x0, the input device will
|
||||
try to auto-detect the size to use.
|
||||
Only for the video4linux2 device, if the frame rate is set to 0/0 the
|
||||
input device will use the frame rate value already set in the driver.
|
||||
|
||||
The time base for the timestamps is 1 microsecond. Depending on the kernel
|
||||
version and configuration, the timestamps may be derived from the real time
|
||||
clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
|
||||
boot time, unaffected by NTP or manual changes to the clock). The
|
||||
@option{-timestamps abs} or @option{-ts abs} option can be used to force
|
||||
conversion into the real time clock.
|
||||
Video4Linux support is deprecated since Linux 2.6.30, and will be
|
||||
dropped in later versions.
|
||||
|
||||
Note that if FFmpeg is build with v4l-utils support ("--enable-libv4l2"
|
||||
option), it will always be used.
|
||||
@example
|
||||
# Grab and show the input of a video4linux2 device.
|
||||
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
|
||||
|
||||
# Grab and record the input of a video4linux2 device, leave the
|
||||
framerate and size as previously set.
|
||||
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
|
||||
Follow some usage examples of the video4linux devices with the ff*
|
||||
tools.
|
||||
@example
|
||||
# Grab and show the input of a video4linux device, frame rate is set
|
||||
# to the default of 25/1.
|
||||
ffplay -s 320x240 -f video4linux /dev/video0
|
||||
|
||||
# Grab and show the input of a video4linux2 device, auto-adjust size.
|
||||
ffplay -f video4linux2 /dev/video0
|
||||
|
||||
# Grab and record the input of a video4linux2 device, auto-adjust size,
|
||||
# frame rate value defaults to 0/0 so it is read from the video4linux2
|
||||
# driver.
|
||||
ffmpeg -f video4linux2 -i /dev/video0 out.mpeg
|
||||
@end example
|
||||
|
||||
"v4l" and "v4l2" can be used as aliases for the respective "video4linux" and
|
||||
|
@@ -24,7 +24,7 @@ a mail for every change to every issue.
|
||||
The subscription URL for the ffmpeg-trac list is:
|
||||
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
|
||||
The URL of the webinterface of the tracker is:
|
||||
http(s)://trac.ffmpeg.org
|
||||
http(s)://ffmpeg.org/trac/ffmpeg
|
||||
|
||||
Type:
|
||||
-----
|
||||
|
206 doc/muxers.texi
@@ -18,23 +18,6 @@ enabled muxers.
|
||||
|
||||
A description of some of the currently available muxers follows.
|
||||
|
||||
@anchor{aiff}
|
||||
@section aiff
|
||||
|
||||
Audio Interchange File Format muxer.
|
||||
|
||||
It accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item write_id3v2
|
||||
Enable ID3v2 tags writing when set to 1. Default is 0 (disabled).
|
||||
|
||||
@item id3v2_version
|
||||
Select ID3v2 version to write. Currently only version 3 and 4 (aka.
|
||||
ID3v2.3 and ID3v2.4) are supported. The default is version 4.
|
||||
|
||||
@end table
|
||||
|
||||
@anchor{crc}
|
||||
@section crc
|
||||
|
||||
@@ -73,37 +56,31 @@ See also the @ref{framecrc} muxer.
|
||||
@anchor{framecrc}
|
||||
@section framecrc
|
||||
|
||||
Per-packet CRC (Cyclic Redundancy Check) testing format.
|
||||
Per-frame CRC (Cyclic Redundancy Check) testing format.
|
||||
|
||||
This muxer computes and prints the Adler-32 CRC for each audio
|
||||
and video packet. By default audio frames are converted to signed
|
||||
This muxer computes and prints the Adler-32 CRC for each decoded audio
|
||||
and video frame. By default audio frames are converted to signed
|
||||
16-bit raw audio and video frames to raw video before computing the
|
||||
CRC.
|
||||
|
||||
The output of the muxer consists of a line for each audio and video
|
||||
packet of the form:
|
||||
@example
|
||||
@var{stream_index}, @var{packet_dts}, @var{packet_pts}, @var{packet_duration}, @var{packet_size}, 0x@var{CRC}
|
||||
@end example
|
||||
frame of the form: @var{stream_index}, @var{frame_dts},
|
||||
@var{frame_size}, 0x@var{CRC}, where @var{CRC} is a hexadecimal
|
||||
number 0-padded to 8 digits containing the CRC of the decoded frame.
|
||||
|
||||
@var{CRC} is a hexadecimal number 0-padded to 8 digits containing the
|
||||
CRC of the packet.
|
||||
|
||||
For example to compute the CRC of the audio and video frames in
|
||||
@file{INPUT}, converted to raw audio and video packets, and store it
|
||||
in the file @file{out.crc}:
|
||||
For example to compute the CRC of each decoded frame in the input, and
|
||||
store it in the file @file{out.crc}:
|
||||
@example
|
||||
ffmpeg -i INPUT -f framecrc out.crc
|
||||
@end example
|
||||
|
||||
To print the information to stdout, use the command:
|
||||
You can print the CRC of each decoded frame to stdout with the command:
|
||||
@example
|
||||
ffmpeg -i INPUT -f framecrc -
|
||||
@end example
|
||||
|
||||
With @command{ffmpeg}, you can select the output format to which the
|
||||
audio and video frames are encoded before computing the CRC for each
|
||||
packet by specifying the audio and video codec. For example, to
|
||||
You can select the output format of each frame with @command{ffmpeg} by
|
||||
specifying the audio and video codec and format. For example, to
|
||||
compute the CRC of each decoded input audio frame converted to PCM
|
||||
unsigned 8-bit and of each decoded input video frame converted to
|
||||
MPEG-2 video, use the command:
|
||||
@@ -113,39 +90,6 @@ ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -
|
||||
|
||||
See also the @ref{crc} muxer.
|
||||
|
||||
@anchor{framemd5}
|
||||
@section framemd5
|
||||
|
||||
Per-packet MD5 testing format.
|
||||
|
||||
This muxer computes and prints the MD5 hash for each audio
|
||||
and video packet. By default audio frames are converted to signed
|
||||
16-bit raw audio and video frames to raw video before computing the
|
||||
hash.
|
||||
|
||||
The output of the muxer consists of a line for each audio and video
|
||||
packet of the form:
|
||||
@example
|
||||
@var{stream_index}, @var{packet_dts}, @var{packet_pts}, @var{packet_duration}, @var{packet_size}, @var{MD5}
|
||||
@end example
|
||||
|
||||
@var{MD5} is a hexadecimal number representing the computed MD5 hash
|
||||
for the packet.
|
||||
|
||||
For example to compute the MD5 of the audio and video frames in
|
||||
@file{INPUT}, converted to raw audio and video packets, and store it
|
||||
in the file @file{out.md5}:
|
||||
@example
|
||||
ffmpeg -i INPUT -f framemd5 out.md5
|
||||
@end example
|
||||
|
||||
To print the information to stdout, use the command:
|
||||
@example
|
||||
ffmpeg -i INPUT -f framemd5 -
|
||||
@end example
|
||||
|
||||
See also the @ref{md5} muxer.
|
||||
|
||||
@anchor{image2}
|
||||
@section image2
|
||||
|
||||
@@ -204,104 +148,18 @@ each of the YUV420P components. To read or write this image file format,
|
||||
specify the name of the '.Y' file. The muxer will automatically open the
|
||||
'.U' and '.V' files as required.
|
||||
|
||||
@anchor{md5}
|
||||
@section md5
|
||||
@section mov
|
||||
|
||||
MD5 testing format.
|
||||
MOV / MP4 muxer
|
||||
|
||||
This muxer computes and prints the MD5 hash of all the input audio
|
||||
and video frames. By default audio frames are converted to signed
|
||||
16-bit raw audio and video frames to raw video before computing the
|
||||
hash.
|
||||
|
||||
The output of the muxer consists of a single line of the form:
|
||||
MD5=@var{MD5}, where @var{MD5} is a hexadecimal number representing
|
||||
the computed MD5 hash.
|
||||
|
||||
For example to compute the MD5 hash of the input converted to raw
|
||||
audio and video, and store it in the file @file{out.md5}:
|
||||
@example
|
||||
ffmpeg -i INPUT -f md5 out.md5
|
||||
@end example
|
||||
|
||||
You can print the MD5 to stdout with the command:
|
||||
@example
|
||||
ffmpeg -i INPUT -f md5 -
|
||||
@end example
|
||||
|
||||
See also the @ref{framemd5} muxer.
|
||||
|
||||
@section MOV/MP4/ISMV
|
||||
|
||||
The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4
|
||||
file has all the metadata about all packets stored in one location
|
||||
(written at the end of the file, it can be moved to the start for
|
||||
better playback using the @command{qt-faststart} tool). A fragmented
|
||||
file consists of a number of fragments, where packets and metadata
|
||||
about these packets are stored together. Writing a fragmented
|
||||
file has the advantage that the file is decodable even if the
|
||||
writing is interrupted (while a normal MOV/MP4 is undecodable if
|
||||
it is not properly finished), and it requires less memory when writing
|
||||
very long files (since writing normal MOV/MP4 files stores info about
|
||||
every single packet in memory until the file is closed). The downside
|
||||
is that it is less compatible with other applications.
|
||||
|
||||
Fragmentation is enabled by setting one of the AVOptions that define
|
||||
how to cut the file into fragments:
|
||||
The muxer options are:
|
||||
|
||||
@table @option
|
||||
@item -moov_size @var{bytes}
|
||||
Reserves space for the moov atom at the beginning of the file instead of placing the
|
||||
moov atom at the end. If the space reserved is insufficient, muxing will fail.
|
||||
@item -movflags frag_keyframe
|
||||
Start a new fragment at each video keyframe.
|
||||
@item -frag_duration @var{duration}
|
||||
Create fragments that are @var{duration} microseconds long.
|
||||
@item -frag_size @var{size}
|
||||
Create fragments that contain up to @var{size} bytes of payload data.
|
||||
@item -movflags frag_custom
|
||||
Allow the caller to manually choose when to cut fragments, by
|
||||
calling @code{av_write_frame(ctx, NULL)} to write a fragment with
|
||||
the packets written so far. (This is only useful with other
|
||||
applications integrating libavformat, not from @command{ffmpeg}.)
|
||||
@item -min_frag_duration @var{duration}
|
||||
Don't create fragments that are shorter than @var{duration} microseconds long.
|
||||
@end table
|
||||
|
||||
If more than one condition is specified, fragments are cut when
|
||||
one of the specified conditions is fulfilled. The exception to this is
|
||||
@code{-min_frag_duration}, which has to be fulfilled for any of the other
|
||||
conditions to apply.
|
||||
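
For example, to cut a new fragment at each video keyframe and additionally
whenever a fragment grows to about 5 seconds, a command along these lines
could be used (a sketch combining only the options described above; the
input and output names are placeholders):

@example
ffmpeg -i INPUT -c copy -movflags frag_keyframe -frag_duration 5000000 out.mp4
@end example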
|
||||
Additionally, the way the output file is written can be adjusted
|
||||
through a few other options:
|
||||
|
||||
@table @option
|
||||
@item -movflags empty_moov
|
||||
Write an initial moov atom directly at the start of the file, without
|
||||
describing any samples in it. Generally, an mdat/moov pair is written
|
||||
at the start of the file, as a normal MOV/MP4 file, containing only
|
||||
a short portion of the file. With this option set, there is no initial
|
||||
mdat atom, and the moov atom only describes the tracks but has
|
||||
a zero duration.
|
||||
|
||||
Files written with this option set do not work in QuickTime.
|
||||
This option is implicitly set when writing ismv (Smooth Streaming) files.
|
||||
@item -movflags separate_moof
|
||||
Write a separate moof (movie fragment) atom for each track. Normally,
|
||||
packets for all tracks are written in a moof atom (which is slightly
|
||||
more efficient), but with this option set, the muxer writes one moof/mdat
|
||||
pair for each track, making it easier to separate tracks.
|
||||
|
||||
This option is implicitly set when writing ismv (Smooth Streaming) files.
|
||||
@end table
|
||||
|
||||
Smooth Streaming content can be pushed in real time to a publishing
|
||||
point on IIS with this muxer. Example:
|
||||
@example
|
||||
ffmpeg -re @var{<normal input/transcoding options>} -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1)
|
||||
@end example
|
||||
|
||||
@section mpegts
|
||||
|
||||
MPEG transport stream muxer.
|
||||
@@ -452,47 +310,11 @@ Set segment duration to @var{t} seconds.
|
||||
Generate also a listfile named @var{name}.
|
||||
@item segment_list_size @var{size}
|
||||
Overwrite the listfile once it reaches @var{size} entries.
|
||||
@item segment_wrap @var{limit}
|
||||
Wrap around segment index once it reaches @var{limit}.
|
||||
@end table
|
||||
|
||||
@example
|
||||
ffmpeg -i in.mkv -c copy -map 0 -f segment -list out.list out%03d.nut
|
||||
@end example
|
||||
|
||||
@section mp3
|
||||
|
||||
The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and
|
||||
optionally an ID3v1 tag at the end. ID3v2.3 and ID3v2.4 are supported, the
|
||||
@code{id3v2_version} option controls which one is used. The legacy ID3v1 tag is
|
||||
not written by default, but may be enabled with the @code{write_id3v1} option.
|
||||
|
||||
For seekable output the muxer also writes a Xing frame at the beginning, which
|
||||
contains the number of frames in the file. It is useful for computing duration
|
||||
of VBR files.
|
||||
|
||||
The muxer supports writing ID3v2 attached pictures (APIC frames). The pictures
|
||||
are supplied to the muxer in form of a video stream with a single packet. There
|
||||
can be any number of those streams, each will correspond to a single APIC frame.
|
||||
The stream metadata tags @var{title} and @var{comment} map to APIC
|
||||
@var{description} and @var{picture type} respectively. See
|
||||
@url{http://id3.org/id3v2.4.0-frames} for allowed picture types.
|
||||
|
||||
Note that the APIC frames must be written at the beginning, so the muxer will
|
||||
buffer the audio frames until it gets all the pictures. It is therefore advised
|
||||
to provide the pictures as soon as possible to avoid excessive buffering.
|
||||
|
||||
Examples:
|
||||
|
||||
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
|
||||
@example
|
||||
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
|
||||
@end example
|
||||
|
||||
Attach a picture to an mp3:
|
||||
@example
|
||||
ffmpeg -i input.mp3 -i cover.png -c copy -metadata:s:v title="Album cover"
|
||||
-metadata:s:v comment="Cover (Front)" out.mp3
|
||||
@end example
|
||||
|
||||
@c man end MUXERS
|
||||
|
@@ -27,11 +27,11 @@ to configure.
|
||||
@section BSD
|
||||
|
||||
BSD make will not build FFmpeg, you need to install and use GNU Make
|
||||
(@command{gmake}).
|
||||
(@file{gmake}).
|
||||
|
||||
@section (Open)Solaris
|
||||
|
||||
GNU Make is required to build FFmpeg, so you have to invoke (@command{gmake}),
|
||||
GNU Make is required to build FFmpeg, so you have to invoke (@file{gmake}),
|
||||
standard Solaris Make will not work. When building with a non-c99 front-end
|
||||
(gcc, generic suncc) add either @code{--extra-libs=/usr/lib/values-xpg6.o}
|
||||
or @code{--extra-libs=/usr/lib/64/values-xpg6.o} to the configure options
|
||||
@@ -89,7 +89,7 @@ section and the FAQ.
|
||||
|
||||
FFmpeg does not build out-of-the-box with the packages the automated MinGW
|
||||
installer provides. It also requires coreutils to be installed and many other
|
||||
packages updated to the latest version. The minimum versions for some packages
|
||||
packages updated to the latest version. The minimum version for some packages
|
||||
are listed below:
|
||||
|
||||
@itemize
|
||||
@@ -109,11 +109,14 @@ Notes:
|
||||
@item Building natively using MSYS can be sped up by disabling implicit rules
|
||||
in the Makefile by calling @code{make -r} instead of plain @code{make}. This
|
||||
speed up is close to non-existent for normal one-off builds and is only
|
||||
noticeable when running make for a second time (for example during
|
||||
noticeable when running make for a second time (for example in
|
||||
@code{make install}).
|
||||
|
||||
@item In order to compile FFplay, you must have the MinGW development library
|
||||
of @uref{http://www.libsdl.org/, SDL} and @code{pkg-config} installed.
|
||||
of @uref{http://www.libsdl.org/, SDL}.
|
||||
Edit the @file{bin/sdl-config} script so that it points to the correct prefix
|
||||
where SDL was installed. Verify that @file{sdl-config} can be launched from
|
||||
the MSYS command line.
|
||||
|
||||
@item By using @code{./configure --enable-shared} when configuring FFmpeg,
|
||||
you can build the FFmpeg libraries (e.g. libavutil, libavcodec,
|
||||
@@ -137,7 +140,7 @@ you might have to modify the procedures slightly.
|
||||
|
||||
@subsection Using static libraries
|
||||
|
||||
Assuming you have just built and installed FFmpeg in @file{/usr/local}:
|
||||
Assuming you have just built and installed FFmpeg in @file{/usr/local}.
|
||||
|
||||
@enumerate
|
||||
|
||||
@@ -272,7 +275,7 @@ To create import libraries that work with the @code{/OPT:REF} option
|
||||
|
||||
@enumerate
|
||||
|
||||
@item Open @emph{Visual Studio 2005 Command Prompt}.
|
||||
@item Open @file{Visual Studio 2005 Command Prompt}.
|
||||
|
||||
Alternatively, in a normal command line prompt, call @file{vcvars32.bat}
|
||||
which sets up the environment variables for the Visual C++ tools
|
||||
@@ -282,14 +285,17 @@ which sets up the environment variables for the Visual C++ tools
|
||||
@item Enter the @file{bin} directory where the created LIB and DLL files
|
||||
are stored.
|
||||
|
||||
@item Generate new import libraries with @command{lib.exe}:
|
||||
@item Generate new import libraries with @file{lib.exe}:
|
||||
|
||||
@example
|
||||
lib /machine:i386 /def:..\lib\foo-version.def /out:foo.lib
|
||||
lib /machine:i386 /def:..\lib\avcodec-53.def /out:avcodec.lib
|
||||
lib /machine:i386 /def:..\lib\avdevice-53.def /out:avdevice.lib
|
||||
lib /machine:i386 /def:..\lib\avfilter-2.def /out:avfilter.lib
|
||||
lib /machine:i386 /def:..\lib\avformat-53.def /out:avformat.lib
|
||||
lib /machine:i386 /def:..\lib\avutil-51.def /out:avutil.lib
|
||||
lib /machine:i386 /def:..\lib\swscale-2.def /out:swscale.lib
|
||||
@end example
|
||||
|
||||
Replace @code{foo-version} and @code{foo} with the respective library names.
|
||||
|
||||
@end enumerate
|
||||
|
||||
@anchor{Cross compilation for Windows with Linux}
|
||||
@@ -331,8 +337,8 @@ Then run
|
||||
|
||||
to make a static build.
|
||||
|
||||
To build shared libraries add a special compiler flag to work around current
|
||||
@code{gcc4-core} package bugs in addition to the normal configure flags:
|
||||
The current @code{gcc4-core} package is buggy and needs this flag to build
|
||||
shared libraries:
|
||||
|
||||
@example
|
||||
./configure --enable-shared --disable-static --extra-cflags=-fno-reorder-functions
|
||||
@@ -348,12 +354,16 @@ These library packages are only available from
|
||||
@uref{http://sourceware.org/cygwinports/, Cygwin Ports}:
|
||||
|
||||
@example
|
||||
yasm, libSDL-devel, libfaac-devel, libaacplus-devel, libgsm-devel, libmp3lame-devel,
|
||||
libschroedinger1.0-devel, speex-devel, libtheora-devel, libxvidcore-devel
|
||||
yasm, libSDL-devel, libdirac-devel, libfaac-devel, libaacplus-devel, libgsm-devel,
|
||||
libmp3lame-devel, libschroedinger1.0-devel, speex-devel, libtheora-devel,
|
||||
libxvidcore-devel
|
||||
@end example
|
||||
|
||||
The recommendation for x264 is to build it from source, as it evolves too
|
||||
quickly for Cygwin Ports to be up to date.
|
||||
The recommendation for libnut and x264 is to build them from source by
|
||||
yourself, as they evolve too quickly for Cygwin Ports to be up to date.
|
||||
|
||||
Cygwin 1.7.x has IPv6 support. You can add IPv6 to Cygwin 1.5.x by means
|
||||
of the @code{libgetaddrinfo-devel} package, available at Cygwin Ports.
|
||||
|
||||
@section Crosscompilation for Windows under Cygwin
|
||||
|
||||
|
@@ -1,125 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Anton Khirnov
|
||||
*
|
||||
* This file is part of Libav.
|
||||
*
|
||||
* Libav is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* Libav is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with Libav; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/*
|
||||
* generate texinfo manpages for avoptions
|
||||
*/
|
||||
|
||||
#include <stddef.h>
|
||||
#include <string.h>
|
||||
#include <float.h>
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "libavutil/opt.h"
|
||||
|
||||
static void print_usage(void)
|
||||
{
|
||||
fprintf(stderr, "Usage: enum_options type\n"
|
||||
"type: format codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
static void print_option(const AVOption *opts, const AVOption *o, int per_stream)
|
||||
{
|
||||
printf("@item -%s%s @var{", o->name, per_stream ? "[:stream_specifier]" : "");
|
||||
switch (o->type) {
|
||||
case AV_OPT_TYPE_BINARY: printf("hexadecimal string"); break;
|
||||
case AV_OPT_TYPE_STRING: printf("string"); break;
|
||||
case AV_OPT_TYPE_INT:
|
||||
case AV_OPT_TYPE_INT64: printf("integer"); break;
|
||||
case AV_OPT_TYPE_FLOAT:
|
||||
case AV_OPT_TYPE_DOUBLE: printf("float"); break;
|
||||
case AV_OPT_TYPE_RATIONAL: printf("rational number"); break;
|
||||
case AV_OPT_TYPE_FLAGS: printf("flags"); break;
|
||||
default: printf("value"); break;
|
||||
}
|
||||
printf("} (@emph{");
|
||||
|
||||
if (o->flags & AV_OPT_FLAG_DECODING_PARAM) {
|
||||
printf("input");
|
||||
if (o->flags & AV_OPT_FLAG_ENCODING_PARAM)
|
||||
printf("/");
|
||||
}
|
||||
if (o->flags & AV_OPT_FLAG_ENCODING_PARAM) printf("output");
|
||||
if (o->flags & AV_OPT_FLAG_AUDIO_PARAM) printf(",audio");
|
||||
if (o->flags & AV_OPT_FLAG_VIDEO_PARAM) printf(",video");
|
||||
if (o->flags & AV_OPT_FLAG_SUBTITLE_PARAM) printf(",subtitles");
|
||||
|
||||
printf("})\n");
|
||||
if (o->help)
|
||||
printf("%s\n", o->help);
|
||||
|
||||
if (o->unit) {
|
||||
const AVOption *u;
|
||||
printf("\nPossible values:\n@table @samp\n");
|
||||
|
||||
for (u = opts; u->name; u++) {
|
||||
if (u->type == AV_OPT_TYPE_CONST && u->unit && !strcmp(u->unit, o->unit))
|
||||
printf("@item %s\n%s\n", u->name, u->help ? u->help : "");
|
||||
}
|
||||
printf("@end table\n");
|
||||
}
|
||||
}
|
||||
|
||||
static void show_opts(const AVOption *opts, int per_stream)
|
||||
{
|
||||
const AVOption *o;
|
||||
|
||||
printf("@table @option\n");
|
||||
for (o = opts; o->name; o++) {
|
||||
if (o->type != AV_OPT_TYPE_CONST)
|
||||
print_option(opts, o, per_stream);
|
||||
}
|
||||
printf("@end table\n");
|
||||
}
|
||||
|
||||
static void show_format_opts(void)
|
||||
{
|
||||
#include "libavformat/options_table.h"
|
||||
|
||||
printf("@section Format AVOptions\n");
|
||||
show_opts(options, 0);
|
||||
}
|
||||
|
||||
static void show_codec_opts(void)
|
||||
{
|
||||
#include "libavcodec/options_table.h"
|
||||
|
||||
printf("@section Codec AVOptions\n");
|
||||
show_opts(options, 1);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
if (argc < 2)
|
||||
print_usage();
|
||||
|
||||
printf("@c DO NOT EDIT THIS FILE!\n"
|
||||
"@c It was generated by print_options.\n\n");
|
||||
if (!strcmp(argv[1], "format"))
|
||||
show_format_opts();
|
||||
else if (!strcmp(argv[1], "codec"))
|
||||
show_codec_opts();
|
||||
else
|
||||
print_usage();
|
||||
|
||||
return 0;
|
||||
}
|
@@ -19,34 +19,20 @@ supported protocols.
|
||||
|
||||
A description of the currently available protocols follows.
|
||||
|
||||
@section bluray
|
||||
@section applehttp
|
||||
|
||||
Read BluRay playlist.
|
||||
Read Apple HTTP Live Streaming compliant segmented stream as
|
||||
a uniform one. The M3U8 playlists describing the segments can be
|
||||
remote HTTP resources or local files, accessed using the standard
|
||||
file protocol.
|
||||
HTTP is default, specific protocol can be declared by specifying
|
||||
"+@var{proto}" after the applehttp URI scheme name, where @var{proto}
|
||||
is either "file" or "http".
|
||||
|
||||
The accepted options are:
|
||||
@table @option
|
||||
|
||||
@item angle
|
||||
BluRay angle
|
||||
|
||||
@item chapter
|
||||
Start chapter (1...N)
|
||||
|
||||
@item playlist
|
||||
Playlist to read (BDMV/PLAYLIST/?????.mpls)
|
||||
|
||||
@end table
|
||||
|
||||
Examples:
|
||||
|
||||
Read longest playlist from BluRay mounted to /mnt/bluray:
|
||||
@example
|
||||
bluray:/mnt/bluray
|
||||
@end example
|
||||
|
||||
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
|
||||
@example
|
||||
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
|
||||
applehttp://host/path/to/remote/resource.m3u8
|
||||
applehttp+http://host/path/to/remote/resource.m3u8
|
||||
applehttp+file://path/to/local/resource.m3u8
|
||||
@end example
|
||||
|
||||
@section concat
|
||||
@@ -95,26 +81,6 @@ specified with the name "FILE.mpeg" is interpreted as the URL
|
||||
|
||||
Gopher protocol.
|
||||
|
||||
@section hls
|
||||
|
||||
Read Apple HTTP Live Streaming compliant segmented stream as
|
||||
a uniform one. The M3U8 playlists describing the segments can be
|
||||
remote HTTP resources or local files, accessed using the standard
|
||||
file protocol.
|
||||
The nested protocol is declared by specifying
|
||||
"+@var{proto}" after the hls URI scheme name, where @var{proto}
|
||||
is either "file" or "http".
|
||||
|
||||
@example
|
||||
hls+http://host/path/to/remote/resource.m3u8
|
||||
hls+file://path/to/local/resource.m3u8
|
||||
@end example
|
||||
|
||||
Using this protocol is discouraged - the hls demuxer should work
|
||||
just as well (if not, please report the issues) and is more complete.
|
||||
To use the hls demuxer instead, simply use the direct URLs to the
|
||||
m3u8 files.
|
||||
|
||||
@section http
|
||||
|
||||
HTTP (Hyper Text Transfer Protocol).
|
||||
@@ -318,9 +284,9 @@ Accept packets only from negotiated peer address and port.
|
||||
@end table
|
||||
|
||||
When receiving data over UDP, the demuxer tries to reorder received packets
|
||||
(since they may arrive out of order, or packets may get lost totally). This
|
||||
can be disabled by setting the maximum demuxing delay to zero (via
|
||||
the @code{max_delay} field of AVFormatContext).
|
||||
(since they may arrive out of order, or packets may get lost totally). In
|
||||
order for this to be enabled, a maximum delay must be specified in the
|
||||
@code{max_delay} field of AVFormatContext.
|
||||
|
||||
When watching multi-bitrate Real-RTSP streams with @command{ffplay}, the
|
||||
streams to display can be chosen with @code{-vst} @var{n} and
|
||||
|
@@ -32,7 +32,7 @@ Special Converter v
|
||||
Output
|
||||
|
||||
Planar/Packed convertion is done when needed during sample format convertion
|
||||
Every step can be skipped without memcpy when its not needed.
|
||||
Every step can be skiped without memcpy when its not needed.
|
||||
Either Resampling and Rematrixing can be performed first depending on which
|
||||
way its faster.
|
||||
The Buffers are needed for resampling due to resamplng being a process that
|
||||
|
@@ -96,3 +96,4 @@ would benefit from it.
|
||||
Also, as already hinted at, initFilter() accepts an optional convolutional
|
||||
filter as input that can be used for contrast, saturation, blur, sharpening
|
||||
shift, chroma vs. luma shift, ...
|
||||
|
||||
|
158 doc/syntax.texi
@@ -1,158 +0,0 @@
|
||||
@chapter Syntax
|
||||
@c man begin SYNTAX
|
||||
|
||||
When evaluating specific formats, FFmpeg uses internal library parsing
|
||||
functions, shared by the tools. This section documents the syntax of
|
||||
some of these formats.
|
||||
|
||||
@anchor{date syntax}
|
||||
@section Date
|
||||
|
||||
The accepted syntax is:
|
||||
@example
|
||||
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
|
||||
now
|
||||
@end example
|
||||
|
||||
If the value is "now" it takes the current time.
|
||||
|
||||
Time is local time unless Z is appended, in which case it is
|
||||
interpreted as UTC.
|
||||
If the year-month-day part is not specified it takes the current
|
||||
year-month-day.
|
||||
|
||||
@anchor{time duration syntax}
|
||||
@section Time duration
|
||||
|
||||
The accepted syntax is:
|
||||
@example
|
||||
[-]HH:MM:SS[.m...]
|
||||
[-]S+[.m...]
|
||||
@end example
|
||||
|
||||
@var{HH} expresses the number of hours, @var{MM} the number a of minutes
|
||||
and @var{SS} the number of seconds.
|
||||
|
||||
@anchor{video size syntax}
|
||||
@section Video size
|
||||
Specify the size of the sourced video, it may be a string of the form
|
||||
@var{width}x@var{height}, or the name of a size abbreviation.
|
||||
|
||||
The following abbreviations are recognized:
|
||||
@table @samp
|
||||
@item sqcif
|
||||
128x96
|
||||
@item qcif
|
||||
176x144
|
||||
@item cif
|
||||
352x288
|
||||
@item 4cif
|
||||
704x576
|
||||
@item 16cif
|
||||
1408x1152
|
||||
@item qqvga
|
||||
160x120
|
||||
@item qvga
|
||||
320x240
|
||||
@item vga
|
||||
640x480
|
||||
@item svga
|
||||
800x600
|
||||
@item xga
|
||||
1024x768
|
||||
@item uxga
|
||||
1600x1200
|
||||
@item qxga
|
||||
2048x1536
|
||||
@item sxga
|
||||
1280x1024
|
||||
@item qsxga
|
||||
2560x2048
|
||||
@item hsxga
|
||||
5120x4096
|
||||
@item wvga
|
||||
852x480
|
||||
@item wxga
|
||||
1366x768
|
||||
@item wsxga
|
||||
1600x1024
|
||||
@item wuxga
|
||||
1920x1200
|
||||
@item woxga
|
||||
2560x1600
|
||||
@item wqsxga
|
||||
3200x2048
|
||||
@item wquxga
|
||||
3840x2400
|
||||
@item whsxga
|
||||
6400x4096
|
||||
@item whuxga
|
||||
7680x4800
|
||||
@item cga
|
||||
320x200
|
||||
@item ega
|
||||
640x350
|
||||
@item hd480
|
||||
852x480
|
||||
@item hd720
|
||||
1280x720
|
||||
@item hd1080
|
||||
1920x1080
|
||||
@end table
|
||||
|
||||
@anchor{video rate syntax}
|
||||
@section Video rate
|
||||
|
||||
Specify the frame rate of a video, expressed as the number of frames
|
||||
generated per second. It has to be a string in the format
|
||||
@var{frame_rate_num}/@var{frame_rate_den}, an integer number, a float
|
||||
number or a valid video frame rate abbreviation.
|
||||
|
||||
The following abbreviations are recognized:
|
||||
@table @samp
|
||||
@item ntsc
|
||||
30000/1001
|
||||
@item pal
|
||||
25/1
|
||||
@item qntsc
|
||||
30000/1
|
||||
@item qpal
|
||||
25/1
|
||||
@item sntsc
|
||||
30000/1
|
||||
@item spal
|
||||
25/1
|
||||
@item film
|
||||
24/1
|
||||
@item ntsc-film
|
||||
24000/1
|
||||
@end table
|
||||
|
||||
@anchor{ratio syntax}
|
||||
@section Ratio
|
||||
|
||||
A ratio can be expressed as an expression, or in the form
|
||||
@var{numerator}:@var{denominator}.
|
||||
|
||||
Note that a ratio with infinite (1/0) or negative value is
|
||||
considered valid, so you should check on the returned value if you
|
||||
want to exclude those values.
|
||||
|
||||
The undefined value can be expressed using the "0:0" string.
|
||||
|
||||
@anchor{color syntax}
|
||||
@section Color
|
||||
|
||||
It can be the name of a color (case insensitive match) or a
|
||||
[0x|#]RRGGBB[AA] sequence, possibly followed by "@" and a string
|
||||
representing the alpha component.
|
||||
|
||||
The alpha component may be a string composed by "0x" followed by an
|
||||
hexadecimal number or a decimal number between 0.0 and 1.0, which
|
||||
represents the opacity value (0x00/0.0 means completely transparent,
|
||||
0xff/1.0 completely opaque).
|
||||
If the alpha component is not specified then 0xff is assumed.
|
||||
|
||||
The string "random" will result in a random color.
|
||||
|
||||
@c man end SYNTAX
|
114 doc/texi2pod.pl
@@ -1,4 +1,4 @@
|
||||
#! /usr/bin/perl
|
||||
#! /usr/bin/perl -w
|
||||
|
||||
# Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
|
||||
|
||||
@@ -23,13 +23,11 @@
|
||||
# markup to Perl POD format. It's intended to be used to extract
|
||||
# something suitable for a manpage from a Texinfo document.
|
||||
|
||||
use warnings;
|
||||
|
||||
$output = 0;
|
||||
$skipping = 0;
|
||||
%chapters = ();
|
||||
@chapters_sequence = ();
|
||||
$chapter = "";
|
||||
%sects = ();
|
||||
@sects_sequence = ();
|
||||
$section = "";
|
||||
@icstack = ();
|
||||
@endwstack = ();
|
||||
@skstack = ();
|
||||
@@ -38,7 +36,7 @@ $shift = "";
|
||||
%defs = ();
|
||||
$fnno = 1;
|
||||
$inf = "";
|
||||
@ibase = ();
|
||||
$ibase = "";
|
||||
|
||||
while ($_ = shift) {
|
||||
if (/^-D(.*)$/) {
|
||||
@@ -54,8 +52,6 @@ while ($_ = shift) {
|
||||
die "flags may only contain letters, digits, hyphens, dashes and underscores\n"
|
||||
unless $flag =~ /^[a-zA-Z0-9_-]+$/;
|
||||
$defs{$flag} = $value;
|
||||
} elsif (/^-I(.*)$/) {
|
||||
push @ibase, $1 ne "" ? $1 : shift;
|
||||
} elsif (/^-/) {
|
||||
usage();
|
||||
} else {
|
||||
@@ -65,12 +61,10 @@ while ($_ = shift) {
|
||||
}
|
||||
}
|
||||
|
||||
push @ibase, ".";
|
||||
|
||||
if (defined $in) {
|
||||
$inf = gensym();
|
||||
open($inf, "<$in") or die "opening \"$in\": $!\n";
|
||||
push @ibase, $1 if $in =~ m|^(.+)/[^/]+$|;
|
||||
$ibase = $1 if $in =~ m|^(.+)/[^/]+$|;
|
||||
} else {
|
||||
$inf = \*STDIN;
|
||||
}
|
||||
@@ -80,7 +74,7 @@ if (defined $out) {
|
||||
}
|
||||
|
||||
while(defined $inf) {
|
||||
INF: while(<$inf>) {
|
||||
while(<$inf>) {
|
||||
# Certain commands are discarded without further processing.
|
||||
/^\@(?:
|
||||
[a-z]+index # @*index: useful only in complete manual
|
||||
@@ -110,28 +104,23 @@ INF: while(<$inf>) {
|
||||
push @instack, $inf;
|
||||
$inf = gensym();
|
||||
|
||||
for (@ibase) {
|
||||
open($inf, "<" . $_ . "/" . $1) and next INF;
|
||||
}
|
||||
die "cannot open $1: $!\n";
|
||||
};
|
||||
|
||||
/^\@chapter\s+([A-Za-z ]+)/ and do {
|
||||
# close old chapter
|
||||
$chapters{$chapter_name} .= postprocess($chapter) if ($chapter_name);
|
||||
|
||||
# start new chapter
|
||||
$chapter_name = $1, push (@chapters_sequence, $chapter_name) unless $skipping;
|
||||
$chapters{$chapter_name} = "" unless exists $chapters{$chapter_name};
|
||||
$chapter = "";
|
||||
$output = 1;
|
||||
# Try cwd and $ibase.
|
||||
open($inf, "<" . $1)
|
||||
or open($inf, "<" . $ibase . "/" . $1)
|
||||
or die "cannot open $1 or $ibase/$1: $!\n";
|
||||
next;
|
||||
};
|
||||
|
||||
/^\@bye/ and do {
|
||||
# close old chapter
|
||||
$chapters{$chapter_name} .= postprocess($chapter) if ($chapter_name);
|
||||
last INF;
|
||||
# Look for blocks surrounded by @c man begin SECTION ... @c man end.
|
||||
# This really oughta be @ifman ... @end ifman and the like, but such
|
||||
# would require rev'ing all other Texinfo translators.
|
||||
/^\@c\s+man\s+begin\s+([A-Za-z ]+)/ and $sect = $1, push (@sects_sequence, $sect), $output = 1, next;
|
||||
/^\@c\s+man\s+end/ and do {
|
||||
$sects{$sect} = "" unless exists $sects{$sect};
|
||||
$sects{$sect} .= postprocess($section);
|
||||
$section = "";
|
||||
$output = 0;
|
||||
next;
|
||||
};
|
||||
|
||||
# handle variables
|
||||
@@ -156,20 +145,20 @@ INF: while(<$inf>) {
|
||||
# Ignore @end foo, where foo is not an operation which may
|
||||
# cause us to skip, if we are presently skipping.
|
||||
my $ended = $1;
|
||||
next if $skipping && $ended !~ /^(?:ifset|ifclear|ignore|menu|iftex|ifhtml|ifnothtml)$/;
|
||||
next if $skipping && $ended !~ /^(?:ifset|ifclear|ignore|menu|iftex)$/;
|
||||
|
||||
die "\@end $ended without \@$ended at line $.\n" unless defined $endw;
|
||||
die "\@$endw ended by \@end $ended at line $.\n" unless $ended eq $endw;
|
||||
|
||||
$endw = pop @endwstack;
|
||||
|
||||
if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex|ifhtml|ifnothtml)$/) {
|
||||
if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex)$/) {
|
||||
$skipping = pop @skstack;
|
||||
next;
|
||||
} elsif ($ended =~ /^(?:example|smallexample|display)$/) {
|
||||
$shift = "";
|
||||
$_ = ""; # need a paragraph break
|
||||
} elsif ($ended =~ /^(?:itemize|enumerate|(?:multi|[fv])?table)$/) {
|
||||
} elsif ($ended =~ /^(?:itemize|enumerate|[fv]?table)$/) {
|
||||
$_ = "\n=back\n";
|
||||
$ic = pop @icstack;
|
||||
} else {
|
||||
@@ -196,11 +185,11 @@ INF: while(<$inf>) {
|
||||
next;
|
||||
};
|
||||
|
||||
/^\@(ignore|menu|iftex|ifhtml|ifnothtml)\b/ and do {
|
||||
/^\@(ignore|menu|iftex)\b/ and do {
|
||||
push @endwstack, $endw;
|
||||
push @skstack, $skipping;
|
||||
$endw = $1;
|
||||
$skipping = $endw !~ /ifnothtml/;
|
||||
$skipping = 1;
|
||||
next;
|
||||
};
|
||||
|
||||
@@ -217,6 +206,7 @@ INF: while(<$inf>) {
|
||||
s/\@TeX\{\}/TeX/g;
|
||||
s/\@pounds\{\}/\#/g;
|
||||
s/\@minus(?:\{\})?/-/g;
|
||||
s/\\,/,/g;
|
||||
|
||||
# Now the ones that have to be replaced by special escapes
|
||||
# (which will be turned back into text by unmunge())
|
||||
@@ -269,16 +259,15 @@ INF: while(<$inf>) {
|
||||
$endw = "enumerate";
|
||||
};
|
||||
|
||||
/^\@((?:multi|[fv])?table)\s+(\@[a-z]+)/ and do {
|
||||
/^\@([fv]?table)\s+(\@[a-z]+)/ and do {
|
||||
push @endwstack, $endw;
|
||||
push @icstack, $ic;
|
||||
$endw = $1;
|
||||
$ic = $2;
|
||||
$ic =~ s/\@(?:samp|strong|key|gcctabopt|option|env|command)/B/;
|
||||
$ic =~ s/\@(?:samp|strong|key|gcctabopt|option|env)/B/;
|
||||
$ic =~ s/\@(?:code|kbd)/C/;
|
||||
$ic =~ s/\@(?:dfn|var|emph|cite|i)/I/;
|
||||
$ic =~ s/\@(?:file)/F/;
|
||||
$ic =~ s/\@(?:columnfractions)//;
|
||||
$_ = "\n=over 4\n";
|
||||
};
|
||||
|
||||
@@ -289,21 +278,6 @@ INF: while(<$inf>) {
|
||||
$_ = ""; # need a paragraph break
|
||||
};
|
||||
|
||||
/^\@item\s+(.*\S)\s*$/ and $endw eq "multitable" and do {
|
||||
my $columns = $1;
|
||||
$columns =~ s/\@tab/ : /;
|
||||
|
||||
$_ = "\n=item B<". $columns .">\n";
|
||||
};
|
||||
|
||||
/^\@tab\s+(.*\S)\s*$/ and $endw eq "multitable" and do {
|
||||
my $columns = $1;
|
||||
$columns =~ s/\@tab/ : /;
|
||||
|
||||
$_ = " : ". $columns;
|
||||
$chapter =~ s/\n+\s+$//;
|
||||
};
|
||||
|
||||
/^\@itemx?\s*(.+)?$/ and do {
|
||||
if (defined $1) {
|
||||
# Entity escapes prevent munging by the <> processing below.
|
||||
@@ -315,7 +289,7 @@ INF: while(<$inf>) {
|
||||
}
|
||||
};
|
||||
|
||||
$chapter .= $shift.$_."\n";
|
||||
$section .= $shift.$_."\n";
|
||||
}
|
||||
# End of current file.
|
||||
close($inf);
|
||||
@@ -324,15 +298,16 @@ $inf = pop @instack;
|
||||
|
||||
die "No filename or title\n" unless defined $fn && defined $tl;
|
||||
|
||||
$chapters{NAME} = "$fn \- $tl\n";
|
||||
$chapters{FOOTNOTES} .= "=back\n" if exists $chapters{FOOTNOTES};
|
||||
$sects{NAME} = "$fn \- $tl\n";
|
||||
$sects{FOOTNOTES} .= "=back\n" if exists $sects{FOOTNOTES};
|
||||
|
||||
unshift @chapters_sequence, "NAME";
|
||||
for $chapter (@chapters_sequence) {
|
||||
if (exists $chapters{$chapter}) {
|
||||
$head = uc($chapter);
|
||||
unshift @sects_sequence, "NAME";
|
||||
for $sect (@sects_sequence) {
|
||||
if(exists $sects{$sect}) {
|
||||
$head = $sect;
|
||||
$head =~ s/SEEALSO/SEE ALSO/;
|
||||
print "=head1 $head\n\n";
|
||||
print scalar unmunge ($chapters{$chapter});
|
||||
print scalar unmunge ($sects{$sect});
|
||||
print "\n";
|
||||
}
|
||||
}
|
||||
@@ -377,7 +352,6 @@ sub postprocess
|
||||
s/\(?\@xref\{(?:[^\}]*)\}(?:[^.<]|(?:<[^<>]*>))*\.\)?//g;
|
||||
s/\s+\(\@pxref\{(?:[^\}]*)\}\)//g;
|
||||
s/;\s+\@pxref\{(?:[^\}]*)\}//g;
|
||||
s/\@ref\{(?:[^,\}]*,)(?:[^,\}]*,)([^,\}]*).*\}/$1/g;
|
||||
s/\@ref\{([^\}]*)\}/$1/g;
|
||||
s/\@noindent\s*//g;
|
||||
s/\@refill//g;
|
||||
@@ -387,7 +361,7 @@ sub postprocess
|
||||
# @uref can take one, two, or three arguments, with different
|
||||
# semantics each time. @url and @email are just like @uref with
|
||||
# one argument, for our purposes.
|
||||
s/\@(?:uref|url|email)\{([^\},]*),?[^\}]*\}/<B<$1>>/g;
|
||||
s/\@(?:uref|url|email)\{([^\},]*)\}/<B<$1>>/g;
|
||||
s/\@uref\{([^\},]*),([^\},]*)\}/$2 (C<$1>)/g;
|
||||
s/\@uref\{([^\},]*),([^\},]*),([^\},]*)\}/$3/g;
|
||||
|
||||
@@ -431,13 +405,13 @@ sub unmunge
|
||||
|
||||
sub add_footnote
|
||||
{
|
||||
unless (exists $chapters{FOOTNOTES}) {
|
||||
$chapters{FOOTNOTES} = "\n=over 4\n\n";
|
||||
unless (exists $sects{FOOTNOTES}) {
|
||||
$sects{FOOTNOTES} = "\n=over 4\n\n";
|
||||
}
|
||||
|
||||
$chapters{FOOTNOTES} .= "=item $fnno.\n\n"; $fnno++;
|
||||
$chapters{FOOTNOTES} .= $_[0];
|
||||
$chapters{FOOTNOTES} .= "\n\n";
|
||||
$sects{FOOTNOTES} .= "=item $fnno.\n\n"; $fnno++;
|
||||
$sects{FOOTNOTES} .= $_[0];
|
||||
$sects{FOOTNOTES} .= "\n\n";
|
||||
}
|
||||
|
||||
# stolen from Symbol.pm
|
||||
|
@@ -107,3 +107,4 @@ one with score 3)
|
||||
|
||||
Author: Michael niedermayer
|
||||
Copyright LGPL
|
||||
|
||||
|
283 ffplay.c
@@ -40,6 +40,7 @@
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavdevice/avdevice.h"
|
||||
#include "libswscale/swscale.h"
|
||||
#include "libavcodec/audioconvert.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavcodec/avfft.h"
|
||||
#include "libswresample/swresample.h"
|
||||
@@ -63,6 +64,7 @@ const char program_name[] = "ffplay";
|
||||
const int program_birth_year = 2003;
|
||||
|
||||
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
|
||||
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
|
||||
#define MIN_FRAMES 5
|
||||
|
||||
/* SDL audio buffer size, in samples. Should be small to have precise
|
||||
@@ -104,7 +106,6 @@ typedef struct VideoPicture {
|
||||
int skip;
|
||||
SDL_Overlay *bmp;
|
||||
int width, height; /* source height & width */
|
||||
AVRational sample_aspect_ratio;
|
||||
int allocated;
|
||||
int reallocate;
|
||||
enum PixelFormat pix_fmt;
|
||||
@@ -132,7 +133,6 @@ typedef struct VideoState {
|
||||
AVInputFormat *iformat;
|
||||
int no_background;
|
||||
int abort_request;
|
||||
int force_refresh;
|
||||
int paused;
|
||||
int last_paused;
|
||||
int seek_req;
|
||||
@@ -232,14 +232,8 @@ typedef struct VideoState {
|
||||
#endif
|
||||
|
||||
int refresh;
|
||||
int last_video_stream, last_audio_stream, last_subtitle_stream;
|
||||
} VideoState;
|
||||
|
||||
typedef struct AllocEventProps {
|
||||
VideoState *is;
|
||||
AVFrame *frame;
|
||||
} AllocEventProps;
|
||||
|
||||
static int opt_help(const char *opt, const char *arg);
|
||||
|
||||
/* options specified by the user */
|
||||
@@ -304,12 +298,13 @@ void av_noreturn exit_program(int ret)
|
||||
exit(ret);
|
||||
}
|
||||
|
||||
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
|
||||
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
|
||||
{
|
||||
AVPacketList *pkt1;
|
||||
|
||||
if (q->abort_request)
|
||||
return -1;
|
||||
/* duplicate the packet */
|
||||
if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
|
||||
return -1;
|
||||
|
||||
pkt1 = av_malloc(sizeof(AVPacketList));
|
||||
if (!pkt1)
|
||||
@@ -317,7 +312,11 @@ static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
|
||||
pkt1->pkt = *pkt;
|
||||
pkt1->next = NULL;
|
||||
|
||||
|
||||
SDL_LockMutex(q->mutex);
|
||||
|
||||
if (!q->last_pkt)
|
||||
|
||||
q->first_pkt = pkt1;
|
||||
else
|
||||
q->last_pkt->next = pkt1;
|
||||
@@ -326,25 +325,9 @@ static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
|
||||
q->size += pkt1->pkt.size + sizeof(*pkt1);
|
||||
/* XXX: should duplicate packet data in DV case */
|
||||
SDL_CondSignal(q->cond);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* duplicate the packet */
|
||||
if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
|
||||
return -1;
|
||||
|
||||
SDL_LockMutex(q->mutex);
|
||||
ret = packet_queue_put_private(q, pkt);
|
||||
SDL_UnlockMutex(q->mutex);
|
||||
|
||||
if (pkt != &flush_pkt && ret < 0)
|
||||
av_free_packet(pkt);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* packet queue handling */
|
||||
@@ -353,7 +336,7 @@ static void packet_queue_init(PacketQueue *q)
|
||||
memset(q, 0, sizeof(PacketQueue));
|
||||
q->mutex = SDL_CreateMutex();
|
||||
q->cond = SDL_CreateCond();
|
||||
q->abort_request = 1;
|
||||
packet_queue_put(q, &flush_pkt);
|
||||
}
|
||||
|
||||
static void packet_queue_flush(PacketQueue *q)
|
||||
@@ -373,7 +356,7 @@ static void packet_queue_flush(PacketQueue *q)
|
||||
SDL_UnlockMutex(q->mutex);
|
||||
}
|
||||
|
||||
static void packet_queue_destroy(PacketQueue *q)
|
||||
static void packet_queue_end(PacketQueue *q)
|
||||
{
|
||||
packet_queue_flush(q);
|
||||
SDL_DestroyMutex(q->mutex);
|
||||
@@ -391,14 +374,6 @@ static void packet_queue_abort(PacketQueue *q)
|
||||
SDL_UnlockMutex(q->mutex);
|
||||
}
|
||||
|
||||
static void packet_queue_start(PacketQueue *q)
|
||||
{
|
||||
SDL_LockMutex(q->mutex);
|
||||
q->abort_request = 0;
|
||||
packet_queue_put_private(q, &flush_pkt);
|
||||
SDL_UnlockMutex(q->mutex);
|
||||
}
|
||||
|
||||
/* return < 0 if aborted, 0 if no packet and > 0 if packet. */
|
||||
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
|
||||
{
|
||||
@@ -692,11 +667,21 @@ static void video_image_display(VideoState *is)
|
||||
|
||||
vp = &is->pictq[is->pictq_rindex];
|
||||
if (vp->bmp) {
|
||||
if (vp->sample_aspect_ratio.num == 0)
|
||||
aspect_ratio = 0;
|
||||
else
|
||||
aspect_ratio = av_q2d(vp->sample_aspect_ratio);
|
||||
#if CONFIG_AVFILTER
|
||||
if (vp->picref->video->sample_aspect_ratio.num == 0)
|
||||
aspect_ratio = 0;
|
||||
else
|
||||
aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
|
||||
#else
|
||||
|
||||
/* XXX: use variable in the frame */
|
||||
if (is->video_st->sample_aspect_ratio.num)
|
||||
aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
|
||||
else if (is->video_st->codec->sample_aspect_ratio.num)
|
||||
aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
|
||||
else
|
||||
aspect_ratio = 0;
|
||||
#endif
|
||||
if (aspect_ratio <= 0.0)
|
||||
aspect_ratio = 1.0;
|
||||
aspect_ratio *= (float)vp->width / (float)vp->height;
|
||||
@@ -882,8 +867,7 @@ static void video_audio_display(VideoState *s)
|
||||
}
|
||||
}
|
||||
SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
|
||||
if (!s->paused)
|
||||
s->xpos++;
|
||||
s->xpos++;
|
||||
if (s->xpos >= s->width)
|
||||
s->xpos= s->xleft;
|
||||
}
|
||||
@@ -897,9 +881,6 @@ static void stream_close(VideoState *is)
|
||||
is->abort_request = 1;
|
||||
SDL_WaitThread(is->read_tid, NULL);
|
||||
SDL_WaitThread(is->refresh_tid, NULL);
|
||||
packet_queue_destroy(&is->videoq);
|
||||
packet_queue_destroy(&is->audioq);
|
||||
packet_queue_destroy(&is->subtitleq);
|
||||
|
||||
/* free all pictures */
|
||||
for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
|
||||
@@ -953,7 +934,6 @@ static int video_open(VideoState *is, int force_set_video_mode)
|
||||
{
|
||||
int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
|
||||
int w,h;
|
||||
VideoPicture *vp = &is->pictq[is->pictq_rindex];
|
||||
|
||||
if (is_full_screen) flags |= SDL_FULLSCREEN;
|
||||
else flags |= SDL_RESIZABLE;
|
||||
@@ -964,9 +944,15 @@ static int video_open(VideoState *is, int force_set_video_mode)
|
||||
} else if (!is_full_screen && screen_width) {
|
||||
w = screen_width;
|
||||
h = screen_height;
|
||||
} else if (vp->width) {
|
||||
w = vp->width;
|
||||
h = vp->height;
|
||||
#if CONFIG_AVFILTER
|
||||
} else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
|
||||
w = is->out_video_filter->inputs[0]->w;
|
||||
h = is->out_video_filter->inputs[0]->h;
|
||||
#else
|
||||
} else if (is->video_st && is->video_st->codec->width) {
|
||||
w = is->video_st->codec->width;
|
||||
h = is->video_st->codec->height;
|
||||
#endif
|
||||
} else {
|
||||
w = 640;
|
||||
h = 480;
|
||||
@@ -1007,7 +993,7 @@ static int refresh_thread(void *opaque)
|
||||
SDL_Event event;
|
||||
event.type = FF_REFRESH_EVENT;
|
||||
event.user.data1 = opaque;
|
||||
if (!is->refresh && (!is->paused || is->force_refresh)) {
|
||||
if (!is->refresh) {
|
||||
is->refresh = 1;
|
||||
SDL_PushEvent(&event);
|
||||
}
|
||||
@@ -1170,9 +1156,6 @@ retry:
|
||||
goto retry;
|
||||
}
|
||||
|
||||
if (is->paused)
|
||||
goto display;
|
||||
|
||||
/* compute nominal last_duration */
|
||||
last_duration = vp->pts - is->frame_last_pts;
|
||||
if (last_duration > 0 && last_duration < 10.0) {
|
||||
@@ -1251,13 +1234,11 @@ retry:
|
||||
}
|
||||
}
|
||||
|
||||
display:
|
||||
/* display picture */
|
||||
if (!display_disable)
|
||||
video_display(is);
|
||||
|
||||
if (!is->paused)
|
||||
pictq_next_picture(is);
|
||||
pictq_next_picture(is);
|
||||
}
|
||||
} else if (is->audio_st) {
|
||||
/* draw the next audio frame */
|
||||
@@ -1269,7 +1250,6 @@ display:
|
||||
if (!display_disable)
|
||||
video_display(is);
|
||||
}
|
||||
is->force_refresh = 0;
|
||||
if (show_status) {
|
||||
static int64_t last_time;
|
||||
int64_t cur_time;
|
||||
@@ -1307,10 +1287,9 @@ display:
|
||||
|
||||
/* allocate a picture (needs to do that in main thread to avoid
|
||||
potential locking problems */
|
||||
static void alloc_picture(AllocEventProps *event_props)
|
||||
static void alloc_picture(void *opaque)
|
||||
{
|
||||
VideoState *is = event_props->is;
|
||||
AVFrame *frame = event_props->frame;
|
||||
VideoState *is = opaque;
|
||||
VideoPicture *vp;
|
||||
|
||||
vp = &is->pictq[is->pictq_windex];
|
||||
@@ -1322,14 +1301,16 @@ static void alloc_picture(AllocEventProps *event_props)
|
||||
if (vp->picref)
|
||||
avfilter_unref_buffer(vp->picref);
|
||||
vp->picref = NULL;
|
||||
|
||||
vp->width = is->out_video_filter->inputs[0]->w;
|
||||
vp->height = is->out_video_filter->inputs[0]->h;
|
||||
vp->pix_fmt = is->out_video_filter->inputs[0]->format;
|
||||
#else
|
||||
vp->width = is->video_st->codec->width;
|
||||
vp->height = is->video_st->codec->height;
|
||||
vp->pix_fmt = is->video_st->codec->pix_fmt;
|
||||
#endif
|
||||
|
||||
vp->width = frame->width;
|
||||
vp->height = frame->height;
|
||||
vp->pix_fmt = frame->format;
|
||||
|
||||
video_open(event_props->is, 0);
|
||||
|
||||
vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
|
||||
SDL_YV12_OVERLAY,
|
||||
screen);
|
||||
@@ -1391,22 +1372,22 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_
|
||||
|
||||
/* alloc or resize hardware picture buffer */
|
||||
if (!vp->bmp || vp->reallocate ||
|
||||
vp->width != src_frame->width ||
|
||||
vp->height != src_frame->height) {
|
||||
#if CONFIG_AVFILTER
|
||||
vp->width != is->out_video_filter->inputs[0]->w ||
|
||||
vp->height != is->out_video_filter->inputs[0]->h) {
|
||||
#else
|
||||
vp->width != is->video_st->codec->width ||
|
||||
vp->height != is->video_st->codec->height) {
|
||||
#endif
|
||||
SDL_Event event;
|
||||
AllocEventProps event_props;
|
||||
|
||||
event_props.frame = src_frame;
|
||||
event_props.is = is;
|
||||
|
||||
vp->allocated = 0;
|
||||
vp->reallocate = 0;
|
||||
|
||||
/* the allocation must be done in the main thread to avoid
|
||||
locking problems. We wait in this block for the event to complete,
|
||||
so we can pass a pointer to event_props to it. */
|
||||
locking problems */
|
||||
event.type = FF_ALLOC_EVENT;
|
||||
event.user.data1 = &event_props;
|
||||
event.user.data1 = is;
|
||||
SDL_PushEvent(&event);
|
||||
|
||||
/* wait until the picture is allocated */
|
||||
@@ -1428,7 +1409,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_
|
||||
|
||||
/* if the frame is not skipped, then display it */
|
||||
if (vp->bmp) {
|
||||
AVPicture pict = { { 0 } };
|
||||
AVPicture pict;
|
||||
#if CONFIG_AVFILTER
|
||||
if (vp->picref)
|
||||
avfilter_unref_buffer(vp->picref);
|
||||
@@ -1438,6 +1419,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_
|
||||
/* get a pointer on the bitmap */
|
||||
SDL_LockYUVOverlay (vp->bmp);
|
||||
|
||||
memset(&pict, 0, sizeof(AVPicture));
|
||||
pict.data[0] = vp->bmp->pixels[0];
|
||||
pict.data[1] = vp->bmp->pixels[2];
|
||||
pict.data[2] = vp->bmp->pixels[1];
|
||||
@@ -1450,7 +1432,6 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_
|
||||
// FIXME use direct rendering
|
||||
av_picture_copy(&pict, (AVPicture *)src_frame,
|
||||
vp->pix_fmt, vp->width, vp->height);
|
||||
vp->sample_aspect_ratio = vp->picref->video->sample_aspect_ratio;
|
||||
#else
|
||||
sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
|
||||
is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
|
||||
@@ -1462,7 +1443,6 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_
|
||||
}
|
||||
sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
|
||||
0, vp->height, pict.data, pict.linesize);
|
||||
vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
|
||||
#endif
|
||||
/* update the bitmap content */
|
||||
SDL_UnlockYUVOverlay(vp->bmp);
|
||||
@@ -1515,7 +1495,7 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacke
|
||||
int ret = 1;
|
||||
|
||||
if (decoder_reorder_pts == -1) {
|
||||
*pts = av_frame_get_best_effort_timestamp(frame);
|
||||
*pts = *(int64_t*)av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp");
|
||||
} else if (decoder_reorder_pts) {
|
||||
*pts = frame->pkt_pts;
|
||||
} else {
|
||||
@@ -1565,7 +1545,7 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
|
||||
AVFilterContext *ctx = codec->opaque;
|
||||
AVFilterBufferRef *ref;
|
||||
int perms = AV_PERM_WRITE;
|
||||
int i, w, h, stride[AV_NUM_DATA_POINTERS];
|
||||
int i, w, h, stride[4];
|
||||
unsigned edge;
|
||||
int pixel_size;
|
||||
|
||||
@@ -1584,7 +1564,7 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
|
||||
w = codec->width;
|
||||
h = codec->height;
|
||||
|
||||
if(av_image_check_size(w, h, 0, codec) || codec->pix_fmt<0)
|
||||
if(av_image_check_size(w, h, 0, codec))
|
||||
return -1;
|
||||
|
||||
avcodec_align_dimensions2(codec, &w, &h, stride);
|
||||
@@ -1605,7 +1585,6 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
|
||||
unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
|
||||
unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
|
||||
|
||||
pic->base[i] = ref->data[i];
|
||||
if (ref->data[i]) {
|
||||
ref->data[i] += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
|
||||
}
|
||||
@@ -1615,10 +1594,6 @@ static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
|
||||
pic->opaque = ref;
|
||||
pic->type = FF_BUFFER_TYPE_USER;
|
||||
pic->reordered_opaque = codec->reordered_opaque;
|
||||
pic->width = codec->width;
|
||||
pic->height = codec->height;
|
||||
pic->format = codec->pix_fmt;
|
||||
pic->sample_aspect_ratio = codec->sample_aspect_ratio;
|
||||
if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
|
||||
else pic->pkt_pts = AV_NOPTS_VALUE;
|
||||
return 0;
|
||||
@@ -1696,19 +1671,18 @@ static int input_request_frame(AVFilterLink *link)
|
||||
if (priv->use_dr1 && priv->frame->opaque) {
|
||||
picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
|
||||
} else {
|
||||
picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, priv->frame->width, priv->frame->height);
|
||||
picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
|
||||
av_image_copy(picref->data, picref->linesize,
|
||||
(const uint8_t **)(void **)priv->frame->data, priv->frame->linesize,
|
||||
picref->format, priv->frame->width, priv->frame->height);
|
||||
priv->frame->data, priv->frame->linesize,
|
||||
picref->format, link->w, link->h);
|
||||
}
|
||||
av_free_packet(&pkt);
|
||||
|
||||
avfilter_copy_frame_props(picref, priv->frame);
|
||||
picref->video->sample_aspect_ratio = av_guess_sample_aspect_ratio(priv->is->ic, priv->is->video_st, priv->frame);
|
||||
picref->pts = pts;
|
||||
|
||||
avfilter_start_frame(link, picref);
|
||||
avfilter_draw_slice(link, 0, picref->video->h, 1);
|
||||
avfilter_draw_slice(link, 0, link->h, 1);
|
||||
avfilter_end_frame(link);
|
||||
|
||||
return 0;
|
||||
@@ -1760,11 +1734,11 @@ static AVFilter input_filter =
|
||||
|
||||
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
|
||||
{
|
||||
static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
|
||||
char sws_flags_str[128];
|
||||
int ret;
|
||||
enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
|
||||
AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
|
||||
AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;;
|
||||
AVFilterContext *filt_src = NULL, *filt_out = NULL;
|
||||
snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
|
||||
graph->scale_sws_opts = av_strdup(sws_flags_str);
|
||||
|
||||
@@ -1773,27 +1747,17 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
||||
return ret;
|
||||
|
||||
#if FF_API_OLD_VSINK_API
|
||||
ret = avfilter_graph_create_filter(&filt_out,
|
||||
avfilter_get_by_name("buffersink"),
|
||||
"out", NULL, pix_fmts, graph);
|
||||
ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
|
||||
NULL, pix_fmts, graph);
|
||||
#else
|
||||
buffersink_params->pixel_fmts = pix_fmts;
|
||||
ret = avfilter_graph_create_filter(&filt_out,
|
||||
avfilter_get_by_name("buffersink"),
|
||||
"out", NULL, buffersink_params, graph);
|
||||
ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
|
||||
NULL, buffersink_params, graph);
|
||||
#endif
|
||||
av_freep(&buffersink_params);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if ((ret = avfilter_graph_create_filter(&filt_format,
|
||||
avfilter_get_by_name("format"),
|
||||
"format", "yuv420p", NULL, graph)) < 0)
|
||||
return ret;
|
||||
if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
|
||||
return ret;
|
||||
|
||||
|
||||
if (vfilters) {
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
@@ -1804,14 +1768,14 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
||||
outputs->next = NULL;
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = filt_format;
|
||||
inputs->filter_ctx = filt_out;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
|
||||
return ret;
|
||||
} else {
|
||||
if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
|
||||
if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1839,13 +1803,8 @@ static int video_thread(void *arg)
|
||||
int last_w = is->video_st->codec->width;
|
||||
int last_h = is->video_st->codec->height;
|
||||
|
||||
if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
|
||||
SDL_Event event;
|
||||
event.type = FF_QUIT_EVENT;
|
||||
event.user.data1 = is;
|
||||
SDL_PushEvent(&event);
|
||||
if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
|
||||
goto the_end;
|
||||
}
|
||||
filt_out = is->out_video_filter;
|
||||
#endif
|
||||
|
||||
@@ -1875,14 +1834,11 @@ static int video_thread(void *arg)
|
||||
if (picref) {
|
||||
avfilter_fill_frame_from_video_buffer_ref(frame, picref);
|
||||
pts_int = picref->pts;
|
||||
tb = filt_out->inputs[0]->time_base;
|
||||
pos = picref->pos;
|
||||
frame->opaque = picref;
|
||||
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
|
||||
if (av_cmp_q(tb, is->video_st->time_base)) {
|
||||
av_unused int64_t pts1 = pts_int;
|
||||
pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
|
||||
av_dlog(NULL, "video_thread(): "
|
||||
@@ -1894,8 +1850,6 @@ static int video_thread(void *arg)
|
||||
ret = get_video_frame(is, frame, &pts_int, &pkt);
|
||||
pos = pkt.pos;
|
||||
av_free_packet(&pkt);
|
||||
if (ret == 0)
|
||||
continue;
|
||||
#endif
|
||||
|
||||
if (ret < 0)
|
||||
@@ -1921,9 +1875,7 @@ static int video_thread(void *arg)
|
||||
stream_toggle_pause(is);
|
||||
}
|
||||
the_end:
|
||||
avcodec_flush_buffers(is->video_st->codec);
|
||||
#if CONFIG_AVFILTER
|
||||
av_freep(&vfilters);
|
||||
avfilter_graph_free(&graph);
|
||||
#endif
|
||||
av_free(frame);
|
||||
@@ -2167,8 +2119,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
|
||||
/* if no pts, then compute it */
|
||||
pts = is->audio_clock;
|
||||
*pts_ptr = pts;
|
||||
is->audio_clock += (double)data_size /
|
||||
(dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
|
||||
is->audio_clock += (double)data_size / (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
|
||||
#ifdef DEBUG
|
||||
{
|
||||
static double last_clock;
|
||||
@@ -2194,10 +2145,8 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
|
||||
if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
|
||||
return -1;
|
||||
|
||||
if (pkt->data == flush_pkt.data) {
|
||||
if (pkt->data == flush_pkt.data)
|
||||
avcodec_flush_buffers(dec);
|
||||
flush_complete = 0;
|
||||
}
|
||||
|
||||
*pkt_temp = *pkt;
|
||||
|
||||
@@ -2269,9 +2218,9 @@ static int stream_component_open(VideoState *is, int stream_index)
|
||||
opts = filter_codec_opts(codec_opts, codec, ic, ic->streams[stream_index]);
|
||||
|
||||
switch(avctx->codec_type){
|
||||
case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; if(audio_codec_name ) codec= avcodec_find_decoder_by_name( audio_codec_name); break;
|
||||
case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
|
||||
case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; if(video_codec_name ) codec= avcodec_find_decoder_by_name( video_codec_name); break;
|
||||
case AVMEDIA_TYPE_AUDIO : if(audio_codec_name ) codec= avcodec_find_decoder_by_name( audio_codec_name); break;
|
||||
case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
|
||||
case AVMEDIA_TYPE_VIDEO : if(video_codec_name ) codec= avcodec_find_decoder_by_name( video_codec_name); break;
|
||||
}
|
||||
if (!codec)
|
||||
return -1;
|
||||
@@ -2295,7 +2244,6 @@ static int stream_component_open(VideoState *is, int stream_index)
|
||||
avctx->flags |= CODEC_FLAG_EMU_EDGE;
|
||||
|
||||
if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
|
||||
env = SDL_getenv("SDL_AUDIO_CHANNELS");
|
||||
if (env)
|
||||
wanted_channel_layout = av_get_default_channel_layout(SDL_atoi(env));
|
||||
@@ -2372,20 +2320,20 @@ static int stream_component_open(VideoState *is, int stream_index)
|
||||
is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
|
||||
|
||||
memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
|
||||
packet_queue_start(&is->audioq);
|
||||
packet_queue_init(&is->audioq);
|
||||
SDL_PauseAudio(0);
|
||||
break;
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
is->video_stream = stream_index;
|
||||
is->video_st = ic->streams[stream_index];
|
||||
|
||||
packet_queue_start(&is->videoq);
|
||||
packet_queue_init(&is->videoq);
|
||||
is->video_tid = SDL_CreateThread(video_thread, is);
|
||||
break;
|
||||
case AVMEDIA_TYPE_SUBTITLE:
|
||||
is->subtitle_stream = stream_index;
|
||||
is->subtitle_st = ic->streams[stream_index];
|
||||
packet_queue_start(&is->subtitleq);
|
||||
packet_queue_init(&is->subtitleq);
|
||||
|
||||
is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
|
||||
break;
|
||||
@@ -2410,10 +2358,10 @@ static void stream_component_close(VideoState *is, int stream_index)
|
||||
|
||||
SDL_CloseAudio();
|
||||
|
||||
packet_queue_flush(&is->audioq);
|
||||
av_free_packet(&is->audio_pkt);
|
||||
packet_queue_end(&is->audioq);
|
||||
if (is->swr_ctx)
|
||||
swr_free(&is->swr_ctx);
|
||||
av_free_packet(&is->audio_pkt);
|
||||
av_freep(&is->audio_buf1);
|
||||
is->audio_buf = NULL;
|
||||
av_freep(&is->frame);
|
||||
@@ -2436,7 +2384,7 @@ static void stream_component_close(VideoState *is, int stream_index)
|
||||
|
||||
SDL_WaitThread(is->video_tid, NULL);
|
||||
|
||||
packet_queue_flush(&is->videoq);
|
||||
packet_queue_end(&is->videoq);
|
||||
break;
|
||||
case AVMEDIA_TYPE_SUBTITLE:
|
||||
packet_queue_abort(&is->subtitleq);
|
||||
@@ -2451,7 +2399,7 @@ static void stream_component_close(VideoState *is, int stream_index)
|
||||
|
||||
SDL_WaitThread(is->subtitle_tid, NULL);
|
||||
|
||||
packet_queue_flush(&is->subtitleq);
|
||||
packet_queue_end(&is->subtitleq);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
@@ -2498,9 +2446,9 @@ static int read_thread(void *arg)
|
||||
int orig_nb_streams;
|
||||
|
||||
memset(st_index, -1, sizeof(st_index));
|
||||
is->last_video_stream = is->video_stream = -1;
|
||||
is->last_audio_stream = is->audio_stream = -1;
|
||||
is->last_subtitle_stream = is->subtitle_stream = -1;
|
||||
is->video_stream = -1;
|
||||
is->audio_stream = -1;
|
||||
is->subtitle_stream = -1;
|
||||
|
||||
ic = avformat_alloc_context();
|
||||
ic->interrupt_callback.callback = decode_interrupt_cb;
|
||||
@@ -2654,9 +2602,9 @@ static int read_thread(void *arg)
|
||||
|
||||
/* if the queue are full, no need to read more */
|
||||
if ( is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
|
||||
|| ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
|
||||
&& (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
|
||||
&& (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request))) {
|
||||
|| ( (is->audioq .size > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
|
||||
&& (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0)
|
||||
&& (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0))) {
|
||||
/* wait 10 ms */
|
||||
SDL_Delay(10);
|
||||
continue;
|
||||
@@ -2761,10 +2709,6 @@ static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
|
||||
is->subpq_mutex = SDL_CreateMutex();
|
||||
is->subpq_cond = SDL_CreateCond();
|
||||
|
||||
packet_queue_init(&is->videoq);
|
||||
packet_queue_init(&is->audioq);
|
||||
packet_queue_init(&is->subtitleq);
|
||||
|
||||
is->av_sync_type = av_sync_type;
|
||||
is->read_tid = SDL_CreateThread(read_thread, is);
|
||||
if (!is->read_tid) {
|
||||
@@ -2778,19 +2722,16 @@ static void stream_cycle_channel(VideoState *is, int codec_type)
|
||||
{
|
||||
AVFormatContext *ic = is->ic;
|
||||
int start_index, stream_index;
|
||||
int old_index;
|
||||
AVStream *st;
|
||||
|
||||
if (codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
start_index = is->last_video_stream;
|
||||
old_index = is->video_stream;
|
||||
} else if (codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
start_index = is->last_audio_stream;
|
||||
old_index = is->audio_stream;
|
||||
} else {
|
||||
start_index = is->last_subtitle_stream;
|
||||
old_index = is->subtitle_stream;
|
||||
}
|
||||
if (codec_type == AVMEDIA_TYPE_VIDEO)
|
||||
start_index = is->video_stream;
|
||||
else if (codec_type == AVMEDIA_TYPE_AUDIO)
|
||||
start_index = is->audio_stream;
|
||||
else
|
||||
start_index = is->subtitle_stream;
|
||||
if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
|
||||
return;
|
||||
stream_index = start_index;
|
||||
for (;;) {
|
||||
if (++stream_index >= is->ic->nb_streams)
|
||||
@@ -2798,12 +2739,9 @@ static void stream_cycle_channel(VideoState *is, int codec_type)
|
||||
if (codec_type == AVMEDIA_TYPE_SUBTITLE)
|
||||
{
|
||||
stream_index = -1;
|
||||
is->last_subtitle_stream = -1;
|
||||
goto the_end;
|
||||
}
|
||||
if (start_index == -1)
|
||||
return;
|
||||
stream_index = 0;
|
||||
} else
|
||||
stream_index = 0;
|
||||
}
|
||||
if (stream_index == start_index)
|
||||
return;
|
||||
@@ -2825,7 +2763,7 @@ static void stream_cycle_channel(VideoState *is, int codec_type)
|
||||
}
|
||||
}
|
||||
the_end:
|
||||
stream_component_close(is, old_index);
|
||||
stream_component_close(is, start_index);
|
||||
stream_component_open(is, stream_index);
|
||||
}
|
||||
|
||||
@@ -2889,7 +2827,6 @@ static void event_loop(VideoState *cur_stream)
|
||||
break;
|
||||
case SDLK_f:
|
||||
toggle_full_screen(cur_stream);
|
||||
cur_stream->force_refresh = 1;
|
||||
break;
|
||||
case SDLK_p:
|
||||
case SDLK_SPACE:
|
||||
@@ -2909,7 +2846,6 @@ static void event_loop(VideoState *cur_stream)
|
||||
break;
|
||||
case SDLK_w:
|
||||
toggle_audio_display(cur_stream);
|
||||
cur_stream->force_refresh = 1;
|
||||
break;
|
||||
case SDLK_PAGEUP:
|
||||
incr = 600.0;
|
||||
@@ -2952,9 +2888,6 @@ static void event_loop(VideoState *cur_stream)
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case SDL_VIDEOEXPOSE:
|
||||
cur_stream->force_refresh = 1;
|
||||
break;
|
||||
case SDL_MOUSEBUTTONDOWN:
|
||||
if (exit_on_mousedown) {
|
||||
do_exit(cur_stream);
|
||||
@@ -2997,13 +2930,13 @@ static void event_loop(VideoState *cur_stream)
|
||||
SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
|
||||
screen_width = cur_stream->width = event.resize.w;
|
||||
screen_height = cur_stream->height = event.resize.h;
|
||||
cur_stream->force_refresh = 1;
|
||||
break;
|
||||
case SDL_QUIT:
|
||||
case FF_QUIT_EVENT:
|
||||
do_exit(cur_stream);
|
||||
break;
|
||||
case FF_ALLOC_EVENT:
|
||||
video_open(event.user.data1, 0);
|
||||
alloc_picture(event.user.data1);
|
||||
break;
|
||||
case FF_REFRESH_EVENT:
|
||||
@@ -3282,7 +3215,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
av_init_packet(&flush_pkt);
|
||||
flush_pkt.data = (char *)(intptr_t)"FLUSH";
|
||||
flush_pkt.data = "FLUSH";
|
||||
|
||||
is = stream_open(input_filename, file_iformat);
|
||||
if (!is) {
|
||||
|
17
ffpresets/libvpx-1080p.ffpreset
Normal file
17
ffpresets/libvpx-1080p.ffpreset
Normal file
@@ -0,0 +1,17 @@
|
||||
vcodec=libvpx
|
||||
g=120
|
||||
rc_lookahead=16
|
||||
quality=good
|
||||
speed=0
|
||||
profile=1
|
||||
qmax=51
|
||||
qmin=11
|
||||
slices=4
|
||||
vb=2M
|
||||
|
||||
#ignored unless using -pass 2
|
||||
maxrate=24M
|
||||
minrate=100k
|
||||
arnr_max_frames=7
|
||||
arnr_strength=5
|
||||
arnr_type=3
|
17
ffpresets/libvpx-1080p50_60.ffpreset
Normal file
17
ffpresets/libvpx-1080p50_60.ffpreset
Normal file
@@ -0,0 +1,17 @@
|
||||
vcodec=libvpx
|
||||
g=120
|
||||
rc_lookahead=25
|
||||
quality=good
|
||||
speed=0
|
||||
profile=1
|
||||
qmax=51
|
||||
qmin=11
|
||||
slices=4
|
||||
vb=2M
|
||||
|
||||
#ignored unless using -pass 2
|
||||
maxrate=24M
|
||||
minrate=100k
|
||||
arnr_max_frames=7
|
||||
arnr_strength=5
|
||||
arnr_type=3
|
16
ffpresets/libvpx-360p.ffpreset
Normal file
16
ffpresets/libvpx-360p.ffpreset
Normal file
@@ -0,0 +1,16 @@
|
||||
vcodec=libvpx
|
||||
g=120
|
||||
rc_lookahead=16
|
||||
quality=good
|
||||
speed=0
|
||||
profile=0
|
||||
qmax=63
|
||||
qmin=0
|
||||
vb=768k
|
||||
|
||||
#ignored unless using -pass 2
|
||||
maxrate=1.5M
|
||||
minrate=40k
|
||||
arnr_max_frames=7
|
||||
arnr_strength=5
|
||||
arnr_type=3
|
17
ffpresets/libvpx-720p.ffpreset
Normal file
17
ffpresets/libvpx-720p.ffpreset
Normal file
@@ -0,0 +1,17 @@
|
||||
vcodec=libvpx
|
||||
g=120
|
||||
rc_lookahead=16
|
||||
quality=good
|
||||
speed=0
|
||||
profile=0
|
||||
qmax=51
|
||||
qmin=11
|
||||
slices=4
|
||||
vb=2M
|
||||
|
||||
#ignored unless using -pass 2
|
||||
maxrate=24M
|
||||
minrate=100k
|
||||
arnr_max_frames=7
|
||||
arnr_strength=5
|
||||
arnr_type=3
|
17
ffpresets/libvpx-720p50_60.ffpreset
Normal file
17
ffpresets/libvpx-720p50_60.ffpreset
Normal file
@@ -0,0 +1,17 @@
|
||||
vcodec=libvpx
|
||||
g=120
|
||||
rc_lookahead=25
|
||||
quality=good
|
||||
speed=0
|
||||
profile=0
|
||||
qmax=51
|
||||
qmin=11
|
||||
slices=4
|
||||
vb=2M
|
||||
|
||||
#ignored unless using -pass 2
|
||||
maxrate=24M
|
||||
minrate=100k
|
||||
arnr_max_frames=7
|
||||
arnr_strength=5
|
||||
arnr_type=3
|
564
ffprobe.c
564
ffprobe.c
@@ -29,11 +29,9 @@
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/bprint.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavutil/timecode.h"
|
||||
#include "libavdevice/avdevice.h"
|
||||
#include "libswscale/swscale.h"
|
||||
#include "libswresample/swresample.h"
|
||||
@@ -43,14 +41,9 @@
|
||||
const char program_name[] = "ffprobe";
|
||||
const int program_birth_year = 2007;
|
||||
|
||||
static int do_count_frames = 0;
|
||||
static int do_count_packets = 0;
|
||||
static int do_read_frames = 0;
|
||||
static int do_read_packets = 0;
|
||||
static int do_show_error = 0;
|
||||
static int do_show_format = 0;
|
||||
static int do_show_frames = 0;
|
||||
static AVDictionary *fmt_entries_to_show = NULL;
|
||||
static int do_show_packets = 0;
|
||||
static int do_show_streams = 0;
|
||||
static int do_show_program_version = 0;
|
||||
@@ -70,19 +63,16 @@ static const OptionDef options[];
|
||||
static const char *input_filename;
|
||||
static AVInputFormat *iformat = NULL;
|
||||
|
||||
static const char *const binary_unit_prefixes [] = { "", "Ki", "Mi", "Gi", "Ti", "Pi" };
|
||||
static const char *const decimal_unit_prefixes[] = { "", "K" , "M" , "G" , "T" , "P" };
|
||||
static const char *binary_unit_prefixes [] = { "", "Ki", "Mi", "Gi", "Ti", "Pi" };
|
||||
static const char *decimal_unit_prefixes[] = { "", "K" , "M" , "G" , "T" , "P" };
|
||||
|
||||
static const char unit_second_str[] = "s" ;
|
||||
static const char unit_hertz_str[] = "Hz" ;
|
||||
static const char unit_byte_str[] = "byte" ;
|
||||
static const char unit_bit_per_second_str[] = "bit/s";
|
||||
static uint64_t *nb_streams_packets;
|
||||
static uint64_t *nb_streams_frames;
|
||||
static const char *unit_second_str = "s" ;
|
||||
static const char *unit_hertz_str = "Hz" ;
|
||||
static const char *unit_byte_str = "byte" ;
|
||||
static const char *unit_bit_per_second_str = "bit/s";
|
||||
|
||||
void av_noreturn exit_program(int ret)
|
||||
{
|
||||
av_dict_free(&fmt_entries_to_show);
|
||||
exit(ret);
|
||||
}
|
||||
|
||||
@@ -178,8 +168,6 @@ struct WriterContext {
|
||||
unsigned int nb_item; ///< number of the item printed in the given section, starting at 0
|
||||
unsigned int nb_section; ///< number of the section printed in the given section sequence, starting at 0
|
||||
unsigned int nb_chapter; ///< number of the chapter, starting at 0
|
||||
|
||||
int is_fmt_chapter; ///< tells if the current chapter is "format", required by the print_format_entry option
|
||||
};
|
||||
|
||||
static const char *writer_get_name(void *p)
|
||||
@@ -254,8 +242,6 @@ static inline void writer_print_chapter_header(WriterContext *wctx,
|
||||
if (wctx->writer->print_chapter_header)
|
||||
wctx->writer->print_chapter_header(wctx, chapter);
|
||||
wctx->nb_section = 0;
|
||||
|
||||
wctx->is_fmt_chapter = !strcmp(chapter, "format");
|
||||
}
|
||||
|
||||
static inline void writer_print_chapter_footer(WriterContext *wctx,
|
||||
@@ -285,10 +271,8 @@ static inline void writer_print_section_footer(WriterContext *wctx,
|
||||
static inline void writer_print_integer(WriterContext *wctx,
|
||||
const char *key, long long int val)
|
||||
{
|
||||
if (!wctx->is_fmt_chapter || !fmt_entries_to_show || av_dict_get(fmt_entries_to_show, key, NULL, 0)) {
|
||||
wctx->writer->print_integer(wctx, key, val);
|
||||
wctx->nb_item++;
|
||||
}
|
||||
wctx->writer->print_integer(wctx, key, val);
|
||||
wctx->nb_item++;
|
||||
}
|
||||
|
||||
static inline void writer_print_string(WriterContext *wctx,
|
||||
@@ -296,10 +280,8 @@ static inline void writer_print_string(WriterContext *wctx,
|
||||
{
|
||||
if (opt && !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS))
|
||||
return;
|
||||
if (!wctx->is_fmt_chapter || !fmt_entries_to_show || av_dict_get(fmt_entries_to_show, key, NULL, 0)) {
|
||||
wctx->writer->print_string(wctx, key, val);
|
||||
wctx->nb_item++;
|
||||
}
|
||||
wctx->writer->print_string(wctx, key, val);
|
||||
wctx->nb_item++;
|
||||
}
|
||||
|
||||
static void writer_print_time(WriterContext *wctx, const char *key,
|
||||
@@ -307,14 +289,12 @@ static void writer_print_time(WriterContext *wctx, const char *key,
|
||||
{
|
||||
char buf[128];
|
||||
|
||||
if (!wctx->is_fmt_chapter || !fmt_entries_to_show || av_dict_get(fmt_entries_to_show, key, NULL, 0)) {
|
||||
if (ts == AV_NOPTS_VALUE) {
|
||||
writer_print_string(wctx, key, "N/A", 1);
|
||||
} else {
|
||||
double d = ts * av_q2d(*time_base);
|
||||
value_string(buf, sizeof(buf), (struct unit_value){.val.d=d, .unit=unit_second_str});
|
||||
writer_print_string(wctx, key, buf, 0);
|
||||
}
|
||||
if (ts == AV_NOPTS_VALUE) {
|
||||
writer_print_string(wctx, key, "N/A", 1);
|
||||
} else {
|
||||
double d = ts * av_q2d(*time_base);
|
||||
value_string(buf, sizeof(buf), (struct unit_value){.val.d=d, .unit=unit_second_str});
|
||||
writer_print_string(wctx, key, buf, 0);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -358,68 +338,82 @@ static const Writer *writer_get_by_name(const char *name)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Print helpers */
|
||||
|
||||
struct print_buf {
|
||||
char *s;
|
||||
int len;
|
||||
};
|
||||
|
||||
static char *fast_asprintf(struct print_buf *pbuf, const char *fmt, ...)
|
||||
{
|
||||
va_list va;
|
||||
int len;
|
||||
|
||||
va_start(va, fmt);
|
||||
len = vsnprintf(NULL, 0, fmt, va);
|
||||
va_end(va);
|
||||
if (len < 0)
|
||||
goto fail;
|
||||
|
||||
if (pbuf->len < len) {
|
||||
char *p = av_realloc(pbuf->s, len + 1);
|
||||
if (!p)
|
||||
goto fail;
|
||||
pbuf->s = p;
|
||||
pbuf->len = len;
|
||||
}
|
||||
|
||||
va_start(va, fmt);
|
||||
len = vsnprintf(pbuf->s, len + 1, fmt, va);
|
||||
va_end(va);
|
||||
if (len < 0)
|
||||
goto fail;
|
||||
return pbuf->s;
|
||||
|
||||
fail:
|
||||
av_freep(&pbuf->s);
|
||||
pbuf->len = 0;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#define ESCAPE_INIT_BUF_SIZE 256
|
||||
|
||||
#define ESCAPE_CHECK_SIZE(src, size, max_size) \
|
||||
if (size > max_size) { \
|
||||
char buf[64]; \
|
||||
snprintf(buf, sizeof(buf), "%s", src); \
|
||||
av_log(log_ctx, AV_LOG_WARNING, \
|
||||
"String '%s...' with is too big\n", buf); \
|
||||
return "FFPROBE_TOO_BIG_STRING"; \
|
||||
}
|
||||
|
||||
#define ESCAPE_REALLOC_BUF(dst_size_p, dst_p, src, size) \
|
||||
if (*dst_size_p < size) { \
|
||||
char *q = av_realloc(*dst_p, size); \
|
||||
if (!q) { \
|
||||
char buf[64]; \
|
||||
snprintf(buf, sizeof(buf), "%s", src); \
|
||||
av_log(log_ctx, AV_LOG_WARNING, \
|
||||
"String '%s...' could not be escaped\n", buf); \
|
||||
return "FFPROBE_THIS_STRING_COULD_NOT_BE_ESCAPED"; \
|
||||
} \
|
||||
*dst_size_p = size; \
|
||||
*dst = q; \
|
||||
}
|
||||
|
||||
/* WRITERS */
|
||||
|
||||
/* Default output */
|
||||
|
||||
typedef struct DefaultContext {
|
||||
const AVClass *class;
|
||||
int nokey;
|
||||
int noprint_wrappers;
|
||||
} DefaultContext;
|
||||
|
||||
#define OFFSET(x) offsetof(DefaultContext, x)
|
||||
|
||||
static const AVOption default_options[] = {
|
||||
{ "noprint_wrappers", "do not print headers and footers", OFFSET(noprint_wrappers), AV_OPT_TYPE_INT, {.dbl=0}, 0, 1 },
|
||||
{ "nw", "do not print headers and footers", OFFSET(noprint_wrappers), AV_OPT_TYPE_INT, {.dbl=0}, 0, 1 },
|
||||
{ "nokey", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_INT, {.dbl=0}, 0, 1 },
|
||||
{ "nk", "force no key printing", OFFSET(nokey), AV_OPT_TYPE_INT, {.dbl=0}, 0, 1 },
|
||||
{NULL},
|
||||
};
|
||||
|
||||
static const char *default_get_name(void *ctx)
|
||||
{
|
||||
return "default";
|
||||
}
|
||||
|
||||
static const AVClass default_class = {
|
||||
"DefaultContext",
|
||||
default_get_name,
|
||||
default_options
|
||||
};
|
||||
|
||||
static av_cold int default_init(WriterContext *wctx, const char *args, void *opaque)
|
||||
{
|
||||
DefaultContext *def = wctx->priv;
|
||||
int err;
|
||||
|
||||
def->class = &default_class;
|
||||
av_opt_set_defaults(def);
|
||||
|
||||
if (args &&
|
||||
(err = (av_set_options_string(def, args, "=", ":"))) < 0) {
|
||||
av_log(wctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args);
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void default_print_footer(WriterContext *wctx)
|
||||
{
|
||||
DefaultContext *def = wctx->priv;
|
||||
|
||||
if (!def->noprint_wrappers)
|
||||
printf("\n");
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
static void default_print_chapter_header(WriterContext *wctx, const char *chapter)
|
||||
{
|
||||
DefaultContext *def = wctx->priv;
|
||||
|
||||
if (!def->noprint_wrappers && wctx->nb_chapter)
|
||||
if (wctx->nb_chapter)
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
@@ -435,55 +429,41 @@ static inline char *upcase_string(char *dst, size_t dst_size, const char *src)
|
||||
|
||||
static void default_print_section_header(WriterContext *wctx, const char *section)
|
||||
{
|
||||
DefaultContext *def = wctx->priv;
|
||||
char buf[32];
|
||||
|
||||
if (wctx->nb_section)
|
||||
printf("\n");
|
||||
if (!def->noprint_wrappers)
|
||||
printf("[%s]\n", upcase_string(buf, sizeof(buf), section));
|
||||
printf("[%s]\n", upcase_string(buf, sizeof(buf), section));
|
||||
}
|
||||
|
||||
static void default_print_section_footer(WriterContext *wctx, const char *section)
|
||||
{
|
||||
DefaultContext *def = wctx->priv;
|
||||
char buf[32];
|
||||
|
||||
if (!def->noprint_wrappers)
|
||||
printf("[/%s]", upcase_string(buf, sizeof(buf), section));
|
||||
printf("[/%s]", upcase_string(buf, sizeof(buf), section));
|
||||
}
|
||||
|
||||
static void default_print_str(WriterContext *wctx, const char *key, const char *value)
|
||||
{
|
||||
DefaultContext *def = wctx->priv;
|
||||
if (!def->nokey)
|
||||
printf("%s=", key);
|
||||
printf("%s\n", value);
|
||||
printf("%s=%s\n", key, value);
|
||||
}
|
||||
|
||||
static void default_print_int(WriterContext *wctx, const char *key, long long int value)
|
||||
{
|
||||
DefaultContext *def = wctx->priv;
|
||||
|
||||
if (!def->nokey)
|
||||
printf("%s=", key);
|
||||
printf("%lld\n", value);
|
||||
printf("%s=%lld\n", key, value);
|
||||
}
|
||||
|
||||
static void default_show_tags(WriterContext *wctx, AVDictionary *dict)
|
||||
{
|
||||
AVDictionaryEntry *tag = NULL;
|
||||
while ((tag = av_dict_get(dict, "", tag, AV_DICT_IGNORE_SUFFIX))) {
|
||||
if (!fmt_entries_to_show || (tag->key && av_dict_get(fmt_entries_to_show, tag->key, NULL, 0)))
|
||||
printf("TAG:");
|
||||
printf("TAG:");
|
||||
writer_print_string(wctx, tag->key, tag->value, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static const Writer default_writer = {
|
||||
.name = "default",
|
||||
.priv_size = sizeof(DefaultContext),
|
||||
.init = default_init,
|
||||
.print_footer = default_print_footer,
|
||||
.print_chapter_header = default_print_chapter_header,
|
||||
.print_section_header = default_print_section_header,
|
||||
@@ -500,51 +480,81 @@ static const Writer default_writer = {
|
||||
* Escape \n, \r, \\ and sep characters contained in s, and print the
|
||||
* resulting string.
|
||||
*/
|
||||
static const char *c_escape_str(AVBPrint *dst, const char *src, const char sep, void *log_ctx)
|
||||
static const char *c_escape_str(char **dst, size_t *dst_size,
|
||||
const char *src, const char sep, void *log_ctx)
|
||||
{
|
||||
const char *p;
|
||||
char *q;
|
||||
size_t size = 1;
|
||||
|
||||
/* precompute size */
|
||||
for (p = src; *p; p++, size++) {
|
||||
ESCAPE_CHECK_SIZE(src, size, SIZE_MAX-2);
|
||||
if (*p == '\n' || *p == '\r' || *p == '\\')
|
||||
size++;
|
||||
}
|
||||
|
||||
ESCAPE_REALLOC_BUF(dst_size, dst, src, size);
|
||||
|
||||
q = *dst;
|
||||
for (p = src; *p; p++) {
|
||||
switch (*src) {
|
||||
case '\n': av_bprintf(dst, "%s", "\\n"); break;
|
||||
case '\r': av_bprintf(dst, "%s", "\\r"); break;
|
||||
case '\\': av_bprintf(dst, "%s", "\\\\"); break;
|
||||
case '\n': *q++ = '\\'; *q++ = 'n'; break;
|
||||
case '\r': *q++ = '\\'; *q++ = 'r'; break;
|
||||
case '\\': *q++ = '\\'; *q++ = '\\'; break;
|
||||
default:
|
||||
if (*p == sep)
|
||||
av_bprint_chars(dst, '\\', 1);
|
||||
av_bprint_chars(dst, *p, 1);
|
||||
*q++ = '\\';
|
||||
*q++ = *p;
|
||||
}
|
||||
}
|
||||
return dst->str;
|
||||
*q = 0;
|
||||
return *dst;
|
||||
}
|
||||
|
||||
/**
|
||||
* Quote fields containing special characters, check RFC4180.
|
||||
*/
|
||||
static const char *csv_escape_str(AVBPrint *dst, const char *src, const char sep, void *log_ctx)
|
||||
static const char *csv_escape_str(char **dst, size_t *dst_size,
|
||||
const char *src, const char sep, void *log_ctx)
|
||||
{
|
||||
const char *p;
|
||||
char *q;
|
||||
size_t size = 1;
|
||||
int quote = 0;
|
||||
|
||||
/* check if input needs quoting */
|
||||
for (p = src; *p; p++)
|
||||
/* precompute size */
|
||||
for (p = src; *p; p++, size++) {
|
||||
ESCAPE_CHECK_SIZE(src, size, SIZE_MAX-4);
|
||||
if (*p == '"' || *p == sep || *p == '\n' || *p == '\r')
|
||||
quote = 1;
|
||||
|
||||
if (quote)
|
||||
av_bprint_chars(dst, '\"', 1);
|
||||
|
||||
for (p = src; *p; p++) {
|
||||
if (!quote) {
|
||||
quote = 1;
|
||||
size += 2;
|
||||
}
|
||||
if (*p == '"')
|
||||
av_bprint_chars(dst, '\"', 1);
|
||||
av_bprint_chars(dst, *p, 1);
|
||||
size++;
|
||||
}
|
||||
|
||||
ESCAPE_REALLOC_BUF(dst_size, dst, src, size);
|
||||
|
||||
q = *dst;
|
||||
p = src;
|
||||
if (quote)
|
||||
*q++ = '\"';
|
||||
while (*p) {
|
||||
if (*p == '"')
|
||||
*q++ = '\"';
|
||||
*q++ = *p++;
|
||||
}
|
||||
if (quote)
|
||||
av_bprint_chars(dst, '\"', 1);
|
||||
return dst->str;
|
||||
*q++ = '\"';
|
||||
*q = 0;
|
||||
|
||||
return *dst;
|
||||
}
|
||||
|
||||
static const char *none_escape_str(AVBPrint *dst, const char *src, const char sep, void *log_ctx)
|
||||
static const char *none_escape_str(char **dst, size_t *dst_size,
|
||||
const char *src, const char sep, void *log_ctx)
|
||||
{
|
||||
return src;
|
||||
}
|
||||
@@ -554,11 +564,13 @@ typedef struct CompactContext {
|
||||
char *item_sep_str;
|
||||
char item_sep;
|
||||
int nokey;
|
||||
char *buf;
|
||||
size_t buf_size;
|
||||
char *escape_mode_str;
|
||||
const char * (*escape_str)(AVBPrint *dst, const char *src, const char sep, void *log_ctx);
|
||||
const char * (*escape_str)(char **dst, size_t *dst_size,
|
||||
const char *src, const char sep, void *log_ctx);
|
||||
} CompactContext;
|
||||
|
||||
#undef OFFSET
|
||||
#define OFFSET(x) offsetof(CompactContext, x)
|
||||
|
||||
static const AVOption compact_options[]= {
|
||||
@@ -602,6 +614,10 @@ static av_cold int compact_init(WriterContext *wctx, const char *args, void *opa
|
||||
}
|
||||
compact->item_sep = compact->item_sep_str[0];
|
||||
|
||||
compact->buf_size = ESCAPE_INIT_BUF_SIZE;
|
||||
if (!(compact->buf = av_malloc(compact->buf_size)))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if (!strcmp(compact->escape_mode_str, "none")) compact->escape_str = none_escape_str;
|
||||
else if (!strcmp(compact->escape_mode_str, "c" )) compact->escape_str = c_escape_str;
|
||||
else if (!strcmp(compact->escape_mode_str, "csv" )) compact->escape_str = csv_escape_str;
|
||||
@@ -618,6 +634,7 @@ static av_cold void compact_uninit(WriterContext *wctx)
|
||||
CompactContext *compact = wctx->priv;
|
||||
|
||||
av_freep(&compact->item_sep_str);
|
||||
av_freep(&compact->buf);
|
||||
av_freep(&compact->escape_mode_str);
|
||||
}
|
||||
|
||||
@@ -636,14 +653,12 @@ static void compact_print_section_footer(WriterContext *wctx, const char *sectio
|
||||
static void compact_print_str(WriterContext *wctx, const char *key, const char *value)
|
||||
{
|
||||
CompactContext *compact = wctx->priv;
|
||||
AVBPrint buf;
|
||||
|
||||
if (wctx->nb_item) printf("%c", compact->item_sep);
|
||||
if (!compact->nokey)
|
||||
printf("%s=", key);
|
||||
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
printf("%s", compact->escape_str(&buf, value, compact->item_sep, wctx));
|
||||
av_bprint_finalize(&buf, NULL);
|
||||
printf("%s", compact->escape_str(&compact->buf, &compact->buf_size,
|
||||
value, compact->item_sep, wctx));
|
||||
}
|
||||
|
||||
static void compact_print_int(WriterContext *wctx, const char *key, long long int value)
|
||||
@@ -660,20 +675,14 @@ static void compact_show_tags(WriterContext *wctx, AVDictionary *dict)
|
||||
{
|
||||
CompactContext *compact = wctx->priv;
|
||||
AVDictionaryEntry *tag = NULL;
|
||||
AVBPrint buf;
|
||||
|
||||
while ((tag = av_dict_get(dict, "", tag, AV_DICT_IGNORE_SUFFIX))) {
|
||||
if (wctx->nb_item) printf("%c", compact->item_sep);
|
||||
|
||||
if (!compact->nokey) {
|
||||
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
printf("tag:%s=", compact->escape_str(&buf, tag->key, compact->item_sep, wctx));
|
||||
av_bprint_finalize(&buf, NULL);
|
||||
}
|
||||
|
||||
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
printf("%s", compact->escape_str(&buf, tag->value, compact->item_sep, wctx));
|
||||
av_bprint_finalize(&buf, NULL);
|
||||
if (!compact->nokey)
|
||||
printf("tag:%s=", compact->escape_str(&compact->buf, &compact->buf_size,
|
||||
tag->key, compact->item_sep, wctx));
|
||||
printf("%s", compact->escape_str(&compact->buf, &compact->buf_size,
|
||||
tag->value, compact->item_sep, wctx));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -715,6 +724,8 @@ static const Writer csv_writer = {
|
||||
typedef struct {
|
||||
const AVClass *class;
|
||||
int multiple_entries; ///< tells if the given chapter requires multiple entries
|
||||
char *buf;
|
||||
size_t buf_size;
|
||||
int print_packets_and_frames;
|
||||
int indent_level;
|
||||
int compact;
|
||||
@@ -758,27 +769,52 @@ static av_cold int json_init(WriterContext *wctx, const char *args, void *opaque
|
||||
json->item_sep = json->compact ? ", " : ",\n";
|
||||
json->item_start_end = json->compact ? " " : "\n";
|
||||
|
||||
json->buf_size = ESCAPE_INIT_BUF_SIZE;
|
||||
if (!(json->buf = av_malloc(json->buf_size)))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const char *json_escape_str(AVBPrint *dst, const char *src, void *log_ctx)
|
||||
static av_cold void json_uninit(WriterContext *wctx)
|
||||
{
|
||||
JSONContext *json = wctx->priv;
|
||||
av_freep(&json->buf);
|
||||
}
|
||||
|
||||
static const char *json_escape_str(char **dst, size_t *dst_size, const char *src,
|
||||
void *log_ctx)
|
||||
{
|
||||
static const char json_escape[] = {'"', '\\', '\b', '\f', '\n', '\r', '\t', 0};
|
||||
static const char json_subst[] = {'"', '\\', 'b', 'f', 'n', 'r', 't', 0};
|
||||
const char *p;
|
||||
char *q;
|
||||
size_t size = 1;
|
||||
|
||||
// compute the length of the escaped string
|
||||
for (p = src; *p; p++) {
|
||||
ESCAPE_CHECK_SIZE(src, size, SIZE_MAX-6);
|
||||
if (strchr(json_escape, *p)) size += 2; // simple escape
|
||||
else if ((unsigned char)*p < 32) size += 6; // handle non-printable chars
|
||||
else size += 1; // char copy
|
||||
}
|
||||
ESCAPE_REALLOC_BUF(dst_size, dst, src, size);
|
||||
|
||||
q = *dst;
|
||||
for (p = src; *p; p++) {
|
||||
char *s = strchr(json_escape, *p);
|
||||
if (s) {
|
||||
av_bprint_chars(dst, '\\', 1);
|
||||
av_bprint_chars(dst, json_subst[s - json_escape], 1);
|
||||
*q++ = '\\';
|
||||
*q++ = json_subst[s - json_escape];
|
||||
} else if ((unsigned char)*p < 32) {
|
||||
av_bprintf(dst, "\\u00%02x", *p & 0xff);
|
||||
snprintf(q, 7, "\\u00%02x", *p & 0xff);
|
||||
q += 6;
|
||||
} else {
|
||||
av_bprint_chars(dst, *p, 1);
|
||||
*q++ = *p;
|
||||
}
|
||||
}
|
||||
return dst->str;
|
||||
*q = 0;
|
||||
return *dst;
|
||||
}
|
||||
|
||||
static void json_print_header(WriterContext *wctx)
|
||||
@@ -800,7 +836,6 @@ static void json_print_footer(WriterContext *wctx)
|
||||
static void json_print_chapter_header(WriterContext *wctx, const char *chapter)
|
||||
{
|
||||
JSONContext *json = wctx->priv;
|
||||
AVBPrint buf;
|
||||
|
||||
if (wctx->nb_chapter)
|
||||
printf(",");
|
||||
@@ -810,9 +845,7 @@ static void json_print_chapter_header(WriterContext *wctx, const char *chapter)
|
||||
!strcmp(chapter, "streams") || !strcmp(chapter, "library_versions");
|
||||
if (json->multiple_entries) {
|
||||
JSON_INDENT();
|
||||
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
printf("\"%s\": [\n", json_escape_str(&buf, chapter, wctx));
|
||||
av_bprint_finalize(&buf, NULL);
|
||||
printf("\"%s\": [\n", json_escape_str(&json->buf, &json->buf_size, chapter, wctx));
|
||||
json->print_packets_and_frames = !strcmp(chapter, "packets_and_frames");
|
||||
json->indent_level++;
|
||||
}
|
||||
@@ -863,15 +896,10 @@ static void json_print_section_footer(WriterContext *wctx, const char *section)
|
||||
static inline void json_print_item_str(WriterContext *wctx,
|
||||
const char *key, const char *value)
|
||||
{
|
||||
AVBPrint buf;
|
||||
JSONContext *json = wctx->priv;
|
||||
|
||||
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
printf("\"%s\":", json_escape_str(&buf, key, wctx));
|
||||
av_bprint_finalize(&buf, NULL);
|
||||
|
||||
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
printf(" \"%s\"", json_escape_str(&buf, value, wctx));
|
||||
av_bprint_finalize(&buf, NULL);
|
||||
printf("\"%s\":", json_escape_str(&json->buf, &json->buf_size, key, wctx));
|
||||
printf(" \"%s\"", json_escape_str(&json->buf, &json->buf_size, value, wctx));
|
||||
}
|
||||
|
||||
static void json_print_str(WriterContext *wctx, const char *key, const char *value)
|
||||
@@ -887,15 +915,12 @@ static void json_print_str(WriterContext *wctx, const char *key, const char *val
|
||||
static void json_print_int(WriterContext *wctx, const char *key, long long int value)
|
||||
{
|
||||
JSONContext *json = wctx->priv;
|
||||
AVBPrint buf;
|
||||
|
||||
if (wctx->nb_item) printf("%s", json->item_sep);
|
||||
if (!json->compact)
|
||||
JSON_INDENT();
|
||||
|
||||
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
printf("\"%s\": %lld", json_escape_str(&buf, key, wctx), value);
|
||||
av_bprint_finalize(&buf, NULL);
|
||||
printf("\"%s\": %lld",
|
||||
json_escape_str(&json->buf, &json->buf_size, key, wctx), value);
|
||||
}
|
||||
|
||||
static void json_show_tags(WriterContext *wctx, AVDictionary *dict)
|
||||
@@ -928,6 +953,7 @@ static const Writer json_writer = {
|
||||
.name = "json",
|
||||
.priv_size = sizeof(JSONContext),
|
||||
.init = json_init,
|
||||
.uninit = json_uninit,
|
||||
.print_header = json_print_header,
|
||||
.print_footer = json_print_footer,
|
||||
.print_chapter_header = json_print_chapter_header,
|
||||
@@ -949,6 +975,8 @@ typedef struct {
|
||||
int indent_level;
|
||||
int fully_qualified;
|
||||
int xsd_strict;
|
||||
char *buf;
|
||||
size_t buf_size;
|
||||
} XMLContext;
|
||||
|
||||
#undef OFFSET
|
||||
@@ -1008,25 +1036,61 @@ static av_cold int xml_init(WriterContext *wctx, const char *args, void *opaque)
|
||||
}
|
||||
}
|
||||
|
||||
xml->buf_size = ESCAPE_INIT_BUF_SIZE;
|
||||
if (!(xml->buf = av_malloc(xml->buf_size)))
|
||||
return AVERROR(ENOMEM);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const char *xml_escape_str(AVBPrint *dst, const char *src, void *log_ctx)
|
||||
static av_cold void xml_uninit(WriterContext *wctx)
|
||||
{
|
||||
XMLContext *xml = wctx->priv;
|
||||
av_freep(&xml->buf);
|
||||
}
|
||||
|
||||
static const char *xml_escape_str(char **dst, size_t *dst_size, const char *src,
|
||||
void *log_ctx)
|
||||
{
|
||||
const char *p;
|
||||
char *q;
|
||||
size_t size = 1;
|
||||
|
||||
for (p = src; *p; p++) {
|
||||
/* precompute size */
|
||||
for (p = src; *p; p++, size++) {
|
||||
ESCAPE_CHECK_SIZE(src, size, SIZE_MAX-10);
|
||||
switch (*p) {
|
||||
case '&' : av_bprintf(dst, "%s", "&"); break;
|
||||
case '<' : av_bprintf(dst, "%s", "<"); break;
|
||||
case '>' : av_bprintf(dst, "%s", ">"); break;
|
||||
case '\"': av_bprintf(dst, "%s", """); break;
|
||||
case '\'': av_bprintf(dst, "%s", "'"); break;
|
||||
default: av_bprint_chars(dst, *p, 1);
|
||||
case '&' : size += strlen("&"); break;
|
||||
case '<' : size += strlen("<"); break;
|
||||
case '>' : size += strlen(">"); break;
|
||||
case '\"': size += strlen("""); break;
|
||||
case '\'': size += strlen("'"); break;
|
||||
default: size++;
|
||||
}
|
||||
}
|
||||
ESCAPE_REALLOC_BUF(dst_size, dst, src, size);
|
||||
|
||||
return dst->str;
|
||||
#define COPY_STR(str) { \
|
||||
const char *s = str; \
|
||||
while (*s) \
|
||||
*q++ = *s++; \
|
||||
}
|
||||
|
||||
p = src;
|
||||
q = *dst;
|
||||
while (*p) {
|
||||
switch (*p) {
|
||||
case '&' : COPY_STR("&"); break;
|
||||
case '<' : COPY_STR("<"); break;
|
||||
case '>' : COPY_STR(">"); break;
|
||||
case '\"': COPY_STR("""); break;
|
||||
case '\'': COPY_STR("'"); break;
|
||||
default: *q++ = *p;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
*q = 0;
|
||||
|
||||
return *dst;
|
||||
}
|
||||
|
||||
static void xml_print_header(WriterContext *wctx)
|
||||
@@ -1101,13 +1165,11 @@ static void xml_print_section_footer(WriterContext *wctx, const char *section)
|
||||
|
||||
static void xml_print_str(WriterContext *wctx, const char *key, const char *value)
|
||||
{
|
||||
AVBPrint buf;
|
||||
XMLContext *xml = wctx->priv;
|
||||
|
||||
if (wctx->nb_item)
|
||||
printf(" ");
|
||||
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
printf("%s=\"%s\"", key, xml_escape_str(&buf, value, wctx));
|
||||
av_bprint_finalize(&buf, NULL);
|
||||
printf("%s=\"%s\"", key, xml_escape_str(&xml->buf, &xml->buf_size, value, wctx));
|
||||
}
|
||||
|
||||
static void xml_print_int(WriterContext *wctx, const char *key, long long int value)
|
||||
@@ -1122,7 +1184,6 @@ static void xml_show_tags(WriterContext *wctx, AVDictionary *dict)
|
||||
XMLContext *xml = wctx->priv;
|
||||
AVDictionaryEntry *tag = NULL;
|
||||
int is_first = 1;
|
||||
AVBPrint buf;
|
||||
|
||||
xml->indent_level++;
|
||||
while ((tag = av_dict_get(dict, "", tag, AV_DICT_IGNORE_SUFFIX))) {
|
||||
@@ -1133,14 +1194,10 @@ static void xml_show_tags(WriterContext *wctx, AVDictionary *dict)
|
||||
is_first = 0;
|
||||
}
|
||||
XML_INDENT();
|
||||
|
||||
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
printf("<tag key=\"%s\"", xml_escape_str(&buf, tag->key, wctx));
|
||||
av_bprint_finalize(&buf, NULL);
|
||||
|
||||
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
printf(" value=\"%s\"/>\n", xml_escape_str(&buf, tag->value, wctx));
|
||||
av_bprint_finalize(&buf, NULL);
|
||||
printf("<tag key=\"%s\"",
|
||||
xml_escape_str(&xml->buf, &xml->buf_size, tag->key, wctx));
|
||||
printf(" value=\"%s\"/>\n",
|
||||
xml_escape_str(&xml->buf, &xml->buf_size, tag->value, wctx));
|
||||
}
|
||||
xml->indent_level--;
|
||||
}
|
||||
@@ -1149,6 +1206,7 @@ static Writer xml_writer = {
|
||||
.name = "xml",
|
||||
.priv_size = sizeof(XMLContext),
|
||||
.init = xml_init,
|
||||
.uninit = xml_uninit,
|
||||
.print_header = xml_print_header,
|
||||
.print_footer = xml_print_footer,
|
||||
.print_chapter_header = xml_print_chapter_header,
|
||||
@@ -1177,9 +1235,13 @@ static void writer_register_all(void)
|
||||
}
|
||||
|
||||
#define print_fmt(k, f, ...) do { \
|
||||
av_bprint_clear(&pbuf); \
|
||||
av_bprintf(&pbuf, f, __VA_ARGS__); \
|
||||
writer_print_string(w, k, pbuf.str, 0); \
|
||||
if (fast_asprintf(&pbuf, f, __VA_ARGS__)) \
|
||||
writer_print_string(w, k, pbuf.s, 0); \
|
||||
} while (0)
|
||||
|
||||
#define print_fmt_opt(k, f, ...) do { \
|
||||
if (fast_asprintf(&pbuf, f, __VA_ARGS__)) \
|
||||
writer_print_string(w, k, pbuf.s, 1); \
|
||||
} while (0)
|
||||
|
||||
#define print_int(k, v) writer_print_integer(w, k, v)
|
||||
@@ -1197,11 +1259,9 @@ static void show_packet(WriterContext *w, AVFormatContext *fmt_ctx, AVPacket *pk
|
||||
{
|
||||
char val_str[128];
|
||||
AVStream *st = fmt_ctx->streams[pkt->stream_index];
|
||||
AVBPrint pbuf;
|
||||
struct print_buf pbuf = {.s = NULL};
|
||||
const char *s;
|
||||
|
||||
av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
|
||||
print_section_header("packet");
|
||||
s = av_get_media_type_string(st->codec->codec_type);
|
||||
if (s) print_str ("codec_type", s);
|
||||
@@ -1219,17 +1279,15 @@ static void show_packet(WriterContext *w, AVFormatContext *fmt_ctx, AVPacket *pk
|
||||
print_fmt("flags", "%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_');
|
||||
print_section_footer("packet");
|
||||
|
||||
av_bprint_finalize(&pbuf, NULL);
|
||||
av_free(pbuf.s);
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream)
|
||||
{
|
||||
AVBPrint pbuf;
|
||||
struct print_buf pbuf = {.s = NULL};
|
||||
const char *s;
|
||||
|
||||
av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
|
||||
print_section_header("frame");
|
||||
|
||||
s = av_get_media_type_string(stream->codec->codec_type);
|
||||
@@ -1276,7 +1334,7 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream)
|
||||
|
||||
print_section_footer("frame");
|
||||
|
||||
av_bprint_finalize(&pbuf, NULL);
|
||||
av_free(pbuf.s);
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
@@ -1301,7 +1359,7 @@ static av_always_inline int get_decoded_frame(AVFormatContext *fmt_ctx,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void read_packets(WriterContext *w, AVFormatContext *fmt_ctx)
|
||||
static void show_packets(WriterContext *w, AVFormatContext *fmt_ctx)
|
||||
{
|
||||
AVPacket pkt, pkt1;
|
||||
AVFrame frame;
|
||||
@@ -1310,23 +1368,18 @@ static void read_packets(WriterContext *w, AVFormatContext *fmt_ctx)
|
||||
av_init_packet(&pkt);
|
||||
|
||||
while (!av_read_frame(fmt_ctx, &pkt)) {
|
||||
if (do_read_packets) {
|
||||
if (do_show_packets)
|
||||
show_packet(w, fmt_ctx, &pkt, i++);
|
||||
nb_streams_packets[pkt.stream_index]++;
|
||||
}
|
||||
if (do_read_frames) {
|
||||
if (do_show_packets)
|
||||
show_packet(w, fmt_ctx, &pkt, i++);
|
||||
if (do_show_frames) {
|
||||
pkt1 = pkt;
|
||||
while (pkt1.size) {
|
||||
while (1) {
|
||||
avcodec_get_frame_defaults(&frame);
|
||||
ret = get_decoded_frame(fmt_ctx, &frame, &got_frame, &pkt1);
|
||||
if (ret < 0 || !got_frame)
|
||||
break;
|
||||
if (do_show_frames)
|
||||
show_frame(w, &frame, fmt_ctx->streams[pkt.stream_index]);
|
||||
show_frame(w, &frame, fmt_ctx->streams[pkt.stream_index]);
|
||||
pkt1.data += ret;
|
||||
pkt1.size -= ret;
|
||||
nb_streams_frames[pkt.stream_index]++;
|
||||
}
|
||||
}
|
||||
av_free_packet(&pkt);
|
||||
@@ -1337,13 +1390,8 @@ static void read_packets(WriterContext *w, AVFormatContext *fmt_ctx)
|
||||
//Flush remaining frames that are cached in the decoder
|
||||
for (i = 0; i < fmt_ctx->nb_streams; i++) {
|
||||
pkt.stream_index = i;
|
||||
while (get_decoded_frame(fmt_ctx, &frame, &got_frame, &pkt) >= 0 && got_frame) {
|
||||
if (do_read_frames) {
|
||||
if (do_show_frames)
|
||||
show_frame(w, &frame, fmt_ctx->streams[pkt.stream_index]);
|
||||
nb_streams_frames[pkt.stream_index]++;
|
||||
}
|
||||
}
|
||||
while (get_decoded_frame(fmt_ctx, &frame, &got_frame, &pkt) >= 0 && got_frame)
|
||||
show_frame(w, &frame, fmt_ctx->streams[pkt.stream_index]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1355,9 +1403,7 @@ static void show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_i
|
||||
char val_str[128];
|
||||
const char *s;
|
||||
AVRational display_aspect_ratio;
|
||||
AVBPrint pbuf;
|
||||
|
||||
av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
struct print_buf pbuf = {.s = NULL};
|
||||
|
||||
print_section_header("stream");
|
||||
|
||||
@@ -1407,9 +1453,13 @@ static void show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_i
|
||||
else print_str_opt("pix_fmt", "unknown");
|
||||
print_int("level", dec_ctx->level);
|
||||
if (dec_ctx->timecode_frame_start >= 0) {
|
||||
char tcbuf[AV_TIMECODE_STR_SIZE];
|
||||
av_timecode_make_mpeg_tc_string(tcbuf, dec_ctx->timecode_frame_start);
|
||||
print_str("timecode", tcbuf);
|
||||
uint32_t tc = dec_ctx->timecode_frame_start;
|
||||
print_fmt("timecode", "%02d:%02d:%02d%c%02d",
|
||||
tc>>19 & 0x1f, // hours
|
||||
tc>>13 & 0x3f, // minutes
|
||||
tc>>6 & 0x3f, // seconds
|
||||
tc & 1<<24 ? ';' : ':', // drop
|
||||
tc & 0x3f); // frames
|
||||
} else {
|
||||
print_str_opt("timecode", "N/A");
|
||||
}
|
||||
@@ -1446,18 +1496,12 @@ static void show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_i
print_fmt("time_base", "%d/%d", stream->time_base.num, stream->time_base.den);
print_time("start_time", stream->start_time, &stream->time_base);
print_time("duration", stream->duration, &stream->time_base);
if (dec_ctx->bit_rate > 0) print_val ("bit_rate", dec_ctx->bit_rate, unit_bit_per_second_str);
else print_str_opt("bit_rate", "N/A");
if (stream->nb_frames) print_fmt ("nb_frames", "%"PRId64, stream->nb_frames);
else print_str_opt("nb_frames", "N/A");
if (nb_streams_frames[stream_idx]) print_fmt ("nb_read_frames", "%"PRIu64, nb_streams_frames[stream_idx]);
else print_str_opt("nb_read_frames", "N/A");
if (nb_streams_packets[stream_idx]) print_fmt ("nb_read_packets", "%"PRIu64, nb_streams_packets[stream_idx]);
else print_str_opt("nb_read_packets", "N/A");
show_tags(stream->metadata);

print_section_footer("stream");
av_bprint_finalize(&pbuf, NULL);
av_free(pbuf.s);
fflush(stdout);
}

@@ -1549,19 +1593,6 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
return 0;
}

static void close_input_file(AVFormatContext **ctx_ptr)
{
int i;
AVFormatContext *fmt_ctx = *ctx_ptr;

/* close decoder for each stream */
for (i = 0; i < fmt_ctx->nb_streams; i++)
if (fmt_ctx->streams[i]->codec->codec_id != CODEC_ID_NONE)
avcodec_close(fmt_ctx->streams[i]->codec);

avformat_close_input(ctx_ptr);
}
#define PRINT_CHAPTER(name) do { \
if (do_show_ ## name) { \
writer_print_chapter_header(wctx, #name); \
@@ -1573,16 +1604,11 @@ static void close_input_file(AVFormatContext **ctx_ptr)
static int probe_file(WriterContext *wctx, const char *filename)
{
AVFormatContext *fmt_ctx;
int ret;

do_read_frames = do_show_frames || do_count_frames;
do_read_packets = do_show_packets || do_count_packets;
int ret, i;

ret = open_input_file(&fmt_ctx, filename);
if (ret >= 0) {
nb_streams_frames = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_frames));
nb_streams_packets = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_packets));
if (do_read_frames || do_read_packets) {
if (do_show_packets || do_show_frames) {
const char *chapter;
if (do_show_frames && do_show_packets &&
wctx->writer->flags & WRITER_FLAG_PUT_PACKETS_AND_FRAMES_IN_SAME_CHAPTER)
@@ -1591,18 +1617,18 @@ static int probe_file(WriterContext *wctx, const char *filename)
chapter = "packets";
else // (!do_show_packets && do_show_frames)
chapter = "frames";
if (do_show_frames || do_show_packets)
writer_print_chapter_header(wctx, chapter);
read_packets(wctx, fmt_ctx);
if (do_show_frames || do_show_packets)
writer_print_chapter_footer(wctx, chapter);
writer_print_chapter_header(wctx, chapter);
show_packets(wctx, fmt_ctx);
writer_print_chapter_footer(wctx, chapter);
}
PRINT_CHAPTER(streams);
PRINT_CHAPTER(format);
close_input_file(&fmt_ctx);
av_freep(&nb_streams_frames);
av_freep(&nb_streams_packets);
for (i = 0; i < fmt_ctx->nb_streams; i++)
if (fmt_ctx->streams[i]->codec->codec_id != CODEC_ID_NONE)
avcodec_close(fmt_ctx->streams[i]->codec);
avformat_close_input(&fmt_ctx);
}

return ret;
}
@@ -1615,8 +1641,7 @@ static void show_usage(void)

static void ffprobe_show_program_version(WriterContext *w)
{
AVBPrint pbuf;
av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
struct print_buf pbuf = {.s = NULL};

writer_print_chapter_header(w, "program_version");
print_section_header("program_version");
@@ -1631,7 +1656,7 @@ static void ffprobe_show_program_version(WriterContext *w)
print_section_footer("program_version");
writer_print_chapter_footer(w, "program_version");

av_bprint_finalize(&pbuf, NULL);
av_free(pbuf.s);
}

#define SHOW_LIB_VERSION(libname, LIBNAME) \
@@ -1672,13 +1697,6 @@ static int opt_format(const char *opt, const char *arg)
return 0;
}

static int opt_show_format_entry(const char *opt, const char *arg)
{
do_show_format = 1;
av_dict_set(&fmt_entries_to_show, arg, "", 0);
return 0;
}

static void opt_input_file(void *optctx, const char *arg)
{
if (input_filename) {
@@ -1736,12 +1754,8 @@ static const OptionDef options[] = {
{ "show_error", OPT_BOOL, {(void*)&do_show_error} , "show probing error" },
{ "show_format", OPT_BOOL, {(void*)&do_show_format} , "show format/container info" },
{ "show_frames", OPT_BOOL, {(void*)&do_show_frames} , "show frames info" },
{ "show_format_entry", HAS_ARG, {(void*)opt_show_format_entry},
"show a particular entry from the format/container info", "entry" },
{ "show_packets", OPT_BOOL, {(void*)&do_show_packets}, "show packets info" },
{ "show_streams", OPT_BOOL, {(void*)&do_show_streams}, "show streams info" },
{ "count_frames", OPT_BOOL, {(void*)&do_count_frames}, "count the number of frames per stream" },
{ "count_packets", OPT_BOOL, {(void*)&do_count_packets}, "count the number of packets per stream" },
{ "show_program_version", OPT_BOOL, {(void*)&do_show_program_version}, "show ffprobe version" },
{ "show_library_versions", OPT_BOOL, {(void*)&do_show_library_versions}, "show library versions" },
{ "show_versions", 0, {(void*)&opt_show_versions}, "show program and library versions" },
@@ -1813,10 +1827,6 @@ int main(int argc, char **argv)

end:
av_freep(&print_format);

uninit_opts();
av_dict_free(&fmt_entries_to_show);

avformat_network_deinit();

return ret;
32
ffserver.c
@@ -30,7 +30,7 @@
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include "libavformat/avformat.h"
|
||||
// FIXME those are internal headers, ffserver _really_ shouldn't use them
|
||||
// FIXME those are internal headers, avserver _really_ shouldn't use them
|
||||
#include "libavformat/ffm.h"
|
||||
#include "libavformat/network.h"
|
||||
#include "libavformat/os_support.h"
|
||||
@@ -339,7 +339,8 @@ static int resolve_host(struct in_addr *sin_addr, const char *hostname)
|
||||
if (!ff_inet_aton(hostname, sin_addr)) {
|
||||
#if HAVE_GETADDRINFO
|
||||
struct addrinfo *ai, *cur;
|
||||
struct addrinfo hints = { 0 };
|
||||
struct addrinfo hints;
|
||||
memset(&hints, 0, sizeof(hints));
|
||||
hints.ai_family = AF_INET;
|
||||
if (getaddrinfo(hostname, NULL, &hints, &ai))
|
||||
return -1;
|
||||
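Note: the only functional change in this resolve_host() hunk is how hints is zeroed (aggregate initializer versus explicit memset). For reference, a self-contained sketch of the same getaddrinfo() fallback path using plain POSIX calls (the helper name is illustrative, not ffserver code):

    #include <netdb.h>
    #include <netinet/in.h>

    /* Resolve a hostname to an IPv4 address; mirrors the HAVE_GETADDRINFO branch. */
    static int resolve_ipv4(const char *hostname, struct in_addr *out)
    {
        struct addrinfo hints = { 0 }, *ai;   /* = { 0 } replaces the memset */

        hints.ai_family = AF_INET;
        if (getaddrinfo(hostname, NULL, &hints, &ai))
            return -1;
        *out = ((struct sockaddr_in *)ai->ai_addr)->sin_addr;  /* first result */
        freeaddrinfo(ai);
        return 0;
    }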
@@ -1871,7 +1872,7 @@ static int http_parse_request(HTTPContext *c)
|
||||
|
||||
static void fmt_bytecount(AVIOContext *pb, int64_t count)
|
||||
{
|
||||
static const char suffix[] = " kMGTP";
|
||||
static const char *suffix = " kMGTP";
|
||||
const char *s;
|
||||
|
||||
for (s = suffix; count >= 100000 && s[1]; count /= 1000, s++);
|
||||
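Note: the " kMGTP" table above drives a scale-by-1000 loop: the count is divided down until it is small enough to read and the matching suffix character is picked. A standalone sketch of the same idea writing into a string instead of an AVIOContext (helper name illustrative):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Format a byte count as e.g. "123k" or "45M", scaling by 1000 per suffix. */
    static void format_bytecount(char *out, size_t out_size, int64_t count)
    {
        static const char suffix[] = " kMGTP";
        const char *s;

        for (s = suffix; count >= 100000 && s[1]; count /= 1000, s++)
            ;
        if (*s == ' ')
            snprintf(out, out_size, "%"PRId64, count);
        else
            snprintf(out, out_size, "%"PRId64"%c", count, *s);
    }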
@@ -2126,13 +2127,12 @@ static int open_input_stream(HTTPContext *c, const char *info)
|
||||
char buf[128];
|
||||
char input_filename[1024];
|
||||
AVFormatContext *s = NULL;
|
||||
int buf_size, i, ret;
|
||||
int i, ret;
|
||||
int64_t stream_pos;
|
||||
|
||||
/* find file name */
|
||||
if (c->stream->feed) {
|
||||
strcpy(input_filename, c->stream->feed->feed_filename);
|
||||
buf_size = FFM_PACKET_SIZE;
|
||||
/* compute position (absolute time) */
|
||||
if (av_find_info_tag(buf, sizeof(buf), "date", info)) {
|
||||
if ((ret = av_parse_time(&stream_pos, buf, 0)) < 0)
|
||||
@@ -2144,7 +2144,6 @@ static int open_input_stream(HTTPContext *c, const char *info)
|
||||
stream_pos = av_gettime() - c->stream->prebuffer * (int64_t)1000;
|
||||
} else {
|
||||
strcpy(input_filename, c->stream->feed_filename);
|
||||
buf_size = 0;
|
||||
/* compute position (relative time) */
|
||||
if (av_find_info_tag(buf, sizeof(buf), "date", info)) {
|
||||
if ((ret = av_parse_time(&stream_pos, buf, 1)) < 0)
|
||||
@@ -2160,10 +2159,6 @@ static int open_input_stream(HTTPContext *c, const char *info)
|
||||
http_log("could not open %s: %d\n", input_filename, ret);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* set buffer size */
|
||||
if (buf_size > 0) ffio_set_buf_size(s->pb, buf_size);
|
||||
|
||||
s->flags |= AVFMT_FLAG_GENPTS;
|
||||
c->fmt_in = s;
|
||||
if (strcmp(s->iformat->name, "ffm") && avformat_find_stream_info(c->fmt_in, NULL) < 0) {
|
||||
@@ -2585,11 +2580,8 @@ static int http_start_receive_data(HTTPContext *c)
|
||||
if (c->stream->truncate) {
|
||||
/* truncate feed file */
|
||||
ffm_write_write_index(c->feed_fd, FFM_PACKET_SIZE);
|
||||
ftruncate(c->feed_fd, FFM_PACKET_SIZE);
|
||||
http_log("Truncating feed file '%s'\n", c->stream->feed_filename);
|
||||
if (ftruncate(c->feed_fd, FFM_PACKET_SIZE) < 0) {
|
||||
http_log("Error truncating feed file: %s\n", strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
if ((c->stream->feed_write_index = ffm_read_write_index(fd)) < 0) {
|
||||
http_log("Error reading write index from feed file: %s\n", strerror(errno));
|
||||
@@ -2837,7 +2829,7 @@ static int rtsp_parse_request(HTTPContext *c)
|
||||
char protocol[32];
|
||||
char line[1024];
|
||||
int len;
|
||||
RTSPMessageHeader header1 = { 0 }, *header = &header1;
|
||||
RTSPMessageHeader header1, *header = &header1;
|
||||
|
||||
c->buffer_ptr[0] = '\0';
|
||||
p = c->buffer;
|
||||
@@ -2863,6 +2855,7 @@ static int rtsp_parse_request(HTTPContext *c)
|
||||
}
|
||||
|
||||
/* parse each header line */
|
||||
memset(header, 0, sizeof(*header));
|
||||
/* skip to next line */
|
||||
while (*p != '\n' && *p != '\0')
|
||||
p++;
|
||||
@@ -3669,8 +3662,6 @@ static void build_feed_streams(void)
|
||||
int matches = 0;
|
||||
|
||||
if (avformat_open_input(&s, feed->feed_filename, NULL, NULL) >= 0) {
|
||||
/* set buffer size */
|
||||
ffio_set_buf_size(s->pb, FFM_PACKET_SIZE);
|
||||
/* Now see if it matches */
|
||||
if (s->nb_streams == feed->nb_streams) {
|
||||
matches = 1;
|
||||
@@ -4230,8 +4221,8 @@ static int parse_ffconfig(const char *filename)
|
||||
}
|
||||
|
||||
stream->fmt = ffserver_guess_format(NULL, stream->filename, NULL);
|
||||
avcodec_get_context_defaults3(&video_enc, NULL);
|
||||
avcodec_get_context_defaults3(&audio_enc, NULL);
|
||||
avcodec_get_context_defaults2(&video_enc, AVMEDIA_TYPE_VIDEO);
|
||||
avcodec_get_context_defaults2(&audio_enc, AVMEDIA_TYPE_AUDIO);
|
||||
|
||||
audio_id = CODEC_ID_NONE;
|
||||
video_id = CODEC_ID_NONE;
|
||||
@@ -4665,7 +4656,7 @@ static const OptionDef options[] = {
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
struct sigaction sigact = { { 0 } };
|
||||
struct sigaction sigact;
|
||||
|
||||
parse_loglevel(argc, argv, options);
|
||||
av_register_all();
|
||||
@@ -4683,6 +4674,7 @@ int main(int argc, char **argv)
|
||||
|
||||
av_lfg_init(&random_state, av_get_random_seed());
|
||||
|
||||
memset(&sigact, 0, sizeof(sigact));
|
||||
sigact.sa_handler = handle_child_exit;
|
||||
sigact.sa_flags = SA_NOCLDSTOP | SA_RESTART;
|
||||
sigaction(SIGCHLD, &sigact, 0);
|
||||
910
libavcodec/4xm.c
File diff suppressed because it is too large
@@ -38,20 +38,20 @@
|
||||
#include "avcodec.h"
|
||||
|
||||
|
||||
static const enum PixelFormat pixfmt_rgb24[] = {
|
||||
PIX_FMT_BGR24, PIX_FMT_RGB32, PIX_FMT_NONE };
|
||||
static const enum PixelFormat pixfmt_rgb24[] = {PIX_FMT_BGR24, PIX_FMT_RGB32, PIX_FMT_NONE};
|
||||
|
||||
/*
|
||||
* Decoder context
|
||||
*/
|
||||
typedef struct EightBpsContext {
|
||||
AVCodecContext *avctx;
|
||||
AVFrame pic;
|
||||
|
||||
unsigned char planes;
|
||||
unsigned char planemap[4];
|
||||
AVCodecContext *avctx;
|
||||
AVFrame pic;
|
||||
|
||||
uint32_t pal[256];
|
||||
unsigned char planes;
|
||||
unsigned char planemap[4];
|
||||
|
||||
uint32_t pal[256];
|
||||
} EightBpsContext;
|
||||
|
||||
|
||||
@@ -60,90 +60,87 @@ typedef struct EightBpsContext {
|
||||
* Decode a frame
|
||||
*
|
||||
*/
|
||||
static int decode_frame(AVCodecContext *avctx, void *data,
|
||||
int *data_size, AVPacket *avpkt)
|
||||
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
|
||||
{
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
EightBpsContext * const c = avctx->priv_data;
|
||||
const unsigned char *encoded = buf;
|
||||
unsigned char *pixptr, *pixptr_end;
|
||||
unsigned int height = avctx->height; // Real image height
|
||||
unsigned int dlen, p, row;
|
||||
const unsigned char *lp, *dp;
|
||||
unsigned char count;
|
||||
unsigned int planes = c->planes;
|
||||
unsigned char *planemap = c->planemap;
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
EightBpsContext * const c = avctx->priv_data;
|
||||
const unsigned char *encoded = buf;
|
||||
unsigned char *pixptr, *pixptr_end;
|
||||
unsigned int height = avctx->height; // Real image height
|
||||
unsigned int dlen, p, row;
|
||||
const unsigned char *lp, *dp;
|
||||
unsigned char count;
|
||||
unsigned int planes = c->planes;
|
||||
unsigned char *planemap = c->planemap;
|
||||
|
||||
if (c->pic.data[0])
|
||||
avctx->release_buffer(avctx, &c->pic);
|
||||
if(c->pic.data[0])
|
||||
avctx->release_buffer(avctx, &c->pic);
|
||||
|
||||
c->pic.reference = 0;
|
||||
c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
|
||||
if (avctx->get_buffer(avctx, &c->pic) < 0){
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return -1;
|
||||
}
|
||||
c->pic.reference = 0;
|
||||
c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
|
||||
if(avctx->get_buffer(avctx, &c->pic) < 0){
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Set data pointer after line lengths */
|
||||
dp = encoded + planes * (height << 1);
|
||||
/* Set data pointer after line lengths */
|
||||
dp = encoded + planes * (height << 1);
|
||||
|
||||
for (p = 0; p < planes; p++) {
|
||||
/* Lines length pointer for this plane */
|
||||
lp = encoded + p * (height << 1);
|
||||
for (p = 0; p < planes; p++) {
|
||||
/* Lines length pointer for this plane */
|
||||
lp = encoded + p * (height << 1);
|
||||
|
||||
/* Decode a plane */
|
||||
for (row = 0; row < height; row++) {
|
||||
pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
|
||||
pixptr_end = pixptr + c->pic.linesize[0];
|
||||
dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
|
||||
/* Decode a row of this plane */
|
||||
while (dlen > 0) {
|
||||
if (dp + 1 >= buf + buf_size)
|
||||
return -1;
|
||||
if ((count = *dp++) <= 127) {
|
||||
count++;
|
||||
dlen -= count + 1;
|
||||
if (pixptr + count * planes > pixptr_end)
|
||||
break;
|
||||
if (dp + count > buf + buf_size)
|
||||
return -1;
|
||||
while (count--) {
|
||||
*pixptr = *dp++;
|
||||
pixptr += planes;
|
||||
}
|
||||
} else {
|
||||
count = 257 - count;
|
||||
if (pixptr + count * planes > pixptr_end)
|
||||
break;
|
||||
while (count--) {
|
||||
*pixptr = *dp;
|
||||
pixptr += planes;
|
||||
}
|
||||
dp++;
|
||||
dlen -= 2;
|
||||
/* Decode a plane */
|
||||
for(row = 0; row < height; row++) {
|
||||
pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
|
||||
pixptr_end = pixptr + c->pic.linesize[0];
|
||||
dlen = av_be2ne16(*(const unsigned short *)(lp+row*2));
|
||||
/* Decode a row of this plane */
|
||||
while(dlen > 0) {
|
||||
if(dp + 1 >= buf+buf_size) return -1;
|
||||
if ((count = *dp++) <= 127) {
|
||||
count++;
|
||||
dlen -= count + 1;
|
||||
if (pixptr + count * planes > pixptr_end)
|
||||
break;
|
||||
if(dp + count > buf+buf_size) return -1;
|
||||
while(count--) {
|
||||
*pixptr = *dp++;
|
||||
pixptr += planes;
|
||||
}
|
||||
} else {
|
||||
count = 257 - count;
|
||||
if (pixptr + count * planes > pixptr_end)
|
||||
break;
|
||||
while(count--) {
|
||||
*pixptr = *dp;
|
||||
pixptr += planes;
|
||||
}
|
||||
dp++;
|
||||
dlen -= 2;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (avctx->bits_per_coded_sample <= 8) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt,
|
||||
AV_PKT_DATA_PALETTE,
|
||||
NULL);
|
||||
if (pal) {
|
||||
c->pic.palette_has_changed = 1;
|
||||
memcpy(c->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
|
||||
}
|
||||
if (avctx->bits_per_coded_sample <= 8) {
|
||||
const uint8_t *pal = av_packet_get_side_data(avpkt,
|
||||
AV_PKT_DATA_PALETTE,
|
||||
NULL);
|
||||
if (pal) {
|
||||
c->pic.palette_has_changed = 1;
|
||||
memcpy(c->pal, pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = c->pic;
|
||||
memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
|
||||
}
|
||||
|
||||
/* always report that the buffer was completely consumed */
|
||||
return buf_size;
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = c->pic;
|
||||
|
||||
/* always report that the buffer was completely consumed */
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
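Note: the row decoder above is the usual QuickTime 8BPS-style RLE: each row starts from a 16-bit big-endian compressed length, then opcodes 0..127 introduce a literal run of opcode+1 bytes while opcodes 128..255 repeat the following byte 257-opcode times, with output written at a stride of one byte per plane. A compact standalone sketch of that opcode handling, with simplified bounds checks:

    #include <stdint.h>

    /* Decode one RLE row into dst, writing every stride-th byte.
     * Returns bytes consumed from src, or -1 on truncated/overflowing input. */
    static int decode_rle_row(const uint8_t *src, int src_len,
                              uint8_t *dst, uint8_t *dst_end, int stride)
    {
        const uint8_t *p = src;

        while (p - src < src_len) {
            int count = *p++;
            if (count <= 127) {                   /* literal run of count+1 bytes */
                count++;
                if (p - src + count > src_len || dst + count * stride > dst_end)
                    return -1;
                while (count--) {
                    *dst = *p++;
                    dst += stride;
                }
            } else {                              /* repeat next byte 257-count times */
                count = 257 - count;
                if (p - src >= src_len || dst + count * stride > dst_end)
                    return -1;
                while (count--) {
                    *dst = *p;
                    dst += stride;
                }
                p++;
            }
        }
        return p - src;
    }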
|
||||
@@ -154,47 +151,47 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
||||
*/
|
||||
static av_cold int decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
EightBpsContext * const c = avctx->priv_data;
|
||||
EightBpsContext * const c = avctx->priv_data;
|
||||
|
||||
c->avctx = avctx;
|
||||
c->pic.data[0] = NULL;
|
||||
c->avctx = avctx;
|
||||
|
||||
avcodec_get_frame_defaults(&c->pic);
|
||||
switch (avctx->bits_per_coded_sample) {
|
||||
case 8:
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
c->planes = 1;
|
||||
c->planemap[0] = 0; // 1st plane is palette indexes
|
||||
break;
|
||||
case 24:
|
||||
avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
|
||||
c->planes = 3;
|
||||
c->planemap[0] = 2; // 1st plane is red
|
||||
c->planemap[1] = 1; // 2nd plane is green
|
||||
c->planemap[2] = 0; // 3rd plane is blue
|
||||
break;
|
||||
case 32:
|
||||
avctx->pix_fmt = PIX_FMT_RGB32;
|
||||
c->planes = 4;
|
||||
avcodec_get_frame_defaults(&c->pic);
|
||||
c->pic.data[0] = NULL;
|
||||
|
||||
switch (avctx->bits_per_coded_sample) {
|
||||
case 8:
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
c->planes = 1;
|
||||
c->planemap[0] = 0; // 1st plane is palette indexes
|
||||
break;
|
||||
case 24:
|
||||
avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
|
||||
c->planes = 3;
|
||||
c->planemap[0] = 2; // 1st plane is red
|
||||
c->planemap[1] = 1; // 2nd plane is green
|
||||
c->planemap[2] = 0; // 3rd plane is blue
|
||||
break;
|
||||
case 32:
|
||||
avctx->pix_fmt = PIX_FMT_RGB32;
|
||||
c->planes = 4;
|
||||
#if HAVE_BIGENDIAN
|
||||
c->planemap[0] = 1; // 1st plane is red
|
||||
c->planemap[1] = 2; // 2nd plane is green
|
||||
c->planemap[2] = 3; // 3rd plane is blue
|
||||
c->planemap[3] = 0; // 4th plane is alpha
|
||||
c->planemap[0] = 1; // 1st plane is red
|
||||
c->planemap[1] = 2; // 2nd plane is green
|
||||
c->planemap[2] = 3; // 3rd plane is blue
|
||||
c->planemap[3] = 0; // 4th plane is alpha
|
||||
#else
|
||||
c->planemap[0] = 2; // 1st plane is red
|
||||
c->planemap[1] = 1; // 2nd plane is green
|
||||
c->planemap[2] = 0; // 3rd plane is blue
|
||||
c->planemap[3] = 3; // 4th plane is alpha
|
||||
c->planemap[0] = 2; // 1st plane is red
|
||||
c->planemap[1] = 1; // 2nd plane is green
|
||||
c->planemap[2] = 0; // 3rd plane is blue
|
||||
c->planemap[3] = 3; // 4th plane is alpha
|
||||
#endif
|
||||
break;
|
||||
default:
|
||||
av_log(avctx, AV_LOG_ERROR, "Error: Unsupported color depth: %u.\n",
|
||||
avctx->bits_per_coded_sample);
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
av_log(avctx, AV_LOG_ERROR, "Error: Unsupported color depth: %u.\n", avctx->bits_per_coded_sample);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -207,12 +204,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
*/
|
||||
static av_cold int decode_end(AVCodecContext *avctx)
|
||||
{
|
||||
EightBpsContext * const c = avctx->priv_data;
|
||||
EightBpsContext * const c = avctx->priv_data;
|
||||
|
||||
if (c->pic.data[0])
|
||||
avctx->release_buffer(avctx, &c->pic);
|
||||
if (c->pic.data[0])
|
||||
avctx->release_buffer(avctx, &c->pic);
|
||||
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
@@ -3,14 +3,7 @@ include $(SUBDIR)../config.mak
|
||||
NAME = avcodec
|
||||
FFLIBS = avutil
|
||||
|
||||
HEADERS = avcodec.h \
|
||||
avfft.h \
|
||||
dxva2.h \
|
||||
vaapi.h \
|
||||
vda.h \
|
||||
vdpau.h \
|
||||
version.h \
|
||||
xvmc.h \
|
||||
HEADERS = avcodec.h avfft.h dxva2.h opt.h vaapi.h vda.h vdpau.h version.h xvmc.h
|
||||
|
||||
OBJS = allcodecs.o \
|
||||
audioconvert.o \
|
||||
@@ -46,7 +39,6 @@ OBJS-$(CONFIG_GOLOMB) += golomb.o
|
||||
OBJS-$(CONFIG_H264DSP) += h264dsp.o h264idct.o
|
||||
OBJS-$(CONFIG_H264PRED) += h264pred.o
|
||||
OBJS-$(CONFIG_HUFFMAN) += huffman.o
|
||||
OBJS-$(CONFIG_LIBXVID) += libxvid_rc.o
|
||||
OBJS-$(CONFIG_LPC) += lpc.o
|
||||
OBJS-$(CONFIG_LSP) += lsp.o
|
||||
OBJS-$(CONFIG_MDCT) += mdct_fixed.o mdct_float.o
|
||||
@@ -64,13 +56,11 @@ OBJS-$(CONFIG_VDPAU) += vdpau.o
|
||||
OBJS-$(CONFIG_A64MULTI_ENCODER) += a64multienc.o elbg.o
|
||||
OBJS-$(CONFIG_A64MULTI5_ENCODER) += a64multienc.o elbg.o
|
||||
OBJS-$(CONFIG_AAC_DECODER) += aacdec.o aactab.o aacsbr.o aacps.o \
|
||||
aacadtsdec.o mpeg4audio.o kbdwin.o \
|
||||
sbrdsp.o aacpsdsp.o
|
||||
aacadtsdec.o mpeg4audio.o kbdwin.o
|
||||
OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o \
|
||||
aacpsy.o aactab.o \
|
||||
psymodel.o iirfilter.o \
|
||||
mpeg4audio.o kbdwin.o \
|
||||
audio_frame_queue.o
|
||||
mpeg4audio.o kbdwin.o
|
||||
OBJS-$(CONFIG_AASC_DECODER) += aasc.o msrledec.o
|
||||
OBJS-$(CONFIG_AC3_DECODER) += ac3dec.o ac3dec_data.o ac3.o kbdwin.o
|
||||
OBJS-$(CONFIG_AC3_ENCODER) += ac3enc_float.o ac3enc.o ac3tab.o \
|
||||
@@ -104,15 +94,11 @@ OBJS-$(CONFIG_AURA2_DECODER) += aura.o
|
||||
OBJS-$(CONFIG_AVRP_DECODER) += r210dec.o
|
||||
OBJS-$(CONFIG_AVRP_ENCODER) += r210enc.o
|
||||
OBJS-$(CONFIG_AVS_DECODER) += avs.o
|
||||
OBJS-$(CONFIG_AVUI_DECODER) += avuidec.o
|
||||
OBJS-$(CONFIG_AVUI_ENCODER) += avuienc.o
|
||||
OBJS-$(CONFIG_AYUV_DECODER) += v408dec.o
|
||||
OBJS-$(CONFIG_AYUV_ENCODER) += v408enc.o
|
||||
OBJS-$(CONFIG_BETHSOFTVID_DECODER) += bethsoftvideo.o
|
||||
OBJS-$(CONFIG_BFI_DECODER) += bfi.o
|
||||
OBJS-$(CONFIG_BINK_DECODER) += bink.o binkdsp.o
|
||||
OBJS-$(CONFIG_BINKAUDIO_DCT_DECODER) += binkaudio.o wma.o wma_common.o
|
||||
OBJS-$(CONFIG_BINKAUDIO_RDFT_DECODER) += binkaudio.o wma.o wma_common.o
|
||||
OBJS-$(CONFIG_BINKAUDIO_DCT_DECODER) += binkaudio.o wma.o
|
||||
OBJS-$(CONFIG_BINKAUDIO_RDFT_DECODER) += binkaudio.o wma.o
|
||||
OBJS-$(CONFIG_BINTEXT_DECODER) += bintext.o cga_data.o
|
||||
OBJS-$(CONFIG_BMP_DECODER) += bmp.o msrledec.o
|
||||
OBJS-$(CONFIG_BMP_ENCODER) += bmpenc.o
|
||||
@@ -122,15 +108,13 @@ OBJS-$(CONFIG_C93_DECODER) += c93.o
|
||||
OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdec.o cavsdsp.o \
|
||||
mpeg12data.o mpegvideo.o
|
||||
OBJS-$(CONFIG_CDGRAPHICS_DECODER) += cdgraphics.o
|
||||
OBJS-$(CONFIG_CDXL_DECODER) += cdxl.o
|
||||
OBJS-$(CONFIG_CINEPAK_DECODER) += cinepak.o
|
||||
OBJS-$(CONFIG_CLJR_DECODER) += cljr.o
|
||||
OBJS-$(CONFIG_CLJR_ENCODER) += cljr.o
|
||||
OBJS-$(CONFIG_COOK_DECODER) += cook.o
|
||||
OBJS-$(CONFIG_CSCD_DECODER) += cscd.o
|
||||
OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o
|
||||
OBJS-$(CONFIG_DCA_DECODER) += dca.o synth_filter.o dcadsp.o \
|
||||
dca_parser.o
|
||||
OBJS-$(CONFIG_DCA_DECODER) += dca.o synth_filter.o dcadsp.o
|
||||
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o
|
||||
OBJS-$(CONFIG_DIRAC_DECODER) += diracdec.o dirac.o diracdsp.o \
|
||||
dirac_arith.o mpeg12data.o dwt.o
|
||||
@@ -148,8 +132,8 @@ OBJS-$(CONFIG_DVBSUB_DECODER) += dvbsubdec.o
|
||||
OBJS-$(CONFIG_DVBSUB_ENCODER) += dvbsub.o
|
||||
OBJS-$(CONFIG_DVDSUB_DECODER) += dvdsubdec.o
|
||||
OBJS-$(CONFIG_DVDSUB_ENCODER) += dvdsubenc.o
|
||||
OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o dv_profile.o
|
||||
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dv.o dvdata.o dv_profile.o
|
||||
OBJS-$(CONFIG_DVVIDEO_DECODER) += dv.o dvdata.o
|
||||
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dv.o dvdata.o
|
||||
OBJS-$(CONFIG_DXA_DECODER) += dxa.o
|
||||
OBJS-$(CONFIG_DXTORY_DECODER) += dxtory.o
|
||||
OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o
|
||||
@@ -170,13 +154,12 @@ OBJS-$(CONFIG_EIGHTSVX_FIB_DECODER) += 8svx.o
|
||||
OBJS-$(CONFIG_EIGHTSVX_RAW_DECODER) += 8svx.o
|
||||
OBJS-$(CONFIG_ESCAPE124_DECODER) += escape124.o
|
||||
OBJS-$(CONFIG_ESCAPE130_DECODER) += escape130.o
|
||||
OBJS-$(CONFIG_EXR_DECODER) += exr.o
|
||||
OBJS-$(CONFIG_FFV1_DECODER) += ffv1.o rangecoder.o
|
||||
OBJS-$(CONFIG_FFV1_ENCODER) += ffv1.o rangecoder.o
|
||||
OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o
|
||||
OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o
|
||||
OBJS-$(CONFIG_FFWAVESYNTH_DECODER) += ffwavesynth.o
|
||||
OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o
|
||||
OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o vorbis_data.o
|
||||
OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o vorbis_data.o
|
||||
OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
|
||||
OBJS-$(CONFIG_FLASHSV_ENCODER) += flashsvenc.o
|
||||
@@ -188,15 +171,15 @@ OBJS-$(CONFIG_FRAPS_DECODER) += fraps.o
|
||||
OBJS-$(CONFIG_FRWU_DECODER) += frwu.o
|
||||
OBJS-$(CONFIG_G723_1_DECODER) += g723_1.o acelp_vectors.o \
|
||||
celp_filters.o celp_math.o
|
||||
OBJS-$(CONFIG_G723_1_ENCODER) += g723_1.o acelp_vectors.o celp_math.o
|
||||
OBJS-$(CONFIG_G723_1_ENCODER) += g723_1.o
|
||||
OBJS-$(CONFIG_G729_DECODER) += g729dec.o lsp.o celp_math.o acelp_filters.o acelp_pitch_delay.o acelp_vectors.o g729postfilter.o
|
||||
OBJS-$(CONFIG_GIF_DECODER) += gifdec.o lzw.o
|
||||
OBJS-$(CONFIG_GIF_ENCODER) += gif.o lzwenc.o
|
||||
OBJS-$(CONFIG_GSM_DECODER) += gsmdec.o gsmdec_data.o msgsmdec.o
|
||||
OBJS-$(CONFIG_GSM_MS_DECODER) += gsmdec.o gsmdec_data.o msgsmdec.o
|
||||
OBJS-$(CONFIG_H261_DECODER) += h261dec.o h261.o h261data.o \
|
||||
OBJS-$(CONFIG_H261_DECODER) += h261dec.o h261.o \
|
||||
mpegvideo.o error_resilience.o
|
||||
OBJS-$(CONFIG_H261_ENCODER) += h261enc.o h261.o h261data.o \
|
||||
OBJS-$(CONFIG_H261_ENCODER) += h261enc.o h261.o \
|
||||
mpegvideo_enc.o motion_est.o \
|
||||
ratecontrol.o mpeg12data.o \
|
||||
mpegvideo.o
|
||||
@@ -231,7 +214,6 @@ OBJS-$(CONFIG_INDEO4_DECODER) += indeo4.o ivi_common.o ivi_dsp.o
|
||||
OBJS-$(CONFIG_INDEO5_DECODER) += indeo5.o ivi_common.o ivi_dsp.o
|
||||
OBJS-$(CONFIG_INTERPLAY_DPCM_DECODER) += dpcm.o
|
||||
OBJS-$(CONFIG_INTERPLAY_VIDEO_DECODER) += interplayvideo.o
|
||||
OBJS-$(CONFIG_JACOSUB_DECODER) += jacosubdec.o ass.o
|
||||
OBJS-$(CONFIG_JPEG2000_DECODER) += j2kdec.o mqcdec.o mqc.o j2k.o j2k_dwt.o
|
||||
OBJS-$(CONFIG_JPEG2000_ENCODER) += j2kenc.o mqcenc.o mqc.o j2k.o j2k_dwt.o
|
||||
OBJS-$(CONFIG_JPEGLS_DECODER) += jpeglsdec.o jpegls.o \
|
||||
@@ -250,7 +232,6 @@ OBJS-$(CONFIG_MACE3_DECODER) += mace.o
|
||||
OBJS-$(CONFIG_MACE6_DECODER) += mace.o
|
||||
OBJS-$(CONFIG_MDEC_DECODER) += mdec.o mpeg12.o mpeg12data.o \
|
||||
mpegvideo.o error_resilience.o
|
||||
OBJS-$(CONFIG_MICRODVD_DECODER) += microdvddec.o ass.o
|
||||
OBJS-$(CONFIG_MIMIC_DECODER) += mimic.o
|
||||
OBJS-$(CONFIG_MJPEG_DECODER) += mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_MJPEG_ENCODER) += mjpegenc.o mjpeg.o \
|
||||
@@ -314,22 +295,19 @@ OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o
|
||||
OBJS-$(CONFIG_MSMPEG4V1_DECODER) += msmpeg4.o msmpeg4data.o
|
||||
OBJS-$(CONFIG_MSMPEG4V2_DECODER) += msmpeg4.o msmpeg4data.o h263dec.o \
|
||||
h263.o ituh263dec.o mpeg4videodec.o
|
||||
OBJS-$(CONFIG_MSMPEG4V2_ENCODER) += msmpeg4.o msmpeg4enc.o msmpeg4data.o \
|
||||
h263dec.o h263.o ituh263dec.o \
|
||||
mpeg4videodec.o
|
||||
OBJS-$(CONFIG_MSMPEG4V2_ENCODER) += msmpeg4.o msmpeg4data.o h263dec.o \
|
||||
h263.o ituh263dec.o mpeg4videodec.o
|
||||
OBJS-$(CONFIG_MSMPEG4V3_DECODER) += msmpeg4.o msmpeg4data.o h263dec.o \
|
||||
h263.o ituh263dec.o mpeg4videodec.o
|
||||
OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4.o msmpeg4enc.o msmpeg4data.o \
|
||||
h263dec.o h263.o ituh263dec.o \
|
||||
mpeg4videodec.o
|
||||
OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4.o msmpeg4data.o h263dec.o \
|
||||
h263.o ituh263dec.o mpeg4videodec.o
|
||||
OBJS-$(CONFIG_MSRLE_DECODER) += msrle.o msrledec.o
|
||||
OBJS-$(CONFIG_MSVIDEO1_DECODER) += msvideo1.o
|
||||
OBJS-$(CONFIG_MSVIDEO1_ENCODER) += msvideo1enc.o elbg.o
|
||||
OBJS-$(CONFIG_MSZH_DECODER) += lcldec.o
|
||||
OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
|
||||
OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
|
||||
OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
|
||||
OBJS-$(CONFIG_PAM_ENCODER) += pamenc.o pnm.o
|
||||
@@ -343,15 +321,13 @@ OBJS-$(CONFIG_PGMYUV_DECODER) += pnmdec.o pnm.o
|
||||
OBJS-$(CONFIG_PGMYUV_ENCODER) += pnmenc.o pnm.o
|
||||
OBJS-$(CONFIG_PGSSUB_DECODER) += pgssubdec.o
|
||||
OBJS-$(CONFIG_PICTOR_DECODER) += pictordec.o cga_data.o
|
||||
OBJS-$(CONFIG_PNG_DECODER) += png.o pngdec.o pngdsp.o
|
||||
OBJS-$(CONFIG_PNG_DECODER) += png.o pngdec.o
|
||||
OBJS-$(CONFIG_PNG_ENCODER) += png.o pngenc.o
|
||||
OBJS-$(CONFIG_PPM_DECODER) += pnmdec.o pnm.o
|
||||
OBJS-$(CONFIG_PPM_ENCODER) += pnmenc.o pnm.o
|
||||
OBJS-$(CONFIG_PRORES_DECODER) += proresdec2.o proresdsp.o
|
||||
OBJS-$(CONFIG_PRORES_LGPL_DECODER) += proresdec_lgpl.o proresdsp.o proresdata.o
|
||||
OBJS-$(CONFIG_PRORES_ENCODER) += proresenc_anatoliy.o
|
||||
OBJS-$(CONFIG_PRORES_ANATOLIY_ENCODER) += proresenc_anatoliy.o
|
||||
OBJS-$(CONFIG_PRORES_KOSTYA_ENCODER) += proresenc_kostya.o proresdata.o proresdsp.o
|
||||
OBJS-$(CONFIG_PRORES_DECODER) += proresdec2.o
|
||||
OBJS-$(CONFIG_PRORES_LGPL_DECODER) += proresdec_lgpl.o proresdsp.o
|
||||
OBJS-$(CONFIG_PRORES_ENCODER) += proresenc.o
|
||||
OBJS-$(CONFIG_PTX_DECODER) += ptx.o
|
||||
OBJS-$(CONFIG_QCELP_DECODER) += qcelpdec.o celp_math.o \
|
||||
celp_filters.o acelp_vectors.o \
|
||||
@@ -368,10 +344,8 @@ OBJS-$(CONFIG_R10K_ENCODER) += r210enc.o
|
||||
OBJS-$(CONFIG_R210_DECODER) += r210dec.o
|
||||
OBJS-$(CONFIG_R210_ENCODER) += r210enc.o
|
||||
OBJS-$(CONFIG_RA_144_DECODER) += ra144dec.o ra144.o celp_filters.o
|
||||
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o
|
||||
OBJS-$(CONFIG_RA_288_DECODER) += ra288.o celp_math.o celp_filters.o
|
||||
OBJS-$(CONFIG_RALF_DECODER) += ralf.o
|
||||
OBJS-$(CONFIG_RAWVIDEO_DECODER) += rawdec.o
|
||||
OBJS-$(CONFIG_RAWVIDEO_ENCODER) += rawenc.o
|
||||
OBJS-$(CONFIG_RL2_DECODER) += rl2.o
|
||||
@@ -413,7 +387,6 @@ OBJS-$(CONFIG_SP5X_DECODER) += sp5xdec.o mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_SRT_DECODER) += srtdec.o ass.o
|
||||
OBJS-$(CONFIG_SRT_ENCODER) += srtenc.o ass_split.o
|
||||
OBJS-$(CONFIG_SUNRAST_DECODER) += sunrast.o
|
||||
OBJS-$(CONFIG_SUNRAST_ENCODER) += sunrastenc.o
|
||||
OBJS-$(CONFIG_SVQ1_DECODER) += svq1dec.o svq1.o h263.o \
|
||||
mpegvideo.o error_resilience.o
|
||||
OBJS-$(CONFIG_SVQ1_ENCODER) += svq1enc.o svq1.o \
|
||||
@@ -448,8 +421,6 @@ OBJS-$(CONFIG_V210_DECODER) += v210dec.o
|
||||
OBJS-$(CONFIG_V210_ENCODER) += v210enc.o
|
||||
OBJS-$(CONFIG_V308_DECODER) += v308dec.o
|
||||
OBJS-$(CONFIG_V308_ENCODER) += v308enc.o
|
||||
OBJS-$(CONFIG_V408_DECODER) += v408dec.o
|
||||
OBJS-$(CONFIG_V408_ENCODER) += v408enc.o
|
||||
OBJS-$(CONFIG_V410_DECODER) += v410dec.o
|
||||
OBJS-$(CONFIG_V410_ENCODER) += v410enc.o
|
||||
OBJS-$(CONFIG_V210X_DECODER) += v210x.o
|
||||
@@ -477,12 +448,12 @@ OBJS-$(CONFIG_VP6_DECODER) += vp6.o vp56.o vp56data.o vp56dsp.o \
|
||||
OBJS-$(CONFIG_VP8_DECODER) += vp8.o vp8dsp.o vp56rac.o
|
||||
OBJS-$(CONFIG_VQA_DECODER) += vqavideo.o
|
||||
OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
|
||||
OBJS-$(CONFIG_WMALOSSLESS_DECODER) += wmalosslessdec.o wma_common.o
|
||||
OBJS-$(CONFIG_WMAPRO_DECODER) += wmaprodec.o wma.o wma_common.o
|
||||
OBJS-$(CONFIG_WMAV1_DECODER) += wmadec.o wma.o wma_common.o aactab.o
|
||||
OBJS-$(CONFIG_WMAV1_ENCODER) += wmaenc.o wma.o wma_common.o aactab.o
|
||||
OBJS-$(CONFIG_WMAV2_DECODER) += wmadec.o wma.o wma_common.o aactab.o
|
||||
OBJS-$(CONFIG_WMAV2_ENCODER) += wmaenc.o wma.o wma_common.o aactab.o
|
||||
OBJS-$(CONFIG_WMALOSSLESS_DECODER) += wmalosslessdec.o wma.o
|
||||
OBJS-$(CONFIG_WMAPRO_DECODER) += wmaprodec.o wma.o
|
||||
OBJS-$(CONFIG_WMAV1_DECODER) += wmadec.o wma.o aactab.o
|
||||
OBJS-$(CONFIG_WMAV1_ENCODER) += wmaenc.o wma.o aactab.o
|
||||
OBJS-$(CONFIG_WMAV2_DECODER) += wmadec.o wma.o aactab.o
|
||||
OBJS-$(CONFIG_WMAV2_ENCODER) += wmaenc.o wma.o aactab.o
|
||||
OBJS-$(CONFIG_WMAVOICE_DECODER) += wmavoice.o \
|
||||
celp_math.o celp_filters.o \
|
||||
acelp_vectors.o acelp_filters.o
|
||||
@@ -491,7 +462,7 @@ OBJS-$(CONFIG_WMV2_DECODER) += wmv2dec.o wmv2.o \
|
||||
msmpeg4.o msmpeg4data.o \
|
||||
intrax8.o intrax8dsp.o
|
||||
OBJS-$(CONFIG_WMV2_ENCODER) += wmv2enc.o wmv2.o \
|
||||
msmpeg4.o msmpeg4enc.o msmpeg4data.o \
|
||||
msmpeg4.o msmpeg4data.o \
|
||||
mpeg4videodec.o ituh263dec.o h263dec.o
|
||||
OBJS-$(CONFIG_WNV1_DECODER) += wnv1.o
|
||||
OBJS-$(CONFIG_WS_SND1_DECODER) += ws-snd1.o
|
||||
@@ -499,8 +470,6 @@ OBJS-$(CONFIG_XAN_DPCM_DECODER) += dpcm.o
|
||||
OBJS-$(CONFIG_XAN_WC3_DECODER) += xan.o
|
||||
OBJS-$(CONFIG_XAN_WC4_DECODER) += xxan.o
|
||||
OBJS-$(CONFIG_XBIN_DECODER) += bintext.o cga_data.o
|
||||
OBJS-$(CONFIG_XBM_DECODER) += xbmdec.o
|
||||
OBJS-$(CONFIG_XBM_ENCODER) += xbmenc.o
|
||||
OBJS-$(CONFIG_XL_DECODER) += xl.o
|
||||
OBJS-$(CONFIG_XSUB_DECODER) += xsubdec.o
|
||||
OBJS-$(CONFIG_XSUB_ENCODER) += xsubenc.o
|
||||
@@ -511,7 +480,6 @@ OBJS-$(CONFIG_Y41P_ENCODER) += y41penc.o
|
||||
OBJS-$(CONFIG_YOP_DECODER) += yop.o
|
||||
OBJS-$(CONFIG_YUV4_DECODER) += yuv4dec.o
|
||||
OBJS-$(CONFIG_YUV4_ENCODER) += yuv4enc.o
|
||||
OBJS-$(CONFIG_ZEROCODEC_DECODER) += zerocodec.o
|
||||
OBJS-$(CONFIG_ZLIB_DECODER) += lcldec.o
|
||||
OBJS-$(CONFIG_ZLIB_ENCODER) += lclenc.o
|
||||
OBJS-$(CONFIG_ZMBV_DECODER) += zmbv.o
|
||||
@@ -611,10 +579,9 @@ OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcmenc.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o
|
||||
OBJS-$(CONFIG_ADX_DEMUXER) += adx.o
|
||||
OBJS-$(CONFIG_CAF_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_DV_DEMUXER) += dv_profile.o
|
||||
OBJS-$(CONFIG_DV_MUXER) += dv_profile.o timecode.o
|
||||
OBJS-$(CONFIG_FLAC_DEMUXER) += flacdec.o flacdata.o flac.o vorbis_data.o \
|
||||
vorbis_parser.o xiph.o
|
||||
OBJS-$(CONFIG_DV_DEMUXER) += dvdata.o
|
||||
OBJS-$(CONFIG_DV_MUXER) += dvdata.o timecode.o
|
||||
OBJS-$(CONFIG_FLAC_DEMUXER) += flacdec.o flacdata.o flac.o vorbis_data.o
|
||||
OBJS-$(CONFIG_FLAC_MUXER) += flacdec.o flacdata.o flac.o vorbis_data.o
|
||||
OBJS-$(CONFIG_FLV_DEMUXER) += mpeg4audio.o
|
||||
OBJS-$(CONFIG_GXF_DEMUXER) += mpeg12data.o
|
||||
@@ -626,7 +593,6 @@ OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_MATROSKA_MUXER) += xiph.o mpeg4audio.o \
|
||||
flacdec.o flacdata.o flac.o \
|
||||
mpegaudiodata.o vorbis_data.o
|
||||
OBJS-$(CONFIG_MP2_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
|
||||
OBJS-$(CONFIG_MP3_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
|
||||
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ac3tab.o timecode.o
|
||||
OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
@@ -635,8 +601,7 @@ OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_MXF_MUXER) += timecode.o
|
||||
OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
|
||||
OBJS-$(CONFIG_OGG_DEMUXER) += flacdec.o flacdata.o flac.o \
|
||||
dirac.o mpeg12data.o vorbis_parser.o \
|
||||
xiph.o vorbis_data.o
|
||||
dirac.o mpeg12data.o vorbis_data.o
|
||||
OBJS-$(CONFIG_OGG_MUXER) += xiph.o flacdec.o flacdata.o flac.o \
|
||||
vorbis_data.o
|
||||
OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o mpegvideo.o xiph.o
|
||||
@@ -649,40 +614,38 @@ OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
# external codec libraries
|
||||
OBJS-$(CONFIG_LIBAACPLUS_ENCODER) += libaacplus.o
|
||||
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
|
||||
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBDIRAC_DECODER) += libdiracdec.o
|
||||
OBJS-$(CONFIG_LIBDIRAC_ENCODER) += libdiracenc.o libdirac_libschro.o
|
||||
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o
|
||||
OBJS-$(CONFIG_LIBGSM_DECODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBGSM_MS_DECODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBGSM_MS_ENCODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER) += libopencore-amr.o
|
||||
OBJS-$(CONFIG_LIBOPENJPEG_DECODER) += libopenjpegdec.o
|
||||
OBJS-$(CONFIG_LIBOPENJPEG_ENCODER) += libopenjpegenc.o
|
||||
OBJS-$(CONFIG_LIBSCHROEDINGER_DECODER) += libschroedingerdec.o \
|
||||
libschroedinger.o
|
||||
libschroedinger.o \
|
||||
libdirac_libschro.o
|
||||
OBJS-$(CONFIG_LIBSCHROEDINGER_ENCODER) += libschroedingerenc.o \
|
||||
libschroedinger.o
|
||||
libschroedinger.o \
|
||||
libdirac_libschro.o
|
||||
OBJS-$(CONFIG_LIBSPEEX_DECODER) += libspeexdec.o
|
||||
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o
|
||||
OBJS-$(CONFIG_LIBSTAGEFRIGHT_H264_DECODER)+= libstagefright.o
|
||||
OBJS-$(CONFIG_LIBTHEORA_ENCODER) += libtheoraenc.o
|
||||
OBJS-$(CONFIG_LIBUTVIDEO_DECODER) += libutvideodec.o
|
||||
OBJS-$(CONFIG_LIBUTVIDEO_ENCODER) += libutvideoenc.o
|
||||
OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBUTVIDEO_DECODER) += libutvideo.o
|
||||
OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o
|
||||
OBJS-$(CONFIG_LIBVO_AMRWBENC_ENCODER) += libvo-amrwbenc.o
|
||||
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbis.o audio_frame_queue.o \
|
||||
vorbis_data.o vorbis_parser.o
|
||||
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbis.o vorbis_data.o
|
||||
OBJS-$(CONFIG_LIBVPX_DECODER) += libvpxdec.o
|
||||
OBJS-$(CONFIG_LIBVPX_ENCODER) += libvpxenc.o
|
||||
OBJS-$(CONFIG_LIBX264_ENCODER) += libx264.o
|
||||
OBJS-$(CONFIG_LIBXAVS_ENCODER) += libxavs.o
|
||||
OBJS-$(CONFIG_LIBXVID_ENCODER) += libxvid.o
|
||||
OBJS-$(CONFIG_LIBXVID) += libxvidff.o libxvid_rc.o
|
||||
|
||||
# parsers
|
||||
OBJS-$(CONFIG_AAC_PARSER) += aac_parser.o aac_ac3_parser.o \
|
||||
@@ -691,7 +654,6 @@ OBJS-$(CONFIG_AC3_PARSER) += ac3_parser.o ac3tab.o \
|
||||
aac_ac3_parser.o
|
||||
OBJS-$(CONFIG_ADX_PARSER) += adx_parser.o adx.o
|
||||
OBJS-$(CONFIG_CAVSVIDEO_PARSER) += cavs_parser.o
|
||||
OBJS-$(CONFIG_COOK_PARSER) += cook_parser.o
|
||||
OBJS-$(CONFIG_DCA_PARSER) += dca_parser.o
|
||||
OBJS-$(CONFIG_DIRAC_PARSER) += dirac_parser.o
|
||||
OBJS-$(CONFIG_DNXHD_PARSER) += dnxhd_parser.o
|
||||
@@ -715,7 +677,6 @@ OBJS-$(CONFIG_MPEG4VIDEO_PARSER) += mpeg4video_parser.o h263.o \
|
||||
mpegvideo.o error_resilience.o \
|
||||
mpeg4videodec.o mpeg4video.o \
|
||||
ituh263dec.o h263dec.o
|
||||
OBJS-$(CONFIG_PNG_PARSER) += png_parser.o
|
||||
OBJS-$(CONFIG_MPEGAUDIO_PARSER) += mpegaudio_parser.o \
|
||||
mpegaudiodecheader.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_MPEGVIDEO_PARSER) += mpegvideo_parser.o \
|
||||
@@ -727,7 +688,6 @@ OBJS-$(CONFIG_RV40_PARSER) += rv34_parser.o
|
||||
OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o vc1.o vc1data.o \
|
||||
msmpeg4.o msmpeg4data.o mpeg4video.o \
|
||||
h263.o mpegvideo.o error_resilience.o
|
||||
OBJS-$(CONFIG_VORBIS_PARSER) += vorbis_parser.o xiph.o
|
||||
OBJS-$(CONFIG_VP3_PARSER) += vp3_parser.o
|
||||
OBJS-$(CONFIG_VP8_PARSER) += vp8_parser.o
|
||||
|
||||
@@ -753,6 +713,8 @@ OBJS-$(HAVE_PTHREADS) += pthread.o
|
||||
OBJS-$(HAVE_W32THREADS) += pthread.o
|
||||
OBJS-$(HAVE_OS2THREADS) += pthread.o
|
||||
|
||||
OBJS-$(CONFIG_MLIB) += mlib/dsputil_mlib.o \
|
||||
|
||||
# inverse.o contains the ff_inverse table definition, which is used by
|
||||
# the FASTDIV macro (from libavutil); since referencing the external
|
||||
# table has a negative effect on performance, copy it in libavcodec as
|
||||
@@ -762,43 +724,27 @@ OBJS-$(!CONFIG_SMALL) += inverse.o
|
||||
SKIPHEADERS += %_tablegen.h \
|
||||
%_tables.h \
|
||||
aac_tablegen_decl.h \
|
||||
codec_names.h \
|
||||
fft-internal.h \
|
||||
tableprint.h \
|
||||
$(ARCH)/vp56_arith.h \
|
||||
|
||||
$(ARCH)/vp56_arith.h
|
||||
SKIPHEADERS-$(CONFIG_DXVA2) += dxva2.h dxva2_internal.h
|
||||
SKIPHEADERS-$(CONFIG_LIBDIRAC) += libdirac.h
|
||||
SKIPHEADERS-$(CONFIG_LIBSCHROEDINGER) += libschroedinger.h
|
||||
SKIPHEADERS-$(CONFIG_LIBUTVIDEO) += libutvideo.h
|
||||
SKIPHEADERS-$(CONFIG_MPEG_XVMC_DECODER) += xvmc.h
|
||||
SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_internal.h
|
||||
SKIPHEADERS-$(CONFIG_VDA) += vda.h vda_internal.h
|
||||
SKIPHEADERS-$(CONFIG_VDA) += vda_internal.h
|
||||
SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h
|
||||
SKIPHEADERS-$(HAVE_OS2THREADS) += os2threads.h
|
||||
SKIPHEADERS-$(CONFIG_XVMC) += xvmc.h
|
||||
SKIPHEADERS-$(HAVE_W32THREADS) += w32pthreads.h
|
||||
|
||||
TESTPROGS = cabac \
|
||||
dct \
|
||||
fft \
|
||||
fft-fixed \
|
||||
golomb \
|
||||
iirfilter \
|
||||
rangecoder \
|
||||
snowenc \
|
||||
|
||||
TESTPROGS = cabac dct fft fft-fixed h264 iirfilter rangecoder snowenc
|
||||
TESTPROGS-$(HAVE_MMX) += motion
|
||||
TESTOBJS = dctref.o
|
||||
|
||||
HOSTPROGS = aac_tablegen \
|
||||
aacps_tablegen \
|
||||
cbrt_tablegen \
|
||||
cos_tablegen \
|
||||
dv_tablegen \
|
||||
motionpixels_tablegen \
|
||||
mpegaudio_tablegen \
|
||||
pcm_tablegen \
|
||||
qdm2_tablegen \
|
||||
sinewin_tablegen \
|
||||
HOSTPROGS = aac_tablegen aacps_tablegen cbrt_tablegen cos_tablegen \
|
||||
dv_tablegen motionpixels_tablegen mpegaudio_tablegen \
|
||||
pcm_tablegen qdm2_tablegen sinewin_tablegen
|
||||
|
||||
DIRS = alpha arm bfin mlib ppc ps2 sh4 sparc x86
|
||||
|
||||
CLEANFILES = *_tables.c *_tables.h *_tablegen$(HOSTEXESUF)
|
||||
|
||||
|
@@ -50,9 +50,6 @@ typedef struct A64Context {
|
||||
uint8_t *mc_colram;
|
||||
uint8_t *mc_palette;
|
||||
int mc_pal_size;
|
||||
|
||||
/* pts of the next packet that will be output */
|
||||
int64_t next_pts;
|
||||
} A64Context;
|
||||
|
||||
#endif /* AVCODEC_A64ENC_H */
|
||||
|
@@ -28,7 +28,6 @@
|
||||
#include "a64colors.h"
|
||||
#include "a64tables.h"
|
||||
#include "elbg.h"
|
||||
#include "internal.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
|
||||
#define DITHERSTEPS 8
|
||||
@@ -222,8 +221,6 @@ static av_cold int a64multi_init_encoder(AVCodecContext *avctx)
|
||||
if (!avctx->codec_tag)
|
||||
avctx->codec_tag = AV_RL32("a64m");
|
||||
|
||||
c->next_pts = AV_NOPTS_VALUE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -242,19 +239,19 @@ static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colra
|
||||
}
|
||||
}
|
||||
|
||||
static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
const AVFrame *pict, int *got_packet)
|
||||
static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
|
||||
int buf_size, void *data)
|
||||
{
|
||||
A64Context *c = avctx->priv_data;
|
||||
AVFrame *const p = &c->picture;
|
||||
AVFrame *pict = data;
|
||||
AVFrame *const p = (AVFrame *) & c->picture;
|
||||
|
||||
int frame;
|
||||
int x, y;
|
||||
int b_height;
|
||||
int b_width;
|
||||
|
||||
int req_size, ret;
|
||||
uint8_t *buf = NULL;
|
||||
int req_size;
|
||||
|
||||
int *charmap = c->mc_charmap;
|
||||
uint8_t *colram = c->mc_colram;
|
||||
@@ -277,7 +274,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
}
|
||||
|
||||
/* no data, means end encoding asap */
|
||||
if (!pict) {
|
||||
if (!data) {
|
||||
/* all done, end encoding */
|
||||
if (!c->mc_lifetime) return 0;
|
||||
/* no more frames in queue, prepare to flush remaining frames */
|
||||
@@ -295,8 +292,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
p->key_frame = 1;
|
||||
to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
|
||||
c->mc_frame_counter++;
|
||||
if (c->next_pts == AV_NOPTS_VALUE)
|
||||
c->next_pts = pict->pts;
|
||||
/* lifetime is not reached so wait for next frame first */
|
||||
return 0;
|
||||
}
|
||||
@@ -307,11 +302,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
req_size = 0;
|
||||
/* any frames to encode? */
|
||||
if (c->mc_lifetime) {
|
||||
req_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
|
||||
if ((ret = ff_alloc_packet2(avctx, pkt, req_size)) < 0)
|
||||
return ret;
|
||||
buf = pkt->data;
|
||||
|
||||
/* calc optimal new charset + charmaps */
|
||||
ff_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
|
||||
ff_do_elbg (meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
|
||||
@@ -320,12 +310,15 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
render_charset(avctx, charset, colram);
|
||||
|
||||
/* copy charset to buf */
|
||||
memcpy(buf, charset, charset_size);
|
||||
memcpy(buf,charset, charset_size);
|
||||
|
||||
/* advance pointers */
|
||||
buf += charset_size;
|
||||
charset += charset_size;
|
||||
req_size += charset_size;
|
||||
}
|
||||
/* no charset so clean buf */
|
||||
else memset(buf, 0, charset_size);
|
||||
|
||||
/* write x frames to buf */
|
||||
for (frame = 0; frame < c->mc_lifetime; frame++) {
|
||||
@@ -358,12 +351,11 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
/* reset counter */
|
||||
c->mc_frame_counter = 0;
|
||||
|
||||
pkt->pts = pkt->dts = c->next_pts;
|
||||
c->next_pts = AV_NOPTS_VALUE;
|
||||
|
||||
pkt->size = req_size;
|
||||
pkt->flags |= AV_PKT_FLAG_KEY;
|
||||
*got_packet = !!req_size;
|
||||
if (req_size > buf_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "buf size too small (need %d, got %d)\n", req_size, buf_size);
|
||||
return -1;
|
||||
}
|
||||
return req_size;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
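Note: one side of this hunk uses the encode2 packet API: the encoder allocates its own output packet with ff_alloc_packet2() (a libavcodec-internal helper), sets pts/flags on it, and signals output through *got_packet instead of returning a byte count into a caller-provided buffer. A minimal sketch of that calling convention, with the actual payload generation elided:

    /* Sketch of an encode2-style callback, as on the newer side of the hunk. */
    static int sketch_encode2(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *frame, int *got_packet)
    {
        int ret, payload_size = 0;

        /* ... decide how many bytes this call will emit (payload_size) ... */

        if (payload_size) {
            if ((ret = ff_alloc_packet2(avctx, pkt, payload_size)) < 0)
                return ret;                 /* allocation failed */
            /* ... write payload_size bytes into pkt->data ... */
            pkt->pts   = pkt->dts = frame ? frame->pts : AV_NOPTS_VALUE;
            pkt->flags |= AV_PKT_FLAG_KEY;
        }
        *got_packet = !!payload_size;
        return 0;
    }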
@@ -374,7 +366,7 @@ AVCodec ff_a64multi_encoder = {
|
||||
.id = CODEC_ID_A64_MULTI,
|
||||
.priv_data_size = sizeof(A64Context),
|
||||
.init = a64multi_init_encoder,
|
||||
.encode2 = a64multi_encode_frame,
|
||||
.encode = a64multi_encode_frame,
|
||||
.close = a64multi_close_encoder,
|
||||
.pix_fmts = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE},
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
|
||||
@@ -387,7 +379,7 @@ AVCodec ff_a64multi5_encoder = {
|
||||
.id = CODEC_ID_A64_MULTI5,
|
||||
.priv_data_size = sizeof(A64Context),
|
||||
.init = a64multi_init_encoder,
|
||||
.encode2 = a64multi_encode_frame,
|
||||
.encode = a64multi_encode_frame,
|
||||
.close = a64multi_close_encoder,
|
||||
.pix_fmts = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE},
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
|
||||
|
@@ -112,15 +112,6 @@ enum OCStatus {
|
||||
OC_LOCKED, ///< Output configuration locked in place
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
MPEG4AudioConfig m4ac;
|
||||
uint8_t layout_map[MAX_ELEM_ID*4][3];
|
||||
int layout_map_tags;
|
||||
int channels;
|
||||
uint64_t channel_layout;
|
||||
enum OCStatus status;
|
||||
} OutputConfiguration;
|
||||
|
||||
/**
|
||||
* Predictor State
|
||||
*/
|
||||
@@ -263,6 +254,8 @@ typedef struct {
|
||||
AVCodecContext *avctx;
|
||||
AVFrame frame;
|
||||
|
||||
MPEG4AudioConfig m4ac;
|
||||
|
||||
int is_saved; ///< Set if elements have stored overlap from previous frame.
|
||||
DynamicRangeControl che_drc;
|
||||
|
||||
@@ -270,6 +263,9 @@ typedef struct {
|
||||
* @name Channel element related data
|
||||
* @{
|
||||
*/
|
||||
enum ChannelPosition che_pos[4][MAX_ELEM_ID]; /**< channel element channel mapping with the
|
||||
* first index as the first 4 raw data block types
|
||||
*/
|
||||
ChannelElement *che[4][MAX_ELEM_ID];
|
||||
ChannelElement *tag_che_map[4][MAX_ELEM_ID];
|
||||
int tags_mapped;
|
||||
@@ -304,7 +300,7 @@ typedef struct {
|
||||
|
||||
DECLARE_ALIGNED(32, float, temp)[128];
|
||||
|
||||
OutputConfiguration oc[2];
|
||||
enum OCStatus output_configured;
|
||||
int warned_num_aac_frames;
|
||||
} AACContext;
|
||||
|
||||
|
@@ -93,7 +93,7 @@ get_next:
|
||||
avctx->channels = s->channels;
|
||||
avctx->channel_layout = s->channel_layout;
|
||||
}
|
||||
s1->duration = s->samples;
|
||||
avctx->frame_size = s->samples;
|
||||
avctx->audio_service_type = s->service_type;
|
||||
}
|
||||
|
||||
|
@@ -713,18 +713,16 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx,
|
||||
const float lambda)
|
||||
{
|
||||
int start = 0, i, w, w2, g;
|
||||
int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels * (lambda / 120.f);
|
||||
float dists[128] = { 0 }, uplims[128];
|
||||
int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels;
|
||||
float dists[128], uplims[128];
|
||||
float maxvals[128];
|
||||
int fflag, minscaler;
|
||||
int its = 0;
|
||||
int allz = 0;
|
||||
float minthr = INFINITY;
|
||||
|
||||
// for values above this the decoder might end up in an endless loop
|
||||
// due to always having more bits than what can be encoded.
|
||||
destbits = FFMIN(destbits, 5800);
|
||||
//XXX: some heuristic to determine initial quantizers will reduce search time
|
||||
memset(dists, 0, sizeof(dists));
|
||||
//determine zero bands and upper limits
|
||||
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
|
||||
for (g = 0; g < sce->ics.num_swb; g++) {
|
||||
@@ -878,7 +876,7 @@ static void search_for_quantizers_faac(AVCodecContext *avctx, AACEncContext *s,
|
||||
} else {
|
||||
for (w = 0; w < 8; w++) {
|
||||
const float *coeffs = sce->coeffs + w*128;
|
||||
curband = start = 0;
|
||||
start = 0;
|
||||
for (i = 0; i < 128; i++) {
|
||||
if (i - start >= sce->ics.swb_sizes[curband]) {
|
||||
start += sce->ics.swb_sizes[curband];
|
||||
|
File diff suppressed because it is too large
@@ -80,14 +80,14 @@ static const float * const tns_tmp2_map[4] = {
|
||||
|
||||
static const int8_t tags_per_config[16] = { 0, 1, 1, 2, 3, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0 };
|
||||
|
||||
static const uint8_t aac_channel_layout_map[7][5][3] = {
|
||||
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, },
|
||||
{ { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, },
|
||||
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, },
|
||||
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, { TYPE_SCE, 1, AAC_CHANNEL_BACK }, },
|
||||
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 1, AAC_CHANNEL_BACK }, },
|
||||
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 1, AAC_CHANNEL_BACK }, { TYPE_LFE, 0, AAC_CHANNEL_LFE }, },
|
||||
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 1, AAC_CHANNEL_FRONT }, { TYPE_CPE, 2, AAC_CHANNEL_BACK }, { TYPE_LFE, 0, AAC_CHANNEL_LFE }, },
|
||||
static const uint8_t aac_channel_layout_map[7][5][2] = {
|
||||
{ { TYPE_SCE, 0 }, },
|
||||
{ { TYPE_CPE, 0 }, },
|
||||
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, },
|
||||
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_SCE, 1 }, },
|
||||
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_CPE, 1 }, },
|
||||
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_LFE, 0 }, { TYPE_CPE, 1 }, },
|
||||
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_LFE, 0 }, { TYPE_CPE, 2 }, { TYPE_CPE, 1 }, },
|
||||
};
|
||||
|
||||
static const uint64_t aac_channel_layout[8] = {
|
||||
@@ -97,7 +97,7 @@ static const uint64_t aac_channel_layout[8] = {
|
||||
AV_CH_LAYOUT_4POINT0,
|
||||
AV_CH_LAYOUT_5POINT0_BACK,
|
||||
AV_CH_LAYOUT_5POINT1_BACK,
|
||||
AV_CH_LAYOUT_7POINT1_WIDE_BACK,
|
||||
AV_CH_LAYOUT_7POINT1_WIDE,
|
||||
0,
|
||||
};
|
||||
|
||||
|
@@ -34,7 +34,6 @@
|
||||
#include "avcodec.h"
|
||||
#include "put_bits.h"
|
||||
#include "dsputil.h"
|
||||
#include "internal.h"
|
||||
#include "mpeg4audio.h"
|
||||
#include "kbdwin.h"
|
||||
#include "sinewin.h"
|
||||
@@ -145,7 +144,7 @@ static const uint8_t aac_chan_configs[6][5] = {
|
||||
};
|
||||
|
||||
/**
|
||||
* Table to remap channels from libavcodec's default order to AAC order.
|
||||
* Table to remap channels from Libav's default order to AAC order.
|
||||
*/
|
||||
static const uint8_t aac_chan_maps[AAC_MAX_CHANNELS][AAC_MAX_CHANNELS] = {
|
||||
{ 0 },
|
||||
@@ -224,9 +223,8 @@ WINDOW_FUNC(eight_short)
|
||||
const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
|
||||
const float *in = audio + 448;
|
||||
float *out = sce->ret;
|
||||
int w;
|
||||
|
||||
for (w = 0; w < 8; w++) {
|
||||
for (int w = 0; w < 8; w++) {
|
||||
dsp->vector_fmul (out, in, w ? pwindow : swindow, 128);
|
||||
out += 128;
|
||||
in += 128;
|
||||
@@ -475,9 +473,10 @@ static void put_bitstream_info(AVCodecContext *avctx, AACEncContext *s,
|
||||
|
||||
/*
|
||||
* Deinterleave input samples.
|
||||
* Channels are reordered from libavcodec's default order to AAC order.
|
||||
* Channels are reordered from Libav's default order to AAC order.
|
||||
*/
|
||||
static void deinterleave_input_samples(AACEncContext *s, const AVFrame *frame)
|
||||
static void deinterleave_input_samples(AACEncContext *s,
|
||||
const float *samples)
|
||||
{
|
||||
int ch, i;
|
||||
const int sinc = s->channels;
|
||||
@@ -485,46 +484,38 @@ static void deinterleave_input_samples(AACEncContext *s, const AVFrame *frame)
|
||||
|
||||
/* deinterleave and remap input samples */
|
||||
for (ch = 0; ch < sinc; ch++) {
|
||||
const float *sptr = samples + channel_map[ch];
|
||||
|
||||
/* copy last 1024 samples of previous frame to the start of the current frame */
|
||||
memcpy(&s->planar_samples[ch][1024], &s->planar_samples[ch][2048], 1024 * sizeof(s->planar_samples[0][0]));
|
||||
|
||||
/* deinterleave */
|
||||
i = 2048;
|
||||
if (frame) {
|
||||
const float *sptr = ((const float *)frame->data[0]) + channel_map[ch];
|
||||
for (; i < 2048 + frame->nb_samples; i++) {
|
||||
s->planar_samples[ch][i] = *sptr;
|
||||
sptr += sinc;
|
||||
}
|
||||
for (i = 2048; i < 3072; i++) {
|
||||
s->planar_samples[ch][i] = *sptr;
|
||||
sptr += sinc;
|
||||
}
|
||||
memset(&s->planar_samples[ch][i], 0,
|
||||
(3072 - i) * sizeof(s->planar_samples[0][0]));
|
||||
}
|
||||
}
|
||||
|
||||
static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
const AVFrame *frame, int *got_packet_ptr)
|
||||
static int aac_encode_frame(AVCodecContext *avctx,
|
||||
uint8_t *frame, int buf_size, void *data)
|
||||
{
|
||||
AACEncContext *s = avctx->priv_data;
|
||||
float **samples = s->planar_samples, *samples2, *la, *overlap;
|
||||
ChannelElement *cpe;
|
||||
int i, ch, w, g, chans, tag, start_ch, ret;
|
||||
int i, ch, w, g, chans, tag, start_ch;
|
||||
int chan_el_counter[4];
|
||||
FFPsyWindowInfo windows[AAC_MAX_CHANNELS];
|
||||
|
||||
if (s->last_frame == 2)
|
||||
if (s->last_frame)
|
||||
return 0;
|
||||
|
||||
/* add current frame to queue */
|
||||
if (frame) {
|
||||
if ((ret = ff_af_queue_add(&s->afq, frame) < 0))
|
||||
return ret;
|
||||
if (data) {
|
||||
deinterleave_input_samples(s, data);
|
||||
if (s->psypp)
|
||||
ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);
|
||||
}
|
||||
|
||||
deinterleave_input_samples(s, frame);
|
||||
if (s->psypp)
|
||||
ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);
|
||||
|
||||
if (!avctx->frame_number)
|
||||
return 0;
|
||||
|
||||
@@ -540,7 +531,7 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
overlap = &samples[cur_channel][0];
samples2 = overlap + 1024;
la = samples2 + (448+64);
if (!frame)
if (!data)
la = NULL;
if (tag == TYPE_LFE) {
wi[ch].window_type[0] = ONLY_LONG_SEQUENCE;
@@ -571,15 +562,9 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
}
start_ch += chans;
}
if ((ret = ff_alloc_packet2(avctx, avpkt, 768 * s->channels))) {
av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
return ret;
}
do {
int frame_bits;

init_put_bits(&s->pb, avpkt->data, avpkt->size);

init_put_bits(&s->pb, frame, buf_size*8);
if ((avctx->frame_number & 0xFF)==1 && !(avctx->flags & CODEC_FLAG_BITEXACT))
put_bitstream_info(avctx, s, LIBAVCODEC_IDENT);
start_ch = 0;
@@ -659,15 +644,10 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
s->lambda = FFMIN(s->lambda, 65536.f);
}

if (!frame)
s->last_frame++;
if (!data)
s->last_frame = 1;

ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
&avpkt->duration);

avpkt->size = put_bits_count(&s->pb) >> 3;
*got_packet_ptr = 1;
return 0;
return put_bits_count(&s->pb)>>3;
}

static av_cold int aac_encode_end(AVCodecContext *avctx)
@@ -681,10 +661,6 @@ static av_cold int aac_encode_end(AVCodecContext *avctx)
ff_psy_preprocess_end(s->psypp);
av_freep(&s->buffer.samples);
av_freep(&s->cpe);
ff_af_queue_close(&s->afq);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
return 0;
}

@@ -692,7 +668,7 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
{
int ret = 0;

ff_dsputil_init(&s->dsp, avctx);
dsputil_init(&s->dsp, avctx);

// window init
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
@@ -710,19 +686,13 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)

static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s)
{
int ch;
FF_ALLOCZ_OR_GOTO(avctx, s->buffer.samples, 3 * 1024 * s->channels * sizeof(s->buffer.samples[0]), alloc_fail);
FF_ALLOCZ_OR_GOTO(avctx, s->cpe, sizeof(ChannelElement) * s->chan_map[0], alloc_fail);
FF_ALLOCZ_OR_GOTO(avctx, avctx->extradata, 5 + FF_INPUT_BUFFER_PADDING_SIZE, alloc_fail);

for(ch = 0; ch < s->channels; ch++)
for(int ch = 0; ch < s->channels; ch++)
s->planar_samples[ch] = s->buffer.samples + 3 * 1024 * ch;

#if FF_API_OLD_ENCODE_AUDIO
if (!(avctx->coded_frame = avcodec_alloc_frame()))
goto alloc_fail;
#endif

return 0;
alloc_fail:
return AVERROR(ENOMEM);
@@ -784,9 +754,6 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
for (i = 0; i < 428; i++)
ff_aac_pow34sf_tab[i] = sqrt(ff_aac_pow2sf_tab[i] * sqrt(ff_aac_pow2sf_tab[i]));

avctx->delay = 1024;
ff_af_queue_init(avctx, &s->afq);

return 0;
fail:
aac_encode_end(avctx);
@@ -816,13 +783,10 @@ AVCodec ff_aac_encoder = {
.id = CODEC_ID_AAC,
.priv_data_size = sizeof(AACEncContext),
.init = aac_encode_init,
.encode2 = aac_encode_frame,
.encode = aac_encode_frame,
.close = aac_encode_end,
.supported_samplerates = avpriv_mpeg4audio_sample_rates,
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY |
CODEC_CAP_EXPERIMENTAL,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_NONE },
.long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
.priv_class = &aacenc_class,
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
.priv_class = &aacenc_class,
};
@@ -27,7 +27,7 @@
#include "dsputil.h"

#include "aac.h"
#include "audio_frame_queue.h"

#include "psymodel.h"

#define AAC_CODER_NB 4
@@ -74,7 +74,6 @@ typedef struct AACEncContext {
int cur_channel;
int last_frame;
float lambda;
AudioFrameQueue afq;
DECLARE_ALIGNED(16, int, qcoefs)[96]; ///< quantized coefficients
DECLARE_ALIGNED(32, float, scoefs)[1024]; ///< scaled coefficients
|
||||
|
@@ -27,7 +27,6 @@
|
||||
#include "aacps.h"
|
||||
#include "aacps_tablegen.h"
|
||||
#include "aacpsdata.c"
|
||||
#include "dsputil.h"
|
||||
|
||||
#define PS_BASELINE 0 ///< Operate in Baseline PS mode
|
||||
///< Baseline implies 10 or 20 stereo bands,
|
||||
@@ -285,7 +284,7 @@ err:
|
||||
|
||||
/** Split one subband into 2 subsubbands with a symmetric real filter.
|
||||
* The filter must have its non-center even coefficients equal to zero. */
|
||||
static void hybrid2_re(float (*in)[2], float (*out)[32][2], const float filter[8], int len, int reverse)
|
||||
static void hybrid2_re(float (*in)[2], float (*out)[32][2], const float filter[7], int len, int reverse)
|
||||
{
|
||||
int i, j;
|
||||
for (i = 0; i < len; i++, in++) {
|
||||
@@ -305,14 +304,26 @@ static void hybrid2_re(float (*in)[2], float (*out)[32][2], const float filter[8
|
||||
}
|
||||
|
||||
/** Split one subband into 6 subsubbands with a complex filter */
|
||||
static void hybrid6_cx(PSDSPContext *dsp, float (*in)[2], float (*out)[32][2], const float (*filter)[8][2], int len)
|
||||
static void hybrid6_cx(float (*in)[2], float (*out)[32][2], const float (*filter)[7][2], int len)
|
||||
{
|
||||
int i;
|
||||
int i, j, ssb;
|
||||
int N = 8;
|
||||
LOCAL_ALIGNED_16(float, temp, [8], [2]);
|
||||
float temp[8][2];
|
||||
|
||||
for (i = 0; i < len; i++, in++) {
|
||||
dsp->hybrid_analysis(temp, in, filter, 1, N);
|
||||
for (ssb = 0; ssb < N; ssb++) {
|
||||
float sum_re = filter[ssb][6][0] * in[6][0], sum_im = filter[ssb][6][0] * in[6][1];
|
||||
for (j = 0; j < 6; j++) {
|
||||
float in0_re = in[j][0];
|
||||
float in0_im = in[j][1];
|
||||
float in1_re = in[12-j][0];
|
||||
float in1_im = in[12-j][1];
|
||||
sum_re += filter[ssb][j][0] * (in0_re + in1_re) - filter[ssb][j][1] * (in0_im - in1_im);
|
||||
sum_im += filter[ssb][j][0] * (in0_im + in1_im) + filter[ssb][j][1] * (in0_re - in1_re);
|
||||
}
|
||||
temp[ssb][0] = sum_re;
|
||||
temp[ssb][1] = sum_im;
|
||||
}
|
||||
out[0][i][0] = temp[6][0];
|
||||
out[0][i][1] = temp[6][1];
|
||||
out[1][i][0] = temp[7][0];
|
||||
@@ -328,18 +339,28 @@ static void hybrid6_cx(PSDSPContext *dsp, float (*in)[2], float (*out)[32][2], c
|
||||
}
|
||||
}
|
||||
|
||||
static void hybrid4_8_12_cx(PSDSPContext *dsp, float (*in)[2], float (*out)[32][2], const float (*filter)[8][2], int N, int len)
|
||||
static void hybrid4_8_12_cx(float (*in)[2], float (*out)[32][2], const float (*filter)[7][2], int N, int len)
|
||||
{
|
||||
int i;
|
||||
int i, j, ssb;
|
||||
|
||||
for (i = 0; i < len; i++, in++) {
|
||||
dsp->hybrid_analysis(out[0] + i, in, filter, 32, N);
|
||||
for (ssb = 0; ssb < N; ssb++) {
|
||||
float sum_re = filter[ssb][6][0] * in[6][0], sum_im = filter[ssb][6][0] * in[6][1];
|
||||
for (j = 0; j < 6; j++) {
|
||||
float in0_re = in[j][0];
|
||||
float in0_im = in[j][1];
|
||||
float in1_re = in[12-j][0];
|
||||
float in1_im = in[12-j][1];
|
||||
sum_re += filter[ssb][j][0] * (in0_re + in1_re) - filter[ssb][j][1] * (in0_im - in1_im);
|
||||
sum_im += filter[ssb][j][0] * (in0_im + in1_im) + filter[ssb][j][1] * (in0_re - in1_re);
|
||||
}
|
||||
out[ssb][i][0] = sum_re;
|
||||
out[ssb][i][1] = sum_im;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void hybrid_analysis(PSDSPContext *dsp, float out[91][32][2],
|
||||
float in[5][44][2], float L[2][38][64],
|
||||
int is34, int len)
|
||||
static void hybrid_analysis(float out[91][32][2], float in[5][44][2], float L[2][38][64], int is34, int len)
|
||||
{
|
||||
int i, j;
|
||||
for (i = 0; i < 5; i++) {
|
||||
@@ -349,17 +370,27 @@ static void hybrid_analysis(PSDSPContext *dsp, float out[91][32][2],
|
||||
}
|
||||
}
|
||||
if (is34) {
|
||||
hybrid4_8_12_cx(dsp, in[0], out, f34_0_12, 12, len);
|
||||
hybrid4_8_12_cx(dsp, in[1], out+12, f34_1_8, 8, len);
|
||||
hybrid4_8_12_cx(dsp, in[2], out+20, f34_2_4, 4, len);
|
||||
hybrid4_8_12_cx(dsp, in[3], out+24, f34_2_4, 4, len);
|
||||
hybrid4_8_12_cx(dsp, in[4], out+28, f34_2_4, 4, len);
|
||||
dsp->hybrid_analysis_ileave(out + 27, L, 5, len);
|
||||
hybrid4_8_12_cx(in[0], out, f34_0_12, 12, len);
|
||||
hybrid4_8_12_cx(in[1], out+12, f34_1_8, 8, len);
|
||||
hybrid4_8_12_cx(in[2], out+20, f34_2_4, 4, len);
|
||||
hybrid4_8_12_cx(in[3], out+24, f34_2_4, 4, len);
|
||||
hybrid4_8_12_cx(in[4], out+28, f34_2_4, 4, len);
|
||||
for (i = 0; i < 59; i++) {
|
||||
for (j = 0; j < len; j++) {
|
||||
out[i+32][j][0] = L[0][j][i+5];
|
||||
out[i+32][j][1] = L[1][j][i+5];
|
||||
}
|
||||
}
|
||||
} else {
|
||||
hybrid6_cx(dsp, in[0], out, f20_0_8, len);
|
||||
hybrid6_cx(in[0], out, f20_0_8, len);
|
||||
hybrid2_re(in[1], out+6, g1_Q2, len, 1);
|
||||
hybrid2_re(in[2], out+8, g1_Q2, len, 0);
|
||||
dsp->hybrid_analysis_ileave(out + 7, L, 3, len);
|
||||
for (i = 0; i < 61; i++) {
|
||||
for (j = 0; j < len; j++) {
|
||||
out[i+10][j][0] = L[0][j][i+3];
|
||||
out[i+10][j][1] = L[1][j][i+3];
|
||||
}
|
||||
}
|
||||
}
|
||||
//update in_buf
|
||||
for (i = 0; i < 5; i++) {
|
||||
@@ -367,8 +398,7 @@ static void hybrid_analysis(PSDSPContext *dsp, float out[91][32][2],
|
||||
}
|
||||
}
|
||||
|
||||
static void hybrid_synthesis(PSDSPContext *dsp, float out[2][38][64],
|
||||
float in[91][32][2], int is34, int len)
|
||||
static void hybrid_synthesis(float out[2][38][64], float in[91][32][2], int is34, int len)
|
||||
{
|
||||
int i, n;
|
||||
if (is34) {
|
||||
@@ -392,7 +422,12 @@ static void hybrid_synthesis(PSDSPContext *dsp, float out[2][38][64],
|
||||
out[1][n][4] += in[28+i][n][1];
|
||||
}
|
||||
}
|
||||
dsp->hybrid_synthesis_deint(out, in + 27, 5, len);
|
||||
for (i = 0; i < 59; i++) {
|
||||
for (n = 0; n < len; n++) {
|
||||
out[0][n][i+5] = in[i+32][n][0];
|
||||
out[1][n][i+5] = in[i+32][n][1];
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (n = 0; n < len; n++) {
|
||||
out[0][n][0] = in[0][n][0] + in[1][n][0] + in[2][n][0] +
|
||||
@@ -404,7 +439,12 @@ static void hybrid_synthesis(PSDSPContext *dsp, float out[2][38][64],
|
||||
out[0][n][2] = in[8][n][0] + in[9][n][0];
|
||||
out[1][n][2] = in[8][n][1] + in[9][n][1];
|
||||
}
|
||||
dsp->hybrid_synthesis_deint(out, in + 7, 3, len);
|
||||
for (i = 0; i < 61; i++) {
|
||||
for (n = 0; n < len; n++) {
|
||||
out[0][n][i+3] = in[i+10][n][0];
|
||||
out[1][n][i+3] = in[i+10][n][1];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -608,8 +648,8 @@ static void map_val_20_to_34(float par[PS_MAX_NR_IIDICC])
|
||||
|
||||
static void decorrelation(PSContext *ps, float (*out)[32][2], const float (*s)[32][2], int is34)
|
||||
{
|
||||
LOCAL_ALIGNED_16(float, power, [34], [PS_QMF_TIME_SLOTS]);
|
||||
LOCAL_ALIGNED_16(float, transient_gain, [34], [PS_QMF_TIME_SLOTS]);
|
||||
float power[34][PS_QMF_TIME_SLOTS] = {{0}};
|
||||
float transient_gain[34][PS_QMF_TIME_SLOTS];
|
||||
float *peak_decay_nrg = ps->peak_decay_nrg;
|
||||
float *power_smooth = ps->power_smooth;
|
||||
float *peak_decay_diff_smooth = ps->peak_decay_diff_smooth;
|
||||
@@ -621,8 +661,10 @@ static void decorrelation(PSContext *ps, float (*out)[32][2], const float (*s)[3
|
||||
const float a_smooth = 0.25f; ///< Smoothing coefficient
|
||||
int i, k, m, n;
|
||||
int n0 = 0, nL = 32;
|
||||
|
||||
memset(power, 0, 34 * sizeof(*power));
|
||||
static const int link_delay[] = { 3, 4, 5 };
|
||||
static const float a[] = { 0.65143905753106f,
|
||||
0.56471812200776f,
|
||||
0.48954165955695f };
|
||||
|
||||
if (is34 != ps->is34bands_old) {
|
||||
memset(ps->peak_decay_nrg, 0, sizeof(ps->peak_decay_nrg));
|
||||
@@ -632,9 +674,11 @@ static void decorrelation(PSContext *ps, float (*out)[32][2], const float (*s)[3
|
||||
memset(ps->ap_delay, 0, sizeof(ps->ap_delay));
|
||||
}
|
||||
|
||||
for (k = 0; k < NR_BANDS[is34]; k++) {
|
||||
int i = k_to_i[k];
|
||||
ps->dsp.add_squares(power[i], s[k], nL - n0);
|
||||
for (n = n0; n < nL; n++) {
|
||||
for (k = 0; k < NR_BANDS[is34]; k++) {
|
||||
int i = k_to_i[k];
|
||||
power[i][n] += s[k][n][0] * s[k][n][0] + s[k][n][1] * s[k][n][1];
|
||||
}
|
||||
}
|
||||
|
||||
//Transient detection
|
||||
@@ -662,31 +706,54 @@ static void decorrelation(PSContext *ps, float (*out)[32][2], const float (*s)[3
|
||||
for (k = 0; k < NR_ALLPASS_BANDS[is34]; k++) {
|
||||
int b = k_to_i[k];
|
||||
float g_decay_slope = 1.f - DECAY_SLOPE * (k - DECAY_CUTOFF[is34]);
|
||||
float ag[PS_AP_LINKS];
|
||||
g_decay_slope = av_clipf(g_decay_slope, 0.f, 1.f);
|
||||
memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
|
||||
memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
|
||||
for (m = 0; m < PS_AP_LINKS; m++) {
|
||||
memcpy(ap_delay[k][m], ap_delay[k][m]+numQMFSlots, 5*sizeof(ap_delay[k][m][0]));
|
||||
ag[m] = a[m] * g_decay_slope;
|
||||
}
|
||||
for (n = n0; n < nL; n++) {
|
||||
float in_re = delay[k][n+PS_MAX_DELAY-2][0] * phi_fract[is34][k][0] -
|
||||
delay[k][n+PS_MAX_DELAY-2][1] * phi_fract[is34][k][1];
|
||||
float in_im = delay[k][n+PS_MAX_DELAY-2][0] * phi_fract[is34][k][1] +
|
||||
delay[k][n+PS_MAX_DELAY-2][1] * phi_fract[is34][k][0];
|
||||
for (m = 0; m < PS_AP_LINKS; m++) {
|
||||
float a_re = ag[m] * in_re;
|
||||
float a_im = ag[m] * in_im;
|
||||
float link_delay_re = ap_delay[k][m][n+5-link_delay[m]][0];
|
||||
float link_delay_im = ap_delay[k][m][n+5-link_delay[m]][1];
|
||||
float fractional_delay_re = Q_fract_allpass[is34][k][m][0];
|
||||
float fractional_delay_im = Q_fract_allpass[is34][k][m][1];
|
||||
ap_delay[k][m][n+5][0] = in_re;
|
||||
ap_delay[k][m][n+5][1] = in_im;
|
||||
in_re = link_delay_re * fractional_delay_re - link_delay_im * fractional_delay_im - a_re;
|
||||
in_im = link_delay_re * fractional_delay_im + link_delay_im * fractional_delay_re - a_im;
|
||||
ap_delay[k][m][n+5][0] += ag[m] * in_re;
|
||||
ap_delay[k][m][n+5][1] += ag[m] * in_im;
|
||||
}
|
||||
out[k][n][0] = transient_gain[b][n] * in_re;
|
||||
out[k][n][1] = transient_gain[b][n] * in_im;
|
||||
}
|
||||
ps->dsp.decorrelate(out[k], delay[k] + PS_MAX_DELAY - 2, ap_delay[k],
|
||||
phi_fract[is34][k], Q_fract_allpass[is34][k],
|
||||
transient_gain[b], g_decay_slope, nL - n0);
|
||||
}
|
||||
for (; k < SHORT_DELAY_BAND[is34]; k++) {
|
||||
int i = k_to_i[k];
|
||||
memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
|
||||
memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
|
||||
//H = delay 14
|
||||
ps->dsp.mul_pair_single(out[k], delay[k] + PS_MAX_DELAY - 14,
|
||||
transient_gain[i], nL - n0);
|
||||
for (n = n0; n < nL; n++) {
|
||||
//H = delay 14
|
||||
out[k][n][0] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-14][0];
|
||||
out[k][n][1] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-14][1];
|
||||
}
|
||||
}
|
||||
for (; k < NR_BANDS[is34]; k++) {
|
||||
int i = k_to_i[k];
|
||||
memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
|
||||
memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
|
||||
//H = delay 1
|
||||
ps->dsp.mul_pair_single(out[k], delay[k] + PS_MAX_DELAY - 1,
|
||||
transient_gain[i], nL - n0);
|
||||
for (n = n0; n < nL; n++) {
|
||||
//H = delay 1
|
||||
out[k][n][0] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-1][0];
|
||||
out[k][n][1] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-1][1];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -730,7 +797,7 @@ static void remap20(int8_t (**p_par_mapped)[PS_MAX_NR_IIDICC],
|
||||
|
||||
static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2], int is34)
|
||||
{
|
||||
int e, b, k;
|
||||
int e, b, k, n;
|
||||
|
||||
float (*H11)[PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC] = ps->H11;
|
||||
float (*H12)[PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC] = ps->H12;
|
||||
@@ -842,52 +909,78 @@ static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2
|
||||
H22[0][e+1][b] = h22;
|
||||
}
|
||||
for (k = 0; k < NR_BANDS[is34]; k++) {
|
||||
float h[2][4];
|
||||
float h_step[2][4];
|
||||
float h11r, h12r, h21r, h22r;
|
||||
float h11i, h12i, h21i, h22i;
|
||||
float h11r_step, h12r_step, h21r_step, h22r_step;
|
||||
float h11i_step, h12i_step, h21i_step, h22i_step;
|
||||
int start = ps->border_position[e];
|
||||
int stop = ps->border_position[e+1];
|
||||
float width = 1.f / (stop - start);
|
||||
b = k_to_i[k];
|
||||
h[0][0] = H11[0][e][b];
|
||||
h[0][1] = H12[0][e][b];
|
||||
h[0][2] = H21[0][e][b];
|
||||
h[0][3] = H22[0][e][b];
|
||||
h11r = H11[0][e][b];
|
||||
h12r = H12[0][e][b];
|
||||
h21r = H21[0][e][b];
|
||||
h22r = H22[0][e][b];
|
||||
if (!PS_BASELINE && ps->enable_ipdopd) {
|
||||
//Is this necessary? ps_04_new seems unchanged
|
||||
if ((is34 && k <= 13 && k >= 9) || (!is34 && k <= 1)) {
|
||||
h[1][0] = -H11[1][e][b];
|
||||
h[1][1] = -H12[1][e][b];
|
||||
h[1][2] = -H21[1][e][b];
|
||||
h[1][3] = -H22[1][e][b];
|
||||
h11i = -H11[1][e][b];
|
||||
h12i = -H12[1][e][b];
|
||||
h21i = -H21[1][e][b];
|
||||
h22i = -H22[1][e][b];
|
||||
} else {
|
||||
h[1][0] = H11[1][e][b];
|
||||
h[1][1] = H12[1][e][b];
|
||||
h[1][2] = H21[1][e][b];
|
||||
h[1][3] = H22[1][e][b];
|
||||
h11i = H11[1][e][b];
|
||||
h12i = H12[1][e][b];
|
||||
h21i = H21[1][e][b];
|
||||
h22i = H22[1][e][b];
|
||||
}
|
||||
}
|
||||
//Interpolation
|
||||
h_step[0][0] = (H11[0][e+1][b] - h[0][0]) * width;
|
||||
h_step[0][1] = (H12[0][e+1][b] - h[0][1]) * width;
|
||||
h_step[0][2] = (H21[0][e+1][b] - h[0][2]) * width;
|
||||
h_step[0][3] = (H22[0][e+1][b] - h[0][3]) * width;
|
||||
h11r_step = (H11[0][e+1][b] - h11r) * width;
|
||||
h12r_step = (H12[0][e+1][b] - h12r) * width;
|
||||
h21r_step = (H21[0][e+1][b] - h21r) * width;
|
||||
h22r_step = (H22[0][e+1][b] - h22r) * width;
|
||||
if (!PS_BASELINE && ps->enable_ipdopd) {
|
||||
h_step[1][0] = (H11[1][e+1][b] - h[1][0]) * width;
|
||||
h_step[1][1] = (H12[1][e+1][b] - h[1][1]) * width;
|
||||
h_step[1][2] = (H21[1][e+1][b] - h[1][2]) * width;
|
||||
h_step[1][3] = (H22[1][e+1][b] - h[1][3]) * width;
|
||||
h11i_step = (H11[1][e+1][b] - h11i) * width;
|
||||
h12i_step = (H12[1][e+1][b] - h12i) * width;
|
||||
h21i_step = (H21[1][e+1][b] - h21i) * width;
|
||||
h22i_step = (H22[1][e+1][b] - h22i) * width;
|
||||
}
|
||||
for (n = start + 1; n <= stop; n++) {
|
||||
//l is s, r is d
|
||||
float l_re = l[k][n][0];
|
||||
float l_im = l[k][n][1];
|
||||
float r_re = r[k][n][0];
|
||||
float r_im = r[k][n][1];
|
||||
h11r += h11r_step;
|
||||
h12r += h12r_step;
|
||||
h21r += h21r_step;
|
||||
h22r += h22r_step;
|
||||
if (!PS_BASELINE && ps->enable_ipdopd) {
|
||||
h11i += h11i_step;
|
||||
h12i += h12i_step;
|
||||
h21i += h21i_step;
|
||||
h22i += h22i_step;
|
||||
|
||||
l[k][n][0] = h11r*l_re + h21r*r_re - h11i*l_im - h21i*r_im;
|
||||
l[k][n][1] = h11r*l_im + h21r*r_im + h11i*l_re + h21i*r_re;
|
||||
r[k][n][0] = h12r*l_re + h22r*r_re - h12i*l_im - h22i*r_im;
|
||||
r[k][n][1] = h12r*l_im + h22r*r_im + h12i*l_re + h22i*r_re;
|
||||
} else {
|
||||
l[k][n][0] = h11r*l_re + h21r*r_re;
|
||||
l[k][n][1] = h11r*l_im + h21r*r_im;
|
||||
r[k][n][0] = h12r*l_re + h22r*r_re;
|
||||
r[k][n][1] = h12r*l_im + h22r*r_im;
|
||||
}
|
||||
}
|
||||
ps->dsp.stereo_interpolate[!PS_BASELINE && ps->enable_ipdopd](
|
||||
l[k] + start + 1, r[k] + start + 1,
|
||||
h, h_step, stop - start);
|
||||
}
|
||||
}
|
||||
}
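Both sides of this hunk do the same per-sample work: the 2x2 mixing matrix is ramped linearly from its value at one envelope border to the next and applied to each (l, r) subband sample; one side open-codes the loop, the other delegates to ps->dsp.stereo_interpolate. A stand-alone sketch of the non-IPD/OPD path, with made-up start and step values rather than the decoded H11..H22 parameters:

#include <stdio.h>

/* Ramp a 2x2 real mixing matrix h by h_step once per sample and apply it
 * to a stereo pair of complex subband samples. */
static void stereo_interpolate(float (*l)[2], float (*r)[2],
                               float h[4], const float h_step[4], int len)
{
    for (int n = 0; n < len; n++) {
        float l_re = l[n][0], l_im = l[n][1];
        float r_re = r[n][0], r_im = r[n][1];
        for (int i = 0; i < 4; i++)
            h[i] += h_step[i];
        l[n][0] = h[0] * l_re + h[2] * r_re;
        l[n][1] = h[0] * l_im + h[2] * r_im;
        r[n][0] = h[1] * l_re + h[3] * r_re;
        r[n][1] = h[1] * l_im + h[3] * r_im;
    }
}

int main(void)
{
    float l[4][2] = { {1, 0}, {1, 0}, {1, 0}, {1, 0} };
    float r[4][2] = { {0, 1}, {0, 1}, {0, 1}, {0, 1} };
    float h[4]      = { 1.0f, 0.0f, 0.0f, 1.0f };    /* start: identity mix */
    float h_step[4] = { -0.1f, 0.1f, 0.1f, -0.1f };  /* made-up ramp */

    stereo_interpolate(l, r, h, h_step, 4);
    for (int n = 0; n < 4; n++)
        printf("l=(%.2f,%.2f) r=(%.2f,%.2f)\n", l[n][0], l[n][1], r[n][0], r[n][1]);
    return 0;
}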
|
||||
|
||||
int ff_ps_apply(AVCodecContext *avctx, PSContext *ps, float L[2][38][64], float R[2][38][64], int top)
|
||||
{
|
||||
LOCAL_ALIGNED_16(float, Lbuf, [91], [32][2]);
|
||||
LOCAL_ALIGNED_16(float, Rbuf, [91], [32][2]);
|
||||
float Lbuf[91][32][2];
|
||||
float Rbuf[91][32][2];
|
||||
const int len = 32;
|
||||
int is34 = ps->is34bands;
|
||||
|
||||
@@ -896,11 +989,11 @@ int ff_ps_apply(AVCodecContext *avctx, PSContext *ps, float L[2][38][64], float
|
||||
if (top < NR_ALLPASS_BANDS[is34])
|
||||
memset(ps->ap_delay + top, 0, (NR_ALLPASS_BANDS[is34] - top)*sizeof(ps->ap_delay[0]));
|
||||
|
||||
hybrid_analysis(&ps->dsp, Lbuf, ps->in_buf, L, is34, len);
|
||||
hybrid_analysis(Lbuf, ps->in_buf, L, is34, len);
|
||||
decorrelation(ps, Rbuf, Lbuf, is34);
|
||||
stereo_processing(ps, Lbuf, Rbuf, is34);
|
||||
hybrid_synthesis(&ps->dsp, L, Lbuf, is34, len);
|
||||
hybrid_synthesis(&ps->dsp, R, Rbuf, is34, len);
|
||||
hybrid_synthesis(L, Lbuf, is34, len);
|
||||
hybrid_synthesis(R, Rbuf, is34, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -948,5 +1041,4 @@ av_cold void ff_ps_init(void) {
|
||||
|
||||
av_cold void ff_ps_ctx_init(PSContext *ps)
|
||||
{
|
||||
ff_psdsp_init(&ps->dsp);
|
||||
}
|
||||
|
@@ -24,7 +24,6 @@
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "aacpsdsp.h"
|
||||
#include "avcodec.h"
|
||||
#include "get_bits.h"
|
||||
|
||||
@@ -61,19 +60,18 @@ typedef struct {
|
||||
int is34bands;
|
||||
int is34bands_old;
|
||||
|
||||
DECLARE_ALIGNED(16, float, in_buf)[5][44][2];
|
||||
DECLARE_ALIGNED(16, float, delay)[PS_MAX_SSB][PS_QMF_TIME_SLOTS + PS_MAX_DELAY][2];
|
||||
DECLARE_ALIGNED(16, float, ap_delay)[PS_MAX_AP_BANDS][PS_AP_LINKS][PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2];
|
||||
DECLARE_ALIGNED(16, float, peak_decay_nrg)[34];
|
||||
DECLARE_ALIGNED(16, float, power_smooth)[34];
|
||||
DECLARE_ALIGNED(16, float, peak_decay_diff_smooth)[34];
|
||||
DECLARE_ALIGNED(16, float, H11)[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
|
||||
DECLARE_ALIGNED(16, float, H12)[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
|
||||
DECLARE_ALIGNED(16, float, H21)[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
|
||||
DECLARE_ALIGNED(16, float, H22)[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
|
||||
float in_buf[5][44][2];
|
||||
float delay[PS_MAX_SSB][PS_QMF_TIME_SLOTS + PS_MAX_DELAY][2];
|
||||
float ap_delay[PS_MAX_AP_BANDS][PS_AP_LINKS][PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2];
|
||||
float peak_decay_nrg[34];
|
||||
float power_smooth[34];
|
||||
float peak_decay_diff_smooth[34];
|
||||
float H11[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
|
||||
float H12[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
|
||||
float H21[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
|
||||
float H22[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
|
||||
int8_t opd_hist[PS_MAX_NR_IIDICC];
|
||||
int8_t ipd_hist[PS_MAX_NR_IIDICC];
|
||||
PSDSPContext dsp;
|
||||
} PSContext;
|
||||
|
||||
void ff_ps_init(void);
|
||||
|
@@ -69,23 +69,23 @@ int main(void)
|
||||
write_float_3d_array(HB, 46, 8, 4);
|
||||
printf("};\n");
|
||||
|
||||
printf("static const DECLARE_ALIGNED(16, float, f20_0_8)[8][8][2] = {\n");
|
||||
write_float_3d_array(f20_0_8, 8, 8, 2);
|
||||
printf("static const float f20_0_8[8][7][2] = {\n");
|
||||
write_float_3d_array(f20_0_8, 8, 7, 2);
|
||||
printf("};\n");
|
||||
printf("static const DECLARE_ALIGNED(16, float, f34_0_12)[12][8][2] = {\n");
|
||||
write_float_3d_array(f34_0_12, 12, 8, 2);
|
||||
printf("static const float f34_0_12[12][7][2] = {\n");
|
||||
write_float_3d_array(f34_0_12, 12, 7, 2);
|
||||
printf("};\n");
|
||||
printf("static const DECLARE_ALIGNED(16, float, f34_1_8)[8][8][2] = {\n");
|
||||
write_float_3d_array(f34_1_8, 8, 8, 2);
|
||||
printf("static const float f34_1_8[8][7][2] = {\n");
|
||||
write_float_3d_array(f34_1_8, 8, 7, 2);
|
||||
printf("};\n");
|
||||
printf("static const DECLARE_ALIGNED(16, float, f34_2_4)[4][8][2] = {\n");
|
||||
write_float_3d_array(f34_2_4, 4, 8, 2);
|
||||
printf("static const float f34_2_4[4][7][2] = {\n");
|
||||
write_float_3d_array(f34_2_4, 4, 7, 2);
|
||||
printf("};\n");
|
||||
|
||||
printf("static const DECLARE_ALIGNED(16, float, Q_fract_allpass)[2][50][3][2] = {\n");
|
||||
printf("static const float Q_fract_allpass[2][50][3][2] = {\n");
|
||||
write_float_4d_array(Q_fract_allpass, 2, 50, 3, 2);
|
||||
printf("};\n");
|
||||
printf("static const DECLARE_ALIGNED(16, float, phi_fract)[2][50][2] = {\n");
|
||||
printf("static const float phi_fract[2][50][2] = {\n");
|
||||
write_float_3d_array(phi_fract, 2, 50, 2);
|
||||
printf("};\n");
|
||||
|
||||
|
@@ -31,7 +31,6 @@
|
||||
#else
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
#include "libavutil/mem.h"
|
||||
#define NR_ALLPASS_BANDS20 30
|
||||
#define NR_ALLPASS_BANDS34 50
|
||||
#define PS_AP_LINKS 3
|
||||
@@ -39,12 +38,12 @@ static float pd_re_smooth[8*8*8];
|
||||
static float pd_im_smooth[8*8*8];
|
||||
static float HA[46][8][4];
|
||||
static float HB[46][8][4];
|
||||
static DECLARE_ALIGNED(16, float, f20_0_8) [ 8][8][2];
|
||||
static DECLARE_ALIGNED(16, float, f34_0_12)[12][8][2];
|
||||
static DECLARE_ALIGNED(16, float, f34_1_8) [ 8][8][2];
|
||||
static DECLARE_ALIGNED(16, float, f34_2_4) [ 4][8][2];
|
||||
static DECLARE_ALIGNED(16, float, Q_fract_allpass)[2][50][3][2];
|
||||
static DECLARE_ALIGNED(16, float, phi_fract)[2][50][2];
|
||||
static float f20_0_8 [ 8][7][2];
|
||||
static float f34_0_12[12][7][2];
|
||||
static float f34_1_8 [ 8][7][2];
|
||||
static float f34_2_4 [ 4][7][2];
|
||||
static float Q_fract_allpass[2][50][3][2];
|
||||
static float phi_fract[2][50][2];
|
||||
|
||||
static const float g0_Q8[] = {
|
||||
0.00746082949812f, 0.02270420949825f, 0.04546865930473f, 0.07266113929591f,
|
||||
@@ -66,7 +65,7 @@ static const float g2_Q4[] = {
|
||||
0.16486303567403f, 0.23279856662996f, 0.25f
|
||||
};
|
||||
|
||||
static void make_filters_from_proto(float (*filter)[8][2], const float *proto, int bands)
|
||||
static void make_filters_from_proto(float (*filter)[7][2], const float *proto, int bands)
|
||||
{
|
||||
int q, n;
|
||||
for (q = 0; q < bands; q++) {
|
||||
|
@@ -1,214 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Alex Converse <alex.converse@gmail.com>
|
||||
*
|
||||
* This file is part of Libav.
|
||||
*
|
||||
* Libav is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* Libav is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with Libav; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
#include "libavutil/attributes.h"
|
||||
#include "aacpsdsp.h"
|
||||
|
||||
static void ps_add_squares_c(float *dst, const float (*src)[2], int n)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < n; i++)
|
||||
dst[i] += src[i][0] * src[i][0] + src[i][1] * src[i][1];
|
||||
}
|
||||
|
||||
static void ps_mul_pair_single_c(float (*dst)[2], float (*src0)[2], float *src1,
|
||||
int n)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < n; i++) {
|
||||
dst[i][0] = src0[i][0] * src1[i];
|
||||
dst[i][1] = src0[i][1] * src1[i];
|
||||
}
|
||||
}
|
||||
|
||||
static void ps_hybrid_analysis_c(float (*out)[2], float (*in)[2],
|
||||
const float (*filter)[8][2],
|
||||
int stride, int n)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
float sum_re = filter[i][6][0] * in[6][0];
|
||||
float sum_im = filter[i][6][0] * in[6][1];
|
||||
|
||||
for (j = 0; j < 6; j++) {
|
||||
float in0_re = in[j][0];
|
||||
float in0_im = in[j][1];
|
||||
float in1_re = in[12-j][0];
|
||||
float in1_im = in[12-j][1];
|
||||
sum_re += filter[i][j][0] * (in0_re + in1_re) -
|
||||
filter[i][j][1] * (in0_im - in1_im);
|
||||
sum_im += filter[i][j][0] * (in0_im + in1_im) +
|
||||
filter[i][j][1] * (in0_re - in1_re);
|
||||
}
|
||||
out[i * stride][0] = sum_re;
|
||||
out[i * stride][1] = sum_im;
|
||||
}
|
||||
}
|
||||
|
||||
static void ps_hybrid_analysis_ileave_c(float (*out)[32][2], float L[2][38][64],
|
||||
int i, int len)
|
||||
{
|
||||
int j;
|
||||
|
||||
for (; i < 64; i++) {
|
||||
for (j = 0; j < len; j++) {
|
||||
out[i][j][0] = L[0][j][i];
|
||||
out[i][j][1] = L[1][j][i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void ps_hybrid_synthesis_deint_c(float out[2][38][64],
|
||||
float (*in)[32][2],
|
||||
int i, int len)
|
||||
{
|
||||
int n;
|
||||
|
||||
for (; i < 64; i++) {
|
||||
for (n = 0; n < len; n++) {
|
||||
out[0][n][i] = in[i][n][0];
|
||||
out[1][n][i] = in[i][n][1];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void ps_decorrelate_c(float (*out)[2], float (*delay)[2],
|
||||
float (*ap_delay)[PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2],
|
||||
const float phi_fract[2], float (*Q_fract)[2],
|
||||
const float *transient_gain,
|
||||
float g_decay_slope,
|
||||
int len)
|
||||
{
|
||||
static const float a[] = { 0.65143905753106f,
|
||||
0.56471812200776f,
|
||||
0.48954165955695f };
|
||||
float ag[PS_AP_LINKS];
|
||||
int m, n;
|
||||
|
||||
for (m = 0; m < PS_AP_LINKS; m++)
|
||||
ag[m] = a[m] * g_decay_slope;
|
||||
|
||||
for (n = 0; n < len; n++) {
|
||||
float in_re = delay[n][0] * phi_fract[0] - delay[n][1] * phi_fract[1];
|
||||
float in_im = delay[n][0] * phi_fract[1] + delay[n][1] * phi_fract[0];
|
||||
for (m = 0; m < PS_AP_LINKS; m++) {
|
||||
float a_re = ag[m] * in_re;
|
||||
float a_im = ag[m] * in_im;
|
||||
float link_delay_re = ap_delay[m][n+2-m][0];
|
||||
float link_delay_im = ap_delay[m][n+2-m][1];
|
||||
float fractional_delay_re = Q_fract[m][0];
|
||||
float fractional_delay_im = Q_fract[m][1];
|
||||
float apd_re = in_re;
|
||||
float apd_im = in_im;
|
||||
in_re = link_delay_re * fractional_delay_re -
|
||||
link_delay_im * fractional_delay_im - a_re;
|
||||
in_im = link_delay_re * fractional_delay_im +
|
||||
link_delay_im * fractional_delay_re - a_im;
|
||||
ap_delay[m][n+5][0] = apd_re + ag[m] * in_re;
|
||||
ap_delay[m][n+5][1] = apd_im + ag[m] * in_im;
|
||||
}
|
||||
out[n][0] = transient_gain[n] * in_re;
|
||||
out[n][1] = transient_gain[n] * in_im;
|
||||
}
|
||||
}
|
||||
|
||||
static void ps_stereo_interpolate_c(float (*l)[2], float (*r)[2],
|
||||
float h[2][4], float h_step[2][4],
|
||||
int len)
|
||||
{
|
||||
float h0 = h[0][0];
|
||||
float h1 = h[0][1];
|
||||
float h2 = h[0][2];
|
||||
float h3 = h[0][3];
|
||||
float hs0 = h_step[0][0];
|
||||
float hs1 = h_step[0][1];
|
||||
float hs2 = h_step[0][2];
|
||||
float hs3 = h_step[0][3];
|
||||
int n;
|
||||
|
||||
for (n = 0; n < len; n++) {
|
||||
//l is s, r is d
|
||||
float l_re = l[n][0];
|
||||
float l_im = l[n][1];
|
||||
float r_re = r[n][0];
|
||||
float r_im = r[n][1];
|
||||
h0 += hs0;
|
||||
h1 += hs1;
|
||||
h2 += hs2;
|
||||
h3 += hs3;
|
||||
l[n][0] = h0 * l_re + h2 * r_re;
|
||||
l[n][1] = h0 * l_im + h2 * r_im;
|
||||
r[n][0] = h1 * l_re + h3 * r_re;
|
||||
r[n][1] = h1 * l_im + h3 * r_im;
|
||||
}
|
||||
}
|
||||
|
||||
static void ps_stereo_interpolate_ipdopd_c(float (*l)[2], float (*r)[2],
|
||||
float h[2][4], float h_step[2][4],
|
||||
int len)
|
||||
{
|
||||
float h00 = h[0][0], h10 = h[1][0];
|
||||
float h01 = h[0][1], h11 = h[1][1];
|
||||
float h02 = h[0][2], h12 = h[1][2];
|
||||
float h03 = h[0][3], h13 = h[1][3];
|
||||
float hs00 = h_step[0][0], hs10 = h_step[1][0];
|
||||
float hs01 = h_step[0][1], hs11 = h_step[1][1];
|
||||
float hs02 = h_step[0][2], hs12 = h_step[1][2];
|
||||
float hs03 = h_step[0][3], hs13 = h_step[1][3];
|
||||
int n;
|
||||
|
||||
for (n = 0; n < len; n++) {
|
||||
//l is s, r is d
|
||||
float l_re = l[n][0];
|
||||
float l_im = l[n][1];
|
||||
float r_re = r[n][0];
|
||||
float r_im = r[n][1];
|
||||
h00 += hs00;
|
||||
h01 += hs01;
|
||||
h02 += hs02;
|
||||
h03 += hs03;
|
||||
h10 += hs10;
|
||||
h11 += hs11;
|
||||
h12 += hs12;
|
||||
h13 += hs13;
|
||||
|
||||
l[n][0] = h00 * l_re + h02 * r_re - h10 * l_im - h12 * r_im;
|
||||
l[n][1] = h00 * l_im + h02 * r_im + h10 * l_re + h12 * r_re;
|
||||
r[n][0] = h01 * l_re + h03 * r_re - h11 * l_im - h13 * r_im;
|
||||
r[n][1] = h01 * l_im + h03 * r_im + h11 * l_re + h13 * r_re;
|
||||
}
|
||||
}
|
||||
|
||||
av_cold void ff_psdsp_init(PSDSPContext *s)
|
||||
{
|
||||
s->add_squares = ps_add_squares_c;
|
||||
s->mul_pair_single = ps_mul_pair_single_c;
|
||||
s->hybrid_analysis = ps_hybrid_analysis_c;
|
||||
s->hybrid_analysis_ileave = ps_hybrid_analysis_ileave_c;
|
||||
s->hybrid_synthesis_deint = ps_hybrid_synthesis_deint_c;
|
||||
s->decorrelate = ps_decorrelate_c;
|
||||
s->stereo_interpolate[0] = ps_stereo_interpolate_c;
|
||||
s->stereo_interpolate[1] = ps_stereo_interpolate_ipdopd_c;
|
||||
|
||||
if (ARCH_ARM)
|
||||
ff_psdsp_init_arm(s);
|
||||
}
|
@@ -1,53 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Mans Rullgard
|
||||
*
|
||||
* This file is part of Libav.
|
||||
*
|
||||
* Libav is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* Libav is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with Libav; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef LIBAVCODEC_AACPSDSP_H
|
||||
#define LIBAVCODEC_AACPSDSP_H
|
||||
|
||||
#define PS_QMF_TIME_SLOTS 32
|
||||
#define PS_AP_LINKS 3
|
||||
#define PS_MAX_AP_DELAY 5
|
||||
|
||||
typedef struct PSDSPContext {
|
||||
void (*add_squares)(float *dst, const float (*src)[2], int n);
|
||||
void (*mul_pair_single)(float (*dst)[2], float (*src0)[2], float *src1,
|
||||
int n);
|
||||
void (*hybrid_analysis)(float (*out)[2], float (*in)[2],
|
||||
const float (*filter)[8][2],
|
||||
int stride, int n);
|
||||
void (*hybrid_analysis_ileave)(float (*out)[32][2], float L[2][38][64],
|
||||
int i, int len);
|
||||
void (*hybrid_synthesis_deint)(float out[2][38][64], float (*in)[32][2],
|
||||
int i, int len);
|
||||
void (*decorrelate)(float (*out)[2], float (*delay)[2],
|
||||
float (*ap_delay)[PS_QMF_TIME_SLOTS+PS_MAX_AP_DELAY][2],
|
||||
const float phi_fract[2], float (*Q_fract)[2],
|
||||
const float *transient_gain,
|
||||
float g_decay_slope,
|
||||
int len);
|
||||
void (*stereo_interpolate[2])(float (*l)[2], float (*r)[2],
|
||||
float h[2][4], float h_step[2][4],
|
||||
int len);
|
||||
} PSDSPContext;
|
||||
|
||||
void ff_psdsp_init(PSDSPContext *s);
|
||||
void ff_psdsp_init_arm(PSDSPContext *s);
|
||||
|
||||
#endif /* LIBAVCODEC_AACPSDSP_H */
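The aacpsdsp.c/aacpsdsp.h listings above are the PSDSPContext dispatch layer that exists on only one side of this compare: scalar C routines are installed into a struct of function pointers so that architecture-specific versions (here ff_psdsp_init_arm) can override them. A minimal sketch of that pattern, with hypothetical names and only one routine:

#include <stdio.h>

/* Hypothetical minimal DSP context mirroring the ff_psdsp_init() /
 * ff_psdsp_init_arm() split shown above. */
typedef struct MyDSPContext {
    void (*add_squares)(float *dst, const float (*src)[2], int n);
} MyDSPContext;

static void add_squares_c(float *dst, const float (*src)[2], int n)
{
    for (int i = 0; i < n; i++)
        dst[i] += src[i][0] * src[i][0] + src[i][1] * src[i][1];
}

#define HAVE_FAST_PATH 0
#if HAVE_FAST_PATH
void my_dsp_init_fast(MyDSPContext *s); /* would replace selected pointers */
#endif

static void my_dsp_init(MyDSPContext *s)
{
    s->add_squares = add_squares_c;   /* C fallback first */
#if HAVE_FAST_PATH
    my_dsp_init_fast(s);              /* optional arch-specific override */
#endif
}

int main(void)
{
    MyDSPContext dsp;
    float power[2] = { 0 };
    const float src[2][2] = { { 3, 4 }, { 1, 2 } };

    my_dsp_init(&dsp);
    dsp.add_squares(power, src, 2);
    printf("%g %g\n", power[0], power[1]); /* 25 5 */
    return 0;
}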
|
@@ -389,8 +389,9 @@ static av_unused FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx,
|
||||
AacPsyChannel *pch = &pctx->ch[channel];
|
||||
uint8_t grouping = 0;
|
||||
int next_type = pch->next_window_seq;
|
||||
FFPsyWindowInfo wi = { { 0 } };
|
||||
FFPsyWindowInfo wi;
|
||||
|
||||
memset(&wi, 0, sizeof(wi));
|
||||
if (la) {
|
||||
float s[8], v;
|
||||
int switch_to_eight = 0;
|
||||
@@ -784,8 +785,9 @@ static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, const float *audio,
|
||||
int uselongblock = 1;
|
||||
int attacks[AAC_NUM_BLOCKS_SHORT + 1] = { 0 };
|
||||
int i;
|
||||
FFPsyWindowInfo wi = { { 0 } };
|
||||
FFPsyWindowInfo wi;
|
||||
|
||||
memset(&wi, 0, sizeof(wi));
|
||||
if (la) {
|
||||
float hpfsmpl[AAC_BLOCK_SIZE_LONG];
|
||||
float const *pf = hpfsmpl;
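Both psymodel hunks swap a memset for an aggregate initializer. In C, a partial initializer such as { { 0 } } zero-initializes every remaining member, so for the value bits the two forms are equivalent; a small self-contained check (struct shape and names are illustrative, not the real FFPsyWindowInfo):

#include <assert.h>

struct win_info { int window_type[3]; float clipping[8]; int num_windows; };

int main(void)
{
    /* partial initializer: the rest of the struct is implicitly zeroed */
    struct win_info wi = { { 0 } };

    assert(wi.window_type[2] == 0);
    assert(wi.clipping[7] == 0.0f);
    assert(wi.num_windows == 0);
    return 0;
}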
|
||||
|
@@ -32,7 +32,6 @@
|
||||
#include "aacsbrdata.h"
|
||||
#include "fft.h"
|
||||
#include "aacps.h"
|
||||
#include "sbrdsp.h"
|
||||
#include "libavutil/libm.h"
|
||||
#include "libavutil/avassert.h"
|
||||
|
||||
@@ -129,24 +128,13 @@ av_cold void ff_aac_sbr_init(void)
|
||||
ff_ps_init();
|
||||
}
|
||||
|
||||
/** Places SBR in pure upsampling mode. */
|
||||
static void sbr_turnoff(SpectralBandReplication *sbr) {
|
||||
sbr->start = 0;
|
||||
// Init defults used in pure upsampling mode
|
||||
sbr->kx[1] = 32; //Typo in spec, kx' inits to 32
|
||||
sbr->m[1] = 0;
|
||||
// Reset values for first SBR header
|
||||
sbr->data[0].e_a[1] = sbr->data[1].e_a[1] = -1;
|
||||
memset(&sbr->spectrum_params, -1, sizeof(SpectrumParameters));
|
||||
}
|
||||
|
||||
av_cold void ff_aac_sbr_ctx_init(AACContext *ac, SpectralBandReplication *sbr)
|
||||
{
|
||||
float mdct_scale;
|
||||
if(sbr->mdct.mdct_bits)
|
||||
return;
|
||||
sbr->kx[0] = sbr->kx[1];
|
||||
sbr_turnoff(sbr);
|
||||
sbr->kx[0] = sbr->kx[1] = 32; //Typo in spec, kx' inits to 32
|
||||
sbr->data[0].e_a[1] = sbr->data[1].e_a[1] = -1;
|
||||
sbr->data[0].synthesis_filterbank_samples_offset = SBR_SYNTHESIS_BUF_SIZE - (1280 - 128);
|
||||
sbr->data[1].synthesis_filterbank_samples_offset = SBR_SYNTHESIS_BUF_SIZE - (1280 - 128);
|
||||
/* SBR requires samples to be scaled to +/-32768.0 to work correctly.
|
||||
@@ -156,7 +144,6 @@ av_cold void ff_aac_sbr_ctx_init(AACContext *ac, SpectralBandReplication *sbr)
|
||||
ff_mdct_init(&sbr->mdct, 7, 1, 1.0 / (64 * mdct_scale));
|
||||
ff_mdct_init(&sbr->mdct_ana, 7, 1, -2.0 * mdct_scale);
|
||||
ff_ps_ctx_init(&sbr->ps);
|
||||
ff_sbrdsp_init(&sbr->dsp);
|
||||
}
|
||||
|
||||
av_cold void ff_aac_sbr_ctx_close(SpectralBandReplication *sbr)
|
||||
@@ -918,7 +905,7 @@ static void read_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
|
||||
{
|
||||
switch (bs_extension_id) {
|
||||
case EXTENSION_ID_PS:
|
||||
if (!ac->oc[1].m4ac.ps) {
|
||||
if (!ac->m4ac.ps) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "Parametric Stereo signaled to be not-present but was found in the bitstream.\n");
|
||||
skip_bits_long(gb, *num_bits_left); // bs_fill_bits
|
||||
*num_bits_left = 0;
|
||||
@@ -933,9 +920,7 @@ static void read_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
|
||||
}
|
||||
break;
|
||||
default:
|
||||
// some files contain 0-padding
|
||||
if (bs_extension_id || *num_bits_left > 16 || show_bits(gb, *num_bits_left))
|
||||
av_log_missing_feature(ac->avctx, "Reserved SBR extensions are", 1);
|
||||
av_log_missing_feature(ac->avctx, "Reserved SBR extensions are", 1);
|
||||
skip_bits_long(gb, *num_bits_left); // bs_fill_bits
|
||||
*num_bits_left = 0;
|
||||
break;
|
||||
@@ -1011,18 +996,18 @@ static unsigned int read_sbr_data(AACContext *ac, SpectralBandReplication *sbr,
|
||||
|
||||
if (id_aac == TYPE_SCE || id_aac == TYPE_CCE) {
|
||||
if (read_sbr_single_channel_element(ac, sbr, gb)) {
|
||||
sbr_turnoff(sbr);
|
||||
sbr->start = 0;
|
||||
return get_bits_count(gb) - cnt;
|
||||
}
|
||||
} else if (id_aac == TYPE_CPE) {
|
||||
if (read_sbr_channel_pair_element(ac, sbr, gb)) {
|
||||
sbr_turnoff(sbr);
|
||||
sbr->start = 0;
|
||||
return get_bits_count(gb) - cnt;
|
||||
}
|
||||
} else {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"Invalid bitstream - cannot apply SBR to element type %d\n", id_aac);
|
||||
sbr_turnoff(sbr);
|
||||
sbr->start = 0;
|
||||
return get_bits_count(gb) - cnt;
|
||||
}
|
||||
if (get_bits1(gb)) { // bs_extended_data
|
||||
@@ -1054,7 +1039,7 @@ static void sbr_reset(AACContext *ac, SpectralBandReplication *sbr)
|
||||
if (err < 0) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"SBR reset failed. Switching SBR to pure upsampling mode.\n");
|
||||
sbr_turnoff(sbr);
|
||||
sbr->start = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1077,9 +1062,9 @@ int ff_decode_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
|
||||
sbr->reset = 0;
|
||||
|
||||
if (!sbr->sample_rate)
|
||||
sbr->sample_rate = 2 * ac->oc[1].m4ac.sample_rate; //TODO use the nominal sample rate for arbitrary sample rate support
|
||||
if (!ac->oc[1].m4ac.ext_sample_rate)
|
||||
ac->oc[1].m4ac.ext_sample_rate = 2 * ac->oc[1].m4ac.sample_rate;
|
||||
sbr->sample_rate = 2 * ac->m4ac.sample_rate; //TODO use the nominal sample rate for arbitrary sample rate support
|
||||
if (!ac->m4ac.ext_sample_rate)
|
||||
ac->m4ac.ext_sample_rate = 2 * ac->m4ac.sample_rate;
|
||||
|
||||
if (crc) {
|
||||
skip_bits(gb, 10); // bs_sbr_crc_bits; TODO - implement CRC check
|
||||
@@ -1089,7 +1074,6 @@ int ff_decode_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
|
||||
//Save some state from the previous frame.
|
||||
sbr->kx[0] = sbr->kx[1];
|
||||
sbr->m[0] = sbr->m[1];
|
||||
sbr->kx_and_m_pushed = 1;
|
||||
|
||||
num_sbr_bits++;
|
||||
if (get_bits1(gb)) // bs_header_flag
|
||||
@@ -1124,12 +1108,7 @@ static void sbr_dequant(SpectralBandReplication *sbr, int id_aac)
|
||||
for (k = 0; k < sbr->n[sbr->data[0].bs_freq_res[e]]; k++) {
|
||||
float temp1 = exp2f(sbr->data[0].env_facs[e][k] * alpha + 7.0f);
|
||||
float temp2 = exp2f((pan_offset - sbr->data[1].env_facs[e][k]) * alpha);
|
||||
float fac;
|
||||
if (temp1 > 1E20) {
|
||||
av_log(NULL, AV_LOG_ERROR, "envelope scalefactor overflow in dequant\n");
|
||||
temp1 = 1;
|
||||
}
|
||||
fac = temp1 / (1.0f + temp2);
|
||||
float fac = temp1 / (1.0f + temp2);
|
||||
sbr->data[0].env_facs[e][k] = fac;
|
||||
sbr->data[1].env_facs[e][k] = fac * temp2;
|
||||
}
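The coupled-stereo dequantization above can be read as an energy split: temp1 is the total envelope term, temp2 the pan ratio, and fac and fac*temp2 divide that total between the two channels, since fac + fac*temp2 == temp1. A tiny numeric check with made-up quantized values (not taken from any real bitstream):

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* made-up quantized envelope values and pan offset */
    float alpha = 1.0f, env_l = 25.0f, env_r = 10.0f, pan_offset = 12.0f;

    float temp1 = exp2f(env_l * alpha + 7.0f);          /* total energy term */
    float temp2 = exp2f((pan_offset - env_r) * alpha);  /* pan ratio */
    float fac   = temp1 / (1.0f + temp2);

    float ch0 = fac, ch1 = fac * temp2;
    printf("ch0=%g ch1=%g sum=%g temp1=%g\n", ch0, ch1, ch0 + ch1, temp1);
    return 0;
}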
|
||||
@@ -1138,12 +1117,7 @@ static void sbr_dequant(SpectralBandReplication *sbr, int id_aac)
|
||||
for (k = 0; k < sbr->n_q; k++) {
|
||||
float temp1 = exp2f(NOISE_FLOOR_OFFSET - sbr->data[0].noise_facs[e][k] + 1);
|
||||
float temp2 = exp2f(12 - sbr->data[1].noise_facs[e][k]);
|
||||
float fac;
|
||||
if (temp1 > 1E20) {
|
||||
av_log(NULL, AV_LOG_ERROR, "envelope scalefactor overflow in dequant\n");
|
||||
temp1 = 1;
|
||||
}
|
||||
fac = temp1 / (1.0f + temp2);
|
||||
float fac = temp1 / (1.0f + temp2);
|
||||
sbr->data[0].noise_facs[e][k] = fac;
|
||||
sbr->data[1].noise_facs[e][k] = fac * temp2;
|
||||
}
|
||||
@@ -1152,15 +1126,9 @@ static void sbr_dequant(SpectralBandReplication *sbr, int id_aac)
|
||||
for (ch = 0; ch < (id_aac == TYPE_CPE) + 1; ch++) {
|
||||
float alpha = sbr->data[ch].bs_amp_res ? 1.0f : 0.5f;
|
||||
for (e = 1; e <= sbr->data[ch].bs_num_env; e++)
|
||||
for (k = 0; k < sbr->n[sbr->data[ch].bs_freq_res[e]]; k++){
|
||||
for (k = 0; k < sbr->n[sbr->data[ch].bs_freq_res[e]]; k++)
|
||||
sbr->data[ch].env_facs[e][k] =
|
||||
exp2f(alpha * sbr->data[ch].env_facs[e][k] + 6.0f);
|
||||
if (sbr->data[ch].env_facs[e][k] > 1E20) {
|
||||
av_log(NULL, AV_LOG_ERROR, "envelope scalefactor overflow in dequant\n");
|
||||
sbr->data[ch].env_facs[e][k] = 1;
|
||||
}
|
||||
}
|
||||
|
||||
for (e = 1; e <= sbr->data[ch].bs_num_noise; e++)
|
||||
for (k = 0; k < sbr->n_q; k++)
|
||||
sbr->data[ch].noise_facs[e][k] =
|
||||
@@ -1175,21 +1143,33 @@ static void sbr_dequant(SpectralBandReplication *sbr, int id_aac)
|
||||
* @param x pointer to the beginning of the first sample window
|
||||
* @param W array of complex-valued samples split into subbands
|
||||
*/
|
||||
static void sbr_qmf_analysis(DSPContext *dsp, FFTContext *mdct,
|
||||
SBRDSPContext *sbrdsp, const float *in, float *x,
|
||||
static void sbr_qmf_analysis(DSPContext *dsp, FFTContext *mdct, const float *in, float *x,
|
||||
float z[320], float W[2][32][32][2])
|
||||
{
|
||||
int i;
|
||||
int i, k;
|
||||
memcpy(W[0], W[1], sizeof(W[0]));
|
||||
memcpy(x , x+1024, (320-32)*sizeof(x[0]));
|
||||
memcpy(x+288, in, 1024*sizeof(x[0]));
|
||||
for (i = 0; i < 32; i++) { // numTimeSlots*RATE = 16*2 as 960 sample frames
|
||||
// are not supported
|
||||
dsp->vector_fmul_reverse(z, sbr_qmf_window_ds, x, 320);
|
||||
sbrdsp->sum64x5(z);
|
||||
sbrdsp->qmf_pre_shuffle(z);
|
||||
for (k = 0; k < 64; k++) {
|
||||
float f = z[k] + z[k + 64] + z[k + 128] + z[k + 192] + z[k + 256];
|
||||
z[k] = f;
|
||||
}
|
||||
//Shuffle to IMDCT
|
||||
z[64] = z[0];
|
||||
for (k = 1; k < 32; k++) {
|
||||
z[64+2*k-1] = z[ k];
|
||||
z[64+2*k ] = -z[64-k];
|
||||
}
|
||||
z[64+63] = z[32];
|
||||
|
||||
mdct->imdct_half(mdct, z, z+64);
|
||||
sbrdsp->qmf_post_shuffle(W[1][i], z);
|
||||
for (k = 0; k < 32; k++) {
|
||||
W[1][i][k][0] = -z[63-k];
|
||||
W[1][i][k][1] = z[k];
|
||||
}
|
||||
x += 32;
|
||||
}
|
||||
}
|
||||
@@ -1199,7 +1179,6 @@ static void sbr_qmf_analysis(DSPContext *dsp, FFTContext *mdct,
|
||||
* (14496-3 sp04 p206)
|
||||
*/
|
||||
static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
|
||||
SBRDSPContext *sbrdsp,
|
||||
float *out, float X[2][38][64],
|
||||
float mdct_buf[2][64],
|
||||
float *v0, int *v_off, const unsigned int div)
|
||||
@@ -1223,12 +1202,20 @@ static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
|
||||
X[0][i][32+n] = X[1][i][31-n];
|
||||
}
|
||||
mdct->imdct_half(mdct, mdct_buf[0], X[0][i]);
|
||||
sbrdsp->qmf_deint_neg(v, mdct_buf[0]);
|
||||
for (n = 0; n < 32; n++) {
|
||||
v[ n] = mdct_buf[0][63 - 2*n];
|
||||
v[63 - n] = -mdct_buf[0][62 - 2*n];
|
||||
}
|
||||
} else {
|
||||
sbrdsp->neg_odd_64(X[1][i]);
|
||||
for (n = 1; n < 64; n+=2) {
|
||||
X[1][i][n] = -X[1][i][n];
|
||||
}
|
||||
mdct->imdct_half(mdct, mdct_buf[0], X[0][i]);
|
||||
mdct->imdct_half(mdct, mdct_buf[1], X[1][i]);
|
||||
sbrdsp->qmf_deint_bfly(v, mdct_buf[1], mdct_buf[0]);
|
||||
for (n = 0; n < 64; n++) {
|
||||
v[ n] = -mdct_buf[0][63 - n] + mdct_buf[1][ n ];
|
||||
v[127 - n] = mdct_buf[0][63 - n] + mdct_buf[1][ n ];
|
||||
}
|
||||
}
|
||||
dsp->vector_fmul_add(out, v , sbr_qmf_window , zero64, 64 >> div);
|
||||
dsp->vector_fmul_add(out, v + ( 192 >> div), sbr_qmf_window + ( 64 >> div), out , 64 >> div);
|
||||
@@ -1244,20 +1231,45 @@ static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
|
||||
}
|
||||
}
|
||||
|
||||
static void autocorrelate(const float x[40][2], float phi[3][2][2], int lag)
|
||||
{
|
||||
int i;
|
||||
float real_sum = 0.0f;
|
||||
float imag_sum = 0.0f;
|
||||
if (lag) {
|
||||
for (i = 1; i < 38; i++) {
|
||||
real_sum += x[i][0] * x[i+lag][0] + x[i][1] * x[i+lag][1];
|
||||
imag_sum += x[i][0] * x[i+lag][1] - x[i][1] * x[i+lag][0];
|
||||
}
|
||||
phi[2-lag][1][0] = real_sum + x[ 0][0] * x[lag][0] + x[ 0][1] * x[lag][1];
|
||||
phi[2-lag][1][1] = imag_sum + x[ 0][0] * x[lag][1] - x[ 0][1] * x[lag][0];
|
||||
if (lag == 1) {
|
||||
phi[0][0][0] = real_sum + x[38][0] * x[39][0] + x[38][1] * x[39][1];
|
||||
phi[0][0][1] = imag_sum + x[38][0] * x[39][1] - x[38][1] * x[39][0];
|
||||
}
|
||||
} else {
|
||||
for (i = 1; i < 38; i++) {
|
||||
real_sum += x[i][0] * x[i][0] + x[i][1] * x[i][1];
|
||||
}
|
||||
phi[2][1][0] = real_sum + x[ 0][0] * x[ 0][0] + x[ 0][1] * x[ 0][1];
|
||||
phi[1][0][0] = real_sum + x[38][0] * x[38][0] + x[38][1] * x[38][1];
|
||||
}
|
||||
}
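The helper above accumulates, per lag, the complex correlation sum of conj(x[i]) * x[i+lag] over the low-band time slots, with the first/last terms added separately so a single pass can feed the covariance method that follows. For reference, a compact stand-alone version using C99 complex arithmetic and a made-up input (it uses plain 0..n-lag bounds rather than the exact 1..37-plus-end-terms split above):

#include <complex.h>
#include <stdio.h>

/* phi(lag) = sum over i of conj(x[i]) * x[i+lag] -- the same quantity the
 * real_sum/imag_sum accumulators above compute. */
static float complex autocorr(const float complex *x, int n, int lag)
{
    float complex sum = 0;
    for (int i = 0; i + lag < n; i++)
        sum += conjf(x[i]) * x[i + lag];
    return sum;
}

int main(void)
{
    const float complex x[4] = { 1 + 2*I, 2 - 1*I, -1 + 0*I, 0.5f + 0.5f*I };

    for (int lag = 0; lag <= 2; lag++) {
        float complex p = autocorr(x, 4, lag);
        printf("lag %d: %g %+gi\n", lag, crealf(p), cimagf(p));
    }
    return 0;
}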
|
||||
|
||||
/** High Frequency Generation (14496-3 sp04 p214+) and Inverse Filtering
|
||||
* (14496-3 sp04 p214)
|
||||
* Warning: This routine does not seem numerically stable.
|
||||
*/
|
||||
static void sbr_hf_inverse_filter(SBRDSPContext *dsp,
|
||||
float (*alpha0)[2], float (*alpha1)[2],
|
||||
static void sbr_hf_inverse_filter(float (*alpha0)[2], float (*alpha1)[2],
|
||||
const float X_low[32][40][2], int k0)
|
||||
{
|
||||
int k;
|
||||
for (k = 0; k < k0; k++) {
|
||||
LOCAL_ALIGNED_16(float, phi, [3], [2][2]);
|
||||
float dk;
|
||||
float phi[3][2][2], dk;
|
||||
|
||||
dsp->autocorrelate(X_low[k], phi);
|
||||
autocorrelate(X_low[k], phi, 0);
|
||||
autocorrelate(X_low[k], phi, 1);
|
||||
autocorrelate(X_low[k], phi, 2);
|
||||
|
||||
dk = phi[2][1][0] * phi[1][0][0] -
|
||||
(phi[1][1][0] * phi[1][1][0] + phi[1][1][1] * phi[1][1][1]) / 1.000001f;
|
||||
@@ -1353,11 +1365,12 @@ static int sbr_hf_gen(AACContext *ac, SpectralBandReplication *sbr,
|
||||
const float bw_array[5], const uint8_t *t_env,
|
||||
int bs_num_env)
|
||||
{
|
||||
int j, x;
|
||||
int i, j, x;
|
||||
int g = 0;
|
||||
int k = sbr->kx[1];
|
||||
for (j = 0; j < sbr->num_patches; j++) {
|
||||
for (x = 0; x < sbr->patch_num_subbands[j]; x++, k++) {
|
||||
float alpha[4];
|
||||
const int p = sbr->patch_start_subband[j] + x;
|
||||
while (g <= sbr->n_q && k >= sbr->f_tablenoise[g])
|
||||
g++;
|
||||
@@ -1369,10 +1382,26 @@ static int sbr_hf_gen(AACContext *ac, SpectralBandReplication *sbr,
|
||||
return -1;
|
||||
}
|
||||
|
||||
sbr->dsp.hf_gen(X_high[k] + ENVELOPE_ADJUSTMENT_OFFSET,
|
||||
X_low[p] + ENVELOPE_ADJUSTMENT_OFFSET,
|
||||
alpha0[p], alpha1[p], bw_array[g],
|
||||
2 * t_env[0], 2 * t_env[bs_num_env]);
|
||||
alpha[0] = alpha1[p][0] * bw_array[g] * bw_array[g];
|
||||
alpha[1] = alpha1[p][1] * bw_array[g] * bw_array[g];
|
||||
alpha[2] = alpha0[p][0] * bw_array[g];
|
||||
alpha[3] = alpha0[p][1] * bw_array[g];
|
||||
|
||||
for (i = 2 * t_env[0]; i < 2 * t_env[bs_num_env]; i++) {
|
||||
const int idx = i + ENVELOPE_ADJUSTMENT_OFFSET;
|
||||
X_high[k][idx][0] =
|
||||
X_low[p][idx - 2][0] * alpha[0] -
|
||||
X_low[p][idx - 2][1] * alpha[1] +
|
||||
X_low[p][idx - 1][0] * alpha[2] -
|
||||
X_low[p][idx - 1][1] * alpha[3] +
|
||||
X_low[p][idx][0];
|
||||
X_high[k][idx][1] =
|
||||
X_low[p][idx - 2][1] * alpha[0] +
|
||||
X_low[p][idx - 2][0] * alpha[1] +
|
||||
X_low[p][idx - 1][1] * alpha[2] +
|
||||
X_low[p][idx - 1][0] * alpha[3] +
|
||||
X_low[p][idx][1];
|
||||
}
|
||||
}
|
||||
}
|
||||
if (k < sbr->m[1] + sbr->kx[1])
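Whether it goes through sbr->dsp.hf_gen or the expanded real/imaginary arithmetic above, high-band generation is a two-tap complex linear prediction run over the patched low-band subband: X_high[i] = X_low[i] + bw*alpha0*X_low[i-1] + bw^2*alpha1*X_low[i-2]. A stand-alone sketch with C99 complex floats and made-up coefficients (not the decoder's chirp or prediction state):

#include <complex.h>
#include <stdio.h>

/* Regenerate highs by running a 2nd-order complex predictor over the
 * copied low-band samples: out[i] = in[i] + c1*in[i-1] + c2*in[i-2]. */
static void hf_gen(float complex *x_high, const float complex *x_low,
                   float complex a0, float complex a1, float bw,
                   int start, int end)
{
    float complex c1 = a0 * bw;
    float complex c2 = a1 * bw * bw;
    for (int i = start; i < end; i++)
        x_high[i] = x_low[i] + c1 * x_low[i - 1] + c2 * x_low[i - 2];
}

int main(void)
{
    float complex low[8]  = { 0.1f, 0.2f + 0.1f*I, 0.3f, 0.2f,
                              0.1f - 0.1f*I, 0.0f, 0.1f, 0.2f };
    float complex high[8] = { 0 };

    /* made-up prediction coefficients and chirp factor */
    hf_gen(high, low, 0.5f - 0.2f*I, -0.3f + 0.1f*I, 0.75f, 2, 8);
    for (int i = 2; i < 8; i++)
        printf("%d: %g %+gi\n", i, crealf(high[i]), cimagf(high[i]));
    return 0;
}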
|
||||
@@ -1383,8 +1412,8 @@ static int sbr_hf_gen(AACContext *ac, SpectralBandReplication *sbr,
|
||||
|
||||
/// Generate the subband filtered lowband
|
||||
static int sbr_x_gen(SpectralBandReplication *sbr, float X[2][38][64],
|
||||
const float Y0[38][64][2], const float Y1[38][64][2],
|
||||
const float X_low[32][40][2], int ch)
|
||||
const float X_low[32][40][2], const float Y[2][38][64][2],
|
||||
int ch)
|
||||
{
|
||||
int k, i;
|
||||
const int i_f = 32;
|
||||
@@ -1398,8 +1427,8 @@ static int sbr_x_gen(SpectralBandReplication *sbr, float X[2][38][64],
|
||||
}
|
||||
for (; k < sbr->kx[0] + sbr->m[0]; k++) {
|
||||
for (i = 0; i < i_Temp; i++) {
|
||||
X[0][i][k] = Y0[i + i_f][k][0];
|
||||
X[1][i][k] = Y0[i + i_f][k][1];
|
||||
X[0][i][k] = Y[0][i + i_f][k][0];
|
||||
X[1][i][k] = Y[0][i + i_f][k][1];
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1411,8 +1440,8 @@ static int sbr_x_gen(SpectralBandReplication *sbr, float X[2][38][64],
|
||||
}
|
||||
for (; k < sbr->kx[1] + sbr->m[1]; k++) {
|
||||
for (i = i_Temp; i < i_f; i++) {
|
||||
X[0][i][k] = Y1[i][k][0];
|
||||
X[1][i][k] = Y1[i][k][1];
|
||||
X[0][i][k] = Y[1][i][k][0];
|
||||
X[1][i][k] = Y[1][i][k][1];
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
@@ -1421,7 +1450,7 @@ static int sbr_x_gen(SpectralBandReplication *sbr, float X[2][38][64],
|
||||
/** High Frequency Adjustment (14496-3 sp04 p217) and Mapping
|
||||
* (14496-3 sp04 p217)
|
||||
*/
|
||||
static int sbr_mapping(AACContext *ac, SpectralBandReplication *sbr,
|
||||
static void sbr_mapping(AACContext *ac, SpectralBandReplication *sbr,
|
||||
SBRData *ch_data, int e_a[2])
|
||||
{
|
||||
int e, i, m;
|
||||
@@ -1432,12 +1461,7 @@ static int sbr_mapping(AACContext *ac, SpectralBandReplication *sbr,
|
||||
uint16_t *table = ch_data->bs_freq_res[e + 1] ? sbr->f_tablehigh : sbr->f_tablelow;
|
||||
int k;
|
||||
|
||||
if (sbr->kx[1] != table[0]) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "kx != f_table{high,low}[0]. "
|
||||
"Derived frequency tables were not regenerated.\n");
|
||||
sbr_turnoff(sbr);
|
||||
return AVERROR_BUG;
|
||||
}
|
||||
av_assert0(sbr->kx[1] <= table[0]);
|
||||
for (i = 0; i < ilim; i++)
|
||||
for (m = table[i]; m < table[i + 1]; m++)
|
||||
sbr->e_origmapped[e][m - sbr->kx[1]] = ch_data->env_facs[e+1][i];
|
||||
@@ -1472,15 +1496,13 @@ static int sbr_mapping(AACContext *ac, SpectralBandReplication *sbr,
|
||||
}
|
||||
|
||||
memcpy(ch_data->s_indexmapped[0], ch_data->s_indexmapped[ch_data->bs_num_env], sizeof(ch_data->s_indexmapped[0]));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Estimation of current envelope (14496-3 sp04 p218)
|
||||
static void sbr_env_estimate(float (*e_curr)[48], float X_high[64][40][2],
|
||||
SpectralBandReplication *sbr, SBRData *ch_data)
|
||||
{
|
||||
int e, m;
|
||||
int kx1 = sbr->kx[1];
|
||||
int e, i, m;
|
||||
|
||||
if (sbr->bs_interpol_freq) {
|
||||
for (e = 0; e < ch_data->bs_num_env; e++) {
|
||||
@@ -1489,7 +1511,12 @@ static void sbr_env_estimate(float (*e_curr)[48], float X_high[64][40][2],
|
||||
int iub = ch_data->t_env[e + 1] * 2 + ENVELOPE_ADJUSTMENT_OFFSET;
|
||||
|
||||
for (m = 0; m < sbr->m[1]; m++) {
|
||||
float sum = sbr->dsp.sum_square(X_high[m+kx1] + ilb, iub - ilb);
|
||||
float sum = 0.0f;
|
||||
|
||||
for (i = ilb; i < iub; i++) {
|
||||
sum += X_high[m + sbr->kx[1]][i][0] * X_high[m + sbr->kx[1]][i][0] +
|
||||
X_high[m + sbr->kx[1]][i][1] * X_high[m + sbr->kx[1]][i][1];
|
||||
}
|
||||
e_curr[e][m] = sum * recip_env_size;
|
||||
}
|
||||
}
|
||||
@@ -1507,11 +1534,14 @@ static void sbr_env_estimate(float (*e_curr)[48], float X_high[64][40][2],
|
||||
const int den = env_size * (table[p + 1] - table[p]);
|
||||
|
||||
for (k = table[p]; k < table[p + 1]; k++) {
|
||||
sum += sbr->dsp.sum_square(X_high[k] + ilb, iub - ilb);
|
||||
for (i = ilb; i < iub; i++) {
sum += X_high[k][i][0] * X_high[k][i][0] +
X_high[k][i][1] * X_high[k][i][1];
}
}
sum /= den;
for (k = table[p]; k < table[p + 1]; k++) {
e_curr[e][k - kx1] = sum;
e_curr[e][k - sbr->kx[1]] = sum;
}
}
}
@@ -1578,8 +1608,7 @@ static void sbr_gain_calc(AACContext *ac, SpectralBandReplication *sbr,
}

/// Assembling HF Signals (14496-3 sp04 p220)
static void sbr_hf_assemble(float Y1[38][64][2],
const float X_high[64][40][2],
static void sbr_hf_assemble(float Y[2][38][64][2], const float X_high[64][40][2],
SpectralBandReplication *sbr, SBRData *ch_data,
const int e_a[2])
{
@@ -1601,6 +1630,7 @@ static void sbr_hf_assemble(float Y1[38][64][2],
float (*g_temp)[48] = ch_data->g_temp, (*q_temp)[48] = ch_data->q_temp;
int indexnoise = ch_data->f_indexnoise;
int indexsine = ch_data->f_indexsine;
memcpy(Y[0], Y[1], sizeof(Y[0]));

if (sbr->reset) {
for (i = 0; i < h_SL; i++) {
@@ -1622,44 +1652,63 @@ static void sbr_hf_assemble(float Y1[38][64][2],
for (e = 0; e < ch_data->bs_num_env; e++) {
for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) {
int phi_sign = (1 - 2*(kx & 1));
LOCAL_ALIGNED_16(float, g_filt_tab, [48]);
LOCAL_ALIGNED_16(float, q_filt_tab, [48]);
float *g_filt, *q_filt;

if (h_SL && e != e_a[0] && e != e_a[1]) {
g_filt = g_filt_tab;
q_filt = q_filt_tab;
for (m = 0; m < m_max; m++) {
const int idx1 = i + h_SL;
g_filt[m] = 0.0f;
q_filt[m] = 0.0f;
for (j = 0; j <= h_SL; j++) {
g_filt[m] += g_temp[idx1 - j][m] * h_smooth[j];
q_filt[m] += q_temp[idx1 - j][m] * h_smooth[j];
}
float g_filt = 0.0f;
for (j = 0; j <= h_SL; j++)
g_filt += g_temp[idx1 - j][m] * h_smooth[j];
Y[1][i][m + kx][0] =
X_high[m + kx][i + ENVELOPE_ADJUSTMENT_OFFSET][0] * g_filt;
Y[1][i][m + kx][1] =
X_high[m + kx][i + ENVELOPE_ADJUSTMENT_OFFSET][1] * g_filt;
}
} else {
g_filt = g_temp[i + h_SL];
q_filt = q_temp[i];
for (m = 0; m < m_max; m++) {
const float g_filt = g_temp[i + h_SL][m];
Y[1][i][m + kx][0] =
X_high[m + kx][i + ENVELOPE_ADJUSTMENT_OFFSET][0] * g_filt;
Y[1][i][m + kx][1] =
X_high[m + kx][i + ENVELOPE_ADJUSTMENT_OFFSET][1] * g_filt;
}
}

sbr->dsp.hf_g_filt(Y1[i] + kx, X_high + kx, g_filt, m_max,
i + ENVELOPE_ADJUSTMENT_OFFSET);

if (e != e_a[0] && e != e_a[1]) {
sbr->dsp.hf_apply_noise[indexsine](Y1[i] + kx, sbr->s_m[e],
q_filt, indexnoise,
kx, m_max);
} else {
for (m = 0; m < m_max; m++) {
Y1[i][m + kx][0] +=
indexnoise = (indexnoise + 1) & 0x1ff;
if (sbr->s_m[e][m]) {
Y[1][i][m + kx][0] +=
sbr->s_m[e][m] * phi[0][indexsine];
Y[1][i][m + kx][1] +=
sbr->s_m[e][m] * (phi[1][indexsine] * phi_sign);
} else {
float q_filt;
if (h_SL) {
const int idx1 = i + h_SL;
q_filt = 0.0f;
for (j = 0; j <= h_SL; j++)
q_filt += q_temp[idx1 - j][m] * h_smooth[j];
} else {
q_filt = q_temp[i][m];
}
Y[1][i][m + kx][0] +=
q_filt * sbr_noise_table[indexnoise][0];
Y[1][i][m + kx][1] +=
q_filt * sbr_noise_table[indexnoise][1];
}
phi_sign = -phi_sign;
}
} else {
indexnoise = (indexnoise + m_max) & 0x1ff;
for (m = 0; m < m_max; m++) {
Y[1][i][m + kx][0] +=
sbr->s_m[e][m] * phi[0][indexsine];
Y1[i][m + kx][1] +=
Y[1][i][m + kx][1] +=
sbr->s_m[e][m] * (phi[1][indexsine] * phi_sign);
phi_sign = -phi_sign;
}
}
indexnoise = (indexnoise + m_max) & 0x1ff;
indexsine = (indexsine + 1) & 3;
}
}
@@ -1670,54 +1719,39 @@ static void sbr_hf_assemble(float Y1[38][64][2],
void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
float* L, float* R)
{
int downsampled = ac->oc[1].m4ac.ext_sample_rate < sbr->sample_rate;
int downsampled = ac->m4ac.ext_sample_rate < sbr->sample_rate;
int ch;
int nch = (id_aac == TYPE_CPE) ? 2 : 1;
int err;

if (!sbr->kx_and_m_pushed) {
sbr->kx[0] = sbr->kx[1];
sbr->m[0] = sbr->m[1];
} else {
sbr->kx_and_m_pushed = 0;
}

if (sbr->start) {
sbr_dequant(sbr, id_aac);
}
for (ch = 0; ch < nch; ch++) {
/* decode channel */
sbr_qmf_analysis(&ac->dsp, &sbr->mdct_ana, &sbr->dsp, ch ? R : L, sbr->data[ch].analysis_filterbank_samples,
sbr_qmf_analysis(&ac->dsp, &sbr->mdct_ana, ch ? R : L, sbr->data[ch].analysis_filterbank_samples,
(float*)sbr->qmf_filter_scratch,
sbr->data[ch].W);
sbr_lf_gen(ac, sbr, sbr->X_low, sbr->data[ch].W);
sbr->data[ch].Ypos ^= 1;
if (sbr->start) {
sbr_hf_inverse_filter(&sbr->dsp, sbr->alpha0, sbr->alpha1, sbr->X_low, sbr->k[0]);
sbr_hf_inverse_filter(sbr->alpha0, sbr->alpha1, sbr->X_low, sbr->k[0]);
sbr_chirp(sbr, &sbr->data[ch]);
sbr_hf_gen(ac, sbr, sbr->X_high, sbr->X_low, sbr->alpha0, sbr->alpha1,
sbr->data[ch].bw_array, sbr->data[ch].t_env,
sbr->data[ch].bs_num_env);

// hf_adj
err = sbr_mapping(ac, sbr, &sbr->data[ch], sbr->data[ch].e_a);
if (!err) {
sbr_env_estimate(sbr->e_curr, sbr->X_high, sbr, &sbr->data[ch]);
sbr_gain_calc(ac, sbr, &sbr->data[ch], sbr->data[ch].e_a);
sbr_hf_assemble(sbr->data[ch].Y[sbr->data[ch].Ypos],
sbr->X_high, sbr, &sbr->data[ch],
sbr->data[ch].e_a);
}
sbr_mapping(ac, sbr, &sbr->data[ch], sbr->data[ch].e_a);
sbr_env_estimate(sbr->e_curr, sbr->X_high, sbr, &sbr->data[ch]);
sbr_gain_calc(ac, sbr, &sbr->data[ch], sbr->data[ch].e_a);
sbr_hf_assemble(sbr->data[ch].Y, sbr->X_high, sbr, &sbr->data[ch],
sbr->data[ch].e_a);
}

/* synthesis */
sbr_x_gen(sbr, sbr->X[ch],
sbr->data[ch].Y[1-sbr->data[ch].Ypos],
sbr->data[ch].Y[ sbr->data[ch].Ypos],
sbr->X_low, ch);
sbr_x_gen(sbr, sbr->X[ch], sbr->X_low, sbr->data[ch].Y, ch);
}

if (ac->oc[1].m4ac.ps == 1) {
if (ac->m4ac.ps == 1) {
if (sbr->ps.start) {
ff_ps_apply(ac->avctx, &sbr->ps, sbr->X[0], sbr->X[1], sbr->kx[1] + sbr->m[1]);
} else {
@@ -1726,12 +1760,12 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
nch = 2;
}

sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, &sbr->dsp, L, sbr->X[0], sbr->qmf_filter_scratch,
sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, L, sbr->X[0], sbr->qmf_filter_scratch,
sbr->data[0].synthesis_filterbank_samples,
&sbr->data[0].synthesis_filterbank_samples_offset,
downsampled);
if (nch == 2)
sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, &sbr->dsp, R, sbr->X[1], sbr->qmf_filter_scratch,
sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, R, sbr->X[1], sbr->qmf_filter_scratch,
sbr->data[1].synthesis_filterbank_samples,
&sbr->data[1].synthesis_filterbank_samples_offset,
downsampled);
@@ -267,8 +267,8 @@ static const int8_t sbr_offset[6][16] = {
};

///< window coefficients for analysis/synthesis QMF banks
static DECLARE_ALIGNED(32, float, sbr_qmf_window_ds)[320];
static DECLARE_ALIGNED(32, float, sbr_qmf_window_us)[640] = {
static DECLARE_ALIGNED(16, float, sbr_qmf_window_ds)[320];
static DECLARE_ALIGNED(16, float, sbr_qmf_window_us)[640] = {
0.0000000000, -0.0005525286, -0.0005617692, -0.0004947518,
-0.0004875227, -0.0004893791, -0.0005040714, -0.0005226564,
-0.0005466565, -0.0005677802, -0.0005870930, -0.0006132747,
@@ -352,8 +352,7 @@ static DECLARE_ALIGNED(32, float, sbr_qmf_window_us)[640] = {
0.8537385600,
};

/* First two entries repeated at end to simplify SIMD implementations. */
const DECLARE_ALIGNED(16, float, ff_sbr_noise_table)[][2] = {
static const float sbr_noise_table[512][2] = {
{-0.99948153278296, -0.59483417516607}, { 0.97113454393991, -0.67528515225647},
{ 0.14130051758487, -0.95090983575689}, {-0.47005496701697, -0.37340549728647},
{ 0.80705063769351,  0.29653668284408}, {-0.38981478896926,  0.89572605717087},
@@ -610,7 +609,6 @@ const DECLARE_ALIGNED(16, float, ff_sbr_noise_table)[][2] = {
{-0.93412041758744,  0.41374052024363}, { 0.96063943315511,  0.93116709541280},
{ 0.97534253457837,  0.86150930812689}, { 0.99642466504163,  0.70190043427512},
{-0.94705089665984, -0.29580042814306}, { 0.91599807087376, -0.98147830385781},
{-0.99948153278296, -0.59483417516607}, { 0.97113454393991, -0.67528515225647},
};

#endif /* AVCODEC_AACSBRDATA_H */
@@ -33,8 +33,8 @@

#include <stdint.h>

DECLARE_ALIGNED(32, float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(32, float, ff_aac_kbd_short_128)[128];
DECLARE_ALIGNED(16, float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(16, float, ff_aac_kbd_short_128)[128];

const uint8_t ff_aac_num_swb_1024[] = {
41, 41, 47, 49, 49, 51, 47, 47, 43, 43, 43, 40, 40
@@ -44,8 +44,8 @@
/* @name window coefficients
* @{
*/
DECLARE_ALIGNED(32, extern float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(32, extern float, ff_aac_kbd_short_128)[128];
DECLARE_ALIGNED(16, extern float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(16, extern float, ff_aac_kbd_short_128)[128];
// @}

/* @name number of scalefactor window bands for long and short transform windows respectively
@@ -34,26 +34,23 @@

typedef struct AascContext {
AVCodecContext *avctx;
GetByteContext gb;
AVFrame frame;
} AascContext;

#define FETCH_NEXT_STREAM_BYTE() \
if (stream_ptr >= buf_size) \
{ \
av_log(s->avctx, AV_LOG_ERROR, " AASC: stream ptr just went out of bounds (fetch)\n"); \
break; \
} \
stream_byte = buf[stream_ptr++];

static av_cold int aasc_decode_init(AVCodecContext *avctx)
{
AascContext *s = avctx->priv_data;

s->avctx = avctx;
switch (avctx->bits_per_coded_sample) {
case 16:
avctx->pix_fmt = PIX_FMT_RGB555;
break;
case 24:
avctx->pix_fmt = PIX_FMT_BGR24;
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n", avctx->bits_per_coded_sample);
return -1;
}
avctx->pix_fmt = PIX_FMT_BGR24;
avcodec_get_frame_defaults(&s->frame);

return 0;
@@ -66,7 +63,7 @@ static int aasc_decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
AascContext *s = avctx->priv_data;
int compr, i, stride, psize;
int compr, i, stride;

s->frame.reference = 3;
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
@@ -78,39 +75,26 @@ static int aasc_decode_frame(AVCodecContext *avctx,
compr = AV_RL32(buf);
buf += 4;
buf_size -= 4;
psize = avctx->bits_per_coded_sample / 8;
switch (avctx->codec_tag) {
case MKTAG('A', 'A', 'S', '4'):
bytestream2_init(&s->gb, buf - 4, buf_size + 4);
ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb);
break;
case MKTAG('A', 'A', 'S', 'C'):
switch(compr){
case 0:
stride = (avctx->width * psize + psize) & ~psize;
stride = (avctx->width * 3 + 3) & ~3;
for(i = avctx->height - 1; i >= 0; i--){
if(avctx->width * psize > buf_size){
if(avctx->width*3 > buf_size){
av_log(avctx, AV_LOG_ERROR, "Next line is beyond buffer bounds\n");
break;
}
memcpy(s->frame.data[0] + i*s->frame.linesize[0], buf, avctx->width * psize);
memcpy(s->frame.data[0] + i*s->frame.linesize[0], buf, avctx->width*3);
buf += stride;
buf_size -= stride;
}
break;
case 1:
bytestream2_init(&s->gb, buf, buf_size);
ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb);
ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, buf - 4, buf_size + 4);
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
return -1;
}
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown FourCC: %X\n", avctx->codec_tag);
return -1;
}

*data_size = sizeof(AVFrame);
*(AVFrame*)data = s->frame;
@@ -139,5 +123,5 @@ AVCodec ff_aasc_decoder = {
.close = aasc_decode_end,
.decode = aasc_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Autodesk RLE"),
.long_name = NULL_IF_CONFIG_SMALL("Autodesk RLE"),
};
@@ -162,12 +162,17 @@ static av_cold int ac3_decode_init(AVCodecContext *avctx)
AC3DecodeContext *s = avctx->priv_data;
s->avctx = avctx;

#if FF_API_DRC_SCALE
if (avctx->drc_scale)
s->drc_scale = avctx->drc_scale;
#endif

ff_ac3_common_init();
ac3_tables_init();
ff_mdct_init(&s->imdct_256, 8, 1, 1.0);
ff_mdct_init(&s->imdct_512, 9, 1, 1.0);
ff_kbd_window_init(s->window, 5.0, 256);
ff_dsputil_init(&s->dsp, avctx);
dsputil_init(&s->dsp, avctx);
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT);
ff_fmt_convert_init(&s->fmt_conv, avctx);
av_lfg_init(&s->dith_state, 0);
@@ -753,7 +758,9 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
int downmix_output;
int cpl_in_use;
GetBitContext *gbc = &s->gbc;
uint8_t bit_alloc_stages[AC3_MAX_CHANNELS] = { 0 };
uint8_t bit_alloc_stages[AC3_MAX_CHANNELS];

memset(bit_alloc_stages, 0, AC3_MAX_CHANNELS);

/* block switch flags */
different_transforms = 0;
@@ -1395,10 +1402,6 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
if (s->out_channels < s->channels)
s->output_mode = s->out_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO;
}
if (avctx->channels != s->out_channels) {
av_log(avctx, AV_LOG_ERROR, "channel number mismatching on damaged frame\n");
return AVERROR_INVALIDDATA;
}
/* set audio service type based on bitstream mode for AC-3 */
avctx->audio_service_type = s->bitstream_mode;
if (s->bitstream_mode == 0x7 && s->channels > 1)
@@ -1383,7 +1383,8 @@ static void ac3_output_frame_header(AC3EncodeContext *s)
*/
static void output_audio_block(AC3EncodeContext *s, int blk)
{
int ch, i, baie, bnd, got_cpl, ch0;
int ch, i, baie, bnd, got_cpl;
int av_uninit(ch0);
AC3Block *block = &s->blocks[blk];

/* block switching */
@@ -2050,9 +2051,7 @@ av_cold int ff_ac3_encode_close(AVCodecContext *avctx)

s->mdct_end(s);

#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
return 0;
}

@@ -2071,7 +2070,7 @@ static av_cold int set_channel_info(AC3EncodeContext *s, int channels,
return AVERROR(EINVAL);
ch_layout = *channel_layout;
if (!ch_layout)
ch_layout = av_get_default_channel_layout(channels);
ch_layout = avcodec_guess_channel_layout(channels, CODEC_ID_AC3, NULL);

s->lfe_on = !!(ch_layout & AV_CH_LOW_FREQUENCY);
s->channels = channels;
@@ -2140,17 +2139,6 @@ static av_cold int validate_options(AC3EncodeContext *s)
s->bit_alloc.sr_code = i % 3;
s->bitstream_id = s->eac3 ? 16 : 8 + s->bit_alloc.sr_shift;

/* select a default bit rate if not set by the user */
if (!avctx->bit_rate) {
switch (s->fbw_channels) {
case 1: avctx->bit_rate =  96000; break;
case 2: avctx->bit_rate = 192000; break;
case 3: avctx->bit_rate = 320000; break;
case 4: avctx->bit_rate = 384000; break;
case 5: avctx->bit_rate = 448000; break;
}
}

/* validate bit rate */
if (s->eac3) {
int max_br, min_br, wpf, min_br_dist, min_br_code;
@@ -2199,20 +2187,15 @@ static av_cold int validate_options(AC3EncodeContext *s)
wpf--;
s->frame_size_min = 2 * wpf;
} else {
int best_br = 0, best_code = 0, best_diff = INT_MAX;
for (i = 0; i < 19; i++) {
int br   = (ff_ac3_bitrate_tab[i] >> s->bit_alloc.sr_shift) * 1000;
int diff = abs(br - avctx->bit_rate);
if (diff < best_diff) {
best_br   = br;
best_code = i;
best_diff = diff;
}
if (!best_diff)
if ((ff_ac3_bitrate_tab[i] >> s->bit_alloc.sr_shift)*1000 == avctx->bit_rate)
break;
}
avctx->bit_rate    = best_br;
s->frame_size_code = best_code << 1;
if (i == 19) {
av_log(avctx, AV_LOG_ERROR, "invalid bit rate\n");
return AVERROR(EINVAL);
}
s->frame_size_code = i << 1;
s->frame_size_min  = 2 * ff_ac3_frame_size_tab[s->frame_size_code][s->bit_alloc.sr_code];
s->num_blks_code   = 0x3;
s->num_blocks      = 6;
@@ -2250,7 +2233,8 @@ static av_cold int validate_options(AC3EncodeContext *s)
*/
static av_cold void set_bandwidth(AC3EncodeContext *s)
{
int blk, ch, cpl_start;
int blk, ch;
int av_uninit(cpl_start);

if (s->cutoff) {
/* calculate bandwidth based on user-specified cutoff frequency */
@@ -2436,7 +2420,6 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx)
return ret;

avctx->frame_size = AC3_BLOCK_SIZE * s->num_blocks;
avctx->delay      = AC3_BLOCK_SIZE;

s->bitstream_mode = avctx->audio_service_type;
if (s->bitstream_mode == AV_AUDIO_SERVICE_TYPE_KARAOKE)
@@ -2482,15 +2465,9 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx)
if (ret)
goto init_fail;

#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame= avcodec_alloc_frame();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
goto init_fail;
}
#endif

ff_dsputil_init(&s->dsp, avctx);
dsputil_init(&s->dsp, avctx);
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT);

dprint_options(s);
@@ -297,9 +297,9 @@ int ff_ac3_float_mdct_init(AC3EncodeContext *s);
int ff_ac3_fixed_allocate_sample_buffers(AC3EncodeContext *s);
int ff_ac3_float_allocate_sample_buffers(AC3EncodeContext *s);

int ff_ac3_fixed_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr);
int ff_ac3_float_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr);
int ff_ac3_fixed_encode_frame(AVCodecContext *avctx, unsigned char *frame,
int buf_size, void *data);
int ff_ac3_float_encode_frame(AVCodecContext *avctx, unsigned char *frame,
int buf_size, void *data);

#endif /* AVCODEC_AC3ENC_H */
@@ -28,7 +28,6 @@

#define CONFIG_FFT_FLOAT 0
#undef CONFIG_AC3ENC_FLOAT
#include "internal.h"
#include "ac3enc.h"
#include "eac3enc.h"

@@ -147,17 +146,15 @@ static av_cold int ac3_fixed_encode_init(AVCodecContext *avctx)


AVCodec ff_ac3_fixed_encoder = {
.name            = "ac3_fixed",
.type            = AVMEDIA_TYPE_AUDIO,
.id              = CODEC_ID_AC3,
.priv_data_size  = sizeof(AC3EncodeContext),
.init            = ac3_fixed_encode_init,
.encode2         = ff_ac3_fixed_encode_frame,
.close           = ff_ac3_encode_close,
.sample_fmts     = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.long_name       = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
.priv_class      = &ac3enc_class,
.name           = "ac3_fixed",
.type           = AVMEDIA_TYPE_AUDIO,
.id             = CODEC_ID_AC3,
.priv_data_size = sizeof(AC3EncodeContext),
.init           = ac3_fixed_encode_init,
.encode         = ff_ac3_fixed_encode_frame,
.close          = ff_ac3_encode_close,
.sample_fmts    = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
.long_name      = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
.priv_class     = &ac3enc_class,
.channel_layouts = ff_ac3_channel_layouts,
.defaults        = ac3_defaults,
};
@@ -27,7 +27,6 @@
*/

#define CONFIG_AC3ENC_FLOAT 1
#include "internal.h"
#include "ac3enc.h"
#include "eac3enc.h"
#include "kbdwin.h"
@@ -145,18 +144,16 @@ static CoefType calc_cpl_coord(CoefSumType energy_ch, CoefSumType energy_cpl)

#if CONFIG_AC3_ENCODER
AVCodec ff_ac3_encoder = {
.name            = "ac3",
.type            = AVMEDIA_TYPE_AUDIO,
.id              = CODEC_ID_AC3,
.priv_data_size  = sizeof(AC3EncodeContext),
.init            = ff_ac3_encode_init,
.encode2         = ff_ac3_float_encode_frame,
.close           = ff_ac3_encode_close,
.sample_fmts     = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_NONE },
.long_name       = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
.priv_class      = &ac3enc_class,
.name           = "ac3",
.type           = AVMEDIA_TYPE_AUDIO,
.id             = CODEC_ID_AC3,
.priv_data_size = sizeof(AC3EncodeContext),
.init           = ff_ac3_encode_init,
.encode         = ff_ac3_float_encode_frame,
.close          = ff_ac3_encode_close,
.sample_fmts    = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
.long_name      = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
.priv_class     = &ac3enc_class,
.channel_layouts = ff_ac3_channel_layouts,
.defaults        = ac3_defaults,
};
#endif
@@ -20,7 +20,6 @@
*/

#include "libavutil/opt.h"
#include "internal.h"
#include "ac3.h"

#if AC3ENC_TYPE == AC3ENC_TYPE_AC3_FIXED
@@ -79,8 +78,3 @@ static const AVOption eac3_options[] = {
{"auto", "Selected by the Encoder", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_AUTO }, INT_MIN, INT_MAX, AC3ENC_PARAM, "cpl_start_band"},
{NULL}
};

static const AVCodecDefault ac3_defaults[] = {
{ "b", "0" },
{ NULL }
};
@@ -338,7 +338,7 @@ static void compute_rematrixing_strategy(AC3EncodeContext *s)
{
int nb_coefs;
int blk, bnd;
AC3Block *block, *block0;
AC3Block *block, *av_uninit(block0);

if (s->channel_mode != AC3_CHMODE_STEREO)
return;
@@ -386,11 +386,11 @@ static void compute_rematrixing_strategy(AC3EncodeContext *s)
}


int AC3_NAME(encode_frame)(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr)
int AC3_NAME(encode_frame)(AVCodecContext *avctx, unsigned char *frame,
int buf_size, void *data)
{
AC3EncodeContext *s = avctx->priv_data;
const SampleType *samples = (const SampleType *)frame->data[0];
const SampleType *samples = data;
int ret;

if (s->options.allow_per_frame_metadata) {
@@ -437,13 +437,7 @@ int AC3_NAME(encode_frame)(AVCodecContext *avctx, AVPacket *avpkt,

ff_ac3_quantize_mantissas(s);

if ((ret = ff_alloc_packet2(avctx, avpkt, s->frame_size)))
return ret;
ff_ac3_output_frame(s, avpkt->data);
ff_ac3_output_frame(s, frame);

if (frame->pts != AV_NOPTS_VALUE)
avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->delay);

*got_packet_ptr = 1;
return 0;
return s->frame_size;
}
@@ -142,3 +142,4 @@ void ff_tilt_compensation(float *mem, float tilt, float *samples, int size)
samples[0] -= tilt * *mem;
*mem = new_tilt_mem;
}

@@ -116,7 +116,7 @@ int16_t ff_acelp_decode_gain_code(
);
#else
mr_energy = gain_corr_factor * exp(M_LN10 / (20 << 23) * mr_energy) /
sqrt(dsp->scalarproduct_int16(fc_v, fc_v, subframe_size));
sqrt(dsp->scalarproduct_int16(fc_v, fc_v, subframe_size, 0));
return mr_energy >> 12;
#endif
}
File diff suppressed because it is too large
@@ -26,20 +26,18 @@
#ifndef AVCODEC_ADPCM_H
#define AVCODEC_ADPCM_H

#include <stdint.h>

#define BLKSIZE 1024

typedef struct ADPCMChannelStatus {
int predictor;
int16_t step_index;
short int step_index;
int step;
/* for encoding */
int prev_sample;

/* MS version */
int16_t sample1;
int16_t sample2;
short sample1;
short sample2;
int coeff1;
int coeff2;
int idelta;
@@ -24,7 +24,6 @@
#include "bytestream.h"
#include "adpcm.h"
#include "adpcm_data.h"
#include "internal.h"

/**
* @file
@@ -59,23 +58,17 @@ typedef struct ADPCMEncodeContext {

#define FREEZE_INTERVAL 128

static av_cold int adpcm_encode_close(AVCodecContext *avctx);

static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
ADPCMEncodeContext *s = avctx->priv_data;
uint8_t *extradata;
int i;
int ret = AVERROR(ENOMEM);

if (avctx->channels > 2) {
av_log(avctx, AV_LOG_ERROR, "only stereo or mono is supported\n");
return AVERROR(EINVAL);
}
if (avctx->channels > 2)
return -1; /* only stereo or mono =) */

if (avctx->trellis && (unsigned)avctx->trellis > 16U) {
av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
return AVERROR(EINVAL);
return -1;
}

if (avctx->trellis) {
@@ -112,12 +105,12 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
/* each 16 bits sample gives one nibble
and we have 7 bytes per channel overhead */
avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
avctx->block_align = BLKSIZE;
avctx->bits_per_coded_sample = 4;
avctx->block_align = BLKSIZE;
if (!(avctx->extradata = av_malloc(32 + FF_INPUT_BUFFER_PADDING_SIZE)))
goto error;
avctx->extradata_size = 32;
extradata = avctx->extradata;
extradata = avctx->extradata = av_malloc(avctx->extradata_size);
if (!extradata)
return AVERROR(ENOMEM);
bytestream_put_le16(&extradata, avctx->frame_size);
bytestream_put_le16(&extradata, 7); /* wNumCoef */
for (i = 0; i < 7; i++) {
@@ -126,7 +119,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
}
break;
case CODEC_ID_ADPCM_YAMAHA:
avctx->frame_size = BLKSIZE * 2 / avctx->channels;
avctx->frame_size = BLKSIZE * avctx->channels;
avctx->block_align = BLKSIZE;
break;
case CODEC_ID_ADPCM_SWF:
@@ -135,33 +128,30 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
avctx->sample_rate != 44100) {
av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
"22050 or 44100\n");
ret = AVERROR(EINVAL);
goto error;
}
avctx->frame_size = 512 * (avctx->sample_rate / 11025);
break;
default:
ret = AVERROR(EINVAL);
goto error;
}

#if FF_API_OLD_ENCODE_AUDIO
if (!(avctx->coded_frame = avcodec_alloc_frame()))
goto error;
#endif
avctx->coded_frame = avcodec_alloc_frame();
avctx->coded_frame->key_frame= 1;

return 0;
error:
adpcm_encode_close(avctx);
return ret;
av_freep(&s->paths);
av_freep(&s->node_buf);
av_freep(&s->nodep_buf);
av_freep(&s->trellis_hash);
return -1;
}

static av_cold int adpcm_encode_close(AVCodecContext *avctx)
{
ADPCMEncodeContext *s = avctx->priv_data;
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
av_freep(&s->paths);
av_freep(&s->node_buf);
av_freep(&s->nodep_buf);
@@ -171,8 +161,8 @@ static av_cold int adpcm_encode_close(AVCodecContext *avctx)
}


static inline uint8_t adpcm_ima_compress_sample(ADPCMChannelStatus *c,
int16_t sample)
static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c,
short sample)
{
int delta = sample - c->prev_sample;
int nibble = FFMIN(7, abs(delta) * 4 /
@@ -184,8 +174,8 @@ static inline uint8_t adpcm_ima_compress_sample(ADPCMChannelStatus *c,
return nibble;
}

static inline uint8_t adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
int16_t sample)
static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
short sample)
{
int delta = sample - c->prev_sample;
int diff, step = ff_adpcm_step_table[c->step_index];
@@ -221,8 +211,8 @@ static inline uint8_t adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
return nibble;
}

static inline uint8_t adpcm_ms_compress_sample(ADPCMChannelStatus *c,
int16_t sample)
static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c,
short sample)
{
int predictor, nibble, bias;

@@ -238,20 +228,20 @@ static inline uint8_t adpcm_ms_compress_sample(ADPCMChannelStatus *c,
nibble = (nibble + bias) / c->idelta;
nibble = av_clip(nibble, -8, 7) & 0x0F;

predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;
predictor += (signed)((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;

c->sample2 = c->sample1;
c->sample1 = av_clip_int16(predictor);

c->idelta = (ff_adpcm_AdaptationTable[nibble] * c->idelta) >> 8;
c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
if (c->idelta < 16)
c->idelta = 16;

return nibble;
}

static inline uint8_t adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
int16_t sample)
static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
short sample)
{
int nibble, delta;

@@ -272,9 +262,8 @@ static inline uint8_t adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
return nibble;
}

static void adpcm_compress_trellis(AVCodecContext *avctx,
const int16_t *samples, uint8_t *dst,
ADPCMChannelStatus *c, int n)
static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
uint8_t *dst, ADPCMChannelStatus *c, int n)
{
//FIXME 6% faster if frontier is a compile-time constant
ADPCMEncodeContext *s = avctx->priv_data;
@@ -478,41 +467,35 @@ static void adpcm_compress_trellis(AVCodecContext *avctx,
c->idelta = nodes[0]->step;
}

static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr)
static int adpcm_encode_frame(AVCodecContext *avctx,
unsigned char *frame, int buf_size, void *data)
{
int n, i, st, pkt_size, ret;
const int16_t *samples;
uint8_t *dst;
int n, i, st;
short *samples;
unsigned char *dst;
ADPCMEncodeContext *c = avctx->priv_data;
uint8_t *buf;

samples = (const int16_t *)frame->data[0];
dst = frame;
samples = (short *)data;
st = avctx->channels == 2;

if (avctx->codec_id == CODEC_ID_ADPCM_SWF)
pkt_size = (2 + avctx->channels * (22 + 4 * (frame->nb_samples - 1)) + 7) / 8;
else
pkt_size = avctx->block_align;
if ((ret = ff_alloc_packet2(avctx, avpkt, pkt_size)))
return ret;
dst = avpkt->data;
/* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */

switch(avctx->codec->id) {
case CODEC_ID_ADPCM_IMA_WAV:
n = frame->nb_samples / 8;
c->status[0].prev_sample = samples[0];
n = avctx->frame_size / 8;
c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
/* c->status[0].step_index = 0;
XXX: not sure how to init the state machine */
bytestream_put_le16(&dst, c->status[0].prev_sample);
*dst++ = c->status[0].step_index;
*dst++ = (unsigned char)c->status[0].step_index;
*dst++ = 0; /* unknown */
samples++;
if (avctx->channels == 2) {
c->status[1].prev_sample = samples[0];
c->status[1].prev_sample = (signed short)samples[0];
/* c->status[1].step_index = 0; */
bytestream_put_le16(&dst, c->status[1].prev_sample);
*dst++ = c->status[1].step_index;
*dst++ = (unsigned char)c->status[1].step_index;
*dst++ = 0;
samples++;
}
@@ -568,10 +551,10 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
{
int ch, i;
PutBitContext pb;
init_put_bits(&pb, dst, pkt_size * 8);
init_put_bits(&pb, dst, buf_size * 8);

for (ch = 0; ch < avctx->channels; ch++) {
put_bits(&pb, 9, (c->status[ch].prev_sample & 0xFFFF) >> 7);
put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
put_bits(&pb, 7, c->status[ch].step_index);
if (avctx->trellis > 0) {
uint8_t buf[64];
@@ -592,15 +575,16 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
}

flush_put_bits(&pb);
dst += put_bits_count(&pb) >> 3;
break;
}
case CODEC_ID_ADPCM_SWF:
{
int i;
PutBitContext pb;
init_put_bits(&pb, dst, pkt_size * 8);
init_put_bits(&pb, dst, buf_size * 8);

n = frame->nb_samples - 1;
n = avctx->frame_size - 1;

// store AdpcmCodeSize
put_bits(&pb, 2, 2); // set 4-bit flash adpcm format
@@ -611,7 +595,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63);
put_sbits(&pb, 16, samples[i]);
put_bits(&pb, 6, c->status[i].step_index);
c->status[i].prev_sample = samples[i];
c->status[i].prev_sample = (signed short)samples[i];
}

if (avctx->trellis > 0) {
@@ -627,7 +611,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
}
av_free(buf);
} else {
for (i = 1; i < frame->nb_samples; i++) {
for (i = 1; i < avctx->frame_size; i++) {
put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0],
samples[avctx->channels * i]));
if (avctx->channels == 2)
@@ -636,6 +620,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
}
}
flush_put_bits(&pb);
dst += put_bits_count(&pb) >> 3;
break;
}
case CODEC_ID_ADPCM_MS:
@@ -683,7 +668,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
}
break;
case CODEC_ID_ADPCM_YAMAHA:
n = frame->nb_samples / 2;
n = avctx->frame_size / 2;
if (avctx->trellis > 0) {
FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 2, error);
n *= 2;
@@ -707,14 +692,10 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
}
break;
default:
return AVERROR(EINVAL);
error:
return -1;
}

avpkt->size = pkt_size;
*got_packet_ptr = 1;
return 0;
error:
return AVERROR(ENOMEM);
return dst - frame;
}


@@ -725,10 +706,10 @@ AVCodec ff_ ## name_ ## _encoder = { \
.id             = id_, \
.priv_data_size = sizeof(ADPCMEncodeContext), \
.init           = adpcm_encode_init, \
.encode2        = adpcm_encode_frame, \
.encode         = adpcm_encode_frame, \
.close          = adpcm_encode_close, \
.sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, \
AV_SAMPLE_FMT_NONE }, \
.sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, \
AV_SAMPLE_FMT_NONE}, \
.long_name      = NULL_IF_CONFIG_SMALL(long_name_), \
}

@@ -80,9 +80,6 @@ static int adx_parse(AVCodecParserContext *s1,
*poutbuf_size = 0;
return buf_size;
}

s1->duration = BLOCK_SAMPLES;

*poutbuf = buf;
*poutbuf_size = buf_size;
return next;
@@ -100,7 +100,6 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data,
ADXContext *c = avctx->priv_data;
int16_t *samples;
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = buf + avpkt->size;
int num_blocks, ch, ret;

if (c->eof) {
@@ -149,7 +148,7 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data,

while (num_blocks--) {
for (ch = 0; ch < c->channels; ch++) {
if (buf_end - buf < BLOCK_SIZE || adx_decode(c, samples + ch, buf, ch)) {
if (adx_decode(c, samples + ch, buf, ch)) {
c->eof = 1;
buf = avpkt->data + avpkt->size;
break;
Some files were not shown because too many files have changed in this diff