Compare commits
330 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
d55059d048 | ||
![]() |
3926ebc702 | ||
![]() |
85162a60b8 | ||
![]() |
573d5fdeda | ||
![]() |
d234e1d333 | ||
![]() |
954ce59a0a | ||
![]() |
356f8634e9 | ||
![]() |
605c3f0515 | ||
![]() |
e0f351a979 | ||
![]() |
c44ac4d81e | ||
![]() |
84e1aa7768 | ||
![]() |
ea23dcc498 | ||
![]() |
87d335c007 | ||
![]() |
320df1c7c0 | ||
![]() |
b6f5a54fdd | ||
![]() |
ff3e385d84 | ||
![]() |
1b3803e8c1 | ||
![]() |
dae552fbca | ||
![]() |
5198623b0e | ||
![]() |
a6388942f5 | ||
![]() |
a62852bae0 | ||
![]() |
40d4e82e2e | ||
![]() |
14f140f369 | ||
![]() |
a4b4be7493 | ||
![]() |
4490b3db23 | ||
![]() |
edb4aad93d | ||
![]() |
e4ad9e9377 | ||
![]() |
de2eb6dab1 | ||
![]() |
50b2601338 | ||
![]() |
c681cf34c4 | ||
![]() |
32a81629da | ||
![]() |
21dd8f5baa | ||
![]() |
03e2e95beb | ||
![]() |
8c0261d685 | ||
![]() |
4b7036c1d9 | ||
![]() |
cf701b008f | ||
![]() |
d0a225d6c1 | ||
![]() |
d6641c0bab | ||
![]() |
0ca658a716 | ||
![]() |
340c1843c5 | ||
![]() |
ffe831fcb6 | ||
![]() |
92abb12863 | ||
![]() |
8e7e12dcbf | ||
![]() |
0147e9f7c6 | ||
![]() |
b22cc0c15d | ||
![]() |
6642804c22 | ||
![]() |
38cd7f66b7 | ||
![]() |
402bc31d87 | ||
![]() |
8b597077ae | ||
![]() |
dcdeeea820 | ||
![]() |
cbadebd8cc | ||
![]() |
9f0bf48b5c | ||
![]() |
9103d77ffc | ||
![]() |
8b48e2c654 | ||
![]() |
42abae1125 | ||
![]() |
436c011a77 | ||
![]() |
ec6271c019 | ||
![]() |
455b98b777 | ||
![]() |
e9aeab3909 | ||
![]() |
f7d7b01e81 | ||
![]() |
3c3ee65ebb | ||
![]() |
f6ebfeb5b4 | ||
![]() |
5d6982c13b | ||
![]() |
e11fa0879a | ||
![]() |
4702154464 | ||
![]() |
e390a9de6f | ||
![]() |
e70d202275 | ||
![]() |
9de0c8c60c | ||
![]() |
db041fd115 | ||
![]() |
7a877418e3 | ||
![]() |
cf9b04c6f2 | ||
![]() |
5667eaf010 | ||
![]() |
89f2d6c349 | ||
![]() |
32a79b5649 | ||
![]() |
515b7f08b1 | ||
![]() |
63945e2226 | ||
![]() |
ad6eefc0d8 | ||
![]() |
601e648458 | ||
![]() |
e540446f6c | ||
![]() |
fc9c5ad9ea | ||
![]() |
68f1212696 | ||
![]() |
0a5bca957d | ||
![]() |
0f61521f69 | ||
![]() |
58afa73338 | ||
![]() |
68ee43468e | ||
![]() |
80440c5b1d | ||
![]() |
89bd49b25c | ||
![]() |
d7e5301e43 | ||
![]() |
6a3fed8749 | ||
![]() |
49b8709870 | ||
![]() |
1bd1103175 | ||
![]() |
66569b375c | ||
![]() |
9f9b6388a3 | ||
![]() |
32b62c7d6a | ||
![]() |
9f561ec398 | ||
![]() |
103cb461b0 | ||
![]() |
7e97d98033 | ||
![]() |
632fd58a8f | ||
![]() |
68874c42e7 | ||
![]() |
a598f0a5d7 | ||
![]() |
80695c9d1f | ||
![]() |
17c3ec77c2 | ||
![]() |
e8c8b27f66 | ||
![]() |
4eff392a4b | ||
![]() |
e5ae872309 | ||
![]() |
3fe4055c5f | ||
![]() |
86fbd610ef | ||
![]() |
f6e360770d | ||
![]() |
b8cc9e206b | ||
![]() |
4ae111cf71 | ||
![]() |
0bf48d24d7 | ||
![]() |
2dcacbb68e | ||
![]() |
0064fcb486 | ||
![]() |
a4269652ab | ||
![]() |
3ccbd6b06d | ||
![]() |
fa97a8030f | ||
![]() |
53c430415a | ||
![]() |
f246d46ee6 | ||
![]() |
94773637ba | ||
![]() |
e1152971a6 | ||
![]() |
7cd9732b33 | ||
![]() |
acdc505b2a | ||
![]() |
f09bbd38b0 | ||
![]() |
690fda3ae4 | ||
![]() |
0280cf9aa7 | ||
![]() |
a47c277205 | ||
![]() |
b0355d3253 | ||
![]() |
e9ce8a4480 | ||
![]() |
e9378b42b1 | ||
![]() |
6caca26533 | ||
![]() |
446d11f5ed | ||
![]() |
a80205ff82 | ||
![]() |
0231a68ecd | ||
![]() |
7c67d9c6fb | ||
![]() |
49db360005 | ||
![]() |
42476635ba | ||
![]() |
462ecdb9bb | ||
![]() |
6210d62c5f | ||
![]() |
a1666ae8f0 | ||
![]() |
f69045793d | ||
![]() |
adf0c1c70c | ||
![]() |
3073564a97 | ||
![]() |
5e3389b742 | ||
![]() |
f41e935225 | ||
![]() |
897088d604 | ||
![]() |
7dbdf02abc | ||
![]() |
f21771260b | ||
![]() |
a9de82ac17 | ||
![]() |
340fee05f0 | ||
![]() |
a5546736bd | ||
![]() |
3aa89662ce | ||
![]() |
ed0dc01a47 | ||
![]() |
4a4e4b8139 | ||
![]() |
33636442e2 | ||
![]() |
3710f0b9ed | ||
![]() |
12d0e44bdb | ||
![]() |
857d64a58c | ||
![]() |
a644934b80 | ||
![]() |
89cf156ace | ||
![]() |
4f94de84e8 | ||
![]() |
9665ccda10 | ||
![]() |
8dce2dd9cc | ||
![]() |
532731369a | ||
![]() |
1c930fe915 | ||
![]() |
a25c7081ab | ||
![]() |
dfdb3ca341 | ||
![]() |
c21440637e | ||
![]() |
0620b6636e | ||
![]() |
f45a5c60b9 | ||
![]() |
6ba87ba6f9 | ||
![]() |
22721b2829 | ||
![]() |
9bd47bbcc6 | ||
![]() |
42b69286de | ||
![]() |
e8928f7e97 | ||
![]() |
b1933a1e12 | ||
![]() |
36e30d7ab9 | ||
![]() |
ea199c707c | ||
![]() |
09ce851c7e | ||
![]() |
5c2097cff7 | ||
![]() |
6a8826abf4 | ||
![]() |
46e4dd66da | ||
![]() |
7e35c3e177 | ||
![]() |
0f38b11939 | ||
![]() |
fd552756d9 | ||
![]() |
8056d020ad | ||
![]() |
d20e948f07 | ||
![]() |
acbdee0a2b | ||
![]() |
fa86884dbc | ||
![]() |
3d207417fb | ||
![]() |
cb75ed3014 | ||
![]() |
41ff9350e9 | ||
![]() |
f8320fd344 | ||
![]() |
ae26aab2c3 | ||
![]() |
7a8fc9906f | ||
![]() |
c2507fb986 | ||
![]() |
1db39228c6 | ||
![]() |
d8d25f143f | ||
![]() |
c550d56414 | ||
![]() |
d561025e49 | ||
![]() |
61fb7e17a2 | ||
![]() |
5750a95f8b | ||
![]() |
a1a079dce4 | ||
![]() |
2cdd89a561 | ||
![]() |
c0c5189e66 | ||
![]() |
125894f15f | ||
![]() |
54cfb532a0 | ||
![]() |
e9a7ab20be | ||
![]() |
0ab9072ee0 | ||
![]() |
3461d27506 | ||
![]() |
9c338b7c4c | ||
![]() |
25cadd366b | ||
![]() |
f57c629b18 | ||
![]() |
0815a2907a | ||
![]() |
d1e1674f66 | ||
![]() |
21ca96049f | ||
![]() |
9f1dde03d5 | ||
![]() |
d0269db8e3 | ||
![]() |
cb42ed171d | ||
![]() |
2bbda9872f | ||
![]() |
9dd420ede1 | ||
![]() |
a1974d1ea0 | ||
![]() |
bf58e5422d | ||
![]() |
88eaab2827 | ||
![]() |
0c72d64160 | ||
![]() |
1bb565360c | ||
![]() |
1acf921d6f | ||
![]() |
7bf3f79502 | ||
![]() |
e3316723b6 | ||
![]() |
17d82125c5 | ||
![]() |
513b0f4872 | ||
![]() |
cb2d66a470 | ||
![]() |
8c2fe23b1c | ||
![]() |
c2aa7b70d4 | ||
![]() |
8d055e9079 | ||
![]() |
4f23f24e30 | ||
![]() |
34cee5bd37 | ||
![]() |
95234da75e | ||
![]() |
cb7579ae6f | ||
![]() |
5341734b03 | ||
![]() |
3c7ba39d09 | ||
![]() |
7c98d284ff | ||
![]() |
c55b158b87 | ||
![]() |
5ca4ec76b7 | ||
![]() |
813c05fcd2 | ||
![]() |
dd5d2de5ec | ||
![]() |
c40296d1ad | ||
![]() |
cd5d5bf45c | ||
![]() |
aed8e8f9f3 | ||
![]() |
349cca9f12 | ||
![]() |
58b65c1241 | ||
![]() |
44e7ef15ae | ||
![]() |
e7cb6e1c04 | ||
![]() |
4af03698f3 | ||
![]() |
8af76473b3 | ||
![]() |
7d96f764d6 | ||
![]() |
b32554175c | ||
![]() |
134b7f57db | ||
![]() |
0177ac9637 | ||
![]() |
17044e1a2b | ||
![]() |
19babec06f | ||
![]() |
02798736bf | ||
![]() |
56e2190e10 | ||
![]() |
54aa78adaa | ||
![]() |
c575d63ac2 | ||
![]() |
3f1e77a44c | ||
![]() |
923deca05f | ||
![]() |
d1f74d838e | ||
![]() |
07892e33ef | ||
![]() |
0c62a00e68 | ||
![]() |
1804becb30 | ||
![]() |
a11c9d7778 | ||
![]() |
1d23f5bbef | ||
![]() |
c0ff6b6378 | ||
![]() |
6a805e5334 | ||
![]() |
6b7a14e599 | ||
![]() |
b6c19c9c91 | ||
![]() |
e8bd1af095 | ||
![]() |
09453e6bd3 | ||
![]() |
bf9af661b8 | ||
![]() |
63d5fa55fe | ||
![]() |
d344e45f19 | ||
![]() |
485d088f94 | ||
![]() |
84f3bdb8f4 | ||
![]() |
93442a27c5 | ||
![]() |
bfe7d96ed2 | ||
![]() |
97e6a0fa2a | ||
![]() |
9e4d30397b | ||
![]() |
399e5b675b | ||
![]() |
0a2fbb0a84 | ||
![]() |
b4ad641334 | ||
![]() |
60cb07196f | ||
![]() |
c30e59383b | ||
![]() |
b9e09c9eab | ||
![]() |
dfdf616773 | ||
![]() |
46d0aee3c4 | ||
![]() |
55e2dc7f77 | ||
![]() |
a46fa584c5 | ||
![]() |
980e4263bc | ||
![]() |
2a442eb70f | ||
![]() |
23ed56d307 | ||
![]() |
b84683a906 | ||
![]() |
01ff5378f0 | ||
![]() |
96545777d7 | ||
![]() |
1506becf36 | ||
![]() |
f7c196a1f9 | ||
![]() |
7614facf8a | ||
![]() |
35a42ca0bf | ||
![]() |
de69052b1a | ||
![]() |
bf928ebed2 | ||
![]() |
f52324629a | ||
![]() |
eee78ef30d | ||
![]() |
f600d709f0 | ||
![]() |
2a66a65341 | ||
![]() |
964d75735f | ||
![]() |
6db6fe82ca | ||
![]() |
388e3e7853 | ||
![]() |
866d5c958f | ||
![]() |
238c7ffd7d | ||
![]() |
9aaf250fb3 | ||
![]() |
834aef341d | ||
![]() |
657737e58d | ||
![]() |
b92ef485c7 | ||
![]() |
6289b6c811 | ||
![]() |
849ee7ee82 | ||
![]() |
5693443446 | ||
![]() |
57111c558e | ||
![]() |
e6a5de4421 | ||
![]() |
b8c41e5c08 | ||
![]() |
7881dad2b2 | ||
![]() |
9f8d8c57fb |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -15,7 +15,6 @@ config.*
|
||||
doc/*.1
|
||||
doc/*.html
|
||||
doc/*.pod
|
||||
doc/fate.txt
|
||||
doxy
|
||||
ffmpeg
|
||||
ffplay
|
||||
@@ -44,10 +43,8 @@ tests/tiny_psnr
|
||||
tests/videogen
|
||||
tests/vsynth1
|
||||
tests/vsynth2
|
||||
tools/aviocat
|
||||
tools/cws2fws
|
||||
tools/graph2dot
|
||||
tools/ismindex
|
||||
tools/lavfi-showfiltfmts
|
||||
tools/pktdumper
|
||||
tools/probetest
|
||||
|
51
Changelog
51
Changelog
@@ -1,48 +1,11 @@
|
||||
Entries are sorted chronologically from oldest to youngest within each release,
|
||||
releases are sorted from youngest to oldest.
|
||||
|
||||
version next:
|
||||
|
||||
version 0.10:
|
||||
- Fixes: CVE-2011-3929, CVE-2011-3934, CVE-2011-3935, CVE-2011-3936,
|
||||
CVE-2011-3937, CVE-2011-3940, CVE-2011-3941, CVE-2011-3944,
|
||||
CVE-2011-3945, CVE-2011-3946, CVE-2011-3947, CVE-2011-3949,
|
||||
CVE-2011-3950, CVE-2011-3951, CVE-2011-3952
|
||||
- v410 Quicktime Uncompressed 4:4:4 10-bit encoder and decoder
|
||||
- SBaGen (SBG) binaural beats script demuxer
|
||||
- OpenMG Audio muxer
|
||||
- Timecode extraction in DV and MOV
|
||||
- thumbnail video filter
|
||||
- XML output in ffprobe
|
||||
- asplit audio filter
|
||||
- tinterlace video filter
|
||||
- astreamsync audio filter
|
||||
- amerge audio filter
|
||||
- ISMV (Smooth Streaming) muxer
|
||||
- GSM audio parser
|
||||
- SMJPEG muxer
|
||||
- XWD encoder and decoder
|
||||
- Automatic thread count based on detection number of (available) CPU cores
|
||||
- y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
|
||||
- ffprobe -show_error option
|
||||
- Avid 1:1 10-bit RGB Packer codec
|
||||
- v308 Quicktime Uncompressed 4:4:4 encoder and decoder
|
||||
- yuv4 libquicktime packed 4:2:0 encoder and decoder
|
||||
- ffprobe -show_frames option
|
||||
- silencedetect audio filter
|
||||
- ffprobe -show_program_version, -show_library_versions, -show_versions options
|
||||
- rv34: frame-level multi-threading
|
||||
- optimized iMDCT transform on x86 using SSE for for mpegaudiodec
|
||||
- Improved PGS subtitle decoder
|
||||
- dumpgraph option to lavfi device
|
||||
- r210 and r10k encoders
|
||||
- ffwavesynth decoder
|
||||
- aviocat tool
|
||||
- ffeval tool
|
||||
|
||||
|
||||
version 0.9:
|
||||
|
||||
- Indeo 4 decoder
|
||||
- SMJPEG demuxer
|
||||
- y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
|
||||
- openal input device added
|
||||
- boxblur filter added
|
||||
- BWF muxer
|
||||
@@ -165,7 +128,7 @@ easier to use. The changes are:
|
||||
- pan audio filter
|
||||
- IFF Amiga Continuous Bitmap (ACBM) decoder
|
||||
- ass filter
|
||||
- CRI ADX audio format muxer and demuxer
|
||||
- CRI ADX audio format demuxer
|
||||
- Playstation Portable PMP format demuxer
|
||||
- Microsoft Windows ICO demuxer
|
||||
- life source
|
||||
@@ -174,13 +137,11 @@ easier to use. The changes are:
|
||||
- new option: -report
|
||||
- Dxtory capture format decoder
|
||||
- cellauto source
|
||||
- Simple segmenting muxer
|
||||
- Indeo 4 decoder
|
||||
- SMJPEG demuxer
|
||||
|
||||
- v410 Quicktime Uncompressed 4:4:4 10-bit encoder and decoder
|
||||
|
||||
version 0.8:
|
||||
|
||||
|
||||
- many many things we forgot because we rather write code than changelogs
|
||||
- WebM support in Matroska de/muxer
|
||||
- low overhead Ogg muxing
|
||||
|
19
Doxyfile
19
Doxyfile
@@ -31,13 +31,7 @@ PROJECT_NAME = FFmpeg
|
||||
# This could be handy for archiving the generated documentation or
|
||||
# if some version control system is used.
|
||||
|
||||
PROJECT_NUMBER = 0.10
|
||||
|
||||
# With the PROJECT_LOGO tag one can specify an logo or icon that is included
|
||||
# in the documentation. The maximum height of the logo should not exceed 55
|
||||
# pixels and the maximum width should not exceed 200 pixels. Doxygen will
|
||||
# copy the logo to the output directory.
|
||||
PROJECT_LOGO =
|
||||
PROJECT_NUMBER = 0.9.4
|
||||
|
||||
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
|
||||
# base path where the generated documentation will be put.
|
||||
@@ -631,7 +625,8 @@ EXCLUDE_SYMLINKS = NO
|
||||
# for example use the pattern */test/*
|
||||
|
||||
EXCLUDE_PATTERNS = *.git \
|
||||
*.d
|
||||
*.d \
|
||||
avconv.c
|
||||
|
||||
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
|
||||
# (namespaces, classes, functions, etc.) that should be excluded from the
|
||||
@@ -766,7 +761,7 @@ ALPHABETICAL_INDEX = YES
|
||||
# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
|
||||
# in which this list will be split (can be a number in the range [1..20])
|
||||
|
||||
COLS_IN_ALPHA_INDEX = 2
|
||||
COLS_IN_ALPHA_INDEX = 5
|
||||
|
||||
# In case all classes in a project start with a common prefix, all
|
||||
# classes will be put under the same header in the alphabetical index.
|
||||
@@ -800,13 +795,13 @@ HTML_FILE_EXTENSION = .html
|
||||
# each generated HTML page. If it is left blank doxygen will generate a
|
||||
# standard header.
|
||||
|
||||
HTML_HEADER = doc/doxy/header.html
|
||||
HTML_HEADER =
|
||||
|
||||
# The HTML_FOOTER tag can be used to specify a personal HTML footer for
|
||||
# each generated HTML page. If it is left blank doxygen will generate a
|
||||
# standard footer.
|
||||
|
||||
HTML_FOOTER = doc/doxy/footer.html
|
||||
HTML_FOOTER =
|
||||
|
||||
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
|
||||
# style sheet that is used by each HTML page. It can be used to
|
||||
@@ -815,7 +810,7 @@ HTML_FOOTER = doc/doxy/footer.html
|
||||
# the style sheet file to the HTML output directory, so don't put your own
|
||||
# stylesheet in the HTML output directory as well, or it will be erased!
|
||||
|
||||
HTML_STYLESHEET = doc/doxy/doxy_stylesheet.css
|
||||
HTML_STYLESHEET =
|
||||
|
||||
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
|
||||
# Doxygen will adjust the colors in the stylesheet and background images
|
||||
|
15
MAINTAINERS
15
MAINTAINERS
@@ -87,8 +87,6 @@ Generic Parts:
|
||||
bitstream.c, bitstream.h Michael Niedermayer
|
||||
CABAC:
|
||||
cabac.h, cabac.c Michael Niedermayer
|
||||
codec names:
|
||||
codec_names.sh Nicolas George
|
||||
DSP utilities:
|
||||
dsputils.c, dsputils.h Michael Niedermayer
|
||||
entropy coding:
|
||||
@@ -142,7 +140,6 @@ Codecs:
|
||||
dv.c Roman Shaposhnik
|
||||
eacmv*, eaidct*, eat* Peter Ross
|
||||
ffv1.c Michael Niedermayer
|
||||
ffwavesynth.c Nicolas George
|
||||
flac* Justin Ruggles
|
||||
flashsv* Benjamin Larsson
|
||||
flicvideo.c Mike Melanson
|
||||
@@ -163,7 +160,6 @@ Codecs:
|
||||
jvdec.c Peter Ross
|
||||
kmvc.c Kostya Shishkov
|
||||
lcl*.c Roberto Togni, Reimar Doeffinger
|
||||
libcelt_dec.c Nicolas George
|
||||
libgsm.c Michel Bardiaux
|
||||
libdirac* David Conrad
|
||||
libopenjpeg.c Jaikrishnan Menon
|
||||
@@ -269,10 +265,6 @@ libavfilter
|
||||
===========
|
||||
|
||||
Video filters:
|
||||
graphdump.c Nicolas George
|
||||
af_amerge.c Nicolas George
|
||||
af_astreamsync.c Nicolas George
|
||||
af_pan.c Nicolas George
|
||||
vsrc_mandelbrot.c Michael Niedermayer
|
||||
vf_yadif.c Michael Niedermayer
|
||||
|
||||
@@ -333,7 +325,6 @@ Muxers/Demuxers:
|
||||
msnwc_tcp.c Ramiro Polla
|
||||
mtv.c Reynaldo H. Verdejo Pinochet
|
||||
mxf* Baptiste Coudurier
|
||||
mxfdec.c Tomas Härdin
|
||||
nsvdec.c Francois Revol
|
||||
nut.c Michael Niedermayer
|
||||
nuv.c Reimar Doeffinger
|
||||
@@ -353,7 +344,6 @@ Muxers/Demuxers:
|
||||
rtpdec_asf.* Ronald S. Bultje
|
||||
rtpenc_mpv.*, rtpenc_aac.* Martin Storsjo
|
||||
rtsp.c Luca Barbato
|
||||
sbgdec.c Nicolas George
|
||||
sdp.c Martin Storsjo
|
||||
segafilm.c Mike Melanson
|
||||
siff.c Kostya Shishkov
|
||||
@@ -395,6 +385,10 @@ x86 Michael Niedermayer
|
||||
Releases
|
||||
========
|
||||
|
||||
0.5 *Deprecated/Unmaintained*
|
||||
0.6 *Deprecated/Unmaintained*
|
||||
0.7 Michael Niedermayer
|
||||
0.8 Michael Niedermayer
|
||||
0.9 Michael Niedermayer
|
||||
|
||||
|
||||
@@ -417,7 +411,6 @@ Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
|
||||
Lou Logan 7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
|
||||
Luca Barbato 6677 4209 213C 8843 5B67 29E7 E84C 78C2 84E9 0E34
|
||||
Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
|
||||
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
|
||||
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
|
||||
Peter Ross A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
|
||||
Reimar Döffinger C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
|
||||
|
8
Makefile
8
Makefile
@@ -8,9 +8,9 @@ vpath %.S $(SRC_PATH)
|
||||
vpath %.asm $(SRC_PATH)
|
||||
vpath %.v $(SRC_PATH)
|
||||
vpath %.texi $(SRC_PATH)
|
||||
vpath %/fate_config.sh.template $(SRC_PATH)
|
||||
|
||||
PROGS-$(CONFIG_FFMPEG) += ffmpeg
|
||||
PROGS-$(CONFIG_AVCONV) += avconv
|
||||
PROGS-$(CONFIG_FFPLAY) += ffplay
|
||||
PROGS-$(CONFIG_FFPROBE) += ffprobe
|
||||
PROGS-$(CONFIG_FFSERVER) += ffserver
|
||||
@@ -23,7 +23,7 @@ HOSTPROGS := $(TESTTOOLS:%=tests/%)
|
||||
TOOLS = qt-faststart trasher
|
||||
TOOLS-$(CONFIG_ZLIB) += cws2fws
|
||||
|
||||
BASENAMES = ffmpeg ffplay ffprobe ffserver
|
||||
BASENAMES = ffmpeg avconv ffplay ffprobe ffserver
|
||||
ALLPROGS = $(BASENAMES:%=%$(PROGSSUF)$(EXESUF))
|
||||
ALLPROGS_G = $(BASENAMES:%=%$(PROGSSUF)_g$(EXESUF))
|
||||
ALLMANPAGES = $(BASENAMES:%=%.1)
|
||||
@@ -38,7 +38,7 @@ FFLIBS-$(CONFIG_SWSCALE) += swscale
|
||||
|
||||
FFLIBS := avutil
|
||||
|
||||
DATA_FILES := $(wildcard $(SRC_PATH)/presets/*.ffpreset) $(SRC_PATH)/doc/ffprobe.xsd
|
||||
DATA_FILES := $(wildcard $(SRC_PATH)/presets/*.ffpreset)
|
||||
|
||||
SKIPHEADERS = cmdutils_common_opts.h
|
||||
|
||||
@@ -77,8 +77,6 @@ define DOSUBDIR
|
||||
$(foreach V,$(SUBDIR_VARS),$(eval $(call RESET,$(V))))
|
||||
SUBDIR := $(1)/
|
||||
include $(SRC_PATH)/$(1)/Makefile
|
||||
-include $(SRC_PATH)/$(1)/$(ARCH)/Makefile
|
||||
include $(SRC_PATH)/library.mak
|
||||
endef
|
||||
|
||||
$(foreach D,$(FFLIBS),$(eval $(call DOSUBDIR,lib$(D))))
|
||||
|
437
cmdutils.c
437
cmdutils.c
@@ -33,10 +33,7 @@
|
||||
#include "libavfilter/avfilter.h"
|
||||
#include "libavdevice/avdevice.h"
|
||||
#include "libswscale/swscale.h"
|
||||
#include "libswresample/swresample.h"
|
||||
#if CONFIG_POSTPROC
|
||||
#include "libpostproc/postprocess.h"
|
||||
#endif
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
#include "libavutil/parseutils.h"
|
||||
@@ -56,15 +53,14 @@
|
||||
struct SwsContext *sws_opts;
|
||||
AVDictionary *format_opts, *codec_opts;
|
||||
|
||||
const int this_year = 2012;
|
||||
static const int this_year = 2014;
|
||||
|
||||
static FILE *report_file;
|
||||
|
||||
void init_opts(void)
|
||||
{
|
||||
#if CONFIG_SWSCALE
|
||||
sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC,
|
||||
NULL, NULL, NULL);
|
||||
sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC, NULL, NULL, NULL);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -78,7 +74,7 @@ void uninit_opts(void)
|
||||
av_dict_free(&codec_opts);
|
||||
}
|
||||
|
||||
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
|
||||
void log_callback_help(void* ptr, int level, const char* fmt, va_list vl)
|
||||
{
|
||||
vfprintf(stdout, fmt, vl);
|
||||
}
|
||||
@@ -97,20 +93,19 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
|
||||
fflush(report_file);
|
||||
}
|
||||
|
||||
double parse_number_or_die(const char *context, const char *numstr, int type,
|
||||
double min, double max)
|
||||
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
|
||||
{
|
||||
char *tail;
|
||||
const char *error;
|
||||
double d = av_strtod(numstr, &tail);
|
||||
if (*tail)
|
||||
error = "Expected number for %s but found: %s\n";
|
||||
error= "Expected number for %s but found: %s\n";
|
||||
else if (d < min || d > max)
|
||||
error = "The value for %s was %s which is not within %f - %f\n";
|
||||
else if (type == OPT_INT64 && (int64_t)d != d)
|
||||
error = "Expected int64 for %s but found %s\n";
|
||||
error= "The value for %s was %s which is not within %f - %f\n";
|
||||
else if(type == OPT_INT64 && (int64_t)d != d)
|
||||
error= "Expected int64 for %s but found %s\n";
|
||||
else if (type == OPT_INT && (int)d != d)
|
||||
error = "Expected int for %s but found %s\n";
|
||||
error= "Expected int for %s but found %s\n";
|
||||
else
|
||||
return d;
|
||||
av_log(NULL, AV_LOG_FATAL, error, context, numstr, min, max);
|
||||
@@ -118,8 +113,7 @@ double parse_number_or_die(const char *context, const char *numstr, int type,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int64_t parse_time_or_die(const char *context, const char *timestr,
|
||||
int is_duration)
|
||||
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
|
||||
{
|
||||
int64_t us;
|
||||
if (av_parse_time(&us, timestr, is_duration) < 0) {
|
||||
@@ -130,14 +124,13 @@ int64_t parse_time_or_die(const char *context, const char *timestr,
|
||||
return us;
|
||||
}
|
||||
|
||||
void show_help_options(const OptionDef *options, const char *msg, int mask,
|
||||
int value)
|
||||
void show_help_options(const OptionDef *options, const char *msg, int mask, int value)
|
||||
{
|
||||
const OptionDef *po;
|
||||
int first;
|
||||
|
||||
first = 1;
|
||||
for (po = options; po->name != NULL; po++) {
|
||||
for(po = options; po->name != NULL; po++) {
|
||||
char buf[64];
|
||||
if ((po->flags & mask) == value) {
|
||||
if (first) {
|
||||
@@ -164,8 +157,7 @@ void show_help_children(const AVClass *class, int flags)
|
||||
show_help_children(child, flags);
|
||||
}
|
||||
|
||||
static const OptionDef *find_option(const OptionDef *po, const char *name)
|
||||
{
|
||||
static const OptionDef* find_option(const OptionDef *po, const char *name){
|
||||
const char *p = strchr(name, ':');
|
||||
int len = p ? p - name : strlen(name);
|
||||
|
||||
@@ -212,8 +204,8 @@ static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr)
|
||||
buffsize += WideCharToMultiByte(CP_UTF8, 0, argv_w[i], -1,
|
||||
NULL, 0, NULL, NULL);
|
||||
|
||||
win32_argv_utf8 = av_mallocz(sizeof(char *) * (win32_argc + 1) + buffsize);
|
||||
argstr_flat = (char *)win32_argv_utf8 + sizeof(char *) * (win32_argc + 1);
|
||||
win32_argv_utf8 = av_mallocz(sizeof(char*) * (win32_argc + 1) + buffsize);
|
||||
argstr_flat = (char*)win32_argv_utf8 + sizeof(char*) * (win32_argc + 1);
|
||||
if (win32_argv_utf8 == NULL) {
|
||||
LocalFree(argv_w);
|
||||
return;
|
||||
@@ -238,8 +230,8 @@ static inline void prepare_app_arguments(int *argc_ptr, char ***argv_ptr)
|
||||
}
|
||||
#endif /* WIN32 && !__MINGW32CE__ */
|
||||
|
||||
int parse_option(void *optctx, const char *opt, const char *arg,
|
||||
const OptionDef *options)
|
||||
|
||||
int parse_option(void *optctx, const char *opt, const char *arg, const OptionDef *options)
|
||||
{
|
||||
const OptionDef *po;
|
||||
int bool_val = 1;
|
||||
@@ -268,14 +260,13 @@ unknown_opt:
|
||||
|
||||
/* new-style options contain an offset into optctx, old-style address of
|
||||
* a global var*/
|
||||
dst = po->flags & (OPT_OFFSET | OPT_SPEC) ? (uint8_t *)optctx + po->u.off
|
||||
: po->u.dst_ptr;
|
||||
dst = po->flags & (OPT_OFFSET|OPT_SPEC) ? (uint8_t*)optctx + po->u.off : po->u.dst_ptr;
|
||||
|
||||
if (po->flags & OPT_SPEC) {
|
||||
SpecifierOpt **so = dst;
|
||||
char *p = strchr(opt, ':');
|
||||
|
||||
dstcount = (int *)(so + 1);
|
||||
dstcount = (int*)(so + 1);
|
||||
*so = grow_array(*so, sizeof(**so), dstcount, *dstcount + 1);
|
||||
(*so)[*dstcount - 1].specifier = av_strdup(p ? p + 1 : "");
|
||||
dst = &(*so)[*dstcount - 1].u;
|
||||
@@ -284,25 +275,24 @@ unknown_opt:
|
||||
if (po->flags & OPT_STRING) {
|
||||
char *str;
|
||||
str = av_strdup(arg);
|
||||
*(char **)dst = str;
|
||||
*(char**)dst = str;
|
||||
} else if (po->flags & OPT_BOOL) {
|
||||
*(int *)dst = bool_val;
|
||||
*(int*)dst = bool_val;
|
||||
} else if (po->flags & OPT_INT) {
|
||||
*(int *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
|
||||
*(int*)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
|
||||
} else if (po->flags & OPT_INT64) {
|
||||
*(int64_t *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, INT64_MAX);
|
||||
*(int64_t*)dst = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, INT64_MAX);
|
||||
} else if (po->flags & OPT_TIME) {
|
||||
*(int64_t *)dst = parse_time_or_die(opt, arg, 1);
|
||||
*(int64_t*)dst = parse_time_or_die(opt, arg, 1);
|
||||
} else if (po->flags & OPT_FLOAT) {
|
||||
*(float *)dst = parse_number_or_die(opt, arg, OPT_FLOAT, -INFINITY, INFINITY);
|
||||
*(float*)dst = parse_number_or_die(opt, arg, OPT_FLOAT, -INFINITY, INFINITY);
|
||||
} else if (po->flags & OPT_DOUBLE) {
|
||||
*(double *)dst = parse_number_or_die(opt, arg, OPT_DOUBLE, -INFINITY, INFINITY);
|
||||
*(double*)dst = parse_number_or_die(opt, arg, OPT_DOUBLE, -INFINITY, INFINITY);
|
||||
} else if (po->u.func_arg) {
|
||||
int ret = po->flags & OPT_FUNC2 ? po->u.func2_arg(optctx, opt, arg)
|
||||
: po->u.func_arg(opt, arg);
|
||||
int ret = po->flags & OPT_FUNC2 ? po->u.func2_arg(optctx, opt, arg) :
|
||||
po->u.func_arg(opt, arg);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"Failed to set value '%s' for option '%s'\n", arg, opt);
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to set value '%s' for option '%s'\n", arg, opt);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@@ -312,7 +302,7 @@ unknown_opt:
|
||||
}
|
||||
|
||||
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options,
|
||||
void (*parse_arg_function)(void *, const char*))
|
||||
void (* parse_arg_function)(void *, const char*))
|
||||
{
|
||||
const char *opt;
|
||||
int optindex, handleoptions = 1, ret;
|
||||
@@ -345,8 +335,7 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options
|
||||
/*
|
||||
* Return index of option opt in argv or 0 if not found.
|
||||
*/
|
||||
static int locate_option(int argc, char **argv, const OptionDef *options,
|
||||
const char *optname)
|
||||
static int locate_option(int argc, char **argv, const OptionDef *options, const char *optname)
|
||||
{
|
||||
const OptionDef *po;
|
||||
int i;
|
||||
@@ -429,18 +418,15 @@ int opt_default(const char *opt, const char *arg)
|
||||
p = opt + strlen(opt);
|
||||
av_strlcpy(opt_stripped, opt, FFMIN(sizeof(opt_stripped), p - opt + 1));
|
||||
|
||||
if ((oc = av_opt_find(&cc, opt_stripped, NULL, 0,
|
||||
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) ||
|
||||
((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') &&
|
||||
(oc = av_opt_find(&cc, opt + 1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ))))
|
||||
if ((oc = av_opt_find(&cc, opt_stripped, NULL, 0, AV_OPT_SEARCH_CHILDREN|AV_OPT_SEARCH_FAKE_OBJ)) ||
|
||||
((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') &&
|
||||
(oc = av_opt_find(&cc, opt+1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ))))
|
||||
av_dict_set(&codec_opts, opt, arg, FLAGS(oc));
|
||||
if ((of = av_opt_find(&fc, opt, NULL, 0,
|
||||
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
|
||||
if ((of = av_opt_find(&fc, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
|
||||
av_dict_set(&format_opts, opt, arg, FLAGS(of));
|
||||
#if CONFIG_SWSCALE
|
||||
sc = sws_get_class();
|
||||
if ((os = av_opt_find(&sc, opt, NULL, 0,
|
||||
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
|
||||
if ((os = av_opt_find(&sc, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
|
||||
// XXX we only support sws_flags, not arbitrary sws options
|
||||
int ret = av_opt_set(sws_opts, opt, arg, 0);
|
||||
if (ret < 0) {
|
||||
@@ -523,20 +509,6 @@ int opt_report(const char *opt)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int opt_max_alloc(const char *opt, const char *arg)
|
||||
{
|
||||
char *tail;
|
||||
size_t max;
|
||||
|
||||
max = strtol(arg, &tail, 10);
|
||||
if (*tail) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Invalid max_alloc \"%s\".\n", arg);
|
||||
exit_program(1);
|
||||
}
|
||||
av_max_alloc(max);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int opt_codec_debug(const char *opt, const char *arg)
|
||||
{
|
||||
av_log_set_level(AV_LOG_DEBUG);
|
||||
@@ -571,14 +543,13 @@ static int warned_cfg = 0;
|
||||
#define INDENT 1
|
||||
#define SHOW_VERSION 2
|
||||
#define SHOW_CONFIG 4
|
||||
#define SHOW_COPYRIGHT 8
|
||||
|
||||
#define PRINT_LIB_INFO(libname, LIBNAME, flags, level) \
|
||||
if (CONFIG_##LIBNAME) { \
|
||||
const char *indent = flags & INDENT? " " : ""; \
|
||||
if (flags & SHOW_VERSION) { \
|
||||
unsigned int version = libname##_version(); \
|
||||
av_log(NULL, level, "%slib%-11s %2d.%3d.%3d / %2d.%3d.%3d\n",\
|
||||
av_log(NULL, level, "%slib%-9s %2d.%3d.%2d / %2d.%3d.%2d\n",\
|
||||
indent, #libname, \
|
||||
LIB##LIBNAME##_VERSION_MAJOR, \
|
||||
LIB##LIBNAME##_VERSION_MINOR, \
|
||||
@@ -608,24 +579,7 @@ static void print_all_libs_info(int flags, int level)
|
||||
PRINT_LIB_INFO(avdevice, AVDEVICE, flags, level);
|
||||
PRINT_LIB_INFO(avfilter, AVFILTER, flags, level);
|
||||
PRINT_LIB_INFO(swscale, SWSCALE, flags, level);
|
||||
PRINT_LIB_INFO(swresample,SWRESAMPLE, flags, level);
|
||||
#if CONFIG_POSTPROC
|
||||
PRINT_LIB_INFO(postproc, POSTPROC, flags, level);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void print_program_info(int flags, int level)
|
||||
{
|
||||
const char *indent = flags & INDENT? " " : "";
|
||||
|
||||
av_log(NULL, level, "%s version " FFMPEG_VERSION, program_name);
|
||||
if (flags & SHOW_COPYRIGHT)
|
||||
av_log(NULL, level, " Copyright (c) %d-%d the FFmpeg developers",
|
||||
program_birth_year, this_year);
|
||||
av_log(NULL, level, "\n");
|
||||
av_log(NULL, level, "%sbuilt on %s %s with %s %s\n",
|
||||
indent, __DATE__, __TIME__, CC_TYPE, CC_VERSION);
|
||||
av_log(NULL, level, "%sconfiguration: " FFMPEG_CONFIGURATION "\n", indent);
|
||||
}
|
||||
|
||||
void show_banner(int argc, char **argv, const OptionDef *options)
|
||||
@@ -634,14 +588,18 @@ void show_banner(int argc, char **argv, const OptionDef *options)
|
||||
if (idx)
|
||||
return;
|
||||
|
||||
print_program_info (INDENT|SHOW_COPYRIGHT, AV_LOG_INFO);
|
||||
av_log(NULL, AV_LOG_INFO, "%s version " FFMPEG_VERSION ", Copyright (c) %d-%d the FFmpeg developers\n",
|
||||
program_name, program_birth_year, this_year);
|
||||
av_log(NULL, AV_LOG_INFO, " built on %s %s with %s %s\n",
|
||||
__DATE__, __TIME__, CC_TYPE, CC_VERSION);
|
||||
av_log(NULL, AV_LOG_INFO, " configuration: " FFMPEG_CONFIGURATION "\n");
|
||||
print_all_libs_info(INDENT|SHOW_CONFIG, AV_LOG_INFO);
|
||||
print_all_libs_info(INDENT|SHOW_VERSION, AV_LOG_INFO);
|
||||
}
|
||||
|
||||
int opt_version(const char *opt, const char *arg) {
|
||||
av_log_set_callback(log_callback_help);
|
||||
print_program_info (0 , AV_LOG_INFO);
|
||||
printf("%s " FFMPEG_VERSION "\n", program_name);
|
||||
print_all_libs_info(SHOW_VERSION, AV_LOG_INFO);
|
||||
return 0;
|
||||
}
|
||||
@@ -718,133 +676,137 @@ int opt_license(const char *opt, const char *arg)
|
||||
|
||||
int opt_formats(const char *opt, const char *arg)
|
||||
{
|
||||
AVInputFormat *ifmt = NULL;
|
||||
AVOutputFormat *ofmt = NULL;
|
||||
AVInputFormat *ifmt=NULL;
|
||||
AVOutputFormat *ofmt=NULL;
|
||||
const char *last_name;
|
||||
|
||||
printf("File formats:\n"
|
||||
" D. = Demuxing supported\n"
|
||||
" .E = Muxing supported\n"
|
||||
" --\n");
|
||||
last_name = "000";
|
||||
for (;;) {
|
||||
int decode = 0;
|
||||
int encode = 0;
|
||||
const char *name = NULL;
|
||||
const char *long_name = NULL;
|
||||
printf(
|
||||
"File formats:\n"
|
||||
" D. = Demuxing supported\n"
|
||||
" .E = Muxing supported\n"
|
||||
" --\n");
|
||||
last_name= "000";
|
||||
for(;;){
|
||||
int decode=0;
|
||||
int encode=0;
|
||||
const char *name=NULL;
|
||||
const char *long_name=NULL;
|
||||
|
||||
while ((ofmt = av_oformat_next(ofmt))) {
|
||||
if ((name == NULL || strcmp(ofmt->name, name) < 0) &&
|
||||
strcmp(ofmt->name, last_name) > 0) {
|
||||
name = ofmt->name;
|
||||
long_name = ofmt->long_name;
|
||||
encode = 1;
|
||||
while((ofmt= av_oformat_next(ofmt))) {
|
||||
if((name == NULL || strcmp(ofmt->name, name)<0) &&
|
||||
strcmp(ofmt->name, last_name)>0){
|
||||
name= ofmt->name;
|
||||
long_name= ofmt->long_name;
|
||||
encode=1;
|
||||
}
|
||||
}
|
||||
while ((ifmt = av_iformat_next(ifmt))) {
|
||||
if ((name == NULL || strcmp(ifmt->name, name) < 0) &&
|
||||
strcmp(ifmt->name, last_name) > 0) {
|
||||
name = ifmt->name;
|
||||
long_name = ifmt->long_name;
|
||||
encode = 0;
|
||||
while((ifmt= av_iformat_next(ifmt))) {
|
||||
if((name == NULL || strcmp(ifmt->name, name)<0) &&
|
||||
strcmp(ifmt->name, last_name)>0){
|
||||
name= ifmt->name;
|
||||
long_name= ifmt->long_name;
|
||||
encode=0;
|
||||
}
|
||||
if (name && strcmp(ifmt->name, name) == 0)
|
||||
decode = 1;
|
||||
if(name && strcmp(ifmt->name, name)==0)
|
||||
decode=1;
|
||||
}
|
||||
if (name == NULL)
|
||||
if(name==NULL)
|
||||
break;
|
||||
last_name = name;
|
||||
last_name= name;
|
||||
|
||||
printf(" %s%s %-15s %s\n",
|
||||
decode ? "D" : " ",
|
||||
encode ? "E" : " ",
|
||||
name,
|
||||
printf(
|
||||
" %s%s %-15s %s\n",
|
||||
decode ? "D":" ",
|
||||
encode ? "E":" ",
|
||||
name,
|
||||
long_name ? long_name:" ");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static char get_media_type_char(enum AVMediaType type)
|
||||
{
|
||||
static const char map[AVMEDIA_TYPE_NB] = {
|
||||
[AVMEDIA_TYPE_VIDEO] = 'V',
|
||||
[AVMEDIA_TYPE_AUDIO] = 'A',
|
||||
[AVMEDIA_TYPE_DATA] = 'D',
|
||||
[AVMEDIA_TYPE_SUBTITLE] = 'S',
|
||||
[AVMEDIA_TYPE_ATTACHMENT] = 'T',
|
||||
};
|
||||
return type >= 0 && type < AVMEDIA_TYPE_NB && map[type] ? map[type] : '?';
|
||||
}
|
||||
|
||||
int opt_codecs(const char *opt, const char *arg)
|
||||
{
|
||||
AVCodec *p = NULL, *p2;
|
||||
AVCodec *p=NULL, *p2;
|
||||
const char *last_name;
|
||||
printf("Codecs:\n"
|
||||
" D..... = Decoding supported\n"
|
||||
" .E.... = Encoding supported\n"
|
||||
" ..V... = Video codec\n"
|
||||
" ..A... = Audio codec\n"
|
||||
" ..S... = Subtitle codec\n"
|
||||
" ...S.. = Supports draw_horiz_band\n"
|
||||
" ....D. = Supports direct rendering method 1\n"
|
||||
" .....T = Supports weird frame truncation\n"
|
||||
" ------\n");
|
||||
printf(
|
||||
"Codecs:\n"
|
||||
" D..... = Decoding supported\n"
|
||||
" .E.... = Encoding supported\n"
|
||||
" ..V... = Video codec\n"
|
||||
" ..A... = Audio codec\n"
|
||||
" ..S... = Subtitle codec\n"
|
||||
" ...S.. = Supports draw_horiz_band\n"
|
||||
" ....D. = Supports direct rendering method 1\n"
|
||||
" .....T = Supports weird frame truncation\n"
|
||||
" ------\n");
|
||||
last_name= "000";
|
||||
for (;;) {
|
||||
int decode = 0;
|
||||
int encode = 0;
|
||||
int cap = 0;
|
||||
for(;;){
|
||||
int decode=0;
|
||||
int encode=0;
|
||||
int cap=0;
|
||||
const char *type_str;
|
||||
|
||||
p2 = NULL;
|
||||
while ((p = av_codec_next(p))) {
|
||||
if ((p2 == NULL || strcmp(p->name, p2->name) < 0) &&
|
||||
strcmp(p->name, last_name) > 0) {
|
||||
p2 = p;
|
||||
decode = encode = cap = 0;
|
||||
p2=NULL;
|
||||
while((p= av_codec_next(p))) {
|
||||
if((p2==NULL || strcmp(p->name, p2->name)<0) &&
|
||||
strcmp(p->name, last_name)>0){
|
||||
p2= p;
|
||||
decode= encode= cap=0;
|
||||
}
|
||||
if (p2 && strcmp(p->name, p2->name) == 0) {
|
||||
if (p->decode)
|
||||
decode = 1;
|
||||
if (p->encode)
|
||||
encode = 1;
|
||||
if(p2 && strcmp(p->name, p2->name)==0){
|
||||
if(p->decode) decode=1;
|
||||
if(p->encode) encode=1;
|
||||
cap |= p->capabilities;
|
||||
}
|
||||
}
|
||||
if (p2 == NULL)
|
||||
if(p2==NULL)
|
||||
break;
|
||||
last_name = p2->name;
|
||||
last_name= p2->name;
|
||||
|
||||
printf(" %s%s%c%s%s%s %-15s %s",
|
||||
decode ? "D" : (/* p2->decoder ? "d" : */ " "),
|
||||
encode ? "E" : " ",
|
||||
get_media_type_char(p2->type),
|
||||
cap & CODEC_CAP_DRAW_HORIZ_BAND ? "S" : " ",
|
||||
cap & CODEC_CAP_DR1 ? "D" : " ",
|
||||
cap & CODEC_CAP_TRUNCATED ? "T" : " ",
|
||||
p2->name,
|
||||
p2->long_name ? p2->long_name : "");
|
||||
#if 0
|
||||
if (p2->decoder && decode == 0)
|
||||
printf(" use %s for decoding", p2->decoder->name);
|
||||
#endif
|
||||
switch(p2->type) {
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
type_str = "V";
|
||||
break;
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
type_str = "A";
|
||||
break;
|
||||
case AVMEDIA_TYPE_SUBTITLE:
|
||||
type_str = "S";
|
||||
break;
|
||||
default:
|
||||
type_str = "?";
|
||||
break;
|
||||
}
|
||||
printf(
|
||||
" %s%s%s%s%s%s %-15s %s",
|
||||
decode ? "D": (/*p2->decoder ? "d":*/" "),
|
||||
encode ? "E":" ",
|
||||
type_str,
|
||||
cap & CODEC_CAP_DRAW_HORIZ_BAND ? "S":" ",
|
||||
cap & CODEC_CAP_DR1 ? "D":" ",
|
||||
cap & CODEC_CAP_TRUNCATED ? "T":" ",
|
||||
p2->name,
|
||||
p2->long_name ? p2->long_name : "");
|
||||
/* if(p2->decoder && decode==0)
|
||||
printf(" use %s for decoding", p2->decoder->name);*/
|
||||
printf("\n");
|
||||
}
|
||||
printf("\n");
|
||||
printf("Note, the names of encoders and decoders do not always match, so there are\n"
|
||||
"several cases where the above table shows encoder only or decoder only entries\n"
|
||||
"even though both encoding and decoding are supported. For example, the h263\n"
|
||||
"decoder corresponds to the h263 and h263p encoders, for file formats it is even\n"
|
||||
"worse.\n");
|
||||
printf(
|
||||
"Note, the names of encoders and decoders do not always match, so there are\n"
|
||||
"several cases where the above table shows encoder only or decoder only entries\n"
|
||||
"even though both encoding and decoding are supported. For example, the h263\n"
|
||||
"decoder corresponds to the h263 and h263p encoders, for file formats it is even\n"
|
||||
"worse.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int opt_bsfs(const char *opt, const char *arg)
|
||||
{
|
||||
AVBitStreamFilter *bsf = NULL;
|
||||
AVBitStreamFilter *bsf=NULL;
|
||||
|
||||
printf("Bitstream filters:\n");
|
||||
while ((bsf = av_bitstream_filter_next(bsf)))
|
||||
while((bsf = av_bitstream_filter_next(bsf)))
|
||||
printf("%s\n", bsf->name);
|
||||
printf("\n");
|
||||
return 0;
|
||||
@@ -872,31 +834,11 @@ int opt_protocols(const char *opt, const char *arg)
|
||||
int opt_filters(const char *opt, const char *arg)
|
||||
{
|
||||
AVFilter av_unused(**filter) = NULL;
|
||||
char descr[64], *descr_cur;
|
||||
int i, j;
|
||||
const AVFilterPad *pad;
|
||||
|
||||
printf("Filters:\n");
|
||||
#if CONFIG_AVFILTER
|
||||
while ((filter = av_filter_next(filter)) && *filter) {
|
||||
descr_cur = descr;
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (i) {
|
||||
*(descr_cur++) = '-';
|
||||
*(descr_cur++) = '>';
|
||||
}
|
||||
pad = i ? (*filter)->outputs : (*filter)->inputs;
|
||||
for (j = 0; pad[j].name; j++) {
|
||||
if (descr_cur >= descr + sizeof(descr) - 4)
|
||||
break;
|
||||
*(descr_cur++) = get_media_type_char(pad[j].type);
|
||||
}
|
||||
if (!j)
|
||||
*(descr_cur++) = '|';
|
||||
}
|
||||
*descr_cur = 0;
|
||||
printf("%-16s %-10s %s\n", (*filter)->name, descr, (*filter)->description);
|
||||
}
|
||||
while ((filter = av_filter_next(filter)) && *filter)
|
||||
printf("%-16s %s\n", (*filter)->name, (*filter)->description);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
@@ -905,14 +847,15 @@ int opt_pix_fmts(const char *opt, const char *arg)
|
||||
{
|
||||
enum PixelFormat pix_fmt;
|
||||
|
||||
printf("Pixel formats:\n"
|
||||
"I.... = Supported Input format for conversion\n"
|
||||
".O... = Supported Output format for conversion\n"
|
||||
"..H.. = Hardware accelerated format\n"
|
||||
"...P. = Paletted format\n"
|
||||
"....B = Bitstream format\n"
|
||||
"FLAGS NAME NB_COMPONENTS BITS_PER_PIXEL\n"
|
||||
"-----\n");
|
||||
printf(
|
||||
"Pixel formats:\n"
|
||||
"I.... = Supported Input format for conversion\n"
|
||||
".O... = Supported Output format for conversion\n"
|
||||
"..H.. = Hardware accelerated format\n"
|
||||
"...P. = Paletted format\n"
|
||||
"....B = Bitstream format\n"
|
||||
"FLAGS NAME NB_COMPONENTS BITS_PER_PIXEL\n"
|
||||
"-----\n");
|
||||
|
||||
#if !CONFIG_SWSCALE
|
||||
# define sws_isSupportedInput(x) 0
|
||||
@@ -962,8 +905,7 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
|
||||
FILE *f = fopen(filename, "rb");
|
||||
|
||||
if (!f) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename,
|
||||
strerror(errno));
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename, strerror(errno));
|
||||
return AVERROR(errno);
|
||||
}
|
||||
fseek(f, 0, SEEK_END);
|
||||
@@ -994,14 +936,14 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
|
||||
}
|
||||
|
||||
FILE *get_preset_file(char *filename, size_t filename_size,
|
||||
const char *preset_name, int is_path,
|
||||
const char *codec_name)
|
||||
const char *preset_name, int is_path, const char *codec_name)
|
||||
{
|
||||
FILE *f = NULL;
|
||||
int i;
|
||||
const char *base[3] = { getenv("FFMPEG_DATADIR"),
|
||||
getenv("HOME"),
|
||||
FFMPEG_DATADIR, };
|
||||
const char *base[3]= { getenv("FFMPEG_DATADIR"),
|
||||
getenv("HOME"),
|
||||
FFMPEG_DATADIR,
|
||||
};
|
||||
|
||||
if (is_path) {
|
||||
av_strlcpy(filename, preset_name, filename_size);
|
||||
@@ -1027,14 +969,11 @@ FILE *get_preset_file(char *filename, size_t filename_size,
|
||||
for (i = 0; i < 3 && !f; i++) {
|
||||
if (!base[i])
|
||||
continue;
|
||||
snprintf(filename, filename_size, "%s%s/%s.ffpreset", base[i],
|
||||
i != 1 ? "" : "/.ffmpeg", preset_name);
|
||||
snprintf(filename, filename_size, "%s%s/%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", preset_name);
|
||||
f = fopen(filename, "r");
|
||||
if (!f && codec_name) {
|
||||
snprintf(filename, filename_size,
|
||||
"%s%s/%s-%s.ffpreset",
|
||||
base[i], i != 1 ? "" : "/.ffmpeg", codec_name,
|
||||
preset_name);
|
||||
"%s%s/%s-%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", codec_name, preset_name);
|
||||
f = fopen(filename, "r");
|
||||
}
|
||||
}
|
||||
@@ -1045,23 +984,22 @@ FILE *get_preset_file(char *filename, size_t filename_size,
|
||||
|
||||
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
|
||||
{
|
||||
if (*spec <= '9' && *spec >= '0') /* opt:index */
|
||||
if (*spec <= '9' && *spec >= '0') /* opt:index */
|
||||
return strtol(spec, NULL, 0) == st->index;
|
||||
else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
|
||||
*spec == 't') { /* opt:[vasdt] */
|
||||
else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' || *spec == 't') { /* opt:[vasdt] */
|
||||
enum AVMediaType type;
|
||||
|
||||
switch (*spec++) {
|
||||
case 'v': type = AVMEDIA_TYPE_VIDEO; break;
|
||||
case 'a': type = AVMEDIA_TYPE_AUDIO; break;
|
||||
case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
|
||||
case 'd': type = AVMEDIA_TYPE_DATA; break;
|
||||
case 'v': type = AVMEDIA_TYPE_VIDEO; break;
|
||||
case 'a': type = AVMEDIA_TYPE_AUDIO; break;
|
||||
case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
|
||||
case 'd': type = AVMEDIA_TYPE_DATA; break;
|
||||
case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
|
||||
default: abort(); // never reached, silence warning
|
||||
}
|
||||
if (type != st->codec->codec_type)
|
||||
return 0;
|
||||
if (*spec++ == ':') { /* possibly followed by :index */
|
||||
if (*spec++ == ':') { /* possibly followed by :index */
|
||||
int i, index = strtol(spec, NULL, 0);
|
||||
for (i = 0; i < s->nb_streams; i++)
|
||||
if (s->streams[i]->codec->codec_type == type && index-- == 0)
|
||||
@@ -1080,9 +1018,8 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
|
||||
|
||||
if (*endptr++ == ':') {
|
||||
int stream_idx = strtol(endptr, NULL, 0);
|
||||
return stream_idx >= 0 &&
|
||||
stream_idx < s->programs[i]->nb_stream_indexes &&
|
||||
st->index == s->programs[i]->stream_index[stream_idx];
|
||||
return (stream_idx >= 0 && stream_idx < s->programs[i]->nb_stream_indexes &&
|
||||
st->index == s->programs[i]->stream_index[stream_idx]);
|
||||
}
|
||||
|
||||
for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
|
||||
@@ -1097,13 +1034,11 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
|
||||
AVFormatContext *s, AVStream *st)
|
||||
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatContext *s, AVStream *st)
|
||||
{
|
||||
AVDictionary *ret = NULL;
|
||||
AVDictionaryEntry *t = NULL;
|
||||
int flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM
|
||||
: AV_OPT_FLAG_DECODING_PARAM;
|
||||
int flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM : AV_OPT_FLAG_DECODING_PARAM;
|
||||
char prefix = 0;
|
||||
const AVClass *cc = avcodec_get_class();
|
||||
|
||||
@@ -1111,18 +1046,9 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
|
||||
return NULL;
|
||||
|
||||
switch (codec->type) {
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
prefix = 'v';
|
||||
flags |= AV_OPT_FLAG_VIDEO_PARAM;
|
||||
break;
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
prefix = 'a';
|
||||
flags |= AV_OPT_FLAG_AUDIO_PARAM;
|
||||
break;
|
||||
case AVMEDIA_TYPE_SUBTITLE:
|
||||
prefix = 's';
|
||||
flags |= AV_OPT_FLAG_SUBTITLE_PARAM;
|
||||
break;
|
||||
case AVMEDIA_TYPE_VIDEO: prefix = 'v'; flags |= AV_OPT_FLAG_VIDEO_PARAM; break;
|
||||
case AVMEDIA_TYPE_AUDIO: prefix = 'a'; flags |= AV_OPT_FLAG_AUDIO_PARAM; break;
|
||||
case AVMEDIA_TYPE_SUBTITLE: prefix = 's'; flags |= AV_OPT_FLAG_SUBTITLE_PARAM; break;
|
||||
}
|
||||
|
||||
while (t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX)) {
|
||||
@@ -1137,14 +1063,10 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
|
||||
}
|
||||
|
||||
if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
|
||||
(codec && codec->priv_class &&
|
||||
av_opt_find(&codec->priv_class, t->key, NULL, flags,
|
||||
AV_OPT_SEARCH_FAKE_OBJ)))
|
||||
(codec && codec->priv_class && av_opt_find(&codec->priv_class, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ)))
|
||||
av_dict_set(&ret, t->key, t->value, 0);
|
||||
else if (t->key[0] == prefix &&
|
||||
av_opt_find(&cc, t->key + 1, NULL, flags,
|
||||
AV_OPT_SEARCH_FAKE_OBJ))
|
||||
av_dict_set(&ret, t->key + 1, t->value, 0);
|
||||
else if (t->key[0] == prefix && av_opt_find(&cc, t->key+1, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ))
|
||||
av_dict_set(&ret, t->key+1, t->value, 0);
|
||||
|
||||
if (p)
|
||||
*p = ':';
|
||||
@@ -1152,8 +1074,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
|
||||
return ret;
|
||||
}
|
||||
|
||||
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
|
||||
AVDictionary *codec_opts)
|
||||
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
|
||||
{
|
||||
int i;
|
||||
AVDictionary **opts;
|
||||
@@ -1162,13 +1083,11 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
|
||||
return NULL;
|
||||
opts = av_mallocz(s->nb_streams * sizeof(*opts));
|
||||
if (!opts) {
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"Could not alloc memory for stream options.\n");
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not alloc memory for stream options.\n");
|
||||
return NULL;
|
||||
}
|
||||
for (i = 0; i < s->nb_streams; i++)
|
||||
opts[i] = filter_codec_opts(codec_opts, avcodec_find_decoder(s->streams[i]->codec->codec_id),
|
||||
s, s->streams[i]);
|
||||
opts[i] = filter_codec_opts(codec_opts, avcodec_find_decoder(s->streams[i]->codec->codec_id), s, s->streams[i]);
|
||||
return opts;
|
||||
}
|
||||
|
||||
|
31
cmdutils.h
31
cmdutils.h
@@ -43,11 +43,6 @@ extern const char program_name[];
|
||||
*/
|
||||
extern const int program_birth_year;
|
||||
|
||||
/**
|
||||
* this year, defined by the program for show_banner()
|
||||
*/
|
||||
extern const int this_year;
|
||||
|
||||
extern AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB];
|
||||
extern AVFormatContext *avformat_opts;
|
||||
extern struct SwsContext *sws_opts;
|
||||
@@ -83,8 +78,6 @@ int opt_loglevel(const char *opt, const char *arg);
|
||||
|
||||
int opt_report(const char *opt);
|
||||
|
||||
int opt_max_alloc(const char *opt, const char *arg);
|
||||
|
||||
int opt_codec_debug(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
@@ -98,15 +91,14 @@ int opt_timelimit(const char *opt, const char *arg);
|
||||
* parsed or the corresponding value is invalid.
|
||||
*
|
||||
* @param context the context of the value to be set (e.g. the
|
||||
* corresponding command line option name)
|
||||
* corresponding commandline option name)
|
||||
* @param numstr the string to be parsed
|
||||
* @param type the type (OPT_INT64 or OPT_FLOAT) as which the
|
||||
* string should be parsed
|
||||
* @param min the minimum valid accepted value
|
||||
* @param max the maximum valid accepted value
|
||||
*/
|
||||
double parse_number_or_die(const char *context, const char *numstr, int type,
|
||||
double min, double max);
|
||||
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max);
|
||||
|
||||
/**
|
||||
* Parse a string specifying a time and return its corresponding
|
||||
@@ -114,7 +106,7 @@ double parse_number_or_die(const char *context, const char *numstr, int type,
|
||||
* the string cannot be correctly parsed.
|
||||
*
|
||||
* @param context the context of the value to be set (e.g. the
|
||||
* corresponding command line option name)
|
||||
* corresponding commandline option name)
|
||||
* @param timestr the string to be parsed
|
||||
* @param is_duration a flag which tells how to interpret timestr, if
|
||||
* not zero timestr is interpreted as a duration, otherwise as a
|
||||
@@ -122,8 +114,7 @@ double parse_number_or_die(const char *context, const char *numstr, int type,
|
||||
*
|
||||
* @see parse_date()
|
||||
*/
|
||||
int64_t parse_time_or_die(const char *context, const char *timestr,
|
||||
int is_duration);
|
||||
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration);
|
||||
|
||||
typedef struct SpecifierOpt {
|
||||
char *specifier; /**< stream/chapter/program/... specifier */
|
||||
@@ -169,8 +160,7 @@ typedef struct {
|
||||
const char *argname;
|
||||
} OptionDef;
|
||||
|
||||
void show_help_options(const OptionDef *options, const char *msg, int mask,
|
||||
int value);
|
||||
void show_help_options(const OptionDef *options, const char *msg, int mask, int value);
|
||||
|
||||
/**
|
||||
* Show help for all options with given flags in class and all its
|
||||
@@ -196,11 +186,10 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options
|
||||
*
|
||||
* @return on success 1 if arg was consumed, 0 otherwise; negative number on error
|
||||
*/
|
||||
int parse_option(void *optctx, const char *opt, const char *arg,
|
||||
const OptionDef *options);
|
||||
int parse_option(void *optctx, const char *opt, const char *arg, const OptionDef *options);
|
||||
|
||||
/**
|
||||
* Find the '-loglevel' option in the command line args and apply it.
|
||||
* Find the '-loglevel' option in the commandline args and apply it.
|
||||
*/
|
||||
void parse_loglevel(int argc, char **argv, const OptionDef *options);
|
||||
|
||||
@@ -225,8 +214,7 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec);
|
||||
* @param st A stream from s for which the options should be filtered.
|
||||
* @return a pointer to the created dictionary
|
||||
*/
|
||||
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
|
||||
AVFormatContext *s, AVStream *st);
|
||||
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatContext *s, AVStream *st);
|
||||
|
||||
/**
|
||||
* Setup AVCodecContext options for avformat_find_stream_info().
|
||||
@@ -239,8 +227,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
|
||||
* @return pointer to the created array of dictionaries, NULL if it
|
||||
* cannot be created
|
||||
*/
|
||||
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
|
||||
AVDictionary *codec_opts);
|
||||
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts);
|
||||
|
||||
/**
|
||||
* Print an error message to stderr, indicating filename and a human
|
||||
|
@@ -15,4 +15,3 @@
|
||||
{ "v", HAS_ARG, {(void*)opt_loglevel}, "set libav* logging level", "loglevel" },
|
||||
{ "debug", HAS_ARG, {(void*)opt_codec_debug}, "set debug flags", "flags" },
|
||||
{ "report", 0, {(void*)opt_report}, "generate a report" },
|
||||
{ "max_alloc", HAS_ARG, {(void*)opt_max_alloc}, "set maximum size of a single allocated block", "bytes" },
|
||||
|
240
configure
vendored
240
configure
vendored
@@ -81,6 +81,7 @@ Configuration options:
|
||||
and binaries will be unredistributable [no]
|
||||
--disable-doc do not build documentation
|
||||
--disable-ffmpeg disable ffmpeg build
|
||||
--enable-avconv enable avconv build
|
||||
--disable-ffplay disable ffplay build
|
||||
--disable-ffprobe disable ffprobe build
|
||||
--disable-ffserver disable ffserver build
|
||||
@@ -217,13 +218,12 @@ Advanced options (experts only):
|
||||
--target-os=OS compiler targets OS [$target_os]
|
||||
--target-exec=CMD command to run executables on target
|
||||
--target-path=DIR path to view of build directory on target
|
||||
--nm=NM use nm tool NM [$nm_default]
|
||||
--nm=NM use nm tool
|
||||
--ar=AR use archive tool AR [$ar_default]
|
||||
--as=AS use assembler AS [$as_default]
|
||||
--yasmexe=EXE use yasm-compatible assembler EXE [$yasmexe_default]
|
||||
--cc=CC use C compiler CC [$cc_default]
|
||||
--cxx=CXX use C compiler CXX [$cxx_default]
|
||||
--ld=LD use linker LD [$ld_default]
|
||||
--ld=LD use linker LD
|
||||
--host-cc=HOSTCC use host C compiler HOSTCC
|
||||
--host-cflags=HCFLAGS use HCFLAGS when compiling for host
|
||||
--host-ldflags=HLDFLAGS use HLDFLAGS when linking for host
|
||||
@@ -253,7 +253,7 @@ Advanced options (experts only):
|
||||
--disable-armvfp disable ARM VFP optimizations
|
||||
--disable-iwmmxt disable iwmmxt optimizations
|
||||
--disable-mmi disable MMI optimizations
|
||||
--disable-neon disable NEON optimizations
|
||||
--disable-neon disable neon optimizations
|
||||
--disable-vis disable VIS optimizations
|
||||
--disable-yasm disable use of yasm assembler
|
||||
--enable-pic build position-independent code
|
||||
@@ -261,8 +261,6 @@ Advanced options (experts only):
|
||||
--enable-sram allow use of on-chip SRAM
|
||||
--disable-symver disable symbol versioning
|
||||
--optflags override optimization-related compiler flags
|
||||
--postproc-version=V build libpostproc version V.
|
||||
Where V can be '$ALT_PP_VER_MAJOR.$ALT_PP_VER_MINOR.$ALT_PP_VER_MICRO' or 'current'. [$postproc_version_default]
|
||||
|
||||
Developer options (useful when working on FFmpeg itself):
|
||||
--enable-coverage build with test coverage instrumentation
|
||||
@@ -271,9 +269,6 @@ Developer options (useful when working on FFmpeg itself):
|
||||
--disable-optimizations disable compiler optimizations
|
||||
--enable-extra-warnings enable more compiler warnings
|
||||
--disable-stripping disable stripping of executables and shared libraries
|
||||
--valgrind=VALGRIND run "make fate" tests through valgrind to detect memory
|
||||
leaks and errors, using the specified valgrind binary.
|
||||
Cannot be combined with --target-exec
|
||||
--samples=PATH location of test samples for FATE, if not set use
|
||||
\$FATE_SAMPLES at make invocation time.
|
||||
|
||||
@@ -986,19 +981,8 @@ COMPONENT_LIST="
|
||||
protocols
|
||||
"
|
||||
|
||||
PROGRAM_LIST="
|
||||
ffplay
|
||||
ffprobe
|
||||
ffserver
|
||||
ffmpeg
|
||||
"
|
||||
|
||||
CONFIG_LIST="
|
||||
$COMPONENT_LIST
|
||||
$PROGRAM_LIST
|
||||
avplay
|
||||
avprobe
|
||||
avserver
|
||||
aandct
|
||||
ac3dsp
|
||||
avcodec
|
||||
@@ -1013,13 +997,17 @@ CONFIG_LIST="
|
||||
dwt
|
||||
dxva2
|
||||
fastdiv
|
||||
ffmpeg
|
||||
avconv
|
||||
ffplay
|
||||
ffprobe
|
||||
ffserver
|
||||
fft
|
||||
frei0r
|
||||
gnutls
|
||||
golomb
|
||||
gpl
|
||||
gray
|
||||
h264chroma
|
||||
h264dsp
|
||||
h264pred
|
||||
hardcoded_tables
|
||||
@@ -1156,6 +1144,7 @@ HAVE_LIST="
|
||||
asm_types_h
|
||||
attribute_may_alias
|
||||
attribute_packed
|
||||
bswap
|
||||
cbrtf
|
||||
closesocket
|
||||
cmov
|
||||
@@ -1163,11 +1152,12 @@ HAVE_LIST="
|
||||
dev_bktr_ioctl_bt848_h
|
||||
dev_bktr_ioctl_meteor_h
|
||||
dev_ic_bt8xx_h
|
||||
dev_video_bktr_ioctl_bt848_h
|
||||
dev_video_meteor_ioctl_meteor_h
|
||||
dev_video_bktr_ioctl_bt848_h
|
||||
dlfcn_h
|
||||
dlopen
|
||||
dos_paths
|
||||
dxva_h
|
||||
ebp_available
|
||||
ebx_available
|
||||
exp2
|
||||
@@ -1179,11 +1169,11 @@ HAVE_LIST="
|
||||
fork
|
||||
getaddrinfo
|
||||
gethrtime
|
||||
GetProcessAffinityMask
|
||||
GetProcessMemoryInfo
|
||||
GetProcessTimes
|
||||
getrusage
|
||||
gnu_as
|
||||
struct_rusage_ru_maxrss
|
||||
ibm_asm
|
||||
inet_aton
|
||||
inline_asm
|
||||
@@ -1203,41 +1193,34 @@ HAVE_LIST="
|
||||
lzo1x_999_compress
|
||||
machine_ioctl_bt848_h
|
||||
machine_ioctl_meteor_h
|
||||
makeinfo
|
||||
malloc_h
|
||||
MapViewOfFile
|
||||
memalign
|
||||
mkstemp
|
||||
mmap
|
||||
PeekNamedPipe
|
||||
poll_h
|
||||
posix_memalign
|
||||
round
|
||||
roundf
|
||||
sched_getaffinity
|
||||
sdl
|
||||
sdl_video_size
|
||||
setmode
|
||||
setrlimit
|
||||
sndio_h
|
||||
socklen_t
|
||||
soundcard_h
|
||||
poll_h
|
||||
setrlimit
|
||||
strerror_r
|
||||
strptime
|
||||
struct_addrinfo
|
||||
struct_ipv6_mreq
|
||||
struct_rusage_ru_maxrss
|
||||
struct_sockaddr_in6
|
||||
struct_sockaddr_sa_len
|
||||
struct_sockaddr_storage
|
||||
struct_v4l2_frmivalenum_discrete
|
||||
symver
|
||||
symver_asm_label
|
||||
symver_gnu_asm
|
||||
sysconf
|
||||
sysctl
|
||||
symver_asm_label
|
||||
sys_mman_h
|
||||
sys_param_h
|
||||
sys_resource_h
|
||||
sys_select_h
|
||||
sys_soundcard_h
|
||||
@@ -1319,9 +1302,6 @@ CMDLINE_SET="
|
||||
target_exec
|
||||
target_os
|
||||
target_path
|
||||
postproc_version
|
||||
valgrind
|
||||
yasmexe
|
||||
"
|
||||
|
||||
CMDLINE_APPEND="
|
||||
@@ -1362,8 +1342,8 @@ fast_64bit_if_any="alpha ia64 mips64 parisc64 ppc64 sparc64 x86_64"
|
||||
fast_clz_if_any="alpha armv5te avr32 mips ppc x86"
|
||||
fast_unaligned_if_any="armv6 ppc x86"
|
||||
|
||||
inline_asm_deps="!tms470"
|
||||
need_memalign="altivec neon sse"
|
||||
inline_asm_deps="!tms470"
|
||||
|
||||
symver_if_any="symver_asm_label symver_gnu_asm"
|
||||
|
||||
@@ -1414,8 +1394,8 @@ h263_encoder_select="aandct"
|
||||
h263_vaapi_hwaccel_select="vaapi h263_decoder"
|
||||
h263i_decoder_select="h263_decoder"
|
||||
h263p_encoder_select="h263_encoder"
|
||||
h264_decoder_select="golomb h264dsp h264pred"
|
||||
h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser"
|
||||
h264_decoder_select="golomb h264chroma h264dsp h264pred"
|
||||
h264_dxva2_hwaccel_deps="dxva2api_h"
|
||||
h264_dxva2_hwaccel_select="dxva2 h264_decoder"
|
||||
h264_vaapi_hwaccel_select="vaapi h264_decoder"
|
||||
@@ -1430,34 +1410,32 @@ loco_decoder_select="golomb"
|
||||
mjpeg_encoder_select="aandct"
|
||||
mlp_decoder_select="mlp_parser"
|
||||
mp1_decoder_select="mpegaudiodsp"
|
||||
mp1float_decoder_select="mpegaudiodsp"
|
||||
mp2_decoder_select="mpegaudiodsp"
|
||||
mp2float_decoder_select="mpegaudiodsp"
|
||||
mp3_decoder_select="mpegaudiodsp"
|
||||
mp3adu_decoder_select="mpegaudiodsp"
|
||||
mp3_decoder_select="mpegaudiodsp"
|
||||
mp3on4_decoder_select="mpegaudiodsp"
|
||||
mp1float_decoder_select="mpegaudiodsp"
|
||||
mp2float_decoder_select="mpegaudiodsp"
|
||||
mp3adufloat_decoder_select="mpegaudiodsp"
|
||||
mp3float_decoder_select="mpegaudiodsp"
|
||||
mp3on4_decoder_select="mpegaudiodsp"
|
||||
mp3on4float_decoder_select="mpegaudiodsp"
|
||||
mpc7_decoder_select="mpegaudiodsp"
|
||||
mpc8_decoder_select="mpegaudiodsp"
|
||||
mpeg1video_encoder_select="aandct"
|
||||
mpeg2video_encoder_select="aandct"
|
||||
mpeg4_decoder_select="h263_decoder mpeg4video_parser"
|
||||
mpeg4_encoder_select="h263_encoder"
|
||||
mpeg_vdpau_decoder_select="vdpau mpegvideo_decoder"
|
||||
mpeg_xvmc_decoder_deps="X11_extensions_XvMClib_h"
|
||||
mpeg_xvmc_decoder_select="mpegvideo_decoder"
|
||||
mpeg1_vdpau_decoder_select="vdpau mpeg1video_decoder"
|
||||
mpeg1_vdpau_hwaccel_select="vdpau mpeg1video_decoder"
|
||||
mpeg1video_encoder_select="aandct"
|
||||
mpeg2_crystalhd_decoder_select="crystalhd"
|
||||
mpeg2_dxva2_hwaccel_deps="dxva2api_h"
|
||||
mpeg2_dxva2_hwaccel_select="dxva2 mpeg2video_decoder"
|
||||
mpeg2_vdpau_hwaccel_select="vdpau mpeg2video_decoder"
|
||||
mpeg2_vaapi_hwaccel_select="vaapi mpeg2video_decoder"
|
||||
mpeg2video_encoder_select="aandct"
|
||||
mpeg4_crystalhd_decoder_select="crystalhd"
|
||||
mpeg4_decoder_select="h263_decoder mpeg4video_parser"
|
||||
mpeg4_encoder_select="h263_encoder"
|
||||
mpeg4_vaapi_hwaccel_select="vaapi mpeg4_decoder"
|
||||
mpeg4_vdpau_decoder_select="vdpau mpeg4_decoder"
|
||||
mpeg_xvmc_decoder_deps="X11_extensions_XvMClib_h"
|
||||
mpeg_xvmc_decoder_select="mpegvideo_decoder"
|
||||
msmpeg4_crystalhd_decoder_select="crystalhd"
|
||||
msmpeg4v1_decoder_select="h263_decoder"
|
||||
msmpeg4v1_encoder_select="h263_encoder"
|
||||
@@ -1476,8 +1454,8 @@ rv10_decoder_select="h263_decoder"
|
||||
rv10_encoder_select="h263_encoder"
|
||||
rv20_decoder_select="h263_decoder"
|
||||
rv20_encoder_select="h263_encoder"
|
||||
rv30_decoder_select="golomb h264chroma h264pred"
|
||||
rv40_decoder_select="golomb h264chroma h264pred"
|
||||
rv30_decoder_select="golomb h264pred"
|
||||
rv40_decoder_select="golomb h264pred"
|
||||
shorten_decoder_select="golomb"
|
||||
sipr_decoder_select="lsp"
|
||||
snow_decoder_select="dwt"
|
||||
@@ -1486,7 +1464,7 @@ sonic_decoder_select="golomb"
|
||||
sonic_encoder_select="golomb"
|
||||
sonic_ls_encoder_select="golomb"
|
||||
svq1_encoder_select="aandct"
|
||||
svq3_decoder_select="golomb h264chroma h264dsp h264pred"
|
||||
svq3_decoder_select="golomb h264dsp h264pred"
|
||||
svq3_decoder_suggest="zlib"
|
||||
theora_decoder_select="vp3_decoder"
|
||||
tiff_decoder_suggest="zlib"
|
||||
@@ -1494,8 +1472,8 @@ tiff_encoder_suggest="zlib"
|
||||
truehd_decoder_select="mlp_decoder"
|
||||
tscc_decoder_select="zlib"
|
||||
twinvq_decoder_select="mdct lsp sinewin"
|
||||
vc1_decoder_select="h263_decoder"
|
||||
vc1_crystalhd_decoder_select="crystalhd"
|
||||
vc1_decoder_select="h263_decoder h264chroma"
|
||||
vc1_dxva2_hwaccel_deps="dxva2api_h"
|
||||
vc1_dxva2_hwaccel_select="dxva2 vc1_decoder"
|
||||
vc1_vaapi_hwaccel_select="vaapi vc1_decoder"
|
||||
@@ -1534,7 +1512,7 @@ vda_deps="VideoDecodeAcceleration_VDADecoder_h pthreads"
|
||||
vdpau_deps="vdpau_vdpau_h vdpau_vdpau_x11_h"
|
||||
|
||||
# parsers
|
||||
h264_parser_select="golomb h264chroma h264dsp h264pred"
|
||||
h264_parser_select="golomb h264dsp h264pred"
|
||||
|
||||
# external libraries
|
||||
libaacplus_encoder_deps="libaacplus"
|
||||
@@ -1660,9 +1638,7 @@ mp_filter_deps="gpl avcodec"
|
||||
mptestsrc_filter_deps="gpl"
|
||||
negate_filter_deps="lut_filter"
|
||||
ocv_filter_deps="libopencv"
|
||||
pan_filter_deps="swresample"
|
||||
scale_filter_deps="swscale"
|
||||
tinterlace_filter_deps="gpl"
|
||||
yadif_filter_deps="gpl"
|
||||
|
||||
# libraries
|
||||
@@ -1671,13 +1647,15 @@ avformat_deps="avcodec"
|
||||
postproc_deps="gpl"
|
||||
|
||||
# programs
|
||||
ffmpeg_deps="avcodec avformat swscale swresample"
|
||||
ffmpeg_select="buffer_filter buffersink_filter"
|
||||
avconv_deps="avcodec avformat swscale"
|
||||
avconv_select="buffer_filter"
|
||||
ffplay_deps="avcodec avformat swscale sdl"
|
||||
ffplay_select="buffersink_filter rdft"
|
||||
ffprobe_deps="avcodec avformat"
|
||||
ffserver_deps="avformat ffm_muxer fork rtp_protocol rtsp_demuxer"
|
||||
ffserver_extralibs='$ldl'
|
||||
ffmpeg_deps="avcodec avformat swscale swresample"
|
||||
ffmpeg_select="buffersink_filter"
|
||||
|
||||
doc_deps="texi2html"
|
||||
|
||||
@@ -1700,6 +1678,7 @@ mxf_d10_test_deps="avfilter"
|
||||
seek_lavf_mxf_d10_test_deps="mxf_d10_test"
|
||||
|
||||
test_deps _encoder _decoder \
|
||||
adpcm_g726=g726 \
|
||||
adpcm_ima_qt \
|
||||
adpcm_ima_wav \
|
||||
adpcm_ms \
|
||||
@@ -1715,7 +1694,6 @@ test_deps _encoder _decoder \
|
||||
flac \
|
||||
flashsv \
|
||||
flv \
|
||||
adpcm_g726=g726 \
|
||||
gif \
|
||||
h261 \
|
||||
h263="h263 h263p" \
|
||||
@@ -1789,7 +1767,6 @@ incdir_default='${prefix}/include'
|
||||
libdir_default='${prefix}/lib'
|
||||
mandir_default='${prefix}/share/man'
|
||||
shlibdir_default="$libdir_default"
|
||||
postproc_version_default="current"
|
||||
|
||||
# toolchain
|
||||
ar_default="ar"
|
||||
@@ -1804,10 +1781,10 @@ objformat="elf"
|
||||
pkg_config_default=pkg-config
|
||||
ranlib="ranlib"
|
||||
strip_default="strip"
|
||||
yasmexe_default="yasm"
|
||||
yasmexe="yasm"
|
||||
nogas=":"
|
||||
|
||||
nm_opts='-g'
|
||||
nogas=":"
|
||||
|
||||
# machine
|
||||
arch_default=$(uname -m)
|
||||
@@ -1817,33 +1794,29 @@ cpu="generic"
|
||||
target_os_default=$(tolower $(uname -s))
|
||||
host_os=$target_os_default
|
||||
|
||||
# alternative libpostproc version
|
||||
ALT_PP_VER_MAJOR=51
|
||||
ALT_PP_VER_MINOR=2
|
||||
ALT_PP_VER_MICRO=101
|
||||
ALT_PP_VER=$ALT_PP_VER_MAJOR.$ALT_PP_VER_MINOR.$ALT_PP_VER_MICRO
|
||||
|
||||
# configurable options
|
||||
enable $PROGRAM_LIST
|
||||
|
||||
enable avcodec
|
||||
enable avdevice
|
||||
enable avfilter
|
||||
enable avformat
|
||||
enable avutil
|
||||
enable postproc
|
||||
enable stripping
|
||||
enable swresample
|
||||
enable swscale
|
||||
|
||||
enable asm
|
||||
enable debug
|
||||
enable doc
|
||||
enable fastdiv
|
||||
enable ffmpeg
|
||||
enable ffplay
|
||||
enable ffprobe
|
||||
enable ffserver
|
||||
enable network
|
||||
enable optimizations
|
||||
enable postproc
|
||||
enable protocols
|
||||
enable safe_bitstream_reader
|
||||
enable static
|
||||
enable stripping
|
||||
enable swresample
|
||||
enable swscale
|
||||
enable swscale_alpha
|
||||
|
||||
# build settings
|
||||
@@ -1913,20 +1886,6 @@ INDEV_LIST=$(find_things indev _IN libavdevice/alldevices.c)
|
||||
PROTOCOL_LIST=$(find_things protocol PROTOCOL libavformat/allformats.c)
|
||||
FILTER_LIST=$(find_things filter FILTER libavfilter/allfilters.c)
|
||||
|
||||
ALL_COMPONENTS="
|
||||
$BSF_LIST
|
||||
$DECODER_LIST
|
||||
$DEMUXER_LIST
|
||||
$ENCODER_LIST
|
||||
$FILTER_LIST
|
||||
$HWACCEL_LIST
|
||||
$INDEV_LIST
|
||||
$MUXER_LIST
|
||||
$OUTDEV_LIST
|
||||
$PARSER_LIST
|
||||
$PROTOCOL_LIST
|
||||
"
|
||||
|
||||
find_tests(){
|
||||
map "echo ${2}\${v}_test" $(ls "$source_path"/tests/ref/$1 | grep -v '[^-a-z0-9_]')
|
||||
}
|
||||
@@ -1937,8 +1896,6 @@ LAVF_TESTS=$(find_tests lavf)
|
||||
LAVFI_TESTS=$(find_tests lavfi)
|
||||
SEEK_TESTS=$(find_tests seek seek_)
|
||||
|
||||
ALL_TESTS="$ACODEC_TESTS $VCODEC_TESTS $LAVF_TESTS $LAVFI_TESTS $SEEK_TESTS"
|
||||
|
||||
pcm_test_deps=$(map 'echo ${v%_*}_decoder $v' $(filter pcm_* $ENCODER_LIST))
|
||||
|
||||
for n in $COMPONENT_LIST; do
|
||||
@@ -1947,7 +1904,7 @@ for n in $COMPONENT_LIST; do
|
||||
eval ${n}_if_any="\$$v"
|
||||
done
|
||||
|
||||
enable $ARCH_EXT_LIST $ALL_TESTS
|
||||
enable $ARCH_EXT_LIST $ACODEC_TESTS $VCODEC_TESTS $LAVF_TESTS $LAVFI_TESTS $SEEK_TESTS
|
||||
|
||||
die_unknown(){
|
||||
echo "Unknown option \"$1\"."
|
||||
@@ -2029,17 +1986,7 @@ if enabled cross_compile; then
|
||||
die "Must specify target arch and OS when cross-compiling"
|
||||
fi
|
||||
|
||||
set_default arch target_os postproc_version
|
||||
|
||||
# Check if we should build alternative libpostproc version instead of current
|
||||
if test "$postproc_version" = $ALT_PP_VER; then
|
||||
LIBPOSTPROC_VERSION=$ALT_PP_VER
|
||||
LIBPOSTPROC_VERSION_MAJOR=$ALT_PP_VER_MAJOR
|
||||
LIBPOSTPROC_VERSION_MINOR=$ALT_PP_VER_MINOR
|
||||
LIBPOSTPROC_VERSION_MICRO=$ALT_PP_VER_MICRO
|
||||
elif test "$postproc_version" != current; then
|
||||
die "Invalid argument to --postproc-version. See --help output."
|
||||
fi
|
||||
set_default arch target_os
|
||||
|
||||
ar_default="${cross_prefix}${ar_default}"
|
||||
cc_default="${cross_prefix}${cc_default}"
|
||||
@@ -2051,7 +1998,7 @@ strip_default="${cross_prefix}${strip_default}"
|
||||
|
||||
sysinclude_default="${sysroot}/usr/include"
|
||||
|
||||
set_default cc cxx nm pkg_config strip sysinclude yasmexe
|
||||
set_default cc cxx nm pkg_config strip sysinclude
|
||||
enabled cross_compile || host_cc_default=$cc
|
||||
set_default host_cc
|
||||
|
||||
@@ -2092,15 +2039,15 @@ tmpfile(){
|
||||
|
||||
trap 'rm -f -- $TMPFILES' EXIT
|
||||
|
||||
tmpfile TMPASM .asm
|
||||
tmpfile TMPC .c
|
||||
tmpfile TMPC .c
|
||||
tmpfile TMPCPP .cpp
|
||||
tmpfile TMPE $EXESUF
|
||||
tmpfile TMPH .h
|
||||
tmpfile TMPO .o
|
||||
tmpfile TMPS .S
|
||||
tmpfile TMPSH .sh
|
||||
tmpfile TMPV .ver
|
||||
tmpfile TMPE $EXESUF
|
||||
tmpfile TMPH .h
|
||||
tmpfile TMPO .o
|
||||
tmpfile TMPS .S
|
||||
tmpfile TMPV .ver
|
||||
tmpfile TMPSH .sh
|
||||
tmpfile TMPASM .asm
|
||||
|
||||
unset -f mktemp
|
||||
|
||||
@@ -2119,9 +2066,9 @@ EOF
|
||||
die "Sanity test failed."
|
||||
fi
|
||||
|
||||
filter_asflags=echo
|
||||
filter_cflags=echo
|
||||
filter_cppflags=echo
|
||||
filter_asflags=echo
|
||||
|
||||
if $cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
|
||||
cc_type=llvm_gcc
|
||||
@@ -2944,6 +2891,8 @@ EOF
|
||||
enabled ssse3 && check_asm ssse3 '"pabsw %xmm0, %xmm0"'
|
||||
enabled mmx2 && check_asm mmx2 '"pmaxub %mm0, %mm1"'
|
||||
|
||||
check_asm bswap '"bswap %%eax" ::: "%eax"'
|
||||
|
||||
if ! disabled_any asm mmx yasm; then
|
||||
if check_cmd $yasmexe --version; then
|
||||
enabled x86_64 && yasm_extra="-m amd64"
|
||||
@@ -3033,26 +2982,22 @@ check_func ${malloc_prefix}posix_memalign && enable posix_memalign
|
||||
check_func setrlimit
|
||||
check_func strerror_r
|
||||
check_func strptime
|
||||
check_func sched_getaffinity
|
||||
check_func sysconf
|
||||
check_func sysctl
|
||||
check_func_headers conio.h kbhit
|
||||
check_func_headers windows.h PeekNamedPipe
|
||||
check_func_headers io.h setmode
|
||||
check_func_headers lzo/lzo1x.h lzo1x_999_compress
|
||||
check_lib2 "windows.h psapi.h" GetProcessMemoryInfo -lpsapi
|
||||
check_func_headers windows.h GetProcessAffinityMask
|
||||
check_func_headers windows.h GetProcessTimes
|
||||
check_func_headers windows.h MapViewOfFile
|
||||
check_func_headers windows.h VirtualAlloc
|
||||
|
||||
check_header dlfcn.h
|
||||
check_header dxva.h
|
||||
check_header dxva2api.h -D_WIN32_WINNT=0x0600
|
||||
check_header libcrystalhd/libcrystalhd_if.h
|
||||
check_header malloc.h
|
||||
check_header poll.h
|
||||
check_header sys/mman.h
|
||||
check_header sys/param.h
|
||||
check_header sys/resource.h
|
||||
check_header sys/select.h
|
||||
check_header termios.h
|
||||
@@ -3127,9 +3072,7 @@ enabled frei0r && { check_header frei0r.h || die "ERROR: frei0r.h header not
|
||||
enabled gnutls && require_pkg_config gnutls gnutls/gnutls.h gnutls_global_init
|
||||
enabled libaacplus && require "libaacplus >= 2.0.0" aacplus.h aacplusEncOpen -laacplus
|
||||
enabled libass && require_pkg_config libass ass/ass.h ass_library_init
|
||||
enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 &&
|
||||
{ check_lib celt/celt.h celt_decoder_create_custom -lcelt0 ||
|
||||
die "ERROR: libcelt version must be >= 0.11.0."; }
|
||||
enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0
|
||||
enabled libdc1394 && require_pkg_config libdc1394-2 dc1394/dc1394.h dc1394_new
|
||||
enabled libdirac && require_pkg_config dirac \
|
||||
"libdirac_decoder/dirac_parser.h libdirac_encoder/dirac_encoder.h" \
|
||||
@@ -3196,13 +3139,10 @@ fi
|
||||
enabled sdl && add_cflags $sdl_cflags && add_extralibs $sdl_libs
|
||||
|
||||
texi2html -version > /dev/null 2>&1 && enable texi2html || disable texi2html
|
||||
makeinfo --version > /dev/null 2>&1 && enable makeinfo || disable makeinfo
|
||||
|
||||
check_header linux/fb.h
|
||||
check_header linux/videodev.h
|
||||
check_header linux/videodev2.h
|
||||
check_struct linux/videodev2.h "struct v4l2_frmivalenum" discrete
|
||||
|
||||
check_header sys/videoio.h
|
||||
|
||||
check_func_headers "windows.h vfw.h" capCreateCaptureWindow "$vfwcap_indev_extralibs"
|
||||
@@ -3265,7 +3205,6 @@ fi
|
||||
|
||||
enabled debug && add_cflags -g"$debuglevel" && add_asflags -g"$debuglevel"
|
||||
enabled coverage && add_cflags "-fprofile-arcs -ftest-coverage" && add_ldflags "-fprofile-arcs -ftest-coverage"
|
||||
test -n "$valgrind" && target_exec="$valgrind --error-exitcode=1 --malloc-fill=0x2a --track-origins=yes --leak-check=full --gen-suppressions=all --suppressions=$source_path/tests/fate-valgrind.supp"
|
||||
|
||||
# add some useful compiler flags if supported
|
||||
check_cflags -Wdeclaration-after-statement
|
||||
@@ -3354,11 +3293,13 @@ elif enabled gcc; then
|
||||
check_cflags -fno-tree-vectorize
|
||||
check_cflags -Werror=implicit-function-declaration
|
||||
check_cflags -Werror=missing-prototypes
|
||||
check_cflags -Werror=return-type
|
||||
elif enabled llvm_gcc; then
|
||||
check_cflags -mllvm -stack-alignment=16
|
||||
elif enabled clang; then
|
||||
check_cflags -mllvm -stack-alignment=16
|
||||
check_cflags -Qunused-arguments
|
||||
check_cflags -Werror=return-type
|
||||
elif enabled armcc; then
|
||||
# 2523: use of inline assembler is deprecated
|
||||
add_cflags -W${armcc_opt},--diag_suppress=2523
|
||||
@@ -3378,8 +3319,22 @@ enabled_any $THREADS_LIST && enable threads
|
||||
check_deps $CONFIG_LIST \
|
||||
$CONFIG_EXTRA \
|
||||
$HAVE_LIST \
|
||||
$ALL_COMPONENTS \
|
||||
$ALL_TESTS \
|
||||
$DECODER_LIST \
|
||||
$ENCODER_LIST \
|
||||
$HWACCEL_LIST \
|
||||
$PARSER_LIST \
|
||||
$BSF_LIST \
|
||||
$DEMUXER_LIST \
|
||||
$MUXER_LIST \
|
||||
$FILTER_LIST \
|
||||
$INDEV_LIST \
|
||||
$OUTDEV_LIST \
|
||||
$PROTOCOL_LIST \
|
||||
$ACODEC_TESTS \
|
||||
$VCODEC_TESTS \
|
||||
$LAVF_TESTS \
|
||||
$LAVFI_TESTS \
|
||||
$SEEK_TESTS \
|
||||
|
||||
enabled asm || { arch=c; disable $ARCH_LIST $ARCH_EXT_LIST; }
|
||||
|
||||
@@ -3608,24 +3563,21 @@ EOF
|
||||
get_version(){
|
||||
name=$1
|
||||
file=$source_path/$2
|
||||
# This condition will be removed when we stop supporting old libpostproc versions
|
||||
if ! test "$name" = LIBPOSTPROC || test "$postproc_version" = current; then
|
||||
eval $(grep "#define ${name}_VERSION_M" "$file" | awk '{ print $2"="$3 }')
|
||||
eval ${name}_VERSION=\$${name}_VERSION_MAJOR.\$${name}_VERSION_MINOR.\$${name}_VERSION_MICRO
|
||||
fi
|
||||
lcname=$(tolower $name)
|
||||
eval echo "${lcname}_VERSION=\$${name}_VERSION" >> config.mak
|
||||
eval echo "${lcname}_VERSION_MAJOR=\$${name}_VERSION_MAJOR" >> config.mak
|
||||
}
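A minimal standalone sketch of the extraction performed by get_version(), using a hypothetical header and library name, may help when reading the grep/awk pipeline above:

# Create a throwaway header with the usual version defines ...
printf '#define LIBFOO_VERSION_MAJOR 51\n#define LIBFOO_VERSION_MINOR 2\n#define LIBFOO_VERSION_MICRO 101\n' > libfoo.h
# ... turn each "#define NAME VALUE" line into a shell assignment and evaluate it ...
eval $(grep "#define LIBFOO_VERSION_M" libfoo.h | awk '{ print $2"="$3 }')
# ... then compose the dotted version string, as get_version() does for config.mak.
echo "libfoo_VERSION=$LIBFOO_VERSION_MAJOR.$LIBFOO_VERSION_MINOR.$LIBFOO_VERSION_MICRO"
# prints: libfoo_VERSION=51.2.101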
|
||||
|
||||
get_version LIBSWSCALE libswscale/swscale.h
|
||||
get_version LIBSWRESAMPLE libswresample/swresample.h
|
||||
get_version LIBPOSTPROC libpostproc/postprocess.h
|
||||
get_version LIBAVCODEC libavcodec/version.h
|
||||
get_version LIBAVDEVICE libavdevice/avdevice.h
|
||||
get_version LIBAVFILTER libavfilter/version.h
|
||||
get_version LIBAVFORMAT libavformat/version.h
|
||||
get_version LIBAVUTIL libavutil/avutil.h
|
||||
get_version LIBPOSTPROC libpostproc/postprocess.h
|
||||
get_version LIBSWRESAMPLE libswresample/swresample.h
|
||||
get_version LIBSWSCALE libswscale/swscale.h
|
||||
get_version LIBAVFILTER libavfilter/avfilter.h
|
||||
|
||||
cat > $TMPH <<EOF
|
||||
/* Automatically generated by configure - do not modify! */
|
||||
@@ -3664,7 +3616,17 @@ print_config ARCH_ "$config_files" $ARCH_LIST
|
||||
print_config HAVE_ "$config_files" $HAVE_LIST
|
||||
print_config CONFIG_ "$config_files" $CONFIG_LIST \
|
||||
$CONFIG_EXTRA \
|
||||
$ALL_COMPONENTS \
|
||||
$DECODER_LIST \
|
||||
$ENCODER_LIST \
|
||||
$HWACCEL_LIST \
|
||||
$PARSER_LIST \
|
||||
$BSF_LIST \
|
||||
$DEMUXER_LIST \
|
||||
$MUXER_LIST \
|
||||
$FILTER_LIST \
|
||||
$PROTOCOL_LIST \
|
||||
$INDEV_LIST \
|
||||
$OUTDEV_LIST \
|
||||
|
||||
cat >>config.mak <<EOF
|
||||
ACODEC_TESTS=$(print_enabled -n _test $ACODEC_TESTS)
|
||||
@@ -3689,12 +3651,6 @@ cat > $TMPH <<EOF
|
||||
#define AVUTIL_AVCONFIG_H
|
||||
EOF
|
||||
|
||||
test "$postproc_version" != current && cat >> $TMPH <<EOF
|
||||
#define LIBPOSTPROC_VERSION_MAJOR $LIBPOSTPROC_VERSION_MAJOR
|
||||
#define LIBPOSTPROC_VERSION_MINOR $LIBPOSTPROC_VERSION_MINOR
|
||||
#define LIBPOSTPROC_VERSION_MICRO $LIBPOSTPROC_VERSION_MICRO
|
||||
EOF
|
||||
|
||||
print_config AV_HAVE_ $TMPH $HAVE_LIST_PUB
|
||||
|
||||
echo "#endif /* AVUTIL_AVCONFIG_H */" >> $TMPH
|
||||
|
doc/APIchanges
@@ -13,26 +13,6 @@ libavutil: 2011-04-18
|
||||
|
||||
API changes, most recent first:
|
||||
|
||||
2012-01-24 - xxxxxxx - lavfi 2.60.100
|
||||
Add avfilter_graph_dump.
|
||||
|
||||
2012-01-25 - lavf 53.22.0
|
||||
f1caf01 Allow doing av_write_frame(ctx, NULL) for flushing possible
|
||||
buffered data within a muxer. Added AVFMT_ALLOW_FLUSH for
|
||||
muxers supporting it (av_write_frame makes sure it is called
|
||||
only for muxers with this flag).
|
||||
|
||||
2012-01-15 - lavc 53.34.0
|
||||
New audio encoding API:
|
||||
b2c75b6 Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by audio
|
||||
encoders.
|
||||
5ee5fa0 Add avcodec_fill_audio_frame() as a convenience function.
|
||||
b2c75b6 Add avcodec_encode_audio2() and deprecate avcodec_encode_audio().
|
||||
Add AVCodec.encode2().
|
||||
|
||||
2012-01-12 - 3167dc9 - lavfi 2.15.0
|
||||
Add a new installed header -- libavfilter/version.h -- with version macros.
|
||||
|
||||
2011-12-08 - a502939 - lavfi 2.52.0
|
||||
Add av_buffersink_poll_frame() to buffersink.h.
|
||||
|
||||
@@ -51,37 +31,31 @@ API changes, most recent first:
|
||||
2011-10-20 - b35e9e1 - lavu 51.22.0
|
||||
Add av_strtok() to avstring.h.
|
||||
|
||||
2011-01-03 - b73ec05 - lavu 51.21.0
|
||||
Add av_popcount64
|
||||
|
||||
2011-12-18 - 8400b12 - lavc 53.28.1
|
||||
Deprecate AVFrame.age. The field is unused.
|
||||
|
||||
2011-12-12 - 5266045 - lavf 53.17.0
|
||||
Add avformat_close_input().
|
||||
2011-xx-xx - xxxxxxx - lavf 53.17.0
|
||||
Add avformat_open_input().
|
||||
Deprecate av_close_input_file() and av_close_input_stream().
|
||||
|
||||
2011-12-02 - 0eea212 - lavc 53.25.0
|
||||
2011-xx-xx - xxxxxxx - lavc 53.25.0
|
||||
Add nb_samples and extended_data fields to AVFrame.
|
||||
Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
|
||||
Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4().
|
||||
avcodec_decode_audio4() writes output samples to an AVFrame, which allows
|
||||
audio decoders to use get_buffer().
|
||||
|
||||
2011-12-04 - 560f773 - lavc 53.24.0
|
||||
2011-xx-xx - xxxxxxx - lavc 53.24.0
|
||||
Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
|
||||
Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
|
||||
Change AVCodecContext.error[4] to [8] at next major bump.
|
||||
Add AV_NUM_DATA_POINTERS to simplify the bump transition.
|
||||
|
||||
2011-11-23 - bbb46f3 - lavu 51.18.0
|
||||
2011-11-23 - 8e576d5 / bbb46f3 - lavu 51.27.0 / 51.18.0
|
||||
Add av_samples_get_buffer_size(), av_samples_fill_arrays(), and
|
||||
av_samples_alloc(), to samplefmt.h.
|
||||
|
||||
2011-11-23 - 8889cc4 - lavu 51.17.0
|
||||
2011-11-23 - 8e576d5 / 8889cc4 - lavu 51.27.0 / 51.17.0
|
||||
Add planar sample formats and av_sample_fmt_is_planar() to samplefmt.h.
|
||||
|
||||
2011-11-19 - f3a29b7 - lavc 53.21.0
|
||||
2011-11-19 - dbb38bc / f3a29b7 - lavc 53.36.0 / 53.21.0
|
||||
Move some AVCodecContext fields to a new private struct, AVCodecInternal,
|
||||
which is accessed from a new field, AVCodecContext.internal.
|
||||
- fields moved:
|
||||
@@ -89,55 +63,55 @@ API changes, most recent first:
|
||||
AVCodecContext.internal_buffer_count --> AVCodecInternal.buffer_count
|
||||
AVCodecContext.is_copy --> AVCodecInternal.is_copy
|
||||
|
||||
2011-11-16 - 6270671 - lavu 51.16.0
|
||||
2011-11-16 - 8709ba9 / 6270671 - lavu 51.26.0 / 51.16.0
|
||||
Add av_timegm()
|
||||
|
||||
2011-11-13 - lavf 53.15.0
|
||||
2011-11-13 - lavf 53.21.0 / 53.15.0
|
||||
New interrupt callback API, allowing per-AVFormatContext/AVIOContext
|
||||
interrupt callbacks.
|
||||
6aa0b98 Add AVIOInterruptCB struct and the interrupt_callback field to
|
||||
5f268ca / 6aa0b98 Add AVIOInterruptCB struct and the interrupt_callback field to
|
||||
AVFormatContext.
|
||||
1dee0ac Add avio_open2() with additional parameters. Those are
|
||||
5f268ca / 1dee0ac Add avio_open2() with additional parameters. Those are
|
||||
an interrupt callback and an options AVDictionary.
|
||||
This will allow passing AVOptions to protocols after lavf
|
||||
54.0.
|
||||
|
||||
2011-11-06 - ba04ecf - lavu 51.14.0
|
||||
2011-11-06 - 13b7781 / ba04ecf - lavu 51.24.0 / 51.14.0
|
||||
Add av_strcasecmp() and av_strncasecmp() to avstring.h.
|
||||
|
||||
2011-11-06 - 07b172f - lavu 51.13.0
|
||||
2011-11-06 - 13b7781 / 07b172f - lavu 51.24.0 / 51.13.0
|
||||
Add av_toupper()/av_tolower()
|
||||
|
||||
2011-11-05 - b6d08f4 - lavf 53.13.0
|
||||
2011-11-05 - d8cab5c / b6d08f4 - lavf 53.19.0 / 53.13.0
|
||||
Add avformat_network_init()/avformat_network_uninit()
|
||||
|
||||
2011-10-27 - 512557b - lavc 53.15.0
|
||||
2011-10-27 - 6faf0a2 / 512557b - lavc 53.24.0 / 53.15.0
|
||||
Remove avcodec_parse_frame.
|
||||
Deprecate AVCodecContext.parse_only and CODEC_CAP_PARSE_ONLY.
|
||||
|
||||
2011-10-19 - 569129a - lavf 53.10.0
|
||||
2011-10-19 - d049257 / 569129a - lavf 53.17.0 / 53.10.0
|
||||
Add avformat_new_stream(). Deprecate av_new_stream().
|
||||
|
||||
2011-10-13 - b631fba - lavf 53.9.0
|
||||
2011-10-13 - 91eb1b1 / b631fba - lavf 53.16.0 / 53.9.0
|
||||
Add AVFMT_NO_BYTE_SEEK AVInputFormat flag.
|
||||
|
||||
2011-10-12 - lavu 51.12.0
|
||||
2011-10-12 - lavu 51.21.0 / 51.12.0
|
||||
AVOptions API rewrite.
|
||||
|
||||
- 145f741 FF_OPT_TYPE* renamed to AV_OPT_TYPE_*
|
||||
- f884ef0 / 145f741 FF_OPT_TYPE* renamed to AV_OPT_TYPE_*
|
||||
- new setting/getting functions with slightly different semantics:
|
||||
dac66da av_set_string3 -> av_opt_set
|
||||
f884ef0 / dac66da av_set_string3 -> av_opt_set
|
||||
av_set_double -> av_opt_set_double
|
||||
av_set_q -> av_opt_set_q
|
||||
av_set_int -> av_opt_set_int
|
||||
|
||||
41d9d51 av_get_string -> av_opt_get
|
||||
f884ef0 / 41d9d51 av_get_string -> av_opt_get
|
||||
av_get_double -> av_opt_get_double
|
||||
av_get_q -> av_opt_get_q
|
||||
av_get_int -> av_opt_get_int
|
||||
|
||||
- 8c5dcaa trivial rename av_next_option -> av_opt_next
|
||||
- 641c7af new functions - av_opt_child_next, av_opt_child_class_next
|
||||
- f884ef0 / 8c5dcaa trivial rename av_next_option -> av_opt_next
|
||||
- f884ef0 / 641c7af new functions - av_opt_child_next, av_opt_child_class_next
|
||||
and av_opt_find2()
|
||||
|
||||
2011-09-22 - a70e787 - lavu 51.17.0
|
||||
@@ -183,31 +157,27 @@ API changes, most recent first:
|
||||
2011-08-20 - 69e2c1a - lavu 51.13.0
|
||||
Add av_get_media_type_string().
|
||||
|
||||
2011-09-03 - fb4ca26 - lavc 53.13.0
|
||||
2011-09-03 - 1889c67 / fb4ca26 - lavc 53.13.0
|
||||
lavf 53.11.0
|
||||
lsws 2.1.0
|
||||
Add {avcodec,avformat,sws}_get_class().
|
||||
|
||||
2011-08-03 - c11fb82 - lavu 51.15.0
|
||||
2011-08-03 - 1889c67 / c11fb82 - lavu 51.15.0
|
||||
Add AV_OPT_SEARCH_FAKE_OBJ flag for av_opt_find() function.
|
||||
|
||||
2011-08-14 - 323b930 - lavu 51.12.0
|
||||
Add av_fifo_peek2(), deprecate av_fifo_peek().
|
||||
|
||||
2011-08-26 - lavu 51.9.0
|
||||
- add41de..abc78a5 Do not include intfloat_readwrite.h,
|
||||
mathematics.h, rational.h, pixfmt.h, or log.h from avutil.h.
|
||||
|
||||
2011-08-16 - 48f9e45 - lavf 53.8.0
|
||||
2011-08-16 - 27fbe31 / 48f9e45 - lavf 53.11.0 / 53.8.0
|
||||
Add avformat_query_codec().
|
||||
|
||||
2011-08-16 - bca06e7 - lavc 53.11.0
|
||||
2011-08-16 - 27fbe31 / bca06e7 - lavc 53.11.0
|
||||
Add avcodec_get_type().
|
||||
|
||||
2011-08-06 - 2f63440 - lavf 53.7.0
|
||||
2011-08-06 - 0cb233c / 2f63440 - lavf 53.7.0
|
||||
Add error_recognition to AVFormatContext.
|
||||
|
||||
2011-08-02 - 9d39cbf - lavc 53.9.1
|
||||
2011-08-02 - 1d186e9 / 9d39cbf - lavc 53.9.1
|
||||
Add AV_PKT_FLAG_CORRUPT AVPacket flag.
|
||||
|
||||
2011-07-16 - b57df29 - lavfi 2.27.0
|
||||
@@ -218,10 +188,10 @@ API changes, most recent first:
|
||||
avfilter_set_common_packing_formats()
|
||||
avfilter_all_packing_formats()
|
||||
|
||||
2011-07-10 - a67c061 - lavf 53.6.0
|
||||
2011-07-10 - 3602ad7 / a67c061 - lavf 53.6.0
|
||||
Add avformat_find_stream_info(), deprecate av_find_stream_info().
|
||||
|
||||
2011-07-10 - 0b950fe - lavc 53.8.0
|
||||
2011-07-10 - 3602ad7 / 0b950fe - lavc 53.8.0
|
||||
Add avcodec_open2(), deprecate avcodec_open().
|
||||
|
||||
2011-07-01 - b442ca6 - lavf 53.5.0 - avformat.h
|
||||
@@ -260,35 +230,35 @@ API changes, most recent first:
|
||||
2011-06-12 - 6119b23 - lavfi 2.16.0 - avfilter_graph_parse()
|
||||
Change avfilter_graph_parse() signature.
|
||||
|
||||
2011-06-23 - 67e9ae1 - lavu 51.8.0 - attributes.h
|
||||
2011-06-23 - 686959e / 67e9ae1 - lavu 51.10.0 / 51.8.0 - attributes.h
|
||||
Add av_printf_format().
|
||||
|
||||
2011-06-16 - 05e84c9, 25de595 - lavf 53.2.0 - avformat.h
|
||||
2011-06-16 - 2905e3f / 05e84c9, 2905e3f / 25de595 - lavf 53.4.0 / 53.2.0 - avformat.h
|
||||
Add avformat_open_input and avformat_write_header().
|
||||
Deprecate av_open_input_stream, av_open_input_file,
|
||||
AVFormatParameters and av_write_header.
|
||||
|
||||
2011-06-16 - 7e83e1c, dc59ec5 - lavu 51.7.0 - opt.h
|
||||
2011-06-16 - 2905e3f / 7e83e1c, 2905e3f / dc59ec5 - lavu 51.9.0 / 51.7.0 - opt.h
|
||||
Add av_opt_set_dict() and av_opt_find().
|
||||
Deprecate av_find_opt().
|
||||
Add AV_DICT_APPEND flag.
|
||||
|
||||
2011-06-10 - cb7c11c - lavu 51.6.0 - opt.h
|
||||
2011-06-10 - 45fb647 / cb7c11c - lavu 51.6.0 - opt.h
|
||||
Add av_opt_flag_is_set().
|
||||
|
||||
2011-06-10 - c381960 - lavfi 2.15.0 - avfilter_get_audio_buffer_ref_from_arrays
|
||||
Add avfilter_get_audio_buffer_ref_from_arrays() to avfilter.h.
|
||||
|
||||
2011-06-09 - d9f80ea - lavu 51.8.0 - AVMetadata
|
||||
2011-06-09 - f9ecb84 / d9f80ea - lavu 51.8.0 - AVMetadata
|
||||
Move AVMetadata from lavf to lavu and rename it to
|
||||
AVDictionary -- new installed header dict.h.
|
||||
All av_metadata_* functions renamed to av_dict_*.
|
||||
|
||||
2011-06-07 - a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
|
||||
2011-06-07 - d552f61 / a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
|
||||
Add av_get_bytes_per_sample() in libavutil/samplefmt.h.
|
||||
Deprecate av_get_bits_per_sample_fmt().
|
||||
|
||||
2011-06-05 - b39b062 - lavu 51.8.0 - opt.h
|
||||
2011-06-05 - f956924 / b39b062 - lavu 51.8.0 - opt.h
|
||||
Add av_opt_free convenience function.
|
||||
|
||||
2011-06-06 - 95a0242 - lavfi 2.14.0 - AVFilterBufferRefAudioProps
|
||||
@@ -318,7 +288,7 @@ API changes, most recent first:
|
||||
Add av_get_pix_fmt_name() in libavutil/pixdesc.h, and deprecate
|
||||
avcodec_get_pix_fmt_name() in libavcodec/avcodec.h in its favor.
|
||||
|
||||
2011-05-25 - 30315a8 - lavf 53.3.0 - avformat.h
|
||||
2011-05-25 - 39e4206 / 30315a8 - lavf 53.3.0 - avformat.h
|
||||
Add fps_probe_size to AVFormatContext.
|
||||
|
||||
2011-05-22 - 5ecdfd0 - lavf 53.2.0 - avformat.h
|
||||
@@ -334,10 +304,10 @@ API changes, most recent first:
|
||||
2011-05-14 - 9fdf772 - lavfi 2.6.0 - avcodec.h
|
||||
Add avfilter_get_video_buffer_ref_from_frame() to libavfilter/avcodec.h.
|
||||
|
||||
2011-05-18 - 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
|
||||
2011-05-18 - 75a37b5 / 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
|
||||
Add request_sample_fmt field to AVCodecContext.
|
||||
|
||||
2011-05-10 - 188dea1 - lavc 53.6.0 - avcodec.h
|
||||
2011-05-10 - 59eb12f / 188dea1 - lavc 53.6.0 - avcodec.h
|
||||
Deprecate AVLPCType and the following fields in
|
||||
AVCodecContext: lpc_coeff_precision, prediction_order_method,
|
||||
min_partition_order, max_partition_order, lpc_type, lpc_passes.
|
||||
@@ -367,81 +337,81 @@ API changes, most recent first:
|
||||
Add av_dynarray_add function for adding
|
||||
an element to a dynamic array.
|
||||
|
||||
2011-04-26 - bebe72f - lavu 51.1.0 - avutil.h
|
||||
2011-04-26 - d7e5aeb / bebe72f - lavu 51.1.0 - avutil.h
|
||||
Add AVPictureType enum and av_get_picture_type_char(), deprecate
|
||||
FF_*_TYPE defines and av_get_pict_type_char() defined in
|
||||
libavcodec/avcodec.h.
|
||||
|
||||
2011-04-26 - 10d3940 - lavfi 2.3.0 - avfilter.h
|
||||
2011-04-26 - d7e5aeb / 10d3940 - lavfi 2.3.0 - avfilter.h
|
||||
Add pict_type and key_frame fields to AVFilterBufferRefVideo.
|
||||
|
||||
2011-04-26 - 7a11c82 - lavfi 2.2.0 - vsrc_buffer
|
||||
2011-04-26 - d7e5aeb / 7a11c82 - lavfi 2.2.0 - vsrc_buffer
|
||||
Add sample_aspect_ratio fields to vsrc_buffer arguments
|
||||
|
||||
2011-04-21 - 94f7451 - lavc 53.1.0 - avcodec.h
|
||||
2011-04-21 - 8772156 / 94f7451 - lavc 53.1.0 - avcodec.h
|
||||
Add CODEC_CAP_SLICE_THREADS for codecs supporting sliced threading.
|
||||
|
||||
2011-04-15 - lavc 52.120.0 - avcodec.h
|
||||
AVPacket structure got additional members for passing side information:
|
||||
4de339e introduce side information for AVPacket
|
||||
2d8591c make containers pass palette change in AVPacket
|
||||
c407984 / 4de339e introduce side information for AVPacket
|
||||
c407984 / 2d8591c make containers pass palette change in AVPacket
|
||||
|
||||
2011-04-12 - lavf 52.107.0 - avio.h
|
||||
Avio cleanup, part II - deprecate the entire URLContext API:
|
||||
175389c add avio_check as a replacement for url_exist
|
||||
ff1ec0c add avio_pause and avio_seek_time as replacements
|
||||
c55780d / 175389c add avio_check as a replacement for url_exist
|
||||
9891004 / ff1ec0c add avio_pause and avio_seek_time as replacements
|
||||
for _av_url_read_fseek/fpause
|
||||
cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
|
||||
d4d0932 / cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
|
||||
should be used instead.
|
||||
80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
|
||||
f87b1b3 rename open flags: URL_* -> AVIO_*
|
||||
f8270bb add avio_enum_protocols.
|
||||
5593f03 deprecate URLProtocol.
|
||||
c486dad deprecate URLContext.
|
||||
026e175 deprecate the typedef for URLInterruptCB
|
||||
8e76a19 deprecate av_register_protocol2.
|
||||
b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
|
||||
1305d93 deprecate av_url_read_seek
|
||||
fa104e1 deprecate av_url_read_pause
|
||||
727c7aa deprecate url_get_filename().
|
||||
5958df3 deprecate url_max_packet_size().
|
||||
1869ea0 deprecate url_get_file_handle().
|
||||
32a97d4 deprecate url_filesize().
|
||||
e52a914 deprecate url_close().
|
||||
58a48c6 deprecate url_seek().
|
||||
925e908 deprecate url_write().
|
||||
dce3756 deprecate url_read_complete().
|
||||
bc371ac deprecate url_read().
|
||||
0589da0 deprecate url_open().
|
||||
62eaaea deprecate url_connect.
|
||||
5652bb9 deprecate url_alloc.
|
||||
333e894 deprecate url_open_protocol
|
||||
e230705 deprecate url_poll and URLPollEntry
|
||||
c88caa5 / 80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
|
||||
c88caa5 / f87b1b3 rename open flags: URL_* -> AVIO_*
|
||||
d4d0932 / f8270bb add avio_enum_protocols.
|
||||
d4d0932 / 5593f03 deprecate URLProtocol.
|
||||
d4d0932 / c486dad deprecate URLContext.
|
||||
d4d0932 / 026e175 deprecate the typedef for URLInterruptCB
|
||||
c88caa5 / 8e76a19 deprecate av_register_protocol2.
|
||||
11d7841 / b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
|
||||
11d7841 / 1305d93 deprecate av_url_read_seek
|
||||
11d7841 / fa104e1 deprecate av_url_read_pause
|
||||
434f248 / 727c7aa deprecate url_get_filename().
|
||||
434f248 / 5958df3 deprecate url_max_packet_size().
|
||||
434f248 / 1869ea0 deprecate url_get_file_handle().
|
||||
434f248 / 32a97d4 deprecate url_filesize().
|
||||
434f248 / e52a914 deprecate url_close().
|
||||
434f248 / 58a48c6 deprecate url_seek().
|
||||
434f248 / 925e908 deprecate url_write().
|
||||
434f248 / dce3756 deprecate url_read_complete().
|
||||
434f248 / bc371ac deprecate url_read().
|
||||
434f248 / 0589da0 deprecate url_open().
|
||||
434f248 / 62eaaea deprecate url_connect.
|
||||
434f248 / 5652bb9 deprecate url_alloc.
|
||||
434f248 / 333e894 deprecate url_open_protocol
|
||||
434f248 / e230705 deprecate url_poll and URLPollEntry
|
||||
|
||||
2011-04-08 - lavf 52.106.0 - avformat.h
|
||||
Minor avformat.h cleanup:
|
||||
a9bf9d8 deprecate av_guess_image2_codec
|
||||
c3675df rename avf_sdp_create->av_sdp_create
|
||||
d4d0932 / a9bf9d8 deprecate av_guess_image2_codec
|
||||
d4d0932 / c3675df rename avf_sdp_create->av_sdp_create
|
||||
|
||||
2011-04-03 - lavf 52.105.0 - avio.h
|
||||
Large-scale renaming/deprecating of AVIOContext-related functions:
|
||||
724f6a0 deprecate url_fdopen
|
||||
403ee83 deprecate url_open_dyn_packet_buf
|
||||
6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
|
||||
b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
|
||||
8978fed introduce an AVIOContext.seekable field as a replacement for
|
||||
2cae980 / 724f6a0 deprecate url_fdopen
|
||||
2cae980 / 403ee83 deprecate url_open_dyn_packet_buf
|
||||
2cae980 / 6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
|
||||
2cae980 / b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
|
||||
2cae980 / 8978fed introduce an AVIOContext.seekable field as a replacement for
|
||||
AVIOContext.is_streamed and url_is_streamed()
|
||||
b64030f deprecate get_checksum()
|
||||
4c4427a deprecate init_checksum()
|
||||
4ec153b deprecate udp_set_remote_url/get_local_port
|
||||
933e90a deprecate av_url_read_fseek/fpause
|
||||
8d9769a deprecate url_fileno
|
||||
b7f2fdd rename put_flush_packet -> avio_flush
|
||||
35f1023 deprecate url_close_buf
|
||||
83fddae deprecate url_open_buf
|
||||
d9d86e0 rename url_fprintf -> avio_printf
|
||||
59f65d9 deprecate url_setbufsize
|
||||
3e68b3b deprecate url_ferror
|
||||
1caa412 / b64030f deprecate get_checksum()
|
||||
1caa412 / 4c4427a deprecate init_checksum()
|
||||
2fd41c9 / 4ec153b deprecate udp_set_remote_url/get_local_port
|
||||
4fa0e24 / 933e90a deprecate av_url_read_fseek/fpause
|
||||
4fa0e24 / 8d9769a deprecate url_fileno
|
||||
0fecf26 / b7f2fdd rename put_flush_packet -> avio_flush
|
||||
0fecf26 / 35f1023 deprecate url_close_buf
|
||||
0fecf26 / 83fddae deprecate url_open_buf
|
||||
0fecf26 / d9d86e0 rename url_fprintf -> avio_printf
|
||||
0fecf26 / 59f65d9 deprecate url_setbufsize
|
||||
6947b0c / 3e68b3b deprecate url_ferror
|
||||
e8bb2e2 deprecate url_fget_max_packet_size
|
||||
76aa876 rename url_fsize -> avio_size
|
||||
e519753 deprecate url_fgetc
|
||||
@@ -462,7 +432,7 @@ API changes, most recent first:
|
||||
b3db9ce deprecate get_partial_buffer
|
||||
8d9ac96 rename av_alloc_put_byte -> avio_alloc_context
|
||||
|
||||
2011-03-25 - 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
|
||||
2011-03-25 - 27ef7b1 / 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
|
||||
Add audio_service_type field to AVCodecContext.
|
||||
|
||||
2011-03-17 - e309fdc - lavu 50.40.0 - pixfmt.h
|
||||
@@ -500,11 +470,11 @@ API changes, most recent first:
|
||||
2011-02-10 - 12c14cd - lavf 52.99.0 - AVStream.disposition
|
||||
Add AV_DISPOSITION_HEARING_IMPAIRED and AV_DISPOSITION_VISUAL_IMPAIRED.
|
||||
|
||||
2011-02-09 - 5592734 - lavc 52.112.0 - avcodec_thread_init()
|
||||
2011-02-09 - c0b102c - lavc 52.112.0 - avcodec_thread_init()
|
||||
Deprecate avcodec_thread_init()/avcodec_thread_free() use; instead
|
||||
set thread_count before calling avcodec_open.
|
||||
|
||||
2011-02-09 - 778b08a - lavc 52.111.0 - threading API
|
||||
2011-02-09 - 37b00b4 - lavc 52.111.0 - threading API
|
||||
Add CODEC_CAP_FRAME_THREADS with new restrictions on get_buffer()/
|
||||
release_buffer()/draw_horiz_band() callbacks for appropriate codecs.
|
||||
Add thread_type and active_thread_type fields to AVCodecContext.
|
||||
|
doc/Makefile
@@ -9,24 +9,13 @@ HTMLPAGES = $(PROGS-yes:%=doc/%.html) \
|
||||
doc/libavfilter.html \
|
||||
doc/platform.html \
|
||||
|
||||
TXTPAGES = doc/fate.txt \
|
||||
|
||||
|
||||
DOCS = $(HTMLPAGES) $(MANPAGES) $(PODPAGES)
|
||||
ifdef HAVE_MAKEINFO
|
||||
DOCS += $(TXTPAGES)
|
||||
endif
|
||||
|
||||
all-$(CONFIG_DOC): documentation
|
||||
|
||||
documentation: $(DOCS)
|
||||
|
||||
TEXIDEP = awk '/^@(verbatim)?include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d)
|
||||
|
||||
doc/%.txt: TAG = TXT
|
||||
doc/%.txt: doc/%.texi
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)makeinfo --force --no-headers -o $@ $< 2>/dev/null
|
||||
TEXIDEP = awk '/^@include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d)
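As an illustration of the dependency generation done by the TEXIDEP command above (file names are hypothetical): in the Makefile, $@ is the target, for example doc/foo.txt, and $(@D) is its directory, so each @include (or @verbatiminclude) line in the source yields one make dependency, written to doc/foo.txt.d:

printf '@include bar.texi\n' |
awk '/^@(verbatim)?include/ { printf "doc/foo.txt: doc/%s\n", $2 }'
# prints: doc/foo.txt: doc/bar.texi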
|
||||
|
||||
doc/%.html: TAG = HTML
|
||||
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init
|
||||
@@ -57,7 +46,7 @@ uninstall-man:
|
||||
$(RM) $(addprefix "$(MANDIR)/man1/",$(ALLMANPAGES))
|
||||
|
||||
clean::
|
||||
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 $(CLEANSUFFIXES:%=doc/%)
|
||||
$(RM) doc/*.html doc/*.pod doc/*.1 $(CLEANSUFFIXES:%=doc/%)
|
||||
|
||||
-include $(wildcard $(DOCS:%=%.d))
|
||||
|
||||
|
@@ -1,12 +1,11 @@
|
||||
Release Notes
|
||||
=============
|
||||
|
||||
* 0.10 "Freedom" January, 2012
|
||||
* 0.9 "Harmony" December, 2011
|
||||
|
||||
|
||||
General notes
|
||||
-------------
|
||||
This release is binary compatible with 0.8 and 0.9.
|
||||
|
||||
See the Changelog file for a list of significant changes. Note that there
are many more new features and bugfixes than what's listed there.
|
||||
@@ -16,34 +15,3 @@ accepted. If you are experiencing issues with any formally released version of
|
||||
FFmpeg, please try git master to check if the issue still exists. If it does,
|
||||
make your report against the development code following the usual bug reporting
|
||||
guidelines.
|
||||
|
||||
|
||||
API changes
|
||||
-----------
|
||||
|
||||
A number of additional APIs have been introduced and some existing
|
||||
functions have been deprecated and are scheduled for removal in the next
|
||||
release. Significant API changes include:
|
||||
|
||||
* new audio decoding API which decodes from an AVPacket to an AVFrame and
|
||||
is able to use AVCodecContext.get_buffer() in the similar way as video decoding.
|
||||
|
||||
* new audio encoding API which encodes from an AVFrame to an AVPacket, thus
|
||||
allowing it to properly output timing information and side data.
|
||||
|
||||
Please see the git history and the file doc/APIchanges for details.
|
||||
|
||||
|
||||
Other notable changes
|
||||
---------------------
|
||||
|
||||
Libavcodec and libavformat built as shared libraries now hide non-public
|
||||
symbols. This will break applications using those symbols. Possible solutions
|
||||
are, in order of preference:
|
||||
1) Try finding a way of accomplishing the same with public API.
|
||||
2) If there is no corresponding public API, but you think there should be,
|
||||
post a request on the developer mailing list or IRC channel.
|
||||
3) Finally if your program needs access to FFmpeg / libavcodec / libavformat
|
||||
internals for some special reason then the best solution is to link statically.
|
||||
|
||||
Please see the Changelog file and git history for a more detailed list of changes.
|
||||
|
doc/avconv.texi (new file; diff suppressed because it is too large)
@@ -11,7 +11,6 @@ corresponding value to true. They can be set to false by prefixing
|
||||
with "no" the option name, for example using "-nofoo" in the
|
||||
command line will set to false the boolean option with name "foo".
|
||||
|
||||
@anchor{Stream specifiers}
|
||||
@section Stream specifiers
|
||||
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
are used to precisely specify which stream(s) a given option belongs to.
|
||||
@@ -119,8 +118,8 @@ Set the logging level used by the library.
|
||||
By default the program logs to stderr, if coloring is supported by the
|
||||
terminal, colors are used to mark errors and warnings. Log coloring
|
||||
can be disabled setting the environment variable
|
||||
@env{AV_LOG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced setting
|
||||
the environment variable @env{AV_LOG_FORCE_COLOR}.
|
||||
@env{FFMPEG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced setting
|
||||
the environment variable @env{FFMPEG_FORCE_COLOR}.
|
||||
The use of the environment variable @env{NO_COLOR} is deprecated and
|
||||
will be dropped in a following FFmpeg version.
|
||||
|
||||
|
@@ -23,20 +23,6 @@ Below is a description of the currently available bitstream filters.
|
||||
|
||||
@section h264_mp4toannexb
|
||||
|
||||
Convert an H.264 bitstream from length prefixed mode to start code
|
||||
prefixed mode (as defined in the Annex B of the ITU-T H.264
|
||||
specification).
|
||||
|
||||
This is required by some streaming formats, typically the MPEG-2
|
||||
transport stream format ("mpegts").
|
||||
|
||||
For example to remux an MP4 file containing an H.264 stream to mpegts
|
||||
format with @command{ffmpeg}, you can use the command:
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
|
||||
@end example
|
||||
|
||||
@section imx_dump_header
|
||||
|
||||
@section mjpeg2jpeg
|
||||
|
@@ -48,16 +48,3 @@ top-field-first is assumed
|
||||
@end table
|
||||
|
||||
@c man end VIDEO DECODERS
|
||||
|
||||
@chapter Audio Decoders
|
||||
@c man begin AUDIO DECODERS
|
||||
|
||||
@section ffwavesynth
|
||||
|
||||
Internal wave synthesizer.
|
||||
|
||||
This decoder generates wave patterns according to predefined sequences. Its
|
||||
use is purely internal and the format of the data it accepts is not publicly
|
||||
documented.
|
||||
|
||||
@c man end AUDIO DECODERS
|
||||
|
@@ -75,34 +75,4 @@ the caller can decide which variant streams to actually receive.
|
||||
The total bitrate of the variant that the stream belongs to is
|
||||
available in a metadata key named "variant_bitrate".
|
||||
|
||||
@section sbg
|
||||
|
||||
SBaGen script demuxer.
|
||||
|
||||
This demuxer reads the script language used by SBaGen
|
||||
@url{http://uazu.net/sbagen/} to generate binaural beats sessions. An SBG
script looks like this:
|
||||
@example
|
||||
-SE
|
||||
a: 300-2.5/3 440+4.5/0
|
||||
b: 300-2.5/0 440+4.5/3
|
||||
off: -
|
||||
NOW == a
|
||||
+0:07:00 == b
|
||||
+0:14:00 == a
|
||||
+0:21:00 == b
|
||||
+0:30:00 off
|
||||
@end example
|
||||
|
||||
An SBG script can mix absolute and relative timestamps. If the script uses
|
||||
either only absolute timestamps (including the script start time) or only
|
||||
relative ones, then its layout is fixed, and the conversion is
|
||||
straightforward. On the other hand, if the script mixes both kinds of
|
||||
timestamps, then the @var{NOW} reference for relative timestamps will be
|
||||
taken from the current time of day at the time the script is read, and the
|
||||
script layout will be frozen according to that reference. That means that if
|
||||
the script is directly played, the actual times will match the absolute
|
||||
timestamps up to the sound controller's clock accuracy, but if the user
|
||||
somehow pauses the playback or seeks, all times will be shifted accordingly.
|
||||
|
||||
@c man end INPUT DEVICES
|
||||
|
@@ -345,7 +345,7 @@ for us and greatly increases your chances of getting your patch applied.
|
||||
Use the patcheck tool of FFmpeg to check your patch.
|
||||
The tool is located in the tools directory.
|
||||
|
||||
Run the @ref{Regression tests} before submitting a patch in order to verify
|
||||
Run the @ref{Regression Tests} before submitting a patch in order to verify
|
||||
it does not cause unexpected problems.
|
||||
|
||||
Patches should be posted as base64 encoded attachments (or any other
|
||||
@@ -508,13 +508,12 @@ not related to the comments received during review. Such patches will
|
||||
be rejected. Instead, submit significant changes or new features as
|
||||
separate patches.
|
||||
|
||||
@anchor{Regression tests}
|
||||
@section Regression tests
|
||||
|
||||
Before submitting a patch (or committing to the repository), you should at least
|
||||
test that you did not break anything.
|
||||
|
||||
Running 'make fate' accomplishes this, please see @url{fate.html} for details.
|
||||
Running 'make fate' accomplishes this, please see @file{doc/fate.txt} for details.
|
||||
|
||||
[Of course, some patches may change the results of the regression tests. In
|
||||
this case, the reference results of the regression tests shall be modified
|
||||
|
File diff suppressed because it is too large
@@ -1,10 +0,0 @@
|
||||
</div>
|
||||
|
||||
<div id="footer">
|
||||
Generated on $datetime for $projectname by <a href="http://www.doxygen.org/index.html">doxygen</a> $doxygenversion
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
</body>
|
||||
</html>
|
@@ -1,14 +0,0 @@
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
|
||||
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
|
||||
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
|
||||
<link href="$relpath$doxy_stylesheet.css" rel="stylesheet" type="text/css" />
|
||||
</head>
|
||||
|
||||
<div id="container">
|
||||
|
||||
<div id="body">
|
||||
<div>
|
@@ -577,7 +577,7 @@ Allow to set any x264 option, see x264 --fullhelp for a list.
|
||||
":".
|
||||
@end table
|
||||
|
||||
For example to specify libx264 encoding options with @command{ffmpeg}:
|
||||
For example to specify libx264 encoding options with @file{ffmpeg}:
|
||||
@example
|
||||
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
|
||||
@end example
|
||||
|
@@ -98,14 +98,6 @@ point (@var{x}, @var{y}) from the origin.
|
||||
@item gcd(x, y)
|
||||
Return the greatest common divisor of @var{x} and @var{y}. If both @var{x} and
|
||||
@var{y} are 0 or either or both are less than zero then behavior is undefined.
|
||||
|
||||
@item if(x, y)
|
||||
Evaluate @var{x}, and if the result is non-zero return the result of
|
||||
the evaluation of @var{y}, return 0 otherwise.
|
||||
|
||||
@item ifnot(x, y)
|
||||
Evaluate @var{x}, and if the result is zero return the result of the
|
||||
evaluation of @var{y}, return 0 otherwise.
|
||||
@end table
|
||||
|
||||
The following constants are available:
|
||||
@@ -118,20 +110,19 @@ exp(1) (Euler's number), approximately 2.718
|
||||
golden ratio (1+sqrt(5))/2, approximately 1.618
|
||||
@end table
|
||||
|
||||
Assuming that an expression is considered "true" if it has a non-zero
|
||||
value, note that:
|
||||
Note that:
|
||||
|
||||
@code{*} works like AND
|
||||
|
||||
@code{+} works like OR
|
||||
|
||||
and the construct:
|
||||
thus
|
||||
@example
|
||||
if A then B else C
|
||||
@end example
|
||||
is equivalent to
|
||||
@example
|
||||
if(A,B) + ifnot(A,C)
|
||||
A*B + not(A)*C
|
||||
@end example
|
||||
|
||||
In your C code, you can extend the list of unary and binary functions,
|
||||
|
doc/faq.texi
@@ -272,44 +272,6 @@ ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
|
||||
rm temp[12].[av] all.[av]
|
||||
@end example
|
||||
|
||||
@section -profile option fails when encoding H.264 video with AAC audio
|
||||
|
||||
@command{ffmpeg} prints an error like
|
||||
|
||||
@example
|
||||
Undefined constant or missing '(' in 'baseline'
|
||||
Unable to parse option value "baseline"
|
||||
Error setting option profile to value baseline.
|
||||
@end example
|
||||
|
||||
Short answer: write @option{-profile:v} instead of @option{-profile}.
|
||||
|
||||
Long answer: this happens because the @option{-profile} option can apply to both
|
||||
video and audio. Specifically the AAC encoder also defines some profiles, none
|
||||
of which are named @var{baseline}.
|
||||
|
||||
The solution is to apply the @option{-profile} option to the video stream only
|
||||
by using @url{http://ffmpeg.org/ffmpeg.html#Stream-specifiers-1, Stream specifiers}.
|
||||
Appending @code{:v} to it will do exactly that.
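For instance (a hypothetical command line, not part of the original FAQ entry; the audio side assumes the experimental native AAC encoder of this era, hence @option{-strict experimental}):

@example
ffmpeg -i input.mov -vcodec libx264 -profile:v baseline -acodec aac -strict experimental output.mp4
@end example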
|
||||
|
||||
@section Using @option{-f lavfi}, audio becomes mono for no apparent reason.
|
||||
|
||||
Use @option{-dumpgraph -} to find out exactly where the channel layout is
|
||||
lost.
|
||||
|
||||
Most likely, it is through @code{auto-inserted aconvert}. Try to understand
|
||||
why the converting filter was needed at that place.
|
||||
|
||||
Just before the output is a likely place, as @option{-f lavfi} currently
|
||||
only support packed S16.
|
||||
|
||||
Then insert the correct @code{aconvert} explicitly in the filter graph,
|
||||
specifying the exact format.
|
||||
|
||||
@example
|
||||
aconvert=s16:stereo:packed
|
||||
@end example
|
||||
|
||||
@chapter Development
|
||||
|
||||
@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
|
||||
@@ -415,16 +377,4 @@ wrong if it is larger than the average!
|
||||
For example, if you have mixed 25 and 30 fps content, then r_frame_rate
|
||||
will be 150.
|
||||
|
||||
@section Why is @code{make fate} not running all tests?
|
||||
|
||||
Make sure you have the fate-suite samples and the @code{SAMPLES} Make variable
|
||||
or @code{FATE_SAMPLES} environment variable or the @code{--samples}
|
||||
@command{configure} option is set to the right path.
|
||||
|
||||
@section Why is @code{make fate} not finding the samples?
|
||||
|
||||
Do you happen to have a @code{~} character in the samples path to indicate a
|
||||
home directory? The value is used in ways where the shell cannot expand it,
|
||||
causing FATE to not find files. Just replace @code{~} by the full path.
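For example (hypothetical paths), prefer the expanded form:

@example
# instead of:  ./configure --samples=~/fate-suite
./configure --samples=/home/user/fate-suite
@end example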
|
||||
|
||||
@bye
|
||||
|
doc/fate.texi
@@ -5,170 +5,131 @@
|
||||
@center @titlefont{FATE Automated Testing Environment}
|
||||
@end titlepage
|
||||
|
||||
@node Top
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Introduction
|
||||
|
||||
FATE is an extended regression suite on the client-side and a means
|
||||
for results aggregation and presentation on the server-side.
|
||||
FATE provides a regression testsuite embedded within the FFmpeg build system.
|
||||
It can be run locally and optionally configured to send reports to a web
|
||||
aggregator and viewer @url{http://fate.ffmpeg.org}.
|
||||
|
||||
The first part of this document explains how you can use FATE from
|
||||
your FFmpeg source directory to test your ffmpeg binary. The second
|
||||
part describes how you can run FATE to submit the results to FFmpeg's
|
||||
FATE server.
|
||||
It is advised to run FATE before submitting patches to the current codebase
and to provide new tests when submitting patches that add additional features.
|
||||
|
||||
In any case you can have a look at the publicly viewable FATE results
by visiting this website:
|
||||
@chapter Running FATE
|
||||
|
||||
@url{http://fate.ffmpeg.org/}
|
||||
@section Samples and References
|
||||
In order to run, FATE needs a large amount of data (samples and references)
|
||||
that is provided separately from the actual source distribution.
|
||||
|
||||
This is especially recommended for all people contributing source
code to FFmpeg, as it makes it easy to see whether some test on some
platform broke with their recent contribution. This usually happens on
the platforms the developers could not test on.
|
||||
To inform the build system about the testsuite location, pass
|
||||
@option{--samples=<path to the samples>} to @command{configure} or set the
|
||||
@var{SAMPLES} Make variable or the @var{FATE_SAMPLES} environment variable
|
||||
to a suitable value.
|
||||
|
||||
The second part of this document describes how you can run FATE to
|
||||
submit your results to FFmpeg's FATE server. If you want to submit your
|
||||
results be sure to check that your combination of CPU, OS and compiler
|
||||
is not already listed on the above mentioned website.
|
||||
|
||||
In the third part you can find a comprehensive listing of FATE makefile
|
||||
targets and variables.
|
||||
|
||||
|
||||
@chapter Using FATE from your FFmpeg source directory
|
||||
|
||||
If you want to run FATE on your machine you need to have the samples
|
||||
in place. You can get the samples via the build target fate-rsync.
|
||||
Use this command from the top-level source directory:
|
||||
The dataset is available through @command{rsync}; it is possible to fetch
the current samples either with a plain rsync command or through a specific
@ref{Makefile target}.
|
||||
|
||||
@example
|
||||
make fate-rsync SAMPLES=fate-suite/
|
||||
make fate SAMPLES=fate-suite/
|
||||
# rsync -aL rsync://fate.ffmpeg.org/fate-suite/ fate-suite
|
||||
@end example
|
||||
|
||||
The above commands set the samples location by passing a makefile
|
||||
variable via command line. It is also possible to set the samples
|
||||
location at source configuration time by invoking configure with
|
||||
`--samples=<path to the samples directory>'. Afterwards you can
|
||||
invoke the makefile targets without setting the SAMPLES makefile
|
||||
variable. This is illustrated by the following commands:
|
||||
|
||||
@example
|
||||
./configure --samples=fate-suite/
|
||||
make fate-rsync
|
||||
make fate
|
||||
# make fate-rsync SAMPLES=fate-suite
|
||||
@end example
|
||||
|
||||
Yet another way to tell FATE about the location of the sample
|
||||
directory is by making sure the environment variable FATE_SAMPLES
|
||||
contains the path to your samples directory. This can be achieved
|
||||
by e.g. putting that variable in your shell profile or by setting
|
||||
it in your interactive session.
|
||||
|
||||
@example
|
||||
FATE_SAMPLES=fate-suite/ make fate
|
||||
@end example
|
||||
|
||||
@float NOTE
|
||||
Do not put a '~' character in the samples path to indicate a home
|
||||
directory. Because of shell nuances, this will cause FATE to fail.
|
||||
@end float
|
||||
|
||||
|
||||
@chapter Submitting the results to the FFmpeg result aggregation server
|
||||
|
||||
To submit your results to the server you should run fate through the
|
||||
shell script tests/fate.sh from the FFmpeg sources. This script needs
|
||||
to be invoked with a configuration file as its first argument.
|
||||
|
||||
@example
|
||||
tests/fate.sh /path/to/fate_config
|
||||
@end example
|
||||
|
||||
A configuration file template with comments describing the individual
|
||||
configuration variables can be found at @file{tests/fate_config.sh.template}.
|
||||
|
||||
@ifhtml
|
||||
The mentioned configuration template is also available here:
|
||||
@verbatiminclude ../tests/fate_config.sh.template
|
||||
@end ifhtml
|
||||
|
||||
Create a configuration that suits your needs, based on the configuration
|
||||
template. The `slot' configuration variable can be any string that is not
|
||||
yet used, but it is suggested that you name it adhering to the following
|
||||
pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
|
||||
itself will be sourced in a shell script, therefore all shell features may
|
||||
be used. This enables you to setup the environment as you need it for your
|
||||
build.
|
||||
|
||||
For your first test runs the `fate_recv' variable should be empty or
|
||||
commented out. This will run everything as normal except that it will omit
|
||||
the submission of the results to the server. The following files should be
|
||||
present in $workdir as specified in the configuration file:
|
||||
|
||||
@itemize
|
||||
@item configure.log
|
||||
@item compile.log
|
||||
@item test.log
|
||||
@item report
|
||||
@item version
|
||||
@end itemize
|
||||
|
||||
When you have everything working properly you can create an SSH key and
|
||||
send its public part to the FATE server administrator.
|
||||
|
||||
Configure your SSH client to use public key authentication with that key
|
||||
when connecting to the FATE server. Also do not forget to check the identity
|
||||
of the server and to accept its host key. This can usually be achieved by
|
||||
running your SSH client manually and killing it after you accepted the key.
|
||||
The FATE server's fingerprint is:
|
||||
|
||||
b1:31:c8:79:3f:04:1d:f8:f2:23:26:5a:fd:55:fa:92
|
||||
|
||||
The only thing left is to automate the execution of the fate.sh script and
|
||||
the synchronisation of the samples directory.
|
||||
|
||||
|
||||
@chapter FATE makefile targets and variables
|
||||
|
||||
@section Makefile targets
|
||||
@chapter Manual Run
|
||||
FATE regression test can be run through @command{make}.
|
||||
Specific Makefile targets and Makefile variables are available:
|
||||
|
||||
@anchor{Makefile target}
|
||||
@section FATE Makefile targets
|
||||
@table @option
|
||||
@item fate-rsync
|
||||
Download/synchronize sample files to the configured samples directory.
|
||||
|
||||
@item fate-list
|
||||
Will list all fate/regression test targets.
|
||||
|
||||
List all fate/regression test targets.
|
||||
@item fate-rsync
|
||||
Shortcut to download the fate test samples to the specified testsuite location.
|
||||
@item fate
|
||||
Run the FATE test suite (requires the fate-suite dataset).
|
||||
Run the FATE test suite (requires the fate-suite dataset).
|
||||
@end table
|
||||
|
||||
@section Makefile variables
|
||||
|
||||
@section Fate Makefile variables
|
||||
@table @option
|
||||
@item V
|
||||
Verbosity level, can be set to 0, 1 or 2.
|
||||
@itemize
|
||||
@item 0: show just the test arguments
|
||||
@item 1: show just the command used in the test
|
||||
@item 2: show everything
|
||||
@end itemize
|
||||
|
||||
Verbosity level, can be set to 0, 1 or 2.
|
||||
@table @option
|
||||
@item 0
|
||||
show just the test arguments
|
||||
@item 1
|
||||
show just the command used in the test
|
||||
@item 2
|
||||
show everything
|
||||
@end table
|
||||
@item SAMPLES
|
||||
Specify or override the path to the FATE samples at make time, it has a
|
||||
meaning only while running the regression tests.
|
||||
|
||||
Specify or override the path to the FATE samples at make time, it has a
|
||||
meaning only while running the regression tests.
|
||||
@item THREADS
|
||||
Specify how many threads to use while running regression tests, it is
|
||||
quite useful to detect thread-related regressions.
|
||||
Specify how many threads to use while running regression tests, it is
|
||||
quite useful to detect thread-related regressions.
|
||||
@end table
|
||||
|
||||
Example:
|
||||
@example
|
||||
make V=1 SAMPLES=/var/fate/samples THREADS=2 fate
|
||||
make V=1 SAMPLES=/var/fate/samples THREADS=2 fate
|
||||
@end example
|
||||
|
||||
@chapter Automated Tests
|
||||
In order to automatically testing specific configurations, e.g. multiple
|
||||
compilers, @command{tests/fate.sh} is provided.
|
||||
|
||||
This shell script builds FFmpeg, runs the regression tests and prepares a
|
||||
report that can be sent to @url{fate.ffmpeg.org} or directly examined locally.
|
||||
|
||||
@section Testing Profiles
|
||||
The configuration file passed to @command{fate.sh} is shell scripts as well.
|
||||
|
||||
It must provide at least a @var{slot} identifier, the @var{repo} from
|
||||
which fetch the sources, the @var{samples} directory, a @var{workdir} with
|
||||
enough space to build and run all the tests.
|
||||
Optional submit command @var{fate_recv} and a @var{comment} to describe
|
||||
the testing profile are available.
|
||||
|
||||
Additional optional parameter to tune the FFmpeg building and reporting process
|
||||
can be passed.
|
||||
|
||||
@example
|
||||
slot= # some unique identifier
|
||||
repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
|
||||
samples=/path/to/fate/samples
|
||||
workdir= # directory in which to do all the work
|
||||
fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
|
||||
comment= # optional description
|
||||
|
||||
# the following are optional and map to configure options
|
||||
arch=
|
||||
cpu=
|
||||
cross_prefix=
|
||||
cc=
|
||||
target_os=
|
||||
sysroot=
|
||||
target_exec=
|
||||
target_path=
|
||||
extra_cflags=
|
||||
extra_ldflags=
|
||||
extra_libs=
|
||||
extra_conf= # extra configure options not covered above
|
||||
|
||||
#make= # name of GNU make if not 'make'
|
||||
makeopts= # extra options passed to 'make'
|
||||
#tar= # command to create a tar archive from its arguments on
|
||||
# stdout, defaults to 'tar c'
|
||||
@end example
|
||||
|
||||
@section Submitting Reports
|
||||
In order to send reports you need to create an @command{ssh} key and send it
|
||||
to the fate server administrator.
|
||||
The current server fingerprint is @var{b1:31:c8:79:3f:04:1d:f8:f2:23:26:5a:fd:55:fa:92}
|
||||
|
||||
|
139
doc/fate.txt
Normal file
139
doc/fate.txt
Normal file
@@ -0,0 +1,139 @@
|
||||
FATE Automated Testing Environment
|
||||
==================================
|
||||
|
||||
FATE is an extended regression suite on the client-side and a means
|
||||
for results aggregation and presentation on the server-side.
|
||||
|
||||
The first part of this document explains how you can use FATE from
|
||||
your FFmpeg source directory to test your ffmpeg binary. The second
|
||||
part describes how you can run FATE to submit the results to FFmpeg's
|
||||
FATE server.
|
||||
|
||||
In any way you can have a look at the publicly viewable FATE results
|
||||
by visiting this website:
|
||||
|
||||
http://fate.ffmpeg.org/
|
||||
|
||||
This is especially recommended for all people contributing source
|
||||
code to FFmpeg, as it can be seen if some test on some platform broke
|
||||
with there recent contribution. This usually happens on the platforms
|
||||
the developers could not test on.
|
||||
|
||||
The second part of this document describes how you can run FATE to
|
||||
submit your results to FFmpeg's FATE server. If you want to submit your
|
||||
results be sure to check that your combination of CPU, OS and compiler
|
||||
is not already listed on the above mentioned website.
|
||||
|
||||
In the third part you can find a comprehensive listing of FATE makefile
|
||||
targets and variables.
|
||||
|
||||
|
||||
1. Using FATE from your FFmpeg source directory
|
||||
-----------------------------------------------
|
||||
|
||||
If you want to run FATE on your machine you need to have the samples
|
||||
in place. You can get the samples via the build target fate-rsync.
|
||||
Use this command from the top-level source directory:
|
||||
|
||||
# make fate-rsync SAMPLES=fate-suite/
|
||||
# make fate SAMPLES=fate-suite/
|
||||
|
||||
The above commands set the samples location by passing a makefile
|
||||
variable via command line. It is also possible to set the samples
|
||||
location at source configuration time by invoking configure with
|
||||
`--samples=<path to the samples directory>'. Afterwards you can
|
||||
invoke the makefile targets without setting the SAMPLES makefile
|
||||
variable. This is illustrated by the following commands:
|
||||
|
||||
# ./configure --samples=fate-suite/
|
||||
# make fate-rsync
|
||||
# make fate
|
||||
|
||||
Yet another way to tell FATE about the location of the sample
|
||||
directory is by making sure the environment variable FATE_SAMPLES
|
||||
contains the path to your samples directory. This can be achieved
|
||||
by e.g. putting that variable in your shell profile or by setting
|
||||
it in your interactive session.
|
||||
|
||||
# FATE_SAMPLES=fate-suite/ make fate
|
||||
|
||||
NOTE:
|
||||
Do not put a '~' character in the samples path to indicate a home
|
||||
directory. Because of shell nuances, this will cause FATE to fail.
|
||||
|
||||
|
||||
2. Submitting the results to the FFmpeg result aggregation server
|
||||
-----------------------------------------------------------------
|
||||
|
||||
To submit your results to the server you should run fate through the
|
||||
shell script tests/fate.sh from the FFmpeg sources. This script needs
|
||||
to be invoked with a configuration file as its first argument.
|
||||
|
||||
# tests/fate.sh /path/to/fate_config
|
||||
|
||||
A configuration file template with comments describing the individual
|
||||
configuration variables can be found at tests/fate_config.sh.template .
|
||||
|
||||
Create a configuration that suits your needs, based on the configuration
|
||||
template. The `slot' configuration variable can be any string that is not
|
||||
yet used, but it is suggested that you name it adhering to the following
|
||||
pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
|
||||
itself will be sourced in a shell script, therefore all shell features may
|
||||
be used. This enables you to setup the environment as you need it for your
|
||||
build.
|
||||
|
||||
For your first test runs the `fate_recv' variable should be empty or
|
||||
commented out. This will run everything as normal except that it will omit
|
||||
the submission of the results to the server. The following files should be
|
||||
present in $workdir as specified in the configuration file:
|
||||
|
||||
- configure.log
|
||||
- compile.log
|
||||
- test.log
|
||||
- report
|
||||
- version
|
||||
|
||||
When you have everything working properly you can create an SSH key and
|
||||
send its public part to the FATE server administrator.
|
||||
|
||||
Configure your SSH client to use public key authentication with that key
|
||||
when connecting to the FATE server. Also do not forget to check the identity
|
||||
of the server and to accept its host key. This can usually be achieved by
|
||||
running your SSH client manually and killing it after you accepted the key.
|
||||
The FATE server's fingerprint is:
|
||||
|
||||
b1:31:c8:79:3f:04:1d:f8:f2:23:26:5a:fd:55:fa:92
|
||||
|
||||
The only thing left is to automate the execution of the fate.sh script and
|
||||
the synchronisation of the samples directory.
|
||||
|
||||
|
||||
3. FATE makefile targets and variables
|
||||
--------------------------------------
|
||||
|
||||
FATE Makefile targets:
|
||||
|
||||
fate-list
|
||||
Will list all fate/regression test targets.
|
||||
|
||||
fate
|
||||
Run the FATE test suite (requires the fate-suite dataset).
|
||||
|
||||
FATE Makefile variables:
|
||||
|
||||
V
|
||||
Verbosity level, can be set to 0, 1 or 2.
|
||||
* 0: show just the test arguments
|
||||
* 1: show just the command used in the test
|
||||
* 2: show everything
|
||||
|
||||
SAMPLES
|
||||
Specify or override the path to the FATE samples at make time, it has a
|
||||
meaning only while running the regression tests.
|
||||
|
||||
THREADS
|
||||
Specify how many threads to use while running regression tests, it is
|
||||
quite useful to detect thread-related regressions.
|
||||
|
||||
Example:
|
||||
make V=1 SAMPLES=/var/fate/samples THREADS=2 fate
|
4561
doc/ffmpeg-mt-authorship.txt
Normal file
4561
doc/ffmpeg-mt-authorship.txt
Normal file
File diff suppressed because it is too large
Load Diff
@@ -29,7 +29,7 @@ rates and resize video on the fly with a high quality polyphase filter.
|
||||
ffmpeg reads from an arbitrary number of input "files" (which can be regular
|
||||
files, pipes, network streams, grabbing devices, etc.), specified by the
|
||||
@code{-i} option, and writes to an arbitrary number of output "files", which are
|
||||
specified by a plain output filename. Anything found on the command line which
|
||||
specified by a plain output filename. Anything found on the commandline which
|
||||
cannot be interpreted as an option is considered to be an output filename.
|
||||
|
||||
Each input or output file can in principle contain any number of streams of
|
||||
@@ -187,9 +187,9 @@ For example, for setting the title in the output file:
|
||||
ffmpeg -i in.avi -metadata title="my title" out.flv
|
||||
@end example
|
||||
|
||||
To set the language of the first audio stream:
|
||||
To set the language of the second stream:
|
||||
@example
|
||||
ffmpeg -i INPUT -metadata:s:a:1 language=eng OUTPUT
|
||||
ffmpeg -i INPUT -metadata:s:1 language=eng OUTPUT
|
||||
@end example
|
||||
|
||||
@item -target @var{type} (@emph{output})
|
||||
@@ -407,6 +407,10 @@ prefix is ``ffmpeg2pass''. The complete file name will be
|
||||
@file{PREFIX-N.log}, where N is a number specific to the output
|
||||
stream
|
||||
|
||||
Note that this option is overwritten by a local option of the same name
|
||||
when using @code{-vcodec libx264}. That option maps to the x264 option stats
|
||||
which has a different syntax.
|
||||
|
||||
@item -vlang @var{code}
|
||||
Set the ISO 639 language code (3 letters) of the current video stream.
|
||||
|
||||
@@ -825,36 +829,18 @@ The following example split the channels of a stereo input into streams:
|
||||
ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg
|
||||
@end example
|
||||
|
||||
Note that currently each output stream can only contain channels from a single
|
||||
input stream; you can't for example use "-map_channel" to pick multiple input
|
||||
audio channels contained in different streams (from the same or different files)
|
||||
and merge them into a single output stream. It is therefore not currently
|
||||
possible, for example, to turn two separate mono streams into a single stereo
|
||||
stream. However spliting a stereo stream into two single channel mono streams
|
||||
is possible.
|
||||
Note that "-map_channel" is currently limited to the scope of one input for
|
||||
each output; you can't for example use it to pick multiple input audio files
|
||||
and mix them into one single output.
|
||||
|
||||
@item -map_metadata[:@var{metadata_spec_out}] @var{infile}[:@var{metadata_spec_in}] (@emph{output,per-metadata})
|
||||
@item -map_metadata[:@var{metadata_type}][:@var{index}] @var{infile}[:@var{metadata_type}][:@var{index}] (@emph{output,per-metadata})
|
||||
Set metadata information of the next output file from @var{infile}. Note that
|
||||
those are file indices (zero-based), not filenames.
|
||||
Optional @var{metadata_spec_in/out} parameters specify, which metadata to copy.
|
||||
A metadata specifier can have the following forms:
|
||||
@table @option
|
||||
@item @var{g}
|
||||
global metadata, i.e. metadata that applies to the whole file
|
||||
|
||||
@item @var{s}[:@var{stream_spec}]
|
||||
per-stream metadata. @var{stream_spec} is a stream specifier as described
|
||||
in the @ref{Stream specifiers} chapter. In an input metadata specifier, the first
|
||||
matching stream is copied from. In an output metadata specifier, all matching
|
||||
streams are copied to.
|
||||
|
||||
@item @var{c}:@var{chapter_index}
|
||||
per-chapter metadata. @var{chapter_index} is the zero-based chapter index.
|
||||
|
||||
@item @var{p}:@var{program_index}
|
||||
per-program metadata. @var{program_index} is the zero-based program index.
|
||||
@end table
|
||||
If metadata specifier is omitted, it defaults to global.
|
||||
Optional @var{metadata_type} parameters specify, which metadata to copy - (g)lobal
|
||||
(i.e. metadata that applies to the whole file), per-(s)tream, per-(c)hapter or
|
||||
per-(p)rogram. All metadata specifiers other than global must be followed by the
|
||||
stream/chapter/program index. If metadata specifier is omitted, it defaults to
|
||||
global.
|
||||
|
||||
By default, global metadata is copied from the first input file,
|
||||
per-stream and per-chapter metadata is copied along with streams/chapters. These
|
||||
@@ -866,14 +852,6 @@ of the output file:
|
||||
@example
|
||||
ffmpeg -i in.ogg -map_metadata 0:s:0 out.mp3
|
||||
@end example
|
||||
|
||||
To do the reverse, i.e. copy global metadata to all audio streams:
|
||||
@example
|
||||
ffmpeg -i in.mkv -map_metadata:s:a 0:g out.mkv
|
||||
@end example
|
||||
Note that simple @code{0} would work as well in this example, since global
|
||||
metadata is assumed by default.
|
||||
|
||||
@item -map_chapters @var{input_file_index} (@emph{output})
|
||||
Copy chapters from input file with index @var{input_file_index} to the next
|
||||
output file. If no chapter mapping is specified, then chapters are copied from
|
||||
@@ -941,15 +919,15 @@ Thread count.
|
||||
Video sync method.
|
||||
|
||||
@table @option
|
||||
@item 0, passthrough
|
||||
@item 0
|
||||
Each frame is passed with its timestamp from the demuxer to the muxer.
|
||||
@item 1, cfr
|
||||
@item 1
|
||||
Frames will be duplicated and dropped to achieve exactly the requested
|
||||
constant framerate.
|
||||
@item 2, vfr
|
||||
@item 2
|
||||
Frames are passed through with their timestamp or dropped so as to
|
||||
prevent 2 frames from having the same timestamp.
|
||||
@item -1, auto
|
||||
@item -1
|
||||
Chooses between 1 and 2 depending on muxer capabilities. This is the
|
||||
default method.
|
||||
@end table
|
||||
@@ -1000,13 +978,6 @@ ffmpeg -i file.mov -an -vn -sbsf mov2textsub -c:s copy -f rawvideo sub.txt
|
||||
|
||||
@item -tag[:@var{stream_specifier}] @var{codec_tag} (@emph{per-stream})
|
||||
Force a tag/fourcc for matching streams.
|
||||
|
||||
@item -timecode @var{hh}:@var{mm}:@var{ss}SEP@var{ff}
|
||||
Specify Timecode for writing. @var{SEP} is ':' for non drop timecode and ';'
|
||||
(or '.') for drop.
|
||||
@example
|
||||
ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
|
||||
@end example
|
||||
@end table
|
||||
|
||||
@section Preset files
|
||||
|
@@ -94,11 +94,6 @@ For example for printing the output in JSON format, specify:
|
||||
For more details on the available output printing formats, see the
|
||||
Writers section below.
|
||||
|
||||
@item -show_error
|
||||
Show information about the error found when trying to probe the input.
|
||||
|
||||
The error information is printed within a section with name "ERROR".
|
||||
|
||||
@item -show_format
|
||||
Show information about the container format of the input multimedia
|
||||
stream.
|
||||
@@ -113,13 +108,6 @@ stream.
|
||||
The information for each single packet is printed within a dedicated
|
||||
section with name "PACKET".
|
||||
|
||||
@item -show_frames
|
||||
Show information about each frame contained in the input multimedia
|
||||
stream.
|
||||
|
||||
The information for each single frame is printed within a dedicated
|
||||
section with name "FRAME".
|
||||
|
||||
@item -show_streams
|
||||
Show information about each media stream contained in the input
|
||||
multimedia stream.
|
||||
@@ -127,29 +115,6 @@ multimedia stream.
|
||||
Each media stream information is printed within a dedicated section
|
||||
with name "STREAM".
|
||||
|
||||
@item -show_private_data, -private
|
||||
Show private data, that is data depending on the format of the
|
||||
particular shown element.
|
||||
This option is enabled by default, but you may need to disable it
|
||||
for specific uses, for example when creating XSD-compliant XML output.
|
||||
|
||||
@item -show_program_version
|
||||
Show information related to program version.
|
||||
|
||||
Version information is printed within a section with name
|
||||
"PROGRAM_VERSION".
|
||||
|
||||
@item -show_library_versions
|
||||
Show information related to library versions.
|
||||
|
||||
Version information for each library is printed within a section with
|
||||
name "LIBRARY_VERSION".
|
||||
|
||||
@item -show_versions
|
||||
Show information related to program and library versions. This is the
|
||||
equivalent of setting both @option{-show_program_version} and
|
||||
@option{-show_library_versions} options.
|
||||
|
||||
@item -i @var{input_file}
|
||||
Read @var{input_file}.
|
||||
|
||||
@@ -159,7 +124,7 @@ Read @var{input_file}.
|
||||
@chapter Writers
|
||||
@c man begin WRITERS
|
||||
|
||||
A writer defines the output format adopted by @command{ffprobe}, and will be
|
||||
A writer defines the output format adopted by @file{ffprobe}, and will be
|
||||
used for printing all the parts of the output.
|
||||
|
||||
A writer may accept one or more arguments, which specify the options to
|
||||
@@ -245,70 +210,8 @@ JSON based format.
|
||||
|
||||
Each section is printed using JSON notation.
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by ":".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item compact, c
|
||||
If set to 1 enable compact output, that is each section will be
|
||||
printed on a single line. Default value is 0.
|
||||
@end table
|
||||
|
||||
For more information about JSON, see @url{http://www.json.org/}.
|
||||
|
||||
@section xml
|
||||
XML based format.
|
||||
|
||||
The XML output is described in the XML schema description file
|
||||
@file{ffprobe.xsd} installed in the FFmpeg datadir.
|
||||
|
||||
Note that the output issued will be compliant to the
|
||||
@file{ffprobe.xsd} schema only when no special global output options
|
||||
(@option{unit}, @option{prefix}, @option{byte_binary_prefix},
|
||||
@option{sexagesimal} etc.) are specified.
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by ":".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item fully_qualified, q
|
||||
If set to 1 specify if the output should be fully qualified. Default
|
||||
value is 0.
|
||||
This is required for generating an XML file which can be validated
|
||||
through an XSD file.
|
||||
|
||||
@item xsd_compliant, x
|
||||
If set to 1 perform more checks for ensuring that the output is XSD
|
||||
compliant. Default value is 0.
|
||||
This option automatically sets @option{fully_qualified} to 1.
|
||||
@end table
|
||||
|
||||
For more information about the XML format, see
|
||||
@url{http://www.w3.org/XML/}.
|
||||
|
||||
@chapter Timecode
|
||||
|
||||
@command{ffprobe} supports Timecode extraction:
|
||||
|
||||
@itemize
|
||||
|
||||
@item MPEG1/2 timecode is extracted from the GOP, and is available in the video
|
||||
stream details (@option{-show_streams}, see @var{timecode}).
|
||||
|
||||
@item MOV timecode is extracted from tmcd track, so is available in the tmcd
|
||||
stream metadata (@option{-show_streams}, see @var{TAG:timecode}).
|
||||
|
||||
@item DV and GXF timecodes are available in format metadata
|
||||
(@option{-show_format}, see @var{TAG:timecode}).
|
||||
|
||||
@end itemize
|
||||
|
||||
@c man end WRITERS
|
||||
|
||||
@include decoders.texi
|
||||
|
164
doc/ffprobe.xsd
164
doc/ffprobe.xsd
@@ -1,164 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
|
||||
targetNamespace="http://www.ffmpeg.org/schema/ffprobe"
|
||||
xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">
|
||||
|
||||
<xsd:element name="ffprobe" type="ffprobe:ffprobeType"/>
|
||||
|
||||
<xsd:complexType name="ffprobeType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="framesType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float" />
|
||||
<xsd:attribute name="dts" type="xsd:long" />
|
||||
<xsd:attribute name="dts_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="size" type="xsd:long" use="required" />
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_dts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pos" type="xsd:long" />
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="nb_samples" type="xsd:long" />
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:long" />
|
||||
<xsd:attribute name="height" type="xsd:long" />
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pict_type" type="xsd:string"/>
|
||||
<xsd:attribute name="coded_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="display_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="interlaced_frame" type="xsd:int" />
|
||||
<xsd:attribute name="top_field_first" type="xsd:int" />
|
||||
<xsd:attribute name="repeat_pict" type="xsd:int" />
|
||||
<xsd:attribute name="reference" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamType">
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="codec_name" type="xsd:string" />
|
||||
<xsd:attribute name="codec_long_name" type="xsd:string" />
|
||||
<xsd:attribute name="codec_type" type="xsd:string" />
|
||||
<xsd:attribute name="codec_time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int"/>
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="channels" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_sample" type="xsd:int"/>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:string"/>
|
||||
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="formatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="filename" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="format_name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="format_long_name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="size" type="xsd:long"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:long"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="tagType">
|
||||
<xsd:attribute name="key" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="value" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="errorType">
|
||||
<xsd:attribute name="code" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="string" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programVersionType">
|
||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionType">
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="major" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="minor" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="micro" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="version" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
</xsd:schema>
|
@@ -147,7 +147,7 @@ that only captures in stereo and also requires that one channel be flipped.
|
||||
If you are one of these people, then export 'AUDIO_FLIP_LEFT=1' before
|
||||
starting ffmpeg.
|
||||
|
||||
@subsection The audio and video lose sync after a while.
|
||||
@subsection The audio and video loose sync after a while.
|
||||
|
||||
Yes, they do.
|
||||
|
||||
|
244
doc/filters.texi
244
doc/filters.texi
@@ -93,7 +93,7 @@ Follows a BNF description for the filtergraph syntax:
|
||||
@c man begin AUDIO FILTERS
|
||||
|
||||
When you configure your FFmpeg build, you can disable any of the
|
||||
existing filters using @code{--disable-filters}.
|
||||
existing filters using --disable-filters.
|
||||
The configure output will show the audio filters included in your
|
||||
build.
|
||||
|
||||
@@ -156,39 +156,6 @@ aformat=u8\\,s16:mono:packed
|
||||
aformat=s16:mono\\,stereo:all
|
||||
@end example
|
||||
|
||||
@section amerge
|
||||
|
||||
Merge two audio streams into a single multi-channel stream.
|
||||
|
||||
This filter does not need any argument.
|
||||
|
||||
If the channel layouts of the inputs are disjoint, and therefore compatible,
|
||||
the channel layout of the output will be set accordingly and the channels
|
||||
will be reordered as necessary. If the channel layouts of the inputs are not
|
||||
disjoint, the output will have all the channels of the first input then all
|
||||
the channels of the second input, in that order, and the channel layout of
|
||||
the output will be the default value corresponding to the total number of
|
||||
channels.
|
||||
|
||||
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
|
||||
is FC+BL+BR, then the output will be in 5.1, with the channels in the
|
||||
following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
|
||||
first input, b1 is the first channel of the second input).
|
||||
|
||||
On the other hand, if both input are in stereo, the output channels will be
|
||||
in the default order: a1, a2, b1, b2, and the channel layout will be
|
||||
arbitrarily set to 4.0, which may or may not be the expected value.
|
||||
|
||||
Both inputs must have the same sample rate, format and packing.
|
||||
|
||||
If inputs do not have the same duration, the output will stop with the
|
||||
shortest.
|
||||
|
||||
Example: merge two mono files into a stereo stream:
|
||||
@example
|
||||
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
|
||||
@end example
|
||||
|
||||
@section anull
|
||||
|
||||
Pass the audio source unchanged to the output.
|
||||
@@ -257,48 +224,6 @@ expressed in the form "[@var{c0} @var{c1} @var{c2} @var{c3} @var{c4} @var{c5}
|
||||
@var{c6} @var{c7}]"
|
||||
@end table
|
||||
|
||||
@section asplit
|
||||
|
||||
Pass on the input audio to two outputs. Both outputs are identical to
|
||||
the input audio.
|
||||
|
||||
For example:
|
||||
@example
|
||||
[in] asplit[out0], showaudio[out1]
|
||||
@end example
|
||||
|
||||
will create two separate outputs from the same input, one cropped and
|
||||
one padded.
|
||||
|
||||
@section astreamsync
|
||||
|
||||
Forward two audio streams and control the order the buffers are forwarded.
|
||||
|
||||
The argument to the filter is an expression deciding which stream should be
|
||||
forwarded next: if the result is negative, the first stream is forwarded; if
|
||||
the result is positive or zero, the second stream is forwarded. It can use
|
||||
the following variables:
|
||||
|
||||
@table @var
|
||||
@item b1 b2
|
||||
number of buffers forwarded so far on each stream
|
||||
@item s1 s2
|
||||
number of samples forwarded so far on each stream
|
||||
@item t1 t2
|
||||
current timestamp of each stream
|
||||
@end table
|
||||
|
||||
The default value is @code{t1-t2}, which means to always forward the stream
|
||||
that has a smaller timestamp.
|
||||
|
||||
Example: stress-test @code{amerge} by randomly sending buffers on the wrong
|
||||
input, while avoiding too much of a desynchronization:
|
||||
@example
|
||||
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
|
||||
[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
|
||||
[a2] [b2] amerge
|
||||
@end example
|
||||
|
||||
@section earwax
|
||||
|
||||
Make audio easier to listen to on headphones.
|
||||
@@ -315,9 +240,6 @@ Ported from SoX.
|
||||
Mix channels with specific gain levels. The filter accepts the output
|
||||
channel layout followed by a set of channels definitions.
|
||||
|
||||
This filter is also designed to remap efficiently the channels of an audio
|
||||
stream.
|
||||
|
||||
The filter accepts parameters of the form:
|
||||
"@var{l}:@var{outdef}:@var{outdef}:..."
|
||||
|
||||
@@ -345,8 +267,6 @@ If the `=' in a channel specification is replaced by `<', then the gains for
|
||||
that specification will be renormalized so that the total is 1, thus
|
||||
avoiding clipping noise.
|
||||
|
||||
@subsection Mixing examples
|
||||
|
||||
For example, if you want to down-mix from stereo to mono, but with a bigger
|
||||
factor for the left channel:
|
||||
@example
|
||||
@@ -359,80 +279,10 @@ A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
|
||||
pan=stereo: FL < FL + 0.5*FC + 0.6*BL + 0.6*SL : FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
|
||||
@end example
|
||||
|
||||
Note that @command{ffmpeg} integrates a default down-mix (and up-mix) system
|
||||
Note that @file{ffmpeg} integrates a default down-mix (and up-mix) system
|
||||
that should be preferred (see "-ac" option) unless you have very specific
|
||||
needs.
|
||||
|
||||
@subsection Remapping examples
|
||||
|
||||
The channel remapping will be effective if, and only if:
|
||||
|
||||
@itemize
|
||||
@item gain coefficients are zeroes or ones,
|
||||
@item only one input per channel output,
|
||||
@item the number of output channels is supported by libswresample (16 at the
|
||||
moment)
|
||||
@c if SWR_CH_MAX changes, fix the line above.
|
||||
@end itemize
|
||||
|
||||
If all these conditions are satisfied, the filter will notify the user ("Pure
|
||||
channel mapping detected"), and use an optimized and lossless method to do the
|
||||
remapping.
|
||||
|
||||
For example, if you have a 5.1 source and want a stereo audio stream by
|
||||
dropping the extra channels:
|
||||
@example
|
||||
pan="stereo: c0=FL : c1=FR"
|
||||
@end example
|
||||
|
||||
Given the same source, you can also switch front left and front right channels
|
||||
and keep the input channel layout:
|
||||
@example
|
||||
pan="5.1: c0=c1 : c1=c0 : c2=c2 : c3=c3 : c4=c4 : c5=c5"
|
||||
@end example
|
||||
|
||||
If the input is a stereo audio stream, you can mute the front left channel (and
|
||||
still keep the stereo channel layout) with:
|
||||
@example
|
||||
pan="stereo:c1=c1"
|
||||
@end example
|
||||
|
||||
Still with a stereo audio stream input, you can copy the right channel in both
|
||||
front left and right:
|
||||
@example
|
||||
pan="stereo: c0=FR : c1=FR"
|
||||
@end example
|
||||
|
||||
@section silencedetect
|
||||
|
||||
Detect silence in an audio stream.
|
||||
|
||||
This filter logs a message when it detects that the input audio volume is less
|
||||
or equal to a noise tolerance value for a duration greater or equal to the
|
||||
minimum detected noise duration.
|
||||
|
||||
The printed times and duration are expressed in seconds.
|
||||
|
||||
@table @option
|
||||
@item duration, d
|
||||
Set silence duration until notification (default is 2 seconds).
|
||||
|
||||
@item noise, n
|
||||
Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
|
||||
specified value) or amplitude ratio. Default is -60dB, or 0.001.
|
||||
@end table
|
||||
|
||||
Detect 5 seconds of silence with -50dB noise tolerance:
|
||||
@example
|
||||
silencedetect=n=-50dB:d=5
|
||||
@end example
|
||||
|
||||
Complete example with @command{ffmpeg} to detect silence with 0.0001 noise
|
||||
tolerance in @file{silence.mp3}:
|
||||
@example
|
||||
ffmpeg -f lavfi -i amovie=silence.mp3,silencedetect=noise=0.0001 -f null -
|
||||
@end example
|
||||
|
||||
@section volume
|
||||
|
||||
Adjust the input audio volume.
|
||||
@@ -442,7 +292,7 @@ how the audio volume will be increased or decreased.
|
||||
|
||||
Output values are clipped to the maximum value.
|
||||
|
||||
If @var{vol} is expressed as a decimal number, the output audio
|
||||
If @var{vol} is expressed as a decimal number, and the output audio
|
||||
volume is given by the relation:
|
||||
@example
|
||||
@var{output_volume} = @var{vol} * @var{input_volume}
|
||||
@@ -728,7 +578,7 @@ tools.
|
||||
@c man begin VIDEO FILTERS
|
||||
|
||||
When you configure your FFmpeg build, you can disable any of the
|
||||
existing filters using @code{--disable-filters}.
|
||||
existing filters using --disable-filters.
|
||||
The configure output will show the video filters included in your
|
||||
build.
|
||||
|
||||
@@ -1298,15 +1148,6 @@ the number of input frame, starting from 0
|
||||
|
||||
@item t
|
||||
timestamp expressed in seconds, NAN if the input timestamp is unknown
|
||||
|
||||
@item timecode
|
||||
initial timecode representation in "hh:mm:ss[:;.]ff" format. It can be used
|
||||
with or without text parameter. @var{rate} option must be specified.
|
||||
Note that timecode options are @emph{not} effective if FFmpeg is build with
|
||||
@code{--disable-avcodec}.
|
||||
|
||||
@item r, rate
|
||||
frame rate (timecode only)
|
||||
@end table
|
||||
|
||||
Some examples follow.
|
||||
@@ -1488,7 +1329,7 @@ format=yuv420p:yuv444p:yuv410p
|
||||
Apply a frei0r effect to the input video.
|
||||
|
||||
To enable compilation of this filter you need to install the frei0r
|
||||
header and configure FFmpeg with @code{--enable-frei0r}.
|
||||
header and configure FFmpeg with --enable-frei0r.
|
||||
|
||||
The filter supports the syntax:
|
||||
@example
|
||||
@@ -1847,7 +1688,7 @@ Pass the video source unchanged to the output.
|
||||
Apply video transform using libopencv.
|
||||
|
||||
To enable this filter install libopencv library and headers and
|
||||
configure FFmpeg with @code{--enable-libopencv}.
|
||||
configure FFmpeg with --enable-libopencv.
|
||||
|
||||
The filter takes the parameters: @var{filter_name}@{:=@}@var{filter_params}.
|
||||
|
||||
@@ -2157,6 +1998,9 @@ input sample aspect ratio
|
||||
@item dar
|
||||
input display aspect ratio, it is the same as (@var{iw} / @var{ih}) * @var{sar}
|
||||
|
||||
@item sar
|
||||
input sample aspect ratio
|
||||
|
||||
@item hsub, vsub
|
||||
horizontal and vertical chroma subsample values. For example for the
|
||||
pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
|
||||
@@ -2549,64 +2393,6 @@ For example:
|
||||
will create two separate outputs from the same input, one cropped and
|
||||
one padded.
|
||||
|
||||
@section thumbnail
|
||||
Select the most representative frame in a given sequence of consecutive frames.
|
||||
|
||||
It accepts as argument the frames batch size to analyze (default @var{N}=100);
|
||||
in a set of @var{N} frames, the filter will pick one of them, and then handle
|
||||
the next batch of @var{N} frames until the end.
|
||||
|
||||
Since the filter keeps track of the whole frames sequence, a bigger @var{N}
|
||||
value will result in a higher memory usage, so a high value is not recommended.
|
||||
|
||||
The following example extract one picture each 50 frames:
|
||||
@example
|
||||
thumbnail=50
|
||||
@end example
|
||||
|
||||
Complete example of a thumbnail creation with @command{ffmpeg}:
|
||||
@example
|
||||
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
|
||||
@end example
|
||||
|
||||
@section tinterlace
|
||||
|
||||
Perform various types of temporal field interlacing.
|
||||
|
||||
Frames are counted starting from 1, so the first input frame is
|
||||
considered odd.
|
||||
|
||||
This filter accepts a single parameter specifying the mode. Available
|
||||
modes are:
|
||||
|
||||
@table @samp
|
||||
@item 0
|
||||
Move odd frames into the upper field, even into the lower field,
|
||||
generating a double height frame at half framerate.
|
||||
|
||||
@item 1
|
||||
Only output even frames, odd frames are dropped, generating a frame with
|
||||
unchanged height at half framerate.
|
||||
|
||||
@item 2
|
||||
Only output odd frames, even frames are dropped, generating a frame with
|
||||
unchanged height at half framerate.
|
||||
|
||||
@item 3
|
||||
Expand each frame to full height, but pad alternate lines with black,
|
||||
generating a frame with double height at the same input framerate.
|
||||
|
||||
@item 4
|
||||
Interleave the upper field from odd frames with the lower field from
|
||||
even frames, generating a frame with unchanged height at half framerate.
|
||||
|
||||
@item 5
|
||||
Interleave the lower field from odd frames with the upper field from
|
||||
even frames, generating a frame with unchanged height at half framerate.
|
||||
@end table
|
||||
|
||||
Default mode is 0.
|
||||
|
||||
@section transpose
|
||||
|
||||
Transpose rows with columns in the input video and optionally flip it.
|
||||
@@ -3089,7 +2875,7 @@ will generate a "dc_luma" test pattern.
|
||||
Provide a frei0r source.
|
||||
|
||||
To enable compilation of this filter you need to install the frei0r
|
||||
header and configure FFmpeg with @code{--enable-frei0r}.
|
||||
header and configure FFmpeg with --enable-frei0r.
|
||||
|
||||
The source supports the syntax:
|
||||
@example
|
||||
@@ -3272,7 +3058,7 @@ number or a valid video frame rate abbreviation. The default value is
|
||||
@item sar
|
||||
Set the sample aspect ratio of the sourced video.
|
||||
|
||||
@item duration, d
|
||||
@item duration
|
||||
Set the video duration of the sourced video. The accepted syntax is:
|
||||
@example
|
||||
[-]HH[:MM[:SS[.m...]]]
|
||||
@@ -3282,14 +3068,6 @@ See also the function @code{av_parse_time()}.
|
||||
|
||||
If not specified, or the expressed duration is negative, the video is
|
||||
supposed to be generated forever.
|
||||
|
||||
@item decimals, n
|
||||
Set the number of decimals to show in the timestamp, only used in the
|
||||
@code{testsrc} source.
|
||||
|
||||
The displayed timestamp value will correspond to the original
|
||||
timestamp value multiplied by the power of 10 of the specified
|
||||
value. Default value is 0.
|
||||
@end table
|
||||
|
||||
For example the following:
|
||||
|
@@ -94,7 +94,7 @@ details), you must upgrade FFmpeg's license to GPL in order to use it.
|
||||
|
||||
|
||||
|
||||
@chapter Supported File Formats, Codecs or Features
|
||||
@chapter Supported File Formats and Codecs
|
||||
|
||||
You can use the @code{-formats} and @code{-codecs} options to have an exhaustive list.
|
||||
|
||||
@@ -134,7 +134,7 @@ library:
|
||||
@item Brute Force & Ignorance @tab @tab X
|
||||
@tab Used in the game Flash Traffic: City of Angels.
|
||||
@item BWF @tab X @tab X
|
||||
@item CRI ADX @tab X @tab X
|
||||
@item CRI ADX @tab @tab X
|
||||
@tab Audio-only format used in console video games.
|
||||
@item Discworld II BMV @tab @tab X
|
||||
@item Interplay C93 @tab @tab X
|
||||
@@ -307,7 +307,6 @@ library:
|
||||
@item RTP @tab X @tab X
|
||||
@item RTSP @tab X @tab X
|
||||
@item SAP @tab X @tab X
|
||||
@item SBG @tab @tab X
|
||||
@item SDP @tab @tab X
|
||||
@item Sega FILM/CPK @tab @tab X
|
||||
@tab Used in many Sega Saturn console games.
|
||||
@@ -317,9 +316,8 @@ library:
|
||||
@tab Used in Sierra CD-ROM games.
|
||||
@item Smacker @tab @tab X
|
||||
@tab Multimedia format used by many games.
|
||||
@item SMJPEG @tab X @tab X
|
||||
@tab Used in certain Loki game ports.
|
||||
@item Sony OpenMG (OMA) @tab X @tab X
|
||||
@item SMJPEG @tab @tab X
|
||||
@item Sony OpenMG (OMA) @tab @tab X
|
||||
@tab Audio format used in Sony Sonic Stage and Sony Vegas.
|
||||
@item Sony PlayStation STR @tab @tab X
|
||||
@item Sony Wave64 (W64) @tab @tab X
|
||||
@@ -399,8 +397,6 @@ following image formats are supported:
|
||||
@tab YUV, JPEG and some extension is not supported yet.
|
||||
@item Truevision Targa @tab X @tab X
|
||||
@tab Targa (.TGA) image format
|
||||
@item XWD @tab X @tab X
|
||||
@tab X Window Dump image format
|
||||
@end multitable
|
||||
|
||||
@code{X} means that encoding (resp. decoding) is supported.
|
||||
@@ -440,8 +436,6 @@ following image formats are supported:
|
||||
@item Autodesk Animator Flic video @tab @tab X
|
||||
@item Autodesk RLE @tab @tab X
|
||||
@tab fourcc: AASC
|
||||
@item Avid 1:1 10-bit RGB Packer @tab X @tab X
|
||||
@tab fourcc: AVrp
|
||||
@item AVS (Audio Video Standard) video @tab @tab X
|
||||
@tab Video encoding used by the Creature Shock game.
|
||||
@item Beam Software VB @tab @tab X
|
||||
@@ -577,8 +571,8 @@ following image formats are supported:
|
||||
@tab fourcc: 'smc '
|
||||
@item QuickTime video (RPZA) @tab @tab X
|
||||
@tab fourcc: rpza
|
||||
@item R10K AJA Kona 10-bit RGB Codec @tab X @tab X
|
||||
@item R210 Quicktime Uncompressed RGB 10-bit @tab X @tab X
|
||||
@item R10K AJA Kona 10-bit RGB Codec @tab @tab X
|
||||
@item R210 Quicktime Uncompressed RGB 10-bit @tab @tab X
|
||||
@item Raw Video @tab X @tab X
|
||||
@item RealVideo 1.0 @tab X @tab X
|
||||
@item RealVideo 2.0 @tab X @tab X
|
||||
@@ -610,9 +604,8 @@ following image formats are supported:
|
||||
@item Tiertex Limited SEQ video @tab @tab X
|
||||
@tab Codec used in DOS CD-ROM FlashBack game.
|
||||
@item Ut Video @tab @tab X
|
||||
@item v210 QuickTime uncompressed 4:2:2 10-bit @tab X @tab X
|
||||
@item v308 QuickTime uncompressed 4:4:4 @tab X @tab X
|
||||
@item v410 QuickTime uncompressed 4:4:4 10-bit @tab X @tab X
|
||||
@item V210 Quicktime Uncompressed 4:2:2 10-bit @tab X @tab X
|
||||
@item v410 Quicktime Uncompressed 4:4:4 10-bit @tab X @tab X
|
||||
@item VBLE Lossless Codec @tab @tab X
|
||||
@item VMware Screen Codec / VMware Video @tab @tab X
|
||||
@tab Codec used in videos captured by VMware.
|
||||
@@ -630,8 +623,6 @@ following image formats are supported:
|
||||
@item WMV7 @tab X @tab X
|
||||
@item YAMAHA SMAF @tab X @tab X
|
||||
@item Psygnosis YOP Video @tab @tab X
|
||||
@item yuv4 @tab X @tab X
|
||||
@tab libquicktime uncompressed packed 4:2:0
|
||||
@item ZLIB @tab X @tab X
|
||||
@tab part of LCL, encoder experimental
|
||||
@item Zip Motion Blocks Video @tab X @tab X
|
||||
@@ -866,15 +857,4 @@ performance on systems without hardware floating point support).
|
||||
|
||||
@code{X} means that input/output is supported.
|
||||
|
||||
@section Timecode
|
||||
|
||||
@multitable @columnfractions .4 .1 .1
|
||||
@item Codec/format @tab Read @tab Write
|
||||
@item DV @tab X @tab X
|
||||
@item GXF @tab X @tab X
|
||||
@item MOV @tab X @tab
|
||||
@item MPEG1/2 @tab X @tab X
|
||||
@item MXF @tab @tab X
|
||||
@end multitable
|
||||
|
||||
@bye
|
||||
|
@@ -196,12 +196,12 @@ device.
|
||||
Once you have created one or more JACK readable clients, you need to
|
||||
connect them to one or more JACK writable clients.
|
||||
|
||||
To connect or disconnect JACK clients you can use the @command{jack_connect}
|
||||
and @command{jack_disconnect} programs, or do it through a graphical interface,
|
||||
for example with @command{qjackctl}.
|
||||
To connect or disconnect JACK clients you can use the
|
||||
@file{jack_connect} and @file{jack_disconnect} programs, or do it
|
||||
through a graphical interface, for example with @file{qjackctl}.
|
||||
|
||||
To list the JACK clients and their properties you can invoke the command
|
||||
@command{jack_lsp}.
|
||||
@file{jack_lsp}.
|
||||
|
||||
Follows an example which shows how to capture a JACK readable client
|
||||
with @command{ffmpeg}.
|
||||
@@ -260,7 +260,7 @@ device.
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Create a color video stream and play it back with @command{ffplay}:
|
||||
Create a color video stream and play it back with @file{ffplay}:
|
||||
@example
|
||||
ffplay -f lavfi -graph "color=pink [out0]" dummy
|
||||
@end example
|
||||
@@ -280,14 +280,14 @@ ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [ou
|
||||
|
||||
@item
|
||||
Read an audio stream from a file using the amovie source and play it
|
||||
back with @command{ffplay}:
|
||||
back with @file{ffplay}:
|
||||
@example
|
||||
ffplay -f lavfi "amovie=test.wav"
|
||||
@end example
|
||||
|
||||
@item
|
||||
Read an audio stream and a video stream and play it back with
|
||||
@command{ffplay}:
|
||||
@file{ffplay}:
|
||||
@example
|
||||
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
|
||||
@end example
|
||||
@@ -380,7 +380,7 @@ $ ffmpeg -f openal -i '' out.ogg
|
||||
@end example
|
||||
|
||||
Capture from two devices simultaneously, writing to two different files,
|
||||
within the same @command{ffmpeg} command:
|
||||
within the same @file{ffmpeg} command:
|
||||
@example
|
||||
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
|
||||
@end example
|
||||
@@ -415,7 +415,7 @@ The filename to provide to the input device is a source device or the
|
||||
string "default"
|
||||
|
||||
To list the pulse source devices and their properties you can invoke
|
||||
the command @command{pactl list sources}.
|
||||
the command @file{pactl list sources}.
|
||||
|
||||
@example
|
||||
ffmpeg -f pulse -i default /tmp/pulse.wav
|
||||
@@ -515,9 +515,9 @@ kind @file{/dev/video@var{N}}, where @var{N} is a number associated to
|
||||
the device.
|
||||
|
||||
Video4Linux and Video4Linux2 devices only support a limited set of
|
||||
@var{width}x@var{height} sizes and framerates. You can check which are
|
||||
supported for example with the command @command{dov4l} for Video4Linux
|
||||
devices and using @command{-list_formats all} for Video4Linux2 devices.
|
||||
@var{width}x@var{height} sizes and frame rates. You can check which are
|
||||
supported for example with the command @file{dov4l} for Video4Linux
|
||||
devices and the command @file{v4l-info} for Video4Linux2 devices.
|
||||
|
||||
If the size for the device is set to 0x0, the input device will
|
||||
try to auto-detect the size to use.
|
||||
@@ -579,7 +579,7 @@ default to 0.
|
||||
|
||||
Check the X11 documentation (e.g. man X) for more detailed information.
|
||||
|
||||
Use the @command{dpyinfo} program for getting basic information about the
|
||||
Use the @file{dpyinfo} program for getting basic information about the
|
||||
properties of your X11 display (e.g. grep for "name" or "dimensions").
|
||||
|
||||
For example to grab from @file{:0.0} using @command{ffmpeg}:
|
||||
|
@@ -24,7 +24,7 @@ a mail for every change to every issue.
|
||||
The subscription URL for the ffmpeg-trac list is:
|
||||
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
|
||||
The URL of the webinterface of the tracker is:
|
||||
http(s)://ffmpeg.org/trac/ffmpeg
|
||||
http(s)://trac.ffmpeg.org
|
||||
|
||||
Type:
|
||||
-----
|
||||
|
@@ -43,13 +43,13 @@ The result will be that in output the top half of the video is mirrored
onto the bottom half.

Video filters are loaded using the @var{-vf} option passed to
@command{ffmpeg} or to @command{ffplay}. Filters in the same linear
chain are separated by commas. In our example, @var{split, fifo,
overlay} are in one linear chain, and @var{fifo, crop, vflip} are in
another. The points where the linear chains join are labeled by names
enclosed in square brackets. In our example, that is @var{[T1]} and
@var{[T2]}. The magic labels @var{[in]} and @var{[out]} are the points
where video is input and output.
ffmpeg or to ffplay. Filters in the same linear chain are separated by
commas. In our example, @var{split, fifo, overlay} are in one linear
chain, and @var{fifo, crop, vflip} are in another. The points where
the linear chains join are labeled by names enclosed in square
brackets. In our example, that is @var{[T1]} and @var{[T2]}. The magic
labels @var{[in]} and @var{[out]} are the points where video is input
and output.
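The graph being described looks roughly like the following sketch; the crop and overlay arguments are only illustrative and their exact syntax depends on the FFmpeg version in use:
@example
ffmpeg -i in.avi -vf "[in] split [T1], fifo, [T2] overlay=0:240 [out]; [T1] fifo, crop=0:0:-1:240, vflip [T2]" out.avi
@end example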

Some filters take in input a list of parameters: they are specified
after the filter name and an equal sign, and are separated each other

@@ -90,7 -6 @@ ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -

See also the @ref{crc} muxer.

@anchor{image2}
@section image2

Image file muxer.
@@ -286,35 +285,4 @@ For example a 3D WebM clip can be created using the following command line:
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
@end example

@section segment

Basic stream segmenter.

The segmenter muxer outputs streams to a number of separate files of nearly
fixed duration. Output filename pattern can be set in a fashion similar to
@ref{image2}.

Every segment starts with a video keyframe, if a video stream is present.
The segment muxer works best with a single constant frame rate video.

Optionally it can generate a flat list of the created segments, one segment
per line.

@table @option
@item segment_format @var{format}
Override the inner container format, by default it is guessed by the filename
extension.
@item segment_time @var{t}
Set segment duration to @var{t} seconds.
@item segment_list @var{name}
Generate also a listfile named @var{name}.
@item segment_list_size @var{size}
Overwrite the listfile once it reaches @var{size} entries.
@end table

@example
ffmpeg -i in.mkv -c copy -map 0 -f segment -list out.list out%03d.nut
@end example
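A sketch combining the options documented in the table above (the option names are as listed; treat the exact command-line spelling as an assumption to verify against your build):
@example
ffmpeg -i in.mkv -c copy -map 0 -f segment -segment_time 10 -segment_list out.list -segment_format nut out%03d.nut
@end example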


@c man end MUXERS

@@ -60,7 +60,7 @@ If not specified it defaults to the size of the input video.

@subsection Examples

The following command shows the @command{ffmpeg} output is an
The following command shows the @file{ffmpeg} output is an
SDL window, forcing its size to the qcif format:
@example
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"

@@ -235,8 +235,6 @@ make install
Your install path (@file{/usr/local/} by default) should now have the
necessary DLL and LIB files under the @file{bin} directory.

@end enumerate

Alternatively, build the libraries with a cross compiler, according to
the instructions below in @ref{Cross compilation for Windows with Linux}.


@@ -52,7 +52,7 @@ resource to be concatenated, each one possibly specifying a distinct
protocol.

For example to read a sequence of files @file{split1.mpeg},
@file{split2.mpeg}, @file{split3.mpeg} with @command{ffplay} use the
@file{split2.mpeg}, @file{split3.mpeg} with @file{ffplay} use the
command:
@example
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
@@ -155,8 +155,8 @@ be seekable, so they will fail with the pipe output protocol.

Real-Time Messaging Protocol.

The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
content across a TCP/IP network.
The Real-Time Messaging Protocol (RTMP) is used for streaming
multimedia content across a TCP/IP network.

The required syntax is:
@example
@@ -183,7 +183,7 @@ application specified in @var{app}, may be prefixed by "mp4:".

@end table

For example to read with @command{ffplay} a multimedia resource named
For example to read with @file{ffplay} a multimedia resource named
"sample" from the application "vod" from an RTMP server "myserver":
@example
ffplay rtmp://myserver/vod/sample
@@ -224,7 +224,7 @@ For example, to stream a file in real-time to an RTMP server using
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
@end example

To play the same stream using @command{ffplay}:
To play the same stream using @file{ffplay}:
@example
ffplay "rtmp://myserver/live/mystream live=1"
@end example
@@ -249,7 +249,7 @@ The required syntax for a RTSP url is:
rtsp://@var{hostname}[:@var{port}]/@var{path}
@end example

The following options (set on the @command{ffmpeg}/@command{ffplay} command
The following options (set on the @command{ffmpeg}/@file{ffplay} command
line, or set in code via @code{AVOption}s or in @code{avformat_open_input}),
are supported:

@@ -288,7 +288,7 @@ When receiving data over UDP, the demuxer tries to reorder received packets
order for this to be enabled, a maximum delay must be specified in the
@code{max_delay} field of AVFormatContext.
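As an illustration, the reordering delay can also be requested from the command line; the value is in microseconds, and the option name is assumed here to map onto the @code{max_delay} field mentioned above:
@example
ffplay -max_delay 500000 rtsp://@var{hostname}/@var{path}
@end example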

When watching multi-bitrate Real-RTSP streams with @command{ffplay}, the
When watching multi-bitrate Real-RTSP streams with @file{ffplay}, the
streams to display can be chosen with @code{-vst} @var{n} and
@code{-ast} @var{n} for video and audio respectively, and can be switched
on the fly by pressing @code{v} and @code{a}.
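For example, to start playback with the second video and first audio stream (the stream numbers are purely illustrative):
@example
ffplay -vst 2 -ast 1 rtsp://@var{hostname}/@var{path}
@end example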
@@ -365,13 +365,13 @@ To broadcast a stream on the local subnet, for watching in VLC:
ffmpeg -re -i @var{input} -f sap sap://224.0.0.255?same_port=1
@end example

Similarly, for watching in @command{ffplay}:
Similarly, for watching in ffplay:

@example
ffmpeg -re -i @var{input} -f sap sap://224.0.0.255
@end example

And for watching in @command{ffplay}, over IPv6:
And for watching in ffplay, over IPv6:

@example
ffmpeg -re -i @var{input} -f sap sap://[ff0e::1:2:3:4]

@@ -18,7 +18,7 @@ essential that changes to their codebase are publicly visible, clean and
easy reviewable that again leads us to:
* use of a revision control system like git
* separation of cosmetic from non-cosmetic changes (this is almost entirely
ignored by mentors and students in soc 2006 which might lead to a surprise
ignored by mentors and students in soc 2006 which might lead to a suprise
when the code will be reviewed at the end before a possible inclusion in
FFmpeg, individual changes were generally not reviewable due to cosmetics).
* frequent commits, so that comments can be provided early

72
ffserver.c
@@ -1,4 +1,5 @@
|
||||
/*
|
||||
* Multiple format streaming server
|
||||
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
@@ -18,11 +19,6 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* multiple format streaming server based on the FFmpeg libraries
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
#if !HAVE_CLOSESOCKET
|
||||
#define closesocket close
|
||||
@@ -30,16 +26,13 @@
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include "libavformat/avformat.h"
|
||||
// FIXME those are internal headers, avserver _really_ shouldn't use them
|
||||
#include "libavformat/ffm.h"
|
||||
#include "libavformat/network.h"
|
||||
#include "libavformat/os_support.h"
|
||||
#include "libavformat/rtpdec.h"
|
||||
#include "libavformat/rtsp.h"
|
||||
// XXX for ffio_open_dyn_packet_buffer, to be removed
|
||||
#include "libavformat/avio_internal.h"
|
||||
#include "libavformat/internal.h"
|
||||
#include "libavformat/url.h"
|
||||
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/lfg.h"
|
||||
#include "libavutil/dict.h"
|
||||
@@ -482,7 +475,7 @@ static void start_children(FFStream *feed)
|
||||
slash++;
|
||||
strcpy(slash, "ffmpeg");
|
||||
|
||||
http_log("Launch command line: ");
|
||||
http_log("Launch commandline: ");
|
||||
http_log("%s ", pathname);
|
||||
for (i = 1; feed->child_argv[i] && feed->child_argv[i][0]; i++)
|
||||
http_log("%s ", feed->child_argv[i]);
|
||||
@@ -502,10 +495,7 @@ static void start_children(FFStream *feed)
|
||||
}
|
||||
|
||||
/* This is needed to make relative pathnames work */
|
||||
if (chdir(my_program_dir) < 0) {
|
||||
http_log("chdir failed\n");
|
||||
exit(1);
|
||||
}
|
||||
chdir(my_program_dir);
|
||||
|
||||
signal(SIGPIPE, SIG_DFL);
|
||||
|
||||
@@ -859,7 +849,7 @@ static void close_connection(HTTPContext *c)
|
||||
if (st->codec->codec)
|
||||
avcodec_close(st->codec);
|
||||
}
|
||||
avformat_close_input(&c->fmt_in);
|
||||
av_close_input_file(c->fmt_in);
|
||||
}
|
||||
|
||||
/* free RTP output streams if any */
|
||||
@@ -877,7 +867,7 @@ static void close_connection(HTTPContext *c)
|
||||
}
|
||||
h = c->rtp_handles[i];
|
||||
if (h)
|
||||
ffurl_close(h);
|
||||
url_close(h);
|
||||
}
|
||||
|
||||
ctx = &c->fmt_ctx;
|
||||
@@ -2122,6 +2112,22 @@ static void compute_status(HTTPContext *c)
|
||||
c->buffer_end = c->pb_buffer + len;
|
||||
}
|
||||
|
||||
/* check if the parser needs to be opened for stream i */
|
||||
static void open_parser(AVFormatContext *s, int i)
|
||||
{
|
||||
AVStream *st = s->streams[i];
|
||||
AVCodec *codec;
|
||||
|
||||
if (!st->codec->codec) {
|
||||
codec = avcodec_find_decoder(st->codec->codec_id);
|
||||
if (codec && (codec->capabilities & CODEC_CAP_PARSE_ONLY)) {
|
||||
st->codec->parse_only = 1;
|
||||
if (avcodec_open2(st->codec, codec, NULL) < 0)
|
||||
st->codec->parse_only = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int open_input_stream(HTTPContext *c, const char *info)
|
||||
{
|
||||
char buf[128];
|
||||
@@ -2163,10 +2169,14 @@ static int open_input_stream(HTTPContext *c, const char *info)
|
||||
c->fmt_in = s;
|
||||
if (strcmp(s->iformat->name, "ffm") && avformat_find_stream_info(c->fmt_in, NULL) < 0) {
|
||||
http_log("Could not find stream info '%s'\n", input_filename);
|
||||
avformat_close_input(&s);
|
||||
av_close_input_file(s);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* open each parser */
|
||||
for(i=0;i<s->nb_streams;i++)
|
||||
open_parser(s, i);
|
||||
|
||||
/* choose stream as clock source (we favorize video stream if
|
||||
present) for packet sending */
|
||||
c->pts_stream_index = 0;
|
||||
@@ -2258,6 +2268,7 @@ static int http_prepare_data(HTTPContext *c)
|
||||
* Default value from FFmpeg
|
||||
* Try to set it use configuration option
|
||||
*/
|
||||
c->fmt_ctx.preload = (int)(0.5*AV_TIME_BASE);
|
||||
c->fmt_ctx.max_delay = (int)(0.7*AV_TIME_BASE);
|
||||
|
||||
if (avformat_write_header(&c->fmt_ctx, NULL) < 0) {
|
||||
@@ -2300,7 +2311,8 @@ static int http_prepare_data(HTTPContext *c)
|
||||
return 0;
|
||||
} else {
|
||||
if (c->stream->loop) {
|
||||
avformat_close_input(&c->fmt_in);
|
||||
av_close_input_file(c->fmt_in);
|
||||
c->fmt_in = NULL;
|
||||
if (open_input_stream(c, "") < 0)
|
||||
goto no_loop;
|
||||
goto redo;
|
||||
@@ -2376,7 +2388,7 @@ static int http_prepare_data(HTTPContext *c)
|
||||
if (c->rtp_protocol == RTSP_LOWER_TRANSPORT_TCP)
|
||||
max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
|
||||
else
|
||||
max_packet_size = c->rtp_handles[c->packet_stream_index]->max_packet_size;
|
||||
max_packet_size = url_get_max_packet_size(c->rtp_handles[c->packet_stream_index]);
|
||||
ret = ffio_open_dyn_packet_buf(&ctx->pb, max_packet_size);
|
||||
} else {
|
||||
ret = avio_open_dyn_buf(&ctx->pb);
|
||||
@@ -2529,8 +2541,8 @@ static int http_send_data(HTTPContext *c)
|
||||
} else {
|
||||
/* send RTP packet directly in UDP */
|
||||
c->buffer_ptr += 4;
|
||||
ffurl_write(c->rtp_handles[c->packet_stream_index],
|
||||
c->buffer_ptr, len);
|
||||
url_write(c->rtp_handles[c->packet_stream_index],
|
||||
c->buffer_ptr, len);
|
||||
c->buffer_ptr += len;
|
||||
/* here we continue as we can send several packets per 10 ms slot */
|
||||
}
|
||||
@@ -2724,7 +2736,7 @@ static int http_receive_data(HTTPContext *c)
|
||||
|
||||
/* Now we have the actual streams */
|
||||
if (s->nb_streams != feed->nb_streams) {
|
||||
avformat_close_input(&s);
|
||||
av_close_input_stream(s);
|
||||
av_free(pb);
|
||||
http_log("Feed '%s' stream number does not match registered feed\n",
|
||||
c->stream->feed_filename);
|
||||
@@ -2737,7 +2749,7 @@ static int http_receive_data(HTTPContext *c)
|
||||
avcodec_copy_context(fst->codec, st->codec);
|
||||
}
|
||||
|
||||
avformat_close_input(&s);
|
||||
av_close_input_stream(s);
|
||||
av_free(pb);
|
||||
}
|
||||
c->buffer_ptr = c->buffer;
|
||||
@@ -3413,10 +3425,10 @@ static int rtp_new_av_stream(HTTPContext *c,
|
||||
"rtp://%s:%d", ipaddr, ntohs(dest_addr->sin_port));
|
||||
}
|
||||
|
||||
if (ffurl_open(&h, ctx->filename, AVIO_FLAG_WRITE, NULL, NULL) < 0)
|
||||
if (url_open(&h, ctx->filename, AVIO_FLAG_WRITE) < 0)
|
||||
goto fail;
|
||||
c->rtp_handles[stream_index] = h;
|
||||
max_packet_size = h->max_packet_size;
|
||||
max_packet_size = url_get_max_packet_size(h);
|
||||
break;
|
||||
case RTSP_LOWER_TRANSPORT_TCP:
|
||||
/* RTP/TCP case */
|
||||
@@ -3439,7 +3451,7 @@ static int rtp_new_av_stream(HTTPContext *c,
|
||||
if (avformat_write_header(ctx, NULL) < 0) {
|
||||
fail:
|
||||
if (h)
|
||||
ffurl_close(h);
|
||||
url_close(h);
|
||||
av_free(ctx);
|
||||
return -1;
|
||||
}
|
||||
@@ -3476,7 +3488,7 @@ static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec, int cop
|
||||
}
|
||||
fst->priv_data = av_mallocz(sizeof(FeedData));
|
||||
fst->index = stream->nb_streams;
|
||||
avpriv_set_pts_info(fst, 33, 1, 90000);
|
||||
av_set_pts_info(fst, 33, 1, 90000);
|
||||
fst->sample_aspect_ratio = codec->sample_aspect_ratio;
|
||||
stream->streams[stream->nb_streams++] = fst;
|
||||
return fst;
|
||||
@@ -3617,7 +3629,7 @@ static void build_file_streams(void)
|
||||
if (avformat_find_stream_info(infile, NULL) < 0) {
|
||||
http_log("Could not find codec parameters from '%s'\n",
|
||||
stream->feed_filename);
|
||||
avformat_close_input(&infile);
|
||||
av_close_input_file(infile);
|
||||
goto fail;
|
||||
}
|
||||
extract_mpeg4_header(infile);
|
||||
@@ -3625,7 +3637,7 @@ static void build_file_streams(void)
|
||||
for(i=0;i<infile->nb_streams;i++)
|
||||
add_av_stream1(stream, infile->streams[i]->codec, 1);
|
||||
|
||||
avformat_close_input(&infile);
|
||||
av_close_input_file(infile);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3715,7 +3727,7 @@ static void build_feed_streams(void)
|
||||
http_log("Deleting feed file '%s' as stream counts differ (%d != %d)\n",
|
||||
feed->feed_filename, s->nb_streams, feed->nb_streams);
|
||||
|
||||
avformat_close_input(&s);
|
||||
av_close_input_file(s);
|
||||
} else
|
||||
http_log("Deleting feed file '%s' as it appears to be corrupt\n",
|
||||
feed->feed_filename);
|
||||
|
@@ -132,8 +132,10 @@ typedef struct FourXContext{
|
||||
AVFrame current_picture, last_picture;
|
||||
GetBitContext pre_gb; ///< ac/dc prefix
|
||||
GetBitContext gb;
|
||||
GetByteContext g;
|
||||
GetByteContext g2;
|
||||
const uint8_t *bytestream;
|
||||
const uint8_t *bytestream_end;
|
||||
const uint16_t *wordstream;
|
||||
const uint16_t *wordstream_end;
|
||||
int mv[256];
|
||||
VLC pre_vlc;
|
||||
int last_dc;
|
||||
@@ -328,11 +330,11 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int lo
|
||||
assert(code>=0 && code<=6);
|
||||
|
||||
if(code == 0){
|
||||
if (f->g.buffer_end - f->g.buffer < 1){
|
||||
if (f->bytestream_end - f->bytestream < 1){
|
||||
av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
|
||||
return;
|
||||
}
|
||||
src += f->mv[ *f->g.buffer++ ];
|
||||
src += f->mv[ *f->bytestream++ ];
|
||||
if(start > src || src > end){
|
||||
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
|
||||
return;
|
||||
@@ -349,37 +351,37 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int lo
|
||||
}else if(code == 3 && f->version<2){
|
||||
mcdc(dst, src, log2w, h, stride, 1, 0);
|
||||
}else if(code == 4){
|
||||
if (f->g.buffer_end - f->g.buffer < 1){
|
||||
if (f->bytestream_end - f->bytestream < 1){
|
||||
av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
|
||||
return;
|
||||
}
|
||||
src += f->mv[ *f->g.buffer++ ];
|
||||
src += f->mv[ *f->bytestream++ ];
|
||||
if(start > src || src > end){
|
||||
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
|
||||
return;
|
||||
}
|
||||
if (f->g2.buffer_end - f->g2.buffer < 1){
|
||||
if (f->wordstream_end - f->wordstream < 1){
|
||||
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
|
||||
return;
|
||||
}
|
||||
mcdc(dst, src, log2w, h, stride, 1, bytestream2_get_le16(&f->g2));
|
||||
mcdc(dst, src, log2w, h, stride, 1, av_le2ne16(*f->wordstream++));
|
||||
}else if(code == 5){
|
||||
if (f->g2.buffer_end - f->g2.buffer < 1){
|
||||
if (f->wordstream_end - f->wordstream < 1){
|
||||
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
|
||||
return;
|
||||
}
|
||||
mcdc(dst, src, log2w, h, stride, 0, bytestream2_get_le16(&f->g2));
|
||||
mcdc(dst, src, log2w, h, stride, 0, av_le2ne16(*f->wordstream++));
|
||||
}else if(code == 6){
|
||||
if (f->g2.buffer_end - f->g2.buffer < 2){
|
||||
if (f->wordstream_end - f->wordstream < 2){
|
||||
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
|
||||
return;
|
||||
}
|
||||
if(log2w){
|
||||
dst[0] = bytestream2_get_le16(&f->g2);
|
||||
dst[1] = bytestream2_get_le16(&f->g2);
|
||||
dst[0] = av_le2ne16(*f->wordstream++);
|
||||
dst[1] = av_le2ne16(*f->wordstream++);
|
||||
}else{
|
||||
dst[0 ] = bytestream2_get_le16(&f->g2);
|
||||
dst[stride] = bytestream2_get_le16(&f->g2);
|
||||
dst[0 ] = av_le2ne16(*f->wordstream++);
|
||||
dst[stride] = av_le2ne16(*f->wordstream++);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -391,7 +393,7 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){
|
||||
uint16_t *src= (uint16_t*)f->last_picture.data[0];
|
||||
uint16_t *dst= (uint16_t*)f->current_picture.data[0];
|
||||
const int stride= f->current_picture.linesize[0]>>1;
|
||||
unsigned int bitstream_size, bytestream_size, wordstream_size, extra, bytestream_offset, wordstream_offset;
|
||||
unsigned int bitstream_size, bytestream_size, wordstream_size, extra;
|
||||
|
||||
if(f->version>1){
|
||||
extra=20;
|
||||
@@ -423,10 +425,10 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){
|
||||
memset((uint8_t*)f->bitstream_buffer + bitstream_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
init_get_bits(&f->gb, f->bitstream_buffer, 8*bitstream_size);
|
||||
|
||||
wordstream_offset = extra + bitstream_size;
|
||||
bytestream_offset = extra + bitstream_size + wordstream_size;
|
||||
bytestream2_init(&f->g2, buf + wordstream_offset, length - wordstream_offset);
|
||||
bytestream2_init(&f->g, buf + bytestream_offset, length - bytestream_offset);
|
||||
f->wordstream= (const uint16_t*)(buf + extra + bitstream_size);
|
||||
f->wordstream_end= f->wordstream + wordstream_size/2;
|
||||
f->bytestream= buf + extra + bitstream_size + wordstream_size;
|
||||
f->bytestream_end = f->bytestream + bytestream_size;
|
||||
|
||||
init_mv(f);
|
||||
|
||||
@@ -438,6 +440,15 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){
|
||||
dst += 8*stride;
|
||||
}
|
||||
|
||||
if( bitstream_size != (get_bits_count(&f->gb)+31)/32*4
|
||||
|| (((const char*)f->wordstream - (const char*)buf + 2)&~2) != extra + bitstream_size + wordstream_size
|
||||
|| (((const char*)f->bytestream - (const char*)buf + 3)&~3) != extra + bitstream_size + wordstream_size + bytestream_size)
|
||||
av_log(f->avctx, AV_LOG_ERROR, " %d %td %td bytes left\n",
|
||||
bitstream_size - (get_bits_count(&f->gb)+31)/32*4,
|
||||
-(((const char*)f->bytestream - (const char*)buf + 3)&~3) + (extra + bitstream_size + wordstream_size + bytestream_size),
|
||||
-(((const char*)f->wordstream - (const char*)buf + 2)&~2) + (extra + bitstream_size + wordstream_size)
|
||||
);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -643,17 +654,9 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length){
|
||||
int x, y, x2, y2;
|
||||
const int width= f->avctx->width;
|
||||
const int height= f->avctx->height;
|
||||
const int mbs = (FFALIGN(width, 16) >> 4) * (FFALIGN(height, 16) >> 4);
|
||||
uint16_t *dst= (uint16_t*)f->current_picture.data[0];
|
||||
const int stride= f->current_picture.linesize[0]>>1;
|
||||
const uint8_t *buf_end = buf + length;
|
||||
GetByteContext g3;
|
||||
|
||||
if(length < mbs * 8) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "packet size too small\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
bytestream2_init(&g3, buf, length);
|
||||
|
||||
for(y=0; y<height; y+=16){
|
||||
for(x=0; x<width; x+=16){
|
||||
@@ -662,8 +665,8 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length){
|
||||
return -1;
|
||||
memset(color, 0, sizeof(color));
|
||||
//warning following is purely guessed ...
|
||||
color[0]= bytestream2_get_le16u(&g3);
|
||||
color[1]= bytestream2_get_le16u(&g3);
|
||||
color[0]= bytestream_get_le16(&buf);
|
||||
color[1]= bytestream_get_le16(&buf);
|
||||
|
||||
if(color[0]&0x8000) av_log(NULL, AV_LOG_ERROR, "unk bit 1\n");
|
||||
if(color[1]&0x8000) av_log(NULL, AV_LOG_ERROR, "unk bit 2\n");
|
||||
@@ -671,7 +674,7 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length){
|
||||
color[2]= mix(color[0], color[1]);
|
||||
color[3]= mix(color[1], color[0]);
|
||||
|
||||
bits= bytestream2_get_le32u(&g3);
|
||||
bits= bytestream_get_le32(&buf);
|
||||
for(y2=0; y2<16; y2++){
|
||||
for(x2=0; x2<16; x2++){
|
||||
int index= 2*(x2>>2) + 8*(y2>>2);
|
||||
@@ -680,7 +683,7 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length){
|
||||
}
|
||||
dst+=16;
|
||||
}
|
||||
dst += 16 * stride - x;
|
||||
dst += 16*stride - width;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -690,17 +693,16 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){
|
||||
int x, y;
|
||||
const int width= f->avctx->width;
|
||||
const int height= f->avctx->height;
|
||||
uint16_t *dst= (uint16_t*)f->current_picture.data[0];
|
||||
const int stride= f->current_picture.linesize[0]>>1;
|
||||
const unsigned int bitstream_size= AV_RL32(buf);
|
||||
unsigned int prestream_size;
|
||||
const uint8_t *prestream;
|
||||
|
||||
if (bitstream_size > (1<<26) || length < bitstream_size + 12) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "packet size too small\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
prestream_size = 4 * AV_RL32(buf + bitstream_size + 4);
|
||||
prestream = buf + bitstream_size + 12;
|
||||
if (bitstream_size > (1<<26) || length < bitstream_size + 12)
|
||||
return -1;
|
||||
prestream_size = 4*AV_RL32(buf + bitstream_size + 4);
|
||||
prestream = buf + bitstream_size + 12;
|
||||
|
||||
if (prestream_size > (1<<26) ||
|
||||
prestream_size != length - (bitstream_size + 12)){
|
||||
@@ -732,6 +734,7 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){
|
||||
|
||||
idct_put(f, x, y);
|
||||
}
|
||||
dst += 16*stride;
|
||||
}
|
||||
|
||||
if(get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3) != 256)
|
||||
@@ -831,7 +834,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
|
||||
if(frame_4cc == AV_RL32("ifr2")){
|
||||
p->pict_type= AV_PICTURE_TYPE_I;
|
||||
if(decode_i2_frame(f, buf-4, frame_size + 4) < 0) {
|
||||
if(decode_i2_frame(f, buf-4, frame_size+4) < 0){
|
||||
av_log(f->avctx, AV_LOG_ERROR, "decode i2 frame failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
@@ -106,11 +106,12 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
|
||||
{
|
||||
EightSvxContext *esc = avctx->priv_data;
|
||||
int n, out_data_size, ret;
|
||||
uint8_t *out_date;
|
||||
uint8_t *src, *dst;
|
||||
|
||||
/* decode and interleave the first packet */
|
||||
if (!esc->samples && avpkt) {
|
||||
uint8_t *deinterleaved_samples, *p = NULL;
|
||||
uint8_t *deinterleaved_samples;
|
||||
|
||||
esc->samples_size = avctx->codec->id == CODEC_ID_8SVX_RAW || avctx->codec->id ==CODEC_ID_PCM_S8_PLANAR?
|
||||
avpkt->size : avctx->channels + (avpkt->size-avctx->channels) * 2;
|
||||
@@ -129,7 +130,6 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
if (!(deinterleaved_samples = av_mallocz(n)))
|
||||
return AVERROR(ENOMEM);
|
||||
p = deinterleaved_samples;
|
||||
|
||||
/* the uncompressed starting value is contained in the first byte */
|
||||
if (avctx->channels == 2) {
|
||||
@@ -146,7 +146,6 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
|
||||
interleave_stereo(esc->samples, deinterleaved_samples, esc->samples_size);
|
||||
else
|
||||
memcpy(esc->samples, deinterleaved_samples, esc->samples_size);
|
||||
av_freep(&p);
|
||||
}
|
||||
|
||||
/* get output buffer */
|
||||
|
@@ -91,8 +91,6 @@ OBJS-$(CONFIG_ATRAC1_DECODER) += atrac1.o atrac.o
|
||||
OBJS-$(CONFIG_ATRAC3_DECODER) += atrac3.o atrac.o
|
||||
OBJS-$(CONFIG_AURA_DECODER) += cyuv.o
|
||||
OBJS-$(CONFIG_AURA2_DECODER) += aura.o
|
||||
OBJS-$(CONFIG_AVRP_DECODER) += r210dec.o
|
||||
OBJS-$(CONFIG_AVRP_ENCODER) += r210enc.o
|
||||
OBJS-$(CONFIG_AVS_DECODER) += avs.o
|
||||
OBJS-$(CONFIG_BETHSOFTVID_DECODER) += bethsoftvideo.o
|
||||
OBJS-$(CONFIG_BFI_DECODER) += bfi.o
|
||||
@@ -158,7 +156,6 @@ OBJS-$(CONFIG_FFV1_DECODER) += ffv1.o rangecoder.o
|
||||
OBJS-$(CONFIG_FFV1_ENCODER) += ffv1.o rangecoder.o
|
||||
OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o
|
||||
OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o
|
||||
OBJS-$(CONFIG_FFWAVESYNTH_DECODER) += ffwavesynth.o
|
||||
OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o vorbis_data.o
|
||||
OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o vorbis_data.o
|
||||
OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
|
||||
@@ -340,9 +337,7 @@ OBJS-$(CONFIG_QPEG_DECODER) += qpeg.o
|
||||
OBJS-$(CONFIG_QTRLE_DECODER) += qtrle.o
|
||||
OBJS-$(CONFIG_QTRLE_ENCODER) += qtrleenc.o
|
||||
OBJS-$(CONFIG_R10K_DECODER) += r210dec.o
|
||||
OBJS-$(CONFIG_R10K_ENCODER) += r210enc.o
|
||||
OBJS-$(CONFIG_R210_DECODER) += r210dec.o
|
||||
OBJS-$(CONFIG_R210_ENCODER) += r210enc.o
|
||||
OBJS-$(CONFIG_RA_144_DECODER) += ra144dec.o ra144.o celp_filters.o
|
||||
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o
|
||||
OBJS-$(CONFIG_RA_288_DECODER) += ra288.o celp_math.o celp_filters.o
|
||||
@@ -419,8 +414,6 @@ OBJS-$(CONFIG_ULTI_DECODER) += ulti.o
|
||||
OBJS-$(CONFIG_UTVIDEO_DECODER) += utvideo.o
|
||||
OBJS-$(CONFIG_V210_DECODER) += v210dec.o
|
||||
OBJS-$(CONFIG_V210_ENCODER) += v210enc.o
|
||||
OBJS-$(CONFIG_V308_DECODER) += v308dec.o
|
||||
OBJS-$(CONFIG_V308_ENCODER) += v308enc.o
|
||||
OBJS-$(CONFIG_V410_DECODER) += v410dec.o
|
||||
OBJS-$(CONFIG_V410_ENCODER) += v410enc.o
|
||||
OBJS-$(CONFIG_V210X_DECODER) += v210x.o
|
||||
@@ -473,13 +466,9 @@ OBJS-$(CONFIG_XBIN_DECODER) += bintext.o cga_data.o
|
||||
OBJS-$(CONFIG_XL_DECODER) += xl.o
|
||||
OBJS-$(CONFIG_XSUB_DECODER) += xsubdec.o
|
||||
OBJS-$(CONFIG_XSUB_ENCODER) += xsubenc.o
|
||||
OBJS-$(CONFIG_XWD_DECODER) += xwddec.o
|
||||
OBJS-$(CONFIG_XWD_ENCODER) += xwdenc.o
|
||||
OBJS-$(CONFIG_Y41P_DECODER) += y41pdec.o
|
||||
OBJS-$(CONFIG_Y41P_ENCODER) += y41penc.o
|
||||
OBJS-$(CONFIG_YOP_DECODER) += yop.o
|
||||
OBJS-$(CONFIG_YUV4_DECODER) += yuv4dec.o
|
||||
OBJS-$(CONFIG_YUV4_ENCODER) += yuv4enc.o
|
||||
OBJS-$(CONFIG_ZLIB_DECODER) += lcldec.o
|
||||
OBJS-$(CONFIG_ZLIB_ENCODER) += lclenc.o
|
||||
OBJS-$(CONFIG_ZMBV_DECODER) += zmbv.o
|
||||
@@ -538,20 +527,19 @@ OBJS-$(CONFIG_PCM_ZORK_DECODER) += pcm.o
|
||||
|
||||
OBJS-$(CONFIG_ADPCM_4XM_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_ADX_DECODER) += adxdec.o adx.o
|
||||
OBJS-$(CONFIG_ADPCM_ADX_ENCODER) += adxenc.o adx.o
|
||||
OBJS-$(CONFIG_ADPCM_ADX_ENCODER) += adxenc.o
|
||||
OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_MAXIS_XA_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_R1_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_R2_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_R3_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_XAS_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_DECODER) += adpcm.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_MAXIS_XA_DECODER) += adpcm.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_R1_DECODER) += adpcm.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_R2_DECODER) += adpcm.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_R3_DECODER) += adpcm.o
|
||||
OBJS-$(CONFIG_ADPCM_EA_XAS_DECODER) += adpcm.o
|
||||
OBJS-$(CONFIG_ADPCM_G722_DECODER) += g722.o g722dec.o
|
||||
OBJS-$(CONFIG_ADPCM_G722_ENCODER) += g722.o g722enc.o
|
||||
OBJS-$(CONFIG_ADPCM_G726_DECODER) += g726.o
|
||||
OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_APC_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_DK3_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_DK4_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_EA_EACS_DECODER) += adpcm.o adpcm_data.o
|
||||
@@ -565,13 +553,13 @@ OBJS-$(CONFIG_ADPCM_IMA_WAV_ENCODER) += adpcmenc.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_IMA_WS_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_MS_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_MS_ENCODER) += adpcmenc.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_SBPRO_2_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_SBPRO_3_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_SBPRO_4_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_SBPRO_2_DECODER) += adpcm.o
|
||||
OBJS-$(CONFIG_ADPCM_SBPRO_3_DECODER) += adpcm.o
|
||||
OBJS-$(CONFIG_ADPCM_SBPRO_4_DECODER) += adpcm.o
|
||||
OBJS-$(CONFIG_ADPCM_SWF_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_SWF_ENCODER) += adpcmenc.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_THP_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_XA_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_THP_DECODER) += adpcm.o
|
||||
OBJS-$(CONFIG_ADPCM_XA_DECODER) += adpcm.o
|
||||
OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcmenc.o adpcm_data.o
|
||||
|
||||
@@ -594,7 +582,7 @@ OBJS-$(CONFIG_MATROSKA_MUXER) += xiph.o mpeg4audio.o \
|
||||
flacdec.o flacdata.o flac.o \
|
||||
mpegaudiodata.o vorbis_data.o
|
||||
OBJS-$(CONFIG_MP3_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
|
||||
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o timecode.o
|
||||
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_MPEGTS_MUXER) += mpegvideo.o mpeg4audio.o
|
||||
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
@@ -661,7 +649,6 @@ OBJS-$(CONFIG_DVBSUB_PARSER) += dvbsub_parser.o
|
||||
OBJS-$(CONFIG_DVDSUB_PARSER) += dvdsub_parser.o
|
||||
OBJS-$(CONFIG_FLAC_PARSER) += flac_parser.o flacdata.o flac.o \
|
||||
vorbis_data.o
|
||||
OBJS-$(CONFIG_GSM_PARSER) += gsm_parser.o
|
||||
OBJS-$(CONFIG_H261_PARSER) += h261_parser.o
|
||||
OBJS-$(CONFIG_H263_PARSER) += h263_parser.o
|
||||
OBJS-$(CONFIG_H264_PARSER) += h264_parser.o h264.o \
|
||||
@@ -721,6 +708,8 @@ OBJS-$(CONFIG_MLIB) += mlib/dsputil_mlib.o \
|
||||
# well.
|
||||
OBJS-$(!CONFIG_SMALL) += inverse.o
|
||||
|
||||
-include $(SRC_PATH)/$(SUBDIR)$(ARCH)/Makefile
|
||||
|
||||
SKIPHEADERS += %_tablegen.h \
|
||||
%_tables.h \
|
||||
aac_tablegen_decl.h \
|
||||
@@ -748,6 +737,8 @@ DIRS = alpha arm bfin mlib ppc ps2 sh4 sparc x86
|
||||
|
||||
CLEANFILES = *_tables.c *_tables.h *_tablegen$(HOSTEXESUF)
|
||||
|
||||
include $(SRC_PATH)/subdir.mak
|
||||
|
||||
$(SUBDIR)dct-test$(EXESUF): $(SUBDIR)dctref.o
|
||||
|
||||
TRIG_TABLES = cos cos_fixed sin
|
||||
|
@@ -84,7 +84,6 @@ enum BandType {
|
||||
#define IS_CODEBOOK_UNSIGNED(x) ((x - 1) & 10)
|
||||
|
||||
enum ChannelPosition {
|
||||
AAC_CHANNEL_OFF = 0,
|
||||
AAC_CHANNEL_FRONT = 1,
|
||||
AAC_CHANNEL_SIDE = 2,
|
||||
AAC_CHANNEL_BACK = 3,
|
||||
|
@@ -110,15 +110,14 @@ static av_always_inline float quantize_and_encode_band_cost_template(
|
||||
int *bits, int BT_ZERO, int BT_UNSIGNED,
|
||||
int BT_PAIR, int BT_ESC)
|
||||
{
|
||||
const int q_idx = POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512;
|
||||
const float Q = ff_aac_pow2sf_tab [q_idx];
|
||||
const float Q34 = ff_aac_pow34sf_tab[q_idx];
|
||||
const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
|
||||
const float IQ = ff_aac_pow2sf_tab[POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
|
||||
const float Q = ff_aac_pow2sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
|
||||
const float CLIPPED_ESCAPE = 165140.0f*IQ;
|
||||
int i, j;
|
||||
float cost = 0;
|
||||
const int dim = BT_PAIR ? 2 : 4;
|
||||
int resbits = 0;
|
||||
const float Q34 = sqrtf(Q * sqrtf(Q));
|
||||
const int range = aac_cb_range[cb];
|
||||
const int maxval = aac_cb_maxval[cb];
|
||||
int off;
|
||||
@@ -421,7 +420,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
|
||||
const int run_esc = (1 << run_bits) - 1;
|
||||
int idx, ppos, count;
|
||||
int stackrun[120], stackcb[120], stack_len;
|
||||
float next_minbits = INFINITY;
|
||||
float next_minrd = INFINITY;
|
||||
int next_mincb = 0;
|
||||
|
||||
abs_pow34_v(s->scoefs, sce->coeffs, 1024);
|
||||
@@ -435,7 +434,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
|
||||
size = sce->ics.swb_sizes[swb];
|
||||
if (sce->zeroes[win*16 + swb]) {
|
||||
float cost_stay_here = path[swb][0].cost;
|
||||
float cost_get_here = next_minbits + run_bits + 4;
|
||||
float cost_get_here = next_minrd + run_bits + 4;
|
||||
if ( run_value_bits[sce->ics.num_windows == 8][path[swb][0].run]
|
||||
!= run_value_bits[sce->ics.num_windows == 8][path[swb][0].run+1])
|
||||
cost_stay_here += run_bits;
|
||||
@@ -448,7 +447,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
|
||||
path[swb+1][0].cost = cost_stay_here;
|
||||
path[swb+1][0].run = path[swb][0].run + 1;
|
||||
}
|
||||
next_minbits = path[swb+1][0].cost;
|
||||
next_minrd = path[swb+1][0].cost;
|
||||
next_mincb = 0;
|
||||
for (cb = 1; cb < 12; cb++) {
|
||||
path[swb+1][cb].cost = 61450;
|
||||
@@ -456,10 +455,10 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
|
||||
path[swb+1][cb].run = 0;
|
||||
}
|
||||
} else {
|
||||
float minbits = next_minbits;
|
||||
float minrd = next_minrd;
|
||||
int mincb = next_mincb;
|
||||
int startcb = sce->band_type[win*16+swb];
|
||||
next_minbits = INFINITY;
|
||||
next_minrd = INFINITY;
|
||||
next_mincb = 0;
|
||||
for (cb = 0; cb < startcb; cb++) {
|
||||
path[swb+1][cb].cost = 61450;
|
||||
@@ -468,15 +467,15 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
|
||||
}
|
||||
for (cb = startcb; cb < 12; cb++) {
|
||||
float cost_stay_here, cost_get_here;
|
||||
float bits = 0.0f;
|
||||
float rd = 0.0f;
|
||||
for (w = 0; w < group_len; w++) {
|
||||
bits += quantize_band_cost(s, sce->coeffs + start + w*128,
|
||||
s->scoefs + start + w*128, size,
|
||||
sce->sf_idx[(win+w)*16+swb], cb,
|
||||
0, INFINITY, NULL);
|
||||
rd += quantize_band_cost(s, sce->coeffs + start + w*128,
|
||||
s->scoefs + start + w*128, size,
|
||||
sce->sf_idx[(win+w)*16+swb], cb,
|
||||
0, INFINITY, NULL);
|
||||
}
|
||||
cost_stay_here = path[swb][cb].cost + bits;
|
||||
cost_get_here = minbits + bits + run_bits + 4;
|
||||
cost_stay_here = path[swb][cb].cost + rd;
|
||||
cost_get_here = minrd + rd + run_bits + 4;
|
||||
if ( run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run]
|
||||
!= run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run+1])
|
||||
cost_stay_here += run_bits;
|
||||
@@ -489,8 +488,8 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
|
||||
path[swb+1][cb].cost = cost_stay_here;
|
||||
path[swb+1][cb].run = path[swb][cb].run + 1;
|
||||
}
|
||||
if (path[swb+1][cb].cost < next_minbits) {
|
||||
next_minbits = path[swb+1][cb].cost;
|
||||
if (path[swb+1][cb].cost < next_minrd) {
|
||||
next_minrd = path[swb+1][cb].cost;
|
||||
next_mincb = cb;
|
||||
}
|
||||
}
|
||||
|
@@ -98,7 +98,6 @@
|
||||
#include "aacsbr.h"
|
||||
#include "mpeg4audio.h"
|
||||
#include "aacadtsdec.h"
|
||||
#include "libavutil/intfloat.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <errno.h>
|
||||
@@ -109,6 +108,11 @@
|
||||
# include "arm/aac.h"
|
||||
#endif
|
||||
|
||||
union float754 {
|
||||
float f;
|
||||
uint32_t i;
|
||||
};
|
||||
|
||||
static VLC vlc_scalefactors;
|
||||
static VLC vlc_spectral[11];
|
||||
|
||||
@@ -163,19 +167,6 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
|
||||
}
|
||||
}
|
||||
|
||||
static int count_channels(enum ChannelPosition che_pos[4][MAX_ELEM_ID])
|
||||
{
|
||||
int i, type, sum = 0;
|
||||
for (i = 0; i < MAX_ELEM_ID; i++) {
|
||||
for (type = 0; type < 4; type++) {
|
||||
sum += (1 + (type == TYPE_CPE)) *
|
||||
(che_pos[type][i] != AAC_CHANNEL_OFF &&
|
||||
che_pos[type][i] != AAC_CHANNEL_CC);
|
||||
}
|
||||
}
|
||||
return sum;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check for the channel element in the current channel position configuration.
|
||||
* If it exists, make sure the appropriate element is allocated and map the
|
||||
@@ -450,12 +441,6 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
|
||||
if ((ret = set_default_channel_config(avctx, new_che_pos, channel_config)))
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (count_channels(new_che_pos) > 1) {
|
||||
m4ac->ps = 0;
|
||||
} else if (m4ac->sbr == 1 && m4ac->ps == -1)
|
||||
m4ac->ps = 1;
|
||||
|
||||
if (ac && (ret = output_configure(ac, ac->che_pos, new_che_pos, channel_config, OC_GLOBAL_HDR)))
|
||||
return ret;
|
||||
|
||||
@@ -514,6 +499,8 @@ static int decode_audio_specific_config(AACContext *ac,
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", m4ac->sampling_index);
|
||||
return -1;
|
||||
}
|
||||
if (m4ac->sbr == 1 && m4ac->ps == -1)
|
||||
m4ac->ps = 1;
|
||||
|
||||
skip_bits_long(&gb, i);
|
||||
|
||||
@@ -739,13 +726,16 @@ static void decode_ltp(AACContext *ac, LongTermPrediction *ltp,
|
||||
|
||||
/**
|
||||
* Decode Individual Channel Stream info; reference: table 4.6.
|
||||
*
|
||||
* @param common_window Channels have independent [0], or shared [1], Individual Channel Stream information.
|
||||
*/
|
||||
static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
|
||||
GetBitContext *gb)
|
||||
GetBitContext *gb, int common_window)
|
||||
{
|
||||
if (get_bits1(gb)) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "Reserved bit set.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
memset(ics, 0, sizeof(IndividualChannelStream));
|
||||
return -1;
|
||||
}
|
||||
ics->window_sequence[1] = ics->window_sequence[0];
|
||||
ics->window_sequence[0] = get_bits(gb, 2);
|
||||
@@ -780,11 +770,13 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
|
||||
if (ics->predictor_present) {
|
||||
if (ac->m4ac.object_type == AOT_AAC_MAIN) {
|
||||
if (decode_prediction(ac, ics, gb)) {
|
||||
return AVERROR_INVALIDDATA;
|
||||
memset(ics, 0, sizeof(IndividualChannelStream));
|
||||
return -1;
|
||||
}
|
||||
} else if (ac->m4ac.object_type == AOT_AAC_LC) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
memset(ics, 0, sizeof(IndividualChannelStream));
|
||||
return -1;
|
||||
} else {
|
||||
if ((ics->ltp.present = get_bits(gb, 1)))
|
||||
decode_ltp(ac, &ics->ltp, gb, ics->max_sfb);
|
||||
@@ -796,7 +788,8 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"Number of scalefactor bands in group (%d) exceeds limit (%d).\n",
|
||||
ics->max_sfb, ics->num_swb);
|
||||
return AVERROR_INVALIDDATA;
|
||||
memset(ics, 0, sizeof(IndividualChannelStream));
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -1030,7 +1023,7 @@ static inline float *VMUL4(float *dst, const float *v, unsigned idx,
|
||||
static inline float *VMUL2S(float *dst, const float *v, unsigned idx,
|
||||
unsigned sign, const float *scale)
|
||||
{
|
||||
union av_intfloat32 s0, s1;
|
||||
union float754 s0, s1;
|
||||
|
||||
s0.f = s1.f = *scale;
|
||||
s0.i ^= sign >> 1 << 31;
|
||||
@@ -1048,8 +1041,8 @@ static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
|
||||
unsigned sign, const float *scale)
|
||||
{
|
||||
unsigned nz = idx >> 12;
|
||||
union av_intfloat32 s = { .f = *scale };
|
||||
union av_intfloat32 t;
|
||||
union float754 s = { .f = *scale };
|
||||
union float754 t;
|
||||
|
||||
t.i = s.i ^ (sign & 1U<<31);
|
||||
*dst++ = v[idx & 3] * t.f;
|
||||
@@ -1298,7 +1291,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
|
||||
|
||||
static av_always_inline float flt16_round(float pf)
|
||||
{
|
||||
union av_intfloat32 tmp;
|
||||
union float754 tmp;
|
||||
tmp.f = pf;
|
||||
tmp.i = (tmp.i + 0x00008000U) & 0xFFFF0000U;
|
||||
return tmp.f;
|
||||
@@ -1306,7 +1299,7 @@ static av_always_inline float flt16_round(float pf)
|
||||
|
||||
static av_always_inline float flt16_even(float pf)
|
||||
{
|
||||
union av_intfloat32 tmp;
|
||||
union float754 tmp;
|
||||
tmp.f = pf;
|
||||
tmp.i = (tmp.i + 0x00007FFFU + (tmp.i & 0x00010000U >> 16)) & 0xFFFF0000U;
|
||||
return tmp.f;
|
||||
@@ -1314,7 +1307,7 @@ static av_always_inline float flt16_even(float pf)
|
||||
|
||||
static av_always_inline float flt16_trunc(float pf)
|
||||
{
|
||||
union av_intfloat32 pun;
|
||||
union float754 pun;
|
||||
pun.f = pf;
|
||||
pun.i &= 0xFFFF0000U;
|
||||
return pun.f;
|
||||
@@ -1401,8 +1394,8 @@ static int decode_ics(AACContext *ac, SingleChannelElement *sce,
|
||||
global_gain = get_bits(gb, 8);
|
||||
|
||||
if (!common_window && !scale_flag) {
|
||||
if (decode_ics_info(ac, ics, gb) < 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (decode_ics_info(ac, ics, gb, 0) < 0)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (decode_band_types(ac, sce->band_type, sce->band_type_run_end, gb, ics) < 0)
|
||||
@@ -1518,8 +1511,8 @@ static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
|
||||
|
||||
common_window = get_bits1(gb);
|
||||
if (common_window) {
|
||||
if (decode_ics_info(ac, &cpe->ch[0].ics, gb))
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (decode_ics_info(ac, &cpe->ch[0].ics, gb, 1))
|
||||
return -1;
|
||||
i = cpe->ch[1].ics.use_kb_window[0];
|
||||
cpe->ch[1].ics = cpe->ch[0].ics;
|
||||
cpe->ch[1].ics.use_kb_window[1] = i;
|
||||
@@ -2110,7 +2103,7 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
|
||||
|
||||
size = avpriv_aac_parse_header(gb, &hdr_info);
|
||||
if (size > 0) {
|
||||
if (hdr_info.chan_config) {
|
||||
if (hdr_info.chan_config && (hdr_info.chan_config!=ac->m4ac.chan_config || ac->m4ac.sample_rate!=hdr_info.sample_rate)) {
|
||||
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
|
||||
memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
|
||||
ac->m4ac.chan_config = hdr_info.chan_config;
|
||||
@@ -2218,11 +2211,10 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
|
||||
if ((err = decode_pce(avctx, &ac->m4ac, new_che_pos, gb)))
|
||||
break;
|
||||
if (ac->output_configured > OC_TRIAL_PCE)
|
||||
av_log(avctx, AV_LOG_INFO,
|
||||
"Evaluating a further program_config_element.\n");
|
||||
err = output_configure(ac, ac->che_pos, new_che_pos, 0, OC_TRIAL_PCE);
|
||||
if (!err)
|
||||
ac->m4ac.chan_config = 0;
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Not evaluating a further program_config_element as this construct is dubious at best.\n");
|
||||
else
|
||||
err = output_configure(ac, ac->che_pos, new_che_pos, 0, OC_TRIAL_PCE);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -2294,31 +2286,12 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
|
||||
static int aac_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int *got_frame_ptr, AVPacket *avpkt)
|
||||
{
|
||||
AACContext *ac = avctx->priv_data;
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
GetBitContext gb;
|
||||
int buf_consumed;
|
||||
int buf_offset;
|
||||
int err;
|
||||
int new_extradata_size;
|
||||
const uint8_t *new_extradata = av_packet_get_side_data(avpkt,
|
||||
AV_PKT_DATA_NEW_EXTRADATA,
|
||||
&new_extradata_size);
|
||||
|
||||
if (new_extradata) {
|
||||
av_free(avctx->extradata);
|
||||
avctx->extradata = av_mallocz(new_extradata_size +
|
||||
FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (!avctx->extradata)
|
||||
return AVERROR(ENOMEM);
|
||||
avctx->extradata_size = new_extradata_size;
|
||||
memcpy(avctx->extradata, new_extradata, new_extradata_size);
|
||||
if (decode_audio_specific_config(ac, ac->avctx, &ac->m4ac,
|
||||
avctx->extradata,
|
||||
avctx->extradata_size*8, 1) < 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
init_get_bits(&gb, buf, buf_size * 8);
|
||||
|
||||
@@ -2393,8 +2366,6 @@ static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
|
||||
"config not byte aligned.\n", 1);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (asclen <= 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
bits_consumed = decode_audio_specific_config(NULL, avctx, &m4ac,
|
||||
gb->buffer + (config_start_bit / 8),
|
||||
asclen, sync_extension);
|
||||
|
@@ -46,14 +46,6 @@
|
||||
|
||||
#define AAC_MAX_CHANNELS 6
|
||||
|
||||
#define ERROR_IF(cond, ...) \
|
||||
if (cond) { \
|
||||
av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
|
||||
return AVERROR(EINVAL); \
|
||||
}
|
||||
|
||||
float ff_aac_pow34sf_tab[428];
|
||||
|
||||
static const uint8_t swb_size_1024_96[] = {
|
||||
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8,
|
||||
12, 12, 12, 12, 12, 16, 16, 24, 28, 36, 44,
|
||||
@@ -143,10 +135,7 @@ static const uint8_t aac_chan_configs[6][5] = {
|
||||
{4, TYPE_SCE, TYPE_CPE, TYPE_CPE, TYPE_LFE}, // 6 channels - front center + stereo + back stereo + LFE
|
||||
};
|
||||
|
||||
/**
|
||||
* Table to remap channels from Libav's default order to AAC order.
|
||||
*/
|
||||
static const uint8_t aac_chan_maps[AAC_MAX_CHANNELS][AAC_MAX_CHANNELS] = {
|
||||
static const uint8_t channel_maps[][AAC_MAX_CHANNELS] = {
|
||||
{ 0 },
|
||||
{ 0, 1 },
|
||||
{ 2, 0, 1 },
|
||||
@@ -167,7 +156,7 @@ static void put_audio_specific_config(AVCodecContext *avctx)
|
||||
init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8);
|
||||
put_bits(&pb, 5, 2); //object type - AAC-LC
|
||||
put_bits(&pb, 4, s->samplerate_index); //sample rate index
|
||||
put_bits(&pb, 4, s->channels);
|
||||
put_bits(&pb, 4, avctx->channels);
|
||||
//GASpecificConfig
|
||||
put_bits(&pb, 1, 0); //frame length - 1024 samples
|
||||
put_bits(&pb, 1, 0); //does not depend on core coder
|
||||
@@ -180,80 +169,117 @@ static void put_audio_specific_config(AVCodecContext *avctx)
|
||||
flush_put_bits(&pb);
|
||||
}
|
||||
|
||||
#define WINDOW_FUNC(type) \
|
||||
static void apply_ ##type ##_window(DSPContext *dsp, SingleChannelElement *sce, const float *audio)
|
||||
|
||||
WINDOW_FUNC(only_long)
|
||||
{
|
||||
const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
|
||||
const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
|
||||
float *out = sce->ret;
|
||||
|
||||
dsp->vector_fmul (out, audio, lwindow, 1024);
|
||||
dsp->vector_fmul_reverse(out + 1024, audio + 1024, pwindow, 1024);
|
||||
}
|
||||
|
||||
WINDOW_FUNC(long_start)
|
||||
{
|
||||
const float *lwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
|
||||
const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
|
||||
float *out = sce->ret;
|
||||
|
||||
dsp->vector_fmul(out, audio, lwindow, 1024);
|
||||
memcpy(out + 1024, audio, sizeof(out[0]) * 448);
|
||||
dsp->vector_fmul_reverse(out + 1024 + 448, audio, swindow, 128);
|
||||
memset(out + 1024 + 576, 0, sizeof(out[0]) * 448);
|
||||
}
|
||||
|
||||
WINDOW_FUNC(long_stop)
|
||||
{
|
||||
const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
|
||||
const float *swindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
|
||||
float *out = sce->ret;
|
||||
|
||||
memset(out, 0, sizeof(out[0]) * 448);
|
||||
dsp->vector_fmul(out + 448, audio + 448, swindow, 128);
|
||||
memcpy(out + 576, audio + 576, sizeof(out[0]) * 448);
|
||||
dsp->vector_fmul_reverse(out + 1024, audio + 1024, lwindow, 1024);
|
||||
}
|
||||
|
||||
WINDOW_FUNC(eight_short)
|
||||
{
|
||||
const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
|
||||
const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
|
||||
const float *in = audio + 448;
|
||||
float *out = sce->ret;
|
||||
|
||||
for (int w = 0; w < 8; w++) {
|
||||
dsp->vector_fmul (out, in, w ? pwindow : swindow, 128);
|
||||
out += 128;
|
||||
in += 128;
|
||||
dsp->vector_fmul_reverse(out, in, swindow, 128);
|
||||
out += 128;
|
||||
}
|
||||
}
|
||||
|
||||
static void (*const apply_window[4])(DSPContext *dsp, SingleChannelElement *sce, const float *audio) = {
|
||||
[ONLY_LONG_SEQUENCE] = apply_only_long_window,
|
||||
[LONG_START_SEQUENCE] = apply_long_start_window,
|
||||
[EIGHT_SHORT_SEQUENCE] = apply_eight_short_window,
|
||||
[LONG_STOP_SEQUENCE] = apply_long_stop_window
|
||||
};
|
||||
|
||||
static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce,
|
||||
float *audio)
|
||||
static av_cold int aac_encode_init(AVCodecContext *avctx)
|
||||
{
|
||||
AACEncContext *s = avctx->priv_data;
|
||||
int i;
|
||||
const uint8_t *sizes[2];
|
||||
uint8_t grouping[AAC_MAX_CHANNELS];
|
||||
int lengths[2];
|
||||
|
||||
avctx->frame_size = 1024;
|
||||
|
||||
for (i = 0; i < 16; i++)
|
||||
if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i])
|
||||
break;
|
||||
if (i == 16) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d\n", avctx->sample_rate);
|
||||
return -1;
|
||||
}
|
||||
if (avctx->channels > AAC_MAX_CHANNELS) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %d\n", avctx->channels);
|
||||
return -1;
|
||||
}
|
||||
if (avctx->profile != FF_PROFILE_UNKNOWN && avctx->profile != FF_PROFILE_AAC_LOW) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Unsupported profile %d\n", avctx->profile);
|
||||
return -1;
|
||||
}
|
||||
if (1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * avctx->channels) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Too many bits per frame requested\n");
|
||||
return -1;
|
||||
}
|
||||
s->samplerate_index = i;
|
||||
|
||||
dsputil_init(&s->dsp, avctx);
|
||||
ff_mdct_init(&s->mdct1024, 11, 0, 1.0);
|
||||
ff_mdct_init(&s->mdct128, 8, 0, 1.0);
|
||||
// window init
|
||||
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
|
||||
ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
|
||||
ff_init_ff_sine_windows(10);
|
||||
ff_init_ff_sine_windows(7);
|
||||
|
||||
s->chan_map = aac_chan_configs[avctx->channels-1];
|
||||
s->samples = av_malloc(2 * 1024 * avctx->channels * sizeof(s->samples[0]));
|
||||
s->cpe = av_mallocz(sizeof(ChannelElement) * s->chan_map[0]);
|
||||
avctx->extradata = av_mallocz(5 + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
avctx->extradata_size = 5;
|
||||
put_audio_specific_config(avctx);
|
||||
|
||||
sizes[0] = swb_size_1024[i];
|
||||
sizes[1] = swb_size_128[i];
|
||||
lengths[0] = ff_aac_num_swb_1024[i];
|
||||
lengths[1] = ff_aac_num_swb_128[i];
|
||||
for (i = 0; i < s->chan_map[0]; i++)
|
||||
grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
|
||||
ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping);
|
||||
s->psypp = ff_psy_preprocess_init(avctx);
|
||||
s->coder = &ff_aac_coders[s->options.aac_coder];
|
||||
|
||||
s->lambda = avctx->global_quality ? avctx->global_quality : 120;
|
||||
|
||||
ff_aac_tableinit();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void apply_window_and_mdct(AVCodecContext *avctx, AACEncContext *s,
|
||||
SingleChannelElement *sce, short *audio)
|
||||
{
|
||||
int i, k;
|
||||
const int chans = avctx->channels;
|
||||
const float * lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
|
||||
const float * swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
|
||||
const float * pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
|
||||
float *output = sce->ret;
|
||||
|
||||
apply_window[sce->ics.window_sequence[0]](&s->dsp, sce, audio);
|
||||
|
||||
if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE)
|
||||
if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
|
||||
memcpy(output, sce->saved, sizeof(float)*1024);
|
||||
if (sce->ics.window_sequence[0] == LONG_STOP_SEQUENCE) {
|
||||
memset(output, 0, sizeof(output[0]) * 448);
|
||||
for (i = 448; i < 576; i++)
|
||||
output[i] = sce->saved[i] * pwindow[i - 448];
|
||||
for (i = 576; i < 704; i++)
|
||||
output[i] = sce->saved[i];
|
||||
}
|
||||
if (sce->ics.window_sequence[0] != LONG_START_SEQUENCE) {
|
||||
for (i = 0; i < 1024; i++) {
|
||||
output[i+1024] = audio[i * chans] * lwindow[1024 - i - 1];
|
||||
sce->saved[i] = audio[i * chans] * lwindow[i];
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < 448; i++)
|
||||
output[i+1024] = audio[i * chans];
|
||||
for (; i < 576; i++)
|
||||
output[i+1024] = audio[i * chans] * swindow[576 - i - 1];
|
||||
memset(output+1024+576, 0, sizeof(output[0]) * 448);
|
||||
for (i = 0; i < 1024; i++)
|
||||
sce->saved[i] = audio[i * chans];
|
||||
}
|
||||
s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output);
|
||||
else
|
||||
for (i = 0; i < 1024; i += 128)
|
||||
s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + i, output + i*2);
|
||||
memcpy(audio, audio + 1024, sizeof(audio[0]) * 1024);
|
||||
} else {
|
||||
for (k = 0; k < 1024; k += 128) {
|
||||
for (i = 448 + k; i < 448 + k + 256; i++)
|
||||
output[i - 448 - k] = (i < 1024)
|
||||
? sce->saved[i]
|
||||
: audio[(i-1024)*chans];
|
||||
s->dsp.vector_fmul (output, output, k ? swindow : pwindow, 128);
|
||||
s->dsp.vector_fmul_reverse(output+128, output+128, swindow, 128);
|
||||
s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + k, output);
|
||||
}
|
||||
for (i = 0; i < 1024; i++)
|
||||
sce->saved[i] = audio[i * chans];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -462,46 +488,20 @@ static void put_bitstream_info(AVCodecContext *avctx, AACEncContext *s,
|
||||
put_bits(&s->pb, 3, TYPE_FIL);
|
||||
put_bits(&s->pb, 4, FFMIN(namelen, 15));
|
||||
if (namelen >= 15)
|
||||
put_bits(&s->pb, 8, namelen - 14);
|
||||
put_bits(&s->pb, 8, namelen - 16);
|
||||
put_bits(&s->pb, 4, 0); //extension type - filler
|
||||
padbits = -put_bits_count(&s->pb) & 7;
|
||||
padbits = 8 - (put_bits_count(&s->pb) & 7);
|
||||
avpriv_align_put_bits(&s->pb);
|
||||
for (i = 0; i < namelen - 2; i++)
|
||||
put_bits(&s->pb, 8, name[i]);
|
||||
put_bits(&s->pb, 12 - padbits, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Deinterleave input samples.
|
||||
* Channels are reordered from Libav's default order to AAC order.
|
||||
*/
|
||||
static void deinterleave_input_samples(AACEncContext *s,
|
||||
const float *samples)
|
||||
{
|
||||
int ch, i;
|
||||
const int sinc = s->channels;
|
||||
const uint8_t *channel_map = aac_chan_maps[sinc - 1];
|
||||
|
||||
/* deinterleave and remap input samples */
|
||||
for (ch = 0; ch < sinc; ch++) {
|
||||
const float *sptr = samples + channel_map[ch];
|
||||
|
||||
/* copy last 1024 samples of previous frame to the start of the current frame */
|
||||
memcpy(&s->planar_samples[ch][0], &s->planar_samples[ch][1024], 1024 * sizeof(s->planar_samples[0][0]));
|
||||
|
||||
/* deinterleave */
|
||||
for (i = 1024; i < 1024 * 2; i++) {
|
||||
s->planar_samples[ch][i] = *sptr;
|
||||
sptr += sinc;
|
||||
}
|
||||
}
|
||||
}
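A tiny standalone version of the same step, assuming each plane holds two 1024-sample frames (previous, then current) and the input is sample-interleaved; names here are illustrative, not the encoder's:

```c
/* Keep the previous frame at the front of each plane, then append the
 * deinterleaved current frame behind it. */
static void deinterleave(float *plane[], const float *in, int nch)
{
    for (int ch = 0; ch < nch; ch++) {
        for (int i = 0; i < 1024; i++)
            plane[ch][i] = plane[ch][1024 + i];         /* shift previous frame  */
        for (int i = 0; i < 1024; i++)
            plane[ch][1024 + i] = in[i * nch + ch];     /* deinterleave current  */
    }
}
```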
static int aac_encode_frame(AVCodecContext *avctx,
|
||||
uint8_t *frame, int buf_size, void *data)
|
||||
{
|
||||
AACEncContext *s = avctx->priv_data;
|
||||
float **samples = s->planar_samples, *samples2, *la, *overlap;
|
||||
int16_t *samples = s->samples, *samples2, *la;
|
||||
ChannelElement *cpe;
|
||||
int i, ch, w, g, chans, tag, start_ch;
|
||||
int chan_el_counter[4];
|
||||
@@ -509,15 +509,36 @@ static int aac_encode_frame(AVCodecContext *avctx,
|
||||
|
||||
if (s->last_frame)
|
||||
return 0;
|
||||
|
||||
if (data) {
|
||||
deinterleave_input_samples(s, data);
|
||||
if (s->psypp)
|
||||
ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);
|
||||
if (!s->psypp) {
|
||||
if (avctx->channels <= 2) {
|
||||
memcpy(s->samples + 1024 * avctx->channels, data,
|
||||
1024 * avctx->channels * sizeof(s->samples[0]));
|
||||
} else {
|
||||
for (i = 0; i < 1024; i++)
|
||||
for (ch = 0; ch < avctx->channels; ch++)
|
||||
s->samples[(i + 1024) * avctx->channels + ch] =
|
||||
((int16_t*)data)[i * avctx->channels +
|
||||
channel_maps[avctx->channels-1][ch]];
|
||||
}
|
||||
} else {
|
||||
start_ch = 0;
|
||||
samples2 = s->samples + 1024 * avctx->channels;
|
||||
for (i = 0; i < s->chan_map[0]; i++) {
|
||||
tag = s->chan_map[i+1];
|
||||
chans = tag == TYPE_CPE ? 2 : 1;
|
||||
ff_psy_preprocess(s->psypp,
|
||||
(uint16_t*)data + channel_maps[avctx->channels-1][start_ch],
|
||||
samples2 + start_ch, start_ch, chans);
|
||||
start_ch += chans;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!avctx->frame_number)
|
||||
if (!avctx->frame_number) {
|
||||
memcpy(s->samples, s->samples + 1024 * avctx->channels,
|
||||
1024 * avctx->channels * sizeof(s->samples[0]));
|
||||
return 0;
|
||||
}
|
||||
|
||||
start_ch = 0;
|
||||
for (i = 0; i < s->chan_map[0]; i++) {
|
||||
@@ -528,9 +549,8 @@ static int aac_encode_frame(AVCodecContext *avctx,
|
||||
for (ch = 0; ch < chans; ch++) {
|
||||
IndividualChannelStream *ics = &cpe->ch[ch].ics;
|
||||
int cur_channel = start_ch + ch;
|
||||
overlap = &samples[cur_channel][0];
|
||||
samples2 = overlap + 1024;
|
||||
la = samples2 + (448+64);
|
||||
samples2 = samples + cur_channel;
|
||||
la = samples2 + (448+64) * avctx->channels;
|
||||
if (!data)
|
||||
la = NULL;
|
||||
if (tag == TYPE_LFE) {
|
||||
@@ -558,7 +578,7 @@ static int aac_encode_frame(AVCodecContext *avctx,
|
||||
for (w = 0; w < ics->num_windows; w++)
|
||||
ics->group_len[w] = wi[ch].grouping[w];
|
||||
|
||||
apply_window_and_mdct(s, &cpe->ch[ch], overlap);
|
||||
apply_window_and_mdct(avctx, s, &cpe->ch[ch], samples2);
|
||||
}
|
||||
start_ch += chans;
|
||||
}
|
||||
@@ -624,8 +644,8 @@ static int aac_encode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
|
||||
frame_bits = put_bits_count(&s->pb);
|
||||
if (frame_bits <= 6144 * s->channels - 3) {
|
||||
s->psy.bitres.bits = frame_bits / s->channels;
|
||||
if (frame_bits <= 6144 * avctx->channels - 3) {
|
||||
s->psy.bitres.bits = frame_bits / avctx->channels;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -646,7 +666,8 @@ static int aac_encode_frame(AVCodecContext *avctx,
|
||||
|
||||
if (!data)
|
||||
s->last_frame = 1;
|
||||
|
||||
memcpy(s->samples, s->samples + 1024 * avctx->channels,
|
||||
1024 * avctx->channels * sizeof(s->samples[0]));
|
||||
return put_bits_count(&s->pb)>>3;
|
||||
}
|
||||
|
||||
@@ -657,109 +678,12 @@ static av_cold int aac_encode_end(AVCodecContext *avctx)
|
||||
ff_mdct_end(&s->mdct1024);
|
||||
ff_mdct_end(&s->mdct128);
|
||||
ff_psy_end(&s->psy);
|
||||
if (s->psypp)
|
||||
ff_psy_preprocess_end(s->psypp);
|
||||
av_freep(&s->buffer.samples);
|
||||
ff_psy_preprocess_end(s->psypp);
|
||||
av_freep(&s->samples);
|
||||
av_freep(&s->cpe);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
dsputil_init(&s->dsp, avctx);
|
||||
|
||||
// window init
|
||||
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
|
||||
ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
|
||||
ff_init_ff_sine_windows(10);
|
||||
ff_init_ff_sine_windows(7);
|
||||
|
||||
if (ret = ff_mdct_init(&s->mdct1024, 11, 0, 32768.0))
|
||||
return ret;
|
||||
if (ret = ff_mdct_init(&s->mdct128, 8, 0, 32768.0))
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s)
|
||||
{
|
||||
FF_ALLOCZ_OR_GOTO(avctx, s->buffer.samples, 3 * 1024 * s->channels * sizeof(s->buffer.samples[0]), alloc_fail);
|
||||
FF_ALLOCZ_OR_GOTO(avctx, s->cpe, sizeof(ChannelElement) * s->chan_map[0], alloc_fail);
|
||||
FF_ALLOCZ_OR_GOTO(avctx, avctx->extradata, 5 + FF_INPUT_BUFFER_PADDING_SIZE, alloc_fail);
|
||||
|
||||
for(int ch = 0; ch < s->channels; ch++)
|
||||
s->planar_samples[ch] = s->buffer.samples + 3 * 1024 * ch;
|
||||
|
||||
return 0;
|
||||
alloc_fail:
|
||||
return AVERROR(ENOMEM);
|
||||
}
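The allocation above reserves 3 * 1024 floats per channel in one contiguous block and points each planar_samples[ch] at its own third; reading the sizes in this diff, that is one frame kept for MDCT overlap plus room for the current input and lookahead (an assumption, not stated in the hunk). A standalone sketch of the pointer setup:

```c
/* Sketch only: carve one contiguous buffer into per-channel planes. */
static void setup_planes(float *base, float *plane[], int nch)
{
    for (int ch = 0; ch < nch; ch++)
        plane[ch] = base + 3 * 1024 * ch;   /* three 1024-sample frames per channel */
}
```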
|
||||
|
||||
static av_cold int aac_encode_init(AVCodecContext *avctx)
|
||||
{
|
||||
AACEncContext *s = avctx->priv_data;
|
||||
int i, ret = 0;
|
||||
const uint8_t *sizes[2];
|
||||
uint8_t grouping[AAC_MAX_CHANNELS];
|
||||
int lengths[2];
|
||||
|
||||
avctx->frame_size = 1024;
|
||||
|
||||
for (i = 0; i < 16; i++)
|
||||
if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i])
|
||||
break;
|
||||
|
||||
s->channels = avctx->channels;
|
||||
|
||||
ERROR_IF(i == 16,
|
||||
"Unsupported sample rate %d\n", avctx->sample_rate);
|
||||
ERROR_IF(s->channels > AAC_MAX_CHANNELS,
|
||||
"Unsupported number of channels: %d\n", s->channels);
|
||||
ERROR_IF(avctx->profile != FF_PROFILE_UNKNOWN && avctx->profile != FF_PROFILE_AAC_LOW,
|
||||
"Unsupported profile %d\n", avctx->profile);
|
||||
ERROR_IF(1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * s->channels,
|
||||
"Too many bits per frame requested\n");
|
||||
|
||||
s->samplerate_index = i;
|
||||
|
||||
s->chan_map = aac_chan_configs[s->channels-1];
|
||||
|
||||
if (ret = dsp_init(avctx, s))
|
||||
goto fail;
|
||||
|
||||
if (ret = alloc_buffers(avctx, s))
|
||||
goto fail;
|
||||
|
||||
avctx->extradata_size = 5;
|
||||
put_audio_specific_config(avctx);
|
||||
|
||||
sizes[0] = swb_size_1024[i];
|
||||
sizes[1] = swb_size_128[i];
|
||||
lengths[0] = ff_aac_num_swb_1024[i];
|
||||
lengths[1] = ff_aac_num_swb_128[i];
|
||||
for (i = 0; i < s->chan_map[0]; i++)
|
||||
grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
|
||||
if (ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping))
|
||||
goto fail;
|
||||
s->psypp = ff_psy_preprocess_init(avctx);
|
||||
s->coder = &ff_aac_coders[s->options.aac_coder];
|
||||
|
||||
s->lambda = avctx->global_quality ? avctx->global_quality : 120;
|
||||
|
||||
ff_aac_tableinit();
|
||||
|
||||
for (i = 0; i < 428; i++)
|
||||
ff_aac_pow34sf_tab[i] = sqrt(ff_aac_pow2sf_tab[i] * sqrt(ff_aac_pow2sf_tab[i]));
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
aac_encode_end(avctx);
|
||||
return ret;
|
||||
}
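The "Too many bits per frame requested" check in the init code above enforces the limit of 6144 bits per channel per 1024-sample frame that the rate loop also tests against. A worked example of the resulting bitrate cap (hypothetical helper, not part of the encoder):

```c
/* Maximum bitrate that still fits 6144 bits per channel and frame,
 * e.g. 48000 Hz stereo: 6144 * 2 * 48000 / 1024 = 576000 b/s. */
static int max_aac_frame_bitrate(int sample_rate, int channels)
{
    return (int)(6144.0 * channels * sample_rate / 1024.0);
}
```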
|
||||
|
||||
#define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
|
||||
static const AVOption aacenc_options[] = {
|
||||
{"stereo_mode", "Stereo coding method", offsetof(AACEncContext, options.stereo_mode), AV_OPT_TYPE_INT, {.dbl = 0}, -1, 1, AACENC_FLAGS, "stereo_mode"},
|
||||
@@ -786,7 +710,7 @@ AVCodec ff_aac_encoder = {
|
||||
.encode = aac_encode_frame,
|
||||
.close = aac_encode_end,
|
||||
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
|
||||
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
|
||||
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
|
||||
.priv_class = &aacenc_class,
|
||||
};
|
||||
|
@@ -61,10 +61,9 @@ typedef struct AACEncContext {
|
||||
FFTContext mdct1024; ///< long (1024 samples) frame transform context
|
||||
FFTContext mdct128; ///< short (128 samples) frame transform context
|
||||
DSPContext dsp;
|
||||
float *planar_samples[6]; ///< saved preprocessed input
|
||||
int16_t *samples; ///< saved preprocessed input
|
||||
|
||||
int samplerate_index; ///< MPEG-4 samplerate index
|
||||
int channels; ///< channel count
|
||||
const uint8_t *chan_map; ///< channel configuration map
|
||||
|
||||
ChannelElement *cpe; ///< channel elements
|
||||
@@ -76,12 +75,6 @@ typedef struct AACEncContext {
|
||||
float lambda;
|
||||
DECLARE_ALIGNED(16, int, qcoefs)[96]; ///< quantized coefficients
|
||||
DECLARE_ALIGNED(32, float, scoefs)[1024]; ///< scaled coefficients
|
||||
|
||||
struct {
|
||||
float *samples;
|
||||
} buffer;
|
||||
} AACEncContext;
|
||||
|
||||
extern float ff_aac_pow34sf_tab[428];
|
||||
|
||||
#endif /* AVCODEC_AACENC_H */
|
||||
|
@@ -223,7 +223,7 @@ int ff_ps_read_data(AVCodecContext *avctx, GetBitContext *gb_host, PSContext *ps
|
||||
cnt -= 2 + ps_read_extension_data(gb, ps, ps_extension_id);
|
||||
}
|
||||
if (cnt < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "ps extension overflow %d\n", cnt);
|
||||
av_log(avctx, AV_LOG_ERROR, "ps extension overflow %d", cnt);
|
||||
goto err;
|
||||
}
|
||||
skip_bits(gb, cnt);
|
||||
|
@@ -216,7 +216,7 @@ static const float psy_fir_coeffs[] = {
|
||||
};
|
||||
|
||||
/**
|
||||
* Calculate the ABR attack threshold from the above LAME psymodel table.
|
||||
* calculates the attack threshold for ABR from the above table for the LAME psy model
|
||||
*/
|
||||
static float lame_calc_attack_threshold(int bitrate)
|
||||
{
|
||||
@@ -400,7 +400,7 @@ static av_unused FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx,
|
||||
int stay_short = 0;
|
||||
for (i = 0; i < 8; i++) {
|
||||
for (j = 0; j < 128; j++) {
|
||||
v = iir_filter(la[i*128+j], pch->iir_state);
|
||||
v = iir_filter(la[(i*128+j)*ctx->avctx->channels], pch->iir_state);
|
||||
sum += v*v;
|
||||
}
|
||||
s[i] = sum;
|
||||
@@ -776,8 +776,9 @@ static void lame_apply_block_type(AacPsyChannel *ctx, FFPsyWindowInfo *wi, int u
|
||||
ctx->next_window_seq = blocktype;
|
||||
}
|
||||
|
||||
static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, const float *audio,
|
||||
const float *la, int channel, int prev_type)
|
||||
static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx,
|
||||
const int16_t *audio, const int16_t *la,
|
||||
int channel, int prev_type)
|
||||
{
|
||||
AacPsyContext *pctx = (AacPsyContext*) ctx->model_priv_data;
|
||||
AacPsyChannel *pch = &pctx->ch[channel];
|
||||
@@ -794,20 +795,20 @@ static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, const float *audio,
|
||||
float attack_intensity[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS];
|
||||
float energy_subshort[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS];
|
||||
float energy_short[AAC_NUM_BLOCKS_SHORT + 1] = { 0 };
|
||||
const float *firbuf = la + (AAC_BLOCK_SIZE_SHORT/4 - PSY_LAME_FIR_LEN);
|
||||
int chans = ctx->avctx->channels;
|
||||
const int16_t *firbuf = la + (AAC_BLOCK_SIZE_SHORT/4 - PSY_LAME_FIR_LEN) * chans;
|
||||
int j, att_sum = 0;
|
||||
|
||||
/* LAME comment: apply high pass filter of fs/4 */
|
||||
for (i = 0; i < AAC_BLOCK_SIZE_LONG; i++) {
|
||||
float sum1, sum2;
|
||||
sum1 = firbuf[i + (PSY_LAME_FIR_LEN - 1) / 2];
|
||||
sum1 = firbuf[(i + ((PSY_LAME_FIR_LEN - 1) / 2)) * chans];
|
||||
sum2 = 0.0;
|
||||
for (j = 0; j < ((PSY_LAME_FIR_LEN - 1) / 2) - 1; j += 2) {
|
||||
sum1 += psy_fir_coeffs[j] * (firbuf[i + j] + firbuf[i + PSY_LAME_FIR_LEN - j]);
|
||||
sum2 += psy_fir_coeffs[j + 1] * (firbuf[i + j + 1] + firbuf[i + PSY_LAME_FIR_LEN - j - 1]);
|
||||
sum1 += psy_fir_coeffs[j] * (firbuf[(i + j) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j) * chans]);
|
||||
sum2 += psy_fir_coeffs[j + 1] * (firbuf[(i + j + 1) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j - 1) * chans]);
|
||||
}
|
||||
/* NOTE: The LAME psymodel expects its input in the range -32768 to 32768. Tuning this for normalized floats would be difficult. */
|
||||
hpfsmpl[i] = (sum1 + sum2) * 32768.0f;
|
||||
hpfsmpl[i] = sum1 + sum2;
|
||||
}
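A unit-stride sketch of the fs/4 high-pass above: a symmetric FIR evaluated around an unweighted centre tap, with the even- and odd-indexed coefficient sums kept separate exactly as in the loop (the encoder version additionally strides the input by the channel count). The filter length and coefficient table are the file's own; this helper is illustrative only.

```c
static float highpass_sample(const float *x, const float *coef, int fir_len)
{
    float sum1 = x[(fir_len - 1) / 2];   /* centre tap, added unweighted */
    float sum2 = 0.0f;
    for (int j = 0; j < (fir_len - 1) / 2 - 1; j += 2) {
        sum1 += coef[j]     * (x[j]     + x[fir_len - j]);
        sum2 += coef[j + 1] * (x[j + 1] + x[fir_len - j - 1]);
    }
    return sum1 + sum2;
}
```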
/* Calculate the energies of each sub-shortblock */
|
||||
@@ -822,15 +823,16 @@ static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, const float *audio,
|
||||
float const *const pfe = pf + AAC_BLOCK_SIZE_LONG / (AAC_NUM_BLOCKS_SHORT * PSY_LAME_NUM_SUBBLOCKS);
|
||||
float p = 1.0f;
|
||||
for (; pf < pfe; pf++)
|
||||
p = FFMAX(p, fabsf(*pf));
|
||||
if (p < fabsf(*pf))
|
||||
p = fabsf(*pf);
|
||||
pch->prev_energy_subshort[i] = energy_subshort[i + PSY_LAME_NUM_SUBBLOCKS] = p;
|
||||
energy_short[1 + i / PSY_LAME_NUM_SUBBLOCKS] += p;
|
||||
/* NOTE: The indexes below are [i + 3 - 2] in the LAME source.
|
||||
* Obviously the 3 and 2 have some significance, or this would be just [i + 1]
|
||||
* (which is what we use here). What the 3 stands for is ambiguous, as it is both
|
||||
* number of short blocks, and the number of sub-short blocks.
|
||||
* It seems that LAME is comparing each sub-block to sub-block + 1 in the
|
||||
* previous block.
|
||||
/* FIXME: The indexes below are [i + 3 - 2] in the LAME source.
|
||||
* Obviously the 3 and 2 have some significance, or this would be just [i + 1]
|
||||
* (which is what we use here). What the 3 stands for is ambigious, as it is both
|
||||
* number of short blocks, and the number of sub-short blocks.
|
||||
* It seems that LAME is comparing each sub-block to sub-block + 1 in the
|
||||
* previous block.
|
||||
*/
|
||||
if (p > energy_subshort[i + 1])
|
||||
p = p / energy_subshort[i + 1];
|
||||
|
@@ -1185,15 +1185,14 @@ static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
|
||||
{
|
||||
int i, n;
|
||||
const float *sbr_qmf_window = div ? sbr_qmf_window_ds : sbr_qmf_window_us;
|
||||
const int step = 128 >> div;
|
||||
float *v;
|
||||
for (i = 0; i < 32; i++) {
|
||||
if (*v_off < step) {
|
||||
if (*v_off < 128 >> div) {
|
||||
int saved_samples = (1280 - 128) >> div;
|
||||
memcpy(&v0[SBR_SYNTHESIS_BUF_SIZE - saved_samples], v0, saved_samples * sizeof(float));
|
||||
*v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - step;
|
||||
*v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - (128 >> div);
|
||||
} else {
|
||||
*v_off -= step;
|
||||
*v_off -= 128 >> div;
|
||||
}
|
||||
v = v0 + *v_off;
|
||||
if (div) {
|
||||
|
@@ -34,18 +34,6 @@ static const uint8_t eac3_blocks[4] = {
|
||||
1, 2, 3, 6
|
||||
};
|
||||
|
||||
/**
|
||||
* Table for center mix levels
|
||||
* reference: Section 5.4.2.4 cmixlev
|
||||
*/
|
||||
static const uint8_t center_levels[4] = { 4, 5, 6, 5 };
|
||||
|
||||
/**
|
||||
* Table for surround mix levels
|
||||
* reference: Section 5.4.2.5 surmixlev
|
||||
*/
|
||||
static const uint8_t surround_levels[4] = { 4, 6, 7, 6 };
|
||||
|
||||
|
||||
int avpriv_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr)
|
||||
{
|
||||
@@ -65,8 +53,8 @@ int avpriv_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr)
|
||||
hdr->num_blocks = 6;
|
||||
|
||||
/* set default mix levels */
|
||||
hdr->center_mix_level = 5; // -4.5dB
|
||||
hdr->surround_mix_level = 6; // -6.0dB
|
||||
hdr->center_mix_level = 1; // -4.5dB
|
||||
hdr->surround_mix_level = 1; // -6.0dB
|
||||
|
||||
if(hdr->bitstream_id <= 10) {
|
||||
/* Normal AC-3 */
|
||||
@@ -88,9 +76,9 @@ int avpriv_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr)
|
||||
skip_bits(gbc, 2); // skip dsurmod
|
||||
} else {
|
||||
if((hdr->channel_mode & 1) && hdr->channel_mode != AC3_CHMODE_MONO)
|
||||
hdr-> center_mix_level = center_levels[get_bits(gbc, 2)];
|
||||
hdr->center_mix_level = get_bits(gbc, 2);
|
||||
if(hdr->channel_mode & 4)
|
||||
hdr->surround_mix_level = surround_levels[get_bits(gbc, 2)];
|
||||
hdr->surround_mix_level = get_bits(gbc, 2);
|
||||
}
|
||||
hdr->lfe_on = get_bits1(gbc);
|
||||
|
||||
|
@@ -76,6 +76,18 @@ static const float gain_levels[9] = {
|
||||
LEVEL_MINUS_9DB
|
||||
};
|
||||
|
||||
/**
|
||||
* Table for center mix levels
|
||||
* reference: Section 5.4.2.4 cmixlev
|
||||
*/
|
||||
static const uint8_t center_levels[4] = { 4, 5, 6, 5 };
|
||||
|
||||
/**
|
||||
* Table for surround mix levels
|
||||
* reference: Section 5.4.2.5 surmixlev
|
||||
*/
|
||||
static const uint8_t surround_levels[4] = { 4, 6, 7, 6 };
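With these tables the decoder moves from indexing gain_levels directly to a two-step lookup: the 2-bit cmixlev/surmixlev field selects a table entry, which in turn selects the gain. For example, the parser's default center index 1 maps through center_levels to entry 5, the same gain_levels slot the direct-indexing variant uses (both annotated -4.5 dB in the hunks above). A minimal sketch of the lookup:

```c
/* Illustration only: bitstream index -> mix gain via the tables above. */
static float center_gain(const float gain_levels[9],
                         const uint8_t center_levels[4], int cmixlev)
{
    return gain_levels[center_levels[cmixlev & 3]];
}
```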
|
||||
|
||||
/**
|
||||
* Table for default stereo downmixing coefficients
|
||||
* reference: Section 7.8.2 Downmixing Into Two Channels
|
||||
@@ -211,7 +223,7 @@ static int ac3_parse_header(AC3DecodeContext *s)
|
||||
int i;
|
||||
|
||||
/* read the rest of the bsi. read twice for dual mono mode. */
|
||||
i = !s->channel_mode;
|
||||
i = !(s->channel_mode);
|
||||
do {
|
||||
skip_bits(gbc, 5); // skip dialog normalization
|
||||
if (get_bits1(gbc))
|
||||
@@ -308,8 +320,8 @@ static int parse_frame_header(AC3DecodeContext *s)
|
||||
static void set_downmix_coeffs(AC3DecodeContext *s)
|
||||
{
|
||||
int i;
|
||||
float cmix = gain_levels[s-> center_mix_level];
|
||||
float smix = gain_levels[s->surround_mix_level];
|
||||
float cmix = gain_levels[center_levels[s->center_mix_level]];
|
||||
float smix = gain_levels[surround_levels[s->surround_mix_level]];
|
||||
float norm0, norm1;
|
||||
|
||||
for (i = 0; i < s->fbw_channels; i++) {
|
||||
@@ -780,7 +792,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
}
|
||||
|
||||
/* dynamic range */
|
||||
i = !s->channel_mode;
|
||||
i = !(s->channel_mode);
|
||||
do {
|
||||
if (get_bits1(gbc)) {
|
||||
s->dynamic_range[i] = ((dynamic_range_tab[get_bits(gbc, 8)] - 1.0) *
|
||||
@@ -1388,8 +1400,8 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
|
||||
avctx->channels = s->out_channels;
|
||||
avctx->channel_layout = s->channel_layout;
|
||||
|
||||
s->loro_center_mix_level = gain_levels[s-> center_mix_level];
|
||||
s->loro_surround_mix_level = gain_levels[s->surround_mix_level];
|
||||
s->loro_center_mix_level = gain_levels[ center_levels[s-> center_mix_level]];
|
||||
s->loro_surround_mix_level = gain_levels[surround_levels[s->surround_mix_level]];
|
||||
s->ltrt_center_mix_level = LEVEL_MINUS_3DB;
|
||||
s->ltrt_surround_mix_level = LEVEL_MINUS_3DB;
|
||||
/* set downmixing coefficients if needed */
|
||||
|
@@ -238,11 +238,11 @@ void ff_set_fixed_vector(float *out, const AMRFixed *in, float scale, int size)
|
||||
float y = in->y[i] * scale;
|
||||
|
||||
if (in->pitch_lag > 0)
|
||||
do {
|
||||
out[x] += y;
|
||||
y *= in->pitch_fac;
|
||||
x += in->pitch_lag;
|
||||
} while (x < size && repeats);
|
||||
do {
|
||||
out[x] += y;
|
||||
y *= in->pitch_fac;
|
||||
x += in->pitch_lag;
|
||||
} while (x < size && repeats);
|
||||
}
|
||||
}
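The do/while above places one pulse and then repeats it every pitch_lag samples, scaling the amplitude by pitch_fac at each repetition until the end of the vector (and only once when repetition is disabled). A standalone sketch of that inner loop, assuming the caller has already verified pitch_lag > 0:

```c
static void place_pulse(float *out, int size, int x, float y,
                        int pitch_lag, float pitch_fac, int repeats)
{
    do {
        out[x] += y;          /* add the pulse                  */
        y *= pitch_fac;       /* decay for the next repetition  */
        x += pitch_lag;       /* jump one pitch period ahead    */
    } while (x < size && repeats);
}
```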
|
||||
|
||||
@@ -254,9 +254,9 @@ void ff_clear_fixed_vector(float *out, const AMRFixed *in, int size)
|
||||
int x = in->x[i], repeats = !((in->no_repeat_mask >> i) & 1);
|
||||
|
||||
if (in->pitch_lag > 0)
|
||||
do {
|
||||
out[x] = 0.0;
|
||||
x += in->pitch_lag;
|
||||
} while (x < size && repeats);
|
||||
do {
|
||||
out[x] = 0.0;
|
||||
x += in->pitch_lag;
|
||||
} while (x < size && repeats);
|
||||
}
|
||||
}
|
||||
|
@@ -86,19 +86,14 @@ static const int swf_index_tables[4][16] = {
|
||||
typedef struct ADPCMDecodeContext {
|
||||
AVFrame frame;
|
||||
ADPCMChannelStatus status[6];
|
||||
int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
|
||||
} ADPCMDecodeContext;
|
||||
|
||||
static av_cold int adpcm_decode_init(AVCodecContext * avctx)
|
||||
{
|
||||
ADPCMDecodeContext *c = avctx->priv_data;
|
||||
unsigned int min_channels = 1;
|
||||
unsigned int max_channels = 2;
|
||||
|
||||
switch(avctx->codec->id) {
|
||||
case CODEC_ID_ADPCM_EA:
|
||||
min_channels = 2;
|
||||
break;
|
||||
case CODEC_ID_ADPCM_EA_R1:
|
||||
case CODEC_ID_ADPCM_EA_R2:
|
||||
case CODEC_ID_ADPCM_EA_R3:
|
||||
@@ -106,7 +101,7 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
|
||||
max_channels = 6;
|
||||
break;
|
||||
}
|
||||
if (avctx->channels < min_channels || avctx->channels > max_channels) {
|
||||
if (avctx->channels <= 0 || avctx->channels > max_channels) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
@@ -121,16 +116,12 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
case CODEC_ID_ADPCM_IMA_APC:
|
||||
if (avctx->extradata && avctx->extradata_size >= 8) {
|
||||
case CODEC_ID_ADPCM_IMA_WS:
|
||||
if (avctx->extradata && avctx->extradata_size == 2 * 4) {
|
||||
c->status[0].predictor = AV_RL32(avctx->extradata);
|
||||
c->status[1].predictor = AV_RL32(avctx->extradata + 4);
|
||||
}
|
||||
break;
|
||||
case CODEC_ID_ADPCM_IMA_WS:
|
||||
if (avctx->extradata && avctx->extradata_size >= 42)
|
||||
c->vqa_version = AV_RL16(avctx->extradata);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -367,7 +358,6 @@ static int get_nb_samples(AVCodecContext *avctx, const uint8_t *buf,
|
||||
break;
|
||||
/* simple 4-bit adpcm */
|
||||
case CODEC_ID_ADPCM_CT:
|
||||
case CODEC_ID_ADPCM_IMA_APC:
|
||||
case CODEC_ID_ADPCM_IMA_EA_SEAD:
|
||||
case CODEC_ID_ADPCM_IMA_WS:
|
||||
case CODEC_ID_ADPCM_YAMAHA:
|
||||
@@ -782,37 +772,13 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
*samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
|
||||
}
|
||||
break;
|
||||
case CODEC_ID_ADPCM_IMA_APC:
|
||||
case CODEC_ID_ADPCM_IMA_WS:
|
||||
while (src < buf + buf_size) {
|
||||
uint8_t v = *src++;
|
||||
*samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
|
||||
*samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
|
||||
}
|
||||
break;
|
||||
case CODEC_ID_ADPCM_IMA_WS:
|
||||
for (channel = 0; channel < avctx->channels; channel++) {
|
||||
const uint8_t *src0;
|
||||
int src_stride;
|
||||
int16_t *smp = samples + channel;
|
||||
|
||||
if (c->vqa_version == 3) {
|
||||
src0 = src + channel * buf_size / 2;
|
||||
src_stride = 1;
|
||||
} else {
|
||||
src0 = src + channel;
|
||||
src_stride = avctx->channels;
|
||||
}
|
||||
for (n = nb_samples / 2; n > 0; n--) {
|
||||
uint8_t v = *src0;
|
||||
src0 += src_stride;
|
||||
*smp = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
|
||||
smp += avctx->channels;
|
||||
*smp = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
|
||||
smp += avctx->channels;
|
||||
}
|
||||
}
|
||||
src = buf + buf_size;
|
||||
break;
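The IMA WS case above picks the source layout from the VQA version: version 3 keeps each channel's nibbles in its own half of the packet (planar, stride 1), other versions interleave the channels byte by byte. A sketch of just that selection, mirroring the loop above:

```c
/* Illustration only: where channel 'ch' starts and how far apart its bytes are. */
static const uint8_t *channel_source(const uint8_t *src, int buf_size, int ch,
                                     int vqa_version, int channels, int *stride)
{
    if (vqa_version == 3) {
        *stride = 1;
        return src + ch * buf_size / 2;   /* planar halves, as in the loop above */
    }
    *stride = channels;
    return src + ch;                      /* byte-interleaved                    */
}
```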
|
||||
case CODEC_ID_ADPCM_XA:
|
||||
while (buf_size >= 128) {
|
||||
xa_decode(samples, src, &c->status[0], &c->status[1],
|
||||
@@ -1256,7 +1222,6 @@ ADPCM_DECODER(CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2, "ADPCM Electronic Arts R2");
|
||||
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3, "ADPCM Electronic Arts R3");
|
||||
ADPCM_DECODER(CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
|
||||
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV");
|
||||
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_APC, adpcm_ima_apc, "ADPCM IMA CRYO APC");
|
||||
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
|
||||
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
|
||||
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
|
||||
|
@@ -58,7 +58,7 @@ int avpriv_adx_decode_header(AVCodecContext *avctx, const uint8_t *buf,
|
||||
|
||||
/* channels */
|
||||
avctx->channels = buf[7];
|
||||
if (avctx->channels <= 0 || avctx->channels > 2)
|
||||
if (avctx->channels > 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
/* sample rate */
|
||||
|
@@ -45,31 +45,27 @@ static int adx_parse(AVCodecParserContext *s1,
|
||||
ParseContext *pc = &s->pc;
|
||||
int next = END_NOT_FOUND;
|
||||
int i;
|
||||
uint64_t state = pc->state64;
|
||||
uint64_t state= pc->state64;
|
||||
|
||||
if (!s->header_size) {
|
||||
for (i = 0; i < buf_size; i++) {
|
||||
state = (state << 8) | buf[i];
|
||||
/* check for fixed fields in ADX header for possible match */
|
||||
if ((state & 0xFFFF0000FFFFFF00) == 0x8000000003120400ULL) {
|
||||
int channels = state & 0xFF;
|
||||
int header_size = ((state >> 32) & 0xFFFF) + 4;
|
||||
if (channels > 0 && header_size >= 8) {
|
||||
s->header_size = header_size;
|
||||
s->block_size = BLOCK_SIZE * channels;
|
||||
s->remaining = i - 7 + s->header_size + s->block_size;
|
||||
break;
|
||||
}
|
||||
if(!s->header_size){
|
||||
for(i=0; i<buf_size; i++){
|
||||
state= (state<<8) | buf[i];
|
||||
if((state&0xFFFF0000FFFFFF00) == 0x8000000003120400ULL && (state&0xFF) && ((state>>32)&0xFFFF)>=4){
|
||||
s->header_size= ((state>>32)&0xFFFF) + 4;
|
||||
s->block_size = BLOCK_SIZE * (state&0xFF);
|
||||
s->remaining = i - 7 + s->header_size + s->block_size;
|
||||
break;
|
||||
}
|
||||
}
|
||||
pc->state64 = state;
|
||||
pc->state64= state;
|
||||
}
|
||||
|
||||
if (s->header_size) {
|
||||
if (!s->remaining)
|
||||
if (!s->remaining) {
|
||||
s->remaining = s->block_size;
|
||||
if (s->remaining <= buf_size) {
|
||||
next = s->remaining;
|
||||
}
|
||||
if (s->remaining<=buf_size) {
|
||||
next= s->remaining;
|
||||
s->remaining = 0;
|
||||
} else
|
||||
s->remaining -= buf_size;
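The signature test above keeps the last eight input bytes in a sliding 64-bit state: the mask pins the 0x8000 marker and the fixed encoding/block-size/sample-size bytes (03 12 04, matching what the ADX encoder writes), while the unmasked bytes carry the copyright offset and channel count, which are then sanity-checked. A standalone sketch of the same test:

```c
#include <stdint.h>

/* Illustration only: does the 64-bit sliding window look like an ADX header? */
static int adx_header_match(uint64_t state)
{
    if ((state & 0xFFFF0000FFFFFF00ULL) != 0x8000000003120400ULL)
        return 0;
    return (int)(state & 0xFF) > 0 &&                  /* channel count    */
           (int)((state >> 32) & 0xFFFF) + 4 >= 8;     /* header size >= 8 */
}
```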
|
||||
|
@@ -45,8 +45,7 @@ static av_cold int adx_decode_init(AVCodecContext *avctx)
|
||||
av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
c->channels = avctx->channels;
|
||||
c->header_parsed = 1;
|
||||
c->channels = avctx->channels;
|
||||
}
|
||||
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
@@ -107,21 +106,21 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data,
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
if (!c->header_parsed && buf_size >= 2 && AV_RB16(buf) == 0x8000) {
|
||||
if(AV_RB16(buf) == 0x8000){
|
||||
int header_size;
|
||||
if ((ret = avpriv_adx_decode_header(avctx, buf, buf_size, &header_size,
|
||||
if ((ret = avpriv_adx_decode_header(avctx, buf,
|
||||
buf_size, &header_size,
|
||||
c->coeff)) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
c->channels = avctx->channels;
|
||||
c->header_parsed = 1;
|
||||
if (buf_size < header_size)
|
||||
c->channels = avctx->channels;
|
||||
if(buf_size < header_size)
|
||||
return AVERROR_INVALIDDATA;
|
||||
buf += header_size;
|
||||
buf += header_size;
|
||||
buf_size -= header_size;
|
||||
}
|
||||
if (!c->header_parsed)
|
||||
if(c->channels <= 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
/* calculate number of blocks in the packet */
|
||||
@@ -165,13 +164,6 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data,
|
||||
return buf - avpkt->data;
|
||||
}
|
||||
|
||||
static void adx_decode_flush(AVCodecContext *avctx)
|
||||
{
|
||||
ADXContext *c = avctx->priv_data;
|
||||
memset(c->prev, 0, sizeof(c->prev));
|
||||
c->eof = 0;
|
||||
}
|
||||
|
||||
AVCodec ff_adpcm_adx_decoder = {
|
||||
.name = "adpcm_adx",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
@@ -179,7 +171,6 @@ AVCodec ff_adpcm_adx_decoder = {
|
||||
.priv_data_size = sizeof(ADXContext),
|
||||
.init = adx_decode_init,
|
||||
.decode = adx_decode_frame,
|
||||
.flush = adx_decode_flush,
|
||||
.capabilities = CODEC_CAP_DR1,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"),
|
||||
};
|
||||
|
@@ -19,9 +19,9 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "avcodec.h"
|
||||
#include "adx.h"
|
||||
#include "bytestream.h"
|
||||
#include "put_bits.h"
|
||||
|
||||
/**
|
||||
@@ -33,135 +33,167 @@
|
||||
* adx2wav & wav2adx http://www.geocities.co.jp/Playtown/2004/
|
||||
*/
|
||||
|
||||
static void adx_encode(ADXContext *c, uint8_t *adx, const int16_t *wav,
|
||||
ADXChannelState *prev, int channels)
|
||||
/* 18 bytes <-> 32 samples */
|
||||
|
||||
static void adx_encode(ADXContext *c, unsigned char *adx, const short *wav,
|
||||
ADXChannelState *prev)
|
||||
{
|
||||
PutBitContext pb;
|
||||
int scale;
|
||||
int i, j;
|
||||
int s0, s1, s2, d;
|
||||
int max = 0;
|
||||
int min = 0;
|
||||
int data[BLOCK_SAMPLES];
|
||||
int i;
|
||||
int s0,s1,s2,d;
|
||||
int max=0;
|
||||
int min=0;
|
||||
int data[32];
|
||||
|
||||
s1 = prev->s1;
|
||||
s2 = prev->s2;
|
||||
for (i = 0, j = 0; j < 32; i += channels, j++) {
|
||||
for(i=0;i<32;i++) {
|
||||
s0 = wav[i];
|
||||
d = ((s0 << COEFF_BITS) - c->coeff[0] * s1 - c->coeff[1] * s2) >> COEFF_BITS;
|
||||
data[j] = d;
|
||||
if (max < d)
|
||||
max = d;
|
||||
if (min > d)
|
||||
min = d;
|
||||
data[i]=d;
|
||||
if (max<d) max=d;
|
||||
if (min>d) min=d;
|
||||
s2 = s1;
|
||||
s1 = s0;
|
||||
}
|
||||
prev->s1 = s1;
|
||||
prev->s2 = s2;
|
||||
|
||||
if (max == 0 && min == 0) {
|
||||
memset(adx, 0, BLOCK_SIZE);
|
||||
/* -8..+7 */
|
||||
|
||||
if (max==0 && min==0) {
|
||||
memset(adx,0,18);
|
||||
return;
|
||||
}
|
||||
|
||||
if (max / 7 > -min / 8)
|
||||
scale = max / 7;
|
||||
else
|
||||
scale = -min / 8;
|
||||
if (max/7>-min/8) scale = max/7;
|
||||
else scale = -min/8;
|
||||
|
||||
if (scale == 0)
|
||||
scale = 1;
|
||||
if (scale==0) scale=1;
|
||||
|
||||
AV_WB16(adx, scale);
|
||||
|
||||
init_put_bits(&pb, adx + 2, 16);
|
||||
for (i = 0; i < BLOCK_SAMPLES; i++)
|
||||
put_sbits(&pb, 4, av_clip(data[i] / scale, -8, 7));
|
||||
for (i = 0; i < 32; i++)
|
||||
put_sbits(&pb, 4, av_clip(data[i]/scale, -8, 7));
|
||||
flush_put_bits(&pb);
|
||||
}
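Each ADX block above packs 32 predictor residuals into 18 bytes: a 16-bit big-endian scale followed by 32 signed 4-bit values clipped to -8..7. The scale is chosen so the extreme residuals just fit that range. A standalone sketch of the scale selection:

```c
/* Illustration only: pick the per-block scale for 4-bit quantization. */
static int adx_block_scale(const int *data, int n)
{
    int max = 0, min = 0, scale;
    for (int i = 0; i < n; i++) {
        if (data[i] > max) max = data[i];
        if (data[i] < min) min = data[i];
    }
    scale = (max / 7 > -min / 8) ? max / 7 : -min / 8;
    return scale ? scale : 1;    /* never 0, so data[i] / scale stays defined */
}
```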
|
||||
|
||||
#define HEADER_SIZE 36
|
||||
|
||||
static int adx_encode_header(AVCodecContext *avctx, uint8_t *buf, int bufsize)
|
||||
static int adx_encode_header(AVCodecContext *avctx,unsigned char *buf,size_t bufsize)
|
||||
{
|
||||
#if 0
|
||||
struct {
|
||||
uint32_t offset; /* 0x80000000 + sample start - 4 */
|
||||
unsigned char unknown1[3]; /* 03 12 04 */
|
||||
unsigned char channel; /* 1 or 2 */
|
||||
uint32_t freq;
|
||||
uint32_t size;
|
||||
uint32_t unknown2; /* 01 f4 03 00 */
|
||||
uint32_t unknown3; /* 00 00 00 00 */
|
||||
uint32_t unknown4; /* 00 00 00 00 */
|
||||
|
||||
/* if loop
|
||||
unknown3 00 15 00 01
|
||||
unknown4 00 00 00 01
|
||||
long loop_start_sample;
|
||||
long loop_start_byte;
|
||||
long loop_end_sample;
|
||||
long loop_end_byte;
|
||||
long
|
||||
*/
|
||||
} adxhdr; /* big endian */
|
||||
/* offset-6 "(c)CRI" */
|
||||
#endif
|
||||
ADXContext *c = avctx->priv_data;
|
||||
|
||||
if (bufsize < HEADER_SIZE)
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
bytestream_put_be16(&buf, 0x8000); /* header signature */
|
||||
bytestream_put_be16(&buf, HEADER_SIZE - 4); /* copyright offset */
|
||||
bytestream_put_byte(&buf, 3); /* encoding */
|
||||
bytestream_put_byte(&buf, BLOCK_SIZE); /* block size */
|
||||
bytestream_put_byte(&buf, 4); /* sample size */
|
||||
bytestream_put_byte(&buf, avctx->channels); /* channels */
|
||||
bytestream_put_be32(&buf, avctx->sample_rate); /* sample rate */
|
||||
bytestream_put_be32(&buf, 0); /* total sample count */
|
||||
bytestream_put_be16(&buf, c->cutoff); /* cutoff frequency */
|
||||
bytestream_put_byte(&buf, 3); /* version */
|
||||
bytestream_put_byte(&buf, 0); /* flags */
|
||||
bytestream_put_be32(&buf, 0); /* unknown */
|
||||
bytestream_put_be32(&buf, 0); /* loop enabled */
|
||||
bytestream_put_be16(&buf, 0); /* padding */
|
||||
bytestream_put_buffer(&buf, "(c)CRI", 6); /* copyright signature */
|
||||
|
||||
return HEADER_SIZE;
|
||||
AV_WB32(buf+0x00,0x80000000|0x20);
|
||||
AV_WB32(buf+0x04,0x03120400|avctx->channels);
|
||||
AV_WB32(buf+0x08,avctx->sample_rate);
|
||||
AV_WB32(buf+0x0c,0); /* FIXME: set after */
|
||||
AV_WB16(buf + 0x10, c->cutoff);
|
||||
AV_WB32(buf + 0x12, 0x03000000);
|
||||
AV_WB32(buf + 0x16, 0x00000000);
|
||||
AV_WB32(buf + 0x1a, 0x00000000);
|
||||
memcpy (buf + 0x1e, "(c)CRI", 6);
|
||||
return 0x20+4;
|
||||
}
|
||||
|
||||
static av_cold int adx_encode_init(AVCodecContext *avctx)
|
||||
{
|
||||
ADXContext *c = avctx->priv_data;
|
||||
|
||||
if (avctx->channels > 2) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
avctx->frame_size = BLOCK_SAMPLES;
|
||||
if (avctx->channels > 2)
|
||||
return -1; /* only stereo or mono =) */
|
||||
avctx->frame_size = 32;
|
||||
|
||||
avctx->coded_frame = avcodec_alloc_frame();
|
||||
avctx->coded_frame= avcodec_alloc_frame();
|
||||
avctx->coded_frame->key_frame= 1;
|
||||
|
||||
// avctx->bit_rate = avctx->sample_rate*avctx->channels*18*8/32;
|
||||
|
||||
/* the cutoff can be adjusted, but this seems to work pretty well */
|
||||
c->cutoff = 500;
|
||||
ff_adx_calculate_coeffs(c->cutoff, avctx->sample_rate, COEFF_BITS, c->coeff);
|
||||
|
||||
av_log(avctx, AV_LOG_DEBUG, "adx encode init\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_cold int adx_encode_close(AVCodecContext *avctx)
|
||||
{
|
||||
av_freep(&avctx->coded_frame);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int adx_encode_frame(AVCodecContext *avctx, uint8_t *frame,
|
||||
int buf_size, void *data)
|
||||
static int adx_encode_frame(AVCodecContext *avctx,
|
||||
uint8_t *frame, int buf_size, void *data)
|
||||
{
|
||||
ADXContext *c = avctx->priv_data;
|
||||
const int16_t *samples = data;
|
||||
uint8_t *dst = frame;
|
||||
int ch;
|
||||
ADXContext *c = avctx->priv_data;
|
||||
const short *samples = data;
|
||||
unsigned char *dst = frame;
|
||||
int rest = avctx->frame_size;
|
||||
|
||||
/*
|
||||
input data size =
|
||||
ffmpeg.c: do_audio_out()
|
||||
frame_bytes = enc->frame_size * 2 * enc->channels;
|
||||
*/
|
||||
|
||||
// printf("sz=%d ",buf_size); fflush(stdout);
|
||||
if (!c->header_parsed) {
|
||||
int hdrsize;
|
||||
if ((hdrsize = adx_encode_header(avctx, dst, buf_size)) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
dst += hdrsize;
|
||||
buf_size -= hdrsize;
|
||||
int hdrsize = adx_encode_header(avctx,dst,buf_size);
|
||||
dst+=hdrsize;
|
||||
c->header_parsed = 1;
|
||||
}
|
||||
if (buf_size < BLOCK_SIZE * avctx->channels) {
|
||||
av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
for (ch = 0; ch < avctx->channels; ch++) {
|
||||
adx_encode(c, dst, samples + ch, &c->prev[ch], avctx->channels);
|
||||
dst += BLOCK_SIZE;
|
||||
if (avctx->channels==1) {
|
||||
while(rest>=32) {
|
||||
adx_encode(c, dst, samples, c->prev);
|
||||
dst+=18;
|
||||
samples+=32;
|
||||
rest-=32;
|
||||
}
|
||||
} else {
|
||||
while(rest>=32*2) {
|
||||
short tmpbuf[32*2];
|
||||
int i;
|
||||
|
||||
for(i=0;i<32;i++) {
|
||||
tmpbuf[i] = samples[i*2];
|
||||
tmpbuf[i+32] = samples[i*2+1];
|
||||
}
|
||||
|
||||
adx_encode(c, dst, tmpbuf, c->prev);
|
||||
adx_encode(c, dst + 18, tmpbuf + 32, c->prev + 1);
|
||||
dst+=18*2;
|
||||
samples+=32*2;
|
||||
rest-=32*2;
|
||||
}
|
||||
}
|
||||
return dst - frame;
|
||||
return dst-frame;
|
||||
}
|
||||
|
||||
AVCodec ff_adpcm_adx_encoder = {
|
||||
@@ -172,7 +204,6 @@ AVCodec ff_adpcm_adx_encoder = {
|
||||
.init = adx_encode_init,
|
||||
.encode = adx_encode_frame,
|
||||
.close = adx_encode_close,
|
||||
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
|
||||
AV_SAMPLE_FMT_NONE },
|
||||
.long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"),
|
||||
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
|
||||
.long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"),
|
||||
};
|
||||
|
@@ -25,23 +25,27 @@
|
||||
* @author 2005 David Hammerton
|
||||
* @see http://crazney.net/programs/itunes/alac.html
|
||||
*
|
||||
* Note: This decoder expects a 36-byte QuickTime atom to be
|
||||
* Note: This decoder expects a 36- (0x24-)byte QuickTime atom to be
|
||||
* passed through the extradata[_size] fields. This atom is tacked onto
|
||||
* the end of an 'alac' stsd atom and has the following format:
|
||||
* bytes 0-3 atom size (0x24), big-endian
|
||||
* bytes 4-7 atom type ('alac', not the 'alac' tag from start of stsd)
|
||||
* bytes 8-35 data bytes needed by decoder
|
||||
*
|
||||
* 32bit atom size
|
||||
* 32bit tag ("alac")
|
||||
* 32bit tag version (0)
|
||||
* 32bit samples per frame (used when not set explicitly in the frames)
|
||||
* 8bit compatible version (0)
|
||||
* Extradata:
|
||||
* 32bit size
|
||||
* 32bit tag (=alac)
|
||||
* 32bit zero?
|
||||
* 32bit max sample per frame
|
||||
* 8bit ?? (zero?)
|
||||
* 8bit sample size
|
||||
* 8bit history mult (40)
|
||||
* 8bit initial history (14)
|
||||
* 8bit kmodifier (10)
|
||||
* 8bit channels
|
||||
* 16bit maxRun (255)
|
||||
* 32bit max coded frame size (0 means unknown)
|
||||
* 32bit average bitrate (0 means unknown)
|
||||
* 8bit history mult
|
||||
* 8bit initial history
|
||||
* 8bit kmodifier
|
||||
* 8bit channels?
|
||||
* 16bit ??
|
||||
* 32bit max coded frame size
|
||||
* 32bit bitrate?
|
||||
* 32bit samplerate
|
||||
*/
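A hedged reference sketch of the 36-byte atom described above; the field names are descriptive only and the values are big-endian in the bitstream, so this struct documents the layout rather than something to memcpy into:

```c
#include <stdint.h>

typedef struct ALACSpecificConfig {
    uint32_t atom_size;              /* 0x24            */
    uint32_t atom_type;              /* 'alac'          */
    uint32_t version;                /* 0               */
    uint32_t max_samples_per_frame;
    uint8_t  compatible_version;     /* 0               */
    uint8_t  sample_size;
    uint8_t  history_mult;           /* 40              */
    uint8_t  initial_history;        /* 14              */
    uint8_t  k_modifier;             /* 10              */
    uint8_t  channels;
    uint16_t max_run;                /* 255             */
    uint32_t max_coded_frame_size;   /* 0 means unknown */
    uint32_t average_bitrate;        /* 0 means unknown */
    uint32_t sample_rate;
} ALACSpecificConfig;                /* 36 bytes on the wire, ignoring struct padding */
```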
|
||||
|
||||
@@ -351,17 +355,6 @@ static void interleave_stereo_24(int32_t *buffer[MAX_CHANNELS],
|
||||
}
|
||||
}
|
||||
|
||||
static void interleave_stereo_32(int32_t *buffer[MAX_CHANNELS],
|
||||
int32_t *buffer_out, int numsamples)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < numsamples; i++) {
|
||||
*buffer_out++ = buffer[0][i];
|
||||
*buffer_out++ = buffer[1][i];
|
||||
}
|
||||
}
|
||||
|
||||
static int alac_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int *got_frame_ptr, AVPacket *avpkt)
|
||||
{
|
||||
@@ -471,29 +464,24 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data,
|
||||
if(ret<0)
|
||||
return ret;
|
||||
|
||||
/* adaptive FIR filter */
|
||||
if (prediction_type[ch] == 15) {
|
||||
/* Prediction type 15 runs the adaptive FIR twice.
|
||||
* The first pass uses the special-case coef_num = 31, while
|
||||
* the second pass uses the coefs from the bitstream.
|
||||
*
|
||||
* However, this prediction type is not currently used by the
|
||||
* reference encoder.
|
||||
*/
|
||||
if (prediction_type[ch] == 0) {
|
||||
/* adaptive fir */
|
||||
predictor_decompress_fir_adapt(alac->predicterror_buffer[ch],
|
||||
alac->predicterror_buffer[ch],
|
||||
outputsamples, readsamplesize,
|
||||
NULL, 31, 0);
|
||||
} else if (prediction_type[ch] > 0) {
|
||||
av_log(avctx, AV_LOG_WARNING, "unknown prediction type: %i\n",
|
||||
prediction_type[ch]);
|
||||
alac->outputsamples_buffer[ch],
|
||||
outputsamples,
|
||||
readsamplesize,
|
||||
predictor_coef_table[ch],
|
||||
predictor_coef_num[ch],
|
||||
prediction_quantitization[ch]);
|
||||
} else {
|
||||
av_log(avctx, AV_LOG_ERROR, "FIXME: unhandled prediction type: %i\n", prediction_type[ch]);
|
||||
/* I think the only other prediction type (or perhaps this is
|
||||
* just a boolean?) runs adaptive fir twice.. like:
|
||||
* predictor_decompress_fir_adapt(predictor_error, tempout, ...)
|
||||
* predictor_decompress_fir_adapt(predictor_error, outputsamples ...)
|
||||
* little strange..
|
||||
*/
|
||||
}
|
||||
predictor_decompress_fir_adapt(alac->predicterror_buffer[ch],
|
||||
alac->outputsamples_buffer[ch],
|
||||
outputsamples, readsamplesize,
|
||||
predictor_coef_table[ch],
|
||||
predictor_coef_num[ch],
|
||||
prediction_quantitization[ch]);
|
||||
}
|
||||
} else {
|
||||
/* not compressed, easy case */
|
||||
@@ -544,16 +532,6 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data,
|
||||
outbuffer[i] = alac->outputsamples_buffer[0][i] << 8;
|
||||
}
|
||||
break;
|
||||
case 32:
|
||||
if (channels == 2) {
|
||||
interleave_stereo_32(alac->outputsamples_buffer,
|
||||
(int32_t *)alac->frame.data[0], outputsamples);
|
||||
} else {
|
||||
int32_t *outbuffer = (int32_t *)alac->frame.data[0];
|
||||
for (i = 0; i < outputsamples; i++)
|
||||
outbuffer[i] = alac->outputsamples_buffer[0][i];
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (input_buffer_size * 8 - get_bits_count(&alac->gb) > 8)
|
||||
@@ -606,7 +584,7 @@ static int alac_set_info(ALACContext *alac)
|
||||
|
||||
ptr += 4; /* size */
|
||||
ptr += 4; /* alac */
|
||||
ptr += 4; /* version */
|
||||
ptr += 4; /* 0 ? */
|
||||
|
||||
if(AV_RB32(ptr) >= UINT_MAX/4){
|
||||
av_log(alac->avctx, AV_LOG_ERROR, "setinfo_max_samples_per_frame too large\n");
|
||||
@@ -615,15 +593,15 @@ static int alac_set_info(ALACContext *alac)
|
||||
|
||||
/* buffer size / 2 ? */
|
||||
alac->setinfo_max_samples_per_frame = bytestream_get_be32(&ptr);
|
||||
ptr++; /* compatible version */
|
||||
ptr++; /* ??? */
|
||||
alac->setinfo_sample_size = *ptr++;
|
||||
alac->setinfo_rice_historymult = *ptr++;
|
||||
alac->setinfo_rice_initialhistory = *ptr++;
|
||||
alac->setinfo_rice_kmodifier = *ptr++;
|
||||
alac->numchannels = *ptr++;
|
||||
bytestream_get_be16(&ptr); /* maxRun */
|
||||
bytestream_get_be16(&ptr); /* ??? */
|
||||
bytestream_get_be32(&ptr); /* max coded frame size */
|
||||
bytestream_get_be32(&ptr); /* average bitrate */
|
||||
bytestream_get_be32(&ptr); /* bitrate ? */
|
||||
bytestream_get_be32(&ptr); /* samplerate */
|
||||
|
||||
return 0;
|
||||
@@ -649,7 +627,6 @@ static av_cold int alac_decode_init(AVCodecContext * avctx)
|
||||
switch (alac->setinfo_sample_size) {
|
||||
case 16: avctx->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
break;
|
||||
case 32:
|
||||
case 24: avctx->sample_fmt = AV_SAMPLE_FMT_S32;
|
||||
break;
|
||||
default: av_log_ask_for_sample(avctx, "Sample depth %d is not supported.\n",
|
||||
|
@@ -259,7 +259,7 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch)
|
||||
// generate warm-up samples
|
||||
residual[0] = samples[0];
|
||||
for (i = 1; i <= lpc.lpc_order; i++)
|
||||
residual[i] = samples[i] - samples[i-1];
|
||||
residual[i] = sign_extend(samples[i] - samples[i-1], s->write_sample_size);
|
||||
|
||||
// perform lpc on remaining samples
|
||||
for (i = lpc.lpc_order + 1; i < s->avctx->frame_size; i++) {
|
||||
@@ -348,7 +348,6 @@ static void alac_entropy_coder(AlacEncodeContext *s)
|
||||
static void write_compressed_frame(AlacEncodeContext *s)
|
||||
{
|
||||
int i, j;
|
||||
int prediction_type = 0;
|
||||
|
||||
if (s->avctx->channels == 2)
|
||||
alac_stereo_decorrelation(s);
|
||||
@@ -359,7 +358,7 @@ static void write_compressed_frame(AlacEncodeContext *s)
|
||||
|
||||
calc_predictor_params(s, i);
|
||||
|
||||
put_bits(&s->pbctx, 4, prediction_type);
|
||||
put_bits(&s->pbctx, 4, 0); // prediction type : currently only type 0 has been RE'd
|
||||
put_bits(&s->pbctx, 4, s->lpc[i].lpc_quant);
|
||||
|
||||
put_bits(&s->pbctx, 3, s->rc.rice_modifier);
|
||||
@@ -374,14 +373,6 @@ static void write_compressed_frame(AlacEncodeContext *s)
|
||||
|
||||
for (i = 0; i < s->avctx->channels; i++) {
|
||||
alac_linear_predictor(s, i);
|
||||
|
||||
// TODO: determine when this will actually help. for now it's not used.
|
||||
if (prediction_type == 15) {
|
||||
// 2nd pass 1st order filter
|
||||
for (j = s->avctx->frame_size - 1; j > 0; j--)
|
||||
s->predictor_buf[j] -= s->predictor_buf[j - 1];
|
||||
}
|
||||
|
||||
alac_entropy_coder(s);
|
||||
}
|
||||
}
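For reference, the prediction_type == 15 branch in the hunk above ran a second, first-order differencing pass over the predictor buffer, the "two-pass FIR" case the decoder comments describe. A standalone sketch of that extra pass:

```c
#include <stdint.h>

/* In-place first-order differencing, newest sample first, so each value
 * becomes the difference to its predecessor. */
static void second_pass_diff(int32_t *buf, int frame_size)
{
    for (int j = frame_size - 1; j > 0; j--)
        buf[j] -= buf[j - 1];
}
```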
|
||||
@@ -400,11 +391,8 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* TODO: Correctly implement multi-channel ALAC.
|
||||
It is similar to multi-channel AAC, in that it has a series of
|
||||
single-channel (SCE), channel-pair (CPE), and LFE elements. */
|
||||
if (avctx->channels > 2) {
|
||||
av_log(avctx, AV_LOG_ERROR, "only mono or stereo input is currently supported\n");
|
||||
if(avctx->channels > 2) {
|
||||
av_log(avctx, AV_LOG_ERROR, "channels > 2 not supported\n");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
|
@@ -79,7 +79,6 @@ void avcodec_register_all(void)
|
||||
REGISTER_ENCDEC (ASV2, asv2);
|
||||
REGISTER_DECODER (AURA, aura);
|
||||
REGISTER_DECODER (AURA2, aura2);
|
||||
REGISTER_ENCDEC (AVRP, avrp);
|
||||
REGISTER_DECODER (AVS, avs);
|
||||
REGISTER_DECODER (BETHSOFTVID, bethsoftvid);
|
||||
REGISTER_DECODER (BFI, bfi);
|
||||
@@ -184,8 +183,8 @@ void avcodec_register_all(void)
|
||||
REGISTER_DECODER (QDRAW, qdraw);
|
||||
REGISTER_DECODER (QPEG, qpeg);
|
||||
REGISTER_ENCDEC (QTRLE, qtrle);
|
||||
REGISTER_ENCDEC (R10K, r10k);
|
||||
REGISTER_ENCDEC (R210, r210);
|
||||
REGISTER_DECODER (R10K, r10k);
|
||||
REGISTER_DECODER (R210, r210);
|
||||
REGISTER_ENCDEC (RAWVIDEO, rawvideo);
|
||||
REGISTER_DECODER (RL2, rl2);
|
||||
REGISTER_ENCDEC (ROQ, roq);
|
||||
@@ -217,7 +216,6 @@ void avcodec_register_all(void)
|
||||
REGISTER_DECODER (UTVIDEO, utvideo);
|
||||
REGISTER_ENCDEC (V210, v210);
|
||||
REGISTER_DECODER (V210X, v210x);
|
||||
REGISTER_ENCDEC (V308, v308);
|
||||
REGISTER_ENCDEC (V410, v410);
|
||||
REGISTER_DECODER (VB, vb);
|
||||
REGISTER_DECODER (VBLE, vble);
|
||||
@@ -245,10 +243,8 @@ void avcodec_register_all(void)
|
||||
REGISTER_DECODER (XAN_WC3, xan_wc3);
|
||||
REGISTER_DECODER (XAN_WC4, xan_wc4);
|
||||
REGISTER_DECODER (XL, xl);
|
||||
REGISTER_ENCDEC (XWD, xwd);
|
||||
REGISTER_ENCDEC (Y41P, y41p);
|
||||
REGISTER_DECODER (YOP, yop);
|
||||
REGISTER_ENCDEC (YUV4, yuv4);
|
||||
REGISTER_ENCDEC (ZLIB, zlib);
|
||||
REGISTER_ENCDEC (ZMBV, zmbv);
|
||||
|
||||
@@ -271,7 +267,6 @@ void avcodec_register_all(void)
|
||||
REGISTER_ENCDEC (DCA, dca);
|
||||
REGISTER_DECODER (DSICINAUDIO, dsicinaudio);
|
||||
REGISTER_ENCDEC (EAC3, eac3);
|
||||
REGISTER_DECODER (FFWAVESYNTH, ffwavesynth);
|
||||
REGISTER_ENCDEC (FLAC, flac);
|
||||
REGISTER_ENCDEC (G723_1, g723_1);
|
||||
REGISTER_DECODER (G729, g729);
|
||||
@@ -365,7 +360,6 @@ void avcodec_register_all(void)
|
||||
REGISTER_ENCDEC (ADPCM_G722, adpcm_g722);
|
||||
REGISTER_ENCDEC (ADPCM_G726, adpcm_g726);
|
||||
REGISTER_DECODER (ADPCM_IMA_AMV, adpcm_ima_amv);
|
||||
REGISTER_DECODER (ADPCM_IMA_APC, adpcm_ima_apc);
|
||||
REGISTER_DECODER (ADPCM_IMA_DK3, adpcm_ima_dk3);
|
||||
REGISTER_DECODER (ADPCM_IMA_DK4, adpcm_ima_dk4);
|
||||
REGISTER_DECODER (ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs);
|
||||
@@ -434,7 +428,6 @@ void avcodec_register_all(void)
|
||||
REGISTER_PARSER (DVBSUB, dvbsub);
|
||||
REGISTER_PARSER (DVDSUB, dvdsub);
|
||||
REGISTER_PARSER (FLAC, flac);
|
||||
REGISTER_PARSER (GSM, gsm);
|
||||
REGISTER_PARSER (H261, h261);
|
||||
REGISTER_PARSER (H263, h263);
|
||||
REGISTER_PARSER (H264, h264);
|
||||
|
@@ -978,10 +978,6 @@ static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
|
||||
|
||||
pitch_sharpening(p, subframe, p->cur_frame_mode, &fixed_sparse);
|
||||
|
||||
if (fixed_sparse.pitch_lag == 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "The file is corrupted, pitch_lag = 0 is not allowed\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
ff_set_fixed_vector(p->fixed_vector, &fixed_sparse, 1.0,
|
||||
AMR_SUBFRAME_SIZE);
|
||||
|
||||
|
@@ -111,7 +111,7 @@ static av_cold int amrwb_decode_init(AVCodecContext *avctx)
|
||||
|
||||
/**
|
||||
* Decode the frame header in the "MIME/storage" format. This format
|
||||
* is simpler and does not carry the auxiliary frame information.
|
||||
* is simpler and does not carry the auxiliary information of the frame
|
||||
*
|
||||
* @param[in] ctx The Context
|
||||
* @param[in] buf Pointer to the input buffer
|
||||
@@ -133,7 +133,7 @@ static int decode_mime_header(AMRWBContext *ctx, const uint8_t *buf)
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode quantized ISF vectors using 36-bit indexes (6K60 mode only).
|
||||
* Decodes quantized ISF vectors using 36-bit indexes (6K60 mode only)
|
||||
*
|
||||
* @param[in] ind Array of 5 indexes
|
||||
* @param[out] isf_q Buffer for isf_q[LP_ORDER]
|
||||
@@ -160,7 +160,7 @@ static void decode_isf_indices_36b(uint16_t *ind, float *isf_q)
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode quantized ISF vectors using 46-bit indexes (except 6K60 mode).
|
||||
* Decodes quantized ISF vectors using 46-bit indexes (except 6K60 mode)
|
||||
*
|
||||
* @param[in] ind Array of 7 indexes
|
||||
* @param[out] isf_q Buffer for isf_q[LP_ORDER]
|
||||
@@ -193,8 +193,8 @@ static void decode_isf_indices_46b(uint16_t *ind, float *isf_q)
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply mean and past ISF values using the prediction factor.
|
||||
* Updates past ISF vector.
|
||||
* Apply mean and past ISF values using the prediction factor
|
||||
* Updates past ISF vector
|
||||
*
|
||||
* @param[in,out] isf_q Current quantized ISF
|
||||
* @param[in,out] isf_past Past quantized ISF
|
||||
@@ -215,7 +215,7 @@ static void isf_add_mean_and_past(float *isf_q, float *isf_past)
|
||||
|
||||
/**
|
||||
* Interpolate the fourth ISP vector from current and past frames
|
||||
* to obtain an ISP vector for each subframe.
|
||||
* to obtain a ISP vector for each subframe
|
||||
*
|
||||
* @param[in,out] isp_q ISPs for each subframe
|
||||
* @param[in] isp4_past Past ISP for subframe 4
|
||||
@@ -232,9 +232,9 @@ static void interpolate_isp(double isp_q[4][LP_ORDER], const double *isp4_past)
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode an adaptive codebook index into pitch lag (except 6k60, 8k85 modes).
|
||||
* Calculate integer lag and fractional lag always using 1/4 resolution.
|
||||
* In 1st and 3rd subframes the index is relative to last subframe integer lag.
|
||||
* Decode an adaptive codebook index into pitch lag (except 6k60, 8k85 modes)
|
||||
* Calculate integer lag and fractional lag always using 1/4 resolution
|
||||
* In 1st and 3rd subframes the index is relative to last subframe integer lag
|
||||
*
|
||||
* @param[out] lag_int Decoded integer pitch lag
|
||||
* @param[out] lag_frac Decoded fractional pitch lag
|
||||
@@ -271,9 +271,9 @@ static void decode_pitch_lag_high(int *lag_int, int *lag_frac, int pitch_index,
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode an adaptive codebook index into pitch lag for 8k85 and 6k60 modes.
|
||||
* The description is analogous to decode_pitch_lag_high, but in 6k60 the
|
||||
* relative index is used for all subframes except the first.
|
||||
* Decode a adaptive codebook index into pitch lag for 8k85 and 6k60 modes
|
||||
* Description is analogous to decode_pitch_lag_high, but in 6k60 relative
|
||||
* index is used for all subframes except the first
|
||||
*/
|
||||
static void decode_pitch_lag_low(int *lag_int, int *lag_frac, int pitch_index,
|
||||
uint8_t *base_lag_int, int subframe, enum Mode mode)
|
||||
@@ -298,7 +298,7 @@ static void decode_pitch_lag_low(int *lag_int, int *lag_frac, int pitch_index,
|
||||
|
||||
/**
|
||||
* Find the pitch vector by interpolating the past excitation at the
|
||||
* pitch delay, which is obtained in this function.
|
||||
* pitch delay, which is obtained in this function
|
||||
*
|
||||
* @param[in,out] ctx The context
|
||||
* @param[in] amr_subframe Current subframe data
|
||||
@@ -351,10 +351,10 @@ static void decode_pitch_vector(AMRWBContext *ctx,
|
||||
/**
|
||||
* The next six functions decode_[i]p_track decode exactly i pulses
|
||||
* positions and amplitudes (-1 or 1) in a subframe track using
|
||||
* an encoded pulse indexing (TS 26.190 section 5.8.2).
|
||||
* an encoded pulse indexing (TS 26.190 section 5.8.2)
|
||||
*
|
||||
* The results are given in out[], in which a negative number means
|
||||
* amplitude -1 and vice versa (i.e., ampl(x) = x / abs(x) ).
|
||||
* amplitude -1 and vice versa (i.e., ampl(x) = x / abs(x) )
|
||||
*
|
||||
* @param[out] out Output buffer (writes i elements)
|
||||
* @param[in] code Pulse index (no. of bits varies, see below)
|
||||
@@ -470,7 +470,7 @@ static void decode_6p_track(int *out, int code, int m, int off) ///code: 6m-2 bi
|
||||
|
||||
/**
|
||||
* Decode the algebraic codebook index to pulse positions and signs,
|
||||
* then construct the algebraic codebook vector.
|
||||
* then construct the algebraic codebook vector
|
||||
*
|
||||
* @param[out] fixed_vector Buffer for the fixed codebook excitation
|
||||
* @param[in] pulse_hi MSBs part of the pulse index array (higher modes only)
|
||||
@@ -541,7 +541,7 @@ static void decode_fixed_vector(float *fixed_vector, const uint16_t *pulse_hi,
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode pitch gain and fixed gain correction factor.
|
||||
* Decode pitch gain and fixed gain correction factor
|
||||
*
|
||||
* @param[in] vq_gain Vector-quantized index for gains
|
||||
* @param[in] mode Mode of the current frame
|
||||
@@ -559,7 +559,7 @@ static void decode_gains(const uint8_t vq_gain, const enum Mode mode,
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply pitch sharpening filters to the fixed codebook vector.
|
||||
* Apply pitch sharpening filters to the fixed codebook vector
|
||||
*
|
||||
* @param[in] ctx The context
|
||||
* @param[in,out] fixed_vector Fixed codebook excitation
|
||||
@@ -580,7 +580,7 @@ static void pitch_sharpening(AMRWBContext *ctx, float *fixed_vector)
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate the voicing factor (-1.0 = unvoiced to 1.0 = voiced).
|
||||
* Calculate the voicing factor (-1.0 = unvoiced to 1.0 = voiced)
|
||||
*
|
||||
* @param[in] p_vector, f_vector Pitch and fixed excitation vectors
|
||||
* @param[in] p_gain, f_gain Pitch and fixed gains
|
||||
@@ -599,8 +599,8 @@ static float voice_factor(float *p_vector, float p_gain,
|
||||
}
|
||||
|
||||
/**
|
||||
* Reduce fixed vector sparseness by smoothing with one of three IR filters,
|
||||
* also known as "adaptive phase dispersion".
|
||||
* Reduce fixed vector sparseness by smoothing with one of three IR filters
|
||||
* Also known as "adaptive phase dispersion"
|
||||
*
|
||||
* @param[in] ctx The context
|
||||
* @param[in,out] fixed_vector Unfiltered fixed vector
|
||||
@@ -670,7 +670,7 @@ static float *anti_sparseness(AMRWBContext *ctx,
|
||||
|
||||
/**
|
||||
* Calculate a stability factor {teta} based on distance between
|
||||
* current and past isf. A value of 1 shows maximum signal stability.
|
||||
* current and past isf. A value of 1 shows maximum signal stability
|
||||
*/
|
||||
static float stability_factor(const float *isf, const float *isf_past)
|
||||
{
|
||||
@@ -687,7 +687,7 @@ static float stability_factor(const float *isf, const float *isf_past)
|
||||
|
||||
/**
|
||||
* Apply a non-linear fixed gain smoothing in order to reduce
|
||||
* fluctuation in the energy of excitation.
|
||||
* fluctuation in the energy of excitation
|
||||
*
|
||||
* @param[in] fixed_gain Unsmoothed fixed gain
|
||||
* @param[in,out] prev_tr_gain Previous threshold gain (updated)
|
||||
@@ -718,7 +718,7 @@ static float noise_enhancer(float fixed_gain, float *prev_tr_gain,
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter the fixed_vector to emphasize the higher frequencies.
|
||||
* Filter the fixed_vector to emphasize the higher frequencies
|
||||
*
|
||||
* @param[in,out] fixed_vector Fixed codebook vector
|
||||
* @param[in] voice_fac Frame voicing factor
|
||||
@@ -742,7 +742,7 @@ static void pitch_enhancer(float *fixed_vector, float voice_fac)
|
||||
}
|
||||
|
||||
/**
|
||||
* Conduct 16th order linear predictive coding synthesis from excitation.
|
||||
* Conduct 16th order linear predictive coding synthesis from excitation
|
||||
*
|
||||
* @param[in] ctx Pointer to the AMRWBContext
|
||||
* @param[in] lpc Pointer to the LPC coefficients
|
||||
@@ -802,7 +802,7 @@ static void de_emphasis(float *out, float *in, float m, float mem[1])
|
||||
|
||||
/**
|
||||
* Upsample a signal by 5/4 ratio (from 12.8kHz to 16kHz) using
|
||||
* a FIR interpolation filter. Uses past data from before *in address.
|
||||
* a FIR interpolation filter. Uses past data from before *in address
|
||||
*
|
||||
* @param[out] out Buffer for interpolated signal
|
||||
* @param[in] in Current signal data (length 0.8*o_size)
|
||||
@@ -832,7 +832,7 @@ static void upsample_5_4(float *out, const float *in, int o_size)
|
||||
|
||||
/**
|
||||
* Calculate the high-band gain based on encoded index (23k85 mode) or
|
||||
* on the low-band speech signal and the Voice Activity Detection flag.
|
||||
* on the low-band speech signal and the Voice Activity Detection flag
|
||||
*
|
||||
* @param[in] ctx The context
|
||||
* @param[in] synth LB speech synthesis at 12.8k
|
||||
@@ -857,7 +857,7 @@ static float find_hb_gain(AMRWBContext *ctx, const float *synth,
|
||||
|
||||
/**
|
||||
* Generate the high-band excitation with the same energy from the lower
|
||||
* one and scaled by the given gain.
|
||||
* one and scaled by the given gain
|
||||
*
|
||||
* @param[in] ctx The context
|
||||
* @param[out] hb_exc Buffer for the excitation
|
||||
@@ -880,7 +880,7 @@ static void scaled_hb_excitation(AMRWBContext *ctx, float *hb_exc,
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate the auto-correlation for the ISF difference vector.
|
||||
* Calculate the auto-correlation for the ISF difference vector
|
||||
*/
|
||||
static float auto_correlation(float *diff_isf, float mean, int lag)
|
||||
{
|
||||
@@ -896,7 +896,7 @@ static float auto_correlation(float *diff_isf, float mean, int lag)
|
||||
|
||||
/**
|
||||
* Extrapolate a ISF vector to the 16kHz range (20th order LP)
|
||||
* used at mode 6k60 LP filter for the high frequency band.
|
||||
* used at mode 6k60 LP filter for the high frequency band
|
||||
*
|
||||
* @param[out] out Buffer for extrapolated isf
|
||||
* @param[in] isf Input isf vector
|
||||
@@ -981,7 +981,7 @@ static void lpc_weighting(float *out, const float *lpc, float gamma, int size)
|
||||
|
||||
/**
|
||||
* Conduct 20th order linear predictive coding synthesis for the high
|
||||
* frequency band excitation at 16kHz.
|
||||
* frequency band excitation at 16kHz
|
||||
*
|
||||
* @param[in] ctx The context
|
||||
* @param[in] subframe Current subframe index (0 to 3)
|
||||
@@ -1019,8 +1019,8 @@ static void hb_synthesis(AMRWBContext *ctx, int subframe, float *samples,
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply a 15th order filter to high-band samples.
|
||||
* The filter characteristic depends on the given coefficients.
|
||||
* Apply to high-band samples a 15th order filter
|
||||
* The filter characteristic depends on the given coefficients
|
||||
*
|
||||
* @param[out] out Buffer for filtered output
|
||||
* @param[in] fir_coef Filter coefficients
|
||||
@@ -1048,7 +1048,7 @@ static void hb_fir_filter(float *out, const float fir_coef[HB_FIR_SIZE + 1],
|
||||
}
|
||||
|
||||
/**
|
||||
* Update context state before the next subframe.
|
||||
* Update context state before the next subframe
|
||||
*/
|
||||
static void update_sub_state(AMRWBContext *ctx)
|
||||
{
|
||||
|
@@ -20,7 +20,7 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#define BITSTREAM_READER_LE
|
||||
#define ALT_BITSTREAM_READER_LE
|
||||
#include "avcodec.h"
|
||||
#include "dsputil.h"
|
||||
#include "get_bits.h"
|
||||
|
@@ -68,7 +68,6 @@ ELF .size \name, . - \name
|
||||
.purgem endfunc
|
||||
.endm
|
||||
.text
|
||||
.align 2
|
||||
.if \export
|
||||
.global EXTERN_ASM\name
|
||||
EXTERN_ASM\name:
|
||||
@@ -114,12 +113,6 @@ T add \rn, \rn, \rm
|
||||
T ldr \rt, [\rn]
|
||||
.endm
|
||||
|
||||
.macro ldr_dpre rt, rn, rm:vararg
|
||||
A ldr \rt, [\rn, -\rm]!
|
||||
T sub \rn, \rn, \rm
|
||||
T ldr \rt, [\rn]
|
||||
.endm
|
||||
|
||||
.macro ldr_dpren rt, rn, rm:vararg
|
||||
A ldr \rt, [\rn, -\rm]
|
||||
T sub \rt, \rn, \rm
|
||||
|
@@ -25,7 +25,7 @@
|
||||
#include "config.h"
|
||||
#include "libavutil/intmath.h"
|
||||
|
||||
#if HAVE_ARMV6 && HAVE_INLINE_ASM && AV_GCC_VERSION_AT_LEAST(4,4)
|
||||
#if HAVE_ARMV6 && HAVE_INLINE_ASM
|
||||
|
||||
#define decode_blockcodes decode_blockcodes
|
||||
static inline int decode_blockcodes(int code1, int code2, int levels,
|
||||
|
@@ -42,12 +42,10 @@ av_cold void ff_fft_init_arm(FFTContext *s)
|
||||
if (HAVE_NEON) {
|
||||
s->fft_permute = ff_fft_permute_neon;
|
||||
s->fft_calc = ff_fft_calc_neon;
|
||||
#if CONFIG_MDCT
|
||||
s->imdct_calc = ff_imdct_calc_neon;
|
||||
s->imdct_half = ff_imdct_half_neon;
|
||||
s->mdct_calc = ff_mdct_calc_neon;
|
||||
s->mdct_permutation = FF_MDCT_PERM_INTERLEAVE;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -66,10 +66,10 @@ function ff_scalarproduct_int16_neon, export=1
|
||||
|
||||
3: vpadd.s32 d16, d0, d1
|
||||
vpadd.s32 d17, d2, d3
|
||||
vpadd.s32 d10, d4, d5
|
||||
vpadd.s32 d11, d6, d7
|
||||
vpadd.s32 d18, d4, d5
|
||||
vpadd.s32 d19, d6, d7
|
||||
vpadd.s32 d0, d16, d17
|
||||
vpadd.s32 d1, d10, d11
|
||||
vpadd.s32 d1, d18, d19
|
||||
vpadd.s32 d2, d0, d1
|
||||
vpaddl.s32 d3, d2
|
||||
vmov.32 r0, d3[0]
|
||||
@@ -106,10 +106,10 @@ function ff_scalarproduct_and_madd_int16_neon, export=1
|
||||
|
||||
vpadd.s32 d16, d0, d1
|
||||
vpadd.s32 d17, d2, d3
|
||||
vpadd.s32 d10, d4, d5
|
||||
vpadd.s32 d11, d6, d7
|
||||
vpadd.s32 d18, d4, d5
|
||||
vpadd.s32 d19, d6, d7
|
||||
vpadd.s32 d0, d16, d17
|
||||
vpadd.s32 d1, d10, d11
|
||||
vpadd.s32 d1, d18, d19
|
||||
vpadd.s32 d2, d0, d1
|
||||
vpaddl.s32 d3, d2
|
||||
vmov.32 r0, d3[0]
|
||||
|
@@ -23,18 +23,11 @@
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "libavcodec/rv34dsp.h"
|
||||
|
||||
void ff_rv34_inv_transform_neon(DCTELEM *block);
|
||||
void ff_rv34_inv_transform_noround_neon(DCTELEM *block);
|
||||
|
||||
void ff_rv34_inv_transform_noround_dc_neon(DCTELEM *block);
|
||||
|
||||
void ff_rv34_idct_add_neon(uint8_t *dst, int stride, DCTELEM *block);
|
||||
void ff_rv34_idct_dc_add_neon(uint8_t *dst, int stride, int dc);
|
||||
|
||||
void ff_rv34dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
|
||||
{
|
||||
c->rv34_inv_transform = ff_rv34_inv_transform_noround_neon;
|
||||
c->rv34_inv_transform_dc = ff_rv34_inv_transform_noround_dc_neon;
|
||||
|
||||
c->rv34_idct_add = ff_rv34_idct_add_neon;
|
||||
c->rv34_idct_dc_add = ff_rv34_idct_dc_add_neon;
|
||||
c->rv34_inv_transform_tab[0] = ff_rv34_inv_transform_neon;
|
||||
c->rv34_inv_transform_tab[1] = ff_rv34_inv_transform_noround_neon;
|
||||
}
|
||||
|
@@ -19,10 +19,13 @@
|
||||
*/
|
||||
|
||||
#include "asm.S"
|
||||
#include "neon.S"
|
||||
|
||||
.macro rv34_inv_transform r0
|
||||
vld1.16 {q14-q15}, [\r0,:128]
|
||||
.macro rv34_inv_transform
|
||||
mov r1, #16
|
||||
vld1.16 {d28}, [r0,:64], r1 @ block[i+8*0]
|
||||
vld1.16 {d29}, [r0,:64], r1 @ block[i+8*1]
|
||||
vld1.16 {d30}, [r0,:64], r1 @ block[i+8*2]
|
||||
vld1.16 {d31}, [r0,:64], r1 @ block[i+8*3]
|
||||
vmov.s16 d0, #13
|
||||
vshll.s16 q12, d29, #3
|
||||
vshll.s16 q13, d29, #4
|
||||
@@ -32,12 +35,12 @@
|
||||
vmlal.s16 q10, d30, d0
|
||||
vmull.s16 q11, d28, d0
|
||||
vmlsl.s16 q11, d30, d0
|
||||
vsubw.s16 q12, q12, d29 @ z2 = block[i+4*1]*7
|
||||
vaddw.s16 q13, q13, d29 @ z3 = block[i+4*1]*17
|
||||
vsubw.s16 q12, q12, d29 @ z2 = block[i+8*1]*7
|
||||
vaddw.s16 q13, q13, d29 @ z3 = block[i+8*1]*17
|
||||
vsubw.s16 q9, q9, d31
|
||||
vaddw.s16 q1, q1, d31
|
||||
vadd.s32 q13, q13, q9 @ z3 = 17*block[i+4*1] + 7*block[i+4*3]
|
||||
vsub.s32 q12, q12, q1 @ z2 = 7*block[i+4*1] - 17*block[i+4*3]
|
||||
vadd.s32 q13, q13, q9 @ z3 = 17*block[i+8*1] + 7*block[i+8*3]
|
||||
vsub.s32 q12, q12, q1 @ z2 = 7*block[i+8*1] - 17*block[i+8*3]
|
||||
vadd.s32 q1, q10, q13 @ z0 + z3
|
||||
vadd.s32 q2, q11, q12 @ z1 + z2
|
||||
vsub.s32 q8, q10, q13 @ z0 - z3
|
||||
@@ -67,39 +70,25 @@
|
||||
vsub.s32 q15, q14, q9 @ z0 - z3
|
||||
.endm
|
||||
|
||||
/* void rv34_idct_add_c(uint8_t *dst, int stride, DCTELEM *block) */
|
||||
function ff_rv34_idct_add_neon, export=1
|
||||
mov r3, r0
|
||||
rv34_inv_transform r2
|
||||
vmov.i16 q12, #0
|
||||
vrshrn.s32 d16, q1, #10 @ (z0 + z3) >> 10
|
||||
vrshrn.s32 d17, q2, #10 @ (z1 + z2) >> 10
|
||||
vrshrn.s32 d18, q3, #10 @ (z1 - z2) >> 10
|
||||
vrshrn.s32 d19, q15, #10 @ (z0 - z3) >> 10
|
||||
vld1.32 {d28[]}, [r0,:32], r1
|
||||
vld1.32 {d29[]}, [r0,:32], r1
|
||||
vtrn.32 q8, q9
|
||||
vld1.32 {d28[1]}, [r0,:32], r1
|
||||
vld1.32 {d29[1]}, [r0,:32], r1
|
||||
vst1.16 {q12}, [r2,:128]! @ memset(block, 0, 16)
|
||||
vst1.16 {q12}, [r2,:128] @ memset(block+16, 0, 16)
|
||||
vtrn.16 d16, d17
|
||||
vtrn.32 d28, d29
|
||||
vtrn.16 d18, d19
|
||||
vaddw.u8 q0, q8, d28
|
||||
vaddw.u8 q1, q9, d29
|
||||
vqmovun.s16 d28, q0
|
||||
vqmovun.s16 d29, q1
|
||||
vst1.32 {d28[0]}, [r3,:32], r1
|
||||
vst1.32 {d28[1]}, [r3,:32], r1
|
||||
vst1.32 {d29[0]}, [r3,:32], r1
|
||||
vst1.32 {d29[1]}, [r3,:32], r1
|
||||
/* void ff_rv34_inv_transform_neon(DCTELEM *block); */
|
||||
function ff_rv34_inv_transform_neon, export=1
|
||||
mov r2, r0
|
||||
rv34_inv_transform
|
||||
vrshrn.s32 d1, q2, #10 @ (z1 + z2) >> 10
|
||||
vrshrn.s32 d0, q1, #10 @ (z0 + z3) >> 10
|
||||
vrshrn.s32 d2, q3, #10 @ (z1 - z2) >> 10
|
||||
vrshrn.s32 d3, q15, #10 @ (z0 - z3) >> 10
|
||||
vst4.16 {d0[0], d1[0], d2[0], d3[0]}, [r2,:64], r1
|
||||
vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r1
|
||||
vst4.16 {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r1
|
||||
vst4.16 {d0[3], d1[3], d2[3], d3[3]}, [r2,:64], r1
|
||||
bx lr
|
||||
endfunc
|
||||
|
||||
/* void rv34_inv_transform_noround_neon(DCTELEM *block); */
|
||||
function ff_rv34_inv_transform_noround_neon, export=1
|
||||
rv34_inv_transform r0
|
||||
mov r2, r0
|
||||
rv34_inv_transform
|
||||
vshl.s32 q11, q2, #1
|
||||
vshl.s32 q10, q1, #1
|
||||
vshl.s32 q12, q3, #1
|
||||
@@ -112,45 +101,9 @@ function ff_rv34_inv_transform_noround_neon, export=1
|
||||
vshrn.s32 d1, q11, #11 @ (z1 + z2)*3 >> 11
|
||||
vshrn.s32 d2, q12, #11 @ (z1 - z2)*3 >> 11
|
||||
vshrn.s32 d3, q13, #11 @ (z0 - z3)*3 >> 11
|
||||
vst4.16 {d0[0], d1[0], d2[0], d3[0]}, [r0,:64]!
|
||||
vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r0,:64]!
|
||||
vst4.16 {d0[2], d1[2], d2[2], d3[2]}, [r0,:64]!
|
||||
vst4.16 {d0[3], d1[3], d2[3], d3[3]}, [r0,:64]!
|
||||
bx lr
|
||||
endfunc
|
||||
|
||||
/* void ff_rv34_idct_dc_add_neon(uint8_t *dst, int stride, int dc) */
|
||||
function ff_rv34_idct_dc_add_neon, export=1
|
||||
mov r3, r0
|
||||
vld1.32 {d28[]}, [r0,:32], r1
|
||||
vld1.32 {d29[]}, [r0,:32], r1
|
||||
vdup.16 d0, r2
|
||||
vmov.s16 d1, #169
|
||||
vld1.32 {d28[1]}, [r0,:32], r1
|
||||
vmull.s16 q1, d0, d1 @ dc * 13 * 13
|
||||
vld1.32 {d29[1]}, [r0,:32], r1
|
||||
vrshrn.s32 d0, q1, #10 @ (dc * 13 * 13 + 0x200) >> 10
|
||||
vmov d1, d0
|
||||
vaddw.u8 q2, q0, d28
|
||||
vaddw.u8 q3, q0, d29
|
||||
vqmovun.s16 d28, q2
|
||||
vqmovun.s16 d29, q3
|
||||
vst1.32 {d28[0]}, [r3,:32], r1
|
||||
vst1.32 {d29[0]}, [r3,:32], r1
|
||||
vst1.32 {d28[1]}, [r3,:32], r1
|
||||
vst1.32 {d29[1]}, [r3,:32], r1
|
||||
bx lr
|
||||
endfunc
|
||||
|
||||
/* void rv34_inv_transform_dc_noround_c(DCTELEM *block) */
|
||||
function ff_rv34_inv_transform_noround_dc_neon, export=1
|
||||
vld1.16 {d28[]}, [r0,:16] @ block[0]
|
||||
vmov.i16 d4, #251
|
||||
vorr.s16 d4, #256 @ 13^2 * 3
|
||||
vmull.s16 q3, d28, d4
|
||||
vshrn.s32 d0, q3, #11
|
||||
vmov.i16 d1, d0
|
||||
vst1.64 {q0}, [r0,:128]!
|
||||
vst1.64 {q0}, [r0,:128]!
|
||||
vst4.16 {d0[0], d1[0], d2[0], d3[0]}, [r2,:64], r1
|
||||
vst4.16 {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r1
|
||||
vst4.16 {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r1
|
||||
vst4.16 {d0[3], d1[3], d2[3], d3[3]}, [r2,:64], r1
|
||||
bx lr
|
||||
endfunc
|
||||
|
@@ -54,20 +54,6 @@ void ff_avg_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
|
||||
void ff_rv40_weight_func_16_neon(uint8_t *, uint8_t *, uint8_t *, int, int, int);
|
||||
void ff_rv40_weight_func_8_neon(uint8_t *, uint8_t *, uint8_t *, int, int, int);
|
||||
|
||||
int ff_rv40_h_loop_filter_strength_neon(uint8_t *src, int stride,
|
||||
int beta, int beta2, int edge,
|
||||
int *p1, int *q1);
|
||||
int ff_rv40_v_loop_filter_strength_neon(uint8_t *src, int stride,
|
||||
int beta, int beta2, int edge,
|
||||
int *p1, int *q1);
|
||||
|
||||
void ff_rv40_h_weak_loop_filter_neon(uint8_t *src, int stride, int filter_p1,
|
||||
int filter_q1, int alpha, int beta,
|
||||
int lim_p0q0, int lim_q1, int lim_p1);
|
||||
void ff_rv40_v_weak_loop_filter_neon(uint8_t *src, int stride, int filter_p1,
|
||||
int filter_q1, int alpha, int beta,
|
||||
int lim_p0q0, int lim_q1, int lim_p1);
|
||||
|
||||
void ff_rv40dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
|
||||
{
|
||||
c->put_pixels_tab[0][ 1] = ff_put_rv40_qpel16_mc10_neon;
|
||||
@@ -130,9 +116,4 @@ void ff_rv40dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
|
||||
|
||||
c->rv40_weight_pixels_tab[0] = ff_rv40_weight_func_16_neon;
|
||||
c->rv40_weight_pixels_tab[1] = ff_rv40_weight_func_8_neon;
|
||||
|
||||
c->rv40_loop_filter_strength[0] = ff_rv40_h_loop_filter_strength_neon;
|
||||
c->rv40_loop_filter_strength[1] = ff_rv40_v_loop_filter_strength_neon;
|
||||
c->rv40_weak_loop_filter[0] = ff_rv40_h_weak_loop_filter_neon;
|
||||
c->rv40_weak_loop_filter[1] = ff_rv40_v_weak_loop_filter_neon;
|
||||
}
|
||||
|
@@ -372,7 +372,7 @@ endfunc
|
||||
|
||||
function ff_\type\()_rv40_qpel8_mc33_neon, export=1
|
||||
mov r3, #8
|
||||
b X(ff_\type\()_pixels8_xy2_neon)
|
||||
b ff_\type\()_pixels8_xy2_neon
|
||||
endfunc
|
||||
|
||||
function ff_\type\()_rv40_qpel8_mc13_neon, export=1
|
||||
@@ -652,7 +652,7 @@ endfunc
|
||||
|
||||
function ff_\type\()_rv40_qpel16_mc33_neon, export=1
|
||||
mov r3, #16
|
||||
b X(ff_\type\()_pixels16_xy2_neon)
|
||||
b ff_\type\()_pixels16_xy2_neon
|
||||
endfunc
|
||||
.endm
|
||||
|
||||
@@ -722,199 +722,3 @@ function ff_rv40_weight_func_8_neon, export=1
|
||||
bne 1b
|
||||
bx lr
|
||||
endfunc
|
||||
|
||||
function ff_rv40_h_loop_filter_strength_neon, export=1
|
||||
pkhbt r2, r3, r2, lsl #18
|
||||
|
||||
ldr r3, [r0]
|
||||
ldr_dpre r12, r0, r1
|
||||
teq r3, r12
|
||||
beq 1f
|
||||
|
||||
sub r0, r0, r1, lsl #1
|
||||
|
||||
vld1.32 {d4[]}, [r0,:32], r1 @ -3
|
||||
vld1.32 {d0[]}, [r0,:32], r1 @ -2
|
||||
vld1.32 {d4[1]}, [r0,:32], r1 @ -1
|
||||
vld1.32 {d5[]}, [r0,:32], r1 @ 0
|
||||
vld1.32 {d1[]}, [r0,:32], r1 @ 1
|
||||
vld1.32 {d5[0]}, [r0,:32], r1 @ 2
|
||||
|
||||
vpaddl.u8 q8, q0 @ -2, -2, -2, -2, 1, 1, 1, 1
|
||||
vpaddl.u8 q9, q2 @ -3, -3, -1, -1, 2, 2, 0, 0
|
||||
vdup.32 d30, r2 @ beta2, beta << 2
|
||||
vpadd.u16 d16, d16, d17 @ -2, -2, 1, 1
|
||||
vpadd.u16 d18, d18, d19 @ -3, -1, 2, 0
|
||||
vabd.u16 d16, d18, d16
|
||||
vclt.u16 d16, d16, d30
|
||||
|
||||
ldrd r2, r3, [sp, #4]
|
||||
vmovl.u16 q12, d16
|
||||
vtrn.16 d16, d17
|
||||
vshr.u32 q12, q12, #15
|
||||
ldr r0, [sp]
|
||||
vst1.32 {d24[1]}, [r2,:32]
|
||||
vst1.32 {d25[1]}, [r3,:32]
|
||||
|
||||
cmp r0, #0
|
||||
it eq
|
||||
bxeq lr
|
||||
|
||||
vand d18, d16, d17
|
||||
vtrn.32 d18, d19
|
||||
vand d18, d18, d19
|
||||
vmov.u16 r0, d18[0]
|
||||
bx lr
|
||||
1:
|
||||
ldrd r2, r3, [sp, #4]
|
||||
mov r0, #0
|
||||
str r0, [r2]
|
||||
str r0, [r3]
|
||||
bx lr
|
||||
endfunc
|
||||
|
||||
function ff_rv40_v_loop_filter_strength_neon, export=1
|
||||
sub r0, r0, #3
|
||||
pkhbt r2, r3, r2, lsl #18
|
||||
|
||||
vld1.8 {d0}, [r0], r1
|
||||
vld1.8 {d1}, [r0], r1
|
||||
vld1.8 {d2}, [r0], r1
|
||||
vld1.8 {d3}, [r0], r1
|
||||
|
||||
vaddl.u8 q0, d0, d1
|
||||
vaddl.u8 q1, d2, d3
|
||||
vdup.32 q15, r2
|
||||
vadd.u16 q0, q0, q1 @ -3, -2, -1, 0, 1, 2
|
||||
vext.16 q1, q0, q0, #1 @ -2, -1, 0, 1, 2
|
||||
vabd.u16 q0, q1, q0
|
||||
vclt.u16 q0, q0, q15
|
||||
|
||||
ldrd r2, r3, [sp, #4]
|
||||
vmovl.u16 q1, d0
|
||||
vext.16 d1, d0, d1, #3
|
||||
vshr.u32 q1, q1, #15
|
||||
ldr r0, [sp]
|
||||
vst1.32 {d2[1]}, [r2,:32]
|
||||
vst1.32 {d3[1]}, [r3,:32]
|
||||
|
||||
cmp r0, #0
|
||||
it eq
|
||||
bxeq lr
|
||||
|
||||
vand d0, d0, d1
|
||||
vtrn.16 d0, d1
|
||||
vand d0, d0, d1
|
||||
vmov.u16 r0, d0[0]
|
||||
bx lr
|
||||
endfunc
|
||||
|
||||
.macro rv40_weak_loop_filter
|
||||
vdup.16 d30, r2 @ filter_p1
|
||||
vdup.16 d31, r3 @ filter_q1
|
||||
ldrd r2, r3, [sp]
|
||||
vdup.16 d28, r2 @ alpha
|
||||
vdup.16 d29, r3 @ beta
|
||||
ldr r12, [sp, #8]
|
||||
vdup.16 d25, r12 @ lim_p0q0
|
||||
ldrd r2, r3, [sp, #12]
|
||||
vsubl.u8 q9, d5, d4 @ x, t
|
||||
vabdl.u8 q8, d5, d4 @ x, abs(t)
|
||||
vneg.s16 q15, q15
|
||||
vceq.i16 d16, d19, #0 @ !t
|
||||
vshl.s16 d19, d19, #2 @ t << 2
|
||||
vmul.u16 d18, d17, d28 @ alpha * abs(t)
|
||||
vand d24, d30, d31 @ filter_p1 & filter_q1
|
||||
vsubl.u8 q1, d0, d4 @ p1p2, p1p0
|
||||
vsubl.u8 q3, d1, d5 @ q1q2, q1q0
|
||||
vmov.i16 d22, #3
|
||||
vshr.u16 d18, d18, #7
|
||||
vadd.i16 d22, d22, d24 @ 3 - (filter_p1 & filter_q1)
|
||||
vsubl.u8 q10, d0, d1 @ src[-2] - src[1]
|
||||
vcle.u16 d18, d18, d22
|
||||
vand d20, d20, d24
|
||||
vneg.s16 d23, d25 @ -lim_p0q0
|
||||
vadd.s16 d19, d19, d20
|
||||
vbic d16, d18, d16 @ t && u <= 3 - (fp1 & fq1)
|
||||
vtrn.32 d4, d5 @ -3, 2, -1, 0
|
||||
vrshr.s16 d19, d19, #3
|
||||
vmov d28, d29 @ beta
|
||||
vswp d3, d6 @ q1q2, p1p0
|
||||
vmin.s16 d19, d19, d25
|
||||
vand d30, d30, d16
|
||||
vand d31, d31, d16
|
||||
vadd.s16 q10, q1, q3 @ p1p2 + p1p0, q1q2 + q1q0
|
||||
vmax.s16 d19, d19, d23 @ diff
|
||||
vabs.s16 q1, q1 @ abs(p1p2), abs(q1q2)
|
||||
vand d18, d19, d16 @ diff
|
||||
vcle.u16 q1, q1, q14
|
||||
vneg.s16 d19, d18 @ -diff
|
||||
vdup.16 d26, r3 @ lim_p1
|
||||
vaddw.u8 q2, q9, d5 @ src[-1]+diff, src[0]-diff
|
||||
vhsub.s16 q11, q10, q9
|
||||
vand q1, q1, q15
|
||||
vqmovun.s16 d4, q2 @ -1, 0
|
||||
vand q9, q11, q1
|
||||
vdup.16 d27, r2 @ lim_q1
|
||||
vneg.s16 q9, q9
|
||||
vneg.s16 q14, q13
|
||||
vmin.s16 q9, q9, q13
|
||||
vtrn.32 d0, d1 @ -2, 1, -2, 1
|
||||
vmax.s16 q9, q9, q14
|
||||
vaddw.u8 q3, q9, d0
|
||||
vqmovun.s16 d5, q3 @ -2, 1
|
||||
.endm
|
||||
|
||||
function ff_rv40_h_weak_loop_filter_neon, export=1
|
||||
sub r0, r0, r1, lsl #1
|
||||
sub r0, r0, r1
|
||||
|
||||
vld1.32 {d4[]}, [r0,:32], r1
|
||||
vld1.32 {d0[]}, [r0,:32], r1
|
||||
vld1.32 {d4[1]}, [r0,:32], r1
|
||||
vld1.32 {d5[]}, [r0,:32], r1
|
||||
vld1.32 {d1[]}, [r0,:32], r1
|
||||
vld1.32 {d5[0]}, [r0,:32]
|
||||
|
||||
sub r0, r0, r1, lsl #2
|
||||
|
||||
rv40_weak_loop_filter
|
||||
|
||||
vst1.32 {d5[0]}, [r0,:32], r1
|
||||
vst1.32 {d4[0]}, [r0,:32], r1
|
||||
vst1.32 {d4[1]}, [r0,:32], r1
|
||||
vst1.32 {d5[1]}, [r0,:32], r1
|
||||
|
||||
bx lr
|
||||
endfunc
|
||||
|
||||
function ff_rv40_v_weak_loop_filter_neon, export=1
|
||||
sub r12, r0, #3
|
||||
sub r0, r0, #2
|
||||
|
||||
vld1.8 {d4}, [r12], r1
|
||||
vld1.8 {d5}, [r12], r1
|
||||
vld1.8 {d2}, [r12], r1
|
||||
vld1.8 {d3}, [r12], r1
|
||||
|
||||
vtrn.16 q2, q1
|
||||
vtrn.8 d4, d5
|
||||
vtrn.8 d2, d3
|
||||
|
||||
vrev64.32 d5, d5
|
||||
vtrn.32 q2, q1
|
||||
vdup.32 d0, d3[0]
|
||||
vdup.32 d1, d2[0]
|
||||
|
||||
rv40_weak_loop_filter
|
||||
|
||||
vtrn.32 q2, q3
|
||||
vswp d4, d5
|
||||
|
||||
vst4.8 {d4[0],d5[0],d6[0],d7[0]}, [r0], r1
|
||||
vst4.8 {d4[1],d5[1],d6[1],d7[1]}, [r0], r1
|
||||
vst4.8 {d4[2],d5[2],d6[2],d7[2]}, [r0], r1
|
||||
vst4.8 {d4[3],d5[3],d6[3],d7[3]}, [r0], r1
|
||||
|
||||
bx lr
|
||||
endfunc
|
||||
|
@@ -491,8 +491,8 @@ __end_bef_a_evaluation:
|
||||
bal __end_a_evaluation
|
||||
|
||||
|
||||
.align
|
||||
__constant_ptr__: @@ see #defines at the beginning of the source code for values.
|
||||
.align
|
||||
.word W1
|
||||
.word W2
|
||||
.word W3
|
||||
|
@@ -366,7 +366,7 @@ int ff_ass_split_override_codes(const ASSCodesCallbacks *callbacks, void *priv,
|
||||
char new_line[2];
|
||||
int text_len = 0;
|
||||
|
||||
while (*buf) {
|
||||
while (buf && *buf) {
|
||||
if (text && callbacks->text &&
|
||||
(sscanf(buf, "\\%1[nN]", new_line) == 1 ||
|
||||
!strncmp(buf, "{\\", 2))) {
|
||||
|
@@ -408,7 +408,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
p->pict_type= AV_PICTURE_TYPE_I;
|
||||
p->key_frame= 1;
|
||||
|
||||
av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size);
|
||||
av_fast_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (!a->bitstream_buffer)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
|
@@ -744,7 +744,7 @@ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf,
|
||||
|
||||
result = decodeChannelSoundUnit(q,&q->gb, q->pUnits, out_samples[0], 0, JOINT_STEREO);
|
||||
if (result != 0)
|
||||
return result;
|
||||
return (result);
|
||||
|
||||
/* Framedata of the su2 in the joint-stereo mode is encoded in
|
||||
* reverse byte order so we need to swap it first. */
|
||||
@@ -785,7 +785,7 @@ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf,
|
||||
/* Decode Sound Unit 2. */
|
||||
result = decodeChannelSoundUnit(q,&q->gb, &q->pUnits[1], out_samples[1], 1, JOINT_STEREO);
|
||||
if (result != 0)
|
||||
return result;
|
||||
return (result);
|
||||
|
||||
/* Reconstruct the channel coefficients. */
|
||||
reverseMatrixing(out_samples[0], out_samples[1], q->matrix_coeff_index_prev, q->matrix_coeff_index_now);
|
||||
@@ -804,7 +804,7 @@ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf,
|
||||
|
||||
result = decodeChannelSoundUnit(q,&q->gb, &q->pUnits[i], out_samples[i], i, q->codingMode);
|
||||
if (result != 0)
|
||||
return result;
|
||||
return (result);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -255,15 +255,11 @@ enum CodecID {
|
||||
CODEC_ID_VBLE,
|
||||
CODEC_ID_DXTORY,
|
||||
CODEC_ID_V410,
|
||||
CODEC_ID_XWD,
|
||||
CODEC_ID_Y41P = MKBETAG('Y','4','1','P'),
|
||||
CODEC_ID_UTVIDEO = 0x800,
|
||||
CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'),
|
||||
CODEC_ID_AVRP = MKBETAG('A','V','R','P'),
|
||||
|
||||
CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'),
|
||||
CODEC_ID_V308 = MKBETAG('V','3','0','8'),
|
||||
CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'),
|
||||
|
||||
/* various PCM "codecs" */
|
||||
CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
|
||||
@@ -326,7 +322,6 @@ enum CodecID {
|
||||
CODEC_ID_ADPCM_EA_MAXIS_XA,
|
||||
CODEC_ID_ADPCM_IMA_ISS,
|
||||
CODEC_ID_ADPCM_G722,
|
||||
CODEC_ID_ADPCM_IMA_APC,
|
||||
|
||||
/* AMR */
|
||||
CODEC_ID_AMR_NB = 0x12000,
|
||||
@@ -409,7 +404,6 @@ enum CodecID {
|
||||
CODEC_ID_BMV_AUDIO,
|
||||
CODEC_ID_G729 = 0x15800,
|
||||
CODEC_ID_G723_1= 0x15801,
|
||||
CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
|
||||
CODEC_ID_8SVX_RAW = MKBETAG('8','S','V','X'),
|
||||
|
||||
/* subtitle codecs */
|
||||
@@ -747,27 +741,10 @@ typedef struct RcOverride{
|
||||
/* Codec can export data for HW decoding (XvMC). */
|
||||
#define CODEC_CAP_HWACCEL 0x0010
|
||||
/**
|
||||
* Encoder or decoder requires flushing with NULL input at the end in order to
|
||||
* give the complete and correct output.
|
||||
*
|
||||
* NOTE: If this flag is not set, the codec is guaranteed to never be fed with
|
||||
* with NULL data. The user can still send NULL data to the public encode
|
||||
* or decode function, but libavcodec will not pass it along to the codec
|
||||
* unless this flag is set.
|
||||
*
|
||||
* Decoders:
|
||||
* The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
|
||||
* Codec has a nonzero delay and needs to be fed with avpkt->data=NULL,
|
||||
* avpkt->size=0 at the end to get the delayed data until the decoder no longer
|
||||
* returns frames.
|
||||
*
|
||||
* Encoders:
|
||||
* The encoder needs to be fed with NULL data at the end of encoding until the
|
||||
* encoder no longer returns data.
|
||||
*
|
||||
* NOTE: For encoders implementing the AVCodec.encode2() function, setting this
|
||||
* flag also means that the encoder must set the pts and duration for
|
||||
* each output packet. If this flag is not set, the pts and duration will
|
||||
* be determined by libavcodec from the input frame.
|
||||
* returns frames. If this is not set, the codec is guaranteed to never be fed
|
||||
* with NULL data.
|
||||
*/
|
||||
#define CODEC_CAP_DELAY 0x0020
|
||||
/**
|
||||
@@ -818,14 +795,6 @@ typedef struct RcOverride{
|
||||
* Codec supports changed parameters at any point.
|
||||
*/
|
||||
#define CODEC_CAP_PARAM_CHANGE 0x4000
|
||||
/**
|
||||
* Codec supports avctx->thread_count == 0 (auto).
|
||||
*/
|
||||
#define CODEC_CAP_AUTO_THREADS 0x8000
|
||||
/**
|
||||
* Audio encoder supports receiving a different number of samples in each call.
|
||||
*/
|
||||
#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000
|
||||
/**
|
||||
* Codec is lossless.
|
||||
*/
|
||||
@@ -1087,12 +1056,13 @@ typedef struct AVFrame {
|
||||
*/
|
||||
int quality;
|
||||
|
||||
#if FF_API_AVFRAME_AGE
|
||||
/**
|
||||
* @deprecated unused
|
||||
* buffer age (1->was last buffer and dint change, 2->..., ...).
|
||||
* Set to INT_MAX if the buffer has not been used yet.
|
||||
* - encoding: unused
|
||||
* - decoding: MUST be set by get_buffer() for video.
|
||||
*/
|
||||
attribute_deprecated int age;
|
||||
#endif
|
||||
int age;
|
||||
|
||||
/**
|
||||
* is this picture used as reference
|
||||
@@ -1315,29 +1285,6 @@ typedef struct AVFrame {
|
||||
*/
|
||||
uint8_t **extended_data;
|
||||
|
||||
/**
|
||||
* sample aspect ratio for the video frame, 0/1 if unknown\unspecified
|
||||
* - encoding: unused
|
||||
* - decoding: Read by user.
|
||||
*/
|
||||
AVRational sample_aspect_ratio;
|
||||
|
||||
/**
|
||||
* width and height of the video frame
|
||||
* - encoding: unused
|
||||
* - decoding: Read by user.
|
||||
*/
|
||||
int width, height;
|
||||
|
||||
/**
|
||||
* format of the frame, -1 if unknown or unset
|
||||
* Values correspond to enum PixelFormat for video frames,
|
||||
* enum AVSampleFormat for audio)
|
||||
* - encoding: unused
|
||||
* - decoding: Read by user.
|
||||
*/
|
||||
int format;
|
||||
|
||||
/**
|
||||
* frame timestamp estimated using various heuristics, in stream time base
|
||||
* Code outside libavcodec should access this field using:
|
||||
@@ -1356,19 +1303,39 @@ typedef struct AVFrame {
|
||||
*/
|
||||
int64_t pkt_pos;
|
||||
|
||||
/**
|
||||
* reordered sample aspect ratio for the video frame, 0/1 if unknown\unspecified
|
||||
* Code outside libavcodec should access this field using:
|
||||
* av_opt_ptr(avcodec_get_frame_class(), frame, "sample_aspect_ratio");
|
||||
* - encoding: unused
|
||||
* - decoding: Read by user.
|
||||
*/
|
||||
AVRational sample_aspect_ratio;
|
||||
|
||||
/**
|
||||
* width and height of the video frame
|
||||
* Code outside libavcodec should access this field using:
|
||||
* av_opt_ptr(avcodec_get_frame_class(), frame, "width");
|
||||
* - encoding: unused
|
||||
* - decoding: Read by user.
|
||||
*/
|
||||
int width, height;
|
||||
|
||||
/**
|
||||
* format of the frame, -1 if unknown or unset
|
||||
* It should be cast to the corresponding enum (enum PixelFormat
|
||||
* for video, enum AVSampleFormat for audio)
|
||||
* Code outside libavcodec should access this field using:
|
||||
* av_opt_ptr(avcodec_get_frame_class(), frame, "format");
|
||||
* - encoding: unused
|
||||
* - decoding: Read by user.
|
||||
*/
|
||||
int format;
|
||||
|
||||
} AVFrame;
|
||||
|
||||
struct AVCodecInternal;
|
||||
|
||||
enum AVFieldOrder {
|
||||
AV_FIELD_UNKNOWN,
|
||||
AV_FIELD_PROGRESSIVE,
|
||||
AV_FIELD_TT, //< Top coded_first, top displayed first
|
||||
AV_FIELD_BB, //< Bottom coded first, bottom displayed first
|
||||
AV_FIELD_TB, //< Top coded first, bottom displayed first
|
||||
AV_FIELD_BT, //< Bottom coded first, top displayed first
|
||||
};
|
||||
|
||||
/**
|
||||
* main external API structure.
|
||||
* New fields can be added to the end with minor version bumps.
|
||||
@@ -1410,7 +1377,7 @@ typedef struct AVCodecContext {
|
||||
* Some codecs need additional format info. It is stored here.
|
||||
* If any muxer uses this then ALL demuxers/parsers AND encoders for the
|
||||
* specific codec MUST set it correctly otherwise stream copy breaks.
|
||||
* In general use of this field by muxers is not recommended.
|
||||
* In general use of this field by muxers is not recommanded.
|
||||
* - encoding: Set by libavcodec.
|
||||
* - decoding: Set by libavcodec. (FIXME: Is this OK?)
|
||||
*/
|
||||
@@ -2729,7 +2696,7 @@ typedef struct AVCodecContext {
|
||||
|
||||
#if FF_API_X264_GLOBAL_OPTS
|
||||
/**
|
||||
* Influence how often B-frames are used.
|
||||
* Influences how often B-frames are used.
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
@@ -2810,7 +2777,7 @@ typedef struct AVCodecContext {
|
||||
int mv0_threshold;
|
||||
|
||||
/**
|
||||
* Adjust sensitivity of b_frame_strategy 1.
|
||||
* Adjusts sensitivity of b_frame_strategy 1.
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
@@ -3094,7 +3061,7 @@ typedef struct AVCodecContext {
|
||||
|
||||
#if FF_API_FLAC_GLOBAL_OPTS
|
||||
/**
|
||||
* Determine which LPC analysis algorithm to use.
|
||||
* Determines which LPC analysis algorithm to use.
|
||||
* - encoding: Set by user
|
||||
* - decoding: unused
|
||||
*/
|
||||
@@ -3225,12 +3192,6 @@ typedef struct AVCodecContext {
|
||||
*/
|
||||
struct AVCodecInternal *internal;
|
||||
|
||||
/** Field order
|
||||
* - encoding: set by libavcodec
|
||||
* - decoding: Set by libavcodec
|
||||
*/
|
||||
enum AVFieldOrder field_order;
|
||||
|
||||
/**
|
||||
* Current statistics for PTS correction.
|
||||
* - decoding: maintained and used by libavcodec, not intended to be used by user apps
|
||||
@@ -3325,19 +3286,6 @@ typedef struct AVCodec {
|
||||
* Initialize codec static data, called from avcodec_register().
|
||||
*/
|
||||
void (*init_static_data)(struct AVCodec *codec);
|
||||
|
||||
/**
|
||||
* Encode data to an AVPacket.
|
||||
*
|
||||
* @param avctx codec context
|
||||
* @param avpkt output AVPacket (may contain a user-provided buffer)
|
||||
* @param[in] frame AVFrame containing the raw data to be encoded
|
||||
* @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
|
||||
* non-empty packet was returned in avpkt.
|
||||
* @return 0 on success, negative error code on failure
|
||||
*/
|
||||
int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame,
|
||||
int *got_packet_ptr);
|
||||
} AVCodec;
|
||||
|
||||
/**
|
||||
@@ -3622,7 +3570,7 @@ typedef struct ReSampleContext ReSampleContext;
|
||||
* @param linear if 1 then the used FIR filter will be linearly interpolated
|
||||
between the 2 closest, if 0 the closest will be used
|
||||
* @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
|
||||
* @return allocated ReSampleContext, NULL if error occurred
|
||||
* @return allocated ReSampleContext, NULL if error occured
|
||||
*/
|
||||
ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
|
||||
int output_rate, int input_rate,
|
||||
@@ -4212,11 +4160,6 @@ int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options)
|
||||
* @warning The end of the input buffer avpkt->data should be set to 0 to ensure that
|
||||
* no overreading happens for damaged MPEG streams.
|
||||
*
|
||||
* @warning You must not provide a custom get_buffer() when using
|
||||
* avcodec_decode_audio3(). Doing so will override it with
|
||||
* avcodec_default_get_buffer. Use avcodec_decode_audio4() instead,
|
||||
* which does allow the application to provide a custom get_buffer().
|
||||
*
|
||||
* @note You might have to align the input buffer avpkt->data and output buffer
|
||||
* samples. The alignment requirements depend on the CPU: On some CPUs it isn't
|
||||
* necessary at all, on others it won't work at all if not aligned and on others
|
||||
@@ -4326,7 +4269,7 @@ int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
|
||||
*/
|
||||
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
|
||||
int *got_picture_ptr,
|
||||
const AVPacket *avpkt);
|
||||
AVPacket *avpkt);
|
||||
|
||||
/**
|
||||
* Decode a subtitle message.
|
||||
@@ -4349,22 +4292,19 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
|
||||
AVPacket *avpkt);
|
||||
|
||||
/**
|
||||
* Free all allocated data in the given subtitle struct.
|
||||
* Frees all allocated data in the given subtitle struct.
|
||||
*
|
||||
* @param sub AVSubtitle to free.
|
||||
*/
|
||||
void avsubtitle_free(AVSubtitle *sub);
|
||||
|
||||
#if FF_API_OLD_ENCODE_AUDIO
|
||||
/**
|
||||
* Encode an audio frame from samples into buf.
|
||||
*
|
||||
* @deprecated Use avcodec_encode_audio2 instead.
|
||||
*
|
||||
* @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.
|
||||
* However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user
|
||||
* will know how much space is needed because it depends on the value passed
|
||||
* in buf_size as described below. In that case a lower value can be used.
|
||||
* However, for PCM audio the user will know how much space is needed
|
||||
* because it depends on the value passed in buf_size as described
|
||||
* below. In that case a lower value can be used.
|
||||
*
|
||||
* @param avctx the codec context
|
||||
* @param[out] buf the output buffer
|
||||
@@ -4372,79 +4312,13 @@ void avsubtitle_free(AVSubtitle *sub);
|
||||
* @param[in] samples the input buffer containing the samples
|
||||
* The number of samples read from this buffer is frame_size*channels,
|
||||
* both of which are defined in avctx.
|
||||
* For codecs which have avctx->frame_size equal to 0 (e.g. PCM) the number of
|
||||
* samples read from samples is equal to:
|
||||
* buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id))
|
||||
* This also implies that av_get_bits_per_sample() must not return 0 for these
|
||||
* codecs.
|
||||
* For PCM audio the number of samples read from samples is equal to
|
||||
* buf_size * input_sample_size / output_sample_size.
|
||||
* @return On error a negative value is returned, on success zero or the number
|
||||
* of bytes used to encode the data read from the input buffer.
|
||||
*/
|
||||
int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,
|
||||
uint8_t *buf, int buf_size,
|
||||
const short *samples);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Encode a frame of audio.
|
||||
*
|
||||
* Takes input samples from frame and writes the next output packet, if
|
||||
* available, to avpkt. The output packet does not necessarily contain data for
|
||||
* the most recent frame, as encoders can delay, split, and combine input frames
|
||||
* internally as needed.
|
||||
*
|
||||
* @param avctx codec context
|
||||
* @param avpkt output AVPacket.
|
||||
* The user can supply an output buffer by setting
|
||||
* avpkt->data and avpkt->size prior to calling the
|
||||
* function, but if the size of the user-provided data is not
|
||||
* large enough, encoding will fail. All other AVPacket fields
|
||||
* will be reset by the encoder using av_init_packet(). If
|
||||
* avpkt->data is NULL, the encoder will allocate it.
|
||||
* The encoder will set avpkt->size to the size of the
|
||||
* output packet.
|
||||
* @param[in] frame AVFrame containing the raw audio data to be encoded.
|
||||
* May be NULL when flushing an encoder that has the
|
||||
* CODEC_CAP_DELAY capability set.
|
||||
* There are 2 codec capabilities that affect the allowed
|
||||
* values of frame->nb_samples.
|
||||
* If CODEC_CAP_SMALL_LAST_FRAME is set, then only the final
|
||||
* frame may be smaller than avctx->frame_size, and all other
|
||||
* frames must be equal to avctx->frame_size.
|
||||
* If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
|
||||
* can have any number of samples.
|
||||
* If neither is set, frame->nb_samples must be equal to
|
||||
* avctx->frame_size for all frames.
|
||||
* @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
|
||||
* output packet is non-empty, and to 0 if it is
|
||||
* empty. If the function returns an error, the
|
||||
* packet can be assumed to be invalid, and the
|
||||
* value of got_packet_ptr is undefined and should
|
||||
* not be used.
|
||||
* @return 0 on success, negative error code on failure
|
||||
*/
|
||||
int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
const AVFrame *frame, int *got_packet_ptr);
|
||||
|
||||
/**
|
||||
* Fill audio frame data and linesize.
|
||||
* AVFrame extended_data channel pointers are allocated if necessary for
|
||||
* planar audio.
|
||||
*
|
||||
* @param frame the AVFrame
|
||||
* frame->nb_samples must be set prior to calling the
|
||||
* function. This function fills in frame->data,
|
||||
* frame->extended_data, frame->linesize[0].
|
||||
* @param nb_channels channel count
|
||||
* @param sample_fmt sample format
|
||||
* @param buf buffer to use for frame data
|
||||
* @param buf_size size of buffer
|
||||
* @param align plane size sample alignment
|
||||
* @return 0 on success, negative error code on failure
|
||||
*/
|
||||
int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
|
||||
enum AVSampleFormat sample_fmt, const uint8_t *buf,
|
||||
int buf_size, int align);
|
||||
int avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
|
||||
const short *samples);
|
||||
|
||||
/**
|
||||
* Encode a video frame from pict into buf.
|
||||
@@ -4755,15 +4629,6 @@ void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
|
||||
*/
|
||||
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
|
||||
|
||||
/**
|
||||
* Same behaviour av_fast_malloc but the buffer has additional
|
||||
* FF_INPUT_PADDING_SIZE at the end which will will always be 0.
|
||||
*
|
||||
* In addition the whole buffer will initially and after resizes
|
||||
* be 0-initialized so that no uninitialized data will ever appear.
|
||||
*/
|
||||
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size);
|
||||
|
||||
/**
|
||||
* Copy image src to dst. Wraps av_picture_data_copy() above.
|
||||
*/
|
||||
@@ -4792,7 +4657,7 @@ int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
|
||||
unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
|
||||
|
||||
/**
|
||||
* Log a generic warning message about a missing feature. This function is
|
||||
* Logs a generic warning message about a missing feature. This function is
|
||||
* intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
|
||||
* only, and would normally not be used by applications.
|
||||
* @param[in] avc a pointer to an arbitrary struct of which the first field is
|
||||
|
@@ -20,7 +20,6 @@
|
||||
*/
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "internal.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "bytestream.h"
|
||||
|
||||
@@ -31,23 +30,19 @@ void av_destruct_packet_nofree(AVPacket *pkt)
|
||||
pkt->side_data_elems = 0;
|
||||
}
|
||||
|
||||
void ff_packet_free_side_data(AVPacket *pkt)
|
||||
void av_destruct_packet(AVPacket *pkt)
|
||||
{
|
||||
int i;
|
||||
|
||||
av_free(pkt->data);
|
||||
pkt->data = NULL; pkt->size = 0;
|
||||
|
||||
for (i = 0; i < pkt->side_data_elems; i++)
|
||||
av_free(pkt->side_data[i].data);
|
||||
av_freep(&pkt->side_data);
|
||||
pkt->side_data_elems = 0;
|
||||
}
|
||||
|
||||
void av_destruct_packet(AVPacket *pkt)
|
||||
{
|
||||
av_free(pkt->data);
|
||||
pkt->data = NULL; pkt->size = 0;
|
||||
|
||||
ff_packet_free_side_data(pkt);
|
||||
}
|
||||
|
||||
void av_init_packet(AVPacket *pkt)
|
||||
{
|
||||
pkt->pts = AV_NOPTS_VALUE;
|
||||
@@ -244,6 +239,8 @@ int av_packet_split_side_data(AVPacket *pkt){
|
||||
unsigned int size;
|
||||
uint8_t *p;
|
||||
|
||||
av_dup_packet(pkt);
|
||||
|
||||
p = pkt->data + pkt->size - 8 - 5;
|
||||
for (i=1; ; i++){
|
||||
size = AV_RB32(p);
|
||||
|
@@ -165,15 +165,6 @@ static av_cold int avs_decode_init(AVCodecContext * avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_cold int avs_decode_end(AVCodecContext *avctx)
|
||||
{
|
||||
AvsContext *s = avctx->priv_data;
|
||||
if (s->picture.data[0])
|
||||
avctx->release_buffer(avctx, &s->picture);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
AVCodec ff_avs_decoder = {
|
||||
.name = "avs",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
@@ -181,7 +172,6 @@ AVCodec ff_avs_decoder = {
|
||||
.priv_data_size = sizeof(AvsContext),
|
||||
.init = avs_decode_init,
|
||||
.decode = avs_decode_frame,
|
||||
.close = avs_decode_end,
|
||||
.capabilities = CODEC_CAP_DR1,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("AVS (Audio Video Standard) video"),
|
||||
};
|
||||
|
@@ -34,7 +34,6 @@
|
||||
|
||||
typedef struct BethsoftvidContext {
|
||||
AVFrame frame;
|
||||
GetByteContext g;
|
||||
} BethsoftvidContext;
|
||||
|
||||
static av_cold int bethsoftvid_decode_init(AVCodecContext *avctx)
|
||||
@@ -48,19 +47,19 @@ static av_cold int bethsoftvid_decode_init(AVCodecContext *avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_palette(BethsoftvidContext *ctx)
|
||||
static int set_palette(AVFrame * frame, const uint8_t * palette_buffer, int buf_size)
|
||||
{
|
||||
uint32_t *palette = (uint32_t *)ctx->frame.data[1];
|
||||
uint32_t * palette = (uint32_t *)frame->data[1];
|
||||
int a;
|
||||
|
||||
if (bytestream2_get_bytes_left(&ctx->g) < 256*3)
|
||||
if (buf_size < 256*3)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
for(a = 0; a < 256; a++){
|
||||
palette[a] = 0xFFU << 24 | bytestream2_get_be24u(&ctx->g) * 4;
|
||||
palette[a] = 0xFF << 24 | AV_RB24(&palette_buffer[a * 3]) * 4;
|
||||
palette[a] |= palette[a] >> 6 & 0x30303;
|
||||
}
|
||||
ctx->frame.palette_has_changed = 1;
|
||||
frame->palette_has_changed = 1;
|
||||
return 256*3;
|
||||
}
|
||||
|
||||
@@ -68,6 +67,8 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
|
||||
void *data, int *data_size,
|
||||
AVPacket *avpkt)
|
||||
{
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
BethsoftvidContext * vid = avctx->priv_data;
|
||||
char block_type;
|
||||
uint8_t * dst;
|
||||
@@ -81,32 +82,29 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
|
||||
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
bytestream2_init(&vid->g, avpkt->data, avpkt->size);
|
||||
dst = vid->frame.data[0];
|
||||
frame_end = vid->frame.data[0] + vid->frame.linesize[0] * avctx->height;
|
||||
|
||||
switch(block_type = bytestream2_get_byte(&vid->g)){
|
||||
case PALETTE_BLOCK: {
|
||||
return set_palette(vid);
|
||||
}
|
||||
switch(block_type = *buf++){
|
||||
case PALETTE_BLOCK:
|
||||
return set_palette(&vid->frame, buf, buf_size);
|
||||
case VIDEO_YOFF_P_FRAME:
|
||||
yoffset = bytestream2_get_le16(&vid->g);
|
||||
yoffset = bytestream_get_le16(&buf);
|
||||
if(yoffset >= avctx->height)
|
||||
return -1;
|
||||
dst += vid->frame.linesize[0] * yoffset;
|
||||
}
|
||||
|
||||
// main code
|
||||
while((code = bytestream2_get_byte(&vid->g))){
|
||||
while((code = *buf++)){
|
||||
int length = code & 0x7f;
|
||||
|
||||
// copy any bytes starting at the current position, and ending at the frame width
|
||||
while(length > remaining){
|
||||
if(code < 0x80)
|
||||
bytestream2_get_buffer(&vid->g, dst, remaining);
|
||||
bytestream_get_buffer(&buf, dst, remaining);
|
||||
else if(block_type == VIDEO_I_FRAME)
|
||||
memset(dst, bytestream2_peek_byte(&vid->g), remaining);
|
||||
memset(dst, buf[0], remaining);
|
||||
length -= remaining; // decrement the number of bytes to be copied
|
||||
dst += remaining + wrap_to_next_line; // skip over extra bytes at end of frame
|
||||
remaining = avctx->width;
|
||||
@@ -116,9 +114,9 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
|
||||
|
||||
// copy any remaining bytes after / if line overflows
|
||||
if(code < 0x80)
|
||||
bytestream2_get_buffer(&vid->g, dst, length);
|
||||
bytestream_get_buffer(&buf, dst, length);
|
||||
else if(block_type == VIDEO_I_FRAME)
|
||||
memset(dst, bytestream2_get_byte(&vid->g), length);
|
||||
memset(dst, *buf++, length);
|
||||
remaining -= length;
|
||||
dst += length;
|
||||
}
|
||||
@@ -127,7 +125,7 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = vid->frame;
|
||||
|
||||
return avpkt->size;
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
static av_cold int bethsoftvid_decode_end(AVCodecContext *avctx)
|
||||
|
@@ -37,7 +37,7 @@ typedef struct BFIContext {
|
||||
uint32_t pal[256];
|
||||
} BFIContext;
|
||||
|
||||
static av_cold int bfi_decode_init(AVCodecContext *avctx)
|
||||
static av_cold int bfi_decode_init(AVCodecContext * avctx)
|
||||
{
|
||||
BFIContext *bfi = avctx->priv_data;
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
@@ -46,10 +46,10 @@ static av_cold int bfi_decode_init(AVCodecContext *avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bfi_decode_frame(AVCodecContext *avctx, void *data,
|
||||
static int bfi_decode_frame(AVCodecContext * avctx, void *data,
|
||||
int *data_size, AVPacket *avpkt)
|
||||
{
|
||||
GetByteContext g;
|
||||
const uint8_t *buf = avpkt->data, *buf_end = avpkt->data + avpkt->size;
|
||||
int buf_size = avpkt->size;
|
||||
BFIContext *bfi = avctx->priv_data;
|
||||
uint8_t *dst = bfi->dst;
|
||||
@@ -68,18 +68,16 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data,
|
||||
return -1;
|
||||
}
|
||||
|
||||
bytestream2_init(&g, avpkt->data, buf_size);
|
||||
|
||||
/* Set frame parameters and palette, if necessary */
|
||||
if (!avctx->frame_number) {
|
||||
bfi->frame.pict_type = AV_PICTURE_TYPE_I;
|
||||
bfi->frame.key_frame = 1;
|
||||
/* Setting the palette */
|
||||
if (avctx->extradata_size > 768) {
|
||||
if(avctx->extradata_size>768) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Palette is too large.\n");
|
||||
return -1;
|
||||
}
|
||||
pal = (uint32_t *)bfi->frame.data[1];
|
||||
pal = (uint32_t *) bfi->frame.data[1];
|
||||
for (i = 0; i < avctx->extradata_size / 3; i++) {
|
||||
int shift = 16;
|
||||
*pal = 0xFF << 24;
|
||||
@@ -98,47 +96,46 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data,
|
||||
memcpy(bfi->frame.data[1], bfi->pal, sizeof(bfi->pal));
|
||||
}
|
||||
|
||||
bytestream2_skip(&g, 4); // Unpacked size, not required.
|
||||
buf += 4; //Unpacked size, not required.
|
||||
|
||||
while (dst != frame_end) {
|
||||
static const uint8_t lentab[4] = { 0, 2, 0, 1 };
|
||||
unsigned int byte = bytestream2_get_byte(&g), av_uninit(offset);
|
||||
unsigned int code = byte >> 6;
|
||||
static const uint8_t lentab[4]={0,2,0,1};
|
||||
unsigned int byte = *buf++, av_uninit(offset);
|
||||
unsigned int code = byte >> 6;
|
||||
unsigned int length = byte & ~0xC0;
|
||||
|
||||
if (!bytestream2_get_bytes_left(&g)) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Input resolution larger than actual frame.\n");
|
||||
if (buf >= buf_end) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Input resolution larger than actual frame.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Get length and offset(if required) */
|
||||
if (length == 0) {
|
||||
if (code == 1) {
|
||||
length = bytestream2_get_byte(&g);
|
||||
offset = bytestream2_get_le16(&g);
|
||||
length = bytestream_get_byte(&buf);
|
||||
offset = bytestream_get_le16(&buf);
|
||||
} else {
|
||||
length = bytestream2_get_le16(&g);
|
||||
length = bytestream_get_le16(&buf);
|
||||
if (code == 2 && length == 0)
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
if (code == 1)
|
||||
offset = bytestream2_get_byte(&g);
|
||||
offset = bytestream_get_byte(&buf);
|
||||
}
|
||||
|
||||
/* Do boundary check */
|
||||
if (dst + (length << lentab[code]) > frame_end)
|
||||
if (dst + (length<<lentab[code]) > frame_end)
|
||||
break;
|
||||
|
||||
switch (code) {
|
||||
|
||||
case 0: //Normal Chain
|
||||
if (length >= bytestream2_get_bytes_left(&g)) {
|
||||
if (length >= buf_end - buf) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Frame larger than buffer.\n");
|
||||
return -1;
|
||||
}
|
||||
bytestream2_get_buffer(&g, dst, length);
|
||||
bytestream_get_buffer(&buf, dst, length);
|
||||
dst += length;
|
||||
break;
|
||||
|
||||
@@ -156,8 +153,8 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data,
|
||||
break;
|
||||
|
||||
case 3: //Fill Chain
|
||||
colour1 = bytestream2_get_byte(&g);
|
||||
colour2 = bytestream2_get_byte(&g);
|
||||
colour1 = bytestream_get_byte(&buf);
|
||||
colour2 = bytestream_get_byte(&buf);
|
||||
while (length--) {
|
||||
*dst++ = colour1;
|
||||
*dst++ = colour2;
|
||||
@@ -175,7 +172,7 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data,
|
||||
dst += bfi->frame.linesize[0];
|
||||
}
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame *)data = bfi->frame;
|
||||
*(AVFrame *) data = bfi->frame;
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
|
@@ -474,8 +474,7 @@ int ff_bgmc_init(AVCodecContext *avctx, uint8_t **cf_lut, int **cf_lut_status)
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
} else {
|
||||
// initialize lut_status buffer to a value never used to compare
|
||||
// against
|
||||
// initialize lut_status buffer to a value never used to compare against
|
||||
memset(*cf_lut_status, -1, sizeof(*cf_lut_status) * LUT_BUFF);
|
||||
}
|
||||
|
||||
@@ -495,7 +494,7 @@ void ff_bgmc_end(uint8_t **cf_lut, int **cf_lut_status)
|
||||
/** Initialize decoding and reads the first value
|
||||
*/
|
||||
void ff_bgmc_decode_init(GetBitContext *gb,
|
||||
unsigned int *h, unsigned int *l, unsigned int *v)
|
||||
unsigned int *h, unsigned int *l, unsigned int *v)
|
||||
{
|
||||
*h = TOP_VALUE;
|
||||
*l = 0;
|
||||
@@ -514,9 +513,9 @@ void ff_bgmc_decode_end(GetBitContext *gb)
|
||||
/** Read and decode a block Gilbert-Moore coded symbol
|
||||
*/
|
||||
void ff_bgmc_decode(GetBitContext *gb, unsigned int num, int32_t *dst,
|
||||
int delta, unsigned int sx,
|
||||
unsigned int *h, unsigned int *l, unsigned int *v,
|
||||
uint8_t *cf_lut, int *cf_lut_status)
|
||||
int delta, unsigned int sx,
|
||||
unsigned int *h, unsigned int *l, unsigned int *v,
|
||||
uint8_t *cf_lut, int *cf_lut_status)
|
||||
{
|
||||
unsigned int i;
|
||||
uint8_t *lut = bgmc_lut_getp(cf_lut, cf_lut_status, delta);
|
||||
@@ -568,3 +567,4 @@ void ff_bgmc_decode(GetBitContext *gb, unsigned int num, int32_t *dst,
|
||||
*l = low;
|
||||
*v = value;
|
||||
}
|
||||
|
||||
|
@@ -27,7 +27,7 @@
|
||||
#include "binkdsp.h"
|
||||
#include "mathops.h"
|
||||
|
||||
#define BITSTREAM_READER_LE
|
||||
#define ALT_BITSTREAM_READER_LE
|
||||
#include "get_bits.h"
|
||||
|
||||
#define BINK_FLAG_ALPHA 0x00100000
|
||||
|
@@ -29,13 +29,13 @@
|
||||
*/
|
||||
|
||||
#include "avcodec.h"
|
||||
#define BITSTREAM_READER_LE
|
||||
#define ALT_BITSTREAM_READER_LE
|
||||
#include "get_bits.h"
|
||||
#include "dsputil.h"
|
||||
#include "dct.h"
|
||||
#include "rdft.h"
|
||||
#include "fmtconvert.h"
|
||||
#include "libavutil/intfloat.h"
|
||||
#include "libavutil/intfloat_readwrite.h"
|
||||
|
||||
extern const uint16_t ff_wma_critical_freqs[25];
|
||||
|
||||
@@ -193,8 +193,8 @@ static int decode_block(BinkAudioContext *s, int16_t *out, int use_dct)
|
||||
if (s->version_b) {
|
||||
if (get_bits_left(gb) < 64)
|
||||
return AVERROR_INVALIDDATA;
|
||||
coeffs[0] = av_int2float(get_bits_long(gb, 32)) * s->root;
|
||||
coeffs[1] = av_int2float(get_bits_long(gb, 32)) * s->root;
|
||||
coeffs[0] = av_int2flt(get_bits(gb, 32)) * s->root;
|
||||
coeffs[1] = av_int2flt(get_bits(gb, 32)) * s->root;
|
||||
} else {
|
||||
if (get_bits_left(gb) < 58)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
@@ -139,7 +139,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return -1;
|
||||
}
|
||||
s->frame.pict_type = AV_PICTURE_TYPE_I;
|
||||
s->frame.pict_type = FF_I_TYPE;
|
||||
s->frame.palette_has_changed = 1;
|
||||
memcpy(s->frame.data[1], s->palette, 16 * 4);
|
||||
|
||||
@@ -211,37 +211,40 @@ static av_cold int decode_end(AVCodecContext *avctx)
|
||||
}
|
||||
|
||||
AVCodec ff_bintext_decoder = {
|
||||
.name = "bintext",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_BINTEXT,
|
||||
.priv_data_size = sizeof(XbinContext),
|
||||
.init = decode_init,
|
||||
.close = decode_end,
|
||||
.decode = decode_frame,
|
||||
.capabilities = CODEC_CAP_DR1,
|
||||
"bintext",
|
||||
AVMEDIA_TYPE_VIDEO,
|
||||
CODEC_ID_BINTEXT,
|
||||
sizeof(XbinContext),
|
||||
decode_init,
|
||||
NULL,
|
||||
decode_end,
|
||||
decode_frame,
|
||||
CODEC_CAP_DR1,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Binary text"),
|
||||
};
|
||||
|
||||
AVCodec ff_xbin_decoder = {
|
||||
.name = "xbin",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_XBIN,
|
||||
.priv_data_size = sizeof(XbinContext),
|
||||
.init = decode_init,
|
||||
.close = decode_end,
|
||||
.decode = decode_frame,
|
||||
.capabilities = CODEC_CAP_DR1,
|
||||
"xbin",
|
||||
AVMEDIA_TYPE_VIDEO,
|
||||
CODEC_ID_XBIN,
|
||||
sizeof(XbinContext),
|
||||
decode_init,
|
||||
NULL,
|
||||
decode_end,
|
||||
decode_frame,
|
||||
CODEC_CAP_DR1,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("eXtended BINary text"),
|
||||
};
|
||||
|
||||
AVCodec ff_idf_decoder = {
|
||||
.name = "idf",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_IDF,
|
||||
.priv_data_size = sizeof(XbinContext),
|
||||
.init = decode_init,
|
||||
.close = decode_end,
|
||||
.decode = decode_frame,
|
||||
.capabilities = CODEC_CAP_DR1,
|
||||
"idf",
|
||||
AVMEDIA_TYPE_VIDEO,
|
||||
CODEC_ID_IDF,
|
||||
sizeof(XbinContext),
|
||||
decode_init,
|
||||
NULL,
|
||||
decode_end,
|
||||
decode_frame,
|
||||
CODEC_CAP_DR1,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("iCEDraw text"),
|
||||
};
|
||||
|
@@ -103,7 +103,7 @@ static int alloc_table(VLC *vlc, int size, int use_static)
|
||||
vlc->table_size += size;
|
||||
if (vlc->table_size > vlc->table_allocated) {
|
||||
if(use_static)
|
||||
abort(); // cannot do anything, init_vlc() is used with too little memory
|
||||
abort(); //cant do anything, init_vlc() is used with too little memory
|
||||
vlc->table_allocated += (1 << vlc->bits);
|
||||
vlc->table = av_realloc_f(vlc->table,
|
||||
vlc->table_allocated, sizeof(VLC_TYPE) * 2);
|
||||
|
@@ -49,7 +49,6 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
unsigned int ihsize;
|
||||
int i, j, n, linesize;
|
||||
uint32_t rgb[3];
|
||||
uint32_t alpha = 0;
|
||||
uint8_t *ptr;
|
||||
int dsize;
|
||||
const uint8_t *buf0 = buf;
|
||||
@@ -132,8 +131,6 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
rgb[0] = bytestream_get_le32(&buf);
|
||||
rgb[1] = bytestream_get_le32(&buf);
|
||||
rgb[2] = bytestream_get_le32(&buf);
|
||||
if (ihsize >= 108)
|
||||
alpha = bytestream_get_le32(&buf);
|
||||
}
|
||||
|
||||
avctx->width = width;
|
||||
@@ -144,21 +141,21 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
switch(depth){
|
||||
case 32:
|
||||
if(comp == BMP_BITFIELDS){
|
||||
if (rgb[0] == 0xFF000000 && rgb[1] == 0x00FF0000 && rgb[2] == 0x0000FF00)
|
||||
avctx->pix_fmt = alpha ? PIX_FMT_ABGR : PIX_FMT_0BGR;
|
||||
else if (rgb[0] == 0x00FF0000 && rgb[1] == 0x0000FF00 && rgb[2] == 0x000000FF)
|
||||
avctx->pix_fmt = alpha ? PIX_FMT_BGRA : PIX_FMT_BGR0;
|
||||
else if (rgb[0] == 0x0000FF00 && rgb[1] == 0x00FF0000 && rgb[2] == 0xFF000000)
|
||||
avctx->pix_fmt = alpha ? PIX_FMT_ARGB : PIX_FMT_0RGB;
|
||||
else if (rgb[0] == 0x000000FF && rgb[1] == 0x0000FF00 && rgb[2] == 0x00FF0000)
|
||||
avctx->pix_fmt = alpha ? PIX_FMT_RGBA : PIX_FMT_RGB0;
|
||||
else {
|
||||
av_log(avctx, AV_LOG_ERROR, "Unknown bitfields %0X %0X %0X\n", rgb[0], rgb[1], rgb[2]);
|
||||
return AVERROR(EINVAL);
|
||||
rgb[0] = (rgb[0] >> 15) & 3;
|
||||
rgb[1] = (rgb[1] >> 15) & 3;
|
||||
rgb[2] = (rgb[2] >> 15) & 3;
|
||||
|
||||
if(rgb[0] + rgb[1] + rgb[2] != 3 ||
|
||||
rgb[0] == rgb[1] || rgb[0] == rgb[2] || rgb[1] == rgb[2]){
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
avctx->pix_fmt = PIX_FMT_BGRA;
|
||||
rgb[0] = 2;
|
||||
rgb[1] = 1;
|
||||
rgb[2] = 0;
|
||||
}
|
||||
|
||||
avctx->pix_fmt = PIX_FMT_BGRA;
|
||||
break;
|
||||
case 24:
|
||||
avctx->pix_fmt = PIX_FMT_BGR24;
|
||||
@@ -166,18 +163,8 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
case 16:
|
||||
if(comp == BMP_RGB)
|
||||
avctx->pix_fmt = PIX_FMT_RGB555;
|
||||
else if (comp == BMP_BITFIELDS) {
|
||||
if (rgb[0] == 0xF800 && rgb[1] == 0x07E0 && rgb[2] == 0x001F)
|
||||
avctx->pix_fmt = PIX_FMT_RGB565;
|
||||
else if (rgb[0] == 0x7C00 && rgb[1] == 0x03E0 && rgb[2] == 0x001F)
|
||||
avctx->pix_fmt = PIX_FMT_RGB555;
|
||||
else if (rgb[0] == 0x0F00 && rgb[1] == 0x00F0 && rgb[2] == 0x000F)
|
||||
avctx->pix_fmt = PIX_FMT_RGB444;
|
||||
else {
|
||||
av_log(avctx, AV_LOG_ERROR, "Unknown bitfields %0X %0X %0X\n", rgb[0], rgb[1], rgb[2]);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
}
|
||||
if(comp == BMP_BITFIELDS)
|
||||
avctx->pix_fmt = rgb[1] == 0x07E0 ? PIX_FMT_RGB565 : PIX_FMT_RGB555;
|
||||
break;
|
||||
case 8:
|
||||
if(hsize - ihsize - 14 > 0)
|
||||
@@ -260,7 +247,7 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
((uint32_t*)p->data[1])[i] = (0xff<<24) | bytestream_get_le24(&buf);
|
||||
}else{
|
||||
for(i = 0; i < colors; i++)
|
||||
((uint32_t*)p->data[1])[i] = 0xFFU << 24 | bytestream_get_le32(&buf);
|
||||
((uint32_t*)p->data[1])[i] = bytestream_get_le32(&buf);
|
||||
}
|
||||
buf = buf0 + hsize;
|
||||
}
|
||||
@@ -295,7 +282,6 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
break;
|
||||
case 8:
|
||||
case 24:
|
||||
case 32:
|
||||
for(i = 0; i < avctx->height; i++){
|
||||
memcpy(ptr, buf, n);
|
||||
buf += n;
|
||||
@@ -325,6 +311,29 @@ static int bmp_decode_frame(AVCodecContext *avctx,
|
||||
ptr += linesize;
|
||||
}
|
||||
break;
|
||||
case 32:
|
||||
for(i = 0; i < avctx->height; i++){
|
||||
const uint8_t *src = buf;
|
||||
uint8_t *dst = ptr;
|
||||
|
||||
for(j = 0; j < avctx->width; j++){
|
||||
dst[0] = src[rgb[2]];
|
||||
dst[1] = src[rgb[1]];
|
||||
dst[2] = src[rgb[0]];
|
||||
/* The Microsoft documentation states:
|
||||
* "The high byte in each DWORD is not used."
|
||||
* Both GIMP and ImageMagick store the alpha transparency value
|
||||
* in the high byte for 32bit bmp files.
|
||||
*/
|
||||
dst[3] = src[3];
|
||||
dst += 4;
|
||||
src += 4;
|
||||
}
|
||||
|
||||
buf += n;
|
||||
ptr += linesize;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
av_log(avctx, AV_LOG_ERROR, "BMP decoder is broken\n");
|
||||
return -1;
|
||||
|
@@ -24,11 +24,9 @@
#include "avcodec.h"
#include "bytestream.h"
#include "bmp.h"
#include <assert.h>

static const uint32_t monoblack_pal[] = { 0x000000, 0xFFFFFF };
static const uint32_t rgb565_masks[]  = { 0xF800, 0x07E0, 0x001F };
static const uint32_t rgb444_masks[]  = { 0x0F00, 0x00F0, 0x000F };

static av_cold int bmp_encode_init(AVCodecContext *avctx){
    BMPContext *s = avctx->priv_data;
@@ -37,15 +35,13 @@ static av_cold int bmp_encode_init(AVCodecContext *avctx){
    avctx->coded_frame = (AVFrame*)&s->picture;

    switch (avctx->pix_fmt) {
    case PIX_FMT_BGRA:
        avctx->bits_per_coded_sample = 32;
        break;
    case PIX_FMT_BGR24:
        avctx->bits_per_coded_sample = 24;
        break;
    case PIX_FMT_RGB555:
        avctx->bits_per_coded_sample = 16;
        break;
    case PIX_FMT_RGB565:
    case PIX_FMT_RGB444:
        avctx->bits_per_coded_sample = 16;
        break;
    case PIX_FMT_RGB8:
@@ -73,7 +69,6 @@ static int bmp_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_s
    AVFrame * const p= (AVFrame*)&s->picture;
    int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize;
    const uint32_t *pal = NULL;
    uint32_t palette256[256];
    int pad_bytes_per_row, pal_entries = 0, compression = BMP_RGB;
    int bit_count = avctx->bits_per_coded_sample;
    uint8_t *ptr;
@@ -82,11 +77,6 @@ static int bmp_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_s
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;
    switch (avctx->pix_fmt) {
    case PIX_FMT_RGB444:
        compression = BMP_BITFIELDS;
        pal = rgb444_masks; // abuse pal to hold color masks
        pal_entries = 3;
        break;
    case PIX_FMT_RGB565:
        compression = BMP_BITFIELDS;
        pal = rgb565_masks; // abuse pal to hold color masks
@@ -97,10 +87,7 @@ static int bmp_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_s
    case PIX_FMT_RGB4_BYTE:
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_GRAY8:
        assert(bit_count == 8);
        ff_set_systematic_pal2(palette256, avctx->pix_fmt);
        pal = palette256;
        break;
        ff_set_systematic_pal2((uint32_t*)p->data[1], avctx->pix_fmt);
    case PIX_FMT_PAL8:
        pal = (uint32_t *)p->data[1];
        break;
@@ -170,8 +157,8 @@ AVCodec ff_bmp_encoder = {
    .init           = bmp_encode_init,
    .encode         = bmp_encode_frame,
    .pix_fmts       = (const enum PixelFormat[]){
        PIX_FMT_BGRA, PIX_FMT_BGR24,
        PIX_FMT_RGB565, PIX_FMT_RGB555, PIX_FMT_RGB444,
        PIX_FMT_BGR24,
        PIX_FMT_RGB555, PIX_FMT_RGB565,
        PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE, PIX_FMT_BGR4_BYTE, PIX_FMT_GRAY8, PIX_FMT_PAL8,
        PIX_FMT_MONOBLACK,
        PIX_FMT_NONE},
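Aside (not part of the patch): for RGB565/RGB444 output the encoder switches the header compression to BMP_BITFIELDS and reuses the palette slot to emit the three channel masks, which is what the "abuse pal to hold color masks" comments refer to. A hedged sketch of just that header step, using placeholder helpers rather than the encoder's own byte writers:

```c
#include <stdint.h>

/* Placeholder little-endian writer; stands in for whatever byte writer the
 * encoder really uses when it flushes the "palette" entries. */
static void store_le32(uint8_t **p, uint32_t v)
{
    (*p)[0] =  v        & 0xFF;
    (*p)[1] = (v >>  8) & 0xFF;
    (*p)[2] = (v >> 16) & 0xFF;
    (*p)[3] =  v >> 24;
    *p += 4;
}

/* In a BMP_BITFIELDS file, the R, G and B masks sit right after the
 * BITMAPINFOHEADER, where palette entries would otherwise start. */
static void write_rgb565_masks(uint8_t **p)
{
    static const uint32_t masks[3] = { 0xF800, 0x07E0, 0x001F }; /* R, G, B */
    for (int i = 0; i < 3; i++)
        store_le32(p, masks[i]);
}
```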
@@ -27,7 +27,7 @@
#include "libavutil/intreadwrite.h"

typedef struct {
    const uint8_t *buffer, *buffer_end, *buffer_start;
    const uint8_t *buffer, *buffer_end;
} GetByteContext;

#define DEF_T(type, name, bytes, read, write) \
@@ -39,15 +39,11 @@ static av_always_inline void bytestream_put_ ##name(uint8_t **b, const type valu
    write(*b, value);\
    (*b) += bytes;\
}\
static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g)\
{\
    return bytestream_get_ ## name(&g->buffer);\
}\
static av_always_inline type bytestream2_get_ ## name(GetByteContext *g)\
{\
    if (g->buffer_end - g->buffer < bytes)\
        return 0;\
    return bytestream2_get_ ## name ## u(g);\
    return bytestream_get_ ## name(&g->buffer);\
}\
static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g)\
{\
@@ -75,47 +71,10 @@ DEF (byte, 1, AV_RB8 , AV_WB8 )
#undef DEF64
#undef DEF_T

#if HAVE_BIGENDIAN
# define bytestream2_get_ne16  bytestream2_get_be16
# define bytestream2_get_ne24  bytestream2_get_be24
# define bytestream2_get_ne32  bytestream2_get_be32
# define bytestream2_get_ne64  bytestream2_get_be64
# define bytestream2_get_ne16u bytestream2_get_be16u
# define bytestream2_get_ne24u bytestream2_get_be24u
# define bytestream2_get_ne32u bytestream2_get_be32u
# define bytestream2_get_ne64u bytestream2_get_be64u
# define bytestream2_put_ne16  bytestream2_put_be16
# define bytestream2_put_ne24  bytestream2_put_be24
# define bytestream2_put_ne32  bytestream2_put_be32
# define bytestream2_put_ne64  bytestream2_put_be64
# define bytestream2_peek_ne16 bytestream2_peek_be16
# define bytestream2_peek_ne24 bytestream2_peek_be24
# define bytestream2_peek_ne32 bytestream2_peek_be32
# define bytestream2_peek_ne64 bytestream2_peek_be64
#else
# define bytestream2_get_ne16  bytestream2_get_le16
# define bytestream2_get_ne24  bytestream2_get_le24
# define bytestream2_get_ne32  bytestream2_get_le32
# define bytestream2_get_ne64  bytestream2_get_le64
# define bytestream2_get_ne16u bytestream2_get_le16u
# define bytestream2_get_ne24u bytestream2_get_le24u
# define bytestream2_get_ne32u bytestream2_get_le32u
# define bytestream2_get_ne64u bytestream2_get_le64u
# define bytestream2_put_ne16  bytestream2_put_le16
# define bytestream2_put_ne24  bytestream2_put_le24
# define bytestream2_put_ne32  bytestream2_put_le32
# define bytestream2_put_ne64  bytestream2_put_le64
# define bytestream2_peek_ne16 bytestream2_peek_le16
# define bytestream2_peek_ne24 bytestream2_peek_le24
# define bytestream2_peek_ne32 bytestream2_peek_le32
# define bytestream2_peek_ne64 bytestream2_peek_le64
#endif

static av_always_inline void bytestream2_init(GetByteContext *g,
                                              const uint8_t *buf, int buf_size)
{
    g->buffer = buf;
    g->buffer_start = buf;
    g->buffer_end = buf + buf_size;
}
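Aside (not part of the patch): the DEF_T change above splits every reader into an unchecked bytestream2_get_*u() plus a checked bytestream2_get_*() that returns 0 when fewer than the requested bytes remain. Expanded by hand for a little-endian 32-bit read, the generated pair looks roughly like this (names shortened, the struct stands in for GetByteContext):

```c
#include <stdint.h>

typedef struct {
    const uint8_t *buffer, *buffer_end, *buffer_start;
} ByteCtx; /* stand-in for GetByteContext */

/* Unchecked read: caller guarantees 4 bytes are available. */
static uint32_t get_le32u(ByteCtx *g)
{
    uint32_t v = (uint32_t)g->buffer[0]       | (uint32_t)g->buffer[1] << 8 |
                 (uint32_t)g->buffer[2] << 16 | (uint32_t)g->buffer[3] << 24;
    g->buffer += 4;
    return v;
}

/* Checked wrapper: refuses to read past buffer_end, returning 0 instead. */
static uint32_t get_le32(ByteCtx *g)
{
    if (g->buffer_end - g->buffer < 4)
        return 0;
    return get_le32u(g);
}
```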
@@ -130,34 +89,6 @@ static av_always_inline void bytestream2_skip(GetByteContext *g,
    g->buffer += FFMIN(g->buffer_end - g->buffer, size);
}

static av_always_inline int bytestream2_tell(GetByteContext *g)
{
    return (int)(g->buffer - g->buffer_start);
}

static av_always_inline int bytestream2_seek(GetByteContext *g, int offset,
                                             int whence)
{
    switch (whence) {
    case SEEK_CUR:
        offset = av_clip(offset, -(g->buffer - g->buffer_start),
                         g->buffer_end - g->buffer);
        g->buffer += offset;
        break;
    case SEEK_END:
        offset = av_clip(offset, -(g->buffer_end - g->buffer_start), 0);
        g->buffer = g->buffer_end + offset;
        break;
    case SEEK_SET:
        offset = av_clip(offset, 0, g->buffer_end - g->buffer_start);
        g->buffer = g->buffer_start + offset;
        break;
    default:
        return AVERROR(EINVAL);
    }
    return bytestream2_tell(g);
}

static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
                                                            uint8_t *dst,
                                                            unsigned int size)
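Aside (not part of the patch): together these helpers give a bounded reader with tell/seek semantics. A short usage sketch, assuming this bytestream.h is on the include path and the buffer contents are arbitrary:

```c
#include <stdio.h>      /* SEEK_SET */
#include <stdint.h>
#include "bytestream.h" /* the header shown above */

static void parse_example(const uint8_t *data, int size)
{
    GetByteContext gb;
    uint8_t magic[4];
    int pos;

    bytestream2_init(&gb, data, size);
    bytestream2_get_buffer(&gb, magic, sizeof(magic)); /* copy the first 4 bytes   */
    bytestream2_skip(&gb, 8);                          /* clamped at buffer_end    */
    pos = bytestream2_tell(&gb);                       /* offset from buffer_start */
    bytestream2_seek(&gb, 0, SEEK_SET);                /* rewind to the beginning  */
    (void)magic; (void)pos;
}
```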
@@ -29,7 +29,6 @@
#include "libavutil/common.h"
#include "get_bits.h"
#include "cabac.h"
#include "cabac_functions.h"

static const uint8_t lps_range[64][4]= {
{128,176,208,240}, {128,167,197,227}, {128,158,187,216}, {123,150,178,205},
@@ -53,7 +52,7 @@ static const uint8_t lps_range[64][4]= {
uint8_t ff_h264_mlps_state[4*64];
uint8_t ff_h264_lps_range[4*2*64];
uint8_t ff_h264_lps_state[2*64];
static uint8_t h264_mps_state[2 * 64];
uint8_t ff_h264_mps_state[2*64];

static const uint8_t mps_state[64]= {
  1,  2,  3,  4,  5,  6,  7,  8,
@@ -110,6 +109,10 @@ void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size){
    c->low= 0;
    c->range= 0x1FE;
    c->outstanding_count= 0;
#ifdef STRICT_LIMITS
    c->sym_count =0;
#endif

    c->pb.bit_left++; //avoids firstBitFlag
}

@@ -142,9 +145,9 @@ void ff_init_cabac_states(CABACContext *c){
        }

        ff_h264_mlps_state[128+2*i+0]=
        h264_mps_state[2 * i + 0] = 2 * mps_state[i] + 0;
        ff_h264_mps_state[2*i+0]= 2*mps_state[i]+0;
        ff_h264_mlps_state[128+2*i+1]=
        h264_mps_state[2 * i + 1] = 2 * mps_state[i] + 1;
        ff_h264_mps_state[2*i+1]= 2*mps_state[i]+1;

        if( i ){
            ff_h264_lps_state[2*i+0]=
@@ -167,37 +170,12 @@ void ff_init_cabac_states(CABACContext *c){
#include "avcodec.h"
#include "cabac.h"

static inline void put_cabac_bit(CABACContext *c, int b){
    put_bits(&c->pb, 1, b);
    for(;c->outstanding_count; c->outstanding_count--){
        put_bits(&c->pb, 1, 1-b);
    }
}

static inline void renorm_cabac_encoder(CABACContext *c){
    while(c->range < 0x100){
        //FIXME optimize
        if(c->low<0x100){
            put_cabac_bit(c, 0);
        }else if(c->low<0x200){
            c->outstanding_count++;
            c->low -= 0x100;
        }else{
            put_cabac_bit(c, 1);
            c->low -= 0x200;
        }

        c->range+= c->range;
        c->low += c->low;
    }
}

static void put_cabac(CABACContext *c, uint8_t * const state, int bit){
    int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + *state];

    if(bit == ((*state)&1)){
        c->range -= RangeLPS;
        *state = h264_mps_state[*state];
        *state= ff_h264_mps_state[*state];
    }else{
        c->low += c->range - RangeLPS;
        c->range = RangeLPS;
@@ -205,6 +183,10 @@ static void put_cabac(CABACContext *c, uint8_t * const state, int bit){
    }

    renorm_cabac_encoder(c);

#ifdef STRICT_LIMITS
    c->symCount++;
#endif
}

/**
@@ -226,6 +208,10 @@ static void put_cabac_bypass(CABACContext *c, int bit){
        put_cabac_bit(c, 1);
        c->low -= 0x400;
    }

#ifdef STRICT_LIMITS
    c->symCount++;
#endif
}

/**
@@ -250,9 +236,74 @@ static int put_cabac_terminate(CABACContext *c, int bit){
        flush_put_bits(&c->pb); //FIXME FIXME FIXME XXX wrong
    }

#ifdef STRICT_LIMITS
    c->symCount++;
#endif

    return (put_bits_count(&c->pb)+7)>>3;
}

/**
 * put (truncated) unary binarization.
 */
static void put_cabac_u(CABACContext *c, uint8_t * state, int v, int max, int max_index, int truncated){
    int i;

    assert(v <= max);

    for(i=0; i<v; i++){
        put_cabac(c, state, 1);
        if(i < max_index) state++;
    }
    if(truncated==0 || v<max)
        put_cabac(c, state, 0);
}

/**
 * put unary exp golomb k-th order binarization.
 */
static void put_cabac_ueg(CABACContext *c, uint8_t * state, int v, int max, int is_signed, int k, int max_index){
    int i;

    if(v==0)
        put_cabac(c, state, 0);
    else{
        const int sign= v < 0;

        if(is_signed) v= FFABS(v);

        if(v<max){
            for(i=0; i<v; i++){
                put_cabac(c, state, 1);
                if(i < max_index) state++;
            }

            put_cabac(c, state, 0);
        }else{
            int m= 1<<k;

            for(i=0; i<max; i++){
                put_cabac(c, state, 1);
                if(i < max_index) state++;
            }

            v -= max;
            while(v >= m){ //FIXME optimize
                put_cabac_bypass(c, 1);
                v-= m;
                m+= m;
            }
            put_cabac_bypass(c, 0);
            while(m>>=1){
                put_cabac_bypass(c, v&m);
            }
        }

        if(is_signed)
            put_cabac_bypass(c, sign);
    }
}

int main(void){
    CABACContext c;
    uint8_t b[9*SIZE];
@@ -282,6 +333,19 @@ START_TIMER
STOP_TIMER("put_cabac")
    }

#if 0
    for(i=0; i<SIZE; i++){
START_TIMER
        put_cabac_u(&c, state, r[i], 6, 3, i&1);
STOP_TIMER("put_cabac_u")
    }

    for(i=0; i<SIZE; i++){
START_TIMER
        put_cabac_ueg(&c, state, r[i], 3, 0, 1, 2);
STOP_TIMER("put_cabac_ueg")
    }
#endif
    put_cabac_terminate(&c, 1);

    ff_init_cabac_decoder(&c, b, SIZE);
@@ -301,6 +365,21 @@ START_TIMER
            av_log(NULL, AV_LOG_ERROR, "CABAC failure at %d\n", i);
STOP_TIMER("get_cabac")
    }
#if 0
    for(i=0; i<SIZE; i++){
START_TIMER
        if( r[i] != get_cabac_u(&c, state, (i&1) ? 6 : 7, 3, i&1) )
            av_log(NULL, AV_LOG_ERROR, "CABAC unary (truncated) binarization failure at %d\n", i);
STOP_TIMER("get_cabac_u")
    }

    for(i=0; i<SIZE; i++){
START_TIMER
        if( r[i] != get_cabac_ueg(&c, state, 3, 0, 1, 2))
            av_log(NULL, AV_LOG_ERROR, "CABAC unary (truncated) binarization failure at %d\n", i);
STOP_TIMER("get_cabac_ueg")
    }
#endif
    if(!get_cabac_terminate(&c))
        av_log(NULL, AV_LOG_ERROR, "where's the Terminator?\n");
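Aside (not part of the patch): the self-test above already demonstrates the intended calling sequence. Condensed, and assuming the code sits inside cabac.c next to the static put_cabac()/put_cabac_terminate() helpers, a round trip looks roughly like:

```c
#define RT_BITS 1024

/* Hypothetical test shape, not a drop-in function. */
static int cabac_roundtrip(void)
{
    CABACContext c;
    uint8_t buf[2 * RT_BITS];
    uint8_t state_enc = 0, state_dec = 0;  /* one context model, initial state 0 */
    int i, ok = 1;

    ff_init_cabac_encoder(&c, buf, sizeof(buf));
    ff_init_cabac_states(&c);              /* fills the global ff_h264_* tables  */
    for (i = 0; i < RT_BITS; i++)
        put_cabac(&c, &state_enc, i & 1);  /* encode an alternating bit pattern  */
    put_cabac_terminate(&c, 1);            /* flush the arithmetic coder         */

    ff_init_cabac_decoder(&c, buf, sizeof(buf));
    for (i = 0; i < RT_BITS; i++)
        ok &= get_cabac(&c, &state_dec) == (i & 1);
    return ok;                             /* 1 if every bit decoded back        */
}
```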
@@ -27,10 +27,13 @@
#ifndef AVCODEC_CABAC_H
#define AVCODEC_CABAC_H

#include <stdint.h>
#include <stddef.h>

#include "put_bits.h"

//#undef NDEBUG
#include <assert.h>

#define CABAC_BITS 16
#define CABAC_MASK ((1<<CABAC_BITS)-1)

@@ -38,14 +41,237 @@ typedef struct CABACContext{
    int low;
    int range;
    int outstanding_count;
#ifdef STRICT_LIMITS
    int symCount;
#endif
    const uint8_t *bytestream_start;
    const uint8_t *bytestream;
    const uint8_t *bytestream_end;
    PutBitContext pb;
}CABACContext;

extern uint8_t ff_h264_mlps_state[4*64];
extern uint8_t ff_h264_lps_range[4*2*64];  ///< rangeTabLPS
extern uint8_t ff_h264_mps_state[2*64];    ///< transIdxMPS
extern uint8_t ff_h264_lps_state[2*64];    ///< transIdxLPS
extern const uint8_t ff_h264_norm_shift[512];

#if ARCH_X86
#   include "x86/cabac.h"
#endif

void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size);
void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size);
void ff_init_cabac_states(CABACContext *c);


static inline void put_cabac_bit(CABACContext *c, int b){
    put_bits(&c->pb, 1, b);
    for(;c->outstanding_count; c->outstanding_count--){
        put_bits(&c->pb, 1, 1-b);
    }
}

static inline void renorm_cabac_encoder(CABACContext *c){
    while(c->range < 0x100){
        //FIXME optimize
        if(c->low<0x100){
            put_cabac_bit(c, 0);
        }else if(c->low<0x200){
            c->outstanding_count++;
            c->low -= 0x100;
        }else{
            put_cabac_bit(c, 1);
            c->low -= 0x200;
        }

        c->range+= c->range;
        c->low += c->low;
    }
}

static void refill(CABACContext *c){
#if CABAC_BITS == 16
        c->low+= (c->bytestream[0]<<9) + (c->bytestream[1]<<1);
#else
        c->low+= c->bytestream[0]<<1;
#endif
    c->low -= CABAC_MASK;
    c->bytestream+= CABAC_BITS/8;
}

static inline void renorm_cabac_decoder(CABACContext *c){
    while(c->range < 0x100){
        c->range+= c->range;
        c->low+= c->low;
        if(!(c->low & CABAC_MASK))
            refill(c);
    }
}

static inline void renorm_cabac_decoder_once(CABACContext *c){
    int shift= (uint32_t)(c->range - 0x100)>>31;
    c->range<<= shift;
    c->low  <<= shift;
    if(!(c->low & CABAC_MASK))
        refill(c);
}

#ifndef get_cabac_inline
static void refill2(CABACContext *c){
    int i, x;

    x= c->low ^ (c->low-1);
    i= 7 - ff_h264_norm_shift[x>>(CABAC_BITS-1)];

    x= -CABAC_MASK;

#if CABAC_BITS == 16
        x+= (c->bytestream[0]<<9) + (c->bytestream[1]<<1);
#else
        x+= c->bytestream[0]<<1;
#endif

    c->low += x<<i;
    c->bytestream+= CABAC_BITS/8;
}

static av_always_inline int get_cabac_inline(CABACContext *c, uint8_t * const state){
    int s = *state;
    int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + s];
    int bit, lps_mask;

    c->range -= RangeLPS;
    lps_mask= ((c->range<<(CABAC_BITS+1)) - c->low)>>31;

    c->low -= (c->range<<(CABAC_BITS+1)) & lps_mask;
    c->range += (RangeLPS - c->range) & lps_mask;

    s^=lps_mask;
    *state= (ff_h264_mlps_state+128)[s];
    bit= s&1;

    lps_mask= ff_h264_norm_shift[c->range];
    c->range<<= lps_mask;
    c->low  <<= lps_mask;
    if(!(c->low & CABAC_MASK))
        refill2(c);
    return bit;
}
#endif

static int av_noinline av_unused get_cabac_noinline(CABACContext *c, uint8_t * const state){
    return get_cabac_inline(c,state);
}

static int av_unused get_cabac(CABACContext *c, uint8_t * const state){
    return get_cabac_inline(c,state);
}

static int av_unused get_cabac_bypass(CABACContext *c){
    int range;
    c->low += c->low;

    if(!(c->low & CABAC_MASK))
        refill(c);

    range= c->range<<(CABAC_BITS+1);
    if(c->low < range){
        return 0;
    }else{
        c->low -= range;
        return 1;
    }
}


#ifndef get_cabac_bypass_sign
static av_always_inline int get_cabac_bypass_sign(CABACContext *c, int val){
    int range, mask;
    c->low += c->low;

    if(!(c->low & CABAC_MASK))
        refill(c);

    range= c->range<<(CABAC_BITS+1);
    c->low -= range;
    mask= c->low >> 31;
    range &= mask;
    c->low += range;
    return (val^mask)-mask;
}
#endif

/**
 *
 * @return the number of bytes read or 0 if no end
 */
static int av_unused get_cabac_terminate(CABACContext *c){
    c->range -= 2;
    if(c->low < c->range<<(CABAC_BITS+1)){
        renorm_cabac_decoder_once(c);
        return 0;
    }else{
        return c->bytestream - c->bytestream_start;
    }
}

#if 0
/**
 * Get (truncated) unary binarization.
 */
static int get_cabac_u(CABACContext *c, uint8_t * state, int max, int max_index, int truncated){
    int i;

    for(i=0; i<max; i++){
        if(get_cabac(c, state)==0)
            return i;

        if(i< max_index) state++;
    }

    return truncated ? max : -1;
}

/**
 * get unary exp golomb k-th order binarization.
 */
static int get_cabac_ueg(CABACContext *c, uint8_t * state, int max, int is_signed, int k, int max_index){
    int i, v;
    int m= 1<<k;

    if(get_cabac(c, state)==0)
        return 0;

    if(0 < max_index) state++;

    for(i=1; i<max; i++){
        if(get_cabac(c, state)==0){
            if(is_signed && get_cabac_bypass(c)){
                return -i;
            }else
                return i;
        }

        if(i < max_index) state++;
    }

    while(get_cabac_bypass(c)){
        i+= m;
        m+= m;
    }

    v=0;
    while(m>>=1){
        v+= v + get_cabac_bypass(c);
    }
    i += v;

    if(is_signed && get_cabac_bypass(c)){
        return -i;
    }else
        return i;
}
#endif /* 0 */

#endif /* AVCODEC_CABAC_H */
Some files were not shown because too many files have changed in this diff.