Compare commits
104 Commits
9d0bb7fc39
a53fd4b758
b4ccdf5e68
9b02aa2593
f089e67d51
842d7c9b3a
187297b871
211ad5042a
2b06f5f8f1
0a64b25c77
40e52bbb63
aeec1a6430
ef121a88d5
db99c41567
54bdb5fc86
2fd824b466
005b38f8f1
6911d9e1b0
cecb2b39ce
9ebfee7ac0
01b39884c7
c08e8ab715
68ae344b5e
5753d780b4
dbb534cea6
414d75b8bc
c52a25e03b
c7966bf795
0f429392cf
2dc6c5d462
dc0403530e
59147be24f
09bc4be3db
aaef59d535
283e070877
4e0c29451b
f5ae34250a
fc4c29bc6e
6158eec53f
6c0fef5762
f5a4bd23e9
8e1760f37f
1da5ab751f
237ef710a1
be47e93134
1821c849da
a4522ae516
c7ee4bc016
0cabb95811
a1b32533aa
e2eb0d2326
acac6b0d69
deb8d0d6a1
3e817d91ef
e231f0fade
f0f55e6726
61dc8494d7
423b87d621
8d9568b4a1
acf511de34
fd2951bb53
fa004f4854
1155bdb754
01838c5732
f09f33031b
b2a9f64e1b
15ea618ef6
ec33423273
d5dd54df69
e7a4c34e7c
2881bfbfd6
80fb38153e
b79f337f8a
f593ac1c21
baf92305a6
d6d168e87b
50f9c4acc3
211374e52a
1bf2461765
64444cd578
0047a31090
d73ce6cb56
9a6d3eee59
8b221d60fa
9da9b36435
09b33f9a82
fa6b6dad3d
e0d88cfd18
18043e3d22
ccf470fdb6
8f9bc6f2ce
fcab45f39b
bc44d06c3d
7740e36a89
6127f792f9
fd2cf9c45d
fc3dec8b62
a7315116dd
37268dcc86
ea28e74205
c1c84f0a55
56bf38859b
1cda4aa1e0
9711b52739
MAINTAINERS (43 changes)

@@ -46,7 +46,7 @@ Miscellaneous Areas
documentation Mike Melanson
website Robert Swain, Lou Logan
build system (configure,Makefiles) Diego Biurrun, Mans Rullgard
project server Árpád Gereöffy, Michael Niedermayer, Reimar Döffinger
project server Árpád Gereöffy, Michael Niedermayer, Reimar Döffinger, Alexander Strasser
mailinglists Michael Niedermayer, Baptiste Coudurier, Lou Logan
presets Robert Swain
metadata subsystem Aurelien Jacobs

@@ -62,13 +62,20 @@ Internal Interfaces:
libavutil/common.h Michael Niedermayer

Other:
intfloat* Michael Niedermayer
rational.c, rational.h Michael Niedermayer
mathematics.c, mathematics.h Michael Niedermayer
integer.c, integer.h Michael Niedermayer
bprint Nicolas George
bswap.h
des Reimar Doeffinger
float_dsp Loren Merritt
hash Reimar Doeffinger
intfloat* Michael Niedermayer
integer.c, integer.h Michael Niedermayer
lzo Reimar Doeffinger
mathematics.c, mathematics.h Michael Niedermayer
opencl.c, opencl.h Wei Gao
rational.c, rational.h Michael Niedermayer
rc4 Reimar Doeffinger
ripemd.c, ripemd.h James Almer
timecode Clément Bœsch

libavcodec

@@ -131,8 +138,8 @@ Codecs:
binkaudio.c Peter Ross
bmp.c Mans Rullgard, Kostya Shishkov
cavs* Stefan Gehrer
celp_filters.* Vitor Sessak
cdxl.c Paul B Mahol
celp_filters.* Vitor Sessak
cinepak.c Roberto Togni
cljr Alex Beregszaszi
cllc.c Derek Buitenhuis

@@ -143,8 +150,8 @@ Codecs:
dca.c Kostya Shishkov, Benjamin Larsson
dnxhd* Baptiste Coudurier
dpcm.c Mike Melanson
dxa.c Kostya Shishkov
dv.c Roman Shaposhnik
dxa.c Kostya Shishkov
eacmv*, eaidct*, eat* Peter Ross
ffv1.c Michael Niedermayer
ffwavesynth.c Nicolas George

@@ -154,9 +161,9 @@ Codecs:
g722.c Martin Storsjo
g726.c Roman Shaposhnik
gifdec.c Baptiste Coudurier
h264* Loren Merritt, Michael Niedermayer
h261* Michael Niedermayer
h263* Michael Niedermayer
h264* Loren Merritt, Michael Niedermayer
huffyuv.c Michael Niedermayer
idcinvideo.c Mike Melanson
imc* Benjamin Larsson

@@ -171,8 +178,8 @@ Codecs:
kmvc.c Kostya Shishkov
lcl*.c Roberto Togni, Reimar Doeffinger
libcelt_dec.c Nicolas George
libgsm.c Michel Bardiaux
libdirac* David Conrad
libgsm.c Michel Bardiaux
libopenjpeg.c Jaikrishnan Menon
libopenjpegenc.c Michael Bradshaw
libschroedinger* David Conrad

@@ -180,8 +187,8 @@ Codecs:
libtheoraenc.c David Conrad
libutvideo* Derek Buitenhuis
libvorbis.c David Conrad
libxavs.c Stefan Gehrer
libx264.c Mans Rullgard, Jason Garrett-Glaser
libxavs.c Stefan Gehrer
loco.c Kostya Shishkov
lzo.h, lzo.c Reimar Doeffinger
mdec.c Michael Niedermayer

@@ -243,8 +250,8 @@ Codecs:
vda_h264_dec.c Xidorn Quan
vima.c Paul B Mahol
vmnc.c Kostya Shishkov
vorbis_enc.c Oded Shimon
vorbis_dec.c Denes Balatoni, David Conrad
vorbis_enc.c Oded Shimon
vp3* Mike Melanson
vp5 Aurelien Jacobs
vp6 Aurelien Jacobs

@@ -278,11 +285,11 @@ libavdevice
libavdevice/avdevice.h

dshow.c Roger Pack
iec61883.c Georg Lippitsch
libdc1394.c Roman Shaposhnik
v4l2.c Luca Abeni
vfwcap.c Ramiro Polla
dshow.c Roger Pack

libavfilter
===========

@@ -292,11 +299,13 @@ Generic parts:

Filters:
af_amerge.c Nicolas George
af_aresample.c Michael Niedermayer
af_astreamsync.c Nicolas George
af_atempo.c Pavel Koshevoy
af_pan.c Nicolas George
vf_delogo.c Jean Delvare (CC <khali@linux-fr.org>)
vf_drawbox.c/drawgrid Andrey Utkin
vf_scale.c Michael Niedermayer
vf_yadif.c Michael Niedermayer

Sources:

@@ -316,7 +325,8 @@ Muxers/Demuxers:
4xm.c Mike Melanson
adtsenc.c Robert Swain
afc.c Paul B Mahol
aiff.c Baptiste Coudurier
aiffdec.c Baptiste Coudurier, Matthieu Bouron
aiffenc.c Baptiste Coudurier, Matthieu Bouron
ape.c Kostya Shishkov
ass* Aurelien Jacobs
astdec.c Paul B Mahol

@@ -344,8 +354,8 @@ Muxers/Demuxers:
idcin.c Mike Melanson
idroqdec.c Mike Melanson
iff.c Jaikrishnan Menon
ipmovie.c Mike Melanson
img2*.c Michael Niedermayer
ipmovie.c Mike Melanson
ircam* Paul B Mahol
iss.c Stefan Gehrer
jacosub* Clément Bœsch

@@ -359,11 +369,11 @@ Muxers/Demuxers:
matroskadec.c Aurelien Jacobs
matroskaenc.c David Conrad
metadata* Aurelien Jacobs
microdvd* Aurelien Jacobs
mgsts.c Paul B Mahol
microdvd* Aurelien Jacobs
mm.c Peter Ross
mov.c Michael Niedermayer, Baptiste Coudurier
movenc.c Michael Niedermayer, Baptiste Coudurier
movenc.c Baptiste Coudurier, Matthieu Bouron
mpc.c Kostya Shishkov
mpeg.c Michael Niedermayer
mpegenc.c Michael Niedermayer

@@ -458,7 +468,6 @@ Releases
2.0 Michael Niedermayer
1.2 Michael Niedermayer
1.1 Michael Niedermayer

If you want to maintain an older release, please contact us
compat/avisynth/avisynth_c_25.h (new file, 68 lines)

@@ -0,0 +1,68 @@
// Copyright (c) 2011 FFmpegSource Project
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

/* these are defines/functions that are used and were changed in the switch to 2.6
 * and are needed to maintain full compatility with 2.5 */

enum {
    AVS_CS_YV12_25 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
    AVS_CS_I420_25 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
};

AVSC_INLINE int avs_get_height_p_25(const AVS_VideoFrame * p, int plane) {
    switch (plane)
    {
    case AVS_PLANAR_U: case AVS_PLANAR_V:
        if (p->pitchUV)
            return p->height>>1;
        return 0;
    }
    return p->height;}

AVSC_INLINE int avs_get_row_size_p_25(const AVS_VideoFrame * p, int plane) {
    int r;
    switch (plane)
    {
    case AVS_PLANAR_U: case AVS_PLANAR_V:
        if (p->pitchUV)
            return p->row_size>>1;
        else
            return 0;
    case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
        if (p->pitchUV)
        {
            r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
            if (r < p->pitchUV)
                return r;
            return p->row_size>>1;
        }
        else
            return 0;
    case AVS_PLANAR_Y_ALIGNED:
        r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
        if (r <= p->pitch)
            return r;
        return p->row_size;
    }
    return p->row_size;
}

AVSC_INLINE int avs_is_yv12_25(const AVS_VideoInfo * p)
{ return ((p->pixel_type & AVS_CS_YV12_25) == AVS_CS_YV12_25)||((p->pixel_type & AVS_CS_I420_25) == AVS_CS_I420_25); }
configure (2 changes)

@@ -4201,7 +4201,7 @@ enabled openal && { { for al_libs in "${OPENAL_LIBS}" "-lopenal" "-lO
enabled opencl && { check_lib2 OpenCL/cl.h clEnqueueNDRangeKernel -Wl,-framework,OpenCL ||
                    check_lib2 CL/cl.h clEnqueueNDRangeKernel -lOpenCL ||
                    die "ERROR: opencl not found"; } &&
                  { enabled_any w32threads os2threads &&
                  { ! enabled_any w32threads os2threads ||
                    die "opencl currently needs --enable-pthreads or --disable-w32threads"; } &&
                  { check_cpp_condition "OpenCL/cl.h" "defined(CL_VERSION_1_2)" ||
                    check_cpp_condition "CL/cl.h" "defined(CL_VERSION_1_2)" ||
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 2.0
PROJECT_NUMBER = 2.0.2

# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -14,7 +14,3 @@ accepted. If you are experiencing issues with any formally released version of
FFmpeg, please try git master to check if the issue still exists. If it does,
make your report against the development code following the usual bug reporting
guidelines.

AVI/AVXSynth
--------
If you want to use FFmpeg with AVISynth, you need AVISynth 2.6.0 at minimum.
@@ -1,3 +1,4 @@
@anchor{codec-options}
@chapter Codec Options
@c man begin CODEC OPTIONS
@@ -25,6 +25,95 @@ enabled encoders.
A description of some of the currently available audio encoders
follows.

@anchor{aacenc}
@section aac

Advanced Audio Coding (AAC) encoder.

This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
low complexity (AAC-LC) profile is supported. To use this encoder, you must set
@option{strict} option to @samp{experimental} or lower.

As this encoder is experimental, unexpected behavior may exist from time to
time. For a more stable AAC encoder, see @ref{libvo-aacenc}. However, be warned
that it has a worse quality reported by some users.

@c Comment this out until somebody writes the respective documentation.
@c See also @ref{libfaac}, @ref{libaacplus}, and @ref{libfdk-aac-enc}.

@subsection Options

@table @option
@item b
Set bit rate in bits/s. Setting this automatically activates constant bit rate
(CBR) mode.

@item q
Set quality for variable bit rate (VBR) mode. This option is valid only using
the @command{ffmpeg} command-line tool. For library interface users, use
@option{global_quality}.

@item stereo_mode
Set stereo encoding mode. Possible values:

@table @samp
@item auto
Automatically selected by the encoder.

@item ms_off
Disable middle/side encoding. This is the default.

@item ms_force
Force middle/side encoding.
@end table

@item aac_coder
Set AAC encoder coding method. Possible values:

@table @samp
@item 0
FAAC-inspired method.

This method is a simplified reimplementation of the method used in FAAC, which
sets thresholds proportional to the band energies, and then decreases all the
thresholds with quantizer steps to find the appropriate quantization with
distortion below threshold band by band.

The quality of this method is comparable to the two loop searching method
descibed below, but somewhat a little better and slower.

@item 1
Average noise to mask ratio (ANMR) trellis-based solution.

This has a theoretic best quality out of all the coding methods, but at the
cost of the slowest speed.

@item 2
Two loop searching (TLS) method.

This method first sets quantizers depending on band thresholds and then tries
to find an optimal combination by adding or subtracting a specific value from
all quantizers and adjusting some individual quantizer a little.

This method produces similar quality with the FAAC method and is the default.

@item 3
Constant quantizer method.

This method sets a constant quantizer for all bands. This is the fastest of all
the methods, yet produces the worst quality.

@end table

@end table

@subsection Tips and Tricks

According to some reports
(e.g. @url{http://d.hatena.ne.jp/kamedo2/20120729/1343545890}), setting the
@option{cutoff} option to 15000 Hz greatly improves the quality of the output
quality. As a result, we encourage you to do the same.
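As a rough illustration of the options above (not taken from the patch; file names and values are placeholders), a VBR encode with the recommended cutoff could look like:

    ffmpeg -i input.wav -c:a aac -strict experimental -q:a 2 -cutoff 15000 output.m4a

A CBR encode would use -b:a 128k instead of -q:a.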
@section ac3 and ac3_fixed

AC-3 audio encoders.

@@ -420,26 +509,36 @@ Requires the presence of the libmp3lame headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libmp3lame}.

@subsection Option Mapping
@subsection Options

The following options are supported by the libmp3lame wrapper,
the LAME-equivalent options follow the FFmpeg ones.
The following options are supported by the libmp3lame wrapper. The
@command{lame}-equivalent of the options are listed in parentheses.

@multitable @columnfractions .2 .2
@item FFmpeg @tab LAME
@item b @tab b
Set bitrate expressed in bits/s, LAME @code{bitrate} is expressed in
kilobits/s.
@item q @tab V
Set quality setting for VBR.
@item compression_level @tab q
Set algorithm quality. Valid arguments are integers in the 0-9 range.
@item reservoir @tab N.A.
Enable use of bit reservoir. LAME has this enabled by default.
@item joint_stereo @tab -m j
@table @option
@item b (@emph{-b})
Set bitrate expressed in bits/s for CBR. LAME @code{bitrate} is
expressed in kilobits/s.

@item q (@emph{-V})
Set constant quality setting for VBR. This option is valid only
using the @command{ffmpeg} command-line tool. For library interface
users, use @option{global_quality}.

@item compression_level (@emph{-q})
Set algorithm quality. Valid arguments are integers in the 0-9 range,
with 0 meaning highest quality but slowest, and 9 meaning fastest
while producing the worst quality.

@item reservoir
Enable use of bit reservoir when set to 1. Default value is 1. LAME
has this enabled by default, but can be overriden by use
@option{--nores} option.

@item joint_stereo (@emph{-m j})
Enable the encoder to use (on a frame by frame basis) either L/R
stereo or mid/side stereo.
@end multitable
stereo or mid/side stereo. Default value is 1.

@end table
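For illustration only (not part of the patch; names are placeholders), a typical VBR MP3 encode with this wrapper would be:

    ffmpeg -i input.wav -c:a libmp3lame -q:a 2 output.mp3

where -q:a maps to LAME's -V scale; use -b:a 192k instead for CBR.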
@section libopencore-amrnb

@@ -486,24 +585,26 @@ Requires the presence of the libtwolame headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libtwolame}.

@subsection Options Mapping
@subsection Options

The following options are supported by the libtwolame wrapper. The
TwoLAME-equivalent options follow the FFmpeg ones and are in
@command{twolame}-equivalent options follow the FFmpeg ones and are in
parentheses.

@table @option
@item b
(b) Set bitrate in bits/s. Note that FFmpeg @code{b} option is
expressed in bits/s, twolame @code{b} in kilobits/s. The default
value is 128k.
@item b (@emph{-b})
Set bitrate expressed in bits/s for CBR. @command{twolame} @option{b}
option is expressed in kilobits/s. Default value is 128k.

@item q
(V) Set quality for experimental VBR support. Maximum value range is
from -50 to 50, useful range is from -10 to 10.
@item q (@emph{-V})
Set quality for experimental VBR support. Maximum value range is
from -50 to 50, useful range is from -10 to 10. The higher the
value, the better the quality. This option is valid only using the
@command{ffmpeg} command-line tool. For library interface users,
use @option{global_quality}.

@item mode
(mode) Set MPEG mode. Possible values:
@item mode (@emph{--mode})
Set the mode of the resulting audio. Possible values:

@table @samp
@item auto

@@ -518,29 +619,30 @@ Dual channel
Mono
@end table

@item psymodel
(psyc-mode) Set psychoacoustic model to use in encoding. The argument
must be an integer between -1 and 4, inclusive. The higher the value,
the better the quality. The default value is 3.
@item psymodel (@emph{--psyc-mode})
Set psychoacoustic model to use in encoding. The argument must be
an integer between -1 and 4, inclusive. The higher the value, the
better the quality. The default value is 3.

@item energy_levels
(energy) Enable energy levels extensions when set to 1. The default
value is 0 (disabled).
@item energy_levels (@emph{--energy})
Enable energy levels extensions when set to 1. The default value is
0 (disabled).

@item error_protection
(protect) Enable CRC error protection when set to 1. The default value
is 0 (disabled).
@item error_protection (@emph{--protect})
Enable CRC error protection when set to 1. The default value is 0
(disabled).

@item copyright
(copyright) Set MPEG audio copyright flag when set to 1. The default
value is 0 (disabled).
@item copyright (@emph{--copyright})
Set MPEG audio copyright flag when set to 1. The default value is 0
(disabled).

@item original
(original) Set MPEG audio original flag when set to 1. The default
value is 0 (disabled).
@item original (@emph{--original})
Set MPEG audio original flag when set to 1. The default value is 0
(disabled).

@end table
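A sketch of a typical invocation (illustrative only; file names are placeholders):

    ffmpeg -i input.wav -c:a libtwolame -b:a 192k -psymodel 3 output.mp2

Here -b:a selects the CBR bitrate and -psymodel the psychoacoustic model described above.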
@anchor{libvo-aacenc}
@section libvo-aacenc

VisualOn AAC encoder.

@@ -549,16 +651,19 @@ Requires the presence of the libvo-aacenc headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libvo-aacenc --enable-version3}.

This encoder is considered to be worse than the
@ref{aacenc,,native experimental FFmpeg AAC encoder}, according to
multiple sources.

@subsection Options

The VisualOn AAC encoder only support encoding AAC-LC and up to 2
channels. It is also CBR-only. It is considered to be worse than the
native experimental FFmpeg AAC encoder.
channels. It is also CBR-only.

@table @option

@item b
Bitrate.
Set bit rate in bits/s.

@end table
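As a minimal sketch (not from the patch; names are placeholders), a CBR stereo encode with this wrapper would be:

    ffmpeg -i input.wav -c:a libvo_aacenc -b:a 96k output.m4a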
@@ -873,178 +978,318 @@ For more information about libvpx see:

x264 H.264/MPEG-4 AVC encoder wrapper.

Requires the presence of the libx264 headers and library during
configuration. You need to explicitly configure the build with
This encoder requires the presence of the libx264 headers and library
during configuration. You need to explicitly configure the build with
@code{--enable-libx264}.

x264 supports an impressive number of features, including 8x8 and 4x4 adaptive
spatial transform, adaptive B-frame placement, CAVLC/CABAC entropy coding,
interlacing (MBAFF), lossless mode, psy optimizations for detail retention
(adaptive quantization, psy-RD, psy-trellis).
libx264 supports an impressive number of features, including 8x8 and
4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC
entropy coding, interlacing (MBAFF), lossless mode, psy optimizations
for detail retention (adaptive quantization, psy-RD, psy-trellis).

The FFmpeg wrapper provides a mapping for most of them using global options
that match those of the encoders and provides private options for the unique
encoder options. Additionally an expert override is provided to directly pass
a list of key=value tuples as accepted by x264_param_parse.
Many libx264 encoder options are mapped to FFmpeg global codec
options, while unique encoder options are provided through private
options. Additionally the @option{x264opts} and @option{x264-params}
private options allows to pass a list of key=value tuples as accepted
by the libx264 @code{x264_param_parse} function.

@subsection Option Mapping
The x264 project website is at
@url{http://www.videolan.org/developers/x264.html}.

The following options are supported by the x264 wrapper, the x264-equivalent
options follow the FFmpeg ones.
@subsection Options

@multitable @columnfractions .2 .2
@item b @tab bitrate
FFmpeg @code{b} option is expressed in bits/s, x264 @code{bitrate} in kilobits/s.
@item bf @tab bframes
Maximum number of B-frames.
@item g @tab keyint
Maximum GOP size.
@item qmin @tab qpmin
@item qmax @tab qpmax
@item qdiff @tab qpstep
@item qblur @tab qblur
@item qcomp @tab qcomp
@item refs @tab ref
@item sc_threshold @tab scenecut
@item trellis @tab trellis
@item nr @tab nr
Noise reduction.
@item me_range @tab merange
@item me_method @tab me
@item subq @tab subme
@item b_strategy @tab b-adapt
@item keyint_min @tab keyint-min
@item coder @tab cabac
Set coder to @code{ac} to use CABAC.
@item cmp @tab chroma-me
Set to @code{chroma} to use chroma motion estimation.
@item threads @tab threads
@item thread_type @tab sliced_threads
Set to @code{slice} to use sliced threading instead of frame threading.
@item flags -cgop @tab open-gop
Set @code{-cgop} to use recovery points to close GOPs.
@item rc_init_occupancy @tab vbv-init
Initial buffer occupancy.
@end multitable
The following options are supported by the libx264 wrapper. The
@command{x264}-equivalent options or values are listed in parentheses
for easy migration.

To reduce the duplication of documentation, only the private options
and some others requiring special attention are documented here. For
the documentation of the undocumented generic options, see
@ref{codec-options,,the Codec Options chapter}.

To get a more accurate and extensive documentation of the libx264
options, invoke the command @command{x264 --full-help} or consult
the libx264 documentation.

@subsection Private Options
@table @option
@item -preset @var{string}
Set the encoding preset (cf. x264 --fullhelp).
@item -tune @var{string}
Tune the encoding params (cf. x264 --fullhelp).
@item -profile @var{string}
Set profile restrictions (cf. x264 --fullhelp).
@item -fastfirstpass @var{integer}
Use fast settings when encoding first pass.
@item -crf @var{float}
Select the quality for constant quality mode.
@item -crf_max @var{float}
In CRF mode, prevents VBV from lowering quality beyond this point.
@item -qp @var{integer}
Constant quantization parameter rate control method.
@item -aq-mode @var{integer}
AQ method
@item b (@emph{bitrate})
Set bitrate in bits/s. Note that FFmpeg's @option{b} option is
expressed in bits/s, while @command{x264}'s @option{bitrate} is in
kilobits/s.

@item bf (@emph{bframes})

@item g (@emph{keyint})

@item qmax (@emph{qpmax})

@item qmin (@emph{qpmin})

@item qdiff (@emph{qpstep})

@item qblur (@emph{qblur})

@item qcomp (@emph{qcomp})

@item refs (@emph{ref})

@item sc_threshold (@emph{scenecut})

@item trellis (@emph{trellis})

@item nr (@emph{nr})

@item me_range (@emph{merange})

@item me_method (@emph{me})
Set motion estimation method. Possible values in the decreasing order
of speed:

Possible values:
@table @samp
@item none
@item dia (@emph{dia})
@item epzs (@emph{dia})
Diamond search with radius 1 (fastest). @samp{epzs} is an alias for
@samp{dia}.
@item hex (@emph{hex})
Hexagonal search with radius 2.
@item umh (@emph{umh})
Uneven multi-hexagon search.
@item esa (@emph{esa})
Exhaustive search.
@item tesa (@emph{tesa})
Hadamard exhaustive search (slowest).
@end table

@item variance
@item subq (@emph{subme})

@item b_strategy (@emph{b-adapt})

@item keyint_min (@emph{min-keyint})

@item coder
Set entropy encoder. Possible values:

@table @samp
@item ac
Enable CABAC.

@item vlc
Enable CAVLC and disable CABAC. It generates the same effect as
@command{x264}'s @option{--no-cabac} option.
@end table

@item cmp
Set full pixel motion estimation comparation algorithm. Possible values:

@table @samp
@item chroma
Enable chroma in motion estimation.

@item sad
Ignore chroma in motion estimation. It generates the same effect as
@command{x264}'s @option{--no-chroma-me} option.
@end table

@item threads (@emph{threads})

@item thread_type
Set multithreading technique. Possible values:

@table @samp
@item slice
Slice-based multithreading. It generates the same effect as
@command{x264}'s @option{--sliced-threads} option.
@item frame
Frame-based multithreading.
@end table

@item flags
Set encoding flags. It can be used to disable closed GOP and enable
open GOP by setting it to @code{-cgop}. The result is similar to
the behavior of @command{x264}'s @option{--open-gop} option.

@item rc_init_occupancy (@emph{vbv-init})

@item preset (@emph{preset})
Set the encoding preset.

@item tune (@emph{tune})
Set tuning of the encoding params.

@item profile (@emph{profile})
Set profile restrictions.

@item fastfirstpass
Enable fast settings when encoding first pass, when set to 1. When set
to 0, it has the same effect of @command{x264}'s
@option{--slow-firstpass} option.

@item crf (@emph{crf})
Set the quality for constant quality mode.

@item crf_max (@emph{crf-max})
In CRF mode, prevents VBV from lowering quality beyond this point.

@item qp (@emph{qp})
Set constant quantization rate control method parameter.

@item aq-mode (@emph{aq-mode})
Set AQ method. Possible values:

@table @samp
@item none (@emph{0})
Disabled.

@item variance (@emph{1})
Variance AQ (complexity mask).
@item autovariance

@item autovariance (@emph{2})
Auto-variance AQ (experimental).
@end table
@item -aq-strength @var{float}
AQ strength, reduces blocking and blurring in flat and textured areas.
@item -psy @var{integer}
Use psychovisual optimizations.
@item -psy-rd @var{string}
Strength of psychovisual optimization, in <psy-rd>:<psy-trellis> format.
@item -rc-lookahead @var{integer}
Number of frames to look ahead for frametype and ratecontrol.
@item -weightb @var{integer}
Weighted prediction for B-frames.
@item -weightp @var{integer}
Weighted prediction analysis method.

Possible values:
@item aq-strength (@emph{aq-strength})
Set AQ strength, reduce blocking and blurring in flat and textured areas.

@item psy
Use psychovisual optimizations when set to 1. When set to 0, it has the
same effect as @command{x264}'s @option{--no-psy} option.

@item psy-rd (@emph{psy-rd})
Set strength of psychovisual optimization, in
@var{psy-rd}:@var{psy-trellis} format.

@item rc-lookahead (@emph{rc-lookahead})
Set number of frames to look ahead for frametype and ratecontrol.

@item weightb
Enable weighted prediction for B-frames when set to 1. When set to 0,
it has the same effect as @command{x264}'s @option{--no-weightb} option.

@item weightp (@emph{weightp})
Set weighted prediction method for P-frames. Possible values:

@table @samp
@item none

@item simple

@item smart

@item none (@emph{0})
Disabled
@item simple (@emph{1})
Enable only weighted refs
@item smart (@emph{2})
Enable both weighted refs and duplicates
@end table
@item -ssim @var{integer}
Calculate and print SSIM stats.
@item -intra-refresh @var{integer}
Use Periodic Intra Refresh instead of IDR frames.
@item -b-bias @var{integer}
Influences how often B-frames are used.
@item -b-pyramid @var{integer}
Keep some B-frames as references.

Possible values:
@item ssim (@emph{ssim})
Enable calculation and printing SSIM stats after the encoding.

@item intra-refresh (@emph{intra-refresh})
Enable the use of Periodic Intra Refresh instead of IDR frames when set
to 1.

@item b-bias (@emph{b-bias})
Set the influence on how often B-frames are used.

@item b-pyramid (@emph{b-pyramid})
Set method for keeping of some B-frames as references. Possible values:

@table @samp
@item none

@item strict
@item none (@emph{none})
Disabled.
@item strict (@emph{strict})
Strictly hierarchical pyramid.
@item normal
@item normal (@emph{normal})
Non-strict (not Blu-ray compatible).
@end table
@item -mixed-refs @var{integer}
One reference per partition, as opposed to one reference per macroblock.
@item -8x8dct @var{integer}
High profile 8x8 transform.
@item -fast-pskip @var{integer}
@item -aud @var{integer}
Use access unit delimiters.
@item -mbtree @var{integer}
Use macroblock tree ratecontrol.
@item -deblock @var{string}
Loop filter parameters, in <alpha:beta> form.
@item -cplxblur @var{float}
Reduce fluctuations in QP (before curve compression).
@item -partitions @var{string}
A comma-separated list of partitions to consider, possible values: p8x8, p4x4, b8x8, i8x8, i4x4, none, all.
@item -direct-pred @var{integer}
Direct MV prediction mode

Possible values:
@item mixed-refs
Enable the use of one reference per partition, as opposed to one
reference per macroblock when set to 1. When set to 0, it has the
same effect as @command{x264}'s @option{--no-mixed-refs} option.

@item 8x8dct
Enable adaptive spatial transform (high profile 8x8 transform)
when set to 1. When set to 0, it has the same effect as
@command{x264}'s @option{--no-8x8dct} option.

@item fast-pskip
Enable early SKIP detection on P-frames when set to 1. When set
to 0, it has the same effect as @command{x264}'s
@option{--no-fast-pskip} option.

@item aud (@emph{aud})
Enable use of access unit delimiters when set to 1.

@item mbtree
Enable use macroblock tree ratecontrol when set to 1. When set
to 0, it has the same effect as @command{x264}'s
@option{--no-mbtree} option.

@item deblock (@emph{deblock})
Set loop filter parameters, in @var{alpha}:@var{beta} form.

@item cplxblur (@emph{cplxblur})
Set fluctuations reduction in QP (before curve compression).

@item partitions (@emph{partitions})
Set partitions to consider as a comma-separated list of. Possible
values in the list:

@table @samp
@item none

@item spatial

@item temporal

@item auto

@end table
@item -slice-max-size @var{integer}
Limit the size of each slice in bytes.
@item -stats @var{string}
Filename for 2 pass stats.
@item -nal-hrd @var{integer}
Signal HRD information (requires vbv-bufsize; cbr not allowed in .mp4).

Possible values:
@table @samp
@item none

@item vbr

@item cbr

@item p8x8
8x8 P-frame partition.
@item p4x4
4x4 P-frame partition.
@item b8x8
4x4 B-frame partition.
@item i8x8
8x8 I-frame partition.
@item i4x4
4x4 I-frame partition.
(Enabling @samp{p4x4} requires @samp{p8x8} to be enabled. Enabling
@samp{i8x8} requires adaptive spatial transform (@option{8x8dct}
option) to be enabled.)
@item none (@emph{none})
Do not consider any partitions.
@item all (@emph{all})
Consider every partition.
@end table

@item x264opts @var{options}
Allow to set any x264 option, see @code{x264 --fullhelp} for a list.
@item direct-pred (@emph{direct})
Set direct MV prediction mode. Possible values:

@var{options} is a list of @var{key}=@var{value} couples separated by
@table @samp
@item none (@emph{none})
Disable MV prediction.
@item spatial (@emph{spatial})
Enable spatial predicting.
@item temporal (@emph{temporal})
Enable temporal predicting.
@item auto (@emph{auto})
Automatically decided.
@end table

@item slice-max-size (@emph{slice-max-size})
Set the limit of the size of each slice in bytes. If not specified
but RTP payload size (@option{ps}) is specified, that is used.

@item stats (@emph{stats})
Set the file name for multi-pass stats.

@item nal-hrd (@emph{nal-hrd})
Set signal HRD information (requires @option{vbv-bufsize} to be set).
Possible values:

@table @samp
@item none (@emph{none})
Disable HRD information signaling.
@item vbr (@emph{vbr})
Variable bit rate.
@item cbr (@emph{cbr})
Constant bit rate (not allowed in MP4 container).
@end table

@item x264opts (N.A.)
Set any x264 option, see @command{x264 --fullhelp} for a list.

Argument is a list of @var{key}=@var{value} couples separated by
":". In @var{filter} and @var{psy-rd} options that use ":" as a separator
themselves, use "," instead. They accept it as well since long ago but this
is kept undocumented for some reason.

@@ -1054,18 +1299,136 @@ For example to specify libx264 encoding options with @command{ffmpeg}:
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
@end example

For more information about libx264 and the supported options see:
@url{http://www.videolan.org/developers/x264.html}
@item x264-params (N.A.)
Override the x264 configuration using a :-separated list of key=value
parameters.

@item -x264-params @var{string}
Override the x264 configuration using a :-separated list of key=value parameters.
This option is functionally the same as the @option{x264opts}, but is
duplicated for compability with the Libav fork.

For example to specify libx264 encoding options with @command{ffmpeg}:
@example
-x264-params level=30:bframes=0:weightp=0:cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:no-fast-pskip=1:subq=6:8x8dct=0:trellis=0
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
@end example
@end table

Encoding avpresets for common usages are provided so they can be used with the
general presets system (e.g. passing the @code{-pre} option).
Encoding ffpresets for common usages are provided so they can be used with the
general presets system (e.g. passing the @option{pre} option).
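To complement the option mapping above, here is a sketch of a common constant-quality encode (illustrative only; the preset, CRF value and file names are arbitrary):

    ffmpeg -i input.mov -c:v libx264 -preset slow -crf 22 -c:a copy output.mkv

-preset trades encoding speed for compression efficiency, and -crf selects the constant quality mode described under the crf option.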
@section libxvid

Xvid MPEG-4 Part 2 encoder wrapper.

This encoder requires the presence of the libxvidcore headers and library
during configuration. You need to explicitly configure the build with
@code{--enable-libxvid --enable-gpl}.

The native @code{mpeg4} encoder supports the MPEG-4 Part 2 format, so
users can encode to this format without this library.

@subsection Options

The following options are supported by the libxvid wrapper. Some of
the following options are listed but are not documented, and
correspond to shared codec options. See @ref{codec-options,,the Codec
Options chapter} for their documentation. The other shared options
which are not listed have no effect for the libxvid encoder.

@table @option
@item b

@item g

@item qmin

@item qmax

@item mpeg_quant

@item threads

@item bf

@item b_qfactor

@item b_qoffset

@item flags
Set specific encoding flags. Possible values:

@table @samp

@item mv4
Use four motion vector by macroblock.

@item aic
Enable high quality AC prediction.

@item gray
Only encode grayscale.

@item gmc
Enable the use of global motion compensation (GMC).

@item qpel
Enable quarter-pixel motion compensation.

@item cgop
Enable closed GOP.

@item global_header
Place global headers in extradata instead of every keyframe.

@end table

@item trellis

@item me_method
Set motion estimation method. Possible values in decreasing order of
speed and increasing order of quality:

@table @samp
@item zero
Use no motion estimation (default).

@item phods
@item x1
@item log
Enable advanced diamond zonal search for 16x16 blocks and half-pixel
refinement for 16x16 blocks. @samp{x1} and @samp{log} are aliases for
@samp{phods}.

@item epzs
Enable all of the things described above, plus advanced diamond zonal
search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
estimation on chroma planes.

@item full
Enable all of the things described above, plus extended 16x16 and 8x8
blocks search.
@end table

@item mbd
Set macroblock decision algorithm. Possible values in the increasing
order of quality:

@table @samp
@item simple
Use macroblock comparing function algorithm (default).

@item bits
Enable rate distortion-based half pixel and quarter pixel refinement for
16x16 blocks.

@item rd
Enable all of the things described above, plus rate distortion-based
half pixel and quarter pixel refinement for 8x8 blocks, and rate
distortion-based search using square pattern.
@end table

@end table
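As a rough sketch of how the flags and motion-estimation options above combine (illustrative only; values and file names are arbitrary):

    ffmpeg -i input.avi -c:v libxvid -b:v 1200k -flags +mv4+aic -me_method epzs -mbd rd output.avi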
@section png
@@ -57,6 +57,9 @@ Enable RTP MP4A-LATM payload.
Reduce the latency introduced by optional buffering
@end table

@item seek2any @var{integer} (@emph{input})
Forces seeking to enable seek to any mode if set to 1. Default is 0.

@item analyzeduration @var{integer} (@emph{input})
Specify how many microseconds are analyzed to probe the input. A
higher value will allow to detect more accurate information, but will

@@ -133,6 +136,12 @@ been without shifting.
Also note that this affects only leading negative timestamps, and not
non-monotonic negative timestamps.

@item skip_initial_bytes @var{integer} (@emph{input})
Set number initial bytes to skip. Default is 0.

@item correct_ts_overflow @var{integer} (@emph{input})
Correct single timestamp overflows if set to 1. Default is 1.

@item flush_packets @var{integer} (@emph{output})
Flush the underlying I/O stream after each packet. Default 1 enables it, and
has the effect of reducing the latency; 0 disables it and may slightly
@@ -80,9 +80,16 @@ static int ass_encode_frame(AVCodecContext *avctx,
     * will be "Marked=N" instead of the layer num, so we will
     * have layer=0, which is fine. */
    layer = strtol(ass, &p, 10);
    if (*p) p += strcspn(p, ",") + 1; // skip layer or marked
    if (*p) p += strcspn(p, ",") + 1; // skip start timestamp
    if (*p) p += strcspn(p, ",") + 1; // skip end timestamp

#define SKIP_ENTRY(ptr) do { \
    char *sep = strchr(ptr, ','); \
    if (sep) \
        ptr = sep + 1; \
} while (0)

    SKIP_ENTRY(p); // skip layer or marked
    SKIP_ENTRY(p); // skip start timestamp
    SKIP_ENTRY(p); // skip end timestamp
    snprintf(ass_line, sizeof(ass_line), "%d,%ld,%s", ++s->id, layer, p);
    ass_line[strcspn(ass_line, "\r\n")] = 0;
    ass = ass_line;
@@ -1931,7 +1931,7 @@ void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){

static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
    long i;
    for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
    for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
        long a = *(long*)(src+i);
        long b = *(long*)(dst+i);
        *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);

@@ -1956,7 +1956,7 @@ static void diff_bytes_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
        }
    }else
#endif
    for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
    for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
        long a = *(long*)(src1+i);
        long b = *(long*)(src2+i);
        *(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
@@ -483,6 +483,10 @@ static int read_extra_header(FFV1Context *f)
    ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);

    f->version = get_symbol(c, state, 0);
    if (f->version < 2) {
        av_log(f->avctx, AV_LOG_ERROR, "Invalid version in global header\n");
        return AVERROR_INVALIDDATA;
    }
    if (f->version > 2) {
        c->bytestream_end -= 4;
        f->minor_version = get_symbol(c, state, 0);

@@ -562,6 +566,7 @@ static int read_header(FFV1Context *f)
    memset(state, 128, sizeof(state));

    if (f->version < 2) {
        int chroma_planes, chroma_h_shift, chroma_v_shift, transparency, colorspace, bits_per_raw_sample;
        unsigned v= get_symbol(c, state, 0);
        if (v >= 2) {
            av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);

@@ -574,15 +579,32 @@ static int read_header(FFV1Context *f)
            f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
        }

        f->colorspace = get_symbol(c, state, 0); //YUV cs type
        colorspace = get_symbol(c, state, 0); //YUV cs type
        bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
        chroma_planes = get_rac(c, state);
        chroma_h_shift = get_symbol(c, state, 0);
        chroma_v_shift = get_symbol(c, state, 0);
        transparency = get_rac(c, state);

        if (f->version > 0)
            f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
        if (f->plane_count) {
            if ( colorspace != f->colorspace
                 || bits_per_raw_sample != f->avctx->bits_per_raw_sample
                 || chroma_planes != f->chroma_planes
                 || chroma_h_shift!= f->chroma_h_shift
                 || chroma_v_shift!= f->chroma_v_shift
                 || transparency != f->transparency) {
                av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
                return AVERROR_INVALIDDATA;
            }
        }

        f->colorspace = colorspace;
        f->avctx->bits_per_raw_sample = bits_per_raw_sample;
        f->chroma_planes = chroma_planes;
        f->chroma_h_shift = chroma_h_shift;
        f->chroma_v_shift = chroma_v_shift;
        f->transparency = transparency;

        f->chroma_planes = get_rac(c, state);
        f->chroma_h_shift = get_symbol(c, state, 0);
        f->chroma_v_shift = get_symbol(c, state, 0);
        f->transparency = get_rac(c, state);
        f->plane_count = 2 + f->transparency;
    }

@@ -600,47 +622,32 @@ static int read_header(FFV1Context *f)
        case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
        case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
        case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
        default:
            av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
            return AVERROR(ENOSYS);
        }
    } else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
        switch(16*f->chroma_h_shift + f->chroma_v_shift) {
        case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
        case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
        case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
        default:
            av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
            return AVERROR(ENOSYS);
        }
    } else if (f->avctx->bits_per_raw_sample == 9) {
    } else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
        f->packed_at_lsb = 1;
        switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
        case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
        case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
        case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
        default:
            av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
            return AVERROR(ENOSYS);
        }
    } else if (f->avctx->bits_per_raw_sample == 10) {
    } else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
        f->packed_at_lsb = 1;
        switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
        case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
        case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
        case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
        default:
            av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
            return AVERROR(ENOSYS);
        }
    } else {
    } else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){
        switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
        case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
        case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
        case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
        default:
            av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
            return AVERROR(ENOSYS);
        }
    }
} else if (f->colorspace == 1) {

@@ -664,6 +671,10 @@ static int read_header(FFV1Context *f)
        av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
        return AVERROR(ENOSYS);
    }
    if (f->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
        return AVERROR(ENOSYS);
    }

    av_dlog(f->avctx, "%d %d %d\n",
            f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);

@@ -899,16 +910,56 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
static int init_thread_copy(AVCodecContext *avctx)
{
    FFV1Context *f = avctx->priv_data;
    int i, ret;

    f->picture.f = NULL;
    f->last_picture.f = NULL;
    f->sample_buffer = NULL;
    f->quant_table_count = 0;
    f->slice_count = 0;

    for (i = 0; i < f->quant_table_count; i++) {
        av_assert0(f->version > 1);
        f->initial_states[i] = av_memdup(f->initial_states[i],
                                         f->context_count[i] * sizeof(*f->initial_states[i]));
    }

    f->picture.f = av_frame_alloc();
    f->last_picture.f = av_frame_alloc();

    if ((ret = ffv1_init_slice_contexts(f)) < 0)
        return ret;

    return 0;
}

static void copy_fields(FFV1Context *fsdst, FFV1Context *fssrc, FFV1Context *fsrc)
{
    fsdst->version = fsrc->version;
    fsdst->minor_version = fsrc->minor_version;
    fsdst->chroma_planes = fsrc->chroma_planes;
    fsdst->chroma_h_shift = fsrc->chroma_h_shift;
    fsdst->chroma_v_shift = fsrc->chroma_v_shift;
    fsdst->transparency = fsrc->transparency;
    fsdst->plane_count = fsrc->plane_count;
    fsdst->ac = fsrc->ac;
    fsdst->colorspace = fsrc->colorspace;

    fsdst->ec = fsrc->ec;
    fsdst->intra = fsrc->intra;
    fsdst->slice_damaged = fssrc->slice_damaged;
    fsdst->key_frame_ok = fsrc->key_frame_ok;

    fsdst->bits_per_raw_sample = fsrc->bits_per_raw_sample;
    fsdst->packed_at_lsb = fsrc->packed_at_lsb;
    fsdst->slice_count = fsrc->slice_count;
    if (fsrc->version<3){
        fsdst->slice_x = fssrc->slice_x;
        fsdst->slice_y = fssrc->slice_y;
        fsdst->slice_width = fssrc->slice_width;
        fsdst->slice_height = fssrc->slice_height;
    }
}

static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    FFV1Context *fsrc = src->priv_data;

@@ -918,36 +969,30 @@ static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
    if (dst == src)
        return 0;

    if (!fdst->picture.f) {
    {
        FFV1Context bak = *fdst;
        memcpy(fdst, fsrc, sizeof(*fdst));

        for (i = 0; i < fdst->quant_table_count; i++) {
            fdst->initial_states[i] = av_malloc(fdst->context_count[i] * sizeof(*fdst->initial_states[i]));
            memcpy(fdst->initial_states[i], fsrc->initial_states[i], fdst->context_count[i] * sizeof(*fdst->initial_states[i]));
        memcpy(fdst->initial_states, bak.initial_states, sizeof(fdst->initial_states));
        memcpy(fdst->slice_context, bak.slice_context , sizeof(fdst->slice_context));
        fdst->picture = bak.picture;
        fdst->last_picture = bak.last_picture;
        for (i = 0; i<fdst->num_h_slices * fdst->num_v_slices; i++) {
            FFV1Context *fssrc = fsrc->slice_context[i];
            FFV1Context *fsdst = fdst->slice_context[i];
            copy_fields(fsdst, fssrc, fsrc);
        }

        fdst->picture.f = av_frame_alloc();
        fdst->last_picture.f = av_frame_alloc();

        if ((ret = ffv1_init_slice_contexts(fdst)) < 0)
            return ret;
        av_assert0(!fdst->plane[0].state);
        av_assert0(!fdst->sample_buffer);
    }

    av_assert1(fdst->slice_count == fsrc->slice_count);

    fdst->key_frame_ok = fsrc->key_frame_ok;

    ff_thread_release_buffer(dst, &fdst->picture);
    if (fsrc->picture.f->data[0]) {
        if ((ret = ff_thread_ref_frame(&fdst->picture, &fsrc->picture)) < 0)
            return ret;
    }
    for (i = 0; i < fdst->slice_count; i++) {
        FFV1Context *fsdst = fdst->slice_context[i];
        FFV1Context *fssrc = fsrc->slice_context[i];

        fsdst->slice_damaged = fssrc->slice_damaged;
    }

    fdst->fsrc = fsrc;
@@ -275,7 +275,7 @@ static av_always_inline int encode_line(FFV1Context *s, int w,
    int run_mode = 0;

    if (s->ac) {
        if (c->bytestream_end - c->bytestream < w * 20) {
        if (c->bytestream_end - c->bytestream < w * 35) {
            av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return AVERROR_INVALIDDATA;
        }
@@ -387,6 +387,10 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
                }
                s->diff_start  = get_bits(&gb, 8);
                s->diff_height = get_bits(&gb, 8);
                if (s->diff_start + s->diff_height > cur_blk_height) {
                    av_log(avctx, AV_LOG_ERROR, "Block parameters invalid\n");
                    return AVERROR_INVALIDDATA;
                }
                av_log(avctx, AV_LOG_DEBUG,
                       "%dx%d diff start %d height %d\n",
                       i, j, s->diff_start, s->diff_height);
@@ -389,7 +389,7 @@ static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
|
||||
return 0;
|
||||
zsize = (src[0] << 8) | src[1]; src += 2;
|
||||
|
||||
if (src_end - src < zsize)
|
||||
if (src_end - src < zsize + (sub_type != 2))
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
ret = uncompress(c->kempf_buf, &dlen, src, zsize);
|
||||
@@ -411,6 +411,8 @@ static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
|
||||
for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
|
||||
for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
|
||||
if (!bits) {
|
||||
if (src >= src_end)
|
||||
return AVERROR_INVALIDDATA;
|
||||
bitbuf = *src++;
|
||||
bits = 8;
|
||||
}
|
||||
@@ -441,8 +443,8 @@ static int g2m_init_buffers(G2MContext *c)
|
||||
int aligned_height;
|
||||
|
||||
if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
|
||||
c->framebuf_stride = FFALIGN(c->width * 3, 16);
|
||||
aligned_height = FFALIGN(c->height, 16);
|
||||
c->framebuf_stride = FFALIGN(c->width + 15, 16) * 3;
|
||||
aligned_height = c->height + 15;
|
||||
av_free(c->framebuf);
|
||||
c->framebuf = av_mallocz(c->framebuf_stride * aligned_height);
|
||||
if (!c->framebuf)
|
||||
@@ -451,7 +453,7 @@ static int g2m_init_buffers(G2MContext *c)
|
||||
if (!c->synth_tile || !c->jpeg_tile ||
|
||||
c->old_tile_w < c->tile_width ||
|
||||
c->old_tile_h < c->tile_height) {
|
||||
c->tile_stride = FFALIGN(c->tile_width * 3, 16);
|
||||
c->tile_stride = FFALIGN(c->tile_width, 16) * 3;
|
||||
aligned_height = FFALIGN(c->tile_height, 16);
|
||||
av_free(c->synth_tile);
|
||||
av_free(c->jpeg_tile);
|
||||
|
@@ -3876,6 +3876,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)

if (h->ref_count[0]) h->er.last_pic = &h->ref_list[0][0];
if (h->ref_count[1]) h->er.next_pic = &h->ref_list[1][0];
h->er.ref_count = h->ref_count[0];

if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
av_log(h->avctx, AV_LOG_DEBUG,

@@ -4267,7 +4268,6 @@ static void er_add_slice(H264Context *h, int startx, int starty,
if (CONFIG_ERROR_RESILIENCE) {
ERContext *er = &h->er;

er->ref_count = h->ref_count[0];
ff_er_add_slice(er, startx, starty, endx, endy, status);
}
}

@@ -71,6 +71,9 @@ void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int
}

static int get_color_type(const AVPixFmtDescriptor *desc) {
if (desc->flags & AV_PIX_FMT_FLAG_PAL)
return FF_COLOR_RGB;

if(desc->nb_components == 1 || desc->nb_components == 2)
return FF_COLOR_GRAY;

@@ -802,7 +802,7 @@ static void truncpasses(Jpeg2000EncoderContext *s, Jpeg2000Tile *tile)
Jpeg2000Cblk *cblk = prec->cblk + cblkno;

cblk->ninclpasses = getcut(cblk, s->lambda,
(int64_t)dwt_norms[codsty->transform == FF_DWT53][bandpos][lev] * (int64_t)band->i_stepsize >> 16);
(int64_t)dwt_norms[codsty->transform == FF_DWT53][bandpos][lev] * (int64_t)band->i_stepsize >> 15);
}
}
}

@@ -863,7 +863,7 @@ static int encode_tile(Jpeg2000EncoderContext *s, Jpeg2000Tile *tile, int tileno
int *ptr = t1.data[y-yy0];
for (x = xx0; x < xx1; x++){
*ptr = (comp->i_data[(comp->coord[0][1] - comp->coord[0][0]) * y + x]);
*ptr = (int64_t)*ptr * (int64_t)(16384 * 65536 / band->i_stepsize) >> 14 - NMSEDEC_FRACBITS;
*ptr = (int64_t)*ptr * (int64_t)(16384 * 65536 / band->i_stepsize) >> 15 - NMSEDEC_FRACBITS;
ptr++;
}
}

@@ -1016,7 +1016,7 @@ static av_cold int j2kenc_init(AVCodecContext *avctx)
}

ff_jpeg2000_init_tier1_luts();

ff_mqc_init_context_tables();
init_luts();

init_quantization(s);

@@ -272,7 +272,7 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
reslevel->log2_prec_height) -
(reslevel->coord[1][0] >> reslevel->log2_prec_height);

reslevel->band = av_malloc_array(reslevel->nbands, sizeof(*reslevel->band));
reslevel->band = av_calloc(reslevel->nbands, sizeof(*reslevel->band));
if (!reslevel->band)
return AVERROR(ENOMEM);

@@ -320,7 +320,7 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
if (!av_codec_is_encoder(avctx->codec))
band->f_stepsize *= 0.5;

band->i_stepsize = band->f_stepsize * (1 << 16);
band->i_stepsize = band->f_stepsize * (1 << 15);

/* computation of tbx_0, tbx_1, tby_0, tby_1
* see ISO/IEC 15444-1:2002 B.5 eq. B-15 and tbl B.1

@@ -368,7 +368,7 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
for (j = 0; j < 2; j++)
band->coord[1][j] = ff_jpeg2000_ceildiv(band->coord[1][j], dy);

band->prec = av_malloc_array(reslevel->num_precincts_x *
band->prec = av_calloc(reslevel->num_precincts_x *
(uint64_t)reslevel->num_precincts_y,
sizeof(*band->prec));
if (!band->prec)

@@ -509,10 +509,12 @@ void ff_jpeg2000_cleanup(Jpeg2000Component *comp, Jpeg2000CodingStyle *codsty)
for (bandno = 0; bandno < reslevel->nbands; bandno++) {
Jpeg2000Band *band = reslevel->band + bandno;
for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++) {
Jpeg2000Prec *prec = band->prec + precno;
av_freep(&prec->zerobits);
av_freep(&prec->cblkincl);
av_freep(&prec->cblk);
if (band->prec) {
Jpeg2000Prec *prec = band->prec + precno;
av_freep(&prec->zerobits);
av_freep(&prec->cblkincl);
av_freep(&prec->cblk);
}
}

av_freep(&band->prec);
@@ -28,6 +28,7 @@
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"

@@ -37,6 +38,7 @@
#define JP2_SIG_TYPE 0x6A502020
#define JP2_SIG_VALUE 0x0D0A870A
#define JP2_CODESTREAM 0x6A703263
#define JP2_HEADER 0x6A703268

#define HAD_COC 0x01
#define HAD_QCC 0x02

@@ -72,6 +74,10 @@ typedef struct Jpeg2000DecoderContext {
int cdx[4], cdy[4];
int precision;
int ncomponents;
int colour_space;
uint32_t palette[256];
int8_t pal8;
int cdef[4];
int tile_width, tile_height;
unsigned numXtiles, numYtiles;
int maxtilelen;

@@ -154,12 +160,74 @@ static int tag_tree_decode(Jpeg2000DecoderContext *s, Jpeg2000TgtNode *node,
return curval;
}

static int pix_fmt_match(enum AVPixelFormat pix_fmt, int components,
int bpc, uint32_t log2_chroma_wh, int pal8)
{
int match = 1;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);

if (desc->nb_components != components) {
return 0;
}

switch (components) {
case 4:
match = match && desc->comp[3].depth_minus1 + 1 >= bpc &&
(log2_chroma_wh >> 14 & 3) == 0 &&
(log2_chroma_wh >> 12 & 3) == 0;
case 3:
match = match && desc->comp[2].depth_minus1 + 1 >= bpc &&
(log2_chroma_wh >> 10 & 3) == desc->log2_chroma_w &&
(log2_chroma_wh >> 8 & 3) == desc->log2_chroma_h;
case 2:
match = match && desc->comp[1].depth_minus1 + 1 >= bpc &&
(log2_chroma_wh >> 6 & 3) == desc->log2_chroma_w &&
(log2_chroma_wh >> 4 & 3) == desc->log2_chroma_h;

case 1:
match = match && desc->comp[0].depth_minus1 + 1 >= bpc &&
(log2_chroma_wh >> 2 & 3) == 0 &&
(log2_chroma_wh & 3) == 0 &&
(desc->flags & AV_PIX_FMT_FLAG_PAL) == pal8 * AV_PIX_FMT_FLAG_PAL;
}
return match;
}

// pix_fmts with lower bpp have to be listed before
// similar pix_fmts with higher bpp.
#define RGB_PIXEL_FORMATS AV_PIX_FMT_PAL8,AV_PIX_FMT_RGB24,AV_PIX_FMT_RGBA,AV_PIX_FMT_RGB48,AV_PIX_FMT_RGBA64
#define GRAY_PIXEL_FORMATS AV_PIX_FMT_GRAY8,AV_PIX_FMT_GRAY8A,AV_PIX_FMT_GRAY16
#define YUV_PIXEL_FORMATS AV_PIX_FMT_YUV410P,AV_PIX_FMT_YUV411P,AV_PIX_FMT_YUVA420P, \
AV_PIX_FMT_YUV420P,AV_PIX_FMT_YUV422P,AV_PIX_FMT_YUVA422P, \
AV_PIX_FMT_YUV440P,AV_PIX_FMT_YUV444P,AV_PIX_FMT_YUVA444P, \
AV_PIX_FMT_YUV420P9,AV_PIX_FMT_YUV422P9,AV_PIX_FMT_YUV444P9, \
AV_PIX_FMT_YUVA420P9,AV_PIX_FMT_YUVA422P9,AV_PIX_FMT_YUVA444P9, \
AV_PIX_FMT_YUV420P10,AV_PIX_FMT_YUV422P10,AV_PIX_FMT_YUV444P10, \
AV_PIX_FMT_YUVA420P10,AV_PIX_FMT_YUVA422P10,AV_PIX_FMT_YUVA444P10, \
AV_PIX_FMT_YUV420P12,AV_PIX_FMT_YUV422P12,AV_PIX_FMT_YUV444P12, \
AV_PIX_FMT_YUV420P14,AV_PIX_FMT_YUV422P14,AV_PIX_FMT_YUV444P14, \
AV_PIX_FMT_YUV420P16,AV_PIX_FMT_YUV422P16,AV_PIX_FMT_YUV444P16, \
AV_PIX_FMT_YUVA420P16,AV_PIX_FMT_YUVA422P16,AV_PIX_FMT_YUVA444P16
#define XYZ_PIXEL_FORMATS AV_PIX_FMT_XYZ12

static const enum AVPixelFormat rgb_pix_fmts[] = {RGB_PIXEL_FORMATS};
static const enum AVPixelFormat gray_pix_fmts[] = {GRAY_PIXEL_FORMATS};
static const enum AVPixelFormat yuv_pix_fmts[] = {YUV_PIXEL_FORMATS};
static const enum AVPixelFormat xyz_pix_fmts[] = {XYZ_PIXEL_FORMATS};
static const enum AVPixelFormat all_pix_fmts[] = {RGB_PIXEL_FORMATS,
GRAY_PIXEL_FORMATS,
YUV_PIXEL_FORMATS,
XYZ_PIXEL_FORMATS};

/* marker segments */
/* get sizes and offsets of image, tiles; number of components */
static int get_siz(Jpeg2000DecoderContext *s)
{
int i;
int ncomponents;
uint32_t log2_chroma_wh = 0;
const enum AVPixelFormat *possible_fmts = NULL;
int possible_fmts_nb = 0;

if (bytestream2_get_bytes_left(&s->g) < 36)
return AVERROR_INVALIDDATA;
@@ -205,13 +273,12 @@ static int get_siz(Jpeg2000DecoderContext *s)
s->sgnd[i] = !!(x & 0x80);
s->cdx[i] = bytestream2_get_byteu(&s->g);
s->cdy[i] = bytestream2_get_byteu(&s->g);
if (s->cdx[i] != 1 || s->cdy[i] != 1) {
avpriv_request_sample(s->avctx,
"CDxy values %d %d for component %d",
s->cdx[i], s->cdy[i], i);
if (!s->cdx[i] || !s->cdy[i])
return AVERROR_INVALIDDATA;
if ( !s->cdx[i] || s->cdx[i] == 3 || s->cdx[i] > 4
|| !s->cdy[i] || s->cdy[i] == 3 || s->cdy[i] > 4) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid sample seperation\n");
return AVERROR_INVALIDDATA;
}
log2_chroma_wh |= s->cdy[i] >> 1 << i * 4 | s->cdx[i] >> 1 << i * 4 + 2;
}

s->numXtiles = ff_jpeg2000_ceildiv(s->width - s->tile_offset_x, s->tile_width);

@@ -242,35 +309,46 @@ static int get_siz(Jpeg2000DecoderContext *s)
s->avctx->height = ff_jpeg2000_ceildivpow2(s->height - s->image_offset_y,
s->reduction_factor);

switch (s->ncomponents) {
case 1:
if (s->precision > 8)
s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
else
s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
break;
case 3:
switch (s->avctx->profile) {
case FF_PROFILE_JPEG2000_DCINEMA_2K:
case FF_PROFILE_JPEG2000_DCINEMA_4K:
/* XYZ color-space for digital cinema profiles */
s->avctx->pix_fmt = AV_PIX_FMT_XYZ12;
if (s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_2K ||
s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_4K) {
possible_fmts = xyz_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(xyz_pix_fmts);
} else {
switch (s->colour_space) {
case 16:
possible_fmts = rgb_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(rgb_pix_fmts);
break;
case 17:
possible_fmts = gray_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(gray_pix_fmts);
break;
case 18:
possible_fmts = yuv_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(yuv_pix_fmts);
break;
default:
if (s->precision > 8)
s->avctx->pix_fmt = AV_PIX_FMT_RGB48;
else
s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
possible_fmts = all_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(all_pix_fmts);
break;
}
break;
case 4:
s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
break;
default:
/* pixel format can not be identified */
s->avctx->pix_fmt = AV_PIX_FMT_NONE;
break;
}
for (i = 0; i < possible_fmts_nb; ++i) {
if (pix_fmt_match(possible_fmts[i], ncomponents, s->precision, log2_chroma_wh, s->pal8)) {
s->avctx->pix_fmt = possible_fmts[i];
break;
}
}
if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
av_log(s->avctx, AV_LOG_ERROR,
"Unknown pix_fmt, profile: %d, colour_space: %d, "
"components: %d, precision: %d, "
"cdx[1]: %d, cdy[1]: %d, cdx[2]: %d, cdy[2]: %d\n",
s->avctx->profile, s->colour_space, ncomponents, s->precision,
ncomponents > 2 ? s->cdx[1] : 0,
ncomponents > 2 ? s->cdy[1] : 0,
ncomponents > 2 ? s->cdx[2] : 0,
ncomponents > 2 ? s->cdy[2] : 0);
}
return 0;
}
@@ -306,6 +384,11 @@ static int get_cox(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c)
return AVERROR_INVALIDDATA;
}

if (c->log2_cblk_width > 6 || c->log2_cblk_height > 6) {
avpriv_request_sample(s->avctx, "cblk size > 64");
return AVERROR_PATCHWELCOME;
}

c->cblk_style = bytestream2_get_byteu(&s->g);
if (c->cblk_style != 0) { // cblk style
av_log(s->avctx, AV_LOG_WARNING, "extra cblk styles %X\n", c->cblk_style);

@@ -947,6 +1030,9 @@ static int decode_cblk(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *codsty,
int bpass_csty_symbol = codsty->cblk_style & JPEG2000_CBLK_BYPASS;
int vert_causal_ctx_csty_symbol = codsty->cblk_style & JPEG2000_CBLK_VSC;

av_assert0(width <= JPEG2000_MAX_CBLKW);
av_assert0(height <= JPEG2000_MAX_CBLKH);

for (y = 0; y < height; y++)
memset(t1->data[y], 0, width * sizeof(**t1->data));

@@ -1024,7 +1110,7 @@ static void dequantization_int(int x, int y, Jpeg2000Cblk *cblk,
int32_t *datap = &comp->i_data[(comp->coord[0][1] - comp->coord[0][0]) * (y + j) + x];
int *src = t1->data[j];
for (i = 0; i < w; ++i)
datap[i] = (src[i] * band->i_stepsize + (1 << 15)) >> 16;
datap[i] = (src[i] * band->i_stepsize + (1 << 14)) >> 15;
}
}

@@ -1157,6 +1243,13 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
if (tile->codsty[0].mct)
mct_decode(s, tile);

if (s->cdef[0] < 0) {
for (x = 0; x < s->ncomponents; x++)
s->cdef[x] = x + 1;
if ((s->ncomponents & 1) == 0)
s->cdef[s->ncomponents-1] = 0;
}

if (s->precision <= 8) {
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000Component *comp = tile->comp + compno;

@@ -1165,14 +1258,21 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
int32_t *i_datap = comp->i_data;
int cbps = s->cbps[compno];
int w = tile->comp[compno].coord[0][1] - s->image_offset_x;
int planar = !!picture->data[2];
int pixelsize = planar ? 1 : s->ncomponents;
int plane = 0;

if (planar)
plane = s->cdef[compno] ? s->cdef[compno]-1 : (s->ncomponents-1);

y = tile->comp[compno].coord[1][0] - s->image_offset_y;
line = picture->data[0] + y * picture->linesize[0];
line = picture->data[plane] + y * picture->linesize[plane];
for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) {
uint8_t *dst;

x = tile->comp[compno].coord[0][0] - s->image_offset_x;
dst = line + x * s->ncomponents + compno;
dst = line + x * pixelsize + compno*!planar;

if (codsty->transform == FF_DWT97) {
for (; x < w; x += s->cdx[compno]) {

@@ -1181,7 +1281,7 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
val = av_clip(val, 0, (1 << cbps) - 1);
*dst = val << (8 - cbps);
datap++;
dst += s->ncomponents;
dst += pixelsize;
}
} else {
for (; x < w; x += s->cdx[compno]) {

@@ -1190,10 +1290,10 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
val = av_clip(val, 0, (1 << cbps) - 1);
*dst = val << (8 - cbps);
i_datap++;
dst += s->ncomponents;
dst += pixelsize;
}
}
line += picture->linesize[0];
line += picture->linesize[plane];
}
}
} else {

@@ -1205,14 +1305,20 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
uint16_t *linel;
int cbps = s->cbps[compno];
int w = tile->comp[compno].coord[0][1] - s->image_offset_x;
int planar = !!picture->data[2];
int pixelsize = planar ? 1 : s->ncomponents;
int plane = 0;

if (planar)
plane = s->cdef[compno] ? s->cdef[compno]-1 : (s->ncomponents-1);

y = tile->comp[compno].coord[1][0] - s->image_offset_y;
linel = (uint16_t *)picture->data[0] + y * (picture->linesize[0] >> 1);
linel = (uint16_t *)picture->data[plane] + y * (picture->linesize[plane] >> 1);
for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) {
uint16_t *dst;

x = tile->comp[compno].coord[0][0] - s->image_offset_x;
dst = linel + (x * s->ncomponents + compno);
dst = linel + (x * pixelsize + compno*!planar);
if (codsty->transform == FF_DWT97) {
for (; x < w; x += s-> cdx[compno]) {
int val = lrintf(*datap) + (1 << (cbps - 1));

@@ -1221,7 +1327,7 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
/* align 12 bit values in little-endian mode */
*dst = val << (16 - cbps);
datap++;
dst += s->ncomponents;
dst += pixelsize;
}
} else {
for (; x < w; x += s-> cdx[compno]) {

@@ -1231,10 +1337,10 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
/* align 12 bit values in little-endian mode */
*dst = val << (16 - cbps);
i_datap++;
dst += s->ncomponents;
dst += pixelsize;
}
}
linel += picture->linesize[0] >> 1;
linel += picture->linesize[plane] >> 1;
}
}
}
@@ -1386,6 +1492,104 @@ static int jp2_find_codestream(Jpeg2000DecoderContext *s)
atom = bytestream2_get_be32u(&s->g);
if (atom == JP2_CODESTREAM) {
found_codestream = 1;
} else if (atom == JP2_HEADER &&
bytestream2_get_bytes_left(&s->g) >= atom_size &&
atom_size >= 16) {
uint32_t atom2_size, atom2;
atom_size -= 8;
do {
atom2_size = bytestream2_get_be32u(&s->g);
atom2 = bytestream2_get_be32u(&s->g);
atom_size -= 8;
if (atom2_size < 8 || atom2_size - 8 > atom_size)
break;
atom2_size -= 8;
if (atom2 == JP2_CODESTREAM) {
return 1;
} else if (atom2 == MKBETAG('c','o','l','r') && atom2_size >= 7) {
int method = bytestream2_get_byteu(&s->g);
bytestream2_skipu(&s->g, 2);
atom_size -= 3;
atom2_size -= 3;
if (method == 1) {
s->colour_space = bytestream2_get_be32u(&s->g);
atom_size -= 4;
atom2_size -= 4;
}
bytestream2_skipu(&s->g, atom2_size);
atom_size -= atom2_size;
} else if (atom2 == MKBETAG('p','c','l','r') && atom2_size >= 6) {
int i, size, colour_count, colour_channels, colour_depth[3];
uint32_t r, g, b;
colour_count = bytestream2_get_be16u(&s->g);
colour_channels = bytestream2_get_byteu(&s->g);
// FIXME: Do not ignore channel_sign
colour_depth[0] = (bytestream2_get_byteu(&s->g) & 0x7f) + 1;
colour_depth[1] = (bytestream2_get_byteu(&s->g) & 0x7f) + 1;
colour_depth[2] = (bytestream2_get_byteu(&s->g) & 0x7f) + 1;
atom_size -= 6;
atom2_size -= 6;
size = (colour_depth[0] + 7 >> 3) * colour_count +
(colour_depth[1] + 7 >> 3) * colour_count +
(colour_depth[2] + 7 >> 3) * colour_count;
if (colour_count > 256 ||
colour_channels != 3 ||
colour_depth[0] > 16 ||
colour_depth[1] > 16 ||
colour_depth[2] > 16 ||
atom2_size < size) {
avpriv_request_sample(s->avctx, "Unknown palette");
bytestream2_skipu(&s->g, atom2_size);
atom_size -= atom2_size;
continue;
}
s->pal8 = 1;
for (i = 0; i < colour_count; i++) {
if (colour_depth[0] <= 8) {
r = bytestream2_get_byteu(&s->g) << 8 - colour_depth[0];
r |= r >> colour_depth[0];
} else {
r = bytestream2_get_be16u(&s->g) >> colour_depth[0] - 8;
}
if (colour_depth[1] <= 8) {
g = bytestream2_get_byteu(&s->g) << 8 - colour_depth[1];
r |= r >> colour_depth[1];
} else {
g = bytestream2_get_be16u(&s->g) >> colour_depth[1] - 8;
}
if (colour_depth[2] <= 8) {
b = bytestream2_get_byteu(&s->g) << 8 - colour_depth[2];
r |= r >> colour_depth[2];
} else {
b = bytestream2_get_be16u(&s->g) >> colour_depth[2] - 8;
}
s->palette[i] = 0xffu << 24 | r << 16 | g << 8 | b;
}
atom_size -= size;
atom2_size -= size;
bytestream2_skipu(&s->g, atom2_size);
atom_size -= atom2_size;
} else if (atom2 == MKBETAG('c','d','e','f') && atom2_size >= 2 &&
bytestream2_get_bytes_left(&s->g) >= atom2_size) {
int n = bytestream2_get_be16u(&s->g);
atom_size -= 2;
atom2_size -= 2;
for (; n>0; n--) {
int cn = bytestream2_get_be16(&s->g);
int av_unused typ = bytestream2_get_be16(&s->g);
int asoc = bytestream2_get_be16(&s->g);
if (cn < 4 || asoc < 4)
s->cdef[cn] = asoc;
atom_size -= 6;
atom2_size -= 6;
}
bytestream2_skipu(&s->g, atom2_size);
} else {
bytestream2_skipu(&s->g, atom2_size);
atom_size -= atom2_size;
}
} while (atom_size >= 8);
bytestream2_skipu(&s->g, atom_size);
} else {
if (bytestream2_get_bytes_left(&s->g) < atom_size - 8)
return 0;

@@ -1410,6 +1614,7 @@ static int jpeg2000_decode_frame(AVCodecContext *avctx, void *data,
s->avctx = avctx;
bytestream2_init(&s->g, avpkt->data, avpkt->size);
s->curtileno = -1;
memset(s->cdef, -1, sizeof(s->cdef));

if (bytestream2_get_bytes_left(&s->g) < 2) {
ret = AVERROR_INVALIDDATA;

@@ -1456,6 +1661,9 @@ static int jpeg2000_decode_frame(AVCodecContext *avctx, void *data,

*got_frame = 1;

if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
memcpy(picture->data[1], s->palette, 256 * sizeof(uint32_t));

return bytestream2_tell(&s->g);

end:

@@ -1466,6 +1674,7 @@ end:
static void jpeg2000_init_static_data(AVCodec *codec)
{
ff_jpeg2000_init_tier1_luts();
ff_mqc_init_context_tables();
}

#define OFFSET(x) offsetof(Jpeg2000DecoderContext, x)
@@ -107,7 +107,7 @@ static int kmvc_decode_intra_8x8(KmvcContext * ctx, int w, int h)
val = bytestream2_get_byte(&ctx->g);
mx = val & 0xF;
my = val >> 4;
if ((l0x-mx) + 320*(l0y-my) < 0 || (l0x-mx) + 320*(l0y-my) > 316*196) {
if ((l0x-mx) + 320*(l0y-my) < 0 || (l0x-mx) + 320*(l0y-my) > 320*197 - 4) {
av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
return AVERROR_INVALIDDATA;
}

@@ -132,7 +132,7 @@ static int kmvc_decode_intra_8x8(KmvcContext * ctx, int w, int h)
val = bytestream2_get_byte(&ctx->g);
mx = val & 0xF;
my = val >> 4;
if ((l1x-mx) + 320*(l1y-my) < 0 || (l1x-mx) + 320*(l1y-my) > 318*198) {
if ((l1x-mx) + 320*(l1y-my) < 0 || (l1x-mx) + 320*(l1y-my) > 320*199 - 2) {
av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
return AVERROR_INVALIDDATA;
}

@@ -207,7 +207,7 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, int w, int h)
val = bytestream2_get_byte(&ctx->g);
mx = (val & 0xF) - 8;
my = (val >> 4) - 8;
if ((l0x+mx) + 320*(l0y+my) < 0 || (l0x+mx) + 320*(l0y+my) > 318*198) {
if ((l0x+mx) + 320*(l0y+my) < 0 || (l0x+mx) + 320*(l0y+my) > 320*197 - 4) {
av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
return AVERROR_INVALIDDATA;
}

@@ -232,7 +232,7 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, int w, int h)
val = bytestream2_get_byte(&ctx->g);
mx = (val & 0xF) - 8;
my = (val >> 4) - 8;
if ((l1x+mx) + 320*(l1y+my) < 0 || (l1x+mx) + 320*(l1y+my) > 318*198) {
if ((l1x+mx) + 320*(l1y+my) < 0 || (l1x+mx) + 320*(l1y+my) > 320*199 - 2) {
av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
return AVERROR_INVALIDDATA;
}

@@ -780,6 +780,12 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int p
int resync_mb_y = 0;
int resync_mb_x = 0;

if (s->nb_components != 3 && s->nb_components != 4)
return AVERROR_INVALIDDATA;
if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
return AVERROR_INVALIDDATA;

s->restart_count = s->restart_interval;

av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size,

@@ -1088,7 +1094,7 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
s->dsp.clear_block(s->block);
if (decode_block(s, s->block, i,
s->dc_index[i], s->ac_index[i],
s->quant_matrixes[s->quant_index[c]]) < 0) {
s->quant_matrixes[s->quant_sindex[i]]) < 0) {
av_log(s->avctx, AV_LOG_ERROR,
"error y=%d x=%d\n", mb_y, mb_x);
return AVERROR_INVALIDDATA;

@@ -1101,9 +1107,9 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
int16_t *block = s->blocks[c][block_idx];
if (Ah)
block[0] += get_bits1(&s->gb) *
s->quant_matrixes[s->quant_index[c]][0] << Al;
s->quant_matrixes[s->quant_sindex[i]][0] << Al;
else if (decode_dc_progressive(s, block, i, s->dc_index[i],
s->quant_matrixes[s->quant_index[c]],
s->quant_matrixes[s->quant_sindex[i]],
Al) < 0) {
av_log(s->avctx, AV_LOG_ERROR,
"error y=%d x=%d\n", mb_y, mb_x);

@@ -1136,7 +1142,7 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
uint8_t *data = s->picture.data[c];
int linesize = s->linesize[c];
int last_scan = 0;
int16_t *quant_matrix = s->quant_matrixes[s->quant_index[c]];
int16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];

av_assert0(ss>=0 && Ah>=0 && Al>=0);
if (se < ss || se > 63) {

@@ -1231,6 +1237,11 @@ int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
&& nb_components == 3 && s->nb_components == 3 && i)
index = 3 - i;

s->quant_sindex[i] = s->quant_index[index];
s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
s->h_scount[i] = s->h_count[index];
s->v_scount[i] = s->v_count[index];

if(nb_components == 3 && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
index = (i+2)%3;
if(nb_components == 1 && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)

@@ -1238,10 +1249,6 @@ int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,

s->comp_index[i] = index;

s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
s->h_scount[i] = s->h_count[index];
s->v_scount[i] = s->v_count[index];

s->dc_index[i] = get_bits(&s->gb, 4);
s->ac_index[i] = get_bits(&s->gb, 4);
@@ -84,6 +84,7 @@ typedef struct MJpegDecodeContext {
int nb_blocks[MAX_COMPONENTS];
int h_scount[MAX_COMPONENTS];
int v_scount[MAX_COMPONENTS];
int quant_sindex[MAX_COMPONENTS];
int h_max, v_max; /* maximum h and v counts */
int quant_index[4]; /* quant table index for each component */
int last_dc[MAX_COMPONENTS]; /* last DEQUANTIZED dc (XXX: am I right to do that ?) */

@@ -2130,7 +2130,8 @@ static int decode_chunks(AVCodecContext *avctx,
buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
if (start_code > 0x1ff) {
if (!skip_frame) {
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)) {
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
!avctx->hwaccel) {
int i;
av_assert0(avctx->thread_count > 1);

@@ -2194,7 +2195,8 @@ static int decode_chunks(AVCodecContext *avctx,
s2->intra_dc_precision= 3;
s2->intra_matrix[0]= 1;
}
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) && s->slice_count) {
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
!avctx->hwaccel && s->slice_count) {
int i;

avctx->execute(avctx, slice_decode_thread,

@@ -2369,7 +2371,8 @@ static int decode_chunks(AVCodecContext *avctx,
break;
}

if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)) {
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
!avctx->hwaccel) {
int threshold = (s2->mb_height * s->slice_count +
s2->slice_context_count / 2) /
s2->slice_context_count;

@@ -92,14 +92,9 @@ uint16_t ff_mqc_qe [2 * 47];
uint8_t ff_mqc_nlps[2 * 47];
uint8_t ff_mqc_nmps[2 * 47];

void ff_mqc_init_contexts(MqcState *mqc)
void ff_mqc_init_context_tables(void)
{
int i;
memset(mqc->cx_states, 0, sizeof(mqc->cx_states));
mqc->cx_states[MQC_CX_UNI] = 2 * 46;
mqc->cx_states[MQC_CX_RL] = 2 * 3;
mqc->cx_states[0] = 2 * 4;

for (i = 0; i < 47; i++) {
ff_mqc_qe[2 * i] =
ff_mqc_qe[2 * i + 1] = cx_states[i].qe;

@@ -110,3 +105,11 @@ void ff_mqc_init_contexts(MqcState *mqc)
ff_mqc_nmps[2 * i + 1] = 2 * cx_states[i].nmps + 1;
}
}

void ff_mqc_init_contexts(MqcState *mqc)
{
memset(mqc->cx_states, 0, sizeof(mqc->cx_states));
mqc->cx_states[MQC_CX_UNI] = 2 * 46;
mqc->cx_states[MQC_CX_RL] = 2 * 3;
mqc->cx_states[0] = 2 * 4;
}

@@ -78,6 +78,11 @@ int ff_mqc_decode(MqcState *mqc, uint8_t *cxstate);

/* common */

/**
* MQ-coder Initialize context tables (QE, NLPS, NMPS)
*/
void ff_mqc_init_context_tables(void);

/**
* MQ-coder context initialisations.
* @param mqc MQ-coder context

@@ -237,8 +237,10 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
if(next == END_NOT_FOUND){
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);

if(!new_buffer)
if(!new_buffer) {
pc->index = 0;
return AVERROR(ENOMEM);
}
pc->buffer = new_buffer;
memcpy(&pc->buffer[pc->index], *buf, *buf_size);
pc->index += *buf_size;

@@ -251,9 +253,11 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
/* append to buffer */
if(pc->index){
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);

if(!new_buffer)
if(!new_buffer) {
pc->overread_index =
pc->index = 0;
return AVERROR(ENOMEM);
}
pc->buffer = new_buffer;
if (next > -FF_INPUT_BUFFER_PADDING_SIZE)
memcpy(&pc->buffer[pc->index], *buf,

@@ -31,7 +31,7 @@
static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
{
long i;
for (i = 0; i <= w - sizeof(long); i += sizeof(long)) {
for (i = 0; i <= w - (int)sizeof(long); i += sizeof(long)) {
long a = *(long *)(src1 + i);
long b = *(long *)(src2 + i);
*(long *)(dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);

@@ -163,6 +163,8 @@ int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s)
if (s->maxval >= 256) {
if (avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
if (s->maxval != 65535)
avctx->pix_fmt = AV_PIX_FMT_GRAY16;
} else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
} else if (avctx->pix_fmt == AV_PIX_FMT_YUV420P && s->maxval < 65536) {
@@ -819,7 +819,8 @@ static int synthfilt_build_sb_samples(QDM2Context *q, GetBitContext *gb,
int type34_first;
float type34_div = 0;
float type34_predictor;
float samples[10], sign_bits[16];
float samples[10];
int sign_bits[16] = {0};

if (length == 0) {
// If no data use noise

@@ -85,7 +85,7 @@ static void rpza_decode_stream(RpzaContext *s)
unsigned short *pixels = (unsigned short *)s->frame.data[0];

int row_ptr = 0;
int pixel_ptr = 0;
int pixel_ptr = -4;
int block_ptr;
int pixel_x, pixel_y;
int total_blocks;

@@ -141,6 +141,7 @@ static void rpza_decode_stream(RpzaContext *s)
colorA = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
while (n_blocks--) {
ADVANCE_BLOCK()
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++){

@@ -149,7 +150,6 @@ static void rpza_decode_stream(RpzaContext *s)
}
block_ptr += row_inc;
}
ADVANCE_BLOCK();
}
break;

@@ -188,6 +188,7 @@ static void rpza_decode_stream(RpzaContext *s)
if (s->size - stream_ptr < n_blocks * 4)
return;
while (n_blocks--) {
ADVANCE_BLOCK();
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
index = s->buf[stream_ptr++];

@@ -198,7 +199,6 @@ static void rpza_decode_stream(RpzaContext *s)
}
block_ptr += row_inc;
}
ADVANCE_BLOCK();
}
break;

@@ -206,6 +206,7 @@ static void rpza_decode_stream(RpzaContext *s)
case 0x00:
if (s->size - stream_ptr < 16)
return;
ADVANCE_BLOCK();
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++){

@@ -219,7 +220,6 @@ static void rpza_decode_stream(RpzaContext *s)
}
block_ptr += row_inc;
}
ADVANCE_BLOCK();
break;

/* Unknown opcode */

@@ -58,7 +58,8 @@ static int expand_rle_row(SgiState *s, uint8_t *out_buf,
}

/* Check for buffer overflow. */
if(out_buf + pixelstride * (count-1) >= out_end) return -1;
if (out_end - out_buf <= pixelstride * (count - 1))
return -1;

if (pixel & 0x80) {
while (count--) {

@@ -204,7 +204,8 @@ static const char *read_ts(const char *buf, int *ts_start, int *ts_end,
"%*[ ]X1:%u X2:%u Y1:%u Y2:%u",
&hs, &ms, &ss, ts_start, &he, &me, &se, ts_end,
x1, x2, y1, y2);
buf += strcspn(buf, "\n") + 1;
buf += strcspn(buf, "\n");
buf += !!*buf;
if (c >= 8) {
*ts_start = 100*(ss + 60*(ms + 60*hs)) + *ts_start/10;
*ts_end = 100*(se + 60*(me + 60*he)) + *ts_end /10;

@@ -945,14 +945,14 @@ static av_cold int decode_init(AVCodecContext *avctx)
if (!l->Y1_base || !l->Y2_base || !l->U1_base ||
!l->V1_base || !l->U2_base || !l->V2_base ||
!l->last || !l->clast) {
av_freep(l->Y1_base);
av_freep(l->Y2_base);
av_freep(l->U1_base);
av_freep(l->U2_base);
av_freep(l->V1_base);
av_freep(l->V2_base);
av_freep(l->last);
av_freep(l->clast);
av_freep(&l->Y1_base);
av_freep(&l->Y2_base);
av_freep(&l->U1_base);
av_freep(&l->U2_base);
av_freep(&l->V1_base);
av_freep(&l->V2_base);
av_freep(&l->last);
av_freep(&l->clast);
return AVERROR(ENOMEM);
}
l->Y1 = l->Y1_base + l->y_stride * 4 + 4;
@@ -116,7 +116,7 @@ static int tta_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
PutBitContext pb;
int ret, i, out_bytes, cur_chan = 0, res = 0, samples = 0;

if ((ret = ff_alloc_packet2(avctx, avpkt, frame->nb_samples * 2 * s->bps)) < 0)
if ((ret = ff_alloc_packet2(avctx, avpkt, frame->nb_samples * 2 * avctx->channels * s->bps)) < 0)
return ret;
init_put_bits(&pb, avpkt->data, avpkt->size);

@@ -512,6 +512,10 @@ static int wma_decode_block(WMACodecContext *s)
coef escape coding */
total_gain = 1;
for(;;) {
if (get_bits_left(&s->gb) < 7) {
av_log(s->avctx, AV_LOG_ERROR, "total_gain overread\n");
return AVERROR_INVALIDDATA;
}
a = get_bits(&s->gb, 7);
total_gain += a;
if (a != 127)

@@ -1045,9 +1045,10 @@ static void aw_parse_coords(WMAVoiceContext *s, GetBitContext *gb,
* @param gb bit I/O context
* @param block_idx block index in frame [0, 1]
* @param fcb structure containing fixed codebook vector info
* @return -1 on error, 0 otherwise
*/
static void aw_pulse_set2(WMAVoiceContext *s, GetBitContext *gb,
int block_idx, AMRFixed *fcb)
static int aw_pulse_set2(WMAVoiceContext *s, GetBitContext *gb,
int block_idx, AMRFixed *fcb)
{
uint16_t use_mask_mem[9]; // only 5 are used, rest is padding
uint16_t *use_mask = use_mask_mem + 2;

@@ -1109,7 +1110,7 @@ static void aw_pulse_set2(WMAVoiceContext *s, GetBitContext *gb,
else if (use_mask[2]) idx = 0x2F;
else if (use_mask[3]) idx = 0x3F;
else if (use_mask[4]) idx = 0x4F;
else return;
else return -1;
idx -= av_log2_16bit(use_mask[idx >> 4]);
}
if (use_mask[idx >> 4] & (0x8000 >> (idx & 15))) {

@@ -1126,6 +1127,7 @@ static void aw_pulse_set2(WMAVoiceContext *s, GetBitContext *gb,
/* set offset for next block, relative to start of that block */
n = (MAX_FRAMESIZE / 2 - start_off) % fcb->pitch_lag;
s->aw_next_pulse_off_cache = n ? fcb->pitch_lag - n : 0;
return 0;
}

/**

@@ -1288,7 +1290,18 @@ static void synth_block_fcb_acb(WMAVoiceContext *s, GetBitContext *gb,
* (fixed) codebook pulses of the speech signal. */
if (frame_desc->fcb_type == FCB_TYPE_AW_PULSES) {
aw_pulse_set1(s, gb, block_idx, &fcb);
aw_pulse_set2(s, gb, block_idx, &fcb);
if (aw_pulse_set2(s, gb, block_idx, &fcb)) {
/* Conceal the block with silence and return.
* Skip the correct amount of bits to read the next
* block from the correct offset. */
int r_idx = pRNG(s->frame_cntr, block_idx, size);

for (n = 0; n < size; n++)
excitation[n] =
wmavoice_std_codebook[r_idx + n] * s->silence_gain;
skip_bits(gb, 7 + 1);
return;
}
} else /* FCB_TYPE_EXC_PULSES */ {
int offset_nbits = 5 - frame_desc->log_n_blocks;

@@ -929,6 +929,9 @@ static int v4l2_read_header(AVFormatContext *s1)
if (codec_id == AV_CODEC_ID_RAWVIDEO)
st->codec->codec_tag =
avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
else if (codec_id == AV_CODEC_ID_H264) {
st->need_parsing = AVSTREAM_PARSE_HEADERS;
}
if (desired_format == V4L2_PIX_FMT_YVU420)
st->codec->codec_tag = MKTAG('Y', 'V', '1', '2');
else if (desired_format == V4L2_PIX_FMT_YVU410)
@@ -25,42 +25,48 @@
* sample format and channel layout conversion audio filter
*/

#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libswresample/swresample.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"

typedef struct {
const AVClass *class;
enum AVSampleFormat out_sample_fmt;
int64_t out_chlayout;
struct SwrContext *swr;
char *format_str;
char *channel_layout_str;
} AConvertContext;

#define OFFSET(x) offsetof(AConvertContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption aconvert_options[] = {
{ "sample_fmt", "", OFFSET(format_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "channel_layout", "", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ NULL },
};

AVFILTER_DEFINE_CLASS(aconvert);

static av_cold int init(AVFilterContext *ctx)
{
AConvertContext *aconvert = ctx->priv;
char *arg, *ptr = NULL;
int ret = 0;
char *args = av_strdup(NULL);

av_log(ctx, AV_LOG_WARNING, "This filter is deprecated, use aformat instead\n");

aconvert->out_sample_fmt = AV_SAMPLE_FMT_NONE;
aconvert->out_chlayout = 0;

if ((arg = av_strtok(args, ":", &ptr)) && strcmp(arg, "auto")) {
if ((ret = ff_parse_sample_format(&aconvert->out_sample_fmt, arg, ctx)) < 0)
goto end;
}
if ((arg = av_strtok(NULL, ":", &ptr)) && strcmp(arg, "auto")) {
if ((ret = ff_parse_channel_layout(&aconvert->out_chlayout, arg, ctx)) < 0)
goto end;
}

end:
av_freep(&args);
if (aconvert->format_str && strcmp(aconvert->format_str, "auto") &&
(ret = ff_parse_sample_format(&aconvert->out_sample_fmt, aconvert->format_str, ctx)) < 0)
return ret;
if (aconvert->channel_layout_str && strcmp(aconvert->channel_layout_str, "auto"))
return ff_parse_channel_layout(&aconvert->out_chlayout, aconvert->channel_layout_str, ctx);
return ret;
}

@@ -181,6 +187,7 @@ AVFilter avfilter_af_aconvert = {
.name = "aconvert",
.description = NULL_IF_CONFIG_SMALL("Convert the input audio to sample_fmt:channel_layout."),
.priv_size = sizeof(AConvertContext),
.priv_class = &aconvert_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,

@@ -114,6 +114,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
AVFilterLink *outlink = inlink->dst->outputs[0];
int16_t *taps, *endin, *in, *out;
AVFrame *outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples);
int len;

if (!outsamples) {
av_frame_free(&insamples);

@@ -125,16 +126,20 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
out = (int16_t *)outsamples->data[0];
in = (int16_t *)insamples ->data[0];

len = FFMIN(NUMTAPS, 2*insamples->nb_samples);
// copy part of new input and process with saved input
memcpy(taps+NUMTAPS, in, NUMTAPS * sizeof(*taps));
out = scalarproduct(taps, taps + NUMTAPS, out);
memcpy(taps+NUMTAPS, in, len * sizeof(*taps));
out = scalarproduct(taps, taps + len, out);

// process current input
endin = in + insamples->nb_samples * 2 - NUMTAPS;
scalarproduct(in, endin, out);
if (2*insamples->nb_samples >= NUMTAPS ){
endin = in + insamples->nb_samples * 2 - NUMTAPS;
scalarproduct(in, endin, out);

// save part of input for next round
memcpy(taps, endin, NUMTAPS * sizeof(*taps));
// save part of input for next round
memcpy(taps, endin, NUMTAPS * sizeof(*taps));
} else
memmove(taps, taps + 2*insamples->nb_samples, NUMTAPS * sizeof(*taps));

av_frame_free(&insamples);
return ff_filter_frame(outlink, outsamples);
@@ -313,13 +313,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
av_frame_copy_props(out, in);

for (plane = 0; in->data[plane] && plane < 4; plane++)
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
hblur(out->data[plane], out->linesize[plane],
in ->data[plane], in ->linesize[plane],
w[plane], h[plane], s->radius[plane], s->power[plane],
s->temp);

for (plane = 0; in->data[plane] && plane < 4; plane++)
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
vblur(out->data[plane], out->linesize[plane],
out->data[plane], out->linesize[plane],
w[plane], h[plane], s->radius[plane], s->power[plane],

@@ -236,7 +236,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
if (!sar.num)
sar.num = sar.den = 1;

for (plane = 0; plane < 4 && in->data[plane]; plane++) {
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
int hsub = plane == 1 || plane == 2 ? hsub0 : 0;
int vsub = plane == 1 || plane == 2 ? vsub0 : 0;

@@ -608,7 +608,7 @@ static void copy_fields(const FieldMatchContext *fm, AVFrame *dst,
const AVFrame *src, int field)
{
int plane;
for (plane = 0; plane < 4 && src->data[plane]; plane++)
for (plane = 0; plane < 4 && src->data[plane] && src->linesize[plane]; plane++)
av_image_copy_plane(dst->data[plane] + field*dst->linesize[plane], dst->linesize[plane] << 1,
src->data[plane] + field*src->linesize[plane], src->linesize[plane] << 1,
get_width(fm, src, plane), get_height(fm, src, plane) / 2);

@@ -106,7 +106,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
"picture will move %s one line\n",
s->dst_tff ? "up" : "down");
h = frame->height;
for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
line_step = frame->linesize[plane];
line_size = s->line_size[plane];
data = frame->data[plane];

@@ -189,7 +189,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
}

/* now wait for the next timestamp */
if (buf->pts == AV_NOPTS_VALUE) {
if (buf->pts == AV_NOPTS_VALUE || av_fifo_size(s->fifo) <= 0) {
return write_to_fifo(s->fifo, buf);
}

@@ -200,7 +200,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_frame_copy_props(out, in);
}

for (p = 0; p < 4 && in->data[p]; p++) {
for (p = 0; p < 4 && in->data[p] && in->linesize[p]; p++) {
int w = inlink->w;
int h = inlink->h;
int r = s->radius;

@@ -90,7 +90,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL)
memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);

for (plane = 0; plane < 4 && in->data[plane]; plane++) {
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
const int width = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->w, s->hsub) : inlink->w;
const int height = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, s->vsub) : inlink->h;
step = s->max_step[plane];

@@ -150,7 +150,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
av_frame_copy_props(outpic, inpic);
outpic->interlaced_frame = 0;

for (plane = 0; inpic->data[plane] && plane < 4; plane++) {
for (plane = 0; plane < 4 && inpic->data[plane] && inpic->linesize[plane]; plane++) {
h = plane == 0 ? inlink->h : FF_CEIL_RSHIFT(inlink->h, kerndeint->vsub);
bwidth = kerndeint->tmp_bwidth[plane];

@@ -304,7 +304,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
} else {
/* planar */
for (plane = 0; plane < 4 && in->data[plane]; plane++) {
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
int h = FF_CEIL_RSHIFT(inlink->h, vsub);

@@ -214,7 +214,7 @@ static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
frame->width = w;
frame->height = h;

for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
int hsub = s->draw.hsub[plane];
int vsub = s->draw.vsub[plane];
frame->data[plane] += (s->x >> hsub) * s->draw.pixelstep[plane] +

@@ -311,7 +311,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
int i;

out = in;
for (i = 0; i < 4 && out->data[i]; i++) {
for (i = 0; i < 4 && out->data[i] && out->linesize[i]; i++) {
int hsub = s->draw.hsub[i];
int vsub = s->draw.vsub[i];
out->data[i] -= (s->x >> hsub) * s->draw.pixelstep[i] +

@@ -33,7 +33,7 @@ static int config_props_output(AVFilterLink *outlink)
SeparateFieldsContext *sf = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];

sf->nb_planes = av_pix_fmt_count_planes(inlink->format);;
sf->nb_planes = av_pix_fmt_count_planes(inlink->format);

if (inlink->h & 1) {
av_log(ctx, AV_LOG_ERROR, "height must be even\n");

@@ -38,7 +38,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
uint32_t plane_checksum[4] = {0}, checksum = 0;
int i, plane, vsub = desc->log2_chroma_h;

for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
int64_t linesize = av_image_get_linesize(frame->format, frame->width, plane);
uint8_t *data = frame->data[plane];
int h = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;

@@ -68,7 +68,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
av_get_picture_type_char(frame->pict_type),
checksum, plane_checksum[0]);

for (plane = 1; plane < 4 && frame->data[plane]; plane++)
for (plane = 1; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]);
av_log(ctx, AV_LOG_INFO, "]\n");

@@ -239,7 +239,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
} else {
int plane;

for (plane = 0; plane < 4 && in->data[plane]; plane++) {
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
uint8_t *dst = out->data[plane];
const uint8_t *src = in ->data[plane];
const float *fmap = s->fmap;
@@ -1328,7 +1328,7 @@ static int avi_read_idx1(AVFormatContext *s, int size)
st = s->streams[index];
ast = st->priv_data;

if(first_packet && first_packet_pos && len) {
if (first_packet && first_packet_pos) {
data_offset = first_packet_pos - pos;
first_packet = 0;
}

@@ -241,6 +241,8 @@ int ffurl_alloc(URLContext **puc, const char *filename, int flags,
return url_alloc_for_protocol (puc, up, filename, flags, int_cb);
}
*puc = NULL;
if (!strcmp("https", proto_str))
av_log(NULL, AV_LOG_WARNING, "https protocol not found, recompile with openssl or gnutls enabled.\n");
return AVERROR_PROTOCOL_NOT_FOUND;
}

@@ -36,6 +36,7 @@
#include <windows.h>
#undef EXTERN_C
#include "compat/avisynth/avisynth_c.h"
#include "compat/avisynth/avisynth_c_25.h"
#define AVISYNTH_LIB "avisynth"
#else
#include <dlfcn.h>

@@ -62,19 +63,20 @@
typedef struct {
void *library;
#define AVSC_DECLARE_FUNC(name) name##_func name
AVSC_DECLARE_FUNC(avs_bit_blt);
AVSC_DECLARE_FUNC(avs_clip_get_error);
AVSC_DECLARE_FUNC(avs_create_script_environment);
AVSC_DECLARE_FUNC(avs_delete_script_environment);
AVSC_DECLARE_FUNC(avs_get_error);
AVSC_DECLARE_FUNC(avs_clip_get_error);
AVSC_DECLARE_FUNC(avs_invoke);
AVSC_DECLARE_FUNC(avs_release_value);
AVSC_DECLARE_FUNC(avs_get_video_info);
AVSC_DECLARE_FUNC(avs_take_clip);
AVSC_DECLARE_FUNC(avs_release_clip);
AVSC_DECLARE_FUNC(avs_bit_blt);
AVSC_DECLARE_FUNC(avs_get_audio);
AVSC_DECLARE_FUNC(avs_get_error);
AVSC_DECLARE_FUNC(avs_get_frame);
AVSC_DECLARE_FUNC(avs_get_version);
AVSC_DECLARE_FUNC(avs_get_video_info);
AVSC_DECLARE_FUNC(avs_invoke);
AVSC_DECLARE_FUNC(avs_release_clip);
AVSC_DECLARE_FUNC(avs_release_value);
AVSC_DECLARE_FUNC(avs_release_video_frame);
AVSC_DECLARE_FUNC(avs_take_clip);
#undef AVSC_DECLARE_FUNC
} AviSynthLibrary;

@@ -127,19 +129,20 @@ static av_cold int avisynth_load_library(void) {
if(!continue_on_fail && !avs_library->name) \
goto fail; \
}
LOAD_AVS_FUNC(avs_bit_blt, 0);
LOAD_AVS_FUNC(avs_clip_get_error, 0);
LOAD_AVS_FUNC(avs_create_script_environment, 0);
LOAD_AVS_FUNC(avs_delete_script_environment, 0);
LOAD_AVS_FUNC(avs_get_error, 1); // New to AviSynth 2.6
LOAD_AVS_FUNC(avs_clip_get_error, 0);
LOAD_AVS_FUNC(avs_invoke, 0);
LOAD_AVS_FUNC(avs_release_value, 0);
LOAD_AVS_FUNC(avs_get_video_info, 0);
LOAD_AVS_FUNC(avs_take_clip, 0);
LOAD_AVS_FUNC(avs_release_clip, 0);
LOAD_AVS_FUNC(avs_bit_blt, 0);
LOAD_AVS_FUNC(avs_get_audio, 0);
LOAD_AVS_FUNC(avs_get_error, 1); // New to AviSynth 2.6
LOAD_AVS_FUNC(avs_get_frame, 0);
LOAD_AVS_FUNC(avs_get_version, 0);
LOAD_AVS_FUNC(avs_get_video_info, 0);
LOAD_AVS_FUNC(avs_invoke, 0);
LOAD_AVS_FUNC(avs_release_clip, 0);
LOAD_AVS_FUNC(avs_release_value, 0);
LOAD_AVS_FUNC(avs_release_video_frame, 0);
LOAD_AVS_FUNC(avs_take_clip, 0);
#undef LOAD_AVS_FUNC

atexit(avisynth_atexit_handler);

@@ -469,9 +472,20 @@ static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, int dis
for (i = 0; i < avs->n_planes; i++) {
plane = avs->planes[i];
src_p = avs_get_read_ptr_p(frame, plane);
pitch = avs_get_pitch_p(frame, plane);

#ifdef _WIN32
if (avs_library->avs_get_version(avs->clip) == 3) {
rowsize = avs_get_row_size_p_25(frame, plane);
planeheight = avs_get_height_p_25(frame, plane);
} else {
rowsize = avs_get_row_size_p(frame, plane);
planeheight = avs_get_height_p(frame, plane);
}
#else
rowsize = avs_get_row_size_p(frame, plane);
planeheight = avs_get_height_p(frame, plane);
pitch = avs_get_pitch_p(frame, plane);
#endif

// Flip RGB video.
if (avs_is_rgb24(avs->vi) || avs_is_rgb(avs->vi)) {
@@ -34,6 +34,7 @@ static int dts_probe(AVProbeData *p)
uint32_t state = -1;
int markers[3] = {0};
int sum, max;
int64_t diff = 0;

buf = p->buf;

@@ -54,12 +55,16 @@ static int dts_probe(AVProbeData *p)
if (state == DCA_MARKER_14B_LE)
if ((bytestream_get_be16(&bufp) & 0xF0FF) == 0xF007)
markers[2]++;

if (buf - p->buf >= 4)
diff += FFABS(AV_RL16(buf) - AV_RL16(buf-4));
}
sum = markers[0] + markers[1] + markers[2];
max = markers[1] > markers[0];
max = markers[2] > markers[max] ? 2 : max;
if (markers[max] > 3 && p->buf_size / markers[max] < 32*1024 &&
markers[max] * 4 > sum * 3)
markers[max] * 4 > sum * 3 &&
diff / p->buf_size > 200)
return AVPROBE_SCORE_EXTENSION + 1;

return 0;
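The DTS probe hunk adds an activity check: besides counting sync markers, it now sums the absolute difference between 16-bit samples four bytes apart and requires diff / buf_size > 200, so near-constant data (for example digital silence) no longer probes as DTS. A standalone sketch of that metric (hypothetical sample_activity helper, not the probe itself):

```c
#include <stdint.h>
#include <stdlib.h>

/* Mean absolute difference between little-endian 16-bit reads taken 4
 * bytes apart, in the spirit of the added "diff" accumulator; silence
 * or DC input yields a value far below a threshold such as 200. */
static int64_t sample_activity(const uint8_t *buf, size_t size)
{
    int64_t diff = 0;
    for (size_t i = 4; i + 1 < size; i++) {
        int cur  = buf[i]     | (buf[i + 1] << 8);  /* like AV_RL16(buf)     */
        int prev = buf[i - 4] | (buf[i - 3] << 8);  /* like AV_RL16(buf - 4) */
        diff += llabs((int64_t)cur - prev);
    }
    return size ? diff / (int64_t)size : 0;
}
```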
@@ -27,6 +27,7 @@
#include "os_support.h"
#include "url.h"
#include "libavutil/opt.h"
#include "libavutil/bprint.h"

#define CONTROL_BUFFER_SIZE 1024
#define CREDENTIALS_BUFFER_SIZE 128
@@ -42,8 +43,6 @@ typedef enum {
typedef struct {
const AVClass *class;
URLContext *conn_control; /**< Control connection */
int conn_control_block_flag; /**< Controls block/unblock mode of data connection */
AVIOInterruptCB conn_control_interrupt_cb; /**< Controls block/unblock mode of data connection */
URLContext *conn_data; /**< Data connection, NULL when not connected */
uint8_t control_buffer[CONTROL_BUFFER_SIZE]; /**< Control connection buffer */
uint8_t *control_buf_ptr, *control_buf_end;
@@ -77,18 +76,10 @@ static const AVClass ftp_context_class = {
.version = LIBAVUTIL_VERSION_INT,
};

static int ftp_conn_control_block_control(void *data)
{
FTPContext *s = data;
return s->conn_control_block_flag;
}

static int ftp_getc(FTPContext *s)
{
int len;
if (s->control_buf_ptr >= s->control_buf_end) {
if (s->conn_control_block_flag)
return AVERROR_EXIT;
len = ffurl_read(s->conn_control, s->control_buffer, CONTROL_BUFFER_SIZE);
if (len < 0) {
return len;
@@ -106,12 +97,10 @@ static int ftp_get_line(FTPContext *s, char *line, int line_size)
{
int ch;
char *q = line;
int ori_block_flag = s->conn_control_block_flag;

for (;;) {
ch = ftp_getc(s);
if (ch < 0) {
s->conn_control_block_flag = ori_block_flag;
return ch;
}
if (ch == '\n') {
@@ -119,35 +108,14 @@ static int ftp_get_line(FTPContext *s, char *line, int line_size)
if (q > line && q[-1] == '\r')
q--;
*q = '\0';

s->conn_control_block_flag = ori_block_flag;
return 0;
} else {
s->conn_control_block_flag = 0; /* line need to be finished */
if ((q - line) < line_size - 1)
*q++ = ch;
}
}
}

static int ftp_flush_control_input(FTPContext *s)
{
char buf[CONTROL_BUFFER_SIZE];
int err, ori_block_flag = s->conn_control_block_flag;

s->conn_control_block_flag = 1;
do {
err = ftp_get_line(s, buf, sizeof(buf));
} while (!err);

s->conn_control_block_flag = ori_block_flag;

if (err < 0 && err != AVERROR_EXIT)
return err;

return 0;
}

/*
* This routine returns ftp server response code.
* Server may send more than one response for a certain command, following priorities are used:
@@ -156,49 +124,47 @@ static int ftp_flush_control_input(FTPContext *s)
*/
static int ftp_status(FTPContext *s, char **line, const int response_codes[])
{
int err, i, result = 0, pref_code_found = 0, wait_count = 100;
int err, i, dash = 0, result = 0, code_found = 0;
char buf[CONTROL_BUFFER_SIZE];
AVBPrint line_buffer;

/* Set blocking mode */
s->conn_control_block_flag = 0;
for (;;) {
if (line)
av_bprint_init(&line_buffer, 0, AV_BPRINT_SIZE_AUTOMATIC);

while (!code_found || dash) {
if ((err = ftp_get_line(s, buf, sizeof(buf))) < 0) {
if (err == AVERROR_EXIT) {
if (!pref_code_found && wait_count--) {
av_usleep(10000);
continue;
}
}
return result;
if (line)
av_bprint_finalize(&line_buffer, NULL);
return err;
}

av_log(s, AV_LOG_DEBUG, "%s\n", buf);

if (!pref_code_found) {
if (strlen(buf) < 3)
if (strlen(buf) < 4)
continue;

err = 0;
for (i = 0; i < 3; ++i) {
if (buf[i] < '0' || buf[i] > '9')
continue;
err *= 10;
err += buf[i] - '0';
}
dash = !!(buf[3] == '-');

err = 0;
for (i = 0; i < 3; ++i) {
if (buf[i] < '0' || buf[i] > '9')
continue;
err *= 10;
err += buf[i] - '0';
}

for (i = 0; response_codes[i]; ++i) {
if (err == response_codes[i]) {
/* first code received. Now get all lines in non blocking mode */
s->conn_control_block_flag = 1;
pref_code_found = 1;
result = err;
if (line)
*line = av_strdup(buf);
break;
}
for (i = 0; response_codes[i]; ++i) {
if (err == response_codes[i]) {
if (line)
av_bprintf(&line_buffer, "%s", buf);
code_found = 1;
result = err;
break;
}
}
}

if (line)
av_bprint_finalize(&line_buffer, line);
return result;
}

@@ -207,12 +173,6 @@ static int ftp_send_command(FTPContext *s, const char *command,
{
int err;

/* Flush control connection input to get rid of non relevant responses if any */
if ((err = ftp_flush_control_input(s)) < 0)
return err;

/* send command in blocking mode */
s->conn_control_block_flag = 0;
if ((err = ffurl_write(s->conn_control, command, strlen(command))) < 0)
return err;
if (!err)
@@ -434,8 +394,6 @@ static int ftp_connect_control_connection(URLContext *h)
FTPContext *s = h->priv_data;
const int connect_codes[] = {220, 0};

s->conn_control_block_flag = 0;

if (!s->conn_control) {
ff_url_join(buf, sizeof(buf), "tcp", NULL,
s->hostname, s->server_control_port, NULL);
@@ -444,7 +402,7 @@ static int ftp_connect_control_connection(URLContext *h)
av_dict_set(&opts, "timeout", opts_format, 0);
} /* if option is not given, don't pass it and let tcp use its own default */
err = ffurl_open(&s->conn_control, buf, AVIO_FLAG_READ_WRITE,
&s->conn_control_interrupt_cb, &opts);
&h->interrupt_callback, &opts);
av_dict_free(&opts);
if (err < 0) {
av_log(h, AV_LOG_ERROR, "Cannot open control connection\n");
@@ -489,7 +447,7 @@ static int ftp_connect_data_connection(URLContext *h)
snprintf(opts_format, sizeof(opts_format), "%d", s->rw_timeout);
av_dict_set(&opts, "timeout", opts_format, 0);
} /* if option is not given, don't pass it and let tcp use its own default */
err = ffurl_open(&s->conn_data, buf, AVIO_FLAG_READ_WRITE,
err = ffurl_open(&s->conn_data, buf, h->flags,
&h->interrupt_callback, &opts);
av_dict_free(&opts);
if (err < 0)
@@ -553,8 +511,6 @@ static int ftp_open(URLContext *h, const char *url, int flags)
s->state = DISCONNECTED;
s->filesize = -1;
s->position = 0;
s->conn_control_interrupt_cb.opaque = s;
s->conn_control_interrupt_cb.callback = ftp_conn_control_block_control;

av_url_split(proto, sizeof(proto),
s->credencials, sizeof(s->credencials),
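Much of the ftp.c rewrite above replaces the blocking/non-blocking reply juggling with a simpler rule from RFC 959: every reply line starts with a 3-digit code, and a '-' right after the code marks a multi-line reply that continues until a matching terminating line, which is what the new dash = !!(buf[3] == '-') test tracks. A small hedged sketch of that line classification (hypothetical parse_reply_line helper, not the ftp_status() code itself):

```c
#include <string.h>

/* Extract the 3-digit reply code and the multi-line marker from one FTP
 * control line; returns 0 on success, -1 for a continuation/garbage line. */
static int parse_reply_line(const char *line, int *code, int *dash)
{
    if (strlen(line) < 4)
        return -1;
    *code = 0;
    for (int i = 0; i < 3; i++) {
        if (line[i] < '0' || line[i] > '9')
            return -1;
        *code = *code * 10 + (line[i] - '0');
    }
    *dash = line[3] == '-';
    return 0;
}
```

A caller would keep reading lines while either no expected code has been seen yet or the last code line carried the dash, mirroring the new while (!code_found || dash) loop.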
@@ -63,7 +63,7 @@ static int jacosub_probe(AVProbeData *p)
return AVPROBE_SCORE_EXTENSION + 1;
return 0;
}
ptr += strcspn(ptr, "\n") + 1;
ptr += ff_subtitles_next_line(ptr);
}
return 0;
}

@@ -257,6 +257,7 @@ static int lxf_read_header(AVFormatContext *s)
st->codec->bit_rate = 1000000 * ((video_params >> 14) & 0xFF);
st->codec->codec_tag = video_params & 0xF;
st->codec->codec_id = ff_codec_get_id(lxf_tags, st->codec->codec_tag);
st->need_parsing = AVSTREAM_PARSE_HEADERS;

av_log(s, AV_LOG_DEBUG, "record: %x = %i-%02i-%02i\n",
record_date, 1900 + (record_date & 0x7F), (record_date >> 7) & 0xF,

@@ -1737,8 +1737,10 @@ static int matroska_read_header(AVFormatContext *s)
avio_wl16(&b, 1);
avio_wl16(&b, track->audio.channels);
avio_wl16(&b, track->audio.bitdepth);
if (track->audio.out_samplerate < 0 || track->audio.out_samplerate > INT_MAX)
return AVERROR_INVALIDDATA;
avio_wl32(&b, track->audio.out_samplerate);
avio_wl32(&b, matroska->ctx->duration * track->audio.out_samplerate);
avio_wl32(&b, av_rescale((matroska->duration * matroska->time_scale), track->audio.out_samplerate, AV_TIME_BASE * 1000));
} else if (codec_id == AV_CODEC_ID_RV10 || codec_id == AV_CODEC_ID_RV20 ||
codec_id == AV_CODEC_ID_RV30 || codec_id == AV_CODEC_ID_RV40) {
extradata_offset = 26;
@@ -2364,6 +2366,7 @@ static int matroska_parse_block(MatroskaDemuxContext *matroska, uint8_t *data,
uint32_t *lace_size = NULL;
int n, flags, laces = 0;
uint64_t num;
int trust_default_duration = 1;

if ((n = matroska_ebmlnum_uint(matroska, data, size, &num)) < 0) {
av_log(matroska->ctx, AV_LOG_ERROR, "EBML block data error\n");
@@ -2418,7 +2421,15 @@ static int matroska_parse_block(MatroskaDemuxContext *matroska, uint8_t *data,
if (res)
goto end;

if (!block_duration)
if (track->audio.samplerate == 8000) {
// If this is needed for more codecs, then add them here
if (st->codec->codec_id == AV_CODEC_ID_AC3) {
if(track->audio.samplerate != st->codec->sample_rate || !st->codec->frame_size)
trust_default_duration = 0;
}
}

if (!block_duration && trust_default_duration)
block_duration = track->default_duration * laces / matroska->time_scale;

if (cluster_time != (uint64_t)-1 && (block_time >= 0 || cluster_time >= -block_time))

@@ -27,6 +27,7 @@
#include "isom.h"
#include "matroska.h"
#include "riff.h"
#include "subtitles.h"
#include "wv.h"

#include "libavutil/avstring.h"
@@ -872,6 +873,17 @@ static int mkv_write_tag(AVFormatContext *s, AVDictionary *m, unsigned int eleme
return 0;
}

static int mkv_check_tag(AVDictionary *m)
{
AVDictionaryEntry *t = NULL;

while ((t = av_dict_get(m, "", t, AV_DICT_IGNORE_SUFFIX)))
if (av_strcasecmp(t->key, "title") && av_strcasecmp(t->key, "stereo_mode"))
return 1;

return 0;
}

static int mkv_write_tags(AVFormatContext *s)
{
ebml_master tags = {0};
@@ -879,7 +891,7 @@ static int mkv_write_tags(AVFormatContext *s)

ff_metadata_conv_ctx(s, ff_mkv_metadata_conv, NULL);

if (av_dict_get(s->metadata, "", NULL, AV_DICT_IGNORE_SUFFIX)) {
if (mkv_check_tag(s->metadata)) {
ret = mkv_write_tag(s, s->metadata, 0, 0, &tags);
if (ret < 0) return ret;
}
@@ -887,7 +899,7 @@ static int mkv_write_tags(AVFormatContext *s)
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];

if (!av_dict_get(st->metadata, "", 0, AV_DICT_IGNORE_SUFFIX))
if (!mkv_check_tag(st->metadata))
continue;

ret = mkv_write_tag(s, st->metadata, MATROSKA_ID_TAGTARGETS_TRACKUID, i + 1, &tags);
@@ -897,7 +909,7 @@ static int mkv_write_tags(AVFormatContext *s)
for (i = 0; i < s->nb_chapters; i++) {
AVChapter *ch = s->chapters[i];

if (!av_dict_get(ch->metadata, "", NULL, AV_DICT_IGNORE_SUFFIX))
if (!mkv_check_tag(ch->metadata))
continue;

ret = mkv_write_tag(s, ch->metadata, MATROSKA_ID_TAGTARGETS_CHAPTERUID, ch->id, &tags);
@@ -1317,7 +1329,7 @@ static int srt_get_duration(uint8_t **buf)
s_hsec += 1000*s_sec; e_hsec += 1000*e_sec;
duration = e_hsec - s_hsec;
}
*buf += strcspn(*buf, "\n") + 1;
*buf += ff_subtitles_next_line(*buf);
}
return duration;
}
@@ -1606,7 +1618,6 @@ const AVCodecTag additional_audio_tags[] = {
};

const AVCodecTag additional_video_tags[] = {
{ AV_CODEC_ID_PRORES, 0xFFFFFFFF },
{ AV_CODEC_ID_RV10, 0xFFFFFFFF },
{ AV_CODEC_ID_RV20, 0xFFFFFFFF },
{ AV_CODEC_ID_RV30, 0xFFFFFFFF },
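The new mkv_check_tag() in the Matroska muxer hunk exists because "title" and "stereo_mode" are written as dedicated elements elsewhere, so a dictionary containing only those keys should not produce a Tags element. The same AVDictionary iteration pattern, shown as a small self-contained usage sketch (the demo keys are made up):

```c
#include <stdio.h>
#include <libavutil/dict.h>
#include <libavutil/avstring.h>

/* Walk every dictionary entry and report whether anything other than
 * "title" or "stereo_mode" is present, mirroring mkv_check_tag(). */
int main(void)
{
    AVDictionary *m = NULL;
    AVDictionaryEntry *t = NULL;
    int has_real_tag = 0;

    av_dict_set(&m, "title", "demo", 0);
    av_dict_set(&m, "ARTIST", "someone", 0);

    while ((t = av_dict_get(m, "", t, AV_DICT_IGNORE_SUFFIX)))
        if (av_strcasecmp(t->key, "title") && av_strcasecmp(t->key, "stereo_mode"))
            has_real_tag = 1;

    printf("needs a Tags element: %d\n", has_real_tag);
    av_dict_free(&m);
    return 0;
}
```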
@@ -47,7 +47,7 @@ static int microdvd_probe(AVProbeData *p)
sscanf(ptr, "{%*d}{%*d}%c", &c) != 1 &&
sscanf(ptr, "{DEFAULT}{}%c", &c) != 1)
return 0;
ptr += strcspn(ptr, "\n") + 1;
ptr += ff_subtitles_next_line(ptr);
}
return AVPROBE_SCORE_MAX;
}

@@ -3672,6 +3672,9 @@ static int mov_write_header(AVFormatContext *s)
}else{
track->sample_size = (av_get_bits_per_sample(st->codec->codec_id) >> 3) * st->codec->channels;
}
if (st->codec->codec_id == AV_CODEC_ID_ILBC) {
track->audio_vbr = 1;
}
if (track->mode != MODE_MOV &&
track->enc->codec_id == AV_CODEC_ID_MP3 && track->timescale < 16000) {
av_log(s, AV_LOG_ERROR, "track %d: muxing mp3 at %dhz is not supported\n",

@@ -725,6 +725,7 @@ static int vobsub_read_header(AVFormatContext *s)
st->id = stream_id;
st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
st->codec->codec_id = AV_CODEC_ID_DVD_SUBTITLE;
avpriv_set_pts_info(st, 64, 1, 1000);
av_dict_set(&st->metadata, "language", id, 0);
av_log(s, AV_LOG_DEBUG, "IDX stream[%d] id=%s\n", stream_id, id);
header_parsed = 1;
@@ -889,6 +890,21 @@ static int vobsub_read_seek(AVFormatContext *s, int stream_index,
int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
MpegDemuxContext *vobsub = s->priv_data;

/* Rescale requested timestamps based on the first stream (timebase is the
* same for all subtitles stream within a .idx/.sub). Rescaling is done just
* like in avformat_seek_file(). */
if (stream_index == -1 && s->nb_streams != 1) {
AVRational time_base = s->streams[0]->time_base;
ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
min_ts = av_rescale_rnd(min_ts, time_base.den,
time_base.num * (int64_t)AV_TIME_BASE,
AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
max_ts = av_rescale_rnd(max_ts, time_base.den,
time_base.num * (int64_t)AV_TIME_BASE,
AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
}

return ff_subtitles_queue_seek(&vobsub->q, s, stream_index,
min_ts, ts, max_ts, flags);
}
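The vobsub_read_seek() hunk rescales the request from AV_TIME_BASE to the first stream's time base the way avformat_seek_file() does: ts with av_rescale_q(), min_ts rounded up and max_ts rounded down so the window can only shrink, and AV_ROUND_PASS_MINMAX so INT64_MIN/INT64_MAX sentinels pass through unchanged. The same pattern as a tiny reusable sketch (hypothetical rescale_seek_window helper):

```c
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

/* Convert a seek window expressed in AV_TIME_BASE units into a stream
 * time base; rounding directions keep the window conservative and
 * AV_ROUND_PASS_MINMAX leaves the +-INT64_MAX sentinels untouched. */
static void rescale_seek_window(AVRational time_base,
                                int64_t *min_ts, int64_t *ts, int64_t *max_ts)
{
    *ts     = av_rescale_q(*ts, AV_TIME_BASE_Q, time_base);
    *min_ts = av_rescale_rnd(*min_ts, time_base.den,
                             time_base.num * (int64_t)AV_TIME_BASE,
                             AV_ROUND_UP   | AV_ROUND_PASS_MINMAX);
    *max_ts = av_rescale_rnd(*max_ts, time_base.den,
                             time_base.num * (int64_t)AV_TIME_BASE,
                             AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
}
```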
@@ -43,7 +43,7 @@ static int mpl2_probe(AVProbeData *p)
if (sscanf(ptr, "[%"SCNd64"][%"SCNd64"]%c", &start, &end, &c) != 3 &&
sscanf(ptr, "[%"SCNd64"][]%c", &start, &c) != 2)
return 0;
ptr += strcspn(ptr, "\r\n") + 1;
ptr += ff_subtitles_next_line(ptr);
if (ptr >= ptr_end)
return 0;
}

@@ -37,11 +37,16 @@ static int mpsub_probe(AVProbeData *p)
const char *ptr_end = p->buf + p->buf_size;

while (ptr < ptr_end) {
int inc;

if (!memcmp(ptr, "FORMAT=TIME", 11))
return AVPROBE_SCORE_EXTENSION;
if (!memcmp(ptr, "FORMAT=", 7))
return AVPROBE_SCORE_EXTENSION / 3;
ptr += strcspn(ptr, "\n") + 1;
inc = ff_subtitles_next_line(ptr);
if (!inc)
break;
ptr += inc;
}
return 0;
}

@@ -75,6 +75,7 @@ typedef struct {
int temporal_reordering;
AVRational aspect_ratio; ///< display aspect ratio
int closed_gop; ///< gop is closed, used in mpeg-2 frame parsing
int video_bit_rate;
} MXFStreamContext;

typedef struct {
@@ -975,13 +976,14 @@ static void mxf_write_cdci_desc(AVFormatContext *s, AVStream *st)
static void mxf_write_mpegvideo_desc(AVFormatContext *s, AVStream *st)
{
AVIOContext *pb = s->pb;
MXFStreamContext *sc = st->priv_data;
int profile_and_level = (st->codec->profile<<4) | st->codec->level;

mxf_write_cdci_common(s, st, mxf_mpegvideo_descriptor_key, 8+5);

// bit rate
mxf_write_local_tag(pb, 4, 0x8000);
avio_wb32(pb, st->codec->bit_rate);
avio_wb32(pb, sc->video_bit_rate);

// profile and level
mxf_write_local_tag(pb, 1, 0x8007);
@@ -1704,14 +1706,15 @@ static int mxf_write_header(AVFormatContext *s)
ret = av_timecode_init(&mxf->tc, rate, 0, 0, s);
if (ret < 0)
return ret;
sc->video_bit_rate = st->codec->bit_rate ? st->codec->bit_rate : st->codec->rc_max_rate;
if (s->oformat == &ff_mxf_d10_muxer) {
if (st->codec->bit_rate == 50000000) {
if (sc->video_bit_rate == 50000000) {
if (mxf->time_base.den == 25) sc->index = 3;
else sc->index = 5;
} else if (st->codec->bit_rate == 40000000) {
} else if (sc->video_bit_rate == 40000000) {
if (mxf->time_base.den == 25) sc->index = 7;
else sc->index = 9;
} else if (st->codec->bit_rate == 30000000) {
} else if (sc->video_bit_rate == 30000000) {
if (mxf->time_base.den == 25) sc->index = 11;
else sc->index = 13;
} else {
@@ -1720,7 +1723,7 @@ static int mxf_write_header(AVFormatContext *s)
}

mxf->edit_unit_byte_count = KAG_SIZE; // system element
mxf->edit_unit_byte_count += 16 + 4 + (uint64_t)st->codec->bit_rate *
mxf->edit_unit_byte_count += 16 + 4 + (uint64_t)sc->video_bit_rate *
mxf->time_base.num / (8*mxf->time_base.den);
mxf->edit_unit_byte_count += klv_fill_size(mxf->edit_unit_byte_count);
mxf->edit_unit_byte_count += 16 + 4 + 4 + spf->samples_per_frame[0]*8*4;
@@ -1854,7 +1857,8 @@ static void mxf_write_d10_video_packet(AVFormatContext *s, AVStream *st, AVPacke
{
MXFContext *mxf = s->priv_data;
AVIOContext *pb = s->pb;
int packet_size = (uint64_t)st->codec->bit_rate*mxf->time_base.num /
MXFStreamContext *sc = st->priv_data;
int packet_size = (uint64_t)sc->video_bit_rate*mxf->time_base.num /
(8*mxf->time_base.den); // frame size
int pad;

@@ -136,11 +136,15 @@ ff_vorbis_comment(AVFormatContext * as, AVDictionary **m, const uint8_t *buf, in

if (!pict) {
av_log(as, AV_LOG_WARNING, "out-of-memory error. Skipping cover art block.\n");
av_freep(&tt);
av_freep(&ct);
continue;
}
if ((ret = av_base64_decode(pict, ct, vl)) > 0)
ret = ff_flac_parse_picture(as, pict, ret);
av_freep(&pict);
av_freep(&tt);
av_freep(&ct);
if (ret < 0) {
av_log(as, AV_LOG_WARNING, "Failed to parse cover art block.\n");
continue;

@@ -233,10 +233,11 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt)
p->current_frame_block++;
}

size = p->video_size - p->frames_offset_table[p->current_frame];
if (size < 1)
if (p->frames_offset_table[p->current_frame] >= p->video_size)
return AVERROR_INVALIDDATA;

size = p->video_size - p->frames_offset_table[p->current_frame];

if (av_new_packet(pkt, size) < 0)
return AVERROR(ENOMEM);

@@ -37,12 +37,14 @@ static int srt_probe(AVProbeData *p)
if (AV_RB24(ptr) == 0xEFBBBF)
ptr += 3; /* skip UTF-8 BOM */

while (*ptr == '\r' || *ptr == '\n')
ptr++;
for (i=0; i<2; i++) {
if ((num == i || num + 1 == i)
&& sscanf(ptr, "%*d:%*2d:%*2d%*1[,.]%*3d --> %*d:%*2d:%*2d%*1[,.]%3d", &v) == 1)
return AVPROBE_SCORE_MAX;
num = atoi(ptr);
ptr += strcspn(ptr, "\n") + 1;
ptr += ff_subtitles_next_line(ptr);
}
return 0;
}
@@ -63,12 +65,10 @@ static int64_t get_pts(const char **buf, int *duration,
int64_t start = (hh1*3600LL + mm1*60LL + ss1) * 1000LL + ms1;
int64_t end = (hh2*3600LL + mm2*60LL + ss2) * 1000LL + ms2;
*duration = end - start;
*buf += strcspn(*buf, "\n");
*buf += !!**buf;
*buf += ff_subtitles_next_line(*buf);
return start;
}
*buf += strcspn(*buf, "\n");
*buf += !!**buf;
*buf += ff_subtitles_next_line(*buf);
}
return AV_NOPTS_VALUE;
}

@@ -109,7 +109,8 @@ int ff_subtitles_queue_seek(FFDemuxSubtitlesQueue *q, AVFormatContext *s, int st
for (i = 0; i < q->nb_subs; i++) {
int64_t pts = q->subs[i].pts;
uint64_t ts_diff = FFABS(pts - ts);
if (pts >= min_ts && pts <= max_ts && ts_diff < min_ts_diff) {
if ((stream_index == -1 || q->subs[i].stream_index == stream_index) &&
pts >= min_ts && pts <= max_ts && ts_diff < min_ts_diff) {
min_ts_diff = ts_diff;
idx = i;
}
@@ -119,13 +120,25 @@ int ff_subtitles_queue_seek(FFDemuxSubtitlesQueue *q, AVFormatContext *s, int st
/* look back in the latest subtitles for overlapping subtitles */
ts_selected = q->subs[idx].pts;
for (i = idx - 1; i >= 0; i--) {
if (q->subs[i].duration <= 0)
int64_t pts = q->subs[i].pts;
if (q->subs[i].duration <= 0 ||
(stream_index != -1 && q->subs[i].stream_index != stream_index))
continue;
if (q->subs[i].pts > ts_selected - q->subs[i].duration)
if (pts >= min_ts && pts > ts_selected - q->subs[i].duration)
idx = i;
else
break;
}

/* If the queue is used to store multiple subtitles streams (like with
* VobSub) and the stream index is not specified, we need to make sure
* to focus on the smallest file position offset for a same timestamp;
* queue is ordered by pts and then filepos, so we can take the first
* entry for a given timestamp. */
if (stream_index == -1)
while (idx > 0 && q->subs[idx - 1].pts == q->subs[idx].pts)
idx--;

q->current_sub_idx = idx;
}
return 0;

@@ -96,4 +96,17 @@ const char *ff_smil_get_attr_ptr(const char *s, const char *attr);
*/
void ff_subtitles_read_chunk(AVIOContext *pb, AVBPrint *buf);

/**
* Get the number of characters to increment to jump to the next line, or to
* the end of the string.
*/
static av_always_inline int ff_subtitles_next_line(const char *ptr)
{
int n = strcspn(ptr, "\n");
ptr += n;
if (*ptr == '\n')
n++;
return n;
}

#endif /* AVFORMAT_SUBTITLES_H */
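ff_subtitles_next_line(), added above, is what the subtitle probe functions in this set switch to: unlike the old strcspn(ptr, "\n") + 1 idiom it only consumes the newline when one is actually present, so it cannot step past the terminating NUL on a final line without a line break. A small usage sketch (plain libc re-implementation for illustration, hypothetical next_line name):

```c
#include <stdio.h>
#include <string.h>

/* Same logic as the ff_subtitles_next_line() helper added above. */
static int next_line(const char *ptr)
{
    int n = strcspn(ptr, "\n");
    if (ptr[n] == '\n')
        n++;
    return n;
}

int main(void)
{
    const char *buf = "line one\nline two";   /* no trailing newline */
    const char *ptr = buf;

    while (*ptr) {
        printf("line starts at offset %td\n", ptr - buf);
        /* strcspn(ptr, "\n") + 1 would jump one byte past the NUL on the
         * final line; next_line() stops exactly on it */
        ptr += next_line(ptr);
    }
    return 0;
}
```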
@@ -2783,6 +2783,10 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
} else {
pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
&ic->packet_buffer_end);
if (!pkt) {
ret = AVERROR(ENOMEM);
goto find_stream_info_err;
}
if ((ret = av_dup_packet(pkt)) < 0)
goto find_stream_info_err;
}
@@ -2855,6 +2859,8 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)

if (!st->info->duration_error)
st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
if (!st->info->duration_error)
return AVERROR(ENOMEM);

// if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
// av_log(NULL, AV_LOG_ERROR, "%f\n", dts);

@@ -671,7 +671,7 @@ static int w64_read_header(AVFormatContext *s)
uint32_t count, chunk_size, i;

start = avio_tell(pb);
end = start + size;
end = start + FFALIGN(size, INT64_C(8)) - 24;
count = avio_rl32(pb);

for (i = 0; i < count; i++) {
@@ -697,7 +697,7 @@ static int w64_read_header(AVFormatContext *s)
avio_skip(pb, end - avio_tell(pb));
} else {
av_log(s, AV_LOG_DEBUG, "unknown guid: "FF_PRI_GUID"\n", FF_ARG_GUID(guid));
avio_skip(pb, size - 24);
avio_skip(pb, FFALIGN(size, INT64_C(8)) - 24);
}
}
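The w64 hunk stops trusting the raw chunk size when skipping data: as I understand the Wave64 layout, each declared chunk size includes its own 24-byte GUID+size header and chunks are padded to 8-byte boundaries, so the bytes left to skip after reading the header are the size rounded up to 8 minus 24, which is what FFALIGN(size, INT64_C(8)) - 24 computes. A hedged restatement of that arithmetic (hypothetical helper name):

```c
#include <stdint.h>

/* Round the declared chunk size up to the 8-byte boundary and subtract
 * the 24-byte header that has already been read. */
static int64_t w64_bytes_to_skip(uint64_t chunk_size)
{
    uint64_t aligned = (chunk_size + 7) & ~(uint64_t)7;
    return (int64_t)(aligned - 24);
}
```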
@@ -191,7 +191,7 @@ int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
{
void **ptrptr = ptr;
*ptrptr = av_realloc_f(*ptrptr, nmemb, size);
if (!*ptrptr && !(nmemb && size))
if (!*ptrptr && nmemb && size)
return AVERROR(ENOMEM);
return 0;
}
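The av_reallocp_array() fix inverts a botched condition: ENOMEM must be reported only when the pointer came back NULL for a non-zero request, not the other way around. Typical caller code, as a short hedged sketch (hypothetical grow_table wrapper):

```c
#include <libavutil/mem.h>
#include <libavutil/error.h>

/* Grow (or shrink) a heap array in place; on failure the old buffer has
 * been freed by av_realloc_f() and *table is NULL, and with the fix above
 * AVERROR(ENOMEM) is reported only when a non-zero request actually failed. */
static int grow_table(int **table, size_t new_count)
{
    int ret = av_reallocp_array(table, new_count, sizeof(**table));
    if (ret < 0)
        return ret;
    return 0;
}
```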
@@ -82,9 +82,6 @@ static int even(int64_t layout){
}

static int clean_layout(SwrContext *s, int64_t layout){
if((layout & AV_CH_LAYOUT_STEREO_DOWNMIX) == AV_CH_LAYOUT_STEREO_DOWNMIX)
return AV_CH_LAYOUT_STEREO;

if(layout && layout != AV_CH_FRONT_CENTER && !(layout&(layout-1))) {
char buf[128];
av_get_channel_layout_string(buf, sizeof(buf), -1, layout);
@@ -122,13 +119,19 @@ av_cold static int auto_matrix(SwrContext *s)
const int matrix_encoding = s->matrix_encoding;

in_ch_layout = clean_layout(s, s->in_ch_layout);
out_ch_layout = clean_layout(s, s->out_ch_layout);

if( out_ch_layout == AV_CH_LAYOUT_STEREO_DOWNMIX
&& (in_ch_layout & AV_CH_LAYOUT_STEREO_DOWNMIX) == 0
)
out_ch_layout = AV_CH_LAYOUT_STEREO;

if(!sane_layout(in_ch_layout)){
av_get_channel_layout_string(buf, sizeof(buf), -1, s->in_ch_layout);
av_log(s, AV_LOG_ERROR, "Input channel layout '%s' is not supported\n", buf);
return AVERROR(EINVAL);
}

out_ch_layout = clean_layout(s, s->out_ch_layout);
if(!sane_layout(out_ch_layout)){
av_get_channel_layout_string(buf, sizeof(buf), -1, s->out_ch_layout);
av_log(s, AV_LOG_ERROR, "Output channel layout '%s' is not supported\n", buf);
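The rematrix change moves the stereo-downmix special case out of clean_layout() and into auto_matrix(): a requested AV_CH_LAYOUT_STEREO_DOWNMIX output is treated as plain stereo only when the input does not itself carry the Lt/Rt pair, so downmix-to-downmix conversions keep their layout. The decision in isolation, as a hedged sketch (hypothetical pick_output_layout helper):

```c
#include <libavutil/channel_layout.h>

/* Map a requested "stereo downmix" output layout to plain stereo unless
 * the input already carries the Lt/Rt downmix channels. */
static int64_t pick_output_layout(int64_t in_layout, int64_t out_layout)
{
    if (out_layout == AV_CH_LAYOUT_STEREO_DOWNMIX &&
        !(in_layout & AV_CH_LAYOUT_STEREO_DOWNMIX))
        return AV_CH_LAYOUT_STEREO;
    return out_layout;
}
```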
@@ -754,12 +754,13 @@ static av_always_inline void planar_rgb16_to_y(uint8_t *_dst, const uint8_t *_sr
const uint16_t **src = (const uint16_t **)_src;
uint16_t *dst = (uint16_t *)_dst;
int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
int shift = bpc < 16 ? bpc : 14;
for (i = 0; i < width; i++) {
int g = rdpx(src[0] + i);
int b = rdpx(src[1] + i);
int r = rdpx(src[2] + i);

dst[i] = ((ry*r + gy*g + by*b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + bpc - 14));
dst[i] = ((ry*r + gy*g + by*b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14));
}
}

@@ -773,13 +774,14 @@ static av_always_inline void planar_rgb16_to_uv(uint8_t *_dstU, uint8_t *_dstV,
uint16_t *dstV = (uint16_t *)_dstV;
int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
int shift = bpc < 16 ? bpc : 14;
for (i = 0; i < width; i++) {
int g = rdpx(src[0] + i);
int b = rdpx(src[1] + i);
int r = rdpx(src[2] + i);

dstU[i] = (ru*r + gu*g + bu*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + bpc - 14);
dstV[i] = (rv*r + gv*g + bv*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + bpc - 14);
dstU[i] = (ru*r + gu*g + bu*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
dstV[i] = (rv*r + gv*g + bv*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
}
}
#undef rdpx
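The libswscale hunk introduces shift = bpc < 16 ? bpc : 14 so that the final right shift for 16-bit-per-component planar RGB is capped at the value used for 14-bit input instead of growing with the bit depth; the conversion itself is the usual fixed-point dot product followed by one shared right shift. A generic fixed-point luma sketch in the same spirit (approximate BT.601 coefficients; RGB2YUV_SHIFT assumed to be 15 here, as in libswscale; illustrative only, not the table-driven code above):

```c
#include <stdint.h>

/* Fixed-point RGB -> luma with one shared right shift, mirroring the
 * pattern in the hunk.  Coefficients are ~0.257/0.504/0.098 scaled by
 * 2^15; "shift" is clamped exactly as in the patch so 16-bit components
 * reuse the 14-bit exponent. */
static uint16_t rgb_to_luma(uint16_t r, uint16_t g, uint16_t b, int bpc)
{
    const int64_t ry = 8421, gy = 16516, by = 3211;
    int shift = bpc < 16 ? bpc : 14;
    int64_t acc = ry * r + gy * g + by * b;
    return (uint16_t)(acc >> (15 + shift - 14));
}
```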
@@ -1,3 +1,3 @@
e35f5ea283bbcb249818e0078ec72664 *./tests/data/lavf/lavf.gif
2011766 ./tests/data/lavf/lavf.gif
8aef8081e8afa445f63f320f4a1c5edb *./tests/data/lavf/lavf.gif
2030198 ./tests/data/lavf/lavf.gif
./tests/data/lavf/lavf.gif CRC=0x0dc5477c

@@ -1,6 +1,6 @@
b53f31e572394f225aff0bc82b5d1cc9 *./tests/data/lavf/lavf.mkv
472553 ./tests/data/lavf/lavf.mkv
1748c0b3221977509c62a158236d2492 *./tests/data/lavf/lavf.mkv
472533 ./tests/data/lavf/lavf.mkv
./tests/data/lavf/lavf.mkv CRC=0x4780846b
84dcb326fe85aeeb5768beb44372f248 *./tests/data/lavf/lavf.mkv
320297 ./tests/data/lavf/lavf.mkv
0f78dd9299210a51b18faafc971e71f2 *./tests/data/lavf/lavf.mkv
320265 ./tests/data/lavf/lavf.mkv
./tests/data/lavf/lavf.mkv CRC=0x4780846b

@@ -1,48 +1,48 @@
ret: 0 st: 1 flags:1 dts: 0.000000 pts: 0.000000 pos: 555 size: 208
ret: 0 st: 1 flags:1 dts: 0.000000 pts: 0.000000 pos: 523 size: 208
ret: 0 st:-1 flags:0 ts:-1.000000
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 771 size: 27837
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 739 size: 27837
ret: 0 st:-1 flags:1 ts: 1.894167
ret: 0 st: 0 flags:1 dts: 0.971000 pts: 0.971000 pos: 292185 size: 27834
ret: 0 st: 0 flags:1 dts: 0.971000 pts: 0.971000 pos: 292153 size: 27834
ret: 0 st: 0 flags:0 ts: 0.788000
ret: 0 st: 0 flags:1 dts: 0.971000 pts: 0.971000 pos: 292185 size: 27834
ret: 0 st: 0 flags:1 dts: 0.971000 pts: 0.971000 pos: 292153 size: 27834
ret: 0 st: 0 flags:1 ts:-0.317000
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 771 size: 27837
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 739 size: 27837
ret:-1 st: 1 flags:0 ts: 2.577000
ret: 0 st: 1 flags:1 ts: 1.471000
ret: 0 st: 1 flags:1 dts: 0.993000 pts: 0.993000 pos: 320026 size: 209
ret: 0 st: 1 flags:1 dts: 0.993000 pts: 0.993000 pos: 319994 size: 209
ret: 0 st:-1 flags:0 ts: 0.365002
ret: 0 st: 0 flags:1 dts: 0.491000 pts: 0.491000 pos: 146738 size: 27925
ret: 0 st: 0 flags:1 dts: 0.491000 pts: 0.491000 pos: 146706 size: 27925
ret: 0 st:-1 flags:1 ts:-0.740831
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 771 size: 27837
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 739 size: 27837
ret:-1 st: 0 flags:0 ts: 2.153000
ret: 0 st: 0 flags:1 ts: 1.048000
ret: 0 st: 0 flags:1 dts: 0.971000 pts: 0.971000 pos: 292185 size: 27834
ret: 0 st: 0 flags:1 dts: 0.971000 pts: 0.971000 pos: 292153 size: 27834
ret: 0 st: 1 flags:0 ts:-0.058000
ret: 0 st: 1 flags:1 dts: 0.000000 pts: 0.000000 pos: 555 size: 208
ret: 0 st: 1 flags:1 dts: 0.000000 pts: 0.000000 pos: 523 size: 208
ret: 0 st: 1 flags:1 ts: 2.836000
ret: 0 st: 1 flags:1 dts: 0.993000 pts: 0.993000 pos: 320026 size: 209
ret: 0 st: 1 flags:1 dts: 0.993000 pts: 0.993000 pos: 319994 size: 209
ret:-1 st:-1 flags:0 ts: 1.730004
ret: 0 st:-1 flags:1 ts: 0.624171
ret: 0 st: 0 flags:1 dts: 0.491000 pts: 0.491000 pos: 146738 size: 27925
ret: 0 st: 0 flags:1 dts: 0.491000 pts: 0.491000 pos: 146706 size: 27925
ret: 0 st: 0 flags:0 ts:-0.482000
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 771 size: 27837
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 739 size: 27837
ret: 0 st: 0 flags:1 ts: 2.413000
ret: 0 st: 0 flags:1 dts: 0.971000 pts: 0.971000 pos: 292185 size: 27834
ret: 0 st: 0 flags:1 dts: 0.971000 pts: 0.971000 pos: 292153 size: 27834
ret:-1 st: 1 flags:0 ts: 1.307000
ret: 0 st: 1 flags:1 ts: 0.201000
ret: 0 st: 1 flags:1 dts: 0.000000 pts: 0.000000 pos: 555 size: 208
ret: 0 st: 1 flags:1 dts: 0.000000 pts: 0.000000 pos: 523 size: 208
ret: 0 st:-1 flags:0 ts:-0.904994
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 771 size: 27837
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 739 size: 27837
ret: 0 st:-1 flags:1 ts: 1.989173
ret: 0 st: 0 flags:1 dts: 0.971000 pts: 0.971000 pos: 292185 size: 27834
ret: 0 st: 0 flags:1 dts: 0.971000 pts: 0.971000 pos: 292153 size: 27834
ret: 0 st: 0 flags:0 ts: 0.883000
ret: 0 st: 0 flags:1 dts: 0.971000 pts: 0.971000 pos: 292185 size: 27834
ret: 0 st: 0 flags:1 dts: 0.971000 pts: 0.971000 pos: 292153 size: 27834
ret: 0 st: 0 flags:1 ts:-0.222000
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 771 size: 27837
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 739 size: 27837
ret:-1 st: 1 flags:0 ts: 2.672000
ret: 0 st: 1 flags:1 ts: 1.566000
ret: 0 st: 1 flags:1 dts: 0.993000 pts: 0.993000 pos: 320026 size: 209
ret: 0 st: 1 flags:1 dts: 0.993000 pts: 0.993000 pos: 319994 size: 209
ret: 0 st:-1 flags:0 ts: 0.460008
ret: 0 st: 0 flags:1 dts: 0.491000 pts: 0.491000 pos: 146738 size: 27925
ret: 0 st: 0 flags:1 dts: 0.491000 pts: 0.491000 pos: 146706 size: 27925
ret: 0 st:-1 flags:1 ts:-0.645825
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 771 size: 27837
ret: 0 st: 0 flags:1 dts: 0.011000 pts: 0.011000 pos: 739 size: 27837