Compare commits

115 Commits

SHA1
----
b4552cc9b8
172f929767
66a9edfcf6
a9382fc15c
bd9dcb411d
ae81a0e32d
4f93400db1
0cd61c7f7d
28ac4e91dc
6b683be641
3a3b5ae4c0
70fcea3b77
b545d11d49
4324d7bade
c88bdac460
bdf6e6fff4
83e4aa3e7c
fce2cfbdcf
72f1907c96
074ebfacf4
47f8497837
93f26b7992
f684bbf224
edca16f1af
aeac212fda
62f05d6309
bd339d4882
9de71b0eb2
7e73760950
0639e403be
5c7d6be5f9
2005887707
cf8462ce00
02c8c064ea
9a4acedf31
c2d37e7364
62baf22ec0
8f7d839e15
93716f7bea
ff4c53e8b3
9a22d6dd63
beb28bc55d
1fa7ad2e20
f514834917
1d0e583728
782331be1e
94c7ee4d9e
b7154758de
cd7d575e90
17d169ce0f
fb1fb462e5
7da810e68b
6de6d9e2d3
736851264b
59431fc841
f581e25a69
9d0bb7fc39
a53fd4b758
b4ccdf5e68
9b02aa2593
f089e67d51
842d7c9b3a
187297b871
211ad5042a
2b06f5f8f1
0a64b25c77
40e52bbb63
aeec1a6430
ef121a88d5
db99c41567
54bdb5fc86
2fd824b466
005b38f8f1
6911d9e1b0
cecb2b39ce
9ebfee7ac0
01b39884c7
c08e8ab715
68ae344b5e
5753d780b4
dbb534cea6
414d75b8bc
c52a25e03b
c7966bf795
0f429392cf
2dc6c5d462
dc0403530e
59147be24f
09bc4be3db
aaef59d535
283e070877
4e0c29451b
f5ae34250a
fc4c29bc6e
6158eec53f
6c0fef5762
f5a4bd23e9
8e1760f37f
1da5ab751f
237ef710a1
be47e93134
1821c849da
a4522ae516
c7ee4bc016
0cabb95811
a1b32533aa
e2eb0d2326
acac6b0d69
deb8d0d6a1
3e817d91ef
e231f0fade
f0f55e6726
61dc8494d7
423b87d621
8d9568b4a1
compat/avisynth/avisynth_c_25.h (new file, 68 lines)

@@ -0,0 +1,68 @@
// Copyright (c) 2011 FFmpegSource Project
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

/* these are defines/functions that are used and were changed in the switch to 2.6
 * and are needed to maintain full compatibility with 2.5 */

enum {
    AVS_CS_YV12_25 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
    AVS_CS_I420_25 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
};

AVSC_INLINE int avs_get_height_p_25(const AVS_VideoFrame * p, int plane) {
    switch (plane)
    {
    case AVS_PLANAR_U: case AVS_PLANAR_V:
        if (p->pitchUV)
            return p->height>>1;
        return 0;
    }
    return p->height;
}

AVSC_INLINE int avs_get_row_size_p_25(const AVS_VideoFrame * p, int plane) {
    int r;
    switch (plane)
    {
    case AVS_PLANAR_U: case AVS_PLANAR_V:
        if (p->pitchUV)
            return p->row_size>>1;
        else
            return 0;
    case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
        if (p->pitchUV)
        {
            r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
            if (r < p->pitchUV)
                return r;
            return p->row_size>>1;
        }
        else
            return 0;
    case AVS_PLANAR_Y_ALIGNED:
        r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
        if (r <= p->pitch)
            return r;
        return p->row_size;
    }
    return p->row_size;
}

AVSC_INLINE int avs_is_yv12_25(const AVS_VideoInfo * p)
    { return ((p->pixel_type & AVS_CS_YV12_25) == AVS_CS_YV12_25)||((p->pixel_type & AVS_CS_I420_25) == AVS_CS_I420_25); }
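As a worked example of the alignment arithmetic above (assuming the usual AVS_FRAME_ALIGN of 16, which is defined in avisynth_c.h rather than in this diff): a row_size of 166 rounds up as (166 + 15) & ~15 = 176, so the aligned U/V row size would be 176 >> 1 = 88, returned only while it stays below pitchUV.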
configure (vendored, 6 lines changed)

@@ -2801,7 +2801,9 @@ probe_cc(){
unset _depflags _DEPCMD _DEPFLAGS
_flags_filter=echo

if $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
if $_cc --version 2>&1 | grep -q '^GNU assembler'; then
true # no-op to avoid reading stdin in following checks
elif $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
_type=llvm_gcc
gcc_extra_ver=$(expr "$($_cc --version | head -n1)" : '.*\((.*)\)')
_ident="llvm-gcc $($_cc -dumpversion) $gcc_extra_ver"

@@ -4201,7 +4203,7 @@ enabled openal && { { for al_libs in "${OPENAL_LIBS}" "-lopenal" "-lO
enabled opencl && { check_lib2 OpenCL/cl.h clEnqueueNDRangeKernel -Wl,-framework,OpenCL ||
check_lib2 CL/cl.h clEnqueueNDRangeKernel -lOpenCL ||
die "ERROR: opencl not found"; } &&
{ enabled_any w32threads os2threads &&
{ ! enabled_any w32threads os2threads ||
die "opencl currently needs --enable-pthreads or --disable-w32threads"; } &&
{ check_cpp_condition "OpenCL/cl.h" "defined(CL_VERSION_1_2)" ||
check_cpp_condition "CL/cl.h" "defined(CL_VERSION_1_2)" ||

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 2.0.1
PROJECT_NUMBER = 2.0.3

# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55

@@ -103,8 +103,8 @@ endif
uninstall: uninstall-man

uninstall-man:
	$(RM) $(addprefix "$(MANDIR)/man1/",$(MANPAGES1))
	$(RM) $(addprefix "$(MANDIR)/man3/",$(MANPAGES3))
	$(RM) $(addprefix "$(MANDIR)/man1/",$(PROGS-yes:%=%.1) $(PROGS-yes:%=%-all.1) $(COMPONENTS-yes:%=%.1))
	$(RM) $(addprefix "$(MANDIR)/man3/",$(LIBRARIES-yes:%=%.3))

clean:: docclean

@@ -14,7 +14,3 @@ accepted. If you are experiencing issues with any formally released version of
FFmpeg, please try git master to check if the issue still exists. If it does,
make your report against the development code following the usual bug reporting
guidelines.

AVI/AVXSynth
--------
If you want to use FFmpeg with AVISynth, you need AVISynth 2.6.0 at minimum.

@@ -1,3 +1,4 @@
@anchor{codec-options}
@chapter Codec Options
@c man begin CODEC OPTIONS
@@ -25,6 +25,95 @@ enabled encoders.
A description of some of the currently available audio encoders
follows.

@anchor{aacenc}
@section aac

Advanced Audio Coding (AAC) encoder.

This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
low complexity (AAC-LC) profile is supported. To use this encoder, you must set
the @option{strict} option to @samp{experimental} or lower.

As this encoder is experimental, unexpected behavior may exist from time to
time. For a more stable AAC encoder, see @ref{libvo-aacenc}. However, be warned
that it has a worse quality reported by some users.

@c Comment this out until somebody writes the respective documentation.
@c See also @ref{libfaac}, @ref{libaacplus}, and @ref{libfdk-aac-enc}.

@subsection Options

@table @option
@item b
Set bit rate in bits/s. Setting this automatically activates constant bit rate
(CBR) mode.

@item q
Set quality for variable bit rate (VBR) mode. This option is valid only using
the @command{ffmpeg} command-line tool. For library interface users, use
@option{global_quality}.

@item stereo_mode
Set stereo encoding mode. Possible values:

@table @samp
@item auto
Automatically selected by the encoder.

@item ms_off
Disable middle/side encoding. This is the default.

@item ms_force
Force middle/side encoding.
@end table

@item aac_coder
Set AAC encoder coding method. Possible values:

@table @samp
@item 0
FAAC-inspired method.

This method is a simplified reimplementation of the method used in FAAC, which
sets thresholds proportional to the band energies, and then decreases all the
thresholds with quantizer steps to find the appropriate quantization with
distortion below threshold band by band.

The quality of this method is comparable to the two loop searching method
described below, but somewhat better and slower.

@item 1
Average noise to mask ratio (ANMR) trellis-based solution.

This has a theoretic best quality out of all the coding methods, but at the
cost of the slowest speed.

@item 2
Two loop searching (TLS) method.

This method first sets quantizers depending on band thresholds and then tries
to find an optimal combination by adding or subtracting a specific value from
all quantizers and adjusting some individual quantizer a little.

This method produces similar quality with the FAAC method and is the default.

@item 3
Constant quantizer method.

This method sets a constant quantizer for all bands. This is the fastest of all
the methods, yet produces the worst quality.

@end table

@end table

@subsection Tips and Tricks

According to some reports
(e.g. @url{http://d.hatena.ne.jp/kamedo2/20120729/1343545890}), setting the
@option{cutoff} option to 15000 Hz greatly improves the quality of the
output. As a result, we encourage you to do the same.
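For illustration only (this command is an editor's example, not part of the diff; the file names are placeholders), the options above combine as:

ffmpeg -i input.wav -c:a aac -strict experimental -b:a 128k -cutoff 15000 output.m4a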
@section ac3 and ac3_fixed

AC-3 audio encoders.

@@ -420,26 +509,36 @@ Requires the presence of the libmp3lame headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libmp3lame}.

@subsection Option Mapping
@subsection Options

The following options are supported by the libmp3lame wrapper,
the LAME-equivalent options follow the FFmpeg ones.
The following options are supported by the libmp3lame wrapper. The
@command{lame}-equivalent of the options are listed in parentheses.

@multitable @columnfractions .2 .2
@item FFmpeg @tab LAME
@item b @tab b
Set bitrate expressed in bits/s, LAME @code{bitrate} is expressed in
kilobits/s.
@item q @tab V
Set quality setting for VBR.
@item compression_level @tab q
Set algorithm quality. Valid arguments are integers in the 0-9 range.
@item reservoir @tab N.A.
Enable use of bit reservoir. LAME has this enabled by default.
@item joint_stereo @tab -m j
@table @option
@item b (@emph{-b})
Set bitrate expressed in bits/s for CBR. LAME @code{bitrate} is
expressed in kilobits/s.

@item q (@emph{-V})
Set constant quality setting for VBR. This option is valid only
using the @command{ffmpeg} command-line tool. For library interface
users, use @option{global_quality}.

@item compression_level (@emph{-q})
Set algorithm quality. Valid arguments are integers in the 0-9 range,
with 0 meaning highest quality but slowest, and 9 meaning fastest
while producing the worst quality.

@item reservoir
Enable use of bit reservoir when set to 1. Default value is 1. LAME
has this enabled by default, but it can be overridden by using the
@option{--nores} option.

@item joint_stereo (@emph{-m j})
Enable the encoder to use (on a frame by frame basis) either L/R
stereo or mid/side stereo.
@end multitable
stereo or mid/side stereo. Default value is 1.

@end table
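As a hypothetical usage example (not part of the diff; file names are placeholders), a VBR encode where the wrapper's q option maps to LAME's -V would look like:

ffmpeg -i input.wav -c:a libmp3lame -q:a 2 output.mp3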
@section libopencore-amrnb

@@ -486,24 +585,26 @@ Requires the presence of the libtwolame headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libtwolame}.

@subsection Options Mapping
@subsection Options

The following options are supported by the libtwolame wrapper. The
TwoLAME-equivalent options follow the FFmpeg ones and are in
@command{twolame}-equivalent options follow the FFmpeg ones and are in
parentheses.

@table @option
@item b
(b) Set bitrate in bits/s. Note that FFmpeg @code{b} option is
expressed in bits/s, twolame @code{b} in kilobits/s. The default
value is 128k.
@item b (@emph{-b})
Set bitrate expressed in bits/s for CBR. @command{twolame} @option{b}
option is expressed in kilobits/s. Default value is 128k.

@item q
(V) Set quality for experimental VBR support. Maximum value range is
from -50 to 50, useful range is from -10 to 10.
@item q (@emph{-V})
Set quality for experimental VBR support. Maximum value range is
from -50 to 50, useful range is from -10 to 10. The higher the
value, the better the quality. This option is valid only using the
@command{ffmpeg} command-line tool. For library interface users,
use @option{global_quality}.

@item mode
(mode) Set MPEG mode. Possible values:
@item mode (@emph{--mode})
Set the mode of the resulting audio. Possible values:

@table @samp
@item auto
@@ -518,29 +619,30 @@ Dual channel
Mono
@end table

@item psymodel
(psyc-mode) Set psychoacoustic model to use in encoding. The argument
must be an integer between -1 and 4, inclusive. The higher the value,
the better the quality. The default value is 3.
@item psymodel (@emph{--psyc-mode})
Set psychoacoustic model to use in encoding. The argument must be
an integer between -1 and 4, inclusive. The higher the value, the
better the quality. The default value is 3.

@item energy_levels
(energy) Enable energy levels extensions when set to 1. The default
value is 0 (disabled).
@item energy_levels (@emph{--energy})
Enable energy levels extensions when set to 1. The default value is
0 (disabled).

@item error_protection
(protect) Enable CRC error protection when set to 1. The default value
is 0 (disabled).
@item error_protection (@emph{--protect})
Enable CRC error protection when set to 1. The default value is 0
(disabled).

@item copyright
(copyright) Set MPEG audio copyright flag when set to 1. The default
value is 0 (disabled).
@item copyright (@emph{--copyright})
Set MPEG audio copyright flag when set to 1. The default value is 0
(disabled).

@item original
(original) Set MPEG audio original flag when set to 1. The default
value is 0 (disabled).
@item original (@emph{--original})
Set MPEG audio original flag when set to 1. The default value is 0
(disabled).

@end table
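A minimal illustrative command for the wrapper described above (not part of the diff; the remaining options keep their defaults):

ffmpeg -i input.wav -c:a libtwolame -b:a 192k output.mp2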
@anchor{libvo-aacenc}
@section libvo-aacenc

VisualOn AAC encoder.

@@ -549,16 +651,19 @@ Requires the presence of the libvo-aacenc headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libvo-aacenc --enable-version3}.

This encoder is considered to be worse than the
@ref{aacenc,,native experimental FFmpeg AAC encoder}, according to
multiple sources.

@subsection Options

The VisualOn AAC encoder only support encoding AAC-LC and up to 2
channels. It is also CBR-only. It is considered to be worse than the
native experimental FFmpeg AAC encoder.
channels. It is also CBR-only.

@table @option

@item b
Bitrate.
Set bit rate in bits/s.

@end table

@@ -648,7 +753,7 @@ Set maximum frame size, or duration of a frame in milliseconds. The
argument must be exactly the following: 2.5, 5, 10, 20, 40, 60. Smaller
frame sizes achieve lower latency but less quality at a given bitrate.
Sizes greater than 20ms are only interesting at fairly low bitrates.
The default of FFmpeg is 10ms, but is 20ms in @command{opusenc}.
The default is 20ms.

@item packet_loss (@emph{expect-loss})
Set expected packet loss percentage. The default is 0.
@@ -873,178 +978,318 @@ For more information about libvpx see:
|
||||
|
||||
x264 H.264/MPEG-4 AVC encoder wrapper.
|
||||
|
||||
Requires the presence of the libx264 headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
This encoder requires the presence of the libx264 headers and library
|
||||
during configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libx264}.
|
||||
|
||||
x264 supports an impressive number of features, including 8x8 and 4x4 adaptive
|
||||
spatial transform, adaptive B-frame placement, CAVLC/CABAC entropy coding,
|
||||
interlacing (MBAFF), lossless mode, psy optimizations for detail retention
|
||||
(adaptive quantization, psy-RD, psy-trellis).
|
||||
libx264 supports an impressive number of features, including 8x8 and
|
||||
4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC
|
||||
entropy coding, interlacing (MBAFF), lossless mode, psy optimizations
|
||||
for detail retention (adaptive quantization, psy-RD, psy-trellis).
|
||||
|
||||
The FFmpeg wrapper provides a mapping for most of them using global options
|
||||
that match those of the encoders and provides private options for the unique
|
||||
encoder options. Additionally an expert override is provided to directly pass
|
||||
a list of key=value tuples as accepted by x264_param_parse.
|
||||
Many libx264 encoder options are mapped to FFmpeg global codec
|
||||
options, while unique encoder options are provided through private
|
||||
options. Additionally the @option{x264opts} and @option{x264-params}
|
||||
private options allows to pass a list of key=value tuples as accepted
|
||||
by the libx264 @code{x264_param_parse} function.
|
||||
|
||||
@subsection Option Mapping
|
||||
The x264 project website is at
|
||||
@url{http://www.videolan.org/developers/x264.html}.
|
||||
|
||||
The following options are supported by the x264 wrapper, the x264-equivalent
|
||||
options follow the FFmpeg ones.
|
||||
@subsection Options
|
||||
|
||||
@multitable @columnfractions .2 .2
|
||||
@item b @tab bitrate
|
||||
FFmpeg @code{b} option is expressed in bits/s, x264 @code{bitrate} in kilobits/s.
|
||||
@item bf @tab bframes
|
||||
Maximum number of B-frames.
|
||||
@item g @tab keyint
|
||||
Maximum GOP size.
|
||||
@item qmin @tab qpmin
|
||||
@item qmax @tab qpmax
|
||||
@item qdiff @tab qpstep
|
||||
@item qblur @tab qblur
|
||||
@item qcomp @tab qcomp
|
||||
@item refs @tab ref
|
||||
@item sc_threshold @tab scenecut
|
||||
@item trellis @tab trellis
|
||||
@item nr @tab nr
|
||||
Noise reduction.
|
||||
@item me_range @tab merange
|
||||
@item me_method @tab me
|
||||
@item subq @tab subme
|
||||
@item b_strategy @tab b-adapt
|
||||
@item keyint_min @tab keyint-min
|
||||
@item coder @tab cabac
|
||||
Set coder to @code{ac} to use CABAC.
|
||||
@item cmp @tab chroma-me
|
||||
Set to @code{chroma} to use chroma motion estimation.
|
||||
@item threads @tab threads
|
||||
@item thread_type @tab sliced_threads
|
||||
Set to @code{slice} to use sliced threading instead of frame threading.
|
||||
@item flags -cgop @tab open-gop
|
||||
Set @code{-cgop} to use recovery points to close GOPs.
|
||||
@item rc_init_occupancy @tab vbv-init
|
||||
Initial buffer occupancy.
|
||||
@end multitable
|
||||
The following options are supported by the libx264 wrapper. The
|
||||
@command{x264}-equivalent options or values are listed in parentheses
|
||||
for easy migration.
|
||||
|
||||
To reduce the duplication of documentation, only the private options
|
||||
and some others requiring special attention are documented here. For
|
||||
the documentation of the undocumented generic options, see
|
||||
@ref{codec-options,,the Codec Options chapter}.
|
||||
|
||||
To get a more accurate and extensive documentation of the libx264
|
||||
options, invoke the command @command{x264 --full-help} or consult
|
||||
the libx264 documentation.
|
||||
|
||||
@subsection Private Options
|
||||
@table @option
|
||||
@item -preset @var{string}
|
||||
Set the encoding preset (cf. x264 --fullhelp).
|
||||
@item -tune @var{string}
|
||||
Tune the encoding params (cf. x264 --fullhelp).
|
||||
@item -profile @var{string}
|
||||
Set profile restrictions (cf. x264 --fullhelp).
|
||||
@item -fastfirstpass @var{integer}
|
||||
Use fast settings when encoding first pass.
|
||||
@item -crf @var{float}
|
||||
Select the quality for constant quality mode.
|
||||
@item -crf_max @var{float}
|
||||
In CRF mode, prevents VBV from lowering quality beyond this point.
|
||||
@item -qp @var{integer}
|
||||
Constant quantization parameter rate control method.
|
||||
@item -aq-mode @var{integer}
|
||||
AQ method
|
||||
@item b (@emph{bitrate})
|
||||
Set bitrate in bits/s. Note that FFmpeg's @option{b} option is
|
||||
expressed in bits/s, while @command{x264}'s @option{bitrate} is in
|
||||
kilobits/s.
|
||||
|
||||
@item bf (@emph{bframes})
|
||||
|
||||
@item g (@emph{keyint})
|
||||
|
||||
@item qmax (@emph{qpmax})
|
||||
|
||||
@item qmin (@emph{qpmin})
|
||||
|
||||
@item qdiff (@emph{qpstep})
|
||||
|
||||
@item qblur (@emph{qblur})
|
||||
|
||||
@item qcomp (@emph{qcomp})
|
||||
|
||||
@item refs (@emph{ref})
|
||||
|
||||
@item sc_threshold (@emph{scenecut})
|
||||
|
||||
@item trellis (@emph{trellis})
|
||||
|
||||
@item nr (@emph{nr})
|
||||
|
||||
@item me_range (@emph{merange})
|
||||
|
||||
@item me_method (@emph{me})
|
||||
Set motion estimation method. Possible values in the decreasing order
|
||||
of speed:
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item none
|
||||
@item dia (@emph{dia})
|
||||
@item epzs (@emph{dia})
|
||||
Diamond search with radius 1 (fastest). @samp{epzs} is an alias for
|
||||
@samp{dia}.
|
||||
@item hex (@emph{hex})
|
||||
Hexagonal search with radius 2.
|
||||
@item umh (@emph{umh})
|
||||
Uneven multi-hexagon search.
|
||||
@item esa (@emph{esa})
|
||||
Exhaustive search.
|
||||
@item tesa (@emph{tesa})
|
||||
Hadamard exhaustive search (slowest).
|
||||
@end table
|
||||
|
||||
@item variance
|
||||
@item subq (@emph{subme})
|
||||
|
||||
@item b_strategy (@emph{b-adapt})
|
||||
|
||||
@item keyint_min (@emph{min-keyint})
|
||||
|
||||
@item coder
|
||||
Set entropy encoder. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item ac
|
||||
Enable CABAC.
|
||||
|
||||
@item vlc
|
||||
Enable CAVLC and disable CABAC. It generates the same effect as
|
||||
@command{x264}'s @option{--no-cabac} option.
|
||||
@end table
|
||||
|
||||
@item cmp
|
||||
Set full pixel motion estimation comparation algorithm. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item chroma
|
||||
Enable chroma in motion estimation.
|
||||
|
||||
@item sad
|
||||
Ignore chroma in motion estimation. It generates the same effect as
|
||||
@command{x264}'s @option{--no-chroma-me} option.
|
||||
@end table
|
||||
|
||||
@item threads (@emph{threads})
|
||||
|
||||
@item thread_type
|
||||
Set multithreading technique. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item slice
|
||||
Slice-based multithreading. It generates the same effect as
|
||||
@command{x264}'s @option{--sliced-threads} option.
|
||||
@item frame
|
||||
Frame-based multithreading.
|
||||
@end table
|
||||
|
||||
@item flags
|
||||
Set encoding flags. It can be used to disable closed GOP and enable
|
||||
open GOP by setting it to @code{-cgop}. The result is similar to
|
||||
the behavior of @command{x264}'s @option{--open-gop} option.
|
||||
|
||||
@item rc_init_occupancy (@emph{vbv-init})
|
||||
|
||||
@item preset (@emph{preset})
|
||||
Set the encoding preset.
|
||||
|
||||
@item tune (@emph{tune})
|
||||
Set tuning of the encoding params.
|
||||
|
||||
@item profile (@emph{profile})
|
||||
Set profile restrictions.
|
||||
|
||||
@item fastfirstpass
|
||||
Enable fast settings when encoding first pass, when set to 1. When set
|
||||
to 0, it has the same effect of @command{x264}'s
|
||||
@option{--slow-firstpass} option.
|
||||
|
||||
@item crf (@emph{crf})
|
||||
Set the quality for constant quality mode.
|
||||
|
||||
@item crf_max (@emph{crf-max})
|
||||
In CRF mode, prevents VBV from lowering quality beyond this point.
|
||||
|
||||
@item qp (@emph{qp})
|
||||
Set constant quantization rate control method parameter.
|
||||
|
||||
@item aq-mode (@emph{aq-mode})
|
||||
Set AQ method. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item none (@emph{0})
|
||||
Disabled.
|
||||
|
||||
@item variance (@emph{1})
|
||||
Variance AQ (complexity mask).
|
||||
@item autovariance
|
||||
|
||||
@item autovariance (@emph{2})
|
||||
Auto-variance AQ (experimental).
|
||||
@end table
|
||||
@item -aq-strength @var{float}
|
||||
AQ strength, reduces blocking and blurring in flat and textured areas.
|
||||
@item -psy @var{integer}
|
||||
Use psychovisual optimizations.
|
||||
@item -psy-rd @var{string}
|
||||
Strength of psychovisual optimization, in <psy-rd>:<psy-trellis> format.
|
||||
@item -rc-lookahead @var{integer}
|
||||
Number of frames to look ahead for frametype and ratecontrol.
|
||||
@item -weightb @var{integer}
|
||||
Weighted prediction for B-frames.
|
||||
@item -weightp @var{integer}
|
||||
Weighted prediction analysis method.
|
||||
|
||||
Possible values:
|
||||
@item aq-strength (@emph{aq-strength})
|
||||
Set AQ strength, reduce blocking and blurring in flat and textured areas.
|
||||
|
||||
@item psy
|
||||
Use psychovisual optimizations when set to 1. When set to 0, it has the
|
||||
same effect as @command{x264}'s @option{--no-psy} option.
|
||||
|
||||
@item psy-rd (@emph{psy-rd})
|
||||
Set strength of psychovisual optimization, in
|
||||
@var{psy-rd}:@var{psy-trellis} format.
|
||||
|
||||
@item rc-lookahead (@emph{rc-lookahead})
|
||||
Set number of frames to look ahead for frametype and ratecontrol.
|
||||
|
||||
@item weightb
|
||||
Enable weighted prediction for B-frames when set to 1. When set to 0,
|
||||
it has the same effect as @command{x264}'s @option{--no-weightb} option.
|
||||
|
||||
@item weightp (@emph{weightp})
|
||||
Set weighted prediction method for P-frames. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item none
|
||||
|
||||
@item simple
|
||||
|
||||
@item smart
|
||||
|
||||
@item none (@emph{0})
|
||||
Disabled
|
||||
@item simple (@emph{1})
|
||||
Enable only weighted refs
|
||||
@item smart (@emph{2})
|
||||
Enable both weighted refs and duplicates
|
||||
@end table
|
||||
@item -ssim @var{integer}
|
||||
Calculate and print SSIM stats.
|
||||
@item -intra-refresh @var{integer}
|
||||
Use Periodic Intra Refresh instead of IDR frames.
|
||||
@item -b-bias @var{integer}
|
||||
Influences how often B-frames are used.
|
||||
@item -b-pyramid @var{integer}
|
||||
Keep some B-frames as references.
|
||||
|
||||
Possible values:
|
||||
@item ssim (@emph{ssim})
|
||||
Enable calculation and printing SSIM stats after the encoding.
|
||||
|
||||
@item intra-refresh (@emph{intra-refresh})
|
||||
Enable the use of Periodic Intra Refresh instead of IDR frames when set
|
||||
to 1.
|
||||
|
||||
@item b-bias (@emph{b-bias})
|
||||
Set the influence on how often B-frames are used.
|
||||
|
||||
@item b-pyramid (@emph{b-pyramid})
|
||||
Set method for keeping of some B-frames as references. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item none
|
||||
|
||||
@item strict
|
||||
@item none (@emph{none})
|
||||
Disabled.
|
||||
@item strict (@emph{strict})
|
||||
Strictly hierarchical pyramid.
|
||||
@item normal
|
||||
@item normal (@emph{normal})
|
||||
Non-strict (not Blu-ray compatible).
|
||||
@end table
|
||||
@item -mixed-refs @var{integer}
|
||||
One reference per partition, as opposed to one reference per macroblock.
|
||||
@item -8x8dct @var{integer}
|
||||
High profile 8x8 transform.
|
||||
@item -fast-pskip @var{integer}
|
||||
@item -aud @var{integer}
|
||||
Use access unit delimiters.
|
||||
@item -mbtree @var{integer}
|
||||
Use macroblock tree ratecontrol.
|
||||
@item -deblock @var{string}
|
||||
Loop filter parameters, in <alpha:beta> form.
|
||||
@item -cplxblur @var{float}
|
||||
Reduce fluctuations in QP (before curve compression).
|
||||
@item -partitions @var{string}
|
||||
A comma-separated list of partitions to consider, possible values: p8x8, p4x4, b8x8, i8x8, i4x4, none, all.
|
||||
@item -direct-pred @var{integer}
|
||||
Direct MV prediction mode
|
||||
|
||||
Possible values:
|
||||
@item mixed-refs
|
||||
Enable the use of one reference per partition, as opposed to one
|
||||
reference per macroblock when set to 1. When set to 0, it has the
|
||||
same effect as @command{x264}'s @option{--no-mixed-refs} option.
|
||||
|
||||
@item 8x8dct
|
||||
Enable adaptive spatial transform (high profile 8x8 transform)
|
||||
when set to 1. When set to 0, it has the same effect as
|
||||
@command{x264}'s @option{--no-8x8dct} option.
|
||||
|
||||
@item fast-pskip
|
||||
Enable early SKIP detection on P-frames when set to 1. When set
|
||||
to 0, it has the same effect as @command{x264}'s
|
||||
@option{--no-fast-pskip} option.
|
||||
|
||||
@item aud (@emph{aud})
|
||||
Enable use of access unit delimiters when set to 1.
|
||||
|
||||
@item mbtree
|
||||
Enable use macroblock tree ratecontrol when set to 1. When set
|
||||
to 0, it has the same effect as @command{x264}'s
|
||||
@option{--no-mbtree} option.
|
||||
|
||||
@item deblock (@emph{deblock})
|
||||
Set loop filter parameters, in @var{alpha}:@var{beta} form.
|
||||
|
||||
@item cplxblur (@emph{cplxblur})
|
||||
Set fluctuations reduction in QP (before curve compression).
|
||||
|
||||
@item partitions (@emph{partitions})
|
||||
Set partitions to consider as a comma-separated list of. Possible
|
||||
values in the list:
|
||||
|
||||
@table @samp
|
||||
@item none
|
||||
|
||||
@item spatial
|
||||
|
||||
@item temporal
|
||||
|
||||
@item auto
|
||||
|
||||
@end table
|
||||
@item -slice-max-size @var{integer}
|
||||
Limit the size of each slice in bytes.
|
||||
@item -stats @var{string}
|
||||
Filename for 2 pass stats.
|
||||
@item -nal-hrd @var{integer}
|
||||
Signal HRD information (requires vbv-bufsize; cbr not allowed in .mp4).
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item none
|
||||
|
||||
@item vbr
|
||||
|
||||
@item cbr
|
||||
|
||||
@item p8x8
|
||||
8x8 P-frame partition.
|
||||
@item p4x4
|
||||
4x4 P-frame partition.
|
||||
@item b8x8
|
||||
4x4 B-frame partition.
|
||||
@item i8x8
|
||||
8x8 I-frame partition.
|
||||
@item i4x4
|
||||
4x4 I-frame partition.
|
||||
(Enabling @samp{p4x4} requires @samp{p8x8} to be enabled. Enabling
|
||||
@samp{i8x8} requires adaptive spatial transform (@option{8x8dct}
|
||||
option) to be enabled.)
|
||||
@item none (@emph{none})
|
||||
Do not consider any partitions.
|
||||
@item all (@emph{all})
|
||||
Consider every partition.
|
||||
@end table
|
||||
|
||||
@item x264opts @var{options}
|
||||
Allow to set any x264 option, see @code{x264 --fullhelp} for a list.
|
||||
@item direct-pred (@emph{direct})
|
||||
Set direct MV prediction mode. Possible values:
|
||||
|
||||
@var{options} is a list of @var{key}=@var{value} couples separated by
|
||||
@table @samp
|
||||
@item none (@emph{none})
|
||||
Disable MV prediction.
|
||||
@item spatial (@emph{spatial})
|
||||
Enable spatial predicting.
|
||||
@item temporal (@emph{temporal})
|
||||
Enable temporal predicting.
|
||||
@item auto (@emph{auto})
|
||||
Automatically decided.
|
||||
@end table
|
||||
|
||||
@item slice-max-size (@emph{slice-max-size})
|
||||
Set the limit of the size of each slice in bytes. If not specified
|
||||
but RTP payload size (@option{ps}) is specified, that is used.
|
||||
|
||||
@item stats (@emph{stats})
|
||||
Set the file name for multi-pass stats.
|
||||
|
||||
@item nal-hrd (@emph{nal-hrd})
|
||||
Set signal HRD information (requires @option{vbv-bufsize} to be set).
|
||||
Possible values:
|
||||
|
||||
@table @samp
|
||||
@item none (@emph{none})
|
||||
Disable HRD information signaling.
|
||||
@item vbr (@emph{vbr})
|
||||
Variable bit rate.
|
||||
@item cbr (@emph{cbr})
|
||||
Constant bit rate (not allowed in MP4 container).
|
||||
@end table
|
||||
|
||||
@item x264opts (N.A.)
|
||||
Set any x264 option, see @command{x264 --fullhelp} for a list.
|
||||
|
||||
Argument is a list of @var{key}=@var{value} couples separated by
|
||||
":". In @var{filter} and @var{psy-rd} options that use ":" as a separator
|
||||
themselves, use "," instead. They accept it as well since long ago but this
|
||||
is kept undocumented for some reason.
|
||||
@@ -1054,18 +1299,136 @@ For example to specify libx264 encoding options with @command{ffmpeg}:
|
||||
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
|
||||
@end example
|
||||
|
||||
For more information about libx264 and the supported options see:
|
||||
@url{http://www.videolan.org/developers/x264.html}
|
||||
@item x264-params (N.A.)
|
||||
Override the x264 configuration using a :-separated list of key=value
|
||||
parameters.
|
||||
|
||||
@item -x264-params @var{string}
|
||||
Override the x264 configuration using a :-separated list of key=value parameters.
|
||||
This option is functionally the same as the @option{x264opts}, but is
|
||||
duplicated for compability with the Libav fork.
|
||||
|
||||
For example to specify libx264 encoding options with @command{ffmpeg}:
|
||||
@example
|
||||
-x264-params level=30:bframes=0:weightp=0:cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:no-fast-pskip=1:subq=6:8x8dct=0:trellis=0
|
||||
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
|
||||
cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
|
||||
no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
|
||||
@end example
|
||||
@end table
|
||||
|
||||
Encoding avpresets for common usages are provided so they can be used with the
|
||||
general presets system (e.g. passing the @code{-pre} option).
|
||||
Encoding ffpresets for common usages are provided so they can be used with the
|
||||
general presets system (e.g. passing the @option{pre} option).
|
||||
|
||||
@section libxvid
|
||||
|
||||
Xvid MPEG-4 Part 2 encoder wrapper.
|
||||
|
||||
This encoder requires the presence of the libxvidcore headers and library
|
||||
during configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libxvid --enable-gpl}.
|
||||
|
||||
The native @code{mpeg4} encoder supports the MPEG-4 Part 2 format, so
|
||||
users can encode to this format without this library.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by the libxvid wrapper. Some of
|
||||
the following options are listed but are not documented, and
|
||||
correspond to shared codec options. See @ref{codec-options,,the Codec
|
||||
Options chapter} for their documentation. The other shared options
|
||||
which are not listed have no effect for the libxvid encoder.
|
||||
|
||||
@table @option
|
||||
@item b
|
||||
|
||||
@item g
|
||||
|
||||
@item qmin
|
||||
|
||||
@item qmax
|
||||
|
||||
@item mpeg_quant
|
||||
|
||||
@item threads
|
||||
|
||||
@item bf
|
||||
|
||||
@item b_qfactor
|
||||
|
||||
@item b_qoffset
|
||||
|
||||
@item flags
|
||||
Set specific encoding flags. Possible values:
|
||||
|
||||
@table @samp
|
||||
|
||||
@item mv4
|
||||
Use four motion vector by macroblock.
|
||||
|
||||
@item aic
|
||||
Enable high quality AC prediction.
|
||||
|
||||
@item gray
|
||||
Only encode grayscale.
|
||||
|
||||
@item gmc
|
||||
Enable the use of global motion compensation (GMC).
|
||||
|
||||
@item qpel
|
||||
Enable quarter-pixel motion compensation.
|
||||
|
||||
@item cgop
|
||||
Enable closed GOP.
|
||||
|
||||
@item global_header
|
||||
Place global headers in extradata instead of every keyframe.
|
||||
|
||||
@end table
|
||||
|
||||
@item trellis
|
||||
|
||||
@item me_method
|
||||
Set motion estimation method. Possible values in decreasing order of
|
||||
speed and increasing order of quality:
|
||||
|
||||
@table @samp
|
||||
@item zero
|
||||
Use no motion estimation (default).
|
||||
|
||||
@item phods
|
||||
@item x1
|
||||
@item log
|
||||
Enable advanced diamond zonal search for 16x16 blocks and half-pixel
|
||||
refinement for 16x16 blocks. @samp{x1} and @samp{log} are aliases for
|
||||
@samp{phods}.
|
||||
|
||||
@item epzs
|
||||
Enable all of the things described above, plus advanced diamond zonal
|
||||
search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
|
||||
estimation on chroma planes.
|
||||
|
||||
@item full
|
||||
Enable all of the things described above, plus extended 16x16 and 8x8
|
||||
blocks search.
|
||||
@end table
|
||||
|
||||
@item mbd
|
||||
Set macroblock decision algorithm. Possible values in the increasing
|
||||
order of quality:
|
||||
|
||||
@table @samp
|
||||
@item simple
|
||||
Use macroblock comparing function algorithm (default).
|
||||
|
||||
@item bits
|
||||
Enable rate distortion-based half pixel and quarter pixel refinement for
|
||||
16x16 blocks.
|
||||
|
||||
@item rd
|
||||
Enable all of the things described above, plus rate distortion-based
|
||||
half pixel and quarter pixel refinement for 8x8 blocks, and rate
|
||||
distortion-based search using square pattern.
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@section png
|
||||
|
||||
|
@@ -57,6 +57,9 @@ Enable RTP MP4A-LATM payload.
|
||||
Reduce the latency introduced by optional buffering
|
||||
@end table
|
||||
|
||||
@item seek2any @var{integer} (@emph{input})
|
||||
Forces seeking to enable seek to any mode if set to 1. Default is 0.
|
||||
|
||||
@item analyzeduration @var{integer} (@emph{input})
|
||||
Specify how many microseconds are analyzed to probe the input. A
|
||||
higher value will allow to detect more accurate information, but will
|
||||
@@ -133,6 +136,12 @@ been without shifting.
|
||||
Also note that this affects only leading negative timestamps, and not
|
||||
non-monotonic negative timestamps.
|
||||
|
||||
@item skip_initial_bytes @var{integer} (@emph{input})
|
||||
Set number initial bytes to skip. Default is 0.
|
||||
|
||||
@item correct_ts_overflow @var{integer} (@emph{input})
|
||||
Correct single timestamp overflows if set to 1. Default is 1.
|
||||
|
||||
@item flush_packets @var{integer} (@emph{output})
|
||||
Flush the underlying I/O stream after each packet. Default 1 enables it, and
|
||||
has the effect of reducing the latency; 0 disables it and may slightly
|
||||
|
@@ -92,6 +92,11 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec)
|
||||
|
||||
static char *choose_pix_fmts(OutputStream *ost)
|
||||
{
|
||||
AVDictionaryEntry *strict_dict = av_dict_get(ost->opts, "strict", NULL, 0);
|
||||
if (strict_dict)
|
||||
// used by choose_pixel_fmt() and below
|
||||
av_opt_set(ost->st->codec, "strict", strict_dict->value, 0);
|
||||
|
||||
if (ost->keep_pix_fmt) {
|
||||
if (ost->filter)
|
||||
avfilter_graph_set_auto_convert(ost->filter->graph->graph,
|
||||
|
@@ -328,6 +328,14 @@ static AVLFG random_state;
|
||||
|
||||
static FILE *logfile = NULL;
|
||||
|
||||
static void htmlstrip(char *s) {
|
||||
while (s && *s) {
|
||||
s += strspn(s, "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ,. ");
|
||||
if (*s)
|
||||
*s++ = '?';
|
||||
}
|
||||
}
|
||||
|
||||
static int64_t ffm_read_write_index(int fd)
|
||||
{
|
||||
uint8_t buf[8];
|
||||
@@ -1887,6 +1895,7 @@ static int http_parse_request(HTTPContext *c)
|
||||
send_error:
|
||||
c->http_error = 404;
|
||||
q = c->buffer;
|
||||
htmlstrip(msg);
|
||||
snprintf(q, c->buffer_size,
|
||||
"HTTP/1.0 404 Not Found\r\n"
|
||||
"Content-type: text/html\r\n"
|
||||
|
@@ -41,10 +41,10 @@ function ff_scalarproduct_int16_neon, export=1
        vpadd.s32       d16, d0, d1
        vpadd.s32       d17, d2, d3
        vpadd.s32       d10, d4, d5
        vpadd.s32       d11, d6, d7
        vpadd.s32       d18, d4, d5
        vpadd.s32       d19, d6, d7
        vpadd.s32       d0,  d16, d17
        vpadd.s32       d1,  d10, d11
        vpadd.s32       d1,  d18, d19
        vpadd.s32       d2,  d0,  d1
        vpaddl.s32      d3,  d2
        vmov.32         r0,  d3[0]

@@ -81,10 +81,10 @@ function ff_scalarproduct_and_madd_int16_neon, export=1
        vpadd.s32       d16, d0, d1
        vpadd.s32       d17, d2, d3
        vpadd.s32       d10, d4, d5
        vpadd.s32       d11, d6, d7
        vpadd.s32       d18, d4, d5
        vpadd.s32       d19, d6, d7
        vpadd.s32       d0,  d16, d17
        vpadd.s32       d1,  d10, d11
        vpadd.s32       d1,  d18, d19
        vpadd.s32       d2,  d0,  d1
        vpaddl.s32      d3,  d2
        vmov.32         r0,  d3[0]
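The change above swaps the d10/d11 temporaries for d18/d19; the likely motivation is that d8-d15 are callee-saved VFP registers under the ARM AAPCS, so clobbering d10/d11 without saving them could corrupt the caller's state, while d16-d31 are free for scratch use.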
@@ -80,9 +80,16 @@ static int ass_encode_frame(AVCodecContext *avctx,
|
||||
* will be "Marked=N" instead of the layer num, so we will
|
||||
* have layer=0, which is fine. */
|
||||
layer = strtol(ass, &p, 10);
|
||||
if (*p) p += strcspn(p, ",") + 1; // skip layer or marked
|
||||
if (*p) p += strcspn(p, ",") + 1; // skip start timestamp
|
||||
if (*p) p += strcspn(p, ",") + 1; // skip end timestamp
|
||||
|
||||
#define SKIP_ENTRY(ptr) do { \
|
||||
char *sep = strchr(ptr, ','); \
|
||||
if (sep) \
|
||||
ptr = sep + 1; \
|
||||
} while (0)
|
||||
|
||||
SKIP_ENTRY(p); // skip layer or marked
|
||||
SKIP_ENTRY(p); // skip start timestamp
|
||||
SKIP_ENTRY(p); // skip end timestamp
|
||||
snprintf(ass_line, sizeof(ass_line), "%d,%ld,%s", ++s->id, layer, p);
|
||||
ass_line[strcspn(ass_line, "\r\n")] = 0;
|
||||
ass = ass_line;
|
||||
|
@@ -356,7 +356,7 @@ int av_packet_merge_side_data(AVPacket *pkt){
|
||||
int av_packet_split_side_data(AVPacket *pkt){
|
||||
if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){
|
||||
int i;
|
||||
unsigned int size, orig_pktsize = pkt->size;
|
||||
unsigned int size;
|
||||
uint8_t *p;
|
||||
|
||||
p = pkt->data + pkt->size - 8 - 5;
|
||||
@@ -377,7 +377,7 @@ int av_packet_split_side_data(AVPacket *pkt){
|
||||
for (i=0; ; i++){
|
||||
size= AV_RB32(p);
|
||||
av_assert0(size<=INT_MAX && p - pkt->data >= size);
|
||||
pkt->side_data[i].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
pkt->side_data[i].data = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
pkt->side_data[i].size = size;
|
||||
pkt->side_data[i].type = p[4]&127;
|
||||
if (!pkt->side_data[i].data)
|
||||
@@ -389,13 +389,6 @@ int av_packet_split_side_data(AVPacket *pkt){
|
||||
p-= size+5;
|
||||
}
|
||||
pkt->size -= 8;
|
||||
/* FFMIN() prevents overflow in case the packet wasn't allocated with
|
||||
* proper padding.
|
||||
* If the side data is smaller than the buffer padding size, the
|
||||
* remaining bytes should have already been filled with zeros by the
|
||||
* original packet allocation anyway. */
|
||||
memset(pkt->data + pkt->size, 0,
|
||||
FFMIN(orig_pktsize - pkt->size, FF_INPUT_BUFFER_PADDING_SIZE));
|
||||
pkt->side_data_elems = i+1;
|
||||
return 1;
|
||||
}
|
||||
|
@@ -120,6 +120,7 @@ typedef struct BinkContext {
|
||||
int version; ///< internal Bink file version
|
||||
int has_alpha;
|
||||
int swap_planes;
|
||||
unsigned frame_num;
|
||||
|
||||
Bundle bundle[BINKB_NB_SRC]; ///< bundles for decoding all data types
|
||||
Tree col_high[16]; ///< trees for decoding high nibble in "colours" data type
|
||||
@@ -1206,6 +1207,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
|
||||
if (c->version >= 'i')
|
||||
skip_bits_long(&gb, 32);
|
||||
|
||||
c->frame_num++;
|
||||
|
||||
for (plane = 0; plane < 3; plane++) {
|
||||
plane_idx = (!plane || !c->swap_planes) ? plane : (plane ^ 3);
|
||||
|
||||
@@ -1214,7 +1217,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
|
||||
return ret;
|
||||
} else {
|
||||
if ((ret = binkb_decode_plane(c, frame, &gb, plane_idx,
|
||||
!avctx->frame_number, !!plane)) < 0)
|
||||
c->frame_num == 1, !!plane)) < 0)
|
||||
return ret;
|
||||
}
|
||||
if (get_bits_count(&gb) >= bits_count)
|
||||
@@ -1332,6 +1335,13 @@ static av_cold int decode_end(AVCodecContext *avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void flush(AVCodecContext *avctx)
|
||||
{
|
||||
BinkContext * const c = avctx->priv_data;
|
||||
|
||||
c->frame_num = 0;
|
||||
}
|
||||
|
||||
AVCodec ff_bink_decoder = {
|
||||
.name = "binkvideo",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
@@ -1341,5 +1351,6 @@ AVCodec ff_bink_decoder = {
|
||||
.close = decode_end,
|
||||
.decode = decode_frame,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Bink video"),
|
||||
.flush = flush,
|
||||
.capabilities = CODEC_CAP_DR1,
|
||||
};
|
||||
|
@@ -306,7 +306,7 @@ STOP_TIMER("get_cabac_bypass")
|
||||
|
||||
for(i=0; i<SIZE; i++){
|
||||
START_TIMER
|
||||
if( (r[i]&1) != get_cabac(&c, state) )
|
||||
if( (r[i]&1) != get_cabac_noinline(&c, state) )
|
||||
av_log(NULL, AV_LOG_ERROR, "CABAC failure at %d\n", i);
|
||||
STOP_TIMER("get_cabac")
|
||||
}
|
||||
|
@@ -1931,7 +1931,7 @@ void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){

static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
    long i;
    for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
    for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
        long a = *(long*)(src+i);
        long b = *(long*)(dst+i);
        *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);

@@ -1956,7 +1956,7 @@ static void diff_bytes_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
        }
    }else
#endif
    for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
    for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
        long a = *(long*)(src1+i);
        long b = *(long*)(src2+i);
        *(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
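The added (int) cast changes the loop bound: in w - sizeof(long) the result is unsigned, so when w is smaller than sizeof(long) the subtraction wraps to a huge value and the loop would read past the buffers; with the cast the subtraction stays signed and the loop is simply skipped in that case.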
@@ -762,6 +762,17 @@ void ff_er_frame_start(ERContext *s)
|
||||
s->error_occurred = 0;
|
||||
}
|
||||
|
||||
static int er_supported(ERContext *s)
|
||||
{
|
||||
if(s->avctx->hwaccel ||
|
||||
s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
|
||||
!s->cur_pic ||
|
||||
s->cur_pic->field_picture
|
||||
)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a slice.
|
||||
* @param endx x component of the last macroblock, can be -1
|
||||
@@ -828,7 +839,7 @@ void ff_er_add_slice(ERContext *s, int startx, int starty,
|
||||
s->error_status_table[start_xy] |= VP_START;
|
||||
|
||||
if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) &&
|
||||
s->avctx->skip_top * s->mb_width < start_i) {
|
||||
er_supported(s) && s->avctx->skip_top * s->mb_width < start_i) {
|
||||
int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];
|
||||
|
||||
prev_status &= ~ VP_START;
|
||||
@@ -853,9 +864,7 @@ void ff_er_frame_end(ERContext *s)
|
||||
* though it should not crash if enabled. */
|
||||
if (!s->avctx->err_recognition || s->error_count == 0 ||
|
||||
s->avctx->lowres ||
|
||||
s->avctx->hwaccel ||
|
||||
s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
|
||||
!s->cur_pic || s->cur_pic->field_picture ||
|
||||
!er_supported(s) ||
|
||||
s->error_count == 3 * s->mb_width *
|
||||
(s->avctx->skip_top + s->avctx->skip_bottom)) {
|
||||
return;
|
||||
|
@@ -483,6 +483,10 @@ static int read_extra_header(FFV1Context *f)
|
||||
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
|
||||
|
||||
f->version = get_symbol(c, state, 0);
|
||||
if (f->version < 2) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "Invalid version in global header\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (f->version > 2) {
|
||||
c->bytestream_end -= 4;
|
||||
f->minor_version = get_symbol(c, state, 0);
|
||||
@@ -562,6 +566,7 @@ static int read_header(FFV1Context *f)
|
||||
memset(state, 128, sizeof(state));
|
||||
|
||||
if (f->version < 2) {
|
||||
int chroma_planes, chroma_h_shift, chroma_v_shift, transparency, colorspace, bits_per_raw_sample;
|
||||
unsigned v= get_symbol(c, state, 0);
|
||||
if (v >= 2) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
|
||||
@@ -574,15 +579,32 @@ static int read_header(FFV1Context *f)
|
||||
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
|
||||
}
|
||||
|
||||
f->colorspace = get_symbol(c, state, 0); //YUV cs type
|
||||
colorspace = get_symbol(c, state, 0); //YUV cs type
|
||||
bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
|
||||
chroma_planes = get_rac(c, state);
|
||||
chroma_h_shift = get_symbol(c, state, 0);
|
||||
chroma_v_shift = get_symbol(c, state, 0);
|
||||
transparency = get_rac(c, state);
|
||||
|
||||
if (f->version > 0)
|
||||
f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
|
||||
if (f->plane_count) {
|
||||
if ( colorspace != f->colorspace
|
||||
|| bits_per_raw_sample != f->avctx->bits_per_raw_sample
|
||||
|| chroma_planes != f->chroma_planes
|
||||
|| chroma_h_shift!= f->chroma_h_shift
|
||||
|| chroma_v_shift!= f->chroma_v_shift
|
||||
|| transparency != f->transparency) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
f->colorspace = colorspace;
|
||||
f->avctx->bits_per_raw_sample = bits_per_raw_sample;
|
||||
f->chroma_planes = chroma_planes;
|
||||
f->chroma_h_shift = chroma_h_shift;
|
||||
f->chroma_v_shift = chroma_v_shift;
|
||||
f->transparency = transparency;
|
||||
|
||||
f->chroma_planes = get_rac(c, state);
|
||||
f->chroma_h_shift = get_symbol(c, state, 0);
|
||||
f->chroma_v_shift = get_symbol(c, state, 0);
|
||||
f->transparency = get_rac(c, state);
|
||||
f->plane_count = 2 + f->transparency;
|
||||
}
|
||||
|
||||
@@ -600,47 +622,32 @@ static int read_header(FFV1Context *f)
|
||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
|
||||
case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
|
||||
case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
|
||||
default:
|
||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
|
||||
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
|
||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
|
||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
|
||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
|
||||
default:
|
||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
} else if (f->avctx->bits_per_raw_sample == 9) {
|
||||
} else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
|
||||
f->packed_at_lsb = 1;
|
||||
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
|
||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
|
||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
|
||||
default:
|
||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
} else if (f->avctx->bits_per_raw_sample == 10) {
|
||||
} else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
|
||||
f->packed_at_lsb = 1;
|
||||
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
|
||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
|
||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
|
||||
default:
|
||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
} else {
|
||||
} else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){
|
||||
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
|
||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
|
||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
|
||||
default:
|
||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
}
|
||||
} else if (f->colorspace == 1) {
|
||||
@@ -664,6 +671,10 @@ static int read_header(FFV1Context *f)
|
||||
av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
if (f->avctx->pix_fmt == AV_PIX_FMT_NONE) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
|
||||
av_dlog(f->avctx, "%d %d %d\n",
|
||||
f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
|
||||
@@ -899,16 +910,56 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket
static int init_thread_copy(AVCodecContext *avctx)
{
FFV1Context *f = avctx->priv_data;
int i, ret;

f->picture.f = NULL;
f->last_picture.f = NULL;
f->sample_buffer = NULL;
f->quant_table_count = 0;
f->slice_count = 0;

for (i = 0; i < f->quant_table_count; i++) {
av_assert0(f->version > 1);
f->initial_states[i] = av_memdup(f->initial_states[i],
f->context_count[i] * sizeof(*f->initial_states[i]));
}

f->picture.f = av_frame_alloc();
f->last_picture.f = av_frame_alloc();

if ((ret = ffv1_init_slice_contexts(f)) < 0)
return ret;

return 0;
}

static void copy_fields(FFV1Context *fsdst, FFV1Context *fssrc, FFV1Context *fsrc)
{
fsdst->version = fsrc->version;
fsdst->minor_version = fsrc->minor_version;
fsdst->chroma_planes = fsrc->chroma_planes;
fsdst->chroma_h_shift = fsrc->chroma_h_shift;
fsdst->chroma_v_shift = fsrc->chroma_v_shift;
fsdst->transparency = fsrc->transparency;
fsdst->plane_count = fsrc->plane_count;
fsdst->ac = fsrc->ac;
fsdst->colorspace = fsrc->colorspace;

fsdst->ec = fsrc->ec;
fsdst->intra = fsrc->intra;
fsdst->slice_damaged = fssrc->slice_damaged;
fsdst->key_frame_ok = fsrc->key_frame_ok;

fsdst->bits_per_raw_sample = fsrc->bits_per_raw_sample;
fsdst->packed_at_lsb = fsrc->packed_at_lsb;
fsdst->slice_count = fsrc->slice_count;
if (fsrc->version<3){
fsdst->slice_x = fssrc->slice_x;
fsdst->slice_y = fssrc->slice_y;
fsdst->slice_width = fssrc->slice_width;
fsdst->slice_height = fssrc->slice_height;
}
}

static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
FFV1Context *fsrc = src->priv_data;

@@ -918,36 +969,30 @@ static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
if (dst == src)
return 0;

if (!fdst->picture.f) {
{
FFV1Context bak = *fdst;
memcpy(fdst, fsrc, sizeof(*fdst));

for (i = 0; i < fdst->quant_table_count; i++) {
fdst->initial_states[i] = av_malloc(fdst->context_count[i] * sizeof(*fdst->initial_states[i]));
memcpy(fdst->initial_states[i], fsrc->initial_states[i], fdst->context_count[i] * sizeof(*fdst->initial_states[i]));
memcpy(fdst->initial_states, bak.initial_states, sizeof(fdst->initial_states));
memcpy(fdst->slice_context, bak.slice_context , sizeof(fdst->slice_context));
fdst->picture = bak.picture;
fdst->last_picture = bak.last_picture;
for (i = 0; i<fdst->num_h_slices * fdst->num_v_slices; i++) {
FFV1Context *fssrc = fsrc->slice_context[i];
FFV1Context *fsdst = fdst->slice_context[i];
copy_fields(fsdst, fssrc, fsrc);
}

fdst->picture.f = av_frame_alloc();
fdst->last_picture.f = av_frame_alloc();

if ((ret = ffv1_init_slice_contexts(fdst)) < 0)
return ret;
av_assert0(!fdst->plane[0].state);
av_assert0(!fdst->sample_buffer);
}

av_assert1(fdst->slice_count == fsrc->slice_count);

fdst->key_frame_ok = fsrc->key_frame_ok;

ff_thread_release_buffer(dst, &fdst->picture);
if (fsrc->picture.f->data[0]) {
if ((ret = ff_thread_ref_frame(&fdst->picture, &fsrc->picture)) < 0)
return ret;
}
for (i = 0; i < fdst->slice_count; i++) {
FFV1Context *fsdst = fdst->slice_context[i];
FFV1Context *fssrc = fsrc->slice_context[i];

fsdst->slice_damaged = fssrc->slice_damaged;
}

fdst->fsrc = fsrc;

@@ -275,7 +275,7 @@ static av_always_inline int encode_line(FFV1Context *s, int w,
int run_mode = 0;

if (s->ac) {
if (c->bytestream_end - c->bytestream < w * 20) {
if (c->bytestream_end - c->bytestream < w * 35) {
av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
return AVERROR_INVALIDDATA;
}

@@ -387,6 +387,10 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
}
s->diff_start = get_bits(&gb, 8);
s->diff_height = get_bits(&gb, 8);
if (s->diff_start + s->diff_height > cur_blk_height) {
av_log(avctx, AV_LOG_ERROR, "Block parameters invalid\n");
return AVERROR_INVALIDDATA;
}
av_log(avctx, AV_LOG_DEBUG,
"%dx%d diff start %d height %d\n",
i, j, s->diff_start, s->diff_height);

@@ -443,8 +443,8 @@ static int g2m_init_buffers(G2MContext *c)
int aligned_height;

if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
c->framebuf_stride = FFALIGN(c->width * 3, 16);
aligned_height = FFALIGN(c->height, 16);
c->framebuf_stride = FFALIGN(c->width + 15, 16) * 3;
aligned_height = c->height + 15;
av_free(c->framebuf);
c->framebuf = av_mallocz(c->framebuf_stride * aligned_height);
if (!c->framebuf)

@@ -453,7 +453,7 @@ static int g2m_init_buffers(G2MContext *c)
if (!c->synth_tile || !c->jpeg_tile ||
c->old_tile_w < c->tile_width ||
c->old_tile_h < c->tile_height) {
c->tile_stride = FFALIGN(c->tile_width * 3, 16);
c->tile_stride = FFALIGN(c->tile_width, 16) * 3;
aligned_height = FFALIGN(c->tile_height, 16);
av_free(c->synth_tile);
av_free(c->jpeg_tile);
@@ -2660,6 +2660,7 @@ static void flush_change(H264Context *h)
h->sync= 0;
h->list_count = 0;
h->current_slice = 0;
h->mmco_reset = 1;
}

/* forget old pics after a seek */

@@ -3434,7 +3435,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
} else {
/* Shorten frame num gaps so we don't have to allocate reference
* frames just to throw them away */
if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
if (h->frame_num != h->prev_frame_num) {
int unwrap_prev_frame_num = h->prev_frame_num;
int max_frame_num = 1 << h->sps.log2_max_frame_num;

@@ -3461,7 +3462,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);

/* Mark old field/frame as completed */
if (!last_pic_droppable && h0->cur_pic_ptr->tf.owner == h0->avctx) {
if (h0->cur_pic_ptr->tf.owner == h0->avctx) {
ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
last_pic_structure == PICT_BOTTOM_FIELD);
}

@@ -3470,7 +3471,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
/* Previous field is unmatched. Don't display it, but let it
* remain for reference if marked as such. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
if (last_pic_structure != PICT_FRAME) {
ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}

@@ -3480,7 +3481,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
* different frame_nums. Consider this field first in
* pair. Throw away previous field except for reference
* purposes. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
if (last_pic_structure != PICT_FRAME) {
ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}

@@ -3507,7 +3508,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
}
}

while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 && !h0->first_field &&
while (h->frame_num != h->prev_frame_num && !h0->first_field &&
h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",

@@ -3876,6 +3877,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)

if (h->ref_count[0]) h->er.last_pic = &h->ref_list[0][0];
if (h->ref_count[1]) h->er.next_pic = &h->ref_list[1][0];
h->er.ref_count = h->ref_count[0];

if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
av_log(h->avctx, AV_LOG_DEBUG,

@@ -4267,7 +4269,6 @@ static void er_add_slice(H264Context *h, int startx, int starty,
if (CONFIG_ERROR_RESILIENCE) {
ERContext *er = &h->er;

er->ref_count = h->ref_count[0];
ff_er_add_slice(er, startx, starty, endx, endy, status);
}
}

@@ -583,7 +583,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
if (mmco[i].opcode != MMCO_SHORT2LONG ||
!h->long_ref[mmco[i].long_arg] ||
h->long_ref[mmco[i].long_arg]->frame_num != frame_num) {
av_log(h->avctx, AV_LOG_ERROR, "mmco: unref short failure\n");
av_log(h->avctx, h->short_ref_count ? AV_LOG_ERROR : AV_LOG_DEBUG, "mmco: unref short failure\n");
err = AVERROR_INVALIDDATA;
}
continue;

@@ -733,7 +733,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
print_short_term(h);
print_long_term(h);

if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=1 + (h->picture_structure != PICT_FRAME) && h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=2 + (h->picture_structure != PICT_FRAME) && h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
h->cur_pic_ptr->sync |= 1;
if(!h->avctx->has_b_frames)
h->sync = 2;

@@ -71,6 +71,9 @@ void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int
}

static int get_color_type(const AVPixFmtDescriptor *desc) {
if (desc->flags & AV_PIX_FMT_FLAG_PAL)
return FF_COLOR_RGB;

if(desc->nb_components == 1 || desc->nb_components == 2)
return FF_COLOR_GRAY;

@@ -272,7 +272,7 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
reslevel->log2_prec_height) -
(reslevel->coord[1][0] >> reslevel->log2_prec_height);

reslevel->band = av_malloc_array(reslevel->nbands, sizeof(*reslevel->band));
reslevel->band = av_calloc(reslevel->nbands, sizeof(*reslevel->band));
if (!reslevel->band)
return AVERROR(ENOMEM);

@@ -368,7 +368,7 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
for (j = 0; j < 2; j++)
band->coord[1][j] = ff_jpeg2000_ceildiv(band->coord[1][j], dy);

band->prec = av_malloc_array(reslevel->num_precincts_x *
band->prec = av_calloc(reslevel->num_precincts_x *
(uint64_t)reslevel->num_precincts_y,
sizeof(*band->prec));
if (!band->prec)

@@ -509,10 +509,12 @@ void ff_jpeg2000_cleanup(Jpeg2000Component *comp, Jpeg2000CodingStyle *codsty)
for (bandno = 0; bandno < reslevel->nbands; bandno++) {
Jpeg2000Band *band = reslevel->band + bandno;
for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++) {
Jpeg2000Prec *prec = band->prec + precno;
av_freep(&prec->zerobits);
av_freep(&prec->cblkincl);
av_freep(&prec->cblk);
if (band->prec) {
Jpeg2000Prec *prec = band->prec + precno;
av_freep(&prec->zerobits);
av_freep(&prec->cblkincl);
av_freep(&prec->cblk);
}
}

av_freep(&band->prec);

@@ -243,6 +243,11 @@ static int get_siz(Jpeg2000DecoderContext *s)
s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz
ncomponents = bytestream2_get_be16u(&s->g); // CSiz

if (s->image_offset_x || s->image_offset_y) {
avpriv_request_sample(s->avctx, "Support for image offsets");
return AVERROR_PATCHWELCOME;
}

if (ncomponents <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid number of components: %d\n",
s->ncomponents);

@@ -273,6 +278,11 @@ static int get_siz(Jpeg2000DecoderContext *s)
s->sgnd[i] = !!(x & 0x80);
s->cdx[i] = bytestream2_get_byteu(&s->g);
s->cdy[i] = bytestream2_get_byteu(&s->g);
if ( !s->cdx[i] || s->cdx[i] == 3 || s->cdx[i] > 4
|| !s->cdy[i] || s->cdy[i] == 3 || s->cdy[i] > 4) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid sample seperation\n");
return AVERROR_INVALIDDATA;
}
log2_chroma_wh |= s->cdy[i] >> 1 << i * 4 | s->cdx[i] >> 1 << i * 4 + 2;
}

@@ -364,11 +374,18 @@ static int get_cox(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c)
return AVERROR_INVALIDDATA;
}

if (c->nreslevels <= s->reduction_factor) {
/* we are forced to update reduction_factor as its requested value is
not compatible with this bitstream, and as we might have used it
already in setup earlier we have to fail this frame until
reinitialization is implemented */
av_log(s->avctx, AV_LOG_ERROR, "reduction_factor too large for this bitstream, max is %d\n", c->nreslevels - 1);
s->reduction_factor = c->nreslevels - 1;
return AVERROR(EINVAL);
}

/* compute number of resolution levels to decode */
if (c->nreslevels < s->reduction_factor)
c->nreslevels2decode = 1;
else
c->nreslevels2decode = c->nreslevels - s->reduction_factor;
c->nreslevels2decode = c->nreslevels - s->reduction_factor;

c->log2_cblk_width = (bytestream2_get_byteu(&s->g) & 15) + 2; // cblk width
c->log2_cblk_height = (bytestream2_get_byteu(&s->g) & 15) + 2; // cblk height

@@ -379,6 +396,11 @@ static int get_cox(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c)
return AVERROR_INVALIDDATA;
}

if (c->log2_cblk_width > 6 || c->log2_cblk_height > 6) {
avpriv_request_sample(s->avctx, "cblk size > 64");
return AVERROR_PATCHWELCOME;
}

c->cblk_style = bytestream2_get_byteu(&s->g);
if (c->cblk_style != 0) { // cblk style
av_log(s->avctx, AV_LOG_WARNING, "extra cblk styles %X\n", c->cblk_style);

@@ -862,6 +884,10 @@ static int jpeg2000_decode_packets(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile
prcx = ff_jpeg2000_ceildivpow2(x, reducedresno) >> rlevel->log2_prec_width;
prcy = ff_jpeg2000_ceildivpow2(y, reducedresno) >> rlevel->log2_prec_height;
precno = prcx + rlevel->num_precincts_x * prcy;

if (prcx >= rlevel->num_precincts_x || prcy >= rlevel->num_precincts_y)
return AVERROR_PATCHWELCOME;

for (layno = 0; layno < tile->codsty[0].nlayers; layno++) {
if ((ret = jpeg2000_decode_packet(s, codsty, rlevel,
precno, layno,

@@ -1020,6 +1046,9 @@ static int decode_cblk(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *codsty,
int bpass_csty_symbol = codsty->cblk_style & JPEG2000_CBLK_BYPASS;
int vert_causal_ctx_csty_symbol = codsty->cblk_style & JPEG2000_CBLK_VSC;

av_assert0(width <= JPEG2000_MAX_CBLKW);
av_assert0(height <= JPEG2000_MAX_CBLKH);

for (y = 0; y < height; y++)
memset(t1->data[y], 0, width * sizeof(**t1->data));

@@ -1122,6 +1151,12 @@ static void mct_decode(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile)
int32_t *src[3], i0, i1, i2;
float *srcf[3], i0f, i1f, i2f;

for (i = 1; i < 3; i++)
if (tile->codsty[0].transform != tile->codsty[i].transform) {
av_log(s->avctx, AV_LOG_ERROR, "Transforms mismatch, MCT not supported\n");
return;
}

for (i = 0; i < 3; i++)
if (tile->codsty[0].transform == FF_DWT97)
srcf[i] = tile->comp[i].f_data;

@@ -1254,12 +1289,12 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,

y = tile->comp[compno].coord[1][0] - s->image_offset_y;
line = picture->data[plane] + y * picture->linesize[plane];
line = picture->data[plane] + y / s->cdy[compno] * picture->linesize[plane];
for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) {
uint8_t *dst;

x = tile->comp[compno].coord[0][0] - s->image_offset_x;
dst = line + x * pixelsize + compno*!planar;
dst = line + x / s->cdx[compno] * pixelsize + compno*!planar;

if (codsty->transform == FF_DWT97) {
for (; x < w; x += s->cdx[compno]) {

@@ -1300,12 +1335,12 @@ static int jpeg2000_decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile,
plane = s->cdef[compno] ? s->cdef[compno]-1 : (s->ncomponents-1);

y = tile->comp[compno].coord[1][0] - s->image_offset_y;
linel = (uint16_t *)picture->data[plane] + y * (picture->linesize[plane] >> 1);
linel = (uint16_t *)picture->data[plane] + y / s->cdy[compno] * (picture->linesize[plane] >> 1);
for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) {
uint16_t *dst;

x = tile->comp[compno].coord[0][0] - s->image_offset_x;
dst = linel + (x * pixelsize + compno*!planar);
dst = linel + (x / s->cdx[compno] * pixelsize + compno*!planar);
if (codsty->transform == FF_DWT97) {
for (; x < w; x += s-> cdx[compno]) {
int val = lrintf(*datap) + (1 << (cbps - 1));

@@ -148,6 +148,8 @@ static inline int ls_get_code_runterm(GetBitContext *gb, JLSState *state,
ret = ret >> 1;
}

if(FFABS(ret) > 0xFFFF)
return -0x10000;
/* update state */
state->A[Q] += FFABS(ret) - RItype;
ret *= state->twonear;

@@ -380,7 +380,7 @@ static const AVOption libopus_options[] = {
{ "voip", "Favor improved speech intelligibility", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_VOIP }, 0, 0, FLAGS, "application" },
{ "audio", "Favor faithfulness to the input", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_AUDIO }, 0, 0, FLAGS, "application" },
{ "lowdelay", "Restrict to only the lowest delay modes", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_RESTRICTED_LOWDELAY }, 0, 0, FLAGS, "application" },
{ "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 10.0 }, 2.5, 60.0, FLAGS },
{ "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 20.0 }, 2.5, 60.0, FLAGS },
{ "packet_loss", "Expected packet loss percentage", OFFSET(packet_loss), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 100, FLAGS },
{ "vbr", "Variable bit rate mode", OFFSET(vbr), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 2, FLAGS, "vbr" },
{ "off", "Use constant bit rate", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "vbr" },

@@ -20,7 +20,7 @@
*/

#include "libavutil/common.h"
#include "libavutil/lls.h"
#include "libavutil/lls2.h"

#define LPC_USE_DOUBLE
#include "lpc.h"

@@ -208,7 +208,7 @@ int ff_lpc_calc_coefs(LPCContext *s,
}

if (lpc_type == FF_LPC_TYPE_CHOLESKY) {
LLSModel m[2];
LLSModel2 m[2];
LOCAL_ALIGNED(32, double, var, [FFALIGN(MAX_LPC_ORDER+1,4)]);
double av_uninit(weight);
memset(var, 0, FFALIGN(MAX_LPC_ORDER+1,4)*sizeof(*var));

@@ -217,7 +217,7 @@ int ff_lpc_calc_coefs(LPCContext *s,
m[0].coeff[max_order-1][j] = -lpc[max_order-1][j];

for(; pass<lpc_passes; pass++){
avpriv_init_lls(&m[pass&1], max_order);
avpriv_init_lls2(&m[pass&1], max_order);

weight=0;
for(i=max_order; i<blocksize; i++){

@@ -238,7 +238,7 @@ int ff_lpc_calc_coefs(LPCContext *s,

m[pass&1].update_lls(&m[pass&1], var);
}
avpriv_solve_lls(&m[pass&1], 0.001, 0);
avpriv_solve_lls2(&m[pass&1], 0.001, 0);
}

for(i=0; i<max_order; i++){

@@ -780,6 +780,12 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int p
int resync_mb_y = 0;
int resync_mb_x = 0;

if (s->nb_components != 3 && s->nb_components != 4)
return AVERROR_INVALIDDATA;
if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
return AVERROR_INVALIDDATA;

s->restart_count = s->restart_interval;

av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size,

@@ -2038,7 +2038,6 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
if (s->codec_tag == AV_RL32("BW10")) {
s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
} else {
exchange_uv(s); // common init reset pblocks, so we swap them here
s->swap_uv = 1; // in case of xvmc we need to swap uv for each MB
s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
}

@@ -537,6 +537,15 @@ fail:
return ret;
}

static void exchange_uv(MpegEncContext *s)
{
int16_t (*tmp)[64];

tmp = s->pblocks[4];
s->pblocks[4] = s->pblocks[5];
s->pblocks[5] = tmp;
}

static int init_duplicate_context(MpegEncContext *s)
{
int y_size = s->b8_stride * (2 * s->mb_height + 1);

@@ -567,6 +576,8 @@ static int init_duplicate_context(MpegEncContext *s)
for (i = 0; i < 12; i++) {
s->pblocks[i] = &s->block[i];
}
if (s->avctx->codec_tag == AV_RL32("VCR2"))
exchange_uv(s);

if (s->out_format == FMT_H263) {
/* ac values */

@@ -641,6 +652,8 @@ int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
for (i = 0; i < 12; i++) {
dst->pblocks[i] = &dst->block[i];
}
if (dst->avctx->codec_tag == AV_RL32("VCR2"))
exchange_uv(dst);
if (!dst->edge_emu_buffer &&
(ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "

@@ -237,8 +237,10 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
if(next == END_NOT_FOUND){
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);

if(!new_buffer)
if(!new_buffer) {
pc->index = 0;
return AVERROR(ENOMEM);
}
pc->buffer = new_buffer;
memcpy(&pc->buffer[pc->index], *buf, *buf_size);
pc->index += *buf_size;

@@ -251,9 +253,11 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
/* append to buffer */
if(pc->index){
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);

if(!new_buffer)
if(!new_buffer) {
pc->overread_index =
pc->index = 0;
return AVERROR(ENOMEM);
}
pc->buffer = new_buffer;
if (next > -FF_INPUT_BUFFER_PADDING_SIZE)
memcpy(&pc->buffer[pc->index], *buf,
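A consolidated sketch of the error path added above (illustrative, relying on av_fast_realloc() returning NULL on failure while leaving the existing buffer untouched; `needed` stands in for whichever size expression the call site uses): any bookkeeping that assumed the grow succeeded has to be rolled back before returning.

    void *new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size,
                                       needed + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!new_buffer) {
        pc->overread_index =          /* reset indices that point at data      */
        pc->index          = 0;       /* we can no longer append to            */
        return AVERROR(ENOMEM);       /* pc->buffer itself stays valid and is  */
    }                                 /* freed later by the normal cleanup     */
    pc->buffer = new_buffer;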
@@ -31,7 +31,7 @@
static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
{
long i;
for (i = 0; i <= w - sizeof(long); i += sizeof(long)) {
for (i = 0; i <= w - (int)sizeof(long); i += sizeof(long)) {
long a = *(long *)(src1 + i);
long b = *(long *)(src2 + i);
*(long *)(dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);

@@ -85,7 +85,7 @@ static void rpza_decode_stream(RpzaContext *s)
unsigned short *pixels = (unsigned short *)s->frame.data[0];

int row_ptr = 0;
int pixel_ptr = 0;
int pixel_ptr = -4;
int block_ptr;
int pixel_x, pixel_y;
int total_blocks;

@@ -141,6 +141,7 @@ static void rpza_decode_stream(RpzaContext *s)
colorA = AV_RB16 (&s->buf[stream_ptr]);
stream_ptr += 2;
while (n_blocks--) {
ADVANCE_BLOCK()
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++){

@@ -149,7 +150,6 @@ static void rpza_decode_stream(RpzaContext *s)
}
block_ptr += row_inc;
}
ADVANCE_BLOCK();
}
break;

@@ -188,6 +188,7 @@ static void rpza_decode_stream(RpzaContext *s)
if (s->size - stream_ptr < n_blocks * 4)
return;
while (n_blocks--) {
ADVANCE_BLOCK();
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
index = s->buf[stream_ptr++];

@@ -198,7 +199,6 @@ static void rpza_decode_stream(RpzaContext *s)
}
block_ptr += row_inc;
}
ADVANCE_BLOCK();
}
break;

@@ -206,6 +206,7 @@ static void rpza_decode_stream(RpzaContext *s)
case 0x00:
if (s->size - stream_ptr < 16)
return;
ADVANCE_BLOCK();
block_ptr = row_ptr + pixel_ptr;
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
for (pixel_x = 0; pixel_x < 4; pixel_x++){

@@ -219,7 +220,6 @@ static void rpza_decode_stream(RpzaContext *s)
}
block_ptr += row_inc;
}
ADVANCE_BLOCK();
break;

/* Unknown opcode */

@@ -204,7 +204,8 @@ static const char *read_ts(const char *buf, int *ts_start, int *ts_end,
"%*[ ]X1:%u X2:%u Y1:%u Y2:%u",
&hs, &ms, &ss, ts_start, &he, &me, &se, ts_end,
x1, x2, y1, y2);
buf += strcspn(buf, "\n") + 1;
buf += strcspn(buf, "\n");
buf += !!*buf;
if (c >= 8) {
*ts_start = 100*(ss + 60*(ms + 60*hs)) + *ts_start/10;
*ts_end   = 100*(se + 60*(me + 60*he)) + *ts_end /10;

@@ -945,14 +945,14 @@ static av_cold int decode_init(AVCodecContext *avctx)
if (!l->Y1_base || !l->Y2_base || !l->U1_base ||
!l->V1_base || !l->U2_base || !l->V2_base ||
!l->last || !l->clast) {
av_freep(l->Y1_base);
av_freep(l->Y2_base);
av_freep(l->U1_base);
av_freep(l->U2_base);
av_freep(l->V1_base);
av_freep(l->V2_base);
av_freep(l->last);
av_freep(l->clast);
av_freep(&l->Y1_base);
av_freep(&l->Y2_base);
av_freep(&l->U1_base);
av_freep(&l->U2_base);
av_freep(&l->V1_base);
av_freep(&l->V2_base);
av_freep(&l->last);
av_freep(&l->clast);
return AVERROR(ENOMEM);
}
l->Y1 = l->Y1_base + l->y_stride * 4 + 4;

@@ -163,7 +163,8 @@ av_cold void avcodec_register(AVCodec *codec)
avcodec_init();
p = &first_avcodec;
codec->next = NULL;
while(avpriv_atomic_ptr_cas((void * volatile *)p, NULL, codec))

while(*p || avpriv_atomic_ptr_cas((void * volatile *)p, NULL, codec))
p = &(*p)->next;

if (codec->init_static_data)
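The registration loops above (and the matching ones in av_register_hwaccel(), avfilter_register() and the libavformat register functions further down) now test *p before attempting the compare-and-swap. A minimal stand-alone sketch of the same append-to-tail idea, written with C11 atomics rather than FFmpeg's avpriv wrappers and meant purely as an illustration:

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct Node {
        struct Node *_Atomic next;
    } Node;

    /* Walk towards the tail; only slots that currently look empty are attacked
     * with a compare-and-swap, so occupied slots cost a plain load instead of an
     * atomic read-modify-write. Losing a race just means the slot filled up and
     * we keep walking. */
    static void list_append(Node *_Atomic *slot, Node *node)
    {
        atomic_store(&node->next, NULL);
        for (;;) {
            Node *cur = atomic_load(slot);
            if (!cur) {
                Node *expected = NULL;
                if (atomic_compare_exchange_strong(slot, &expected, node))
                    return;          /* appended at the tail */
                cur = expected;      /* another thread appended first */
            }
            slot = &cur->next;       /* advance towards the new tail */
        }
    }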
@@ -1899,10 +1900,17 @@ static int add_metadata_from_side_data(AVCodecContext *avctx, AVFrame *frame)
if (!side_metadata)
goto end;
end = side_metadata + size;
if (size && end[-1])
return AVERROR_INVALIDDATA;
while (side_metadata < end) {
const uint8_t *key = side_metadata;
const uint8_t *val = side_metadata + strlen(key) + 1;
int ret = av_dict_set(avpriv_frame_get_metadatap(frame), key, val, 0);
int ret;

if (val >= end)
return AVERROR_INVALIDDATA;

ret = av_dict_set(avpriv_frame_get_metadatap(frame), key, val, 0);
if (ret < 0)
break;
side_metadata = val + strlen(val) + 1;
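The loop above walks a packed "key\0value\0key\0value\0..." blob. A self-contained sketch of the same validation (illustrative only; walk_packed_dict is not an FFmpeg function): the blob must end in a NUL, and every key must be followed by a value that still lies inside the buffer.

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    static int walk_packed_dict(const uint8_t *p, size_t size)
    {
        const uint8_t *end = p + size;

        if (size && end[-1])                 /* not NUL-terminated: reject */
            return -1;
        while (p < end) {
            const uint8_t *key = p;
            const uint8_t *val = key + strlen((const char *)key) + 1;

            if (val >= end)                  /* key without a value */
                return -1;
            /* ... consume (key, val) here ... */
            p = val + strlen((const char *)val) + 1;
        }
        return 0;
    }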
@@ -2271,6 +2279,16 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
int did_split = av_packet_split_side_data(&tmp);
//apply_param_change(avctx, &tmp);

if (did_split) {
/* FFMIN() prevents overflow in case the packet wasn't allocated with
* proper padding.
* If the side data is smaller than the buffer padding size, the
* remaining bytes should have already been filled with zeros by the
* original packet allocation anyway. */
memset(tmp.data + tmp.size, 0,
FFMIN(avpkt->size - tmp.size, FF_INPUT_BUFFER_PADDING_SIZE));
}

pkt_recoded = tmp;
ret = recode_subtitle(avctx, &pkt_recoded, &tmp);
if (ret < 0) {

@@ -2996,7 +3014,7 @@ void av_register_hwaccel(AVHWAccel *hwaccel)
{
AVHWAccel **p = &first_hwaccel;
hwaccel->next = NULL;
while(avpriv_atomic_ptr_cas((void * volatile *)p, NULL, hwaccel))
while(*p || avpriv_atomic_ptr_cas((void * volatile *)p, NULL, hwaccel))
p = &(*p)->next;
}

@@ -621,6 +621,10 @@ static void rotate_luts(VC1Context *v)
INIT_LUT(32, 0, v->curr_luty[0], v->curr_lutuv[0], 0);
INIT_LUT(32, 0, v->curr_luty[1], v->curr_lutuv[1], 0);
v->curr_use_ic = 0;
if (v->curr_luty == v->next_luty) {
// If we just initialized next_lut, clear next_use_ic to match.
v->next_use_ic = 0;
}
}

int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)

@@ -512,6 +512,10 @@ static int wma_decode_block(WMACodecContext *s)
coef escape coding */
total_gain = 1;
for(;;) {
if (get_bits_left(&s->gb) < 7) {
av_log(s->avctx, AV_LOG_ERROR, "total_gain overread\n");
return AVERROR_INVALIDDATA;
}
a = get_bits(&s->gb, 7);
total_gain += a;
if (a != 127)

@@ -248,6 +248,10 @@ av_cold static int lavfi_read_header(AVFormatContext *avctx)
ret = av_opt_set_int_list(sink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
goto end;
ret = av_opt_set_int(sink, "all_channel_counts", 1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
goto end;
}

lavfi->sinks[i] = sink;

@@ -685,6 +685,10 @@ static int v4l2_set_parameters(AVFormatContext *s1)
standard.index = i;
if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
ret = AVERROR(errno);
if (ret == AVERROR(EINVAL)) {
tpf = &streamparm.parm.capture.timeperframe;
break;
}
av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMSTD): %s\n", av_err2str(ret));
return ret;
}

@@ -929,6 +933,9 @@ static int v4l2_read_header(AVFormatContext *s1)
if (codec_id == AV_CODEC_ID_RAWVIDEO)
st->codec->codec_tag =
avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
else if (codec_id == AV_CODEC_ID_H264) {
st->need_parsing = AVSTREAM_PARSE_HEADERS;
}
if (desired_format == V4L2_PIX_FMT_YVU420)
st->codec->codec_tag = MKTAG('Y', 'V', '1', '2');
else if (desired_format == V4L2_PIX_FMT_YVU410)

@@ -66,7 +66,7 @@ static av_cold int init(AVFilterContext *ctx)
(ret = ff_parse_sample_format(&aconvert->out_sample_fmt, aconvert->format_str, ctx)) < 0)
return ret;
if (aconvert->channel_layout_str && strcmp(aconvert->channel_layout_str, "auto"))
return ff_parse_channel_layout(&aconvert->out_chlayout, aconvert->channel_layout_str, ctx);
return ff_parse_channel_layout(&aconvert->out_chlayout, NULL, aconvert->channel_layout_str, ctx);
return ret;
}

@@ -46,7 +46,6 @@ typedef struct PanContext {
double gain[MAX_CHANNELS][MAX_CHANNELS];
int64_t need_renorm;
int need_renumber;
int nb_input_channels;
int nb_output_channels;

int pure_gains;

@@ -116,10 +115,10 @@ static av_cold int init(AVFilterContext *ctx)
if (!args)
return AVERROR(ENOMEM);
arg = av_strtok(args, "|", &tokenizer);
ret = ff_parse_channel_layout(&pan->out_channel_layout, arg, ctx);
ret = ff_parse_channel_layout(&pan->out_channel_layout,
&pan->nb_output_channels, arg, ctx);
if (ret < 0)
goto fail;
pan->nb_output_channels = av_get_channel_layout_nb_channels(pan->out_channel_layout);

/* parse channel specifications */
while ((arg = arg0 = av_strtok(NULL, "|", &tokenizer))) {

@@ -239,12 +238,14 @@ static int query_formats(AVFilterContext *ctx)
ff_set_common_samplerates(ctx, formats);

// inlink supports any channel layout
layouts = ff_all_channel_layouts();
layouts = ff_all_channel_counts();
ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);

// outlink supports only requested output channel layout
layouts = NULL;
ff_add_channel_layout(&layouts, pan->out_channel_layout);
ff_add_channel_layout(&layouts,
pan->out_channel_layout ? pan->out_channel_layout :
FF_COUNT2LAYOUT(pan->nb_output_channels));
ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
return 0;
}

@@ -257,7 +258,6 @@ static int config_props(AVFilterLink *link)
int i, j, k, r;
double t;

pan->nb_input_channels = av_get_channel_layout_nb_channels(link->channel_layout);
if (pan->need_renumber) {
// input channels were given by their name: renumber them
for (i = j = 0; i < MAX_CHANNELS; i++) {

@@ -271,7 +271,7 @@ static int config_props(AVFilterLink *link)

// sanity check; can't be done in query_formats since the inlink
// channel layout is unknown at that time
if (pan->nb_input_channels > SWR_CH_MAX ||
if (link->channels > SWR_CH_MAX ||
pan->nb_output_channels > SWR_CH_MAX) {
av_log(ctx, AV_LOG_ERROR,
"libswresample support a maximum of %d channels. "

@@ -286,6 +286,10 @@ static int config_props(AVFilterLink *link)
0, ctx);
if (!pan->swr)
return AVERROR(ENOMEM);
if (!link->channel_layout)
av_opt_set_int(pan->swr, "ich", link->channels, 0);
if (!pan->out_channel_layout)
av_opt_set_int(pan->swr, "och", pan->nb_output_channels, 0);

// gains are pure, init the channel mapping
if (pan->pure_gains) {

@@ -293,7 +297,7 @@ static int config_props(AVFilterLink *link)
// get channel map from the pure gains
for (i = 0; i < pan->nb_output_channels; i++) {
int ch_id = -1;
for (j = 0; j < pan->nb_input_channels; j++) {
for (j = 0; j < link->channels; j++) {
if (pan->gain[i][j]) {
ch_id = j;
break;

@@ -311,7 +315,7 @@ static int config_props(AVFilterLink *link)
if (!((pan->need_renorm >> i) & 1))
continue;
t = 0;
for (j = 0; j < pan->nb_input_channels; j++)
for (j = 0; j < link->channels; j++)
t += pan->gain[i][j];
if (t > -1E-5 && t < 1E-5) {
// t is almost 0 but not exactly, this is probably a mistake

@@ -320,7 +324,7 @@ static int config_props(AVFilterLink *link)
"Degenerate coefficients while renormalizing\n");
continue;
}
for (j = 0; j < pan->nb_input_channels; j++)
for (j = 0; j < link->channels; j++)
pan->gain[i][j] /= t;
}
av_opt_set_int(pan->swr, "icl", link->channel_layout, 0);

@@ -335,7 +339,7 @@ static int config_props(AVFilterLink *link)
// summary
for (i = 0; i < pan->nb_output_channels; i++) {
cur = buf;
for (j = 0; j < pan->nb_input_channels; j++) {
for (j = 0; j < link->channels; j++) {
r = snprintf(cur, buf + sizeof(buf) - cur, "%s%.3g i%d",
j ? " + " : "", pan->gain[i][j], j);
cur += FFMIN(buf + sizeof(buf) - cur, r);

@@ -109,7 +109,7 @@ static int init(AVFilterContext *ctx)

if (eval->chlayout_str) {
int n;
ret = ff_parse_channel_layout(&eval->chlayout, eval->chlayout_str, ctx);
ret = ff_parse_channel_layout(&eval->chlayout, NULL, eval->chlayout_str, ctx);
if (ret < 0)
goto end;

@@ -68,7 +68,7 @@ static int init(AVFilterContext *ctx)
null->sample_rate_str, ctx)) < 0)
return ret;

if ((ret = ff_parse_channel_layout(&null->channel_layout,
if ((ret = ff_parse_channel_layout(&null->channel_layout, NULL,
null->channel_layout_str, ctx)) < 0)
return ret;

@@ -110,8 +110,8 @@ void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,

(*count)++;
for (i = idx + 1; i < *count; i++)
if (*links[i])
(*(unsigned *)((uint8_t *) *links[i] + padidx_off))++;
if ((*links)[i])
(*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
}

int avfilter_link(AVFilterContext *src, unsigned srcpad,

@@ -468,7 +468,7 @@ int avfilter_register(AVFilter *filter)

filter->next = NULL;

while(avpriv_atomic_ptr_cas((void * volatile *)f, NULL, filter))
while(*f || avpriv_atomic_ptr_cas((void * volatile *)f, NULL, filter))
f = &(*f)->next;

return 0;

@@ -575,6 +575,10 @@ static int pick_format(AVFilterLink *link, AVFilterLink *ref)
av_log(link->src, AV_LOG_ERROR, "Cannot select channel layout for"
" the link between filters %s and %s.\n", link->src->name,
link->dst->name);
if (!link->in_channel_layouts->all_counts)
av_log(link->src, AV_LOG_ERROR, "Unknown channel layouts not "
"supported, try specifying a channel layout using "
"'aformat=channel_layouts=something'.\n");
return AVERROR(EINVAL);
}
link->in_channel_layouts->nb_channel_layouts = 1;

@@ -657,7 +661,8 @@ static int reduce_formats_on_filter(AVFilterContext *filter)
if (inlink->type != outlink->type || fmts->nb_channel_layouts == 1)
continue;

if (fmts->all_layouts) {
if (fmts->all_layouts &&
(!FF_LAYOUT2COUNT(fmt) || fmts->all_counts)) {
/* Turn the infinite list into a singleton */
fmts->all_layouts = fmts->all_counts = 0;
ff_add_channel_layout(&outlink->in_channel_layouts, fmt);

@@ -629,10 +629,21 @@ int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx)
return 0;
}

int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx)
int ff_parse_channel_layout(int64_t *ret, int *nret, const char *arg,
void *log_ctx)
{
char *tail;
int64_t chlayout = av_get_channel_layout(arg);
int64_t chlayout, count;

if (nret) {
count = strtol(arg, &tail, 10);
if (*tail == 'c' && !tail[1] && count > 0 && count < 63) {
*nret = count;
*ret = 0;
return 0;
}
}
chlayout = av_get_channel_layout(arg);
if (chlayout == 0) {
chlayout = strtol(arg, &tail, 10);
if (*tail || chlayout == 0) {

@@ -641,6 +652,8 @@ int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx)
}
}
*ret = chlayout;
if (nret)
*nret = av_get_channel_layout_nb_channels(chlayout);
return 0;
}

@@ -217,11 +217,14 @@ int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx);
* Parse a channel layout or a corresponding integer representation.
*
* @param ret 64bit integer pointer to where the value should be written.
* @param nret integer pointer to the number of channels;
*             if not NULL, then unknown channel layouts are accepted
* @param arg string to parse
* @param log_ctx log context
* @return 0 in case of success, a negative AVERROR code on error
*/
int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx);
int ff_parse_channel_layout(int64_t *ret, int *nret, const char *arg,
void *log_ctx);

void ff_update_link_current_pts(AVFilterLink *link, int64_t pts);
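With the extra nret argument, callers that can cope with layout-less streams pass a channel-count pointer. A hedged usage sketch (a fragment only; `ctx` stands for whatever AVClass-style log context the call site has, and the usual log/inttypes headers are assumed):

    int64_t layout = 0;
    int     nb_ch  = 0;

    /* "stereo" or "0x3" fill the layout mask as before; a bare "<N>c" spec such
     * as "5c" is only accepted when nret is non-NULL, and then reports just the
     * channel count while leaving the mask at 0. */
    if (ff_parse_channel_layout(&layout, &nb_ch, "5c", ctx) >= 0)
        av_log(ctx, AV_LOG_DEBUG, "layout=0x%"PRIx64" channels=%d\n", layout, nb_ch);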
@@ -189,7 +189,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
}

/* now wait for the next timestamp */
if (buf->pts == AV_NOPTS_VALUE) {
if (buf->pts == AV_NOPTS_VALUE || av_fifo_size(s->fifo) <= 0) {
return write_to_fifo(s->fifo, buf);
}

@@ -1328,7 +1328,7 @@ static int avi_read_idx1(AVFormatContext *s, int size)
st = s->streams[index];
ast = st->priv_data;

if(first_packet && first_packet_pos && len) {
if (first_packet && first_packet_pos) {
data_offset = first_packet_pos - pos;
first_packet = 0;
}

@@ -36,6 +36,7 @@
#include <windows.h>
#undef EXTERN_C
#include "compat/avisynth/avisynth_c.h"
#include "compat/avisynth/avisynth_c_25.h"
#define AVISYNTH_LIB "avisynth"
#else
#include <dlfcn.h>

@@ -471,9 +472,20 @@ static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, int dis
for (i = 0; i < avs->n_planes; i++) {
plane = avs->planes[i];
src_p = avs_get_read_ptr_p(frame, plane);
pitch = avs_get_pitch_p(frame, plane);

#ifdef _WIN32
if (avs_library->avs_get_version(avs->clip) == 3) {
rowsize = avs_get_row_size_p_25(frame, plane);
planeheight = avs_get_height_p_25(frame, plane);
} else {
rowsize = avs_get_row_size_p(frame, plane);
planeheight = avs_get_height_p(frame, plane);
}
#else
rowsize = avs_get_row_size_p(frame, plane);
planeheight = avs_get_height_p(frame, plane);
pitch = avs_get_pitch_p(frame, plane);
#endif

// Flip RGB video.
if (avs_is_rgb24(avs->vi) || avs_is_rgb(avs->vi)) {

@@ -481,26 +493,7 @@ static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, int dis
pitch = -pitch;
}

// An issue with avs_bit_blt on 2.5.8 prevents video from working correctly.
// This problem doesn't exist for 2.6 and AvxSynth, so enable the workaround
// for 2.5.8 only. This only displays the warning and exits if the script has
// video. 2.5.8's internal interface version is 3, so avs_get_version allows
// it to work only in the circumstance that the interface is 5 or higher (4 is
// unused). There's a strong chance that AvxSynth, having been based on 2.5.8,
// would also be identified as interface version 3, but since AvxSynth doesn't
// suffer from this problem, special-case it.
#ifdef _WIN32
if (avs_library->avs_get_version(avs->clip) > 3) {
avs_library->avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch, rowsize, planeheight);
} else {
av_log(s, AV_LOG_ERROR, "Video input from AviSynth 2.5.8 is not supported. Please upgrade to 2.6.\n");
avs->error = 1;
av_freep(&pkt->data);
return AVERROR_UNKNOWN;
}
#else
avs_library->avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch, rowsize, planeheight);
#endif
dst_p += rowsize * planeheight;
}

@@ -54,7 +54,7 @@ void av_register_input_format(AVInputFormat *format)
AVInputFormat **p = &first_iformat;

format->next = NULL;
while(avpriv_atomic_ptr_cas((void * volatile *)p, NULL, format))
while(*p || avpriv_atomic_ptr_cas((void * volatile *)p, NULL, format))
p = &(*p)->next;
}

@@ -63,7 +63,7 @@ void av_register_output_format(AVOutputFormat *format)
AVOutputFormat **p = &first_oformat;

format->next = NULL;
while(avpriv_atomic_ptr_cas((void * volatile *)p, NULL, format))
while(*p || avpriv_atomic_ptr_cas((void * volatile *)p, NULL, format))
p = &(*p)->next;
}

@@ -133,7 +133,8 @@ static int ftp_status(FTPContext *s, char **line, const int response_codes[])

while (!code_found || dash) {
if ((err = ftp_get_line(s, buf, sizeof(buf))) < 0) {
av_bprint_finalize(&line_buffer, NULL);
if (line)
av_bprint_finalize(&line_buffer, NULL);
return err;
}

@@ -164,16 +164,26 @@ static int gif_read_ext(AVFormatContext *s)
if ((ret = avio_skip(pb, sb_size - 3)) < 0 )
return ret;
} else if (ext_label == GIF_APP_EXT_LABEL) {
uint8_t netscape_ext[sizeof(NETSCAPE_EXT_STR)-1 + 2];
uint8_t data[256];

if ((sb_size = avio_r8(pb)) != strlen(NETSCAPE_EXT_STR))
return 0;
ret = avio_read(pb, netscape_ext, sizeof(netscape_ext));
if (ret < sizeof(netscape_ext))
sb_size = avio_r8(pb);
ret = avio_read(pb, data, sb_size);
if (ret < 0 || !sb_size)
return ret;
gdc->total_iter = avio_rl16(pb);
if (gdc->total_iter == 0)
gdc->total_iter = -1;

if (sb_size == strlen(NETSCAPE_EXT_STR)) {
sb_size = avio_r8(pb);
ret = avio_read(pb, data, sb_size);
if (ret < 0 || !sb_size)
return ret;

if (sb_size == 3 && data[0] == 1) {
gdc->total_iter = AV_RL16(data+1);

if (gdc->total_iter == 0)
gdc->total_iter = -1;
}
}
}

if ((ret = gif_skip_subblocks(pb)) < 0)

@@ -21,6 +21,7 @@
*/

#include "libavutil/intreadwrite.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"

@@ -37,6 +38,7 @@ typedef struct {
int split_planes; /**< use independent file for each Y, U, V plane */
char path[1024];
int update;
const char *muxer;
} VideoMuxData;

static int write_header(AVFormatContext *s)

@@ -44,7 +46,6 @@ static int write_header(AVFormatContext *s)
VideoMuxData *img = s->priv_data;
AVStream *st = s->streams[0];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(st->codec->pix_fmt);
const char *str;

av_strlcpy(img->path, s->filename, sizeof(img->path));

@@ -54,14 +55,18 @@ static int write_header(AVFormatContext *s)
else
img->is_pipe = 1;

str = strrchr(img->path, '.');
if (st->codec->codec_id == AV_CODEC_ID_GIF) {
img->muxer = "gif";
} else if (st->codec->codec_id == AV_CODEC_ID_RAWVIDEO) {
const char *str = strrchr(img->path, '.');
/* TODO: reindent */
img->split_planes = str
&& !av_strcasecmp(str + 1, "y")
&& s->nb_streams == 1
&& st->codec->codec_id == AV_CODEC_ID_RAWVIDEO
&& desc
&&(desc->flags & AV_PIX_FMT_FLAG_PLANAR)
&& desc->nb_components >= 3;
}
return 0;
}

@@ -115,6 +120,37 @@ static int write_packet(AVFormatContext *s, AVPacket *pkt)
avio_write(pb[3], pkt->data + ysize + 2*usize, ysize);
avio_close(pb[3]);
}
} else if (img->muxer) {
int ret;
AVStream *st;
AVPacket pkt2 = {0};
AVFormatContext *fmt = NULL;

av_assert0(!img->split_planes);

ret = avformat_alloc_output_context2(&fmt, NULL, img->muxer, s->filename);
if (ret < 0)
return ret;
st = avformat_new_stream(fmt, NULL);
if (!st) {
avformat_free_context(fmt);
return AVERROR(ENOMEM);
}
st->id = pkt->stream_index;

fmt->pb = pb[0];
if ((ret = av_copy_packet(&pkt2, pkt)) < 0 ||
(ret = av_dup_packet(&pkt2)) < 0 ||
(ret = avcodec_copy_context(st->codec, s->streams[0]->codec)) < 0 ||
(ret = avformat_write_header(fmt, NULL)) < 0 ||
(ret = av_interleaved_write_frame(fmt, &pkt2)) < 0 ||
(ret = av_write_trailer(fmt)) < 0) {
av_free_packet(&pkt2);
avformat_free_context(fmt);
return ret;
}
av_free_packet(&pkt2);
avformat_free_context(fmt);
} else {
avio_write(pb[0], pkt->data, pkt->size);
}
@@ -63,7 +63,7 @@ static int jacosub_probe(AVProbeData *p)
|
||||
return AVPROBE_SCORE_EXTENSION + 1;
|
||||
return 0;
|
||||
}
|
||||
ptr += strcspn(ptr, "\n") + 1;
|
||||
ptr += ff_subtitles_next_line(ptr);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@@ -257,6 +257,7 @@ static int lxf_read_header(AVFormatContext *s)
|
||||
st->codec->bit_rate = 1000000 * ((video_params >> 14) & 0xFF);
|
||||
st->codec->codec_tag = video_params & 0xF;
|
||||
st->codec->codec_id = ff_codec_get_id(lxf_tags, st->codec->codec_tag);
|
||||
st->need_parsing = AVSTREAM_PARSE_HEADERS;
|
||||
|
||||
av_log(s, AV_LOG_DEBUG, "record: %x = %i-%02i-%02i\n",
|
||||
record_date, 1900 + (record_date & 0x7F), (record_date >> 7) & 0xF,
|
||||
|
@@ -1737,8 +1737,10 @@ static int matroska_read_header(AVFormatContext *s)
|
||||
avio_wl16(&b, 1);
|
||||
avio_wl16(&b, track->audio.channels);
|
||||
avio_wl16(&b, track->audio.bitdepth);
|
||||
if (track->audio.out_samplerate < 0 || track->audio.out_samplerate > INT_MAX)
|
||||
return AVERROR_INVALIDDATA;
|
||||
avio_wl32(&b, track->audio.out_samplerate);
|
||||
avio_wl32(&b, matroska->ctx->duration * track->audio.out_samplerate);
|
||||
avio_wl32(&b, av_rescale((matroska->duration * matroska->time_scale), track->audio.out_samplerate, AV_TIME_BASE * 1000));
|
||||
} else if (codec_id == AV_CODEC_ID_RV10 || codec_id == AV_CODEC_ID_RV20 ||
|
||||
codec_id == AV_CODEC_ID_RV30 || codec_id == AV_CODEC_ID_RV40) {
|
||||
extradata_offset = 26;
|
||||
@@ -1827,7 +1829,8 @@ static int matroska_read_header(AVFormatContext *s)
|
||||
av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
|
||||
1000000000, track->default_duration, 30000);
|
||||
#if FF_API_R_FRAME_RATE
|
||||
st->r_frame_rate = st->avg_frame_rate;
|
||||
if (st->avg_frame_rate.num < st->avg_frame_rate.den * 1000L)
|
||||
st->r_frame_rate = st->avg_frame_rate;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@@ -27,6 +27,7 @@
|
||||
#include "isom.h"
|
||||
#include "matroska.h"
|
||||
#include "riff.h"
|
||||
#include "subtitles.h"
|
||||
#include "wv.h"
|
||||
|
||||
#include "libavutil/avstring.h"
|
||||
@@ -1328,7 +1329,7 @@ static int srt_get_duration(uint8_t **buf)
|
||||
s_hsec += 1000*s_sec; e_hsec += 1000*e_sec;
|
||||
duration = e_hsec - s_hsec;
|
||||
}
|
||||
*buf += strcspn(*buf, "\n") + 1;
|
||||
*buf += ff_subtitles_next_line(*buf);
|
||||
}
|
||||
return duration;
|
||||
}
|
||||
@@ -1617,7 +1618,6 @@ const AVCodecTag additional_audio_tags[] = {
|
||||
};
|
||||
|
||||
const AVCodecTag additional_video_tags[] = {
|
||||
{ AV_CODEC_ID_PRORES, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_RV10, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_RV20, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_RV30, 0xFFFFFFFF },
|
||||
|
@@ -47,7 +47,7 @@ static int microdvd_probe(AVProbeData *p)
|
||||
sscanf(ptr, "{%*d}{%*d}%c", &c) != 1 &&
|
||||
sscanf(ptr, "{DEFAULT}{}%c", &c) != 1)
|
||||
return 0;
|
||||
ptr += strcspn(ptr, "\n") + 1;
|
||||
ptr += ff_subtitles_next_line(ptr);
|
||||
}
|
||||
return AVPROBE_SCORE_MAX;
|
||||
}
|
||||
|
@@ -1697,6 +1697,8 @@ static int mov_read_stss(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
if (!entries)
|
||||
{
|
||||
sc->keyframe_absent = 1;
|
||||
if (!st->need_parsing)
|
||||
st->need_parsing = AVSTREAM_PARSE_HEADERS;
|
||||
return 0;
|
||||
}
|
||||
if (entries >= UINT_MAX / sizeof(int))
|
||||
|
@@ -3672,6 +3672,9 @@ static int mov_write_header(AVFormatContext *s)
|
||||
}else{
|
||||
track->sample_size = (av_get_bits_per_sample(st->codec->codec_id) >> 3) * st->codec->channels;
|
||||
}
|
||||
if (st->codec->codec_id == AV_CODEC_ID_ILBC) {
|
||||
track->audio_vbr = 1;
|
||||
}
|
||||
if (track->mode != MODE_MOV &&
|
||||
track->enc->codec_id == AV_CODEC_ID_MP3 && track->timescale < 16000) {
|
||||
av_log(s, AV_LOG_ERROR, "track %d: muxing mp3 at %dhz is not supported\n",
|
||||
|
@@ -288,6 +288,7 @@ static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,
|
||||
AVStream *st = s->streams[0];
|
||||
int64_t ret = av_index_search_timestamp(st, timestamp, flags);
|
||||
int i, j;
|
||||
int dir = (flags&AVSEEK_FLAG_BACKWARD) ? -1 : 1;
|
||||
|
||||
if (mp3->is_cbr && st->duration > 0 && mp3->header_filesize > s->data_offset) {
|
||||
int64_t filesize = avio_size(s->pb);
|
||||
@@ -317,7 +318,7 @@ static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,
|
||||
|
||||
#define MIN_VALID 3
|
||||
for(i=0; i<4096; i++) {
|
||||
int64_t pos = ie->pos + i;
|
||||
int64_t pos = ie->pos + i*dir;
|
||||
for(j=0; j<MIN_VALID; j++) {
|
||||
ret = check(s, pos);
|
||||
if(ret < 0)
|
||||
@@ -330,7 +331,7 @@ static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,
|
||||
if(j!=MIN_VALID)
|
||||
i=0;
|
||||
|
||||
ret = avio_seek(s->pb, ie->pos + i, SEEK_SET);
|
||||
ret = avio_seek(s->pb, ie->pos + i*dir, SEEK_SET);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ff_update_cur_dts(s, st, ie->timestamp);
|
||||
|
@@ -725,6 +725,7 @@ static int vobsub_read_header(AVFormatContext *s)
|
||||
st->id = stream_id;
|
||||
st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
|
||||
st->codec->codec_id = AV_CODEC_ID_DVD_SUBTITLE;
|
||||
avpriv_set_pts_info(st, 64, 1, 1000);
|
||||
av_dict_set(&st->metadata, "language", id, 0);
|
||||
av_log(s, AV_LOG_DEBUG, "IDX stream[%d] id=%s\n", stream_id, id);
|
||||
header_parsed = 1;
|
||||
@@ -889,6 +890,21 @@ static int vobsub_read_seek(AVFormatContext *s, int stream_index,
|
||||
int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
|
||||
{
|
||||
MpegDemuxContext *vobsub = s->priv_data;
|
||||
|
||||
/* Rescale requested timestamps based on the first stream (timebase is the
|
||||
* same for all subtitles stream within a .idx/.sub). Rescaling is done just
|
||||
* like in avformat_seek_file(). */
|
||||
if (stream_index == -1 && s->nb_streams != 1) {
|
||||
AVRational time_base = s->streams[0]->time_base;
|
||||
ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
|
||||
min_ts = av_rescale_rnd(min_ts, time_base.den,
|
||||
time_base.num * (int64_t)AV_TIME_BASE,
|
||||
AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
|
||||
max_ts = av_rescale_rnd(max_ts, time_base.den,
|
||||
time_base.num * (int64_t)AV_TIME_BASE,
|
||||
AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
|
||||
}
|
||||
|
||||
return ff_subtitles_queue_seek(&vobsub->q, s, stream_index,
|
||||
min_ts, ts, max_ts, flags);
|
||||
}
|
||||
|
@@ -43,7 +43,7 @@ static int mpl2_probe(AVProbeData *p)
|
||||
if (sscanf(ptr, "[%"SCNd64"][%"SCNd64"]%c", &start, &end, &c) != 3 &&
|
||||
sscanf(ptr, "[%"SCNd64"][]%c", &start, &c) != 2)
|
||||
return 0;
|
||||
ptr += strcspn(ptr, "\r\n") + 1;
|
||||
ptr += ff_subtitles_next_line(ptr);
|
||||
if (ptr >= ptr_end)
|
||||
return 0;
|
||||
}
|
||||
|
@@ -37,11 +37,16 @@ static int mpsub_probe(AVProbeData *p)
|
||||
const char *ptr_end = p->buf + p->buf_size;
|
||||
|
||||
while (ptr < ptr_end) {
|
||||
int inc;
|
||||
|
||||
if (!memcmp(ptr, "FORMAT=TIME", 11))
|
||||
return AVPROBE_SCORE_EXTENSION;
|
||||
if (!memcmp(ptr, "FORMAT=", 7))
|
||||
return AVPROBE_SCORE_EXTENSION / 3;
|
||||
ptr += strcspn(ptr, "\n") + 1;
|
||||
inc = ff_subtitles_next_line(ptr);
|
||||
if (!inc)
|
||||
break;
|
||||
ptr += inc;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@@ -75,6 +75,7 @@ typedef struct {
     int temporal_reordering;
     AVRational aspect_ratio; ///< display aspect ratio
     int closed_gop; ///< gop is closed, used in mpeg-2 frame parsing
+    int video_bit_rate;
 } MXFStreamContext;

 typedef struct {
@@ -975,13 +976,14 @@ static void mxf_write_cdci_desc(AVFormatContext *s, AVStream *st)
 static void mxf_write_mpegvideo_desc(AVFormatContext *s, AVStream *st)
 {
     AVIOContext *pb = s->pb;
+    MXFStreamContext *sc = st->priv_data;
     int profile_and_level = (st->codec->profile<<4) | st->codec->level;

     mxf_write_cdci_common(s, st, mxf_mpegvideo_descriptor_key, 8+5);

     // bit rate
     mxf_write_local_tag(pb, 4, 0x8000);
-    avio_wb32(pb, st->codec->bit_rate);
+    avio_wb32(pb, sc->video_bit_rate);

     // profile and level
     mxf_write_local_tag(pb, 1, 0x8007);
@@ -1704,14 +1706,15 @@ static int mxf_write_header(AVFormatContext *s)
             ret = av_timecode_init(&mxf->tc, rate, 0, 0, s);
             if (ret < 0)
                 return ret;
+            sc->video_bit_rate = st->codec->bit_rate ? st->codec->bit_rate : st->codec->rc_max_rate;
             if (s->oformat == &ff_mxf_d10_muxer) {
-                if (st->codec->bit_rate == 50000000) {
+                if (sc->video_bit_rate == 50000000) {
                     if (mxf->time_base.den == 25) sc->index = 3;
                     else sc->index = 5;
-                } else if (st->codec->bit_rate == 40000000) {
+                } else if (sc->video_bit_rate == 40000000) {
                     if (mxf->time_base.den == 25) sc->index = 7;
                     else sc->index = 9;
-                } else if (st->codec->bit_rate == 30000000) {
+                } else if (sc->video_bit_rate == 30000000) {
                     if (mxf->time_base.den == 25) sc->index = 11;
                     else sc->index = 13;
                 } else {
@@ -1720,7 +1723,7 @@ static int mxf_write_header(AVFormatContext *s)
             }

             mxf->edit_unit_byte_count = KAG_SIZE; // system element
-            mxf->edit_unit_byte_count += 16 + 4 + (uint64_t)st->codec->bit_rate *
+            mxf->edit_unit_byte_count += 16 + 4 + (uint64_t)sc->video_bit_rate *
                                          mxf->time_base.num / (8*mxf->time_base.den);
             mxf->edit_unit_byte_count += klv_fill_size(mxf->edit_unit_byte_count);
             mxf->edit_unit_byte_count += 16 + 4 + 4 + spf->samples_per_frame[0]*8*4;
@@ -1854,7 +1857,8 @@ static void mxf_write_d10_video_packet(AVFormatContext *s, AVStream *st, AVPacke
 {
     MXFContext *mxf = s->priv_data;
     AVIOContext *pb = s->pb;
-    int packet_size = (uint64_t)st->codec->bit_rate*mxf->time_base.num /
+    MXFStreamContext *sc = st->priv_data;
+    int packet_size = (uint64_t)sc->video_bit_rate*mxf->time_base.num /
                       (8*mxf->time_base.den); // frame size
     int pad;

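A sketch of the bytes-per-frame arithmetic used for edit_unit_byte_count and packet_size above (illustration only, not code from the patch; the helper name d10_frame_bytes is invented):

#include <stdint.h>

/* bytes per frame = bit_rate * time_base, divided by 8 bits per byte */
static int64_t d10_frame_bytes(int64_t bit_rate, int tb_num, int tb_den)
{
    return bit_rate * tb_num / (8 * (int64_t)tb_den);
}

/* e.g. D-10 at 50 Mbit/s, 25 fps (time base 1/25):
 * 50000000 * 1 / (8 * 25) = 250000 bytes per coded frame. */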
@@ -136,11 +136,15 @@ ff_vorbis_comment(AVFormatContext * as, AVDictionary **m, const uint8_t *buf, in

             if (!pict) {
                 av_log(as, AV_LOG_WARNING, "out-of-memory error. Skipping cover art block.\n");
+                av_freep(&tt);
+                av_freep(&ct);
                 continue;
             }
             if ((ret = av_base64_decode(pict, ct, vl)) > 0)
                 ret = ff_flac_parse_picture(as, pict, ret);
             av_freep(&pict);
+            av_freep(&tt);
+            av_freep(&ct);
             if (ret < 0) {
                 av_log(as, AV_LOG_WARNING, "Failed to parse cover art block.\n");
                 continue;
@@ -37,12 +37,14 @@ static int srt_probe(AVProbeData *p)
     if (AV_RB24(ptr) == 0xEFBBBF)
         ptr += 3; /* skip UTF-8 BOM */

+    while (*ptr == '\r' || *ptr == '\n')
+        ptr++;
     for (i=0; i<2; i++) {
         if ((num == i || num + 1 == i)
             && sscanf(ptr, "%*d:%*2d:%*2d%*1[,.]%*3d --> %*d:%*2d:%*2d%*1[,.]%3d", &v) == 1)
             return AVPROBE_SCORE_MAX;
         num = atoi(ptr);
-        ptr += strcspn(ptr, "\n") + 1;
+        ptr += ff_subtitles_next_line(ptr);
     }
     return 0;
 }
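For reference, the shape of input the probe's sscanf pattern is matching — a minimal SRT cue (example data, not taken from the patch); the leading counter is what num = atoi(ptr) reads, and the %*1[,.] fields accept either ',' or '.' as the millisecond separator:

1
00:00:01,000 --> 00:00:04,000
Hello, world.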
@@ -63,12 +65,10 @@ static int64_t get_pts(const char **buf, int *duration,
             int64_t start = (hh1*3600LL + mm1*60LL + ss1) * 1000LL + ms1;
             int64_t end = (hh2*3600LL + mm2*60LL + ss2) * 1000LL + ms2;
             *duration = end - start;
-            *buf += strcspn(*buf, "\n");
-            *buf += !!**buf;
+            *buf += ff_subtitles_next_line(*buf);
             return start;
         }
-        *buf += strcspn(*buf, "\n");
-        *buf += !!**buf;
+        *buf += ff_subtitles_next_line(*buf);
     }
     return AV_NOPTS_VALUE;
 }
@@ -109,7 +109,8 @@ int ff_subtitles_queue_seek(FFDemuxSubtitlesQueue *q, AVFormatContext *s, int st
         for (i = 0; i < q->nb_subs; i++) {
             int64_t pts = q->subs[i].pts;
             uint64_t ts_diff = FFABS(pts - ts);
-            if (pts >= min_ts && pts <= max_ts && ts_diff < min_ts_diff) {
+            if ((stream_index == -1 || q->subs[i].stream_index == stream_index) &&
+                pts >= min_ts && pts <= max_ts && ts_diff < min_ts_diff) {
                 min_ts_diff = ts_diff;
                 idx = i;
             }
@@ -119,13 +120,25 @@ int ff_subtitles_queue_seek(FFDemuxSubtitlesQueue *q, AVFormatContext *s, int st
         /* look back in the latest subtitles for overlapping subtitles */
         ts_selected = q->subs[idx].pts;
         for (i = idx - 1; i >= 0; i--) {
-            if (q->subs[i].duration <= 0)
+            int64_t pts = q->subs[i].pts;
+            if (q->subs[i].duration <= 0 ||
+                (stream_index != -1 && q->subs[i].stream_index != stream_index))
                 continue;
-            if (q->subs[i].pts > ts_selected - q->subs[i].duration)
+            if (pts >= min_ts && pts > ts_selected - q->subs[i].duration)
                 idx = i;
             else
                 break;
         }
+
+        /* If the queue is used to store multiple subtitles streams (like with
+         * VobSub) and the stream index is not specified, we need to make sure
+         * to focus on the smallest file position offset for a same timestamp;
+         * queue is ordered by pts and then filepos, so we can take the first
+         * entry for a given timestamp. */
+        if (stream_index == -1)
+            while (idx > 0 && q->subs[idx - 1].pts == q->subs[idx].pts)
+                idx--;
+
         q->current_sub_idx = idx;
     }
     return 0;
@@ -96,4 +96,17 @@ const char *ff_smil_get_attr_ptr(const char *s, const char *attr);
  */
 void ff_subtitles_read_chunk(AVIOContext *pb, AVBPrint *buf);

+/**
+ * Get the number of characters to increment to jump to the next line, or to
+ * the end of the string.
+ */
+static av_always_inline int ff_subtitles_next_line(const char *ptr)
+{
+    int n = strcspn(ptr, "\n");
+    ptr += n;
+    if (*ptr == '\n')
+        n++;
+    return n;
+}
+
 #endif /* AVFORMAT_SUBTITLES_H */
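A sketch of why the helper is safer than the old strcspn(ptr, "\n") + 1 pattern on a buffer that does not end in a newline (illustration only; assumes the inline helper above is in scope via libavformat/subtitles.h):

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *s = "last line without newline";   /* no trailing '\n' */

    /* old pattern: always adds 1, stepping past the terminating NUL here */
    printf("strcspn + 1: %zu\n", strcspn(s, "\n") + 1);      /* 26 */

    /* ff_subtitles_next_line(): only increments past an actual '\n',
     * so it stops exactly at the end of the string */
    printf("next_line  : %d\n", ff_subtitles_next_line(s));  /* 25 */
    return 0;
}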
@@ -26,15 +26,15 @@

 typedef struct ThpDemuxContext {
     int version;
-    int first_frame;
-    int first_framesz;
-    int last_frame;
+    unsigned first_frame;
+    unsigned first_framesz;
+    unsigned last_frame;
     int compoff;
-    int framecnt;
+    unsigned framecnt;
     AVRational fps;
-    int frame;
-    int next_frame;
-    int next_framesz;
+    unsigned frame;
+    int64_t next_frame;
+    unsigned next_framesz;
     int video_stream_index;
     int audio_stream_index;
     int compcount;
@@ -158,7 +158,7 @@ static int thp_read_packet(AVFormatContext *s,
         avio_seek(pb, thp->next_frame, SEEK_SET);

         /* Locate the next frame and read out its size. */
-        thp->next_frame += thp->next_framesz;
+        thp->next_frame += FFMAX(thp->next_framesz, 1);
         thp->next_framesz = avio_rb32(pb);

         avio_rb32(pb); /* Previous total size. */
@@ -1061,12 +1061,14 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
             if (pkt->dts != AV_NOPTS_VALUE) {
                 // got DTS from the stream, update reference timestamp
                 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
-                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
             } else if (st->reference_dts != AV_NOPTS_VALUE) {
                 // compute DTS based on reference timestamp
                 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
-                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
             }
+
+            if (st->reference_dts != AV_NOPTS_VALUE && pkt->pts == AV_NOPTS_VALUE)
+                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
+
             if (pc->dts_sync_point > 0)
                 st->reference_dts = pkt->dts; // new reference
         }
@@ -2791,9 +2793,10 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
             goto find_stream_info_err;
         }

-        read_size += pkt->size;
-
         st = ic->streams[pkt->stream_index];
+        if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC))
+            read_size += pkt->size;
+
         if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
             /* check for non-increasing dts */
             if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
@@ -396,11 +396,15 @@ break_loop:

     avio_seek(pb, data_ofs, SEEK_SET);

-    if (!sample_count && st->codec->channels &&
-        av_get_bits_per_sample(st->codec->codec_id) && wav->data_end <= avio_size(pb))
-        sample_count = (data_size << 3) /
-                       (st->codec->channels *
-                        (uint64_t)av_get_bits_per_sample(st->codec->codec_id));
+    if (!sample_count || av_get_exact_bits_per_sample(st->codec->codec_id) > 0)
+        if (   st->codec->channels
+            && data_size
+            && av_get_bits_per_sample(st->codec->codec_id)
+            && wav->data_end <= avio_size(pb))
+            sample_count = (data_size << 3)
+                                  /
+                (st->codec->channels * (uint64_t)av_get_bits_per_sample(st->codec->codec_id));
+
     if (sample_count)
         st->duration = sample_count;

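The sample count above is the usual PCM relation between data-chunk size, channel count and bits per sample; a sketch with concrete numbers (illustration only, helper name invented):

#include <stdint.h>

static uint64_t wav_sample_count(uint64_t data_size, int channels, int bits_per_sample)
{
    return (data_size << 3) / (channels * (uint64_t)bits_per_sample);
}

/* e.g. 16-bit stereo PCM with a 1764000-byte data chunk:
 * (1764000 * 8) / (2 * 16) = 441000 samples, i.e. 10 s at 44100 Hz. */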
@@ -458,8 +462,8 @@ static int wav_read_packet(AVFormatContext *s, AVPacket *pkt)
     if (wav->smv_data_ofs > 0) {
         int64_t audio_dts, video_dts;
 smv_retry:
-        audio_dts = s->streams[0]->cur_dts;
-        video_dts = s->streams[1]->cur_dts;
+        audio_dts = (int32_t)s->streams[0]->cur_dts;
+        video_dts = (int32_t)s->streams[1]->cur_dts;

         if (audio_dts != AV_NOPTS_VALUE && video_dts != AV_NOPTS_VALUE) {
             /*We always return a video frame first to get the pixel format first*/
@@ -671,7 +675,7 @@ static int w64_read_header(AVFormatContext *s)
             uint32_t count, chunk_size, i;

             start = avio_tell(pb);
-            end = start + size;
+            end = start + FFALIGN(size, INT64_C(8)) - 24;
             count = avio_rl32(pb);

             for (i = 0; i < count; i++) {
@@ -697,7 +701,7 @@ static int w64_read_header(AVFormatContext *s)
             avio_skip(pb, end - avio_tell(pb));
         } else {
             av_log(s, AV_LOG_DEBUG, "unknown guid: "FF_PRI_GUID"\n", FF_ARG_GUID(guid));
-            avio_skip(pb, size - 24);
+            avio_skip(pb, FFALIGN(size, INT64_C(8)) - 24);
         }
     }

@@ -89,7 +89,8 @@ OBJS = adler32.o \
        intfloat_readwrite.o \
        intmath.o \
        lfg.o \
-       lls.o \
+       lls1.o \
+       lls2.o \
        log.o \
        log2_tab.o \
        mathematics.o \
@@ -142,7 +143,8 @@ TESTPROGS = adler32 \
             fifo \
             hmac \
             lfg \
-            lls \
+            lls1 \
+            lls2 \
             md5 \
             murmur3 \
             opt \
@@ -30,14 +30,23 @@

 #include "attributes.h"
 #include "version.h"
-#include "lls.h"
+#include "lls1.h"

-static void update_lls(LLSModel *m, double *var)
+#if FF_API_LLS1
+
+av_cold void avpriv_init_lls(LLSModel *m, int indep_count)
+{
+    memset(m, 0, sizeof(LLSModel));
+    m->indep_count = indep_count;
+}
+
+void avpriv_update_lls(LLSModel *m, double *var, double decay)
 {
     int i, j;

     for (i = 0; i <= m->indep_count; i++) {
         for (j = i; j <= m->indep_count; j++) {
+            m->covariance[i][j] *= decay;
             m->covariance[i][j] += var[i] * var[j];
         }
     }
@@ -46,8 +55,8 @@ static void update_lls(LLSModel *m, double *var)
 void avpriv_solve_lls(LLSModel *m, double threshold, unsigned short min_order)
 {
     int i, j, k;
-    double (*factor)[MAX_VARS_ALIGN] = (void *) &m->covariance[1][0];
-    double (*covar) [MAX_VARS_ALIGN] = (void *) &m->covariance[1][1];
+    double (*factor)[MAX_VARS + 1] = (void *) &m->covariance[1][0];
+    double (*covar) [MAX_VARS + 1] = (void *) &m->covariance[1][1];
     double *covar_y = m->covariance[0];
     int count = m->indep_count;

@@ -100,7 +109,7 @@ void avpriv_solve_lls(LLSModel *m, double threshold, unsigned short min_order)
     }
 }

-static double evaluate_lls(LLSModel *m, double *param, int order)
+double avpriv_evaluate_lls(LLSModel *m, double *param, int order)
 {
     int i;
     double out = 0;
@@ -111,16 +120,6 @@ static double evaluate_lls(LLSModel *m, double *param, int order)
     return out;
 }

-av_cold void avpriv_init_lls(LLSModel *m, int indep_count)
-{
-    memset(m, 0, sizeof(LLSModel));
-    m->indep_count = indep_count;
-    m->update_lls = update_lls;
-    m->evaluate_lls = evaluate_lls;
-    if (ARCH_X86)
-        ff_init_lls_x86(m);
-}
-
 #if FF_API_LLS_PRIVATE
 av_cold void av_init_lls(LLSModel *m, int indep_count)
 {
@@ -128,7 +127,7 @@ av_cold void av_init_lls(LLSModel *m, int indep_count)
 }
 void av_update_lls(LLSModel *m, double *param, double decay)
 {
-    m->update_lls(m, param);
+    avpriv_update_lls(m, param, decay);
 }
 void av_solve_lls(LLSModel *m, double threshold, int min_order)
 {
@@ -136,10 +135,12 @@ void av_solve_lls(LLSModel *m, double threshold, int min_order)
 }
 double av_evaluate_lls(LLSModel *m, double *param, int order)
 {
-    return m->evaluate_lls(m, param, order);
+    return avpriv_evaluate_lls(m, param, order);
 }
 #endif /* FF_API_LLS_PRIVATE */

+#endif /* FF_API_LLS1 */
+
 #ifdef TEST

 #include <stdio.h>
@@ -156,17 +157,17 @@ int main(void)
     avpriv_init_lls(&m, 3);

     for (i = 0; i < 100; i++) {
-        LOCAL_ALIGNED(32, double, var, [4]);
+        double var[4];
         double eval;

         var[0] = (av_lfg_get(&lfg) / (double) UINT_MAX - 0.5) * 2;
         var[1] = var[0] + av_lfg_get(&lfg) / (double) UINT_MAX - 0.5;
         var[2] = var[1] + av_lfg_get(&lfg) / (double) UINT_MAX - 0.5;
         var[3] = var[2] + av_lfg_get(&lfg) / (double) UINT_MAX - 0.5;
-        m.update_lls(&m, var);
+        avpriv_update_lls(&m, var, 0.99);
         avpriv_solve_lls(&m, 0.001, 0);
         for (order = 0; order < 3; order++) {
-            eval = m.evaluate_lls(&m, var + 1, order);
+            eval = avpriv_evaluate_lls(&m, var + 1, order);
             printf("real:%9f order:%d pred:%9f var:%f coeffs:%f %9f %9f\n",
                    var[0], order, eval, sqrt(m.variance[order] / (i + 1)),
                    m.coeff[order][0], m.coeff[order][1],
54  libavutil/lls1.h  Normal file
@@ -0,0 +1,54 @@
/*
 * linear least squares model
 *
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_LLS_H
#define AVUTIL_LLS_H

#include "version.h"

#define MAX_VARS 32

//FIXME avoid direct access to LLSModel from outside

/**
 * Linear least squares model.
 */
typedef struct LLSModel {
    double covariance[MAX_VARS + 1][MAX_VARS + 1];
    double coeff[MAX_VARS][MAX_VARS];
    double variance[MAX_VARS];
    int indep_count;
} LLSModel;

void avpriv_init_lls(LLSModel *m, int indep_count);
void avpriv_update_lls(LLSModel *m, double *param, double decay);
void avpriv_solve_lls(LLSModel *m, double threshold, unsigned short min_order);
double avpriv_evaluate_lls(LLSModel *m, double *param, int order);

#if FF_API_LLS_PRIVATE
void av_init_lls(LLSModel *m, int indep_count);
void av_update_lls(LLSModel *m, double *param, double decay);
void av_solve_lls(LLSModel *m, double threshold, int min_order);
double av_evaluate_lls(LLSModel *m, double *param, int order);
#endif /* FF_API_LLS_PRIVATE */

#endif /* AVUTIL_LLS_H */
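A usage sketch of the compatibility API declared above, mirroring the test program earlier in this diff (assumptions: built inside the FFmpeg tree, var[0] is the value being predicted, var[1..3] are the predictors, and the helper name predict_first is invented):

#include "libavutil/lls1.h"

static double predict_first(double samples[][4], int n)
{
    LLSModel m;
    int i;

    avpriv_init_lls(&m, 3);                     /* 3 independent variables */
    for (i = 0; i < n; i++)
        avpriv_update_lls(&m, samples[i], 1.0); /* decay 1.0: plain accumulation */
    avpriv_solve_lls(&m, 0.001, 0);

    /* predict samples[n-1][0] from samples[n-1][1..3], using all 3 coefficients */
    return avpriv_evaluate_lls(&m, samples[n - 1] + 1, 2);
}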
160  libavutil/lls2.c  Normal file
@@ -0,0 +1,160 @@
/*
 * linear least squares model
 *
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * linear least squares model
 */

#include <math.h>
#include <string.h>

#include "attributes.h"
#include "version.h"
#include "lls2.h"

static void update_lls(LLSModel2 *m, double *var)
{
    int i, j;

    for (i = 0; i <= m->indep_count; i++) {
        for (j = i; j <= m->indep_count; j++) {
            m->covariance[i][j] += var[i] * var[j];
        }
    }
}

void avpriv_solve_lls2(LLSModel2 *m, double threshold, unsigned short min_order)
{
    int i, j, k;
    double (*factor)[MAX_VARS_ALIGN] = (void *) &m->covariance[1][0];
    double (*covar) [MAX_VARS_ALIGN] = (void *) &m->covariance[1][1];
    double *covar_y = m->covariance[0];
    int count = m->indep_count;

    for (i = 0; i < count; i++) {
        for (j = i; j < count; j++) {
            double sum = covar[i][j];

            for (k = i - 1; k >= 0; k--)
                sum -= factor[i][k] * factor[j][k];

            if (i == j) {
                if (sum < threshold)
                    sum = 1.0;
                factor[i][i] = sqrt(sum);
            } else {
                factor[j][i] = sum / factor[i][i];
            }
        }
    }

    for (i = 0; i < count; i++) {
        double sum = covar_y[i + 1];

        for (k = i - 1; k >= 0; k--)
            sum -= factor[i][k] * m->coeff[0][k];

        m->coeff[0][i] = sum / factor[i][i];
    }

    for (j = count - 1; j >= min_order; j--) {
        for (i = j; i >= 0; i--) {
            double sum = m->coeff[0][i];

            for (k = i + 1; k <= j; k++)
                sum -= factor[k][i] * m->coeff[j][k];

            m->coeff[j][i] = sum / factor[i][i];
        }

        m->variance[j] = covar_y[0];

        for (i = 0; i <= j; i++) {
            double sum = m->coeff[j][i] * covar[i][i] - 2 * covar_y[i + 1];

            for (k = 0; k < i; k++)
                sum += 2 * m->coeff[j][k] * covar[k][i];

            m->variance[j] += m->coeff[j][i] * sum;
        }
    }
}

static double evaluate_lls(LLSModel2 *m, double *param, int order)
{
    int i;
    double out = 0;

    for (i = 0; i <= order; i++)
        out += param[i] * m->coeff[order][i];

    return out;
}

av_cold void avpriv_init_lls2(LLSModel2 *m, int indep_count)
{
    memset(m, 0, sizeof(LLSModel2));
    m->indep_count = indep_count;
    m->update_lls = update_lls;
    m->evaluate_lls = evaluate_lls;
    if (ARCH_X86)
        ff_init_lls_x86(m);
}

#ifdef TEST

#include <stdio.h>
#include <limits.h>
#include "lfg.h"

int main(void)
{
    LLSModel2 m;
    int i, order;
    AVLFG lfg;

    av_lfg_init(&lfg, 1);
    avpriv_init_lls2(&m, 3);

    for (i = 0; i < 100; i++) {
        LOCAL_ALIGNED(32, double, var, [4]);
        double eval;

        var[0] = (av_lfg_get(&lfg) / (double) UINT_MAX - 0.5) * 2;
        var[1] = var[0] + av_lfg_get(&lfg) / (double) UINT_MAX - 0.5;
        var[2] = var[1] + av_lfg_get(&lfg) / (double) UINT_MAX - 0.5;
        var[3] = var[2] + av_lfg_get(&lfg) / (double) UINT_MAX - 0.5;
        m.update_lls(&m, var);
        avpriv_solve_lls2(&m, 0.001, 0);
        for (order = 0; order < 3; order++) {
            eval = m.evaluate_lls(&m, var + 1, order);
            printf("real:%9f order:%d pred:%9f var:%f coeffs:%f %9f %9f\n",
                   var[0], order, eval, sqrt(m.variance[order] / (i + 1)),
                   m.coeff[order][0], m.coeff[order][1],
                   m.coeff[order][2]);
        }
    }
    return 0;
}

#endif
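As a summary of what update_lls and avpriv_solve_lls2 compute (my restatement, not text from the patch): with x_0 the value to predict and x_1..x_n the predictors, update_lls accumulates the covariance sums and the solver obtains the least-squares coefficients for every order via a Cholesky-style factorization of the predictor block:

C_{ij} = \sum_{\text{samples}} x_i x_j, \qquad
\hat{a} = \arg\min_a \sum_{\text{samples}} \Bigl(x_0 - \sum_{i=1}^{n} a_i x_i\Bigr)^2
\;\Longleftrightarrow\;
C_{1..n,\,1..n}\, a = C_{1..n,\,0}, \qquad
C_{1..n,\,1..n} = L L^{T}.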
@@ -30,12 +30,12 @@
 #define MAX_VARS 32
 #define MAX_VARS_ALIGN FFALIGN(MAX_VARS+1,4)

-//FIXME avoid direct access to LLSModel from outside
+//FIXME avoid direct access to LLSModel2 from outside

 /**
  * Linear least squares model.
  */
-typedef struct LLSModel {
+typedef struct LLSModel2 {
     DECLARE_ALIGNED(32, double, covariance[MAX_VARS_ALIGN][MAX_VARS_ALIGN]);
     DECLARE_ALIGNED(32, double, coeff[MAX_VARS][MAX_VARS]);
     double variance[MAX_VARS];
@@ -47,25 +47,18 @@ typedef struct LLSModel {
      * 32-byte aligned, and any padding elements must be initialized
      * (i.e not denormal/nan).
      */
-    void (*update_lls)(struct LLSModel *m, double *var);
+    void (*update_lls)(struct LLSModel2 *m, double *var);
     /**
      * Inner product of var[] and the LPC coefs.
      * @param m this context
      * @param var training samples, excluding the value to be predicted. unaligned.
      * @param order lpc order
      */
-    double (*evaluate_lls)(struct LLSModel *m, double *var, int order);
-} LLSModel;
+    double (*evaluate_lls)(struct LLSModel2 *m, double *var, int order);
+} LLSModel2;

-void avpriv_init_lls(LLSModel *m, int indep_count);
-void ff_init_lls_x86(LLSModel *m);
-void avpriv_solve_lls(LLSModel *m, double threshold, unsigned short min_order);
-
-#if FF_API_LLS_PRIVATE
-void av_init_lls(LLSModel *m, int indep_count);
-void av_update_lls(LLSModel *m, double *param, double decay);
-void av_solve_lls(LLSModel *m, double threshold, int min_order);
-double av_evaluate_lls(LLSModel *m, double *param, int order);
-#endif /* FF_API_LLS_PRIVATE */
+void avpriv_init_lls2(LLSModel2 *m, int indep_count);
+void ff_init_lls_x86(LLSModel2 *m);
+void avpriv_solve_lls2(LLSModel2 *m, double threshold, unsigned short min_order);

 #endif /* AVUTIL_LLS_H */
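The function pointers in LLSModel2 are what allow ff_init_lls_x86() (see the x86 hunks below) to substitute SIMD implementations without changing callers; a sketch of the call pattern (illustration only, assuming an FFmpeg-internal caller and 32-byte aligned input as the comment above requires):

#include "libavutil/lls2.h"

void example(double *var /* var[0..3], 32-byte aligned, padding initialized */)
{
    LLSModel2 m;
    double pred;

    /* sets m.update_lls / m.evaluate_lls to the C implementations, then
     * ff_init_lls_x86() may overwrite them with the SSE2/AVX versions */
    avpriv_init_lls2(&m, 3);

    m.update_lls(&m, var);            /* callers always go through the pointers */
    avpriv_solve_lls2(&m, 0.001, 0);

    /* inner product of var[1..] with the order-2 coefficients */
    pred = m.evaluate_lls(&m, var + 1, 2);
    (void)pred;
}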
@@ -103,6 +103,9 @@ static int use_color = -1;

 static void colored_fputs(int level, const char *str)
 {
+    if (!*str)
+        return;
+
     if (use_color < 0) {
 #if HAVE_SETCONSOLETEXTATTRIBUTE
         CONSOLE_SCREEN_BUFFER_INFO con_info;
@@ -251,7 +251,7 @@ int av_set_string3(void *obj, const char *name, const char *val, int alloc, cons

 int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
 {
-    int ret;
+    int ret = 0;
     void *dst, *target_obj;
     const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
     if (!o || !target_obj)
@@ -132,6 +132,9 @@
 #ifndef FF_API_LLS_PRIVATE
 #define FF_API_LLS_PRIVATE (LIBAVUTIL_VERSION_MAJOR < 53)
 #endif
+#ifndef FF_API_LLS1
+#define FF_API_LLS1 (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
 #ifndef FF_API_AVFRAME_LAVC
 #define FF_API_AVFRAME_LAVC (LIBAVUTIL_VERSION_MAJOR < 53)
 #endif
@@ -29,7 +29,7 @@ SECTION .text
 %define COVAR_STRIDE MAX_VARS_ALIGN*8
 %define COVAR(x,y) [covarq + (x)*8 + (y)*COVAR_STRIDE]

-struc LLSModel
+struc LLSModel2
     .covariance: resq MAX_VARS_ALIGN*MAX_VARS_ALIGN
     .coeff:      resq MAX_VARS*MAX_VARS
     .variance:   resq MAX_VARS
@@ -49,7 +49,7 @@ INIT_XMM sse2
 %define movdqa movaps
 cglobal update_lls, 2,5,8, ctx, var, i, j, covar2
 %define covarq ctxq
-    mov     id, [ctxq + LLSModel.indep_count]
+    mov     id, [ctxq + LLSModel2.indep_count]
     lea     varq, [varq + iq*8]
     neg     iq
     mov     covar2q, covarq
@@ -129,7 +129,7 @@ cglobal update_lls, 2,5,8, ctx, var, i, j, covar2
 INIT_YMM avx
 cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
 %define covarq ctxq
-    mov     countd, [ctxq + LLSModel.indep_count]
+    mov     countd, [ctxq + LLSModel2.indep_count]
     lea     count2d, [countq-2]
     xor     id, id
 .loopi:
@@ -206,7 +206,7 @@ cglobal evaluate_lls, 3,4,2, ctx, var, order, i
 %define coefsq ctxq
     mov     id, orderd
     imul    orderd, MAX_VARS
-    lea     coefsq, [ctxq + LLSModel.coeff + orderq*8]
+    lea     coefsq, [ctxq + LLSModel2.coeff + orderq*8]
     movsd   m0, [varq]
     movhpd  m0, [varq + 8]
     mulpd   m0, [coefsq]
@@ -20,14 +20,14 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */

-#include "libavutil/lls.h"
+#include "libavutil/lls2.h"
 #include "libavutil/x86/cpu.h"

-void ff_update_lls_sse2(LLSModel *m, double *var);
-void ff_update_lls_avx(LLSModel *m, double *var);
-double ff_evaluate_lls_sse2(LLSModel *m, double *var, int order);
+void ff_update_lls_sse2(LLSModel2 *m, double *var);
+void ff_update_lls_avx(LLSModel2 *m, double *var);
+double ff_evaluate_lls_sse2(LLSModel2 *m, double *var, int order);

-av_cold void ff_init_lls_x86(LLSModel *m)
+av_cold void ff_init_lls_x86(LLSModel2 *m)
 {
     int cpu_flags = av_get_cpu_flags();
     if (EXTERNAL_SSE2(cpu_flags)) {
@@ -82,9 +82,6 @@ static int even(int64_t layout){
 }

 static int clean_layout(SwrContext *s, int64_t layout){
-    if((layout & AV_CH_LAYOUT_STEREO_DOWNMIX) == AV_CH_LAYOUT_STEREO_DOWNMIX)
-        return AV_CH_LAYOUT_STEREO;
-
     if(layout && layout != AV_CH_FRONT_CENTER && !(layout&(layout-1))) {
         char buf[128];
         av_get_channel_layout_string(buf, sizeof(buf), -1, layout);
@@ -122,13 +119,19 @@ av_cold static int auto_matrix(SwrContext *s)
     const int matrix_encoding = s->matrix_encoding;

     in_ch_layout = clean_layout(s, s->in_ch_layout);
+    out_ch_layout = clean_layout(s, s->out_ch_layout);
+
+    if(   out_ch_layout == AV_CH_LAYOUT_STEREO_DOWNMIX
+       && (in_ch_layout & AV_CH_LAYOUT_STEREO_DOWNMIX) == 0
+    )
+        out_ch_layout = AV_CH_LAYOUT_STEREO;
+
     if(!sane_layout(in_ch_layout)){
         av_get_channel_layout_string(buf, sizeof(buf), -1, s->in_ch_layout);
         av_log(s, AV_LOG_ERROR, "Input channel layout '%s' is not supported\n", buf);
         return AVERROR(EINVAL);
     }

-    out_ch_layout = clean_layout(s, s->out_ch_layout);
     if(!sane_layout(out_ch_layout)){
         av_get_channel_layout_string(buf, sizeof(buf), -1, s->out_ch_layout);
         av_log(s, AV_LOG_ERROR, "Output channel layout '%s' is not supported\n", buf);
@@ -421,8 +424,8 @@ int swri_rematrix(SwrContext *s, AudioData *out, AudioData *in, int len, int mus
             off = len1 * out->bps;
         }

-    av_assert0(out->ch_count == av_get_channel_layout_nb_channels(s->out_ch_layout));
-    av_assert0(in ->ch_count == av_get_channel_layout_nb_channels(s-> in_ch_layout));
+    av_assert0(!s->out_ch_layout || out->ch_count == av_get_channel_layout_nb_channels(s->out_ch_layout));
+    av_assert0(!s-> in_ch_layout || in ->ch_count == av_get_channel_layout_nb_channels(s-> in_ch_layout));

     for(out_i=0; out_i<out->ch_count; out_i++){
         switch(s->matrix_ch[out_i][0]){
@@ -1268,9 +1268,10 @@ av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter,
             dst_stride <<= 1;

         if (INLINE_MMXEXT(cpu_flags) && c->srcBpc == 8 && c->dstBpc <= 14) {
-            c->canMMXEXTBeUsed = (dstW >= srcW && (dstW & 31) == 0 &&
-                                  (srcW & 15) == 0) ? 1 : 0;
-            if (!c->canMMXEXTBeUsed && dstW >= srcW && (srcW & 15) == 0
+            c->canMMXEXTBeUsed = dstW >= srcW && (dstW & 31) == 0 &&
+                                 c->chrDstW >= c->chrSrcW &&
+                                 (srcW & 15) == 0;
+            if (!c->canMMXEXTBeUsed && dstW >= srcW && c->chrDstW >= c->chrSrcW && (srcW & 15) == 0

                 && (flags & SWS_FAST_BILINEAR)) {
                 if (flags & SWS_PRINT_INFO)
@@ -1,3 +1,3 @@
-e35f5ea283bbcb249818e0078ec72664 *./tests/data/lavf/lavf.gif
-2011766 ./tests/data/lavf/lavf.gif
+8aef8081e8afa445f63f320f4a1c5edb *./tests/data/lavf/lavf.gif
+2030198 ./tests/data/lavf/lavf.gif
 ./tests/data/lavf/lavf.gif CRC=0x0dc5477c