Compare commits
1 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
80f91a70be |
@@ -1,7 +1,7 @@
|
||||
Entries are sorted chronologically from oldest to youngest within each release,
|
||||
releases are sorted from youngest to oldest.
|
||||
|
||||
version 1.2:
|
||||
version <next>:
|
||||
|
||||
- VDPAU hardware acceleration through normal hwaccel
|
||||
- SRTP support
|
||||
|
@@ -65,7 +65,7 @@ struct SwsContext *sws_opts;
|
||||
AVDictionary *swr_opts;
|
||||
AVDictionary *format_opts, *codec_opts, *resample_opts;
|
||||
|
||||
const int this_year = 2014;
|
||||
const int this_year = 2013;
|
||||
|
||||
static FILE *report_file;
|
||||
|
||||
|
@@ -190,13 +190,13 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
|
||||
void show_help_children(const AVClass *class, int flags);
|
||||
|
||||
/**
|
||||
* Per-fftool specific help handler. Implemented in each
|
||||
* fftool, called by show_help().
|
||||
* Per-avtool specific help handler. Implemented in each
|
||||
* avtool, called by show_help().
|
||||
*/
|
||||
void show_help_default(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Generic -h handler common to all fftools.
|
||||
* Generic -h handler common to all avtools.
|
||||
*/
|
||||
int show_help(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
|
55
configure
vendored
55
configure
vendored
@@ -140,10 +140,10 @@ Component options:
|
||||
--disable-fft disable FFT code
|
||||
|
||||
Hardware accelerators:
|
||||
--disable-dxva2 disable DXVA2 code [autodetect]
|
||||
--disable-vaapi disable VAAPI code [autodetect]
|
||||
--enable-dxva2 enable DXVA2 code
|
||||
--enable-vaapi enable VAAPI code
|
||||
--enable-vda enable VDA code
|
||||
--disable-vdpau disable VDPAU code [autodetect]
|
||||
--enable-vdpau enable VDPAU code
|
||||
|
||||
Individual component options:
|
||||
--disable-everything disable all components listed below
|
||||
@@ -184,11 +184,11 @@ Individual component options:
|
||||
|
||||
External library support:
|
||||
--enable-avisynth enable reading of AVISynth script files [no]
|
||||
--disable-bzlib disable bzlib [autodetect]
|
||||
--enable-bzlib enable bzlib [autodetect]
|
||||
--enable-fontconfig enable fontconfig
|
||||
--enable-frei0r enable frei0r video filtering
|
||||
--enable-gnutls enable gnutls [no]
|
||||
--disable-iconv disable iconv [autodetect]
|
||||
--enable-iconv enable iconv [no]
|
||||
--enable-libaacplus enable AAC+ encoding via libaacplus [no]
|
||||
--enable-libass enable libass subtitles rendering [no]
|
||||
--enable-libbluray enable BluRay reading using libbluray [no]
|
||||
@@ -235,7 +235,7 @@ External library support:
|
||||
--enable-openal enable OpenAL 1.1 capture support [no]
|
||||
--enable-openssl enable openssl [no]
|
||||
--enable-x11grab enable X11 grabbing [no]
|
||||
--disable-zlib disable zlib [autodetect]
|
||||
--enable-zlib enable zlib [autodetect]
|
||||
|
||||
Advanced options (experts only):
|
||||
--cross-prefix=PREFIX use PREFIX for compilation tools [$cross_prefix]
|
||||
@@ -1075,26 +1075,6 @@ require_pkg_config(){
|
||||
add_extralibs $(get_safe ${pkg}_libs)
|
||||
}
|
||||
|
||||
require_libfreetype(){
|
||||
log require_libfreetype "$@"
|
||||
pkg="freetype2"
|
||||
check_cmd $pkg_config --exists --print-errors $pkg \
|
||||
|| die "ERROR: $pkg not found"
|
||||
pkg_cflags=$($pkg_config --cflags $pkg)
|
||||
pkg_libs=$($pkg_config --libs $pkg)
|
||||
{
|
||||
echo "#include <ft2build.h>"
|
||||
echo "#include FT_FREETYPE_H"
|
||||
echo "long check_func(void) { return (long) FT_Init_FreeType; }"
|
||||
echo "int main(void) { return 0; }"
|
||||
} | check_ld "cc" $pkg_cflags $pkg_libs \
|
||||
&& set_safe ${pkg}_cflags $pkg_cflags \
|
||||
&& set_safe ${pkg}_libs $pkg_libs \
|
||||
|| die "ERROR: $pkg not found"
|
||||
add_cflags $(get_safe ${pkg}_cflags)
|
||||
add_extralibs $(get_safe ${pkg}_libs)
|
||||
}
|
||||
|
||||
hostcc_o(){
|
||||
eval printf '%s\\n' $HOSTCC_O
|
||||
}
|
||||
@@ -2189,9 +2169,6 @@ enable safe_bitstream_reader
|
||||
enable static
|
||||
enable swscale_alpha
|
||||
|
||||
# Enable hwaccels by default.
|
||||
enable dxva2 vaapi vdpau
|
||||
|
||||
# build settings
|
||||
SHFLAGS='-shared -Wl,-soname,$$(@F)'
|
||||
FFSERVERLDFLAGS=-Wl,-E
|
||||
@@ -2656,9 +2633,7 @@ probe_cc(){
|
||||
unset _depflags _DEPCMD _DEPFLAGS
|
||||
_flags_filter=echo
|
||||
|
||||
if $_cc --version 2>&1 | grep -q '^GNU assembler'; then
|
||||
true # no-op to avoid reading stdin in following checks
|
||||
elif $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
|
||||
if $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
|
||||
_type=llvm_gcc
|
||||
gcc_extra_ver=$(expr "$($_cc --version | head -n1)" : '.*\((.*)\)')
|
||||
_ident="llvm-gcc $($_cc -dumpversion) $gcc_extra_ver"
|
||||
@@ -3898,6 +3873,7 @@ fi
|
||||
|
||||
check_lib math.h sin -lm && LIBM="-lm"
|
||||
disabled crystalhd || check_lib libcrystalhd/libcrystalhd_if.h DtsCrystalHDVersion -lcrystalhd || disable crystalhd
|
||||
enabled vaapi && require vaapi va/va.h vaInitialize -lva
|
||||
|
||||
atan2f_args=2
|
||||
ldexpf_args=2
|
||||
@@ -3915,7 +3891,7 @@ enabled gnutls && require_pkg_config gnutls gnutls/gnutls.h gnutls_global_in
|
||||
enabled libiec61883 && require libiec61883 libiec61883/iec61883.h iec61883_cmp_connect -lraw1394 -lavc1394 -lrom1394 -liec61883
|
||||
enabled libaacplus && require "libaacplus >= 2.0.0" aacplus.h aacplusEncOpen -laacplus
|
||||
enabled libass && require_pkg_config libass ass/ass.h ass_library_init
|
||||
enabled libbluray && require_pkg_config libbluray libbluray/bluray.h bd_open
|
||||
enabled libbluray && require libbluray libbluray/bluray.h bd_open -lbluray
|
||||
enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 &&
|
||||
{ check_lib celt/celt.h celt_decoder_create_custom -lcelt0 ||
|
||||
die "ERROR: libcelt must be installed and version must be >= 0.11.0."; }
|
||||
@@ -3924,7 +3900,7 @@ enabled libfaac && require2 libfaac "stdint.h faac.h" faacEncGetVersion -lfaa
|
||||
enabled libfdk_aac && require libfdk_aac fdk-aac/aacenc_lib.h aacEncOpen -lfdk-aac
|
||||
flite_libs="-lflite_cmu_time_awb -lflite_cmu_us_awb -lflite_cmu_us_kal -lflite_cmu_us_kal16 -lflite_cmu_us_rms -lflite_cmu_us_slt -lflite_usenglish -lflite_cmulex -lflite"
|
||||
enabled libflite && require2 libflite "flite/flite.h" flite_init $flite_libs
|
||||
enabled libfreetype && require_libfreetype
|
||||
enabled libfreetype && require_pkg_config freetype2 "ft2build.h freetype/freetype.h" FT_Init_FreeType
|
||||
enabled libgsm && { for gsm_hdr in "gsm.h" "gsm/gsm.h"; do
|
||||
check_lib "${gsm_hdr}" gsm_create -lgsm && break;
|
||||
done || die "ERROR: libgsm not found"; }
|
||||
@@ -4065,16 +4041,19 @@ require X11 X11/Xlib.h XOpenDisplay -lX11 &&
|
||||
require Xext X11/extensions/XShm.h XShmCreateImage -lXext &&
|
||||
require Xfixes X11/extensions/Xfixes.h XFixesGetCursorImage -lXfixes
|
||||
|
||||
enabled vaapi &&
|
||||
check_lib va/va.h vaInitialize -lva ||
|
||||
disable vaapi
|
||||
if ! disabled vaapi; then
|
||||
check_lib va/va.h vaInitialize -lva && {
|
||||
check_cpp_condition va/va_version.h "VA_CHECK_VERSION(0,32,0)" ||
|
||||
warn "Please upgrade to VA-API >= 0.32 if you would like full VA-API support.";
|
||||
} || disable vaapi
|
||||
fi
|
||||
|
||||
enabled vdpau &&
|
||||
check_cpp_condition vdpau/vdpau.h "defined VDP_DECODER_PROFILE_MPEG4_PART2_ASP" ||
|
||||
disable vdpau
|
||||
|
||||
# Funny iconv installations are not unusual, so check it after all flags have been set
|
||||
disabled iconv || check_func_headers iconv.h iconv || check_lib2 iconv.h iconv -liconv || disable iconv
|
||||
enabled iconv && { check_func_headers iconv.h iconv || check_lib2 iconv.h iconv -liconv || die "ERROR: iconv not found"; }
|
||||
|
||||
enabled debug && add_cflags -g"$debuglevel" && add_asflags -g"$debuglevel"
|
||||
enabled coverage && add_cflags "-fprofile-arcs -ftest-coverage" && add_ldflags "-fprofile-arcs -ftest-coverage"
|
||||
|
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
|
||||
# This could be handy for archiving the generated documentation or
|
||||
# if some version control system is used.
|
||||
|
||||
PROJECT_NUMBER = 1.2.7
|
||||
PROJECT_NUMBER =
|
||||
|
||||
# With the PROJECT_LOGO tag one can specify an logo or icon that is included
|
||||
# in the documentation. The maximum height of the logo should not exceed 55
|
||||
|
@@ -1,7 +1,7 @@
|
||||
Release Notes
|
||||
=============
|
||||
|
||||
* 1.2 "Magic" March, 2013
|
||||
* 0.10 "Freedom" January, 2012
|
||||
|
||||
|
||||
General notes
|
||||
@@ -14,3 +14,9 @@ accepted. If you are experiencing issues with any formally released version of
|
||||
FFmpeg, please try git master to check if the issue still exists. If it does,
|
||||
make your report against the development code following the usual bug reporting
|
||||
guidelines.
|
||||
|
||||
Of big interest to our Windows users, FFmpeg now supports building with the MSVC
|
||||
compiler. Since MSVC does not support C99 features used extensively by FFmpeg,
|
||||
this has been accomplished using a converter that turns C99 code to C89. See the
|
||||
platform-specific documentation for more detailed documentation on building
|
||||
FFmpeg with MSVC.
|
||||
|
@@ -60,78 +60,6 @@ This decoder generates wave patterns according to predefined sequences. Its
|
||||
use is purely internal and the format of the data it accepts is not publicly
|
||||
documented.
|
||||
|
||||
@section libcelt
|
||||
|
||||
libcelt decoder wrapper
|
||||
|
||||
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
|
||||
Requires the presence of the libcelt headers and library during configuration.
|
||||
You need to explicitly configure the build with @code{--enable-libcelt}.
|
||||
|
||||
@section libgsm
|
||||
|
||||
libgsm decoder wrapper
|
||||
|
||||
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
|
||||
the presence of the libgsm headers and library during configuration. You need
|
||||
to explicitly configure the build with @code{--enable-libgsm}.
|
||||
|
||||
This decoder supports both the ordinary GSM and the Microsoft variant.
|
||||
|
||||
@section libilbc
|
||||
|
||||
libilbc decoder wrapper
|
||||
|
||||
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
|
||||
audio codec. Requires the presence of the libilbc headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libilbc}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following option is supported by the libilbc wrapper.
|
||||
|
||||
@table @option
|
||||
@item enhance
|
||||
|
||||
Enable the enhancement of the decoded audio when set to 1. The default
|
||||
value is 0 (disabled).
|
||||
|
||||
@end table
|
||||
|
||||
@section libopencore-amrnb
|
||||
|
||||
libopencore-amrnb decoder wrapper
|
||||
|
||||
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
|
||||
Narrowband audio codec. Using it requires the presence of the
|
||||
libopencore-amrnb headers and library during configuration. You need to
|
||||
explicitly configure the build with @code{--enable-libopencore-amrnb}.
|
||||
|
||||
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
|
||||
without this library.
|
||||
|
||||
@section libopencore-amrwb
|
||||
|
||||
libopencore-amrwb decoder wrapper.
|
||||
|
||||
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
|
||||
Wideband audio codec. Using it requires the presence of the
|
||||
libopencore-amrwb headers and library during configuration. You need to
|
||||
explicitly configure the build with @code{--enable-libopencore-amrwb}.
|
||||
|
||||
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
|
||||
without this library.
|
||||
|
||||
@section libopus
|
||||
|
||||
libopus decoder wrapper.
|
||||
|
||||
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
|
||||
Requires the presence of the libopus headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libopus}.
|
||||
|
||||
@c man end AUDIO DECODERS
|
||||
|
||||
@chapter Subtitles Decoders
|
||||
|
@@ -25,95 +25,6 @@ enabled encoders.
|
||||
A description of some of the currently available audio encoders
|
||||
follows.
|
||||
|
||||
@anchor{aacenc}
|
||||
@section aac
|
||||
|
||||
Advanced Audio Coding (AAC) encoder.
|
||||
|
||||
This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
|
||||
low complexity (AAC-LC) profile is supported. To use this encoder, you must set
|
||||
@option{strict} option to @samp{experimental} or lower.
|
||||
|
||||
As this encoder is experimental, unexpected behavior may exist from time to
|
||||
time. For a more stable AAC encoder, see @ref{libvo-aacenc}. However, be warned
|
||||
that it has a worse quality reported by some users.
|
||||
|
||||
@c Comment this out until somebody writes the respective documentation.
|
||||
@c See also @ref{libfaac}, @ref{libaacplus}, and @ref{libfdk-aac-enc}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item b
|
||||
Set bit rate in bits/s. Setting this automatically activates constant bit rate
|
||||
(CBR) mode.
|
||||
|
||||
@item q
|
||||
Set quality for variable bit rate (VBR) mode. This option is valid only using
|
||||
the @command{ffmpeg} command-line tool. For library interface users, use
|
||||
@option{global_quality}.
|
||||
|
||||
@item stereo_mode
|
||||
Set stereo encoding mode. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Automatically selected by the encoder.
|
||||
|
||||
@item ms_off
|
||||
Disable middle/side encoding. This is the default.
|
||||
|
||||
@item ms_force
|
||||
Force middle/side encoding.
|
||||
@end table
|
||||
|
||||
@item aac_coder
|
||||
Set AAC encoder coding method. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item 0
|
||||
FAAC-inspired method.
|
||||
|
||||
This method is a simplified reimplementation of the method used in FAAC, which
|
||||
sets thresholds proportional to the band energies, and then decreases all the
|
||||
thresholds with quantizer steps to find the appropriate quantization with
|
||||
distortion below threshold band by band.
|
||||
|
||||
The quality of this method is comparable to the two loop searching method
|
||||
described below, but slightly better and slower.
|
||||
|
||||
@item 1
|
||||
Average noise to mask ratio (ANMR) trellis-based solution.
|
||||
|
||||
This has a theoretic best quality out of all the coding methods, but at the
|
||||
cost of the slowest speed.
|
||||
|
||||
@item 2
|
||||
Two loop searching (TLS) method.
|
||||
|
||||
This method first sets quantizers depending on band thresholds and then tries
|
||||
to find an optimal combination by adding or subtracting a specific value from
|
||||
all quantizers and adjusting some individual quantizer a little.
|
||||
|
||||
This method produces similar quality with the FAAC method and is the default.
|
||||
|
||||
@item 3
|
||||
Constant quantizer method.
|
||||
|
||||
This method sets a constant quantizer for all bands. This is the fastest of all
|
||||
the methods, yet produces the worst quality.
|
||||
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Tips and Tricks
|
||||
|
||||
According to some reports
|
||||
(e.g. @url{http://d.hatena.ne.jp/kamedo2/20120729/1343545890}), setting the
|
||||
@option{cutoff} option to 15000 Hz greatly improves the output
|
||||
quality. As a result, we encourage you to do the same.
|
||||
|
||||
@section ac3 and ac3_fixed
|
||||
|
||||
AC-3 audio encoders.
|
||||
@@ -501,279 +412,6 @@ Selected by Encoder (default)
|
||||
|
||||
@end table
|
||||
|
||||
@section libmp3lame
|
||||
|
||||
LAME (Lame Ain't an MP3 Encoder) MP3 encoder wrapper
|
||||
|
||||
Requires the presence of the libmp3lame headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libmp3lame}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by the libmp3lame wrapper. The
|
||||
@command{lame}-equivalent of the options are listed in parentheses.
|
||||
|
||||
@table @option
|
||||
@item b (@emph{-b})
|
||||
Set bitrate expressed in bits/s for CBR. LAME @code{bitrate} is
|
||||
expressed in kilobits/s.
|
||||
|
||||
@item q (@emph{-V})
|
||||
Set constant quality setting for VBR. This option is valid only
|
||||
using the @command{ffmpeg} command-line tool. For library interface
|
||||
users, use @option{global_quality}.
|
||||
|
||||
@item compression_level (@emph{-q})
|
||||
Set algorithm quality. Valid arguments are integers in the 0-9 range,
|
||||
with 0 meaning highest quality but slowest, and 9 meaning fastest
|
||||
while producing the worst quality.
|
||||
|
||||
@item reservoir
|
||||
Enable use of bit reservoir when set to 1. Default value is 1. LAME
|
||||
has this enabled by default, but can be overridden by use of the
|
||||
@option{--nores} option.
|
||||
|
||||
@end table
|
||||
|
||||
@section libopencore-amrnb
|
||||
|
||||
OpenCORE Adaptive Multi-Rate Narrowband encoder.
|
||||
|
||||
Requires the presence of the libopencore-amrnb headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libopencore-amrnb --enable-version3}.
|
||||
|
||||
This is a mono-only encoder. Officially it only supports 8000Hz sample rate,
|
||||
but you can override it by setting @option{strict} to @samp{unofficial} or
|
||||
lower.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item b
|
||||
Set bitrate in bits per second. Only the following bitrates are supported,
|
||||
otherwise libavcodec will round to the nearest valid bitrate.
|
||||
|
||||
@table @option
|
||||
@item 4750
|
||||
@item 5150
|
||||
@item 5900
|
||||
@item 6700
|
||||
@item 7400
|
||||
@item 7950
|
||||
@item 10200
|
||||
@item 12200
|
||||
@end table
|
||||
|
||||
@item dtx
|
||||
Allow discontinuous transmission (generate comfort noise) when set to 1. The
|
||||
default value is 0 (disabled).
|
||||
|
||||
@end table
|
||||
|
||||
@section libtwolame
|
||||
|
||||
TwoLAME MP2 encoder wrapper
|
||||
|
||||
Requires the presence of the libtwolame headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libtwolame}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by the libtwolame wrapper. The
|
||||
@command{twolame}-equivalent options follow the FFmpeg ones and are in
|
||||
parentheses.
|
||||
|
||||
@table @option
|
||||
@item b (@emph{-b})
|
||||
Set bitrate expressed in bits/s for CBR. @command{twolame} @option{b}
|
||||
option is expressed in kilobits/s. Default value is 128k.
|
||||
|
||||
@item q (@emph{-V})
|
||||
Set quality for experimental VBR support. Maximum value range is
|
||||
from -50 to 50, useful range is from -10 to 10. The higher the
|
||||
value, the better the quality. This option is valid only using the
|
||||
@command{ffmpeg} command-line tool. For library interface users,
|
||||
use @option{global_quality}.
|
||||
|
||||
@item mode (@emph{--mode})
|
||||
Set the mode of the resulting audio. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Choose mode automatically based on the input. This is the default.
|
||||
@item stereo
|
||||
Stereo
|
||||
@item joint_stereo
|
||||
Joint stereo
|
||||
@item dual_channel
|
||||
Dual channel
|
||||
@item mono
|
||||
Mono
|
||||
@end table
|
||||
|
||||
@item psymodel (@emph{--psyc-mode})
|
||||
Set psychoacoustic model to use in encoding. The argument must be
|
||||
an integer between -1 and 4, inclusive. The higher the value, the
|
||||
better the quality. The default value is 3.
|
||||
|
||||
@item energy_levels (@emph{--energy})
|
||||
Enable energy levels extensions when set to 1. The default value is
|
||||
0 (disabled).
|
||||
|
||||
@item error_protection (@emph{--protect})
|
||||
Enable CRC error protection when set to 1. The default value is 0
|
||||
(disabled).
|
||||
|
||||
@item copyright (@emph{--copyright})
|
||||
Set MPEG audio copyright flag when set to 1. The default value is 0
|
||||
(disabled).
|
||||
|
||||
@item original (@emph{--original})
|
||||
Set MPEG audio original flag when set to 1. The default value is 0
|
||||
(disabled).
|
||||
|
||||
@end table
|
||||
|
||||
@anchor{libvo-aacenc}
|
||||
@section libvo-aacenc
|
||||
|
||||
VisualOn AAC encoder
|
||||
|
||||
Requires the presence of the libvo-aacenc headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libvo-aacenc --enable-version3}.
|
||||
|
||||
This encoder is considered to be worse than the
|
||||
@ref{aacenc,,native experimental FFmpeg AAC encoder}, according to
|
||||
multiple sources.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The VisualOn AAC encoder only supports encoding AAC-LC and up to 2
|
||||
channels. It is also CBR-only.
|
||||
|
||||
@table @option
|
||||
|
||||
@item b
|
||||
Set bit rate in bits/s.
|
||||
|
||||
@end table
|
||||
|
||||
@section libvo-amrwbenc
|
||||
|
||||
VisualOn Adaptive Multi-Rate Wideband encoder
|
||||
|
||||
Requires the presence of the libvo-amrwbenc headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libvo-amrwbenc --enable-version3}.
|
||||
|
||||
This is a mono-only encoder. Officially it only supports 16000Hz sample
|
||||
rate, but you can override it by setting @option{strict} to
|
||||
@samp{unofficial} or lower.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item b
|
||||
Set bitrate in bits/s. Only the following bitrates are supported, otherwise
|
||||
libavcodec will round to the nearest valid bitrate.
|
||||
|
||||
@table @samp
|
||||
@item 6600
|
||||
@item 8850
|
||||
@item 12650
|
||||
@item 14250
|
||||
@item 15850
|
||||
@item 18250
|
||||
@item 19850
|
||||
@item 23050
|
||||
@item 23850
|
||||
@end table
|
||||
|
||||
@item dtx
|
||||
Allow discontinuous transmission (generate comfort noise) when set to 1. The
|
||||
default value is 0 (disabled).
|
||||
|
||||
@end table
|
||||
|
||||
@section libopus
|
||||
|
||||
libopus Opus Interactive Audio Codec encoder wrapper.
|
||||
|
||||
Requires the presence of the libopus headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libopus}.
|
||||
|
||||
@subsection Option Mapping
|
||||
|
||||
Most libopus options are modeled after the @command{opusenc} utility from
|
||||
opus-tools. The following is an option mapping chart describing options
|
||||
supported by the libopus wrapper, and their @command{opusenc}-equivalent
|
||||
in parentheses.
|
||||
|
||||
@table @option
|
||||
|
||||
@item b (@emph{bitrate})
|
||||
Set the bit rate in bits/s. FFmpeg's @option{b} option is
|
||||
expressed in bits/s, while @command{opusenc}'s @option{bitrate} in
|
||||
kilobits/s.
|
||||
|
||||
@item vbr (@emph{vbr}, @emph{hard-cbr}, and @emph{cvbr})
|
||||
Set VBR mode. The FFmpeg @option{vbr} option has the following
|
||||
valid arguments, with their @command{opusenc} equivalent options
|
||||
in parentheses:
|
||||
|
||||
@table @samp
|
||||
@item off (@emph{hard-cbr})
|
||||
Use constant bit rate encoding.
|
||||
|
||||
@item on (@emph{vbr})
|
||||
Use variable bit rate encoding (the default).
|
||||
|
||||
@item constrained (@emph{cvbr})
|
||||
Use constrained variable bit rate encoding.
|
||||
@end table
|
||||
|
||||
@item compression_level (@emph{comp})
|
||||
Set encoding algorithm complexity. Valid options are integers in
|
||||
the 0-10 range. 0 gives the fastest encodes but lower quality, while 10
|
||||
gives the highest quality but slowest encoding. The default is 10.
|
||||
|
||||
@item frame_duration (@emph{framesize})
|
||||
Set maximum frame size, or duration of a frame in milliseconds. The
|
||||
argument must be exactly the following: 2.5, 5, 10, 20, 40, 60. Smaller
|
||||
frame sizes achieve lower latency but less quality at a given bitrate.
|
||||
Sizes greater than 20ms are only interesting at fairly low bitrates.
|
||||
The default is 20ms.
|
||||
|
||||
@item packet_loss (@emph{expect-loss})
|
||||
Set expected packet loss percentage. The default is 0.
|
||||
|
||||
@item application (N.A.)
|
||||
Set intended application type. Valid options are listed below:
|
||||
|
||||
@table @samp
|
||||
@item voip
|
||||
Favor improved speech intelligibility.
|
||||
@item audio
|
||||
Favor faithfulness to the input (the default).
|
||||
@item lowdelay
|
||||
Restrict to only the lowest delay modes.
|
||||
@end table
|
||||
|
||||
@item cutoff (N.A.)
|
||||
Set cutoff bandwidth in Hz. The argument must be exactly one of the
|
||||
following: 4000, 6000, 8000, 12000, or 20000, corresponding to
|
||||
narrowband, mediumband, wideband, super wideband, and fullband
|
||||
respectively. The default is 0 (cutoff disabled).
|
||||
|
||||
@end table
|
||||
|
||||
@c man end AUDIO ENCODERS
|
||||
|
||||
@chapter Video Encoders
|
||||
@@ -945,318 +583,178 @@ For more information about libvpx see:
|
||||
|
||||
x264 H.264/MPEG-4 AVC encoder wrapper
|
||||
|
||||
This encoder requires the presence of the libx264 headers and library
|
||||
during configuration. You need to explicitly configure the build with
|
||||
Requires the presence of the libx264 headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libx264}.
|
||||
|
||||
libx264 supports an impressive number of features, including 8x8 and
|
||||
4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC
|
||||
entropy coding, interlacing (MBAFF), lossless mode, psy optimizations
|
||||
for detail retention (adaptive quantization, psy-RD, psy-trellis).
|
||||
x264 supports an impressive number of features, including 8x8 and 4x4 adaptive
|
||||
spatial transform, adaptive B-frame placement, CAVLC/CABAC entropy coding,
|
||||
interlacing (MBAFF), lossless mode, psy optimizations for detail retention
|
||||
(adaptive quantization, psy-RD, psy-trellis).
|
||||
|
||||
Many libx264 encoder options are mapped to FFmpeg global codec
|
||||
options, while unique encoder options are provided through private
|
||||
options. Additionally the @option{x264opts} and @option{x264-params}
|
||||
private options allow passing a list of key=value tuples as accepted
|
||||
by the libx264 @code{x264_param_parse} function.
|
||||
The FFmpeg wrapper provides a mapping for most of them using global options
|
||||
that match those of the encoders and provides private options for the unique
|
||||
encoder options. Additionally an expert override is provided to directly pass
|
||||
a list of key=value tuples as accepted by x264_param_parse.
|
||||
|
||||
The x264 project website is at
|
||||
@url{http://www.videolan.org/developers/x264.html}.
|
||||
@subsection Option Mapping
|
||||
|
||||
@subsection Options
|
||||
The following options are supported by the x264 wrapper, the x264-equivalent
|
||||
options follow the FFmpeg ones.
|
||||
|
||||
The following options are supported by the libx264 wrapper. The
|
||||
@command{x264}-equivalent options or values are listed in parentheses
|
||||
for easy migration.
|
||||
|
||||
To reduce the duplication of documentation, only the private options
|
||||
and some others requiring special attention are documented here. For
|
||||
the documentation of the undocumented generic options, see
|
||||
@ref{codec-options,,the Codec Options chapter}.
|
||||
|
||||
To get a more accurate and extensive documentation of the libx264
|
||||
options, invoke the command @command{x264 --full-help} or consult
|
||||
the libx264 documentation.
|
||||
@multitable @columnfractions .2 .2
|
||||
@item b @tab bitrate
|
||||
FFmpeg @code{b} option is expressed in bits/s, x264 @code{bitrate} in kilobits/s.
|
||||
@item bf @tab bframes
|
||||
Maximum number of B-frames.
|
||||
@item g @tab keyint
|
||||
Maximum GOP size.
|
||||
@item qmin @tab qpmin
|
||||
@item qmax @tab qpmax
|
||||
@item qdiff @tab qpstep
|
||||
@item qblur @tab qblur
|
||||
@item qcomp @tab qcomp
|
||||
@item refs @tab ref
|
||||
@item sc_threshold @tab scenecut
|
||||
@item trellis @tab trellis
|
||||
@item nr @tab nr
|
||||
Noise reduction.
|
||||
@item me_range @tab merange
|
||||
@item me_method @tab me
|
||||
@item subq @tab subme
|
||||
@item b_strategy @tab b-adapt
|
||||
@item keyint_min @tab keyint-min
|
||||
@item coder @tab cabac
|
||||
Set coder to @code{ac} to use CABAC.
|
||||
@item cmp @tab chroma-me
|
||||
Set to @code{chroma} to use chroma motion estimation.
|
||||
@item threads @tab threads
|
||||
@item thread_type @tab sliced_threads
|
||||
Set to @code{slice} to use sliced threading instead of frame threading.
|
||||
@item flags -cgop @tab open-gop
|
||||
Set @code{-cgop} to use recovery points to close GOPs.
|
||||
@item rc_init_occupancy @tab vbv-init
|
||||
Initial buffer occupancy.
|
||||
@end multitable
|
||||
|
||||
@subsection Private Options
|
||||
@table @option
|
||||
@item b (@emph{bitrate})
|
||||
Set bitrate in bits/s. Note that FFmpeg's @option{b} option is
|
||||
expressed in bits/s, while @command{x264}'s @option{bitrate} is in
|
||||
kilobits/s.
|
||||
|
||||
@item bf (@emph{bframes})
|
||||
|
||||
@item g (@emph{keyint})
|
||||
|
||||
@item qmax (@emph{qpmax})
|
||||
|
||||
@item qmin (@emph{qpmin})
|
||||
|
||||
@item qdiff (@emph{qpstep})
|
||||
|
||||
@item qblur (@emph{qblur})
|
||||
|
||||
@item qcomp (@emph{qcomp})
|
||||
|
||||
@item refs (@emph{ref})
|
||||
|
||||
@item sc_threshold (@emph{scenecut})
|
||||
|
||||
@item trellis (@emph{trellis})
|
||||
|
||||
@item nr (@emph{nr})
|
||||
|
||||
@item me_range (@emph{merange})
|
||||
|
||||
@item me_method (@emph{me})
|
||||
Set motion estimation method. Possible values in the decreasing order
|
||||
of speed:
|
||||
|
||||
@table @samp
|
||||
@item dia (@emph{dia})
|
||||
@item epzs (@emph{dia})
|
||||
Diamond search with radius 1 (fastest). @samp{epzs} is an alias for
|
||||
@samp{dia}.
|
||||
@item hex (@emph{hex})
|
||||
Hexagonal search with radius 2.
|
||||
@item umh (@emph{umh})
|
||||
Uneven multi-hexagon search.
|
||||
@item esa (@emph{esa})
|
||||
Exhaustive search.
|
||||
@item tesa (@emph{tesa})
|
||||
Hadamard exhaustive search (slowest).
|
||||
@end table
|
||||
|
||||
@item subq (@emph{subme})
|
||||
|
||||
@item b_strategy (@emph{b-adapt})
|
||||
|
||||
@item keyint_min (@emph{min-keyint})
|
||||
|
||||
@item coder
|
||||
Set entropy encoder. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item ac
|
||||
Enable CABAC.
|
||||
|
||||
@item vlc
|
||||
Enable CAVLC and disable CABAC. It generates the same effect as
|
||||
@command{x264}'s @option{--no-cabac} option.
|
||||
@end table
|
||||
|
||||
@item cmp
|
||||
Set full-pixel motion estimation comparison algorithm. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item chroma
|
||||
Enable chroma in motion estimation.
|
||||
|
||||
@item sad
|
||||
Ignore chroma in motion estimation. It generates the same effect as
|
||||
@command{x264}'s @option{--no-chroma-me} option.
|
||||
@end table
|
||||
|
||||
@item threads (@emph{threads})
|
||||
|
||||
@item thread_type
|
||||
Set multithreading technique. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item slice
|
||||
Slice-based multithreading. It generates the same effect as
|
||||
@command{x264}'s @option{--sliced-threads} option.
|
||||
@item frame
|
||||
Frame-based multithreading.
|
||||
@end table
|
||||
|
||||
@item flags
|
||||
Set encoding flags. It can be used to disable closed GOP and enable
|
||||
open GOP by setting it to @code{-cgop}. The result is similar to
|
||||
the behavior of @command{x264}'s @option{--open-gop} option.
|
||||
|
||||
@item rc_init_occupancy (@emph{vbv-init})
|
||||
|
||||
@item preset (@emph{preset})
|
||||
Set the encoding preset.
|
||||
|
||||
@item tune (@emph{tune})
|
||||
Set tuning of the encoding params.
|
||||
|
||||
@item profile (@emph{profile})
|
||||
Set profile restrictions.
|
||||
|
||||
@item fastfirstpass
|
||||
Enable fast settings when encoding first pass, when set to 1. When set
|
||||
to 0, it has the same effect of @command{x264}'s
|
||||
@option{--slow-firstpass} option.
|
||||
|
||||
@item crf (@emph{crf})
|
||||
Set the quality for constant quality mode.
|
||||
|
||||
@item crf_max (@emph{crf-max})
|
||||
@item -preset @var{string}
|
||||
Set the encoding preset (cf. x264 --fullhelp).
|
||||
@item -tune @var{string}
|
||||
Tune the encoding params (cf. x264 --fullhelp).
|
||||
@item -profile @var{string}
|
||||
Set profile restrictions (cf. x264 --fullhelp).
|
||||
@item -fastfirstpass @var{integer}
|
||||
Use fast settings when encoding first pass.
|
||||
@item -crf @var{float}
|
||||
Select the quality for constant quality mode.
|
||||
@item -crf_max @var{float}
|
||||
In CRF mode, prevents VBV from lowering quality beyond this point.
|
||||
@item -qp @var{integer}
|
||||
Constant quantization parameter rate control method.
|
||||
@item -aq-mode @var{integer}
|
||||
AQ method
|
||||
|
||||
@item qp (@emph{qp})
|
||||
Set constant quantization rate control method parameter.
|
||||
|
||||
@item aq-mode (@emph{aq-mode})
|
||||
Set AQ method. Possible values:
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item none (@emph{0})
|
||||
Disabled.
|
||||
@item none
|
||||
|
||||
@item variance (@emph{1})
|
||||
@item variance
|
||||
Variance AQ (complexity mask).
|
||||
|
||||
@item autovariance (@emph{2})
|
||||
@item autovariance
|
||||
Auto-variance AQ (experimental).
|
||||
@end table
|
||||
@item -aq-strength @var{float}
|
||||
AQ strength, reduces blocking and blurring in flat and textured areas.
|
||||
@item -psy @var{integer}
|
||||
Use psychovisual optimizations.
|
||||
@item -psy-rd @var{string}
|
||||
Strength of psychovisual optimization, in <psy-rd>:<psy-trellis> format.
|
||||
@item -rc-lookahead @var{integer}
|
||||
Number of frames to look ahead for frametype and ratecontrol.
|
||||
@item -weightb @var{integer}
|
||||
Weighted prediction for B-frames.
|
||||
@item -weightp @var{integer}
|
||||
Weighted prediction analysis method.
|
||||
|
||||
@item aq-strength (@emph{aq-strength})
|
||||
Set AQ strength, reduce blocking and blurring in flat and textured areas.
|
||||
|
||||
@item psy
|
||||
Use psychovisual optimizations when set to 1. When set to 0, it has the
|
||||
same effect as @command{x264}'s @option{--no-psy} option.
|
||||
|
||||
@item psy-rd (@emph{psy-rd})
|
||||
Set strength of psychovisual optimization, in
|
||||
@var{psy-rd}:@var{psy-trellis} format.
|
||||
|
||||
@item rc-lookahead (@emph{rc-lookahead})
|
||||
Set number of frames to look ahead for frametype and ratecontrol.
|
||||
|
||||
@item weightb
|
||||
Enable weighted prediction for B-frames when set to 1. When set to 0,
|
||||
it has the same effect as @command{x264}'s @option{--no-weightb} option.
|
||||
|
||||
@item weightp (@emph{weightp})
|
||||
Set weighted prediction method for P-frames. Possible values:
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item none (@emph{0})
|
||||
Disabled
|
||||
@item simple (@emph{1})
|
||||
Enable only weighted refs
|
||||
@item smart (@emph{2})
|
||||
Enable both weighted refs and duplicates
|
||||
@item none
|
||||
|
||||
@item simple
|
||||
|
||||
@item smart
|
||||
|
||||
@end table
|
||||
@item -ssim @var{integer}
|
||||
Calculate and print SSIM stats.
|
||||
@item -intra-refresh @var{integer}
|
||||
Use Periodic Intra Refresh instead of IDR frames.
|
||||
@item -b-bias @var{integer}
|
||||
Influences how often B-frames are used.
|
||||
@item -b-pyramid @var{integer}
|
||||
Keep some B-frames as references.
|
||||
|
||||
@item ssim (@emph{ssim})
|
||||
Enable calculation and printing SSIM stats after the encoding.
|
||||
|
||||
@item intra-refresh (@emph{intra-refresh})
|
||||
Enable the use of Periodic Intra Refresh instead of IDR frames when set
|
||||
to 1.
|
||||
|
||||
@item b-bias (@emph{b-bias})
|
||||
Set the influence on how often B-frames are used.
|
||||
|
||||
@item b-pyramid (@emph{b-pyramid})
|
||||
Set method for keeping of some B-frames as references. Possible values:
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item none (@emph{none})
|
||||
Disabled.
|
||||
@item strict (@emph{strict})
|
||||
@item none
|
||||
|
||||
@item strict
|
||||
Strictly hierarchical pyramid.
|
||||
@item normal (@emph{normal})
|
||||
@item normal
|
||||
Non-strict (not Blu-ray compatible).
|
||||
@end table
|
||||
@item -mixed-refs @var{integer}
|
||||
One reference per partition, as opposed to one reference per macroblock.
|
||||
@item -8x8dct @var{integer}
|
||||
High profile 8x8 transform.
|
||||
@item -fast-pskip @var{integer}
|
||||
@item -aud @var{integer}
|
||||
Use access unit delimiters.
|
||||
@item -mbtree @var{integer}
|
||||
Use macroblock tree ratecontrol.
|
||||
@item -deblock @var{string}
|
||||
Loop filter parameters, in <alpha:beta> form.
|
||||
@item -cplxblur @var{float}
|
||||
Reduce fluctuations in QP (before curve compression).
|
||||
@item -partitions @var{string}
|
||||
A comma-separated list of partitions to consider, possible values: p8x8, p4x4, b8x8, i8x8, i4x4, none, all.
|
||||
@item -direct-pred @var{integer}
|
||||
Direct MV prediction mode
|
||||
|
||||
@item mixed-refs
|
||||
Enable the use of one reference per partition, as opposed to one
|
||||
reference per macroblock when set to 1. When set to 0, it has the
|
||||
same effect as @command{x264}'s @option{--no-mixed-refs} option.
|
||||
|
||||
@item 8x8dct
|
||||
Enable adaptive spatial transform (high profile 8x8 transform)
|
||||
when set to 1. When set to 0, it has the same effect as
|
||||
@command{x264}'s @option{--no-8x8dct} option.
|
||||
|
||||
@item fast-pskip
|
||||
Enable early SKIP detection on P-frames when set to 1. When set
|
||||
to 0, it has the same effect as @command{x264}'s
|
||||
@option{--no-fast-pskip} option.
|
||||
|
||||
@item aud (@emph{aud})
|
||||
Enable use of access unit delimiters when set to 1.
|
||||
|
||||
@item mbtree
|
||||
Enable use macroblock tree ratecontrol when set to 1. When set
|
||||
to 0, it has the same effect as @command{x264}'s
|
||||
@option{--no-mbtree} option.
|
||||
|
||||
@item deblock (@emph{deblock})
|
||||
Set loop filter parameters, in @var{alpha}:@var{beta} form.
|
||||
|
||||
@item cplxblur (@emph{cplxblur})
|
||||
Set fluctuations reduction in QP (before curve compression).
|
||||
|
||||
@item partitions (@emph{partitions})
|
||||
Set partitions to consider as a comma-separated list of. Possible
|
||||
values in the list:
|
||||
|
||||
@table @samp
|
||||
@item p8x8
|
||||
8x8 P-frame partition.
|
||||
@item p4x4
|
||||
4x4 P-frame partition.
|
||||
@item b8x8
|
||||
4x4 B-frame partition.
|
||||
@item i8x8
|
||||
8x8 I-frame partition.
|
||||
@item i4x4
|
||||
4x4 I-frame partition.
|
||||
(Enabling @samp{p4x4} requires @samp{p8x8} to be enabled. Enabling
|
||||
@samp{i8x8} requires adaptive spatial transform (@option{8x8dct}
|
||||
option) to be enabled.)
|
||||
@item none (@emph{none})
|
||||
Do not consider any partitions.
|
||||
@item all (@emph{all})
|
||||
Consider every partition.
|
||||
@end table
|
||||
|
||||
@item direct-pred (@emph{direct})
|
||||
Set direct MV prediction mode. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item none (@emph{none})
|
||||
Disable MV prediction.
|
||||
@item spatial (@emph{spatial})
|
||||
Enable spatial predicting.
|
||||
@item temporal (@emph{temporal})
|
||||
Enable temporal predicting.
|
||||
@item auto (@emph{auto})
|
||||
Automatically decided.
|
||||
@end table
|
||||
|
||||
@item slice-max-size (@emph{slice-max-size})
|
||||
Set the limit of the size of each slice in bytes. If not specified
|
||||
but RTP payload size (@option{ps}) is specified, that is used.
|
||||
|
||||
@item stats (@emph{stats})
|
||||
Set the file name for multi-pass stats.
|
||||
|
||||
@item nal-hrd (@emph{nal-hrd})
|
||||
Set signal HRD information (requires @option{vbv-bufsize} to be set).
|
||||
Possible values:
|
||||
|
||||
@table @samp
|
||||
@item none (@emph{none})
|
||||
Disable HRD information signaling.
|
||||
@item vbr (@emph{vbr})
|
||||
Variable bit rate.
|
||||
@item cbr (@emph{cbr})
|
||||
Constant bit rate (not allowed in MP4 container).
|
||||
@item none
|
||||
|
||||
@item spatial
|
||||
|
||||
@item temporal
|
||||
|
||||
@item auto
|
||||
|
||||
@end table
|
||||
@item -slice-max-size @var{integer}
|
||||
Limit the size of each slice in bytes.
|
||||
@item -stats @var{string}
|
||||
Filename for 2 pass stats.
|
||||
@item -nal-hrd @var{integer}
|
||||
Signal HRD information (requires vbv-bufsize; cbr not allowed in .mp4).
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item none
|
||||
|
||||
@item vbr
|
||||
|
||||
@item cbr
|
||||
|
||||
@end table
|
||||
|
||||
@item x264opts (N.A.)
|
||||
Set any x264 option, see @command{x264 --fullhelp} for a list.
|
||||
@item x264opts @var{options}
|
||||
Allow to set any x264 option, see @code{x264 --fullhelp} for a list.
|
||||
|
||||
Argument is a list of @var{key}=@var{value} couples separated by
|
||||
@var{options} is a list of @var{key}=@var{value} couples separated by
|
||||
":". In @var{filter} and @var{psy-rd} options that use ":" as a separator
|
||||
themselves, use "," instead. They accept it as well since long ago but this
|
||||
is kept undocumented for some reason.
|
||||
@@ -1266,135 +764,17 @@ For example to specify libx264 encoding options with @command{ffmpeg}:
|
||||
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
|
||||
@end example
|
||||
|
||||
@item x264-params (N.A.)
|
||||
Override the x264 configuration using a :-separated list of key=value
|
||||
parameters.
|
||||
For more information about libx264 and the supported options see:
|
||||
@url{http://www.videolan.org/developers/x264.html}
|
||||
|
||||
This option is functionally the same as the @option{x264opts}, but is
|
||||
duplicated for compability with the Libav fork.
|
||||
|
||||
For example to specify libx264 encoding options with @command{ffmpeg}:
|
||||
@item -x264-params @var{string}
|
||||
Override the x264 configuration using a :-separated list of key=value parameters.
|
||||
@example
|
||||
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
|
||||
cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
|
||||
no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
|
||||
-x264-params level=30:bframes=0:weightp=0:cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:no-fast-pskip=1:subq=6:8x8dct=0:trellis=0
|
||||
@end example
|
||||
@end table
|
||||
|
||||
Encoding ffpresets for common usages are provided so they can be used with the
|
||||
general presets system (e.g. passing the @option{pre} option).
|
||||
|
||||
@section libxvid
|
||||
|
||||
Xvid MPEG-4 Part 2 encoder wrapper.
|
||||
|
||||
This encoder requires the presence of the libxvidcore headers and library
|
||||
during configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libxvid --enable-gpl}.
|
||||
|
||||
The native @code{mpeg4} encoder supports the MPEG-4 Part 2 format, so
|
||||
users can encode to this format without this library.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by the libxvid wrapper. Some of
|
||||
the following options are listed but are not documented, and
|
||||
correspond to shared codec options. See @ref{codec-options,,the Codec
|
||||
Options chapter} for their documentation. The other shared options
|
||||
which are not listed have no effect for the libxvid encoder.
|
||||
|
||||
@table @option
|
||||
@item b
|
||||
|
||||
@item g
|
||||
|
||||
@item qmin
|
||||
|
||||
@item qmax
|
||||
|
||||
@item mpeg_quant
|
||||
|
||||
@item threads
|
||||
|
||||
@item bf
|
||||
|
||||
@item b_qfactor
|
||||
|
||||
@item b_qoffset
|
||||
|
||||
@item flags
|
||||
Set specific encoding flags. Possible values:
|
||||
|
||||
@table @samp
|
||||
|
||||
@item mv4
|
||||
Use four motion vector by macroblock.
|
||||
|
||||
@item aic
|
||||
Enable high quality AC prediction.
|
||||
|
||||
@item gray
|
||||
Only encode grayscale.
|
||||
|
||||
@item gmc
|
||||
Enable the use of global motion compensation (GMC).
|
||||
|
||||
@item qpel
|
||||
Enable quarter-pixel motion compensation.
|
||||
|
||||
@item cgop
|
||||
Enable closed GOP.
|
||||
|
||||
@item global_header
|
||||
Place global headers in extradata instead of every keyframe.
|
||||
|
||||
@end table
|
||||
|
||||
@item trellis
|
||||
|
||||
@item me_method
|
||||
Set motion estimation method. Possible values in decreasing order of
|
||||
speed and increasing order of quality:
|
||||
|
||||
@table @samp
|
||||
@item zero
|
||||
Use no motion estimation (default).
|
||||
|
||||
@item phods
|
||||
@item x1
|
||||
@item log
|
||||
Enable advanced diamond zonal search for 16x16 blocks and half-pixel
|
||||
refinement for 16x16 blocks. @samp{x1} and @samp{log} are aliases for
|
||||
@samp{phods}.
|
||||
|
||||
@item epzs
|
||||
Enable all of the things described above, plus advanced diamond zonal
|
||||
search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
|
||||
estimation on chroma planes.
|
||||
|
||||
@item full
|
||||
Enable all of the things described above, plus extended 16x16 and 8x8
|
||||
blocks search.
|
||||
@end table
|
||||
|
||||
@item mbd
|
||||
Set macroblock decision algorithm. Possible values in the increasing
|
||||
order of quality:
|
||||
|
||||
@table @samp
|
||||
@item simple
|
||||
Use macroblock comparing function algorithm (default).
|
||||
|
||||
@item bits
|
||||
Enable rate distortion-based half pixel and quarter pixel refinement for
|
||||
16x16 blocks.
|
||||
|
||||
@item rd
|
||||
Enable all of the things described above, plus rate distortion-based
|
||||
half pixel and quarter pixel refinement for 8x8 blocks, and rate
|
||||
distortion-based search using square pattern.
|
||||
@end table
|
||||
|
||||
@end table
|
||||
Encoding avpresets for common usages are provided so they can be used with the
|
||||
general presets system (e.g. passing the @code{-pre} option).
|
||||
|
||||
@c man end VIDEO ENCODERS
|
||||
|
@@ -17,7 +17,6 @@ the libavcodec library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@anchor{codec-options}
|
||||
@chapter Codec Options
|
||||
@c man begin CODEC OPTIONS
|
||||
|
||||
|
@@ -76,9 +76,6 @@ Enable RTP MP4A-LATM payload.
|
||||
Reduce the latency introduced by optional buffering
|
||||
@end table
|
||||
|
||||
@item seek2any @var{integer} (@emph{input})
|
||||
Forces seeking to enable seek to any mode if set to 1. Default is 0.
|
||||
|
||||
@item analyzeduration @var{integer} (@emph{input})
|
||||
Specify how many microseconds are analyzed to probe the input. A
|
||||
higher value will allow to detect more accurate information, but will
|
||||
@@ -145,12 +142,6 @@ Use wallclock as timestamps.
|
||||
@item avoid_negative_ts @var{integer} (@emph{output})
|
||||
Shift timestamps to make them positive. 1 enables, 0 disables, default
|
||||
of -1 enables when required by target format.
|
||||
|
||||
@item skip_initial_bytes @var{integer} (@emph{input})
|
||||
Set number initial bytes to skip. Default is 0.
|
||||
|
||||
@item correct_ts_overflow @var{integer} (@emph{input})
|
||||
Correct single timestamp overflows if set to 1. Default is 1.
|
||||
@end table
|
||||
|
||||
@c man end FORMAT OPTIONS
|
||||
|
@@ -3,10 +3,10 @@
|
||||
|
||||
Filtering in FFmpeg is enabled through the libavfilter library.
|
||||
|
||||
In libavfilter, a filter can have multiple inputs and multiple
|
||||
outputs.
|
||||
To illustrate the sorts of things that are possible, we consider the
|
||||
following filtergraph.
|
||||
In libavfilter, it is possible for filters to have multiple inputs and
|
||||
multiple outputs.
|
||||
To illustrate the sorts of things that are possible, we can
|
||||
use a complex filter graph. For example, the following one:
|
||||
|
||||
@example
|
||||
input --> split ---------------------> overlay --> output
|
||||
@@ -15,32 +15,25 @@ input --> split ---------------------> overlay --> output
|
||||
+-----> crop --> vflip -------+
|
||||
@end example
|
||||
|
||||
This filtergraph splits the input stream in two streams, sends one
|
||||
stream through the crop filter and the vflip filter before merging it
|
||||
back with the other stream by overlaying it on top. You can use the
|
||||
following command to achieve this:
|
||||
splits the stream in two streams, sends one stream through the crop filter
|
||||
and the vflip filter before merging it back with the other stream by
|
||||
overlaying it on top. You can use the following command to achieve this:
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
|
||||
ffmpeg -i input -vf "[in] split [T1], [T2] overlay=0:H/2 [out]; [T1] crop=iw:ih/2:0:ih/2, vflip [T2]" output
|
||||
@end example
|
||||
|
||||
The result will be that in output the top half of the video is mirrored
|
||||
onto the bottom half.
|
||||
|
||||
Filters in the same linear chain are separated by commas, and distinct
|
||||
linear chains of filters are separated by semicolons. In our example,
|
||||
@var{crop,vflip} are in one linear chain, @var{split} and
|
||||
@var{overlay} are separately in another. The points where the linear
|
||||
chains join are labelled by names enclosed in square brackets. In the
|
||||
example, the split filter generates two outputs that are associated to
|
||||
the labels @var{[main]} and @var{[tmp]}.
|
||||
|
||||
The stream sent to the second output of @var{split}, labelled as
|
||||
@var{[tmp]}, is processed through the @var{crop} filter, which crops
|
||||
away the lower half part of the video, and then vertically flipped. The
|
||||
@var{overlay} filter takes in input the first unchanged output of the
|
||||
split filter (which was labelled as @var{[main]}), and overlay on its
|
||||
lower half the output generated by the @var{crop,vflip} filterchain.
|
||||
Filters are loaded using the @var{-vf} or @var{-af} option passed to
|
||||
@command{ffmpeg} or to @command{ffplay}. Filters in the same linear
|
||||
chain are separated by commas. In our example, @var{split,
|
||||
overlay} are in one linear chain, and @var{crop, vflip} are in
|
||||
another. The points where the linear chains join are labeled by names
|
||||
enclosed in square brackets. In our example, that is @var{[T1]} and
|
||||
@var{[T2]}. The special labels @var{[in]} and @var{[out]} are the points
|
||||
where video is input and output.
|
||||
|
||||
Some filters take in input a list of parameters: they are specified
|
||||
after the filter name and an equal sign, and are separated from each other
|
||||
@@ -2037,7 +2030,7 @@ This expression is evaluated only once during the filter
|
||||
configuration.
|
||||
|
||||
@item h, out_h
|
||||
Set the crop area height. It defaults to @code{ih}.
|
||||
Set the crop area width. It defaults to @code{ih}.
|
||||
This expression is evaluated only once during the filter
|
||||
configuration.
|
||||
|
||||
|
@@ -24,7 +24,7 @@ instructions. To enable using OpenJPEG in FFmpeg, pass @code{--enable-libopenjp
|
||||
@file{./configure}.
|
||||
|
||||
|
||||
@section OpenCORE, VisualOn, and Fraunhofer libraries
|
||||
@section OpenCORE and VisualOn libraries
|
||||
|
||||
Spun off Google Android sources, OpenCore, VisualOn and Fraunhofer
|
||||
libraries provide encoders for a number of audio codecs.
|
||||
@@ -32,14 +32,9 @@ libraries provide encoders for a number of audio codecs.
|
||||
@float NOTE
|
||||
OpenCORE and VisualOn libraries are under the Apache License 2.0
|
||||
(see @url{http://www.apache.org/licenses/LICENSE-2.0} for details), which is
|
||||
incompatible to the LGPL version 2.1 and GPL version 2. You have to
|
||||
incompatible with the LGPL version 2.1 and GPL version 2. You have to
|
||||
upgrade FFmpeg's license to LGPL version 3 (or if you have enabled
|
||||
GPL components, GPL version 3) by passing @code{--enable-version3} to configure in
|
||||
order to use it.
|
||||
|
||||
The Fraunhofer AAC library is licensed under a license incompatible to the GPL
|
||||
and is not known to be compatible to the LGPL. Therefore, you have to pass
|
||||
@code{--enable-nonfree} to configure to use it.
|
||||
GPL components, GPL version 3) to use it.
|
||||
@end float
|
||||
|
||||
@subsection OpenCORE AMR
|
||||
|
@@ -24,7 +24,7 @@ a mail for every change to every issue.
|
||||
The subscription URL for the ffmpeg-trac list is:
|
||||
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
|
||||
The URL of the webinterface of the tracker is:
|
||||
http(s)://trac.ffmpeg.org
|
||||
http(s)://ffmpeg.org/trac/ffmpeg
|
||||
|
||||
Type:
|
||||
-----
|
||||
|
@@ -18,23 +18,6 @@ enabled muxers.
|
||||
|
||||
A description of some of the currently available muxers follows.
|
||||
|
||||
@anchor{aiff}
|
||||
@section aiff
|
||||
|
||||
Audio Interchange File Format muxer.
|
||||
|
||||
It accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item write_id3v2
|
||||
Enable ID3v2 tags writing when set to 1. Default is 0 (disabled).
|
||||
|
||||
@item id3v2_version
|
||||
Select ID3v2 version to write. Currently only version 3 and 4 (aka.
|
||||
ID3v2.3 and ID3v2.4) are supported. The default is version 4.
|
||||
|
||||
@end table
|
||||
|
||||
@anchor{crc}
|
||||
@section crc
|
||||
|
||||
|
12
ffmpeg.c
12
ffmpeg.c
@@ -162,8 +162,6 @@ static struct termios oldtty;
|
||||
static int restore_tty;
|
||||
#endif
|
||||
|
||||
static void free_input_threads(void);
|
||||
|
||||
|
||||
/* sub2video hack:
|
||||
Convert subtitles to video with alpha to insert them in filter graphs.
|
||||
@@ -459,9 +457,6 @@ static void exit_program(void)
|
||||
av_freep(&output_streams[i]->logfile_prefix);
|
||||
av_freep(&output_streams[i]);
|
||||
}
|
||||
#if HAVE_PTHREADS
|
||||
free_input_threads();
|
||||
#endif
|
||||
for (i = 0; i < nb_input_files; i++) {
|
||||
avformat_close_input(&input_files[i]->ctx);
|
||||
av_freep(&input_files[i]);
|
||||
@@ -1910,10 +1905,7 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
|
||||
ist->st->codec->sample_rate;
|
||||
break;
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
if (ist->framerate.num) {
|
||||
int64_t next_dts = av_rescale_q(ist->next_dts, AV_TIME_BASE_Q, av_inv_q(ist->framerate));
|
||||
ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), AV_TIME_BASE_Q);
|
||||
} else if (pkt->duration) {
|
||||
if (pkt->duration) {
|
||||
ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
} else if(ist->st->codec->time_base.num != 0) {
|
||||
int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
|
||||
@@ -2216,8 +2208,6 @@ static int transcode_init(void)
|
||||
codec->time_base = icodec->time_base;
|
||||
}
|
||||
|
||||
if (ist && !ost->frame_rate.num)
|
||||
ost->frame_rate = ist->framerate;
|
||||
if(ost->frame_rate.num)
|
||||
codec->time_base = av_inv_q(ost->frame_rate);
|
||||
|
||||
|
@@ -43,15 +43,12 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFo
|
||||
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
|
||||
int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
|
||||
enum AVPixelFormat best= AV_PIX_FMT_NONE;
|
||||
const enum AVPixelFormat mjpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
|
||||
const enum AVPixelFormat ljpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
|
||||
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
|
||||
|
||||
if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
|
||||
if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
|
||||
p = mjpeg_formats;
|
||||
p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
|
||||
} else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
|
||||
p =ljpeg_formats;
|
||||
p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
|
||||
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
|
||||
}
|
||||
}
|
||||
for (; *p != AV_PIX_FMT_NONE; p++) {
|
||||
|
@@ -203,7 +203,6 @@ static char *value_string(char *buf, int buf_size, struct unit_value uv)
|
||||
vald /= pow(10, index * 3);
|
||||
prefix_string = decimal_unit_prefixes[index];
|
||||
}
|
||||
vali = vald;
|
||||
}
|
||||
|
||||
if (show_float || (use_value_prefix && vald != (long long int)vald))
|
||||
|
@@ -328,14 +328,6 @@ static AVLFG random_state;
|
||||
|
||||
static FILE *logfile = NULL;
|
||||
|
||||
static void htmlstrip(char *s) {
|
||||
while (s && *s) {
|
||||
s += strspn(s, "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ,. ");
|
||||
if (*s)
|
||||
*s++ = '?';
|
||||
}
|
||||
}
|
||||
|
||||
static int64_t ffm_read_write_index(int fd)
|
||||
{
|
||||
uint8_t buf[8];
|
||||
@@ -1895,7 +1887,6 @@ static int http_parse_request(HTTPContext *c)
|
||||
send_error:
|
||||
c->http_error = 404;
|
||||
q = c->buffer;
|
||||
htmlstrip(msg);
|
||||
snprintf(q, c->buffer_size,
|
||||
"HTTP/1.0 404 Not Found\r\n"
|
||||
"Content-type: text/html\r\n"
|
||||
|
@@ -81,7 +81,7 @@ enum BandType {
|
||||
INTENSITY_BT = 15, ///< Scalefactor data are intensity stereo positions.
|
||||
};
|
||||
|
||||
#define IS_CODEBOOK_UNSIGNED(x) (((x) - 1) & 10)
|
||||
#define IS_CODEBOOK_UNSIGNED(x) ((x - 1) & 10)
|
||||
|
||||
enum ChannelPosition {
|
||||
AAC_CHANNEL_OFF = 0,
|
||||
|
@@ -710,7 +710,7 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx,
|
||||
const float lambda)
|
||||
{
|
||||
int start = 0, i, w, w2, g;
|
||||
int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels * (lambda / 120.f);
|
||||
int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels;
|
||||
float dists[128] = { 0 }, uplims[128];
|
||||
float maxvals[128];
|
||||
int fflag, minscaler;
|
||||
|
@@ -189,9 +189,6 @@ static int frame_configure_elements(AVCodecContext *avctx)
|
||||
}
|
||||
}
|
||||
|
||||
if (!avctx->channels)
|
||||
return 1;
|
||||
|
||||
/* get output buffer */
|
||||
ac->frame->nb_samples = 2048;
|
||||
if ((ret = ff_get_buffer(avctx, ac->frame)) < 0) {
|
||||
|
@@ -429,7 +429,6 @@ static void hybrid_synthesis(PSDSPContext *dsp, float out[2][38][64],
|
||||
#define DECAY_SLOPE 0.05f
|
||||
/// Number of frequency bands that can be addressed by the parameter index, b(k)
|
||||
static const int NR_PAR_BANDS[] = { 20, 34 };
|
||||
static const int NR_IPDOPD_BANDS[] = { 11, 17 };
|
||||
/// Number of frequency bands that can be addressed by the sub subband index, k
|
||||
static const int NR_BANDS[] = { 71, 91 };
|
||||
/// Start frequency band for the all-pass filter decay slope
|
||||
@@ -824,8 +823,7 @@ static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2
|
||||
h12 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][1];
|
||||
h21 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][2];
|
||||
h22 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][3];
|
||||
|
||||
if (!PS_BASELINE && ps->enable_ipdopd && b < NR_IPDOPD_BANDS[is34]) {
|
||||
if (!PS_BASELINE && ps->enable_ipdopd && b < ps->nr_ipdopd_par) {
|
||||
//The spec say says to only run this smoother when enable_ipdopd
|
||||
//is set but the reference decoder appears to run it constantly
|
||||
float h11i, h12i, h21i, h22i;
|
||||
|
@@ -449,11 +449,9 @@ static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_
|
||||
* @param[out] coded_samples set to the number of samples as coded in the
|
||||
* packet, or 0 if the codec does not encode the
|
||||
* number of samples in each frame.
|
||||
* @param[out] approx_nb_samples set to non-zero if the number of samples
|
||||
* returned is an approximation.
|
||||
*/
|
||||
static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
|
||||
int buf_size, int *coded_samples, int *approx_nb_samples)
|
||||
int buf_size, int *coded_samples)
|
||||
{
|
||||
ADPCMDecodeContext *s = avctx->priv_data;
|
||||
int nb_samples = 0;
|
||||
@@ -462,7 +460,6 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
|
||||
int header_size;
|
||||
|
||||
*coded_samples = 0;
|
||||
*approx_nb_samples = 0;
|
||||
|
||||
if(ch <= 0)
|
||||
return 0;
|
||||
@@ -533,12 +530,10 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
|
||||
case AV_CODEC_ID_ADPCM_EA_R2:
|
||||
header_size = 4 + 5 * ch;
|
||||
*coded_samples = bytestream2_get_le32(gb);
|
||||
*approx_nb_samples = 1;
|
||||
break;
|
||||
case AV_CODEC_ID_ADPCM_EA_R3:
|
||||
header_size = 4 + 5 * ch;
|
||||
*coded_samples = bytestream2_get_be32(gb);
|
||||
*approx_nb_samples = 1;
|
||||
break;
|
||||
}
|
||||
*coded_samples -= *coded_samples % 28;
|
||||
@@ -630,11 +625,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int16_t **samples_p;
|
||||
int st; /* stereo */
|
||||
int count1, count2;
|
||||
int nb_samples, coded_samples, approx_nb_samples, ret;
|
||||
int nb_samples, coded_samples, ret;
|
||||
GetByteContext gb;
|
||||
|
||||
bytestream2_init(&gb, buf, buf_size);
|
||||
nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
|
||||
nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples);
|
||||
if (nb_samples <= 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
@@ -652,7 +647,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
/* use coded_samples when applicable */
|
||||
/* it is always <= nb_samples, so the output buffer will be large enough */
|
||||
if (coded_samples) {
|
||||
if (!approx_nb_samples && coded_samples != nb_samples)
|
||||
if (coded_samples != nb_samples)
|
||||
av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
|
||||
frame->nb_samples = nb_samples = coded_samples;
|
||||
}
|
||||
@@ -868,9 +863,6 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||
*samples++ = c->status[0].predictor + c->status[1].predictor;
|
||||
*samples++ = c->status[0].predictor - c->status[1].predictor;
|
||||
}
|
||||
|
||||
if ((bytestream2_tell(&gb) & 1))
|
||||
bytestream2_skip(&gb, 1);
|
||||
break;
|
||||
}
|
||||
case AV_CODEC_ID_ADPCM_IMA_ISS:
|
||||
|
@@ -557,11 +557,10 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
put_bits(&pb, 7, status->step_index);
|
||||
if (avctx->trellis > 0) {
|
||||
uint8_t buf[64];
|
||||
adpcm_compress_trellis(avctx, &samples_p[ch][0], buf, status,
|
||||
adpcm_compress_trellis(avctx, &samples_p[ch][1], buf, status,
|
||||
64, 1);
|
||||
for (i = 0; i < 64; i++)
|
||||
put_bits(&pb, 4, buf[i ^ 1]);
|
||||
status->prev_sample = status->predictor;
|
||||
} else {
|
||||
for (i = 0; i < 64; i += 2) {
|
||||
int t1, t2;
|
||||
|
@@ -274,7 +274,7 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch)
|
||||
// generate warm-up samples
|
||||
residual[0] = samples[0];
|
||||
for (i = 1; i <= lpc.lpc_order; i++)
|
||||
residual[i] = sign_extend(samples[i] - samples[i-1], s->write_sample_size);
|
||||
residual[i] = samples[i] - samples[i-1];
|
||||
|
||||
// perform lpc on remaining samples
|
||||
for (i = lpc.lpc_order + 1; i < s->frame_size; i++) {
|
||||
|
@@ -284,7 +284,7 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
|
||||
GetBitContext gb;
|
||||
uint64_t ht_size;
|
||||
int i, config_offset;
|
||||
MPEG4AudioConfig m4ac = {0};
|
||||
MPEG4AudioConfig m4ac;
|
||||
ALSSpecificConfig *sconf = &ctx->sconf;
|
||||
AVCodecContext *avctx = ctx->avctx;
|
||||
uint32_t als_id, header_size, trailer_size;
|
||||
|
@@ -112,7 +112,7 @@ static void hscroll(AVCodecContext *avctx)
|
||||
AnsiContext *s = avctx->priv_data;
|
||||
int i;
|
||||
|
||||
if (s->y <= avctx->height - 2*s->font_height) {
|
||||
if (s->y < avctx->height - s->font_height) {
|
||||
s->y += s->font_height;
|
||||
return;
|
||||
}
|
||||
@@ -165,7 +165,7 @@ static void draw_char(AVCodecContext *avctx, int c)
|
||||
ff_draw_pc_font(s->frame.data[0] + s->y * s->frame.linesize[0] + s->x,
|
||||
s->frame.linesize[0], s->font, s->font_height, c, fg, bg);
|
||||
s->x += FONT_WIDTH;
|
||||
if (s->x > avctx->width - FONT_WIDTH) {
|
||||
if (s->x >= avctx->width) {
|
||||
s->x = 0;
|
||||
hscroll(avctx);
|
||||
}
|
||||
@@ -239,8 +239,6 @@ static int execute_code(AVCodecContext * avctx, int c)
|
||||
default:
|
||||
av_log_ask_for_sample(avctx, "unsupported screen mode\n");
|
||||
}
|
||||
s->x = av_clip(s->x, 0, width - FONT_WIDTH);
|
||||
s->y = av_clip(s->y, 0, height - s->font_height);
|
||||
if (width != avctx->width || height != avctx->height) {
|
||||
if (s->frame.data[0])
|
||||
avctx->release_buffer(avctx, &s->frame);
|
||||
@@ -337,8 +335,6 @@ static int execute_code(AVCodecContext * avctx, int c)
|
||||
av_log_ask_for_sample(avctx, "unsupported escape code\n");
|
||||
break;
|
||||
}
|
||||
s->x = av_clip(s->x, 0, avctx->width - FONT_WIDTH);
|
||||
s->y = av_clip(s->y, 0, avctx->height - s->font_height);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -419,7 +415,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
switch(buf[0]) {
|
||||
case '0': case '1': case '2': case '3': case '4':
|
||||
case '5': case '6': case '7': case '8': case '9':
|
||||
if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args] < 6553)
|
||||
if (s->nb_args < MAX_NB_ARGS)
|
||||
s->args[s->nb_args] = FFMAX(s->args[s->nb_args], 0) * 10 + buf[0] - '0';
|
||||
break;
|
||||
case ';':
|
||||
|
@@ -34,44 +34,46 @@ static inline int decode_blockcodes(int code1, int code2, int levels,
|
||||
{
|
||||
int v0, v1, v2, v3, v4, v5;
|
||||
|
||||
__asm__ ("smmul %0, %6, %10 \n"
|
||||
"smmul %3, %7, %10 \n"
|
||||
"smlabb %6, %0, %9, %6 \n"
|
||||
"smlabb %7, %3, %9, %7 \n"
|
||||
"smmul %1, %0, %10 \n"
|
||||
"smmul %4, %3, %10 \n"
|
||||
"sub %6, %6, %8, lsr #1 \n"
|
||||
"sub %7, %7, %8, lsr #1 \n"
|
||||
"smlabb %0, %1, %9, %0 \n"
|
||||
"smlabb %3, %4, %9, %3 \n"
|
||||
"smmul %2, %1, %10 \n"
|
||||
"smmul %5, %4, %10 \n"
|
||||
"str %6, [%11, #0] \n"
|
||||
"str %7, [%11, #16] \n"
|
||||
"sub %0, %0, %8, lsr #1 \n"
|
||||
"sub %3, %3, %8, lsr #1 \n"
|
||||
"smlabb %1, %2, %9, %1 \n"
|
||||
"smlabb %4, %5, %9, %4 \n"
|
||||
"smmul %6, %2, %10 \n"
|
||||
"smmul %7, %5, %10 \n"
|
||||
"str %0, [%11, #4] \n"
|
||||
"str %3, [%11, #20] \n"
|
||||
"sub %1, %1, %8, lsr #1 \n"
|
||||
"sub %4, %4, %8, lsr #1 \n"
|
||||
"smlabb %2, %6, %9, %2 \n"
|
||||
"smlabb %5, %7, %9, %5 \n"
|
||||
"str %1, [%11, #8] \n"
|
||||
"str %4, [%11, #24] \n"
|
||||
"sub %2, %2, %8, lsr #1 \n"
|
||||
"sub %5, %5, %8, lsr #1 \n"
|
||||
"str %2, [%11, #12] \n"
|
||||
"str %5, [%11, #28] \n"
|
||||
: "=&r"(v0), "=&r"(v1), "=&r"(v2),
|
||||
__asm__ ("smmul %8, %14, %18 \n"
|
||||
"smmul %11, %15, %18 \n"
|
||||
"smlabb %14, %8, %17, %14 \n"
|
||||
"smlabb %15, %11, %17, %15 \n"
|
||||
"smmul %9, %8, %18 \n"
|
||||
"smmul %12, %11, %18 \n"
|
||||
"sub %14, %14, %16, lsr #1 \n"
|
||||
"sub %15, %15, %16, lsr #1 \n"
|
||||
"smlabb %8, %9, %17, %8 \n"
|
||||
"smlabb %11, %12, %17, %11 \n"
|
||||
"smmul %10, %9, %18 \n"
|
||||
"smmul %13, %12, %18 \n"
|
||||
"str %14, %0 \n"
|
||||
"str %15, %4 \n"
|
||||
"sub %8, %8, %16, lsr #1 \n"
|
||||
"sub %11, %11, %16, lsr #1 \n"
|
||||
"smlabb %9, %10, %17, %9 \n"
|
||||
"smlabb %12, %13, %17, %12 \n"
|
||||
"smmul %14, %10, %18 \n"
|
||||
"smmul %15, %13, %18 \n"
|
||||
"str %8, %1 \n"
|
||||
"str %11, %5 \n"
|
||||
"sub %9, %9, %16, lsr #1 \n"
|
||||
"sub %12, %12, %16, lsr #1 \n"
|
||||
"smlabb %10, %14, %17, %10 \n"
|
||||
"smlabb %13, %15, %17, %13 \n"
|
||||
"str %9, %2 \n"
|
||||
"str %12, %6 \n"
|
||||
"sub %10, %10, %16, lsr #1 \n"
|
||||
"sub %13, %13, %16, lsr #1 \n"
|
||||
"str %10, %3 \n"
|
||||
"str %13, %7 \n"
|
||||
: "=m"(values[0]), "=m"(values[1]),
|
||||
"=m"(values[2]), "=m"(values[3]),
|
||||
"=m"(values[4]), "=m"(values[5]),
|
||||
"=m"(values[6]), "=m"(values[7]),
|
||||
"=&r"(v0), "=&r"(v1), "=&r"(v2),
|
||||
"=&r"(v3), "=&r"(v4), "=&r"(v5),
|
||||
"+&r"(code1), "+&r"(code2)
|
||||
: "r"(levels - 1), "r"(-levels),
|
||||
"r"(ff_inverse[levels]), "r"(values)
|
||||
: "memory");
|
||||
: "r"(levels - 1), "r"(-levels), "r"(ff_inverse[levels]));
|
||||
|
||||
return code1 | code2;
|
||||
}
|
||||
|
@@ -41,10 +41,10 @@ function ff_scalarproduct_int16_neon, export=1
|
||||
|
||||
vpadd.s32 d16, d0, d1
|
||||
vpadd.s32 d17, d2, d3
|
||||
vpadd.s32 d18, d4, d5
|
||||
vpadd.s32 d19, d6, d7
|
||||
vpadd.s32 d10, d4, d5
|
||||
vpadd.s32 d11, d6, d7
|
||||
vpadd.s32 d0, d16, d17
|
||||
vpadd.s32 d1, d18, d19
|
||||
vpadd.s32 d1, d10, d11
|
||||
vpadd.s32 d2, d0, d1
|
||||
vpaddl.s32 d3, d2
|
||||
vmov.32 r0, d3[0]
|
||||
@@ -81,10 +81,10 @@ function ff_scalarproduct_and_madd_int16_neon, export=1
|
||||
|
||||
vpadd.s32 d16, d0, d1
|
||||
vpadd.s32 d17, d2, d3
|
||||
vpadd.s32 d18, d4, d5
|
||||
vpadd.s32 d19, d6, d7
|
||||
vpadd.s32 d10, d4, d5
|
||||
vpadd.s32 d11, d6, d7
|
||||
vpadd.s32 d0, d16, d17
|
||||
vpadd.s32 d1, d18, d19
|
||||
vpadd.s32 d1, d10, d11
|
||||
vpadd.s32 d2, d0, d1
|
||||
vpaddl.s32 d3, d2
|
||||
vmov.32 r0, d3[0]
|
||||
|
@@ -661,8 +661,8 @@ static int decode_channel_sound_unit(ATRAC3Context *q, GetBitContext *gb,
|
||||
|
||||
snd->num_components = decode_tonal_components(gb, snd->components,
|
||||
snd->bands_coded);
|
||||
if (snd->num_components < 0)
|
||||
return snd->num_components;
|
||||
if (snd->num_components == -1)
|
||||
return -1;
|
||||
|
||||
num_subbands = decode_spectrum(gb, snd->spectrum);
|
||||
|
||||
|
@@ -286,7 +286,7 @@ int av_packet_split_side_data(AVPacket *pkt){
|
||||
for (i=0; ; i++){
|
||||
size= AV_RB32(p);
|
||||
av_assert0(size<=INT_MAX && p - pkt->data >= size);
|
||||
pkt->side_data[i].data = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
pkt->side_data[i].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
pkt->side_data[i].size = size;
|
||||
pkt->side_data[i].type = p[4]&127;
|
||||
if (!pkt->side_data[i].data)
|
||||
|
@@ -117,7 +117,6 @@ typedef struct BinkContext {
|
||||
int version; ///< internal Bink file version
|
||||
int has_alpha;
|
||||
int swap_planes;
|
||||
unsigned frame_num;
|
||||
|
||||
Bundle bundle[BINKB_NB_SRC]; ///< bundles for decoding all data types
|
||||
Tree col_high[16]; ///< trees for decoding high nibble in "colours" data type
|
||||
@@ -1208,8 +1207,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
|
||||
if (c->version >= 'i')
|
||||
skip_bits_long(&gb, 32);
|
||||
|
||||
c->frame_num++;
|
||||
|
||||
for (plane = 0; plane < 3; plane++) {
|
||||
plane_idx = (!plane || !c->swap_planes) ? plane : (plane ^ 3);
|
||||
|
||||
@@ -1218,7 +1215,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
|
||||
return ret;
|
||||
} else {
|
||||
if ((ret = binkb_decode_plane(c, &gb, plane_idx,
|
||||
c->frame_num == 1, !!plane)) < 0)
|
||||
!avctx->frame_number, !!plane)) < 0)
|
||||
return ret;
|
||||
}
|
||||
if (get_bits_count(&gb) >= bits_count)
|
||||
@@ -1342,13 +1339,6 @@ static av_cold int decode_end(AVCodecContext *avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void flush(AVCodecContext *avctx)
|
||||
{
|
||||
BinkContext * const c = avctx->priv_data;
|
||||
|
||||
c->frame_num = 0;
|
||||
}
|
||||
|
||||
AVCodec ff_bink_decoder = {
|
||||
.name = "binkvideo",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
@@ -1358,6 +1348,5 @@ AVCodec ff_bink_decoder = {
|
||||
.close = decode_end,
|
||||
.decode = decode_frame,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Bink video"),
|
||||
.flush = flush,
|
||||
.capabilities = CODEC_CAP_DR1,
|
||||
};
|
||||
|
@@ -305,15 +305,7 @@ int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
|
||||
GET_DATA(buf[j].bits, bits, i, bits_wrap, bits_size);\
|
||||
if (!(condition))\
|
||||
continue;\
|
||||
if (buf[j].bits > 3*nb_bits || buf[j].bits>32) {\
|
||||
av_log(NULL, AV_LOG_ERROR, "Too long VLC in init_vlc\n");\
|
||||
return -1;\
|
||||
}\
|
||||
GET_DATA(buf[j].code, codes, i, codes_wrap, codes_size);\
|
||||
if (buf[j].code >= (1LL<<buf[j].bits)) {\
|
||||
av_log(NULL, AV_LOG_ERROR, "Invalid code in init_vlc\n");\
|
||||
return -1;\
|
||||
}\
|
||||
if (flags & INIT_VLC_LE)\
|
||||
buf[j].code = bitswap_32(buf[j].code);\
|
||||
else\
|
||||
|
@@ -175,13 +175,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
||||
case C93_4X4_FROM_PREV:
|
||||
for (j = 0; j < 8; j += 4) {
|
||||
for (i = 0; i < 8; i += 4) {
|
||||
int offset = bytestream2_get_le16(&gb);
|
||||
int from_x = offset % WIDTH;
|
||||
int from_y = offset / WIDTH;
|
||||
if (block_type == C93_4X4_FROM_CURR && from_y == y+j &&
|
||||
(FFABS(from_x - x-i) < 4 || FFABS(from_x - x-i) > WIDTH-4)) {
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
offset = bytestream2_get_le16(&gb);
|
||||
if ((ret = copy_block(avctx, &out[j*stride+i],
|
||||
copy_from, offset, 4, stride)) < 0)
|
||||
return ret;
|
||||
|
@@ -305,7 +305,7 @@ STOP_TIMER("get_cabac_bypass")
|
||||
|
||||
for(i=0; i<SIZE; i++){
|
||||
START_TIMER
|
||||
if( (r[i]&1) != get_cabac_noinline(&c, state) )
|
||||
if( (r[i]&1) != get_cabac(&c, state) )
|
||||
av_log(NULL, AV_LOG_ERROR, "CABAC failure at %d\n", i);
|
||||
STOP_TIMER("get_cabac")
|
||||
}
|
||||
|
@@ -300,9 +300,7 @@ static int cdg_decode_frame(AVCodecContext *avctx,
|
||||
inst = bytestream_get_byte(&buf);
|
||||
inst &= CDG_MASK;
|
||||
buf += 2; /// skipping 2 unneeded bytes
|
||||
|
||||
if (buf_size > CDG_HEADER_SIZE)
|
||||
bytestream_get_buffer(&buf, cdg_data, buf_size - CDG_HEADER_SIZE);
|
||||
bytestream_get_buffer(&buf, cdg_data, buf_size - CDG_HEADER_SIZE);
|
||||
|
||||
if ((command & CDG_MASK) == CDG_COMMAND) {
|
||||
switch (inst) {
|
||||
|
@@ -28,7 +28,6 @@
|
||||
#ifndef AVCODEC_DIRAC_ARITH_H
|
||||
#define AVCODEC_DIRAC_ARITH_H
|
||||
|
||||
#include "libavutil/x86/asm.h"
|
||||
#include "bytestream.h"
|
||||
#include "get_bits.h"
|
||||
|
||||
@@ -135,7 +134,7 @@ static inline int dirac_get_arith_bit(DiracArith *c, int ctx)
|
||||
|
||||
range_times_prob = (c->range * prob_zero) >> 16;
|
||||
|
||||
#if HAVE_FAST_CMOV && HAVE_INLINE_ASM && HAVE_6REGS
|
||||
#if HAVE_FAST_CMOV && HAVE_INLINE_ASM
|
||||
low -= range_times_prob << 16;
|
||||
range -= range_times_prob;
|
||||
bit = 0;
|
||||
|
@@ -201,7 +201,6 @@ typedef struct DiracContext {
|
||||
|
||||
uint16_t *mctmp; /* buffer holding the MC data multipled by OBMC weights */
|
||||
uint8_t *mcscratch;
|
||||
int buffer_stride;
|
||||
|
||||
DECLARE_ALIGNED(16, uint8_t, obmc_weight)[3][MAX_BLOCKSIZE*MAX_BLOCKSIZE];
|
||||
|
||||
@@ -344,44 +343,22 @@ static int alloc_sequence_buffers(DiracContext *s)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
w = s->source.width;
|
||||
h = s->source.height;
|
||||
|
||||
/* fixme: allocate using real stride here */
|
||||
s->sbsplit = av_malloc_array(sbwidth, sbheight);
|
||||
s->blmotion = av_malloc_array(sbwidth, sbheight * 16 * sizeof(*s->blmotion));
|
||||
s->sbsplit = av_malloc(sbwidth * sbheight);
|
||||
s->blmotion = av_malloc(sbwidth * sbheight * 16 * sizeof(*s->blmotion));
|
||||
s->edge_emu_buffer_base = av_malloc((w+64)*MAX_BLOCKSIZE);
|
||||
|
||||
if (!s->sbsplit || !s->blmotion)
|
||||
s->mctmp = av_malloc((w+64+MAX_BLOCKSIZE) * (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
|
||||
s->mcscratch = av_malloc((w+64)*MAX_BLOCKSIZE);
|
||||
|
||||
if (!s->sbsplit || !s->blmotion || !s->mctmp || !s->mcscratch)
|
||||
return AVERROR(ENOMEM);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int alloc_buffers(DiracContext *s, int stride)
|
||||
{
|
||||
int w = s->source.width;
|
||||
int h = s->source.height;
|
||||
|
||||
av_assert0(stride >= w);
|
||||
stride += 64;
|
||||
|
||||
if (s->buffer_stride >= stride)
|
||||
return 0;
|
||||
s->buffer_stride = 0;
|
||||
|
||||
av_freep(&s->edge_emu_buffer_base);
|
||||
memset(s->edge_emu_buffer, 0, sizeof(s->edge_emu_buffer));
|
||||
av_freep(&s->mctmp);
|
||||
av_freep(&s->mcscratch);
|
||||
|
||||
s->edge_emu_buffer_base = av_malloc_array(stride, MAX_BLOCKSIZE);
|
||||
|
||||
s->mctmp = av_malloc_array((stride+MAX_BLOCKSIZE), (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
|
||||
s->mcscratch = av_malloc_array(stride, MAX_BLOCKSIZE);
|
||||
|
||||
if (!s->edge_emu_buffer_base || !s->mctmp || !s->mcscratch)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
s->buffer_stride = stride;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void free_sequence_buffers(DiracContext *s)
|
||||
{
|
||||
int i, j, k;
|
||||
@@ -405,7 +382,6 @@ static void free_sequence_buffers(DiracContext *s)
|
||||
av_freep(&s->plane[i].idwt_tmp);
|
||||
}
|
||||
|
||||
s->buffer_stride = 0;
|
||||
av_freep(&s->sbsplit);
|
||||
av_freep(&s->blmotion);
|
||||
av_freep(&s->edge_emu_buffer_base);
|
||||
@@ -1367,8 +1343,8 @@ static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5],
|
||||
motion_y >>= s->chroma_y_shift;
|
||||
}
|
||||
|
||||
mx = motion_x & ~(-1U << s->mv_precision);
|
||||
my = motion_y & ~(-1U << s->mv_precision);
|
||||
mx = motion_x & ~(-1 << s->mv_precision);
|
||||
my = motion_y & ~(-1 << s->mv_precision);
|
||||
motion_x >>= s->mv_precision;
|
||||
motion_y >>= s->mv_precision;
|
||||
/* normalize subpel coordinates to epel */
|
||||
@@ -1842,9 +1818,6 @@ static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int
|
||||
s->plane[1].stride = pic->avframe.linesize[1];
|
||||
s->plane[2].stride = pic->avframe.linesize[2];
|
||||
|
||||
if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
/* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
|
||||
if (dirac_decode_picture_header(s))
|
||||
return -1;
|
||||
|
@@ -236,7 +236,7 @@ static int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
|
||||
|
||||
static int dnxhd_init_rc(DNXHDEncContext *ctx)
|
||||
{
|
||||
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160*(ctx->m.avctx->qmax + 1)*sizeof(RCEntry), fail);
|
||||
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160*ctx->m.avctx->qmax*sizeof(RCEntry), fail);
|
||||
if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD)
|
||||
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_cmp, ctx->m.mb_num*sizeof(RCCMPEntry), fail);
|
||||
|
||||
|
@@ -212,7 +212,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
buf[803] = s->bits_per_component;
|
||||
write16(buf + 804, (s->bits_per_component == 10 || s->bits_per_component == 12) ?
|
||||
1 : 0); /* packing method */
|
||||
write32(buf + 808, HEADER_SIZE); /* data offset */
|
||||
|
||||
/* Image source information header */
|
||||
write32(buf + 1628, avctx->sample_aspect_ratio.num);
|
||||
|
@@ -1897,7 +1897,7 @@ void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){
|
||||
|
||||
static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
|
||||
long i;
|
||||
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
|
||||
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
|
||||
long a = *(long*)(src+i);
|
||||
long b = *(long*)(dst+i);
|
||||
*(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
|
||||
@@ -1922,7 +1922,7 @@ static void diff_bytes_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
|
||||
}
|
||||
}else
|
||||
#endif
|
||||
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
|
||||
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
|
||||
long a = *(long*)(src1+i);
|
||||
long b = *(long*)(src2+i);
|
||||
*(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
|
||||
|
@@ -71,11 +71,6 @@ static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint
|
||||
case 4: // motion compensation
|
||||
x = (*mv) >> 4; if(x & 8) x = 8 - x;
|
||||
y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
|
||||
if (i < -x || avctx->width - i - 4 < x ||
|
||||
j < -y || avctx->height - j - 4 < y) {
|
||||
av_log(avctx, AV_LOG_ERROR, "MV %d %d out of bounds\n", x,y);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
tmp2 += x + y*stride;
|
||||
case 0: // skip
|
||||
case 5: // skip in method 12
|
||||
@@ -133,11 +128,6 @@ static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint
|
||||
case 0x80: // motion compensation
|
||||
x = (*mv) >> 4; if(x & 8) x = 8 - x;
|
||||
y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
|
||||
if (i + 2*(k & 1) < -x || avctx->width - i - 2*(k & 1) - 2 < x ||
|
||||
j + (k & 2) < -y || avctx->height - j - (k & 2) - 2 < y) {
|
||||
av_log(avctx, AV_LOG_ERROR, "MV %d %d out of bounds\n", x,y);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
tmp2 += x + y*stride;
|
||||
case 0x00: // skip
|
||||
tmp[d + 0 ] = tmp2[0];
|
||||
|
@@ -255,11 +255,6 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
calc_quant_matrix(s, buf[13]);
|
||||
buf += 16;
|
||||
|
||||
if (width < 16 || height < 16) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Dimensions too small\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (avctx->width != width || avctx->height != height) {
|
||||
if((width * height)/2048*7 > buf_end-buf)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
@@ -762,17 +762,6 @@ void ff_er_frame_start(ERContext *s)
|
||||
s->error_occurred = 0;
|
||||
}
|
||||
|
||||
static int er_supported(ERContext *s)
|
||||
{
|
||||
if(s->avctx->hwaccel ||
|
||||
s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
|
||||
!s->cur_pic ||
|
||||
s->cur_pic->field_picture
|
||||
)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a slice.
|
||||
* @param endx x component of the last macroblock, can be -1
|
||||
@@ -839,7 +828,7 @@ void ff_er_add_slice(ERContext *s, int startx, int starty,
|
||||
s->error_status_table[start_xy] |= VP_START;
|
||||
|
||||
if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) &&
|
||||
er_supported(s) && s->avctx->skip_top * s->mb_width < start_i) {
|
||||
s->avctx->skip_top * s->mb_width < start_i) {
|
||||
int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];
|
||||
|
||||
prev_status &= ~ VP_START;
|
||||
@@ -862,7 +851,9 @@ void ff_er_frame_end(ERContext *s)
|
||||
* though it should not crash if enabled. */
|
||||
if (!s->avctx->err_recognition || s->error_count == 0 ||
|
||||
s->avctx->lowres ||
|
||||
!er_supported(s) ||
|
||||
s->avctx->hwaccel ||
|
||||
s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
|
||||
!s->cur_pic || s->cur_pic->field_picture ||
|
||||
s->error_count == 3 * s->mb_width *
|
||||
(s->avctx->skip_top + s->avctx->skip_bottom)) {
|
||||
return;
|
||||
|
@@ -374,7 +374,7 @@ static void bl_intrp(EVRCContext *e, float *ex, float delay)
|
||||
int offset, i, coef_idx;
|
||||
int16_t t;
|
||||
|
||||
offset = lrintf(delay);
|
||||
offset = lrintf(fabs(delay));
|
||||
|
||||
t = (offset - delay + 0.5) * 8.0 + 0.5;
|
||||
if (t == 8) {
|
||||
@@ -640,7 +640,7 @@ static void postfilter(EVRCContext *e, float *in, const float *coeff,
|
||||
/* Short term postfilter */
|
||||
synthesis_filter(temp, wcoef2, e->postfilter_iir, length, out);
|
||||
|
||||
memmove(e->postfilter_residual,
|
||||
memcpy(e->postfilter_residual,
|
||||
e->postfilter_residual + length, ACB_SIZE * sizeof(float));
|
||||
}
|
||||
|
||||
@@ -714,7 +714,7 @@ static void frame_erasure(EVRCContext *e, float *samples)
|
||||
e->pitch[ACB_SIZE + j] = e->energy_vector[i];
|
||||
}
|
||||
|
||||
memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
|
||||
memcpy(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
|
||||
|
||||
if (e->bitrate != RATE_QUANT && e->avg_acb_gain < 0.4) {
|
||||
f = 0.1 * e->avg_fcb_gain;
|
||||
@@ -814,7 +814,7 @@ static int evrc_decode_frame(AVCodecContext *avctx, void *data,
|
||||
|
||||
interpolate_delay(idelay, delay, e->prev_pitch_delay, i);
|
||||
acb_excitation(e, e->pitch + ACB_SIZE, e->avg_acb_gain, idelay, subframe_size);
|
||||
memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
|
||||
memcpy(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -872,7 +872,7 @@ static int evrc_decode_frame(AVCodecContext *avctx, void *data,
|
||||
e->pitch[ACB_SIZE + j] = e->energy_vector[i];
|
||||
}
|
||||
|
||||
memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
|
||||
memcpy(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
|
||||
|
||||
synthesis_filter(e->pitch + ACB_SIZE, ilpc,
|
||||
e->synthesis, subframe_size, tmp);
|
||||
|
@@ -348,8 +348,7 @@ static int decode_block(AVCodecContext *avctx, void *tdata,
|
||||
const uint8_t *src;
|
||||
int axmax = (avctx->width - (s->xmax + 1)) * 2 * s->desc->nb_components;
|
||||
int bxmin = s->xmin * 2 * s->desc->nb_components;
|
||||
int i, x, buf_size = s->buf_size;
|
||||
int av_unused ret;
|
||||
int ret, i, x, buf_size = s->buf_size;
|
||||
|
||||
line_offset = AV_RL64(s->table + jobnr * 8);
|
||||
// Check if the buffer has the required bytes needed from the offset
|
||||
|
@@ -446,10 +446,6 @@ static int read_extra_header(FFV1Context *f)
|
||||
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
|
||||
|
||||
f->version = get_symbol(c, state, 0);
|
||||
if (f->version < 2) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "Invalid version in global header\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (f->version > 2) {
|
||||
c->bytestream_end -= 4;
|
||||
f->minor_version = get_symbol(c, state, 0);
|
||||
@@ -527,7 +523,6 @@ static int read_header(FFV1Context *f)
|
||||
memset(state, 128, sizeof(state));
|
||||
|
||||
if (f->version < 2) {
|
||||
int chroma_planes, chroma_h_shift, chroma_v_shift, transparency, colorspace, bits_per_raw_sample;
|
||||
unsigned v= get_symbol(c, state, 0);
|
||||
if (v >= 2) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
|
||||
@@ -540,32 +535,15 @@ static int read_header(FFV1Context *f)
|
||||
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
|
||||
}
|
||||
|
||||
colorspace = get_symbol(c, state, 0); //YUV cs type
|
||||
bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
|
||||
chroma_planes = get_rac(c, state);
|
||||
chroma_h_shift = get_symbol(c, state, 0);
|
||||
chroma_v_shift = get_symbol(c, state, 0);
|
||||
transparency = get_rac(c, state);
|
||||
f->colorspace = get_symbol(c, state, 0); //YUV cs type
|
||||
|
||||
if (f->plane_count) {
|
||||
if ( colorspace != f->colorspace
|
||||
|| bits_per_raw_sample != f->avctx->bits_per_raw_sample
|
||||
|| chroma_planes != f->chroma_planes
|
||||
|| chroma_h_shift!= f->chroma_h_shift
|
||||
|| chroma_v_shift!= f->chroma_v_shift
|
||||
|| transparency != f->transparency) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
f->colorspace = colorspace;
|
||||
f->avctx->bits_per_raw_sample = bits_per_raw_sample;
|
||||
f->chroma_planes = chroma_planes;
|
||||
f->chroma_h_shift = chroma_h_shift;
|
||||
f->chroma_v_shift = chroma_v_shift;
|
||||
f->transparency = transparency;
|
||||
if (f->version > 0)
|
||||
f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
|
||||
|
||||
f->chroma_planes = get_rac(c, state);
|
||||
f->chroma_h_shift = get_symbol(c, state, 0);
|
||||
f->chroma_v_shift = get_symbol(c, state, 0);
|
||||
f->transparency = get_rac(c, state);
|
||||
f->plane_count = 2 + f->transparency;
|
||||
}
|
||||
|
||||
@@ -583,32 +561,47 @@ static int read_header(FFV1Context *f)
|
||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
|
||||
case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
|
||||
case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
|
||||
default:
|
||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
|
||||
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
|
||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
|
||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
|
||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
|
||||
default:
|
||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
} else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
|
||||
} else if (f->avctx->bits_per_raw_sample == 9) {
|
||||
f->packed_at_lsb = 1;
|
||||
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
|
||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
|
||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
|
||||
default:
|
||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
} else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
|
||||
} else if (f->avctx->bits_per_raw_sample == 10) {
|
||||
f->packed_at_lsb = 1;
|
||||
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
|
||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
|
||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
|
||||
default:
|
||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
} else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){
|
||||
} else {
|
||||
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
|
||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
|
||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
|
||||
default:
|
||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
}
|
||||
} else if (f->colorspace == 1) {
|
||||
@@ -632,10 +625,6 @@ static int read_header(FFV1Context *f)
|
||||
av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
if (f->avctx->pix_fmt == AV_PIX_FMT_NONE) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
|
||||
av_dlog(f->avctx, "%d %d %d\n",
|
||||
f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
|
||||
|
@@ -274,7 +274,7 @@ static av_always_inline int encode_line(FFV1Context *s, int w,
|
||||
int run_mode = 0;
|
||||
|
||||
if (s->ac) {
|
||||
if (c->bytestream_end - c->bytestream < w * 35) {
|
||||
if (c->bytestream_end - c->bytestream < w * 20) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
@@ -27,7 +27,7 @@ const int ff_flac_sample_rate_table[16] =
|
||||
8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000,
|
||||
0, 0, 0, 0 };
|
||||
|
||||
const int32_t ff_flac_blocksize_table[16] = {
|
||||
const int16_t ff_flac_blocksize_table[16] = {
|
||||
0, 192, 576<<0, 576<<1, 576<<2, 576<<3, 0, 0,
|
||||
256<<0, 256<<1, 256<<2, 256<<3, 256<<4, 256<<5, 256<<6, 256<<7
|
||||
};
|
||||
|
@@ -26,6 +26,6 @@
|
||||
|
||||
extern const int ff_flac_sample_rate_table[16];
|
||||
|
||||
extern const int32_t ff_flac_blocksize_table[16];
|
||||
extern const int16_t ff_flac_blocksize_table[16];
|
||||
|
||||
#endif /* AVCODEC_FLACDATA_H */
|
||||
|
@@ -394,10 +394,6 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
s->diff_start = get_bits(&gb, 8);
|
||||
s->diff_height = get_bits(&gb, 8);
|
||||
if (s->diff_start + s->diff_height > cur_blk_height) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Block parameters invalid\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
av_log(avctx, AV_LOG_DEBUG,
|
||||
"%dx%d diff start %d height %d\n",
|
||||
i, j, s->diff_start, s->diff_height);
|
||||
|
@@ -2288,8 +2288,7 @@ static int pack_bitstream(G723_1_Context *p, unsigned char *frame, int size)
|
||||
if (p->cur_rate == RATE_6300) {
|
||||
info_bits = 0;
|
||||
put_bits(&pb, 2, info_bits);
|
||||
}else
|
||||
av_assert0(0);
|
||||
}
|
||||
|
||||
put_bits(&pb, 8, p->lsp_index[2]);
|
||||
put_bits(&pb, 8, p->lsp_index[1]);
|
||||
|
@@ -462,7 +462,6 @@ static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, A
|
||||
|
||||
if (s->keyframe) {
|
||||
s->keyframe_ok = 0;
|
||||
s->gce_prev_disposal = GCE_DISPOSAL_NONE;
|
||||
if ((ret = gif_read_header1(s)) < 0)
|
||||
return ret;
|
||||
|
||||
|
@@ -58,7 +58,7 @@ int main(void)
|
||||
}
|
||||
}
|
||||
|
||||
#define EXTEND(i) ((i) << 3 | (i) & 7)
|
||||
#define EXTEND(i) (i << 3 | i & 7)
|
||||
init_put_bits(&pb, temp, SIZE);
|
||||
for (i = 0; i < COUNT; i++)
|
||||
set_ue_golomb(&pb, EXTEND(i));
|
||||
|
@@ -721,10 +721,10 @@ frame_end:
|
||||
}
|
||||
|
||||
if(startcode_found){
|
||||
av_fast_padded_mallocz(
|
||||
av_fast_malloc(
|
||||
&s->bitstream_buffer,
|
||||
&s->allocated_bitstream_buffer_size,
|
||||
buf_size - current_pos);
|
||||
buf_size - current_pos + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (!s->bitstream_buffer)
|
||||
return AVERROR(ENOMEM);
|
||||
memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos);
|
||||
|
@@ -1782,6 +1782,11 @@ int ff_h264_frame_start(H264Context *h)
|
||||
h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
|
||||
}
|
||||
|
||||
/* Some macroblocks can be accessed before they're available in case
|
||||
* of lost slices, MBAFF or threading. */
|
||||
memset(h->slice_table, -1,
|
||||
(h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
|
||||
|
||||
// s->decode = (h->flags & CODEC_FLAG_PSNR) || !s->encoding ||
|
||||
// h->cur_pic.f.reference /* || h->contains_intra */ || 1;
|
||||
|
||||
@@ -2579,7 +2584,6 @@ static void flush_change(H264Context *h)
|
||||
h->sync= 0;
|
||||
h->list_count = 0;
|
||||
h->current_slice = 0;
|
||||
h->mmco_reset = 1;
|
||||
}
|
||||
|
||||
/* forget old pics after a seek */
|
||||
@@ -3094,18 +3098,6 @@ static int h264_slice_header_init(H264Context *h, int reinit)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
|
||||
{
|
||||
switch (a) {
|
||||
case AV_PIX_FMT_YUVJ420P: return AV_PIX_FMT_YUV420P;
|
||||
case AV_PIX_FMT_YUVJ422P: return AV_PIX_FMT_YUV422P;
|
||||
case AV_PIX_FMT_YUVJ444P: return AV_PIX_FMT_YUV444P;
|
||||
default:
|
||||
return a;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Decode a slice header.
|
||||
* This will also call ff_MPV_common_init() and frame_start() as needed.
|
||||
@@ -3122,6 +3114,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
unsigned int pps_id;
|
||||
int num_ref_idx_active_override_flag, ret;
|
||||
unsigned int slice_type, tmp, i, j;
|
||||
int default_ref_list_done = 0;
|
||||
int last_pic_structure, last_pic_droppable;
|
||||
int must_reinit;
|
||||
int needs_reinit = 0;
|
||||
@@ -3161,6 +3154,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
h->slice_type_fixed = 0;
|
||||
|
||||
slice_type = golomb_to_pict_type[slice_type];
|
||||
if (slice_type == AV_PICTURE_TYPE_I ||
|
||||
(h0->current_slice != 0 &&
|
||||
slice_type == h0->last_slice_type &&
|
||||
!memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) {
|
||||
default_ref_list_done = 1;
|
||||
}
|
||||
h->slice_type = slice_type;
|
||||
h->slice_type_nos = slice_type & 3;
|
||||
|
||||
@@ -3220,11 +3219,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
|| 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height
|
||||
|| h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
|
||||
|| h->cur_chroma_format_idc != h->sps.chroma_format_idc
|
||||
|| av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)
|
||||
|| h->mb_width != h->sps.mb_width
|
||||
|| h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
|
||||
));
|
||||
if (non_j_pixfmt(h0->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h0, 0)))
|
||||
|| av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)));
|
||||
if (h0->avctx->pix_fmt != get_pixel_format(h0, 0))
|
||||
must_reinit = 1;
|
||||
|
||||
h->mb_width = h->sps.mb_width;
|
||||
@@ -3341,7 +3337,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
} else {
|
||||
/* Shorten frame num gaps so we don't have to allocate reference
|
||||
* frames just to throw them away */
|
||||
if (h->frame_num != h->prev_frame_num) {
|
||||
if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
|
||||
int unwrap_prev_frame_num = h->prev_frame_num;
|
||||
int max_frame_num = 1 << h->sps.log2_max_frame_num;
|
||||
|
||||
@@ -3368,7 +3364,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
|
||||
|
||||
/* Mark old field/frame as completed */
|
||||
if (h0->cur_pic_ptr->owner2 == h0) {
|
||||
if (!last_pic_droppable && h0->cur_pic_ptr->owner2 == h0) {
|
||||
ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
|
||||
last_pic_structure == PICT_BOTTOM_FIELD);
|
||||
}
|
||||
@@ -3377,7 +3373,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
|
||||
/* Previous field is unmatched. Don't display it, but let it
|
||||
* remain for reference if marked as such. */
|
||||
if (last_pic_structure != PICT_FRAME) {
|
||||
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
|
||||
ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
|
||||
last_pic_structure == PICT_TOP_FIELD);
|
||||
}
|
||||
@@ -3387,7 +3383,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
* different frame_nums. Consider this field first in
|
||||
* pair. Throw away previous field except for reference
|
||||
* purposes. */
|
||||
if (last_pic_structure != PICT_FRAME) {
|
||||
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
|
||||
ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
|
||||
last_pic_structure == PICT_TOP_FIELD);
|
||||
}
|
||||
@@ -3423,7 +3419,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
}
|
||||
}
|
||||
|
||||
while (h->frame_num != h->prev_frame_num && !h0->first_field &&
|
||||
while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 && !h0->first_field &&
|
||||
h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
|
||||
Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
|
||||
av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
|
||||
@@ -3502,15 +3498,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
} else {
|
||||
release_unused_pictures(h, 0);
|
||||
}
|
||||
/* Some macroblocks can be accessed before they're available in case
|
||||
* of lost slices, MBAFF or threading. */
|
||||
if (FIELD_PICTURE) {
|
||||
for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
|
||||
memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
|
||||
} else {
|
||||
memset(h->slice_table, -1,
|
||||
(h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
|
||||
}
|
||||
}
|
||||
if (h != h0 && (ret = clone_slice(h, h0)) < 0)
|
||||
return ret;
|
||||
@@ -3603,12 +3590,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
h->list_count = 0;
|
||||
h->ref_count[0] = h->ref_count[1] = 0;
|
||||
}
|
||||
if (slice_type != AV_PICTURE_TYPE_I &&
|
||||
(h0->current_slice == 0 ||
|
||||
slice_type != h0->last_slice_type ||
|
||||
memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) {
|
||||
|
||||
if (!default_ref_list_done)
|
||||
ff_h264_fill_default_ref_list(h);
|
||||
}
|
||||
|
||||
if (h->slice_type_nos != AV_PICTURE_TYPE_I &&
|
||||
ff_h264_decode_ref_pic_list_reordering(h) < 0) {
|
||||
@@ -3791,7 +3775,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
|
||||
if (h->ref_count[0]) h->er.last_pic = &h->ref_list[0][0];
|
||||
if (h->ref_count[1]) h->er.next_pic = &h->ref_list[1][0];
|
||||
h->er.ref_count = h->ref_count[0];
|
||||
|
||||
if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
|
||||
av_log(h->avctx, AV_LOG_DEBUG,
|
||||
@@ -4183,6 +4166,7 @@ static void er_add_slice(H264Context *h, int startx, int starty,
|
||||
if (CONFIG_ERROR_RESILIENCE) {
|
||||
ERContext *er = &h->er;
|
||||
|
||||
er->ref_count = h->ref_count[0];
|
||||
ff_er_add_slice(er, startx, starty, endx, endy, status);
|
||||
}
|
||||
}
|
||||
|
@@ -549,15 +549,9 @@ static int decode_residual(H264Context *h, GetBitContext *gb, int16_t *block, in
|
||||
if(prefix<15){
|
||||
level_code = (prefix<<suffix_length) + get_bits(gb, suffix_length);
|
||||
}else{
|
||||
level_code = 15<<suffix_length;
|
||||
if (prefix>=16) {
|
||||
if(prefix > 25+3){
|
||||
av_log(h->avctx, AV_LOG_ERROR, "Invalid level prefix\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
level_code = (15<<suffix_length) + get_bits(gb, prefix-3);
|
||||
if(prefix>=16)
|
||||
level_code += (1<<(prefix-3))-4096;
|
||||
}
|
||||
level_code += get_bits(gb, prefix-3);
|
||||
}
|
||||
mask= -(level_code&1);
|
||||
level_code= (((2+level_code)>>1) ^ mask) - mask;
|
||||
@@ -712,7 +706,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
|
||||
down the code */
|
||||
if(h->slice_type_nos != AV_PICTURE_TYPE_I){
|
||||
if(h->mb_skip_run==-1)
|
||||
h->mb_skip_run= get_ue_golomb_long(&h->gb);
|
||||
h->mb_skip_run= get_ue_golomb(&h->gb);
|
||||
|
||||
if (h->mb_skip_run--) {
|
||||
if(FRAME_MBAFF && (h->mb_y&1) == 0){
|
||||
|
@@ -154,7 +154,7 @@ pps:
|
||||
goto fail;
|
||||
|
||||
/* prepend only to the first type 5 NAL unit of an IDR picture */
|
||||
if (ctx->first_idr && (unit_type == 5 || unit_type == 7 || unit_type == 8)) {
|
||||
if (ctx->first_idr && unit_type == 5) {
|
||||
if ((ret=alloc_and_copy(poutbuf, poutbuf_size,
|
||||
avctx->extradata, avctx->extradata_size,
|
||||
buf, nal_size)) < 0)
|
||||
|
@@ -543,7 +543,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
|
||||
if(!pic){
|
||||
if(mmco[i].opcode != MMCO_SHORT2LONG || !h->long_ref[mmco[i].long_arg]
|
||||
|| h->long_ref[mmco[i].long_arg]->frame_num != frame_num) {
|
||||
av_log(h->avctx, h->short_ref_count ? AV_LOG_ERROR : AV_LOG_DEBUG, "mmco: unref short failure\n");
|
||||
av_log(h->avctx, AV_LOG_ERROR, "mmco: unref short failure\n");
|
||||
err = AVERROR_INVALIDDATA;
|
||||
}
|
||||
continue;
|
||||
@@ -586,9 +586,6 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
|
||||
|
||||
if (h->long_ref[mmco[i].long_arg] != h->cur_pic_ptr) {
|
||||
remove_long(h, mmco[i].long_arg, 0);
|
||||
if (remove_short(h, h->cur_pic_ptr->frame_num, 0)) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "mmco: cannot assign current picture to short and long at the same time\n");
|
||||
}
|
||||
|
||||
h->long_ref[ mmco[i].long_arg ]= h->cur_pic_ptr;
|
||||
h->long_ref[ mmco[i].long_arg ]->long_ref=1;
|
||||
@@ -683,7 +680,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
|
||||
print_short_term(h);
|
||||
print_long_term(h);
|
||||
|
||||
if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=2 + (h->picture_structure != PICT_FRAME) && h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
|
||||
if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=1 + (h->picture_structure != PICT_FRAME) && h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
|
||||
h->cur_pic_ptr->sync |= 1;
|
||||
if(!h->avctx->has_b_frames)
|
||||
h->sync = 2;
|
||||
@@ -696,7 +693,7 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb,
|
||||
int first_slice)
|
||||
{
|
||||
int i, ret;
|
||||
MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = mmco_temp;
|
||||
MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp;
|
||||
int mmco_index = 0;
|
||||
|
||||
if (h->nal_unit_type == NAL_IDR_SLICE){ // FIXME fields
|
||||
@@ -762,7 +759,6 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb,
|
||||
}
|
||||
|
||||
if (first_slice && mmco_index != -1) {
|
||||
memcpy(h->mmco, mmco_temp, sizeof(h->mmco));
|
||||
h->mmco_index = mmco_index;
|
||||
} else if (!first_slice && mmco_index >= 0 &&
|
||||
(mmco_index != h->mmco_index ||
|
||||
|
@@ -31,11 +31,9 @@
|
||||
c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_ ## depth ## _c; \
|
||||
c->put_h264_chroma_pixels_tab[1] = put_h264_chroma_mc4_ ## depth ## _c; \
|
||||
c->put_h264_chroma_pixels_tab[2] = put_h264_chroma_mc2_ ## depth ## _c; \
|
||||
c->put_h264_chroma_pixels_tab[3] = put_h264_chroma_mc1_ ## depth ## _c; \
|
||||
c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_ ## depth ## _c; \
|
||||
c->avg_h264_chroma_pixels_tab[1] = avg_h264_chroma_mc4_ ## depth ## _c; \
|
||||
c->avg_h264_chroma_pixels_tab[2] = avg_h264_chroma_mc2_ ## depth ## _c; \
|
||||
c->avg_h264_chroma_pixels_tab[3] = avg_h264_chroma_mc1_ ## depth ## _c; \
|
||||
|
||||
void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
|
||||
{
|
||||
|
@@ -24,8 +24,8 @@
|
||||
typedef void (*h264_chroma_mc_func)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x, int y);
|
||||
|
||||
typedef struct H264ChromaContext {
|
||||
h264_chroma_mc_func put_h264_chroma_pixels_tab[4];
|
||||
h264_chroma_mc_func avg_h264_chroma_pixels_tab[4];
|
||||
h264_chroma_mc_func put_h264_chroma_pixels_tab[3];
|
||||
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];
|
||||
} H264ChromaContext;
|
||||
|
||||
void ff_h264chroma_init(H264ChromaContext *c, int bit_depth);
|
||||
|
@@ -24,34 +24,6 @@
|
||||
#include "bit_depth_template.c"
|
||||
|
||||
#define H264_CHROMA_MC(OPNAME, OP)\
|
||||
static void FUNCC(OPNAME ## h264_chroma_mc1)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
|
||||
pixel *dst = (pixel*)_dst;\
|
||||
pixel *src = (pixel*)_src;\
|
||||
const int A=(8-x)*(8-y);\
|
||||
const int B=( x)*(8-y);\
|
||||
const int C=(8-x)*( y);\
|
||||
const int D=( x)*( y);\
|
||||
int i;\
|
||||
stride >>= sizeof(pixel)-1;\
|
||||
\
|
||||
av_assert2(x<8 && y<8 && x>=0 && y>=0);\
|
||||
\
|
||||
if(D){\
|
||||
for(i=0; i<h; i++){\
|
||||
OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
|
||||
dst+= stride;\
|
||||
src+= stride;\
|
||||
}\
|
||||
}else{\
|
||||
const int E= B+C;\
|
||||
const int step= C ? stride : 1;\
|
||||
for(i=0; i<h; i++){\
|
||||
OP(dst[0], (A*src[0] + E*src[step+0]));\
|
||||
dst+= stride;\
|
||||
src+= stride;\
|
||||
}\
|
||||
}\
|
||||
}\
|
||||
static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
|
||||
pixel *dst = (pixel*)_dst;\
|
||||
pixel *src = (pixel*)_src;\
|
||||
|
@@ -804,16 +804,8 @@ static int decode_band(IVI45DecContext *ctx,
|
||||
break;
|
||||
|
||||
result = ivi_decode_blocks(&ctx->gb, band, tile, avctx);
|
||||
if (result < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Corrupted tile data encountered!\n");
|
||||
break;
|
||||
}
|
||||
|
||||
if (((get_bits_count(&ctx->gb) - pos) >> 3) != tile->data_size) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Tile data_size mismatch!\n");
|
||||
result = AVERROR_INVALIDDATA;
|
||||
if (result < 0 || ((get_bits_count(&ctx->gb) - pos) >> 3) != tile->data_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Corrupted tile data encountered!\n");
|
||||
break;
|
||||
}
|
||||
|
||||
|
@@ -31,7 +31,6 @@
|
||||
#include "bytestream.h"
|
||||
#include "internal.h"
|
||||
#include "j2k.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/common.h"
|
||||
|
||||
#define JP2_SIG_TYPE 0x6A502020
|
||||
@@ -303,10 +302,6 @@ static int get_cox(J2kDecoderContext *s, J2kCodingStyle *c)
|
||||
c->log2_cblk_width = bytestream2_get_byteu(&s->g) + 2; // cblk width
|
||||
c->log2_cblk_height = bytestream2_get_byteu(&s->g) + 2; // cblk height
|
||||
|
||||
if (c->log2_cblk_width > 6 || c->log2_cblk_height > 6) {
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
c->cblk_style = bytestream2_get_byteu(&s->g);
|
||||
if (c->cblk_style != 0){ // cblk style
|
||||
av_log(s->avctx, AV_LOG_WARNING, "extra cblk styles %X\n", c->cblk_style);
|
||||
@@ -724,9 +719,6 @@ static int decode_cblk(J2kDecoderContext *s, J2kCodingStyle *codsty, J2kT1Contex
|
||||
int bpass_csty_symbol = J2K_CBLK_BYPASS & codsty->cblk_style;
|
||||
int vert_causal_ctx_csty_symbol = J2K_CBLK_VSC & codsty->cblk_style;
|
||||
|
||||
av_assert0(width <= J2K_MAX_CBLKW);
|
||||
av_assert0(height <= J2K_MAX_CBLKH);
|
||||
|
||||
for (y = 0; y < height+2; y++)
|
||||
memset(t1->flags[y], 0, (width+2)*sizeof(int));
|
||||
|
||||
|
@@ -142,8 +142,6 @@ static inline int ls_get_code_runterm(GetBitContext *gb, JLSState *state, int RI
|
||||
ret = ret >> 1;
|
||||
}
|
||||
|
||||
if(FFABS(ret) > 0xFFFF)
|
||||
return -0x10000;
|
||||
/* update state */
|
||||
state->A[Q] += FFABS(ret) - RItype;
|
||||
ret *= state->twonear;
|
||||
|
@@ -107,7 +107,7 @@ static int kmvc_decode_intra_8x8(KmvcContext * ctx, int w, int h)
|
||||
val = bytestream2_get_byte(&ctx->g);
|
||||
mx = val & 0xF;
|
||||
my = val >> 4;
|
||||
if ((l0x-mx) + 320*(l0y-my) < 0 || (l0x-mx) + 320*(l0y-my) > 320*197 - 4) {
|
||||
if ((l0x-mx) + 320*(l0y-my) < 0 || (l0x-mx) + 320*(l0y-my) > 316*196) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
@@ -132,7 +132,7 @@ static int kmvc_decode_intra_8x8(KmvcContext * ctx, int w, int h)
|
||||
val = bytestream2_get_byte(&ctx->g);
|
||||
mx = val & 0xF;
|
||||
my = val >> 4;
|
||||
if ((l1x-mx) + 320*(l1y-my) < 0 || (l1x-mx) + 320*(l1y-my) > 320*199 - 2) {
|
||||
if ((l1x-mx) + 320*(l1y-my) < 0 || (l1x-mx) + 320*(l1y-my) > 318*198) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
@@ -207,7 +207,7 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, int w, int h)
|
||||
val = bytestream2_get_byte(&ctx->g);
|
||||
mx = (val & 0xF) - 8;
|
||||
my = (val >> 4) - 8;
|
||||
if ((l0x+mx) + 320*(l0y+my) < 0 || (l0x+mx) + 320*(l0y+my) > 320*197 - 4) {
|
||||
if ((l0x+mx) + 320*(l0y+my) < 0 || (l0x+mx) + 320*(l0y+my) > 318*198) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
@@ -232,7 +232,7 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, int w, int h)
|
||||
val = bytestream2_get_byte(&ctx->g);
|
||||
mx = (val & 0xF) - 8;
|
||||
my = (val >> 4) - 8;
|
||||
if ((l1x+mx) + 320*(l1y+my) < 0 || (l1x+mx) + 320*(l1y+my) > 320*199 - 2) {
|
||||
if ((l1x+mx) + 320*(l1y+my) < 0 || (l1x+mx) + 320*(l1y+my) > 318*198) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
@@ -42,7 +42,6 @@
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "avcodec.h"
|
||||
#include "bytestream.h"
|
||||
#include "internal.h"
|
||||
@@ -492,7 +491,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
unsigned int max_basesize = FFALIGN(avctx->width, 4) *
|
||||
FFALIGN(avctx->height, 4);
|
||||
unsigned int max_decomp_size;
|
||||
int subsample_h, subsample_v;
|
||||
|
||||
avcodec_get_frame_defaults(&c->pic);
|
||||
if (avctx->extradata_size < 8) {
|
||||
@@ -519,9 +517,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
max_decomp_size = max_basesize * 2;
|
||||
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
|
||||
av_log(avctx, AV_LOG_DEBUG, "Image type is YUV 4:2:2.\n");
|
||||
if (avctx->width % 4) {
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
break;
|
||||
case IMGTYPE_RGB24:
|
||||
c->decomp_size = basesize * 3;
|
||||
@@ -552,11 +547,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &subsample_h, &subsample_v);
|
||||
if (avctx->width % (1<<subsample_h) || avctx->height % (1<<subsample_v)) {
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* Detect compression method */
|
||||
c->compression = (int8_t)avctx->extradata[5];
|
||||
switch (avctx->codec_id) {
|
||||
|
@@ -380,7 +380,7 @@ static const AVOption libopus_options[] = {
|
||||
{ "voip", "Favor improved speech intelligibility", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_VOIP }, 0, 0, FLAGS, "application" },
|
||||
{ "audio", "Favor faithfulness to the input", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_AUDIO }, 0, 0, FLAGS, "application" },
|
||||
{ "lowdelay", "Restrict to only the lowest delay modes", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_RESTRICTED_LOWDELAY }, 0, 0, FLAGS, "application" },
|
||||
{ "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 20.0 }, 2.5, 60.0, FLAGS },
|
||||
{ "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 10.0 }, 2.5, 60.0, FLAGS },
|
||||
{ "packet_loss", "Expected packet loss percentage", OFFSET(packet_loss), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 100, FLAGS },
|
||||
{ "vbr", "Variable bit rate mode", OFFSET(vbr), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 2, FLAGS, "vbr" },
|
||||
{ "off", "Use constant bit rate", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "vbr" },
|
||||
|
@@ -362,8 +362,7 @@ static int oggvorbis_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
avctx->delay = duration;
|
||||
av_assert0(!s->afq.remaining_delay);
|
||||
s->afq.frames->duration += duration;
|
||||
if (s->afq.frames->pts != AV_NOPTS_VALUE)
|
||||
s->afq.frames->pts -= duration;
|
||||
s->afq.frames->pts -= duration;
|
||||
s->afq.remaining_samples += duration;
|
||||
}
|
||||
ff_af_queue_remove(&s->afq, duration, &avpkt->pts, &avpkt->duration);
|
||||
|
@@ -175,7 +175,7 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
|
||||
frame->pict_type == AV_PICTURE_TYPE_P ? X264_TYPE_P :
|
||||
frame->pict_type == AV_PICTURE_TYPE_B ? X264_TYPE_B :
|
||||
X264_TYPE_AUTO;
|
||||
if (x4->params.b_interlaced && x4->params.b_tff != frame->top_field_first) {
|
||||
if (x4->params.b_tff != frame->top_field_first) {
|
||||
x4->params.b_tff = frame->top_field_first;
|
||||
x264_encoder_reconfig(x4->enc, &x4->params);
|
||||
}
|
||||
@@ -343,6 +343,19 @@ static av_cold int X264_init(AVCodecContext *avctx)
|
||||
|
||||
OPT_STR("level", x4->level);
|
||||
|
||||
if(x4->x264opts){
|
||||
const char *p= x4->x264opts;
|
||||
while(p){
|
||||
char param[256]={0}, val[256]={0};
|
||||
if(sscanf(p, "%255[^:=]=%255[^:]", param, val) == 1){
|
||||
OPT_STR(param, "1");
|
||||
}else
|
||||
OPT_STR(param, val);
|
||||
p= strchr(p, ':');
|
||||
p+=!!p;
|
||||
}
|
||||
}
|
||||
|
||||
if (avctx->i_quant_factor > 0)
|
||||
x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor);
|
||||
|
||||
@@ -512,19 +525,6 @@ static av_cold int X264_init(AVCodecContext *avctx)
|
||||
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER)
|
||||
x4->params.b_repeat_headers = 0;
|
||||
|
||||
if(x4->x264opts){
|
||||
const char *p= x4->x264opts;
|
||||
while(p){
|
||||
char param[256]={0}, val[256]={0};
|
||||
if(sscanf(p, "%255[^:=]=%255[^:]", param, val) == 1){
|
||||
OPT_STR(param, "1");
|
||||
}else
|
||||
OPT_STR(param, val);
|
||||
p= strchr(p, ':');
|
||||
p+=!!p;
|
||||
}
|
||||
}
|
||||
|
||||
if (x4->x264_params) {
|
||||
AVDictionary *dict = NULL;
|
||||
AVDictionaryEntry *en = NULL;
|
||||
|
@@ -329,7 +329,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
|
||||
s->first_picture = 0;
|
||||
}
|
||||
|
||||
if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
|
||||
if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
|
||||
if (s->progressive) {
|
||||
av_log_ask_for_sample(s->avctx, "progressively coded interlaced pictures not supported\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
@@ -747,9 +747,7 @@ static void handle_rstn(MJpegDecodeContext *s, int nb_components)
|
||||
|
||||
i = 8 + ((-get_bits_count(&s->gb)) & 7);
|
||||
/* skip RSTn */
|
||||
if (s->restart_count == 0) {
|
||||
if( show_bits(&s->gb, i) == (1 << i) - 1
|
||||
|| show_bits(&s->gb, i) == 0xFF) {
|
||||
if (s->restart_count == 0 && show_bits(&s->gb, i) == (1 << i) - 1) {
|
||||
int pos = get_bits_count(&s->gb);
|
||||
align_get_bits(&s->gb);
|
||||
while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
|
||||
@@ -759,7 +757,6 @@ static void handle_rstn(MJpegDecodeContext *s, int nb_components)
|
||||
s->last_dc[i] = 1024;
|
||||
} else
|
||||
skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -774,12 +771,6 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int p
|
||||
int resync_mb_y = 0;
|
||||
int resync_mb_x = 0;
|
||||
|
||||
if (s->nb_components != 3 && s->nb_components != 4)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
|
||||
s->restart_count = s->restart_interval;
|
||||
|
||||
av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size,
|
||||
@@ -1144,7 +1135,7 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
|
||||
}
|
||||
|
||||
if (!Al) {
|
||||
s->coefs_finished[c] |= (2LL << se) - (1LL << ss);
|
||||
s->coefs_finished[c] |= (1LL << (se + 1)) - (1LL << ss);
|
||||
last_scan = !~s->coefs_finished[c];
|
||||
}
|
||||
|
||||
@@ -1596,6 +1587,8 @@ int ff_mjpeg_find_marker(MJpegDecodeContext *s,
|
||||
int t = 0, b = 0;
|
||||
PutBitContext pb;
|
||||
|
||||
s->cur_scan++;
|
||||
|
||||
/* find marker */
|
||||
while (src + t < buf_end) {
|
||||
uint8_t x = src[t++];
|
||||
@@ -1643,7 +1636,6 @@ int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
MJpegDecodeContext *s = avctx->priv_data;
|
||||
const uint8_t *buf_end, *buf_ptr;
|
||||
const uint8_t *unescaped_buf_ptr;
|
||||
int hshift, vshift;
|
||||
int unescaped_buf_size;
|
||||
int start_code;
|
||||
int i, index;
|
||||
@@ -1659,7 +1651,7 @@ int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
&unescaped_buf_size);
|
||||
/* EOF */
|
||||
if (start_code < 0) {
|
||||
break;
|
||||
goto the_end;
|
||||
} else if (unescaped_buf_size > (1U<<28)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "MJPEG packet 0x%x too big (0x%x/0x%x), corrupt data?\n",
|
||||
start_code, unescaped_buf_size, buf_size);
|
||||
@@ -1769,7 +1761,6 @@ eoi_parser:
|
||||
|
||||
goto the_end;
|
||||
case SOS:
|
||||
s->cur_scan++;
|
||||
if ((ret = ff_mjpeg_decode_sos(s, NULL, NULL)) < 0 &&
|
||||
(avctx->err_recognition & AV_EF_EXPLODE))
|
||||
goto fail;
|
||||
@@ -1799,7 +1790,7 @@ eoi_parser:
|
||||
(get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
|
||||
}
|
||||
}
|
||||
if (s->got_picture && s->cur_scan) {
|
||||
if (s->got_picture) {
|
||||
av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
|
||||
goto eoi_parser;
|
||||
}
|
||||
@@ -1823,9 +1814,6 @@ the_end:
|
||||
}
|
||||
if (s->upscale_v) {
|
||||
uint8_t *dst = &((uint8_t *)s->picture_ptr->data[s->upscale_v])[(s->height - 1) * s->linesize[s->upscale_v]];
|
||||
int w;
|
||||
avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
|
||||
w = s->width >> hshift;
|
||||
av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
|
||||
avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
|
||||
avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
|
||||
@@ -1834,16 +1822,16 @@ the_end:
|
||||
uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[s->upscale_v])[i / 2 * s->linesize[s->upscale_v]];
|
||||
uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[s->upscale_v])[(i + 1) / 2 * s->linesize[s->upscale_v]];
|
||||
if (src1 == src2) {
|
||||
memcpy(dst, src1, w);
|
||||
memcpy(dst, src1, s->width);
|
||||
} else {
|
||||
for (index = 0; index < w; index++)
|
||||
for (index = 0; index < s->width; index++)
|
||||
dst[index] = (src1[index] + src2[index]) >> 1;
|
||||
}
|
||||
dst -= s->linesize[s->upscale_v];
|
||||
}
|
||||
}
|
||||
if (s->flipped && (s->avctx->flags & CODEC_FLAG_EMU_EDGE)) {
|
||||
int j;
|
||||
int hshift, vshift, j;
|
||||
avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
|
||||
for (index=0; index<4; index++) {
|
||||
uint8_t *dst = s->picture_ptr->data[index];
|
||||
|
@@ -454,7 +454,7 @@ static void encode_block(MpegEncContext *s, int16_t *block, int n)
|
||||
put_bits(&s->pb, huff_size_ac[0], huff_code_ac[0]);
|
||||
}
|
||||
|
||||
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
|
||||
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[6][64])
|
||||
{
|
||||
int i;
|
||||
if (s->chroma_format == CHROMA_444) {
|
||||
|
@@ -56,6 +56,6 @@ void ff_mjpeg_encode_picture_trailer(MpegEncContext *s);
|
||||
void ff_mjpeg_encode_stuffing(MpegEncContext *s);
|
||||
void ff_mjpeg_encode_dc(MpegEncContext *s, int val,
|
||||
uint8_t *huff_size, uint16_t *huff_code);
|
||||
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64]);
|
||||
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[6][64]);
|
||||
|
||||
#endif /* AVCODEC_MJPEGENC_H */
|
||||
|
@@ -813,7 +813,7 @@ static int read_decoding_params(MLPDecodeContext *m, GetBitContext *gbp,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define MSB_MASK(bits) (-1u << (bits))
|
||||
#define MSB_MASK(bits) (-1u << bits)
|
||||
|
||||
/** Generate PCM samples using the prediction filters and residual values
|
||||
* read from the data stream, and update the filter state. */
|
||||
|
@@ -104,9 +104,6 @@ static int mm_decode_intra(MmContext * s, int half_horiz, int half_vert)
|
||||
if (half_horiz)
|
||||
run_length *=2;
|
||||
|
||||
if (run_length > s->avctx->width - x)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (color) {
|
||||
memset(s->frame.data[0] + y*s->frame.linesize[0] + x, color, run_length);
|
||||
if (half_vert)
|
||||
@@ -154,8 +151,6 @@ static int mm_decode_inter(MmContext * s, int half_horiz, int half_vert)
|
||||
int replace_array = bytestream2_get_byte(&s->gb);
|
||||
for(j=0; j<8; j++) {
|
||||
int replace = (replace_array >> (7-j)) & 1;
|
||||
if (x + half_horiz >= s->avctx->width)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (replace) {
|
||||
int color = bytestream2_get_byte(&data_ptr);
|
||||
s->frame.data[0][y*s->frame.linesize[0] + x] = color;
|
||||
|
@@ -1266,7 +1266,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
|
||||
s1->save_width != s->width ||
|
||||
s1->save_height != s->height ||
|
||||
s1->save_aspect_info != s->aspect_ratio_info ||
|
||||
(s1->save_progressive_seq != s->progressive_sequence && (s->height&31)) ||
|
||||
s1->save_progressive_seq != s->progressive_sequence ||
|
||||
0)
|
||||
{
|
||||
|
||||
@@ -2277,8 +2277,7 @@ static int decode_chunks(AVCodecContext *avctx,
|
||||
buf_ptr = avpriv_mpv_find_start_code(buf_ptr, buf_end, &start_code);
|
||||
if (start_code > 0x1ff) {
|
||||
if (s2->pict_type != AV_PICTURE_TYPE_B || avctx->skip_frame <= AVDISCARD_DEFAULT) {
|
||||
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
|
||||
!avctx->hwaccel) {
|
||||
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)) {
|
||||
int i;
|
||||
av_assert0(avctx->thread_count > 1);
|
||||
|
||||
@@ -2338,8 +2337,7 @@ static int decode_chunks(AVCodecContext *avctx,
|
||||
s2->intra_dc_precision= 3;
|
||||
s2->intra_matrix[0]= 1;
|
||||
}
|
||||
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
|
||||
!avctx->hwaccel && s->slice_count) {
|
||||
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) && s->slice_count) {
|
||||
int i;
|
||||
|
||||
avctx->execute(avctx, slice_decode_thread,
|
||||
@@ -2506,8 +2504,7 @@ static int decode_chunks(AVCodecContext *avctx,
|
||||
break;
|
||||
}
|
||||
|
||||
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
|
||||
!avctx->hwaccel) {
|
||||
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)) {
|
||||
int threshold = (s2->mb_height * s->slice_count +
|
||||
s2->slice_context_count / 2) /
|
||||
s2->slice_context_count;
|
||||
|
@@ -2102,7 +2102,7 @@ static inline int hpel_motion_lowres(MpegEncContext *s,
|
||||
int motion_x, int motion_y)
|
||||
{
|
||||
const int lowres = s->avctx->lowres;
|
||||
const int op_index = FFMIN(lowres, 3);
|
||||
const int op_index = FFMIN(lowres, 2);
|
||||
const int s_mask = (2 << lowres) - 1;
|
||||
int emu = 0;
|
||||
int sx, sy;
|
||||
@@ -2155,7 +2155,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
|
||||
int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
|
||||
uvsx, uvsy;
|
||||
const int lowres = s->avctx->lowres;
|
||||
const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
|
||||
const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
|
||||
const int block_s = 8>>lowres;
|
||||
const int s_mask = (2 << lowres) - 1;
|
||||
const int h_edge_pos = s->h_edge_pos >> lowres;
|
||||
@@ -2221,7 +2221,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
|
||||
ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
|
||||
ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
|
||||
|
||||
if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
|
||||
if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
|
||||
(unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
|
||||
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
|
||||
linesize >> field_based, 17, 17 + field_based,
|
||||
@@ -2261,12 +2261,11 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
|
||||
pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
|
||||
|
||||
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
|
||||
int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
|
||||
uvsx = (uvsx << 2) >> lowres;
|
||||
uvsy = (uvsy << 2) >> lowres;
|
||||
if (hc) {
|
||||
pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
|
||||
pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
|
||||
if (h >> s->chroma_y_shift) {
|
||||
pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
|
||||
pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
|
||||
}
|
||||
}
|
||||
// FIXME h261 lowres loop filter
|
||||
@@ -2279,7 +2278,7 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
|
||||
int mx, int my)
|
||||
{
|
||||
const int lowres = s->avctx->lowres;
|
||||
const int op_index = FFMIN(lowres, 3);
|
||||
const int op_index = FFMIN(lowres, 2);
|
||||
const int block_s = 8 >> lowres;
|
||||
const int s_mask = (2 << lowres) - 1;
|
||||
const int h_edge_pos = s->h_edge_pos >> lowres + 1;
|
||||
|
@@ -812,8 +812,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
|
||||
s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
|
||||
}
|
||||
} else {
|
||||
if( s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
|
||||
|| !ref_picture[0]){
|
||||
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
|
||||
ref_picture = s->current_picture_ptr->f.data;
|
||||
}
|
||||
|
||||
@@ -827,8 +826,8 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
|
||||
for(i=0; i<2; i++){
|
||||
uint8_t ** ref2picture;
|
||||
|
||||
if((s->picture_structure == s->field_select[dir][i] + 1
|
||||
|| s->pict_type == AV_PICTURE_TYPE_B || s->first_field) && ref_picture[0]){
|
||||
if(s->picture_structure == s->field_select[dir][i] + 1
|
||||
|| s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
|
||||
ref2picture= ref_picture;
|
||||
}else{
|
||||
ref2picture = s->current_picture_ptr->f.data;
|
||||
@@ -857,9 +856,6 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
|
||||
pix_op = s->dsp.avg_pixels_tab;
|
||||
}
|
||||
}else{
|
||||
if (!ref_picture[0]) {
|
||||
ref_picture = s->current_picture_ptr->f.data;
|
||||
}
|
||||
for(i=0; i<2; i++){
|
||||
mpeg_motion(s, dest_y, dest_cb, dest_cr,
|
||||
s->picture_structure != i+1,
|
||||
|
@@ -34,7 +34,6 @@
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "msrledec.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
|
||||
typedef struct MsrleContext {
|
||||
AVCodecContext *avctx;
|
||||
@@ -113,14 +112,11 @@ static int msrle_decode_frame(AVCodecContext *avctx,
|
||||
|
||||
/* FIXME how to correctly detect RLE ??? */
|
||||
if (avctx->height * istride == avpkt->size) { /* assume uncompressed */
|
||||
int linesize = av_image_get_linesize(avctx->pix_fmt, avctx->width, 0);
|
||||
int linesize = (avctx->width * avctx->bits_per_coded_sample + 7) / 8;
|
||||
uint8_t *ptr = s->frame.data[0];
|
||||
uint8_t *buf = avpkt->data + (avctx->height-1)*istride;
|
||||
int i, j;
|
||||
|
||||
if (linesize < 0)
|
||||
return linesize;
|
||||
|
||||
for (i = 0; i < avctx->height; i++) {
|
||||
if (avctx->bits_per_coded_sample == 4) {
|
||||
for (j = 0; j < avctx->width - 1; j += 2) {
|
||||
|
@@ -84,8 +84,8 @@ void ff_mss34_gen_quant_mat(uint16_t *qmat, int quality, int luma)
|
||||
blk[6 * step] = (-(t3 + t7) + t8 + tA) >> shift; \
|
||||
blk[7 * step] = (-(t1 + t6) + t9 + tB) >> shift; \
|
||||
|
||||
#define SOP_ROW(a) (((a) << 16) + 0x2000)
|
||||
#define SOP_COL(a) (((a) + 32) << 16)
|
||||
#define SOP_ROW(a) ((a) << 16) + 0x2000
|
||||
#define SOP_COL(a) ((a + 32) << 16)
|
||||
|
||||
void ff_mss34_dct_put(uint8_t *dst, int stride, int *block)
|
||||
{
|
||||
|
@@ -363,7 +363,7 @@ static int get_value_cached(GetBitContext *gb, int vec_pos, uint8_t *vec,
|
||||
return prev[component];
|
||||
}
|
||||
|
||||
#define MKVAL(vals) ((vals)[0] | ((vals)[1] << 3) | ((vals)[2] << 6))
|
||||
#define MKVAL(vals) (vals[0] | (vals[1] << 3) | (vals[2] << 6))
|
||||
|
||||
/* Image mode - the hardest to comprehend MSS4 coding mode.
|
||||
*
|
||||
|
@@ -58,7 +58,7 @@ enum MSV1Mode{
|
||||
};
|
||||
|
||||
#define SKIP_PREFIX 0x8400
|
||||
#define SKIPS_MAX 0x03FF
|
||||
#define SKIPS_MAX 0x0FFF
|
||||
#define MKRGB555(in, off) ((in[off] << 10) | (in[off + 1] << 5) | (in[off + 2]))
|
||||
|
||||
static const int remap[16] = { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15 };
|
||||
|
@@ -235,10 +235,8 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
|
||||
if(next == END_NOT_FOUND){
|
||||
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
if(!new_buffer) {
|
||||
pc->index = 0;
|
||||
if(!new_buffer)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
pc->buffer = new_buffer;
|
||||
memcpy(&pc->buffer[pc->index], *buf, *buf_size);
|
||||
pc->index += *buf_size;
|
||||
@@ -251,11 +249,9 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
|
||||
/* append to buffer */
|
||||
if(pc->index){
|
||||
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
if(!new_buffer) {
|
||||
pc->overread_index =
|
||||
pc->index = 0;
|
||||
|
||||
if(!new_buffer)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
pc->buffer = new_buffer;
|
||||
if (next > -FF_INPUT_BUFFER_PADDING_SIZE)
|
||||
memcpy(&pc->buffer[pc->index], *buf,
|
||||
|
@@ -30,7 +30,7 @@
|
||||
static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
|
||||
{
|
||||
long i;
|
||||
for (i = 0; i <= w - (int)sizeof(long); i += sizeof(long)) {
|
||||
for (i = 0; i <= w - sizeof(long); i += sizeof(long)) {
|
||||
long a = *(long *)(src1 + i);
|
||||
long b = *(long *)(src2 + i);
|
||||
*(long *)(dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
|
||||
|
@@ -163,8 +163,6 @@ int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s)
|
||||
if (s->maxval >= 256) {
|
||||
if (avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
|
||||
avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
|
||||
if (s->maxval != 65535)
|
||||
avctx->pix_fmt = AV_PIX_FMT_GRAY16;
|
||||
} else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
|
||||
avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
|
||||
} else if (avctx->pix_fmt == AV_PIX_FMT_YUV420P && s->maxval < 65536) {
|
||||
|
@@ -912,6 +912,8 @@ void ff_thread_flush(AVCodecContext *avctx)
|
||||
if (fctx->prev_thread) {
|
||||
if (fctx->prev_thread != &fctx->threads[0])
|
||||
update_context_from_thread(fctx->threads[0].avctx, fctx->prev_thread->avctx, 0);
|
||||
if (avctx->codec->flush)
|
||||
avctx->codec->flush(fctx->threads[0].avctx);
|
||||
}
|
||||
|
||||
fctx->next_decoding = fctx->next_finished = 0;
|
||||
@@ -923,9 +925,6 @@ void ff_thread_flush(AVCodecContext *avctx)
|
||||
p->got_frame = 0;
|
||||
|
||||
release_delayed_buffers(p);
|
||||
|
||||
if (avctx->codec->flush)
|
||||
avctx->codec->flush(fctx->threads[0].avctx);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -767,8 +767,7 @@ static int synthfilt_build_sb_samples (QDM2Context *q, GetBitContext *gb, int le
|
||||
int type34_first;
|
||||
float type34_div = 0;
|
||||
float type34_predictor;
|
||||
float samples[10];
|
||||
int sign_bits[16] = {0};
|
||||
float samples[10], sign_bits[16];
|
||||
|
||||
if (length == 0) {
|
||||
// If no data use noise
|
||||
|
@@ -67,7 +67,7 @@ static void qtrle_decode_1bpp(QtrleContext *s, int row_ptr, int lines_to_change)
|
||||
* line' at the beginning. Since we always interpret it as 'go to next line'
|
||||
* in the decoding loop (which makes code simpler/faster), the first line
|
||||
* would not be counted, so we count one more.
|
||||
* See: https://trac.ffmpeg.org/ticket/226
|
||||
* See: https://ffmpeg.org/trac/ffmpeg/ticket/226
|
||||
* In the following decoding loop, row_ptr will be the position of the
|
||||
* current row. */
|
||||
|
||||
|
@@ -84,7 +84,7 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
unsigned short *pixels = (unsigned short *)s->frame.data[0];
|
||||
|
||||
int row_ptr = 0;
|
||||
int pixel_ptr = -4;
|
||||
int pixel_ptr = 0;
|
||||
int block_ptr;
|
||||
int pixel_x, pixel_y;
|
||||
int total_blocks;
|
||||
@@ -140,7 +140,6 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
colorA = AV_RB16 (&s->buf[stream_ptr]);
|
||||
stream_ptr += 2;
|
||||
while (n_blocks--) {
|
||||
ADVANCE_BLOCK()
|
||||
block_ptr = row_ptr + pixel_ptr;
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
|
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++){
|
||||
@@ -149,6 +148,7 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
}
|
||||
block_ptr += row_inc;
|
||||
}
|
||||
ADVANCE_BLOCK();
|
||||
}
|
||||
break;
|
||||
|
||||
@@ -187,7 +187,6 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
if (s->size - stream_ptr < n_blocks * 4)
|
||||
return;
|
||||
while (n_blocks--) {
|
||||
ADVANCE_BLOCK();
|
||||
block_ptr = row_ptr + pixel_ptr;
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
|
||||
index = s->buf[stream_ptr++];
|
||||
@@ -198,6 +197,7 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
}
|
||||
block_ptr += row_inc;
|
||||
}
|
||||
ADVANCE_BLOCK();
|
||||
}
|
||||
break;
|
||||
|
||||
@@ -205,7 +205,6 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
case 0x00:
|
||||
if (s->size - stream_ptr < 16)
|
||||
return;
|
||||
ADVANCE_BLOCK();
|
||||
block_ptr = row_ptr + pixel_ptr;
|
||||
for (pixel_y = 0; pixel_y < 4; pixel_y++) {
|
||||
for (pixel_x = 0; pixel_x < 4; pixel_x++){
|
||||
@@ -219,6 +218,7 @@ static void rpza_decode_stream(RpzaContext *s)
|
||||
}
|
||||
block_ptr += row_inc;
|
||||
}
|
||||
ADVANCE_BLOCK();
|
||||
break;
|
||||
|
||||
/* Unknown opcode */
|
||||
|
@@ -739,11 +739,6 @@ static int process_frame_obj(SANMVideoContext *ctx)
|
||||
w = bytestream2_get_le16u(&ctx->gb);
|
||||
h = bytestream2_get_le16u(&ctx->gb);
|
||||
|
||||
if (!w || !h) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "dimensions are invalid\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (ctx->width < left + w || ctx->height < top + h) {
|
||||
if (av_image_check_size(FFMAX(left + w, ctx->width),
|
||||
FFMAX(top + h, ctx->height), 0, ctx->avctx) < 0)
|
||||
|
@@ -424,7 +424,7 @@ static int shorten_decode_frame(AVCodecContext *avctx, void *data,
|
||||
void *tmp_ptr;
|
||||
s->max_framesize = 8192; // should hopefully be enough for the first header
|
||||
tmp_ptr = av_fast_realloc(s->bitstream, &s->allocated_bitstream_size,
|
||||
s->max_framesize + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
s->max_framesize);
|
||||
if (!tmp_ptr) {
|
||||
av_log(avctx, AV_LOG_ERROR, "error allocating bitstream buffer\n");
|
||||
return AVERROR(ENOMEM);
|
||||
@@ -437,7 +437,7 @@ static int shorten_decode_frame(AVCodecContext *avctx, void *data,
|
||||
buf_size = FFMIN(buf_size, s->max_framesize - s->bitstream_size);
|
||||
input_buf_size = buf_size;
|
||||
|
||||
if (s->bitstream_index + s->bitstream_size + buf_size + FF_INPUT_BUFFER_PADDING_SIZE >
|
||||
if (s->bitstream_index + s->bitstream_size + buf_size >
|
||||
s->allocated_bitstream_size) {
|
||||
memmove(s->bitstream, &s->bitstream[s->bitstream_index],
|
||||
s->bitstream_size);
|
||||
|
@@ -694,7 +694,7 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
val |= h[3].values[res] << 8;
|
||||
pred[1] += sign_extend(val, 16);
|
||||
*samples++ = pred[1];
|
||||
*samples++ = av_clip_int16(pred[1]);
|
||||
} else {
|
||||
if(vlc[0].table)
|
||||
res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3);
|
||||
@@ -715,7 +715,7 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
val |= h[1].values[res] << 8;
|
||||
pred[0] += sign_extend(val, 16);
|
||||
*samples++ = pred[0];
|
||||
*samples++ = av_clip_int16(pred[0]);
|
||||
}
|
||||
}
|
||||
} else { //8-bit data
|
||||
|
@@ -315,8 +315,7 @@ static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer
|
||||
if(!sliced && !offset_dst)
|
||||
dst -= src_x;
|
||||
src_x=0;
|
||||
}
|
||||
if(src_x + b_w > w){
|
||||
}else if(src_x + b_w > w){
|
||||
b_w = w - src_x;
|
||||
}
|
||||
if(src_y<0){
|
||||
@@ -325,8 +324,7 @@ static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer
|
||||
if(!sliced && !offset_dst)
|
||||
dst -= src_y*dst_stride;
|
||||
src_y=0;
|
||||
}
|
||||
if(src_y + b_h> h){
|
||||
}else if(src_y + b_h> h){
|
||||
b_h = h - src_y;
|
||||
}
|
||||
|
||||
|
@@ -204,8 +204,7 @@ static const char *read_ts(const char *buf, int *ts_start, int *ts_end,
|
||||
"%*[ ]X1:%u X2:%u Y1:%u Y2:%u",
|
||||
&hs, &ms, &ss, ts_start, &he, &me, &se, ts_end,
|
||||
x1, x2, y1, y2);
|
||||
buf += strcspn(buf, "\n");
|
||||
buf += !!*buf;
|
||||
buf += strcspn(buf, "\n") + 1;
|
||||
if (c >= 8) {
|
||||
*ts_start = 100*(ss + 60*(ms + 60*hs)) + *ts_start/10;
|
||||
*ts_end = 100*(se + 60*(me + 60*he)) + *ts_end /10;
|
||||
|
@@ -790,8 +790,8 @@ static int svq3_decode_slice_header(AVCodecContext *avctx)
|
||||
header ^ s->watermark_key);
|
||||
}
|
||||
if (length > 0) {
|
||||
memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
|
||||
&h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
|
||||
memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
|
||||
&h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
|
||||
}
|
||||
skip_bits_long(&h->gb, 0);
|
||||
}
|
||||
|
@@ -732,9 +732,11 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
avctx->bits_per_raw_sample = s->ti.bps;
|
||||
if ((ret = set_bps_params(avctx)) < 0)
|
||||
return ret;
|
||||
if (s->ti.bps != avctx->bits_per_raw_sample) {
|
||||
avctx->bits_per_raw_sample = s->ti.bps;
|
||||
if ((ret = set_bps_params(avctx)) < 0)
|
||||
return ret;
|
||||
}
|
||||
if (s->ti.sample_rate != avctx->sample_rate) {
|
||||
avctx->sample_rate = s->ti.sample_rate;
|
||||
set_sample_rate_params(avctx);
|
||||
|
@@ -948,14 +948,14 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
if (!l->Y1_base || !l->Y2_base || !l->U1_base ||
|
||||
!l->V1_base || !l->U2_base || !l->V2_base ||
|
||||
!l->last || !l->clast) {
|
||||
av_freep(&l->Y1_base);
|
||||
av_freep(&l->Y2_base);
|
||||
av_freep(&l->U1_base);
|
||||
av_freep(&l->U2_base);
|
||||
av_freep(&l->V1_base);
|
||||
av_freep(&l->V2_base);
|
||||
av_freep(&l->last);
|
||||
av_freep(&l->clast);
|
||||
av_freep(l->Y1_base);
|
||||
av_freep(l->Y2_base);
|
||||
av_freep(l->U1_base);
|
||||
av_freep(l->U2_base);
|
||||
av_freep(l->V1_base);
|
||||
av_freep(l->V2_base);
|
||||
av_freep(l->last);
|
||||
av_freep(l->clast);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
l->Y1 = l->Y1_base + l->y_stride * 4 + 4;
|
||||
|
@@ -474,7 +474,7 @@ static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
|
||||
|
||||
buf->linesize[i] = picture.linesize[i];
|
||||
|
||||
buf->base[i] = av_malloc(size[i] + 16 + STRIDE_ALIGN - 1); //FIXME 16
|
||||
buf->base[i] = av_malloc(size[i] + 16); //FIXME 16
|
||||
if (buf->base[i] == NULL)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
@@ -1647,17 +1647,10 @@ static int add_metadata_from_side_data(AVCodecContext *avctx, AVFrame *frame)
|
||||
if (!side_metadata)
|
||||
goto end;
|
||||
end = side_metadata + size;
|
||||
if (size && end[-1])
|
||||
return AVERROR_INVALIDDATA;
|
||||
while (side_metadata < end) {
|
||||
const uint8_t *key = side_metadata;
|
||||
const uint8_t *val = side_metadata + strlen(key) + 1;
|
||||
int ret;
|
||||
|
||||
if (val >= end)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
ret = av_dict_set(ff_frame_get_metadatap(frame), key, val, 0);
|
||||
int ret = av_dict_set(ff_frame_get_metadatap(frame), key, val, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
side_metadata = val + strlen(val) + 1;
|
||||
@@ -1945,7 +1938,7 @@ static int recode_subtitle(AVCodecContext *avctx,
|
||||
goto end;
|
||||
}
|
||||
outpkt->size -= outl;
|
||||
memset(outpkt->data + outpkt->size, 0, outl);
|
||||
outpkt->data[outpkt->size - 1] = '\0';
|
||||
|
||||
end:
|
||||
if (cd != (iconv_t)-1)
|
||||
@@ -1976,16 +1969,6 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
|
||||
int did_split = av_packet_split_side_data(&tmp);
|
||||
//apply_param_change(avctx, &tmp);
|
||||
|
||||
if (did_split) {
|
||||
/* FFMIN() prevents overflow in case the packet wasn't allocated with
|
||||
* proper padding.
|
||||
* If the side data is smaller than the buffer padding size, the
|
||||
* remaining bytes should have already been filled with zeros by the
|
||||
* original packet allocation anyway. */
|
||||
memset(tmp.data + tmp.size, 0,
|
||||
FFMIN(avpkt->size - tmp.size, FF_INPUT_BUFFER_PADDING_SIZE));
|
||||
}
|
||||
|
||||
pkt_recoded = tmp;
|
||||
ret = recode_subtitle(avctx, &pkt_recoded, &tmp);
|
||||
if (ret < 0) {
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user