Compare commits
3 Commits: a2aeaff40f, aa15afb7ce, 10c26e928a
@@ -445,11 +445,10 @@ x86 Michael Niedermayer
Releases
========

1.2 Michael Niedermayer
1.1 Michael Niedermayer
1.0 Michael Niedermayer
0.11 Michael Niedermayer

If you want to maintain an older release, please contact us


GnuPG Fingerprints of maintainers and contributors

@@ -65,7 +65,7 @@ struct SwsContext *sws_opts;
SwrContext *swr_opts;
AVDictionary *format_opts, *codec_opts;

const int this_year = 2014;
const int this_year = 2013;

static FILE *report_file;

@@ -1851,7 +1851,7 @@ static int alloc_buffer(FrameBuffer **pool, AVCodecContext *s, FrameBuffer **pbu
/* XXX this shouldn't be needed, but some tests break without this line
 * those decoders are buggy and need to be fixed.
 * the following tests fail:
 * cdgraphics, ansi
 * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
 */
memset(buf->base[0], 128, ret);

@@ -190,13 +190,13 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
void show_help_children(const AVClass *class, int flags);

/**
 * Per-fftool specific help handler. Implemented in each
 * fftool, called by show_help().
 * Per-avtool specific help handler. Implemented in each
 * avtool, called by show_help().
 */
void show_help_default(const char *opt, const char *arg);

/**
 * Generic -h handler common to all fftools.
 * Generic -h handler common to all avtools.
 */
int show_help(void *optctx, const char *opt, const char *arg);
configure
@@ -789,21 +789,14 @@ check_ld(){
log check_ld "$@"
type=$1
shift 1
flags=$(filter_out '-l*|*.so' $@)
libs=$(filter '-l*|*.so' $@)
flags=$(filter_out '-l*' $@)
libs=$(filter '-l*' $@)
check_$type $($cflags_filter $flags) || return
flags=$($ldflags_filter $flags)
libs=$($ldflags_filter $libs)
check_cmd $ld $LDFLAGS $flags $(ld_o $TMPE) $TMPO $libs $extralibs
}

print_include(){
hdr=$1
test "${hdr%.h}" = "${hdr}" &&
echo "#include $hdr" ||
echo "#include <$hdr>"
}

check_code(){
log check_code "$@"
check=$1
@@ -812,7 +805,7 @@ check_code(){
shift 3
{
for hdr in $headers; do
print_include $hdr
echo "#include <$hdr>"
done
echo "int main(void) { $code; return 0; }"
} | check_$check "$@"
@@ -896,7 +889,7 @@ check_func_headers(){
shift 2
{
for hdr in $headers; do
print_include $hdr
echo "#include <$hdr>"
done
for func in $funcs; do
echo "long check_$func(void) { return (long) $func; }"
@@ -1060,26 +1053,6 @@ require_pkg_config(){
add_extralibs $(get_safe ${pkg}_libs)
}

require_libfreetype(){
log require_libfreetype "$@"
pkg="freetype2"
check_cmd $pkg_config --exists --print-errors $pkg \
|| die "ERROR: $pkg not found"
pkg_cflags=$($pkg_config --cflags $pkg)
pkg_libs=$($pkg_config --libs $pkg)
{
echo "#include <ft2build.h>"
echo "#include FT_FREETYPE_H"
echo "long check_func(void) { return (long) FT_Init_FreeType; }"
echo "int main(void) { return 0; }"
} | check_ld "cc" $pkg_cflags $pkg_libs \
&& set_safe ${pkg}_cflags $pkg_cflags \
&& set_safe ${pkg}_libs $pkg_libs \
|| die "ERROR: $pkg not found"
add_cflags $(get_safe ${pkg}_cflags)
add_extralibs $(get_safe ${pkg}_libs)
}

hostcc_o(){
eval printf '%s\\n' $HOSTCC_O
}
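The print_include helper shown above exists so that the generated test programs can include both ordinary header names and preprocessor macros such as FT_FREETYPE_H. A minimal standalone sketch of its behaviour (the function body is copied from the hunk above; the sample inputs and comments are illustrative only):

    print_include(){
        hdr=$1
        test "${hdr%.h}" = "${hdr}" &&
            echo "#include $hdr" ||
            echo "#include <$hdr>"
    }

    print_include sys/time.h      # prints: #include <sys/time.h>
    print_include FT_FREETYPE_H   # prints: #include FT_FREETYPE_H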
@@ -1370,8 +1343,6 @@ HAVE_LIST="
asm_types_h
attribute_may_alias
attribute_packed
cdio_paranoia_h
cdio_paranoia_paranoia_h
clock_gettime
closesocket
cmov
@@ -1493,7 +1464,6 @@ HAVE_LIST="
CONFIG_EXTRA="
aandcttables
ac3dsp
audio_frame_queue
error_resilience
gcrypt
golomb
@@ -1661,7 +1631,7 @@ mpegvideoenc_select="mpegvideo"

# decoders / encoders
aac_decoder_select="mdct sinewin"
aac_encoder_select="audio_frame_queue mdct sinewin"
aac_encoder_select="mdct sinewin"
aac_latm_decoder_select="aac_decoder aac_latm_parser"
ac3_decoder_select="mdct ac3dsp ac3_parser"
ac3_encoder_select="mdct ac3dsp"
@@ -1746,13 +1716,13 @@ msmpeg4v3_decoder_select="h263_decoder"
msmpeg4v3_encoder_select="h263_encoder"
mss2_decoder_select="vc1_decoder"
nellymoser_decoder_select="mdct sinewin"
nellymoser_encoder_select="audio_frame_queue mdct sinewin"
nellymoser_encoder_select="mdct sinewin"
nuv_decoder_select="lzo"
png_decoder_select="zlib"
png_encoder_select="zlib"
qcelp_decoder_select="lsp"
qdm2_decoder_select="mdct rdft mpegaudiodsp"
ra_144_encoder_select="audio_frame_queue lpc"
ra_144_encoder_select="lpc"
ralf_decoder_select="golomb"
rv10_decoder_select="h263_decoder"
rv10_encoder_select="h263_encoder"
@@ -1762,7 +1732,7 @@ rv30_decoder_select="error_resilience golomb h264chroma h264pred h264qpel mpegvi
rv40_decoder_select="error_resilience golomb h264chroma h264pred h264qpel mpegvideo"
shorten_decoder_select="golomb"
sipr_decoder_select="lsp"
snow_decoder_select="dwt rangecoder videodsp"
snow_decoder_select="dwt rangecoder"
snow_encoder_select="aandcttables dwt error_resilience mpegvideoenc rangecoder"
sonic_decoder_select="golomb"
sonic_encoder_select="golomb"
@@ -1808,15 +1778,14 @@ zmbv_encoder_select="zlib"

# hardware accelerators
crystalhd_deps="libcrystalhd_libcrystalhd_if_h"
dxva2_deps="dxva2api_h"
vaapi_deps="va_va_h"
vda_deps="VideoDecodeAcceleration_VDADecoder_h pthreads"
vdpau_deps="vdpau_vdpau_h vdpau_vdpau_x11_h"

h263_vaapi_hwaccel_select="vaapi h263_decoder"
h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser"
h264_dxva2_hwaccel_deps="dxva2"
h264_dxva2_hwaccel_select="h264_decoder"
h264_dxva2_hwaccel_deps="dxva2api_h"
h264_dxva2_hwaccel_select="dxva2 h264_decoder"
h264_vaapi_hwaccel_select="vaapi h264_decoder"
h264_vda_decoder_select="vda h264_parser h264_decoder"
h264_vda_hwaccel_deps="VideoDecodeAcceleration_VDADecoder_h pthreads"
@@ -1826,8 +1795,8 @@ mpeg_vdpau_decoder_select="vdpau mpegvideo_decoder"
mpeg1_vdpau_decoder_select="vdpau mpeg1video_decoder"
mpeg1_vdpau_hwaccel_select="vdpau mpeg1video_decoder"
mpeg2_crystalhd_decoder_select="crystalhd"
mpeg2_dxva2_hwaccel_deps="dxva2"
mpeg2_dxva2_hwaccel_select="mpeg2video_decoder"
mpeg2_dxva2_hwaccel_deps="dxva2api_h"
mpeg2_dxva2_hwaccel_select="dxva2 mpeg2video_decoder"
mpeg2_vdpau_hwaccel_select="vdpau mpeg2video_decoder"
mpeg2_vaapi_hwaccel_select="vaapi mpeg2video_decoder"
mpeg4_crystalhd_decoder_select="crystalhd"
@@ -1835,8 +1804,8 @@ mpeg4_vaapi_hwaccel_select="vaapi mpeg4_decoder"
mpeg4_vdpau_decoder_select="vdpau mpeg4_decoder"
msmpeg4_crystalhd_decoder_select="crystalhd"
vc1_crystalhd_decoder_select="crystalhd"
vc1_dxva2_hwaccel_deps="dxva2"
vc1_dxva2_hwaccel_select="vc1_decoder"
vc1_dxva2_hwaccel_deps="dxva2api_h"
vc1_dxva2_hwaccel_select="dxva2 vc1_decoder"
vc1_vaapi_hwaccel_select="vaapi vc1_decoder"
vc1_vdpau_decoder_select="vdpau vc1_decoder"
wmv3_crystalhd_decoder_select="crystalhd"
@@ -1854,9 +1823,7 @@ vc1_parser_select="error_resilience mpegvideo"
libaacplus_encoder_deps="libaacplus"
libcelt_decoder_deps="libcelt"
libfaac_encoder_deps="libfaac"
libfaac_encoder_select="audio_frame_queue"
libfdk_aac_encoder_deps="libfdk_aac"
libfdk_aac_encoder_select="audio_frame_queue"
libgsm_decoder_deps="libgsm"
libgsm_encoder_deps="libgsm"
libgsm_ms_decoder_deps="libgsm"
@@ -1865,30 +1832,24 @@ libilbc_decoder_deps="libilbc"
libilbc_encoder_deps="libilbc"
libmodplug_demuxer_deps="libmodplug"
libmp3lame_encoder_deps="libmp3lame"
libmp3lame_encoder_select="audio_frame_queue"
libopencore_amrnb_decoder_deps="libopencore_amrnb"
libopencore_amrnb_encoder_deps="libopencore_amrnb"
libopencore_amrnb_encoder_select="audio_frame_queue"
libopencore_amrwb_decoder_deps="libopencore_amrwb"
libopenjpeg_decoder_deps="libopenjpeg"
libopenjpeg_encoder_deps="libopenjpeg"
libopus_decoder_deps="libopus"
libopus_encoder_deps="libopus"
libopus_encoder_select="audio_frame_queue"
libschroedinger_decoder_deps="libschroedinger"
libschroedinger_encoder_deps="libschroedinger"
libspeex_decoder_deps="libspeex"
libspeex_encoder_deps="libspeex"
libspeex_encoder_select="audio_frame_queue"
libstagefright_h264_decoder_deps="libstagefright_h264"
libtheora_encoder_deps="libtheora"
libtwolame_encoder_deps="libtwolame"
libvo_aacenc_encoder_deps="libvo_aacenc"
libvo_aacenc_encoder_select="audio_frame_queue"
libvo_amrwbenc_encoder_deps="libvo_amrwbenc"
libvorbis_decoder_deps="libvorbis"
libvorbis_encoder_deps="libvorbis"
libvorbis_encoder_select="audio_frame_queue"
libvpx_decoder_deps="libvpx"
libvpx_encoder_deps="libvpx"
libx264_encoder_deps="libx264"
@@ -2127,9 +2088,6 @@ enable safe_bitstream_reader
enable static
enable swscale_alpha

# By default, enable only those hwaccels that have no external dependencies.
enable dxva2 vdpau

# build settings
SHFLAGS='-shared -Wl,-soname,$$(@F)'
FFSERVERLDFLAGS=-Wl,-E
@@ -2587,9 +2545,7 @@ probe_cc(){
unset _depflags _DEPCMD _DEPFLAGS
_flags_filter=echo

if $_cc --version 2>&1 | grep -q '^GNU assembler'; then
true # no-op to avoid reading stdin in following checks
elif $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
if $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
_type=llvm_gcc
gcc_extra_ver=$(expr "$($_cc --version | head -n1)" : '.*\((.*)\)')
_ident="llvm-gcc $($_cc -dumpversion) $gcc_extra_ver"
@@ -3120,7 +3076,7 @@ check_64bit(){
}

case "$arch" in
aarch64|alpha|ia64)
alpha|ia64)
spic=$shared
;;
mips)
@@ -3135,10 +3091,6 @@ case "$arch" in
check_64bit ppc ppc64 'sizeof(void *) > 4'
spic=$shared
;;
s390)
check_64bit s390 s390x 'sizeof(void *) > 4'
spic=$shared
;;
sparc)
check_64bit sparc sparc64 'sizeof(void *) > 4'
spic=$shared
@@ -3864,7 +3816,8 @@ enabled gnutls && require_pkg_config gnutls gnutls/gnutls.h gnutls_global_in
enabled libiec61883 && require libiec61883 libiec61883/iec61883.h iec61883_cmp_connect -lraw1394 -lavc1394 -lrom1394 -liec61883
enabled libaacplus && require "libaacplus >= 2.0.0" aacplus.h aacplusEncOpen -laacplus
enabled libass && require_pkg_config libass ass/ass.h ass_library_init
enabled libbluray && require_pkg_config libbluray libbluray/bluray.h bd_open
enabled libbluray && require libbluray libbluray/bluray.h bd_open -lbluray
enabled libcdio && require2 libcdio "cdio/cdda.h cdio/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio
enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 &&
{ check_lib celt/celt.h celt_decoder_create_custom -lcelt0 ||
die "ERROR: libcelt must be installed and version must be >= 0.11.0."; }
@@ -3873,7 +3826,7 @@ enabled libfaac && require2 libfaac "stdint.h faac.h" faacEncGetVersion -lfaa
enabled libfdk_aac && require libfdk_aac fdk-aac/aacenc_lib.h aacEncOpen -lfdk-aac
flite_libs="-lflite_cmu_time_awb -lflite_cmu_us_awb -lflite_cmu_us_kal -lflite_cmu_us_kal16 -lflite_cmu_us_rms -lflite_cmu_us_slt -lflite_usenglish -lflite_cmulex -lflite"
enabled libflite && require2 libflite "flite/flite.h" flite_init $flite_libs
enabled libfreetype && require_libfreetype
enabled libfreetype && require_pkg_config freetype2 "ft2build.h freetype/freetype.h" FT_Init_FreeType
enabled libgsm && require libgsm gsm/gsm.h gsm_create -lgsm
enabled libilbc && require libilbc ilbc.h WebRtcIlbcfix_InitDecode -lilbc
enabled libmodplug && require libmodplug libmodplug/modplug.h ModPlug_Load -lmodplug
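The require_pkg_config lines above rely on the library's .pc file being discoverable. A quick manual sanity check that mirrors what those helpers query (libbluray is taken from the line above; this command is only an illustration, not part of configure):

    pkg-config --exists --print-errors libbluray &&
        pkg-config --cflags --libs libbluray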
@@ -3963,7 +3916,7 @@ rsync --help 2> /dev/null | grep -q 'contimeout' && enable rsync_contimeout || d
check_header linux/fb.h
check_header linux/videodev.h
check_header linux/videodev2.h
check_code cc linux/videodev2.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_safe struct_v4l2_frmivalenum_discrete
check_struct linux/videodev2.h "struct v4l2_frmivalenum" discrete

check_header sys/videoio.h
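For reference, the check_code invocation above compiles a tiny program assembled by the check_code helper shown earlier in this diff; reconstructed roughly (illustration only, configure pipes it through its own check_cc wrapper rather than calling the compiler directly):

    {
        echo "#include <linux/videodev2.h>"
        echo "int main(void) { struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;; return 0; }"
    } | cc -x c - -o /dev/null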
@@ -4001,9 +3954,6 @@ enabled jack_indev && check_lib2 jack/jack.h jack_client_open -ljack && check_fu

enabled_any sndio_indev sndio_outdev && check_lib2 sndio.h sio_open -lsndio

if enabled libcdio; then
check_lib2 "cdio/cdda.h cdio/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio || check_lib2 "cdio/paranoia/cdda.h cdio/paranoia/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio
fi

enabled x11grab &&
require X11 X11/Xlib.h XOpenDisplay -lX11 &&
@@ -4140,7 +4090,6 @@ elif enabled gcc; then
check_optflags -fno-tree-vectorize
check_cflags -Werror=implicit-function-declaration
check_cflags -Werror=missing-prototypes
check_cflags -Werror=return-type
check_cflags -Werror=vla
elif enabled llvm_gcc; then
check_cflags -mllvm -stack-alignment=16
@@ -4149,7 +4098,6 @@ elif enabled clang; then
check_cflags -Qunused-arguments
check_cflags -Werror=implicit-function-declaration
check_cflags -Werror=missing-prototypes
check_cflags -Werror=return-type
elif enabled armcc; then
# 2523: use of inline assembler is deprecated
add_cflags -W${armcc_opt},--diag_suppress=2523
doc/APIchanges
@@ -132,30 +132,30 @@ API changes, most recent first:
2012-03-26 - a67d9cf - lavfi 2.66.100
Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.

2012-12-29 - 2ce43b3 / d8fd06c - lavu 52.13.100 / 52.3.0 - avstring.h
2012-xx-xx - xxxxxxx - lavu 52.2.1 - avstring.h
Add av_basename() and av_dirname().

2012-11-11 - 03b0787 / 5980f5d - lavu 52.6.100 / 52.2.0 - audioconvert.h
2012-11-10 - 5980f5dd - lavu 52.2.0 - audioconvert.h
Rename audioconvert.h to channel_layout.h. audioconvert.h is now deprecated.

2012-11-05 - 7d26be6 / dfde8a3 - lavu 52.5.100 / 52.1.0 - intmath.h
Add av_ctz() for trailing zero bit count
2012-10-26 - dfde8a34 - lavu 52.1.0 - intmath.h
Add av_ctz() for trailing zero bit count.

2012-10-21 - e3a91c5 / a893655 - lavu 51.77.100 / 51.45.0 - error.h
Add AVERROR_EXPERIMENTAL
2012-10-18 - a893655b - lavu 51.45.0 - error.h
Add AVERROR_EXPERIMENTAL.

2012-10-12 - a33ed6b / d2fcb35 - lavu 51.76.100 / 51.44.0 - pixdesc.h
2012-10-12 - d2fcb356 - lavu 51.44.0 - pixdesc.h
Add functions for accessing pixel format descriptors.
Accessing the av_pix_fmt_descriptors array directly is now
deprecated.

2012-10-11 - f391e40 / 9a92aea - lavu 51.75.100 / 51.43.0 - aes.h, md5.h, sha.h, tree.h
2012-10-11 - 9a92aea2 - lavu 51.43.0 - aes.h, md5.h, sha.h, tree.h
Add functions for allocating the opaque contexts for the algorithms,

2012-10-10 - de31814 / b522000 - lavf 54.32.100 / 54.18.0 - avio.h
2012-10-10 - b522000e - lavf 54.18.0 - avio.h
Add avio_closep to complement avio_close.

2012-10-08 - ae77266 / 78071a1 - lavu 51.74.100 / 51.42.0 - pixfmt.h
2012-10-06 - 78071a14 - lavu 51.42.0 - pixfmt.h
Rename PixelFormat to AVPixelFormat and all PIX_FMT_* to AV_PIX_FMT_*.
To provide backwards compatibility, PixelFormat is now #defined as
AVPixelFormat.
@@ -163,23 +163,23 @@ API changes, most recent first:
'PixelFormat' identifier. Such code should either #undef PixelFormat
or stop using the PixelFormat name.

2012-10-05 - 55c49af / e7ba5b1 - lavr 1.0.0 - avresample.h
2012-10-05 - e7ba5b1 - lavr 1.0.0 - avresample.h
Data planes parameters to avresample_convert() and
avresample_read() are now uint8_t** instead of void**.
Libavresample is now stable.

2012-09-24 - 46a3595 / a42aada - lavc 54.59.100 / 54.28.0 - avcodec.h
2012-09-24 - a42aada - lavc 54.28.0 - avcodec.h
Add avcodec_free_frame(). This function must now
be used for freeing an AVFrame.

2012-09-12 - e3e09f2 / 8919fee - lavu 51.73.100 / 51.41.0 - audioconvert.h
2012-09-12 - 8919fee - lavu 51.41.0 - audioconvert.h
Added AV_CH_LOW_FREQUENCY_2 channel mask value.

2012-09-04 - b21b5b0 / 686a329 - lavu 51.71.100 / 51.40.0 - opt.h
2012-09-04 - 686a329 - lavu 51.40.0 - opt.h
Reordered the fields in default_val in AVOption, changed which
default_val field is used for which AVOptionType.

2012-08-30 - 98298eb / a231832 - lavc 54.54.101 / 54.26.1 - avcodec.h
2012-08-30 - a231832 - lavc 54.26.1 - avcodec.h
Add codec descriptor properties AV_CODEC_PROP_LOSSY and
AV_CODEC_PROP_LOSSLESS.

@@ -187,90 +187,90 @@ API changes, most recent first:
Add codec descriptors for accessing codec properties without having
to refer to a specific decoder or encoder.

f5f3684 / c223d79 - Add an AVCodecDescriptor struct and functions
c223d79 - Add an AVCodecDescriptor struct and functions
avcodec_descriptor_get() and avcodec_descriptor_next().
f5f3684 / 51efed1 - Add AVCodecDescriptor.props and AV_CODEC_PROP_INTRA_ONLY.
6c180b3 / 91e59fe - Add avcodec_descriptor_get_by_name().
51efed1 - Add AVCodecDescriptor.props and AV_CODEC_PROP_INTRA_ONLY.
91e59fe - Add avcodec_descriptor_get_by_name().

2012-08-08 - f5f3684 / 987170c - lavu 51.68.100 / 51.38.0 - dict.h
2012-08-08 - 987170c - lavu 51.38 - dict.h
Add av_dict_count().

2012-08-07 - 7a72695 / 104e10f - lavc 54.51.100 / 54.25.0 - avcodec.h
2012-08-07 - 104e10f - lavc 54.25 - avcodec.h
Rename CodecID to AVCodecID and all CODEC_ID_* to AV_CODEC_ID_*.
To provide backwards compatibility, CodecID is now #defined as AVCodecID.
Note that this can break user code that includes avcodec.h and uses the
'CodecID' identifier. Such code should either #undef CodecID or stop using the
CodecID name.

2012-08-03 - e776ee8 / 239fdf1 - lavu 51.66.101 / 51.37.1 - cpu.h
2012-08-03 - 239fdf1 - lavu 51.37.1 - cpu.h
lsws 2.1.1 - swscale.h
Rename AV_CPU_FLAG_MMX2 ---> AV_CPU_FLAG_MMXEXT.
Rename SWS_CPU_CAPS_MMX2 ---> SWS_CPU_CAPS_MMXEXT.

2012-07-29 - 7c26761 / 681ed00 - lavf 54.22.100 / 54.13.0 - avformat.h
2012-07-29 - 681ed00 - lavf 54.13.0 - avformat.h
Add AVFMT_FLAG_NOBUFFER for low latency use cases.

2012-07-10 - 5fade8a - lavu 51.37.0
Add av_malloc_array() and av_mallocz_array()

2012-06-22 - e847f41 / d3d3a32 - lavu 51.61.100 / 51.34.0
2012-06-22 - d3d3a32 - lavu 51.34.0
Add av_usleep()

2012-06-20 - 4da42eb / ae0a301 - lavu 51.60.100 / 51.33.0
2012-06-20 - ae0a301 - lavu 51.33.0
Move av_gettime() to libavutil, add libavutil/time.h

2012-06-09 - 82edf67 / 3971be0 - lavr 0.0.3
2012-06-09 - 3971be0 - lavr 0.0.3
Add a parameter to avresample_build_matrix() for Dolby/DPLII downmixing.

2012-06-12 - c7b9eab / 9baeff9 - lavfi 2.79.100 / 2.23.0 - avfilter.h
2012-06-12 - 9baeff9 - lavfi 2.23.0 - avfilter.h
Add AVFilterContext.nb_inputs/outputs. Deprecate
AVFilterContext.input/output_count.

2012-06-12 - c7b9eab / 84b9fbe - lavfi 2.79.100 / 2.22.0 - avfilter.h
2012-06-12 - 84b9fbe - lavfi 2.22.0 - avfilter.h
Add avfilter_pad_get_type() and avfilter_pad_get_name(). Those
should now be used instead of accessing AVFilterPad members
directly.

2012-06-12 - 3630a07 / b0f0dfc - lavu 51.57.100 / 51.32.0 - audioconvert.h
2012-06-12 - b0f0dfc - lavu 51.32.0 - audioconvert.h
Add av_get_channel_layout_channel_index(), av_get_channel_name()
and av_channel_layout_extract_channel().

2012-05-25 - 53ce990 / 154486f - lavu 51.55.100 / 51.31.0 - opt.h
2012-05-25 - 154486f - lavu 51.31.0 - opt.h
Add av_opt_set_bin()

2012-05-15 - lavfi 2.74.100 / 2.17.0
2012-05-15 - lavfi 2.17.0
Add support for audio filters
61930bd / ac71230, 1cbf7fb / a2cd9be - add video/audio buffer sink in a new installed
ac71230/a2cd9be - add video/audio buffer sink in a new installed
header buffersink.h
1cbf7fb / 720c6b7 - add av_buffersrc_write_frame(), deprecate
720c6b7 - add av_buffersrc_write_frame(), deprecate
av_vsrc_buffer_add_frame()
61930bd / ab16504 - add avfilter_copy_buf_props()
61930bd / 9453c9e - add extended_data to AVFilterBuffer
61930bd / 1b8c927 - add avfilter_get_audio_buffer_ref_from_arrays()
ab16504 - add avfilter_copy_buf_props()
9453c9e - add extended_data to AVFilterBuffer
1b8c927 - add avfilter_get_audio_buffer_ref_from_arrays()

2012-05-09 - lavu 51.53.100 / 51.30.0 - samplefmt.h
61930bd / 142e740 - add av_samples_copy()
61930bd / 6d7f617 - add av_samples_set_silence()
2012-05-09 - lavu 51.30.0 - samplefmt.h
142e740 - add av_samples_copy()
6d7f617 - add av_samples_set_silence()

2012-05-09 - 61930bd / a5117a2 - lavc 54.21.101 / 54.13.1
2012-05-09 - a5117a2 - lavc 54.13.1
For audio formats with fixed frame size, the last frame
no longer needs to be padded with silence, libavcodec
will handle this internally (effectively all encoders
behave as if they had CODEC_CAP_SMALL_LAST_FRAME set).

2012-05-07 - 653d117 / 828bd08 - lavc 54.20.100 / 54.13.0 - avcodec.h
2012-05-07 - 828bd08 - lavc 54.13.0 - avcodec.h
Add sample_rate and channel_layout fields to AVFrame.

2012-05-01 - 2330eb1 / 4010d72 - lavr 0.0.1
2012-05-01 - 4010d72 - lavr 0.0.1
Change AV_MIX_COEFF_TYPE_Q6 to AV_MIX_COEFF_TYPE_Q8.

2012-04-25 - e890b68 / 3527a73 - lavu 51.48.100 / 51.29.0 - cpu.h
2012-04-25 - 3527a73 - lavu 51.29.0 - cpu.h
Add av_parse_cpu_flags()

2012-04-24 - 3ead79e / c8af852 - lavr 0.0.0
2012-04-24 - c8af852 - lavr 0.0.0
Add libavresample audio conversion library

2012-04-20 - 3194ab7 / 0c0d1bc - lavu 51.47.100 / 51.28.0 - audio_fifo.h
2012-04-20 - 0c0d1bc - lavu 51.28.0 - audio_fifo.h
Add audio FIFO functions:
av_audio_fifo_free()
av_audio_fifo_alloc()
@@ -282,10 +282,10 @@ API changes, most recent first:
av_audio_fifo_size()
av_audio_fifo_space()

2012-04-14 - lavfi 2.70.100 / 2.16.0 - avfiltergraph.h
7432bcf / d7bcc71 Add avfilter_graph_parse2().
2012-04-14 - lavfi 2.16.0 - avfiltergraph.h
d7bcc71 Add avfilter_graph_parse2().

2012-04-08 - 6bfb304 / 4d693b0 - lavu 51.46.100 / 51.27.0 - samplefmt.h
2012-04-08 - 4d693b0 - lavu 51.27.0 - samplefmt.h
Add av_get_packed_sample_fmt() and av_get_planar_sample_fmt()

2012-03-21 - b75c67d - lavu 51.43.100
@@ -313,73 +313,73 @@ API changes, most recent first:
2012-01-24 - 0c3577b - lavfi 2.60.100
Add avfilter_graph_dump.

2012-03-20 - 0ebd836 / 3c90cc2 - lavfo 54.2.0
2012-03-20 - 3c90cc2 - lavfo 54.2.0
Deprecate av_read_packet(), use av_read_frame() with
AVFMT_FLAG_NOPARSE | AVFMT_FLAG_NOFILLIN in AVFormatContext.flags

2012-03-05 - lavc 54.10.100 / 54.8.0
f095391 / 6699d07 Add av_get_exact_bits_per_sample()
f095391 / 9524cf7 Add av_get_audio_frame_duration()
2012-03-05 - lavc 54.8.0
6699d07 Add av_get_exact_bits_per_sample()
9524cf7 Add av_get_audio_frame_duration()

2012-03-04 - 2af8f2c / 44fe77b - lavc 54.8.100 / 54.7.0 - avcodec.h
2012-03-04 - 44fe77b - lavc 54.7.0 - avcodec.h
Add av_codec_is_encoder/decoder().

2012-03-01 - 1eb7f39 / 442c132 - lavc 54.5.100 / 54.3.0 - avcodec.h
2012-03-01 - 442c132 - lavc 54.3.0 - avcodec.h
Add av_packet_shrink_side_data.

2012-02-29 - 79ae084 / dd2a4bc - lavf 54.2.100 / 54.2.0 - avformat.h
2012-02-29 - dd2a4bc - lavf 54.2.0 - avformat.h
Add AVStream.attached_pic and AV_DISPOSITION_ATTACHED_PIC,
used for dealing with attached pictures/cover art.

2012-02-25 - 305e4b3 / c9bca80 - lavu 51.41.100 / 51.24.0 - error.h
2012-02-25 - c9bca80 - lavu 51.24.0 - error.h
Add AVERROR_UNKNOWN
NOTE: this was backported to 0.8

2012-02-20 - eadd426 / e9cda85 - lavc 54.2.100 / 54.2.0
2012-02-20 - e9cda85 - lavc 54.2.0
Add duration field to AVCodecParserContext

2012-02-20 - eadd426 / 0b42a93 - lavu 51.40.100 / 51.23.1 - mathematics.h
2012-02-20 - 0b42a93 - lavu 51.23.1 - mathematics.h
Add av_rescale_q_rnd()

2012-02-08 - f2b20b7 / 38d5533 - lavu 51.38.101 / 51.22.1 - pixdesc.h
2012-02-08 - 38d5533 - lavu 51.22.1 - pixdesc.h
Add PIX_FMT_PSEUDOPAL flag.

2012-02-08 - f2b20b7 / 52f82a1 - lavc 54.2.100 / 54.1.0
2012-02-08 - 52f82a1 - lavc 54.01.0
Add avcodec_encode_video2() and deprecate avcodec_encode_video().

2012-02-01 - 4c677df / 316fc74 - lavc 54.1.0
2012-02-01 - 316fc74 - lavc 54.01.0
Add av_fast_padded_malloc() as alternative for av_realloc() when aligned
memory is required. The buffer will always have FF_INPUT_BUFFER_PADDING_SIZE
zero-padded bytes at the end.

2012-01-31 - a369a6b / dd6d3b0 - lavf 54.1.0
2012-01-31 - dd6d3b0 - lavf 54.01.0
Add avformat_get_riff_video_tags() and avformat_get_riff_audio_tags().
NOTE: this was backported to 0.8

2012-01-31 - a369a6b / af08d9a - lavc 54.1.0
2012-01-31 - af08d9a - lavc 54.01.0
Add avcodec_is_open() function.
NOTE: this was backported to 0.8

2012-01-30 - 151ecc2 / 8b93312 - lavu 51.36.100 / 51.22.0 - intfloat.h
2012-01-30 - 8b93312 - lavu 51.22.0 - intfloat.h
Add a new installed header libavutil/intfloat.h with int/float punning
functions.
NOTE: this was backported to 0.8

2012-01-25 - lavf 53.31.100 / 53.22.0
3c5fe5b / f1caf01 Allow doing av_write_frame(ctx, NULL) for flushing possible
2012-01-25 - lavf 53.22.0
f1caf01 Allow doing av_write_frame(ctx, NULL) for flushing possible
buffered data within a muxer. Added AVFMT_ALLOW_FLUSH for
muxers supporting it (av_write_frame makes sure it is called
only for muxers with this flag).

2012-01-15 - lavc 53.56.105 / 53.34.0
2012-01-15 - lavc 53.34.0
New audio encoding API:
67f5650 / b2c75b6 Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by audio
b2c75b6 Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by audio
encoders.
67f5650 / 5ee5fa0 Add avcodec_fill_audio_frame() as a convenience function.
67f5650 / b2c75b6 Add avcodec_encode_audio2() and deprecate avcodec_encode_audio().
5ee5fa0 Add avcodec_fill_audio_frame() as a convenience function.
b2c75b6 Add avcodec_encode_audio2() and deprecate avcodec_encode_audio().
Add AVCodec.encode2().

2012-01-12 - b18e17e / 3167dc9 - lavfi 2.59.100 / 2.15.0
2012-01-12 - 3167dc9 - lavfi 2.15.0
Add a new installed header -- libavfilter/version.h -- with version macros.

2011-12-08 - a502939 - lavfi 2.52.0
@@ -400,37 +400,37 @@ API changes, most recent first:
2011-10-20 - b35e9e1 - lavu 51.22.0
Add av_strtok() to avstring.h.

2012-01-03 - ad1c8dd / b73ec05 - lavu 51.34.100 / 51.21.0
2011-01-03 - b73ec05 - lavu 51.21.0
Add av_popcount64

2011-12-18 - 7c29313 / 8400b12 - lavc 53.46.1 / 53.28.1
2011-12-18 - 8400b12 - lavc 53.28.1
Deprecate AVFrame.age. The field is unused.

2011-12-12 - 8bc7fe4 / 5266045 - lavf 53.25.0 / 53.17.0
2011-12-12 - 5266045 - lavf 53.17.0
Add avformat_close_input().
Deprecate av_close_input_file() and av_close_input_stream().

2011-12-02 - e4de716 / 0eea212 - lavc 53.40.0 / 53.25.0
2011-12-02 - 0eea212 - lavc 53.25.0
Add nb_samples and extended_data fields to AVFrame.
Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4().
avcodec_decode_audio4() writes output samples to an AVFrame, which allows
audio decoders to use get_buffer().

2011-12-04 - e4de716 / 560f773 - lavc 53.40.0 / 53.24.0
2011-12-04 - 560f773 - lavc 53.24.0
Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
Change AVCodecContext.error[4] to [8] at next major bump.
Add AV_NUM_DATA_POINTERS to simplify the bump transition.

2011-11-23 - 8e576d5 / bbb46f3 - lavu 51.27.0 / 51.18.0
2011-11-23 - bbb46f3 - lavu 51.18.0
Add av_samples_get_buffer_size(), av_samples_fill_arrays(), and
av_samples_alloc(), to samplefmt.h.

2011-11-23 - 8e576d5 / 8889cc4 - lavu 51.27.0 / 51.17.0
2011-11-23 - 8889cc4 - lavu 51.17.0
Add planar sample formats and av_sample_fmt_is_planar() to samplefmt.h.

2011-11-19 - dbb38bc / f3a29b7 - lavc 53.36.0 / 53.21.0
2011-11-19 - f3a29b7 - lavc 53.21.0
Move some AVCodecContext fields to a new private struct, AVCodecInternal,
which is accessed from a new field, AVCodecContext.internal.
- fields moved:
@@ -438,55 +438,55 @@ API changes, most recent first:
AVCodecContext.internal_buffer_count --> AVCodecInternal.buffer_count
AVCodecContext.is_copy --> AVCodecInternal.is_copy

2011-11-16 - 8709ba9 / 6270671 - lavu 51.26.0 / 51.16.0
2011-11-16 - 6270671 - lavu 51.16.0
Add av_timegm()

2011-11-13 - lavf 53.21.0 / 53.15.0
2011-11-13 - lavf 53.15.0
New interrupt callback API, allowing per-AVFormatContext/AVIOContext
interrupt callbacks.
5f268ca / 6aa0b98 Add AVIOInterruptCB struct and the interrupt_callback field to
6aa0b98 Add AVIOInterruptCB struct and the interrupt_callback field to
AVFormatContext.
5f268ca / 1dee0ac Add avio_open2() with additional parameters. Those are
1dee0ac Add avio_open2() with additional parameters. Those are
an interrupt callback and an options AVDictionary.
This will allow passing AVOptions to protocols after lavf
54.0.

2011-11-06 - 13b7781 / ba04ecf - lavu 51.24.0 / 51.14.0
2011-11-06 - ba04ecf - lavu 51.14.0
Add av_strcasecmp() and av_strncasecmp() to avstring.h.

2011-11-06 - 13b7781 / 07b172f - lavu 51.24.0 / 51.13.0
2011-11-06 - 07b172f - lavu 51.13.0
Add av_toupper()/av_tolower()

2011-11-05 - d8cab5c / b6d08f4 - lavf 53.19.0 / 53.13.0
2011-11-05 - b6d08f4 - lavf 53.13.0
Add avformat_network_init()/avformat_network_deinit()

2011-10-27 - 6faf0a2 / 512557b - lavc 53.24.0 / 53.15.0
2011-10-27 - 512557b - lavc 53.15.0
Remove avcodec_parse_frame.
Deprecate AVCodecContext.parse_only and CODEC_CAP_PARSE_ONLY.

2011-10-19 - d049257 / 569129a - lavf 53.17.0 / 53.10.0
2011-10-19 - 569129a - lavf 53.10.0
Add avformat_new_stream(). Deprecate av_new_stream().

2011-10-13 - 91eb1b1 / b631fba - lavf 53.16.0 / 53.9.0
2011-10-13 - b631fba - lavf 53.9.0
Add AVFMT_NO_BYTE_SEEK AVInputFormat flag.

2011-10-12 - lavu 51.21.0 / 51.12.0
2011-10-12 - lavu 51.12.0
AVOptions API rewrite.

- f884ef0 / 145f741 FF_OPT_TYPE* renamed to AV_OPT_TYPE_*
- 145f741 FF_OPT_TYPE* renamed to AV_OPT_TYPE_*
- new setting/getting functions with slightly different semantics:
f884ef0 / dac66da av_set_string3 -> av_opt_set
dac66da av_set_string3 -> av_opt_set
av_set_double -> av_opt_set_double
av_set_q -> av_opt_set_q
av_set_int -> av_opt_set_int

f884ef0 / 41d9d51 av_get_string -> av_opt_get
41d9d51 av_get_string -> av_opt_get
av_get_double -> av_opt_get_double
av_get_q -> av_opt_get_q
av_get_int -> av_opt_get_int

- f884ef0 / 8c5dcaa trivial rename av_next_option -> av_opt_next
- f884ef0 / 641c7af new functions - av_opt_child_next, av_opt_child_class_next
- 8c5dcaa trivial rename av_next_option -> av_opt_next
- 641c7af new functions - av_opt_child_next, av_opt_child_class_next
and av_opt_find2()

2011-09-22 - a70e787 - lavu 51.17.0
@@ -532,31 +532,31 @@ API changes, most recent first:
2011-08-20 - 69e2c1a - lavu 51.13.0
Add av_get_media_type_string().

2011-09-03 - 1889c67 / fb4ca26 - lavc 53.13.0
2011-09-03 - fb4ca26 - lavc 53.13.0
lavf 53.11.0
lsws 2.1.0
Add {avcodec,avformat,sws}_get_class().

2011-08-03 - 1889c67 / c11fb82 - lavu 51.15.0
2011-08-03 - c11fb82 - lavu 51.15.0
Add AV_OPT_SEARCH_FAKE_OBJ flag for av_opt_find() function.

2011-08-14 - 323b930 - lavu 51.12.0
Add av_fifo_peek2(), deprecate av_fifo_peek().

2011-08-26 - lavu 51.14.0 / 51.9.0
- 976a8b2 / add41de..976a8b2 / abc78a5 Do not include intfloat_readwrite.h,
2011-08-26 - lavu 51.9.0
- add41de..abc78a5 Do not include intfloat_readwrite.h,
mathematics.h, rational.h, pixfmt.h, or log.h from avutil.h.

2011-08-16 - 27fbe31 / 48f9e45 - lavf 53.11.0 / 53.8.0
2011-08-16 - 48f9e45 - lavf 53.8.0
Add avformat_query_codec().

2011-08-16 - 27fbe31 / bca06e7 - lavc 53.11.0
2011-08-16 - bca06e7 - lavc 53.11.0
Add avcodec_get_type().

2011-08-06 - 0cb233c / 2f63440 - lavf 53.7.0
2011-08-06 - 2f63440 - lavf 53.7.0
Add error_recognition to AVFormatContext.

2011-08-02 - 1d186e9 / 9d39cbf - lavc 53.9.1
2011-08-02 - 9d39cbf - lavc 53.9.1
Add AV_PKT_FLAG_CORRUPT AVPacket flag.

2011-07-16 - b57df29 - lavfi 2.27.0
@@ -567,11 +567,11 @@ API changes, most recent first:
avfilter_set_common_packing_formats()
avfilter_all_packing_formats()

2011-07-10 - 3602ad7 / a67c061 - lavf 53.6.0
2011-07-10 - a67c061 - lavf 53.6.0
Add avformat_find_stream_info(), deprecate av_find_stream_info().
NOTE: this was backported to 0.7

2011-07-10 - 3602ad7 / 0b950fe - lavc 53.8.0
2011-07-10 - 0b950fe - lavc 53.8.0
Add avcodec_open2(), deprecate avcodec_open().
NOTE: this was backported to 0.7

@@ -614,35 +614,35 @@ API changes, most recent first:
2011-06-12 - 6119b23 - lavfi 2.16.0 - avfilter_graph_parse()
Change avfilter_graph_parse() signature.

2011-06-23 - 686959e / 67e9ae1 - lavu 51.10.0 / 51.8.0 - attributes.h
2011-06-23 - 67e9ae1 - lavu 51.8.0 - attributes.h
Add av_printf_format().

2011-06-16 - 2905e3f / 05e84c9, 2905e3f / 25de595 - lavf 53.4.0 / 53.2.0 - avformat.h
2011-06-16 - 05e84c9, 25de595 - lavf 53.2.0 - avformat.h
Add avformat_open_input and avformat_write_header().
Deprecate av_open_input_stream, av_open_input_file,
AVFormatParameters and av_write_header.

2011-06-16 - 2905e3f / 7e83e1c, 2905e3f / dc59ec5 - lavu 51.9.0 / 51.7.0 - opt.h
2011-06-16 - 7e83e1c, dc59ec5 - lavu 51.7.0 - opt.h
Add av_opt_set_dict() and av_opt_find().
Deprecate av_find_opt().
Add AV_DICT_APPEND flag.

2011-06-10 - 45fb647 / cb7c11c - lavu 51.6.0 - opt.h
2011-06-10 - cb7c11c - lavu 51.6.0 - opt.h
Add av_opt_flag_is_set().

2011-06-10 - c381960 - lavfi 2.15.0 - avfilter_get_audio_buffer_ref_from_arrays
Add avfilter_get_audio_buffer_ref_from_arrays() to avfilter.h.

2011-06-09 - f9ecb84 / d9f80ea - lavu 51.8.0 - AVMetadata
2011-06-09 - d9f80ea - lavu 51.8.0 - AVMetadata
Move AVMetadata from lavf to lavu and rename it to
AVDictionary -- new installed header dict.h.
All av_metadata_* functions renamed to av_dict_*.

2011-06-07 - d552f61 / a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
2011-06-07 - a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
Add av_get_bytes_per_sample() in libavutil/samplefmt.h.
Deprecate av_get_bits_per_sample_fmt().

2011-06-05 - f956924 / b39b062 - lavu 51.8.0 - opt.h
2011-06-05 - b39b062 - lavu 51.8.0 - opt.h
Add av_opt_free convenience function.

2011-06-06 - 95a0242 - lavfi 2.14.0 - AVFilterBufferRefAudioProps
@@ -672,7 +672,7 @@ API changes, most recent first:
Add av_get_pix_fmt_name() in libavutil/pixdesc.h, and deprecate
avcodec_get_pix_fmt_name() in libavcodec/avcodec.h in its favor.

2011-05-25 - 39e4206 / 30315a8 - lavf 53.3.0 - avformat.h
2011-05-25 - 30315a8 - lavf 53.3.0 - avformat.h
Add fps_probe_size to AVFormatContext.

2011-05-22 - 5ecdfd0 - lavf 53.2.0 - avformat.h
@@ -688,10 +688,10 @@ API changes, most recent first:
2011-05-14 - 9fdf772 - lavfi 2.6.0 - avcodec.h
Add avfilter_get_video_buffer_ref_from_frame() to libavfilter/avcodec.h.

2011-05-18 - 75a37b5 / 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
2011-05-18 - 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
Add request_sample_fmt field to AVCodecContext.

2011-05-10 - 59eb12f / 188dea1 - lavc 53.6.0 - avcodec.h
2011-05-10 - 188dea1 - lavc 53.6.0 - avcodec.h
Deprecate AVLPCType and the following fields in
AVCodecContext: lpc_coeff_precision, prediction_order_method,
min_partition_order, max_partition_order, lpc_type, lpc_passes.
@@ -721,81 +721,81 @@ API changes, most recent first:
Add av_dynarray_add function for adding
an element to a dynamic array.

2011-04-26 - d7e5aeb / bebe72f - lavu 51.1.0 - avutil.h
2011-04-26 - bebe72f - lavu 51.1.0 - avutil.h
Add AVPictureType enum and av_get_picture_type_char(), deprecate
FF_*_TYPE defines and av_get_pict_type_char() defined in
libavcodec/avcodec.h.

2011-04-26 - d7e5aeb / 10d3940 - lavfi 2.3.0 - avfilter.h
2011-04-26 - 10d3940 - lavfi 2.3.0 - avfilter.h
Add pict_type and key_frame fields to AVFilterBufferRefVideo.

2011-04-26 - d7e5aeb / 7a11c82 - lavfi 2.2.0 - vsrc_buffer
2011-04-26 - 7a11c82 - lavfi 2.2.0 - vsrc_buffer
Add sample_aspect_ratio fields to vsrc_buffer arguments

2011-04-21 - 8772156 / 94f7451 - lavc 53.1.0 - avcodec.h
2011-04-21 - 94f7451 - lavc 53.1.0 - avcodec.h
Add CODEC_CAP_SLICE_THREADS for codecs supporting sliced threading.

2011-04-15 - lavc 52.120.0 - avcodec.h
AVPacket structure got additional members for passing side information:
c407984 / 4de339e introduce side information for AVPacket
c407984 / 2d8591c make containers pass palette change in AVPacket
4de339e introduce side information for AVPacket
2d8591c make containers pass palette change in AVPacket

2011-04-12 - lavf 52.107.0 - avio.h
Avio cleanup, part II - deprecate the entire URLContext API:
c55780d / 175389c add avio_check as a replacement for url_exist
9891004 / ff1ec0c add avio_pause and avio_seek_time as replacements
175389c add avio_check as a replacement for url_exist
ff1ec0c add avio_pause and avio_seek_time as replacements
for _av_url_read_fseek/fpause
d4d0932 / cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
should be used instead.
c88caa5 / 80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
c88caa5 / f87b1b3 rename open flags: URL_* -> AVIO_*
d4d0932 / f8270bb add avio_enum_protocols.
d4d0932 / 5593f03 deprecate URLProtocol.
d4d0932 / c486dad deprecate URLContext.
d4d0932 / 026e175 deprecate the typedef for URLInterruptCB
c88caa5 / 8e76a19 deprecate av_register_protocol2.
11d7841 / b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
11d7841 / 1305d93 deprecate av_url_read_seek
11d7841 / fa104e1 deprecate av_url_read_pause
434f248 / 727c7aa deprecate url_get_filename().
434f248 / 5958df3 deprecate url_max_packet_size().
434f248 / 1869ea0 deprecate url_get_file_handle().
434f248 / 32a97d4 deprecate url_filesize().
434f248 / e52a914 deprecate url_close().
434f248 / 58a48c6 deprecate url_seek().
434f248 / 925e908 deprecate url_write().
434f248 / dce3756 deprecate url_read_complete().
434f248 / bc371ac deprecate url_read().
434f248 / 0589da0 deprecate url_open().
434f248 / 62eaaea deprecate url_connect.
434f248 / 5652bb9 deprecate url_alloc.
434f248 / 333e894 deprecate url_open_protocol
434f248 / e230705 deprecate url_poll and URLPollEntry
80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
f87b1b3 rename open flags: URL_* -> AVIO_*
f8270bb add avio_enum_protocols.
5593f03 deprecate URLProtocol.
c486dad deprecate URLContext.
026e175 deprecate the typedef for URLInterruptCB
8e76a19 deprecate av_register_protocol2.
b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
1305d93 deprecate av_url_read_seek
fa104e1 deprecate av_url_read_pause
727c7aa deprecate url_get_filename().
5958df3 deprecate url_max_packet_size().
1869ea0 deprecate url_get_file_handle().
32a97d4 deprecate url_filesize().
e52a914 deprecate url_close().
58a48c6 deprecate url_seek().
925e908 deprecate url_write().
dce3756 deprecate url_read_complete().
bc371ac deprecate url_read().
0589da0 deprecate url_open().
62eaaea deprecate url_connect.
5652bb9 deprecate url_alloc.
333e894 deprecate url_open_protocol
e230705 deprecate url_poll and URLPollEntry

2011-04-08 - lavf 52.106.0 - avformat.h
Minor avformat.h cleanup:
d4d0932 / a9bf9d8 deprecate av_guess_image2_codec
d4d0932 / c3675df rename avf_sdp_create->av_sdp_create
a9bf9d8 deprecate av_guess_image2_codec
c3675df rename avf_sdp_create->av_sdp_create

2011-04-03 - lavf 52.105.0 - avio.h
Large-scale renaming/deprecating of AVIOContext-related functions:
2cae980 / 724f6a0 deprecate url_fdopen
2cae980 / 403ee83 deprecate url_open_dyn_packet_buf
2cae980 / 6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
2cae980 / b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
2cae980 / 8978fed introduce an AVIOContext.seekable field as a replacement for
724f6a0 deprecate url_fdopen
403ee83 deprecate url_open_dyn_packet_buf
6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
8978fed introduce an AVIOContext.seekable field as a replacement for
AVIOContext.is_streamed and url_is_streamed()
1caa412 / b64030f deprecate get_checksum()
1caa412 / 4c4427a deprecate init_checksum()
2fd41c9 / 4ec153b deprecate udp_set_remote_url/get_local_port
4fa0e24 / 933e90a deprecate av_url_read_fseek/fpause
4fa0e24 / 8d9769a deprecate url_fileno
0fecf26 / b7f2fdd rename put_flush_packet -> avio_flush
0fecf26 / 35f1023 deprecate url_close_buf
0fecf26 / 83fddae deprecate url_open_buf
0fecf26 / d9d86e0 rename url_fprintf -> avio_printf
0fecf26 / 59f65d9 deprecate url_setbufsize
6947b0c / 3e68b3b deprecate url_ferror
b64030f deprecate get_checksum()
4c4427a deprecate init_checksum()
4ec153b deprecate udp_set_remote_url/get_local_port
933e90a deprecate av_url_read_fseek/fpause
8d9769a deprecate url_fileno
b7f2fdd rename put_flush_packet -> avio_flush
35f1023 deprecate url_close_buf
83fddae deprecate url_open_buf
d9d86e0 rename url_fprintf -> avio_printf
59f65d9 deprecate url_setbufsize
3e68b3b deprecate url_ferror
e8bb2e2 deprecate url_fget_max_packet_size
76aa876 rename url_fsize -> avio_size
e519753 deprecate url_fgetc
@@ -816,7 +816,7 @@ API changes, most recent first:
b3db9ce deprecate get_partial_buffer
8d9ac96 rename av_alloc_put_byte -> avio_alloc_context

2011-03-25 - 27ef7b1 / 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
2011-03-25 - 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
Add audio_service_type field to AVCodecContext.

2011-03-17 - e309fdc - lavu 50.40.0 - pixfmt.h
@@ -854,11 +854,11 @@ API changes, most recent first:
2011-02-10 - 12c14cd - lavf 52.99.0 - AVStream.disposition
Add AV_DISPOSITION_HEARING_IMPAIRED and AV_DISPOSITION_VISUAL_IMPAIRED.

2011-02-09 - c0b102c - lavc 52.112.0 - avcodec_thread_init()
2011-02-09 - 5592734 - lavc 52.112.0 - avcodec_thread_init()
Deprecate avcodec_thread_init()/avcodec_thread_free() use; instead
set thread_count before calling avcodec_open.

2011-02-09 - 37b00b4 - lavc 52.111.0 - threading API
2011-02-09 - 778b08a - lavc 52.111.0 - threading API
Add CODEC_CAP_FRAME_THREADS with new restrictions on get_buffer()/
release_buffer()/draw_horiz_band() callbacks for appropriate codecs.
Add thread_type and active_thread_type fields to AVCodecContext.
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 1.1.12
PROJECT_NUMBER =

# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -92,9 +92,9 @@ uninstall: uninstall-man
uninstall-man:
$(RM) $(addprefix "$(MANDIR)/man1/",$(ALLMANPAGES))

clean:: docclean
docclean: clean

docclean:
clean::
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 doc/*.3 $(CLEANSUFFIXES:%=doc/%) doc/avoptions_*.texi
$(RM) -r doc/doxy/html
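With the rules above, the generated documentation outputs are removed through the doc cleaning target; for illustration (run from the top of the source tree):

    make docclean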
@@ -1,7 +1,7 @@
Release Notes
=============

* 1.1 "Fire Flower" January, 2013
* 0.10 "Freedom" January, 2012


General notes
@@ -20,6 +20,3 @@ compiler. Since MSVC does not support C99 features used extensively by FFmpeg,
this has been accomplished using a converter that turns C99 code to C89. See the
platform-specific documentation for more detailed documentation on building
FFmpeg with MSVC.

The used output sample format for several audio decoders has changed, make
sure you always check/use AVCodecContext.sample_fmt or AVFrame.format.
@@ -60,78 +60,6 @@ This decoder generates wave patterns according to predefined sequences. Its
use is purely internal and the format of the data it accepts is not publicly
documented.

@section libcelt

libcelt decoder wrapper

libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
Requires the presence of the libcelt headers and library during configuration.
You need to explicitly configure the build with @code{--enable-libcelt}.

@section libgsm

libgsm decoder wrapper

libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
the presence of the libgsm headers and library during configuration. You need
to explicitly configure the build with @code{--enable-libgsm}.

This decoder supports both the ordinary GSM and the Microsoft variant.

@section libilbc

libilbc decoder wrapper

libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
audio codec. Requires the presence of the libilbc headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libilbc}.

@subsection Options

The following option is supported by the libilbc wrapper.

@table @option
@item enhance

Enable the enhancement of the decoded audio when set to 1. The default
value is 0 (disabled).

@end table

@section libopencore-amrnb

libopencore-amrnb decoder wrapper

libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
Narrowband audio codec. Using it requires the presence of the
libopencore-amrnb headers and library during configuration. You need to
explicitly configure the build with @code{--enable-libopencore-amrnb}.

An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
without this library.

@section libopencore-amrwb

libopencore-amrwb decoder wrapper.

libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
Wideband audio codec. Using it requires the presence of the
libopencore-amrwb headers and library during configuration. You need to
explicitly configure the build with @code{--enable-libopencore-amrwb}.

An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
without this library.

@section libopus

libopus decoder wrapper.

libopus allows libavcodec to decode the Opus Interactive Audio Codec.
Requires the presence of the libopus headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libopus}.

@c man end AUDIO DECODERS

@chapter Subtitles Decoders
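As a usage illustration for the decoder wrappers documented above, enabling one of them at build time and forcing it at decode time looks roughly like this (libopus is taken from the last section; the file names are placeholders):

    ./configure --enable-libopus
    ffmpeg -c:a libopus -i input.opus output.wav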
@@ -190,8 +190,8 @@ set shiftwidth=4
set softtabstop=4
set cindent
set cinoptions=(0
" Allow tabs in Makefiles.
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
" allow tabs in Makefiles
autocmd FileType make set noexpandtab shiftwidth=8 softtabstop=8
" Trailing whitespace and tabs are forbidden, so highlight them.
highlight ForbiddenWhitespace ctermbg=red guibg=red
match ForbiddenWhitespace /\s\+$\|\t/
@@ -25,95 +25,6 @@ enabled encoders.
A description of some of the currently available audio encoders
follows.

@anchor{aacenc}
@section aac

Advanced Audio Coding (AAC) encoder.

This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
low complexity (AAC-LC) profile is supported. To use this encoder, you must set
the @option{strict} option to @samp{experimental} or lower.

As this encoder is experimental, unexpected behavior may exist from time to
time. For a more stable AAC encoder, see @ref{libvo-aacenc}. However, be warned
that some users report worse quality from it.

@c Comment this out until somebody writes the respective documentation.
@c See also @ref{libfaac}, @ref{libaacplus}, and @ref{libfdk-aac-enc}.

@subsection Options

@table @option
@item b
Set bit rate in bits/s. Setting this automatically activates constant bit rate
(CBR) mode.

@item q
Set quality for variable bit rate (VBR) mode. This option is valid only using
the @command{ffmpeg} command-line tool. For library interface users, use
@option{global_quality}.

@item stereo_mode
Set stereo encoding mode. Possible values:

@table @samp
@item auto
Automatically selected by the encoder.

@item ms_off
Disable middle/side encoding. This is the default.

@item ms_force
Force middle/side encoding.
@end table

@item aac_coder
Set AAC encoder coding method. Possible values:

@table @samp
@item 0
FAAC-inspired method.

This method is a simplified reimplementation of the method used in FAAC, which
sets thresholds proportional to the band energies, and then decreases all the
thresholds with quantizer steps to find the appropriate quantization with
distortion below threshold band by band.

The quality of this method is comparable to the two loop searching method
described below, but somewhat better and slower.

@item 1
Average noise to mask ratio (ANMR) trellis-based solution.

This has the best theoretical quality of all the coding methods, but at the
cost of the slowest speed.

@item 2
Two loop searching (TLS) method.

This method first sets quantizers depending on band thresholds and then tries
to find an optimal combination by adding or subtracting a specific value from
all quantizers and adjusting some individual quantizer a little.

This method produces quality similar to the FAAC method and is the default.

@item 3
Constant quantizer method.

This method sets a constant quantizer for all bands. This is the fastest of all
the methods, yet produces the worst quality.

@end table

@end table

@subsection Tips and Tricks

According to some reports
(e.g. @url{http://d.hatena.ne.jp/kamedo2/20120729/1343545890}), setting the
@option{cutoff} option to 15000 Hz greatly improves the quality of the
output. As a result, we encourage you to do the same.
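A hypothetical invocation combining the options described above (the bitrate, cutoff value and file names are placeholders; @option{strict} must be relaxed as noted):

    ffmpeg -i input.wav -c:a aac -strict experimental -b:a 128k -cutoff 15000 output.m4a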
@section ac3 and ac3_fixed
|
||||
|
||||
AC-3 audio encoders.
|
||||
@@ -501,279 +412,6 @@ Selected by Encoder (default)

@end table

@section libmp3lame

LAME (Lame Ain't an MP3 Encoder) MP3 encoder wrapper

Requires the presence of the libmp3lame headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libmp3lame}.

@subsection Options

The following options are supported by the libmp3lame wrapper. The
@command{lame}-equivalent options are listed in parentheses.

@table @option
@item b (@emph{-b})
Set bitrate expressed in bits/s for CBR. The LAME @code{bitrate} is
expressed in kilobits/s.

@item q (@emph{-V})
Set constant quality setting for VBR. This option is valid only when
using the @command{ffmpeg} command-line tool. For library interface
users, use @option{global_quality}.

@item compression_level (@emph{-q})
Set algorithm quality. Valid arguments are integers in the 0-9 range,
with 0 meaning highest quality but slowest, and 9 meaning fastest
while producing the worst quality.

@item reservoir
Enable use of the bit reservoir when set to 1. Default value is 1. LAME
has this enabled by default, but it can be overridden by using the
@option{--nores} option.

@end table

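For instance (a sketch; the file names and VBR level are arbitrary), a VBR
encode roughly equivalent to @command{lame -V2}:

@example
ffmpeg -i input.wav -c:a libmp3lame -q:a 2 output.mp3
@end example
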
@section libopencore-amrnb

OpenCORE Adaptive Multi-Rate Narrowband encoder.

Requires the presence of the libopencore-amrnb headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libopencore-amrnb --enable-version3}.

This is a mono-only encoder. Officially it only supports 8000Hz sample rate,
but you can override it by setting @option{strict} to @samp{unofficial} or
lower.

@subsection Options

@table @option

@item b
Set bitrate in bits per second. Only the following bitrates are supported,
otherwise libavcodec will round to the nearest valid bitrate.

@table @option
@item 4750
@item 5150
@item 5900
@item 6700
@item 7400
@item 7950
@item 10200
@item 12200
@end table

@item dtx
Allow discontinuous transmission (generate comfort noise) when set to 1. The
default value is 0 (disabled).

@end table

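A minimal sketch (file names are placeholders); the input is converted to mono
8000 Hz as required by the encoder:

@example
ffmpeg -i input.wav -ar 8000 -ac 1 -c:a libopencore_amrnb -b:a 7400 output.amr
@end example
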
@section libtwolame

TwoLAME MP2 encoder wrapper

Requires the presence of the libtwolame headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libtwolame}.

@subsection Options

The following options are supported by the libtwolame wrapper. The
@command{twolame}-equivalent options follow the FFmpeg ones and are in
parentheses.

@table @option
@item b (@emph{-b})
Set bitrate expressed in bits/s for CBR. The @command{twolame} @option{b}
option is expressed in kilobits/s. Default value is 128k.

@item q (@emph{-V})
Set quality for experimental VBR support. The maximum value range is
from -50 to 50; the useful range is from -10 to 10. The higher the
value, the better the quality. This option is valid only when using the
@command{ffmpeg} command-line tool. For library interface users,
use @option{global_quality}.

@item mode (@emph{--mode})
Set the mode of the resulting audio. Possible values:

@table @samp
@item auto
Choose mode automatically based on the input. This is the default.
@item stereo
Stereo
@item joint_stereo
Joint stereo
@item dual_channel
Dual channel
@item mono
Mono
@end table

@item psymodel (@emph{--psyc-mode})
Set the psychoacoustic model to use in encoding. The argument must be
an integer between -1 and 4, inclusive. The higher the value, the
better the quality. The default value is 3.

@item energy_levels (@emph{--energy})
Enable energy levels extensions when set to 1. The default value is
0 (disabled).

@item error_protection (@emph{--protect})
Enable CRC error protection when set to 1. The default value is 0
(disabled).

@item copyright (@emph{--copyright})
Set MPEG audio copyright flag when set to 1. The default value is 0
(disabled).

@item original (@emph{--original})
Set MPEG audio original flag when set to 1. The default value is 0
(disabled).

@end table

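For example (a sketch with placeholder file names), CBR MP2 at 192 kb/s in
joint stereo:

@example
ffmpeg -i input.wav -c:a libtwolame -b:a 192k -mode joint_stereo output.mp2
@end example
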
@anchor{libvo-aacenc}
@section libvo-aacenc

VisualOn AAC encoder

Requires the presence of the libvo-aacenc headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libvo-aacenc --enable-version3}.

This encoder is considered to be worse than the
@ref{aacenc,,native experimental FFmpeg AAC encoder}, according to
multiple sources.

@subsection Options

The VisualOn AAC encoder only supports encoding AAC-LC and up to 2
channels. It is also CBR-only.

@table @option

@item b
Set bit rate in bits/s.

@end table

@section libvo-amrwbenc

VisualOn Adaptive Multi-Rate Wideband encoder

Requires the presence of the libvo-amrwbenc headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libvo-amrwbenc --enable-version3}.

This is a mono-only encoder. Officially it only supports 16000Hz sample
rate, but you can override it by setting @option{strict} to
@samp{unofficial} or lower.

@subsection Options

@table @option

@item b
Set bitrate in bits/s. Only the following bitrates are supported, otherwise
libavcodec will round to the nearest valid bitrate.

@table @samp
@item 6600
@item 8850
@item 12650
@item 14250
@item 15850
@item 18250
@item 19850
@item 23050
@item 23850
@end table

@item dtx
Allow discontinuous transmission (generate comfort noise) when set to 1. The
default value is 0 (disabled).

@end table

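A sketch (placeholder file names); the input is converted to the mono 16000 Hz
layout the encoder expects:

@example
ffmpeg -i input.wav -ar 16000 -ac 1 -c:a libvo_amrwbenc -b:a 23850 output.amr
@end example
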
@section libopus

libopus Opus Interactive Audio Codec encoder wrapper.

Requires the presence of the libopus headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libopus}.

@subsection Option Mapping

Most libopus options are modeled after the @command{opusenc} utility from
opus-tools. The following is an option mapping chart describing the options
supported by the libopus wrapper, with their @command{opusenc} equivalents
in parentheses.

@table @option

@item b (@emph{bitrate})
Set the bit rate in bits/s. FFmpeg's @option{b} option is
expressed in bits/s, while @command{opusenc}'s @option{bitrate} is in
kilobits/s.

@item vbr (@emph{vbr}, @emph{hard-cbr}, and @emph{cvbr})
Set VBR mode. The FFmpeg @option{vbr} option has the following
valid arguments, with their @command{opusenc} equivalent options
in parentheses:

@table @samp
@item off (@emph{hard-cbr})
Use constant bit rate encoding.

@item on (@emph{vbr})
Use variable bit rate encoding (the default).

@item constrained (@emph{cvbr})
Use constrained variable bit rate encoding.
@end table

@item compression_level (@emph{comp})
Set encoding algorithm complexity. Valid options are integers in
the 0-10 range. 0 gives the fastest encoding but lower quality, while 10
gives the highest quality but the slowest encoding. The default is 10.

@item frame_duration (@emph{framesize})
Set maximum frame size, or duration of a frame in milliseconds. The
argument must be exactly one of the following: 2.5, 5, 10, 20, 40, 60. Smaller
frame sizes achieve lower latency but less quality at a given bitrate.
Sizes greater than 20ms are only interesting at fairly low bitrates.
The default is 20ms.

@item packet_loss (@emph{expect-loss})
Set expected packet loss percentage. The default is 0.

@item application (N.A.)
Set intended application type. Valid options are listed below:

@table @samp
@item voip
Favor improved speech intelligibility.
@item audio
Favor faithfulness to the input (the default).
@item lowdelay
Restrict to only the lowest delay modes.
@end table

@item cutoff (N.A.)
Set cutoff bandwidth in Hz. The argument must be exactly one of the
following: 4000, 6000, 8000, 12000, or 20000, corresponding to
narrowband, mediumband, wideband, super wideband, and fullband
respectively. The default is 0 (cutoff disabled).

@end table

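As an illustration (a sketch; file names and rates are placeholders), a
96 kb/s constrained-VBR encode with 20 ms frames:

@example
ffmpeg -i input.wav -c:a libopus -b:a 96k -vbr constrained -frame_duration 20 -f ogg output.opus
@end example
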
@c man end AUDIO ENCODERS

@chapter Video Encoders

@@ -995,117 +633,4 @@ ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
For more information about libx264 and the supported options see:
@url{http://www.videolan.org/developers/x264.html}

@section libxvid

Xvid MPEG-4 Part 2 encoder wrapper.

This encoder requires the presence of the libxvidcore headers and library
during configuration. You need to explicitly configure the build with
@code{--enable-libxvid --enable-gpl}.

The native @code{mpeg4} encoder supports the MPEG-4 Part 2 format, so
users can encode to this format without this library.

@subsection Options

The following options are supported by the libxvid wrapper. Some of
the following options are listed but are not documented, and
correspond to shared codec options. See @ref{codec-options,,the Codec
Options chapter} for their documentation. The other shared options
which are not listed have no effect for the libxvid encoder.

@table @option
@item b

@item g

@item qmin

@item qmax

@item mpeg_quant

@item threads

@item bf

@item b_qfactor

@item b_qoffset

@item flags
Set specific encoding flags. Possible values:

@table @samp

@item mv4
Use four motion vectors per macroblock.

@item aic
Enable high quality AC prediction.

@item gray
Only encode grayscale.

@item gmc
Enable the use of global motion compensation (GMC).

@item qpel
Enable quarter-pixel motion compensation.

@item cgop
Enable closed GOP.

@item global_header
Place global headers in extradata instead of every keyframe.

@end table

@item trellis

@item me_method
Set motion estimation method. Possible values in decreasing order of
speed and increasing order of quality:

@table @samp
@item zero
Use no motion estimation (default).

@item phods
@item x1
@item log
Enable advanced diamond zonal search for 16x16 blocks and half-pixel
refinement for 16x16 blocks. @samp{x1} and @samp{log} are aliases for
@samp{phods}.

@item epzs
Enable all of the things described above, plus advanced diamond zonal
search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
estimation on chroma planes.

@item full
Enable all of the things described above, plus extended 16x16 and 8x8
blocks search.
@end table

@item mbd
Set macroblock decision algorithm. Possible values in increasing
order of quality:

@table @samp
@item simple
Use the macroblock comparing function algorithm (default).

@item bits
Enable rate distortion-based half pixel and quarter pixel refinement for
16x16 blocks.

@item rd
Enable all of the things described above, plus rate distortion-based
half pixel and quarter pixel refinement for 8x8 blocks, and rate
distortion-based search using square pattern.
@end table

@end table

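A sketch (bitrate and file names are placeholders) enabling quarter-pixel
motion compensation and closed GOPs:

@example
ffmpeg -i input.avi -c:v libxvid -b:v 1200k -flags +qpel+cgop output.avi
@end example
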
@c man end VIDEO ENCODERS

@@ -314,7 +314,7 @@ int main (int argc, char **argv)
    if (audio_stream) {
        const char *fmt;

        if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt)) < 0)
        if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt) < 0))
            goto end;
        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
@@ -200,7 +200,7 @@ int main(int argc, char **argv)
        fwrite(dst_data[0], 1, dst_bufsize, dst_file);
    } while (t < 10);

    if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
    if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt) < 0))
        goto end;
    fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
            "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
@@ -17,7 +17,6 @@ the libavcodec library.

@c man end DESCRIPTION

@anchor{codec-options}
@chapter Codec Options
@c man begin CODEC OPTIONS

@@ -140,12 +140,6 @@ Use wallclock as timestamps.
@item avoid_negative_ts @var{integer} (@emph{output})
Shift timestamps to make them positive. A value of 1 enables shifting, 0
disables it; the default of -1 enables it when required by the target format.

@item skip_initial_bytes @var{integer} (@emph{input})
Set the number of initial bytes to skip. Default is 0.

@item correct_ts_overflow @var{integer} (@emph{input})
Correct single timestamp overflows if set to 1. Default is 1.
@end table

@c man end FORMAT OPTIONS

@@ -978,7 +978,7 @@ ffmpeg -filter_complex 'color=red' -t 5 out.mkv

As a special exception, you can use a bitmap subtitle stream as input: it
will be converted into a video with the same size as the largest video in
the file, or 720x576 if no video is present. Note that this is an
the file, or 720×576 if no video is present. Note that this is an
experimental and temporary solution. It will be removed once libavfilter has
proper support for subtitles.

@@ -3,10 +3,10 @@

Filtering in FFmpeg is enabled through the libavfilter library.

In libavfilter, a filter can have multiple inputs and multiple
outputs.
To illustrate the sorts of things that are possible, we consider the
following filtergraph.
In libavfilter, it is possible for filters to have multiple inputs and
multiple outputs.
To illustrate the sorts of things that are possible, we can
use a complex filter graph. For example, the following one:

@example
input --> split ---------------------> overlay --> output
@@ -15,32 +15,25 @@ input --> split ---------------------> overlay --> output
            +-----> crop --> vflip -------+
@end example

This filtergraph splits the input stream in two streams, sends one
stream through the crop filter and the vflip filter before merging it
back with the other stream by overlaying it on top. You can use the
following command to achieve this:
splits the stream in two streams, sends one stream through the crop filter
and the vflip filter before merging it back with the other stream by
overlaying it on top. You can use the following command to achieve this:

@example
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
ffmpeg -i input -vf "[in] split [T1], [T2] overlay=0:H/2 [out]; [T1] crop=iw:ih/2:0:ih/2, vflip [T2]" output
@end example

The result will be that in output the top half of the video is mirrored
onto the bottom half.

Filters in the same linear chain are separated by commas, and distinct
linear chains of filters are separated by semicolons. In our example,
@var{crop,vflip} are in one linear chain, @var{split} and
@var{overlay} are separately in another. The points where the linear
chains join are labelled by names enclosed in square brackets. In the
example, the split filter generates two outputs that are associated to
the labels @var{[main]} and @var{[tmp]}.

The stream sent to the second output of @var{split}, labelled as
@var{[tmp]}, is processed through the @var{crop} filter, which crops
away the lower half part of the video, and then vertically flipped. The
@var{overlay} filter takes in input the first unchanged output of the
split filter (which was labelled as @var{[main]}), and overlay on its
lower half the output generated by the @var{crop,vflip} filterchain.
Filters are loaded using the @var{-vf} or @var{-af} option passed to
@command{ffmpeg} or to @command{ffplay}. Filters in the same linear
chain are separated by commas. In our example, @var{split,
overlay} are in one linear chain, and @var{crop, vflip} are in
another. The points where the linear chains join are labeled by names
enclosed in square brackets. In our example, that is @var{[T1]} and
@var{[T2]}. The special labels @var{[in]} and @var{[out]} are the points
where video is input and output.

Some filters take in input a list of parameters: they are specified
after the filter name and an equal sign, and are separated from each other
@@ -186,7 +179,7 @@ Follows a BNF description for the filtergraph syntax:
@var{LINKLABEL} ::= "[" @var{NAME} "]"
@var{LINKLABELS} ::= @var{LINKLABEL} [@var{LINKLABELS}]
@var{FILTER_ARGUMENTS} ::= sequence of chars (eventually quoted)
@var{FILTER} ::= [@var{LINKLABELS}] @var{NAME} ["=" @var{FILTER_ARGUMENTS}] [@var{LINKLABELS}]
@var{FILTER} ::= [@var{LINKNAMES}] @var{NAME} ["=" @var{ARGUMENTS}] [@var{LINKNAMES}]
@var{FILTERCHAIN} ::= @var{FILTER} [,@var{FILTERCHAIN}]
@var{FILTERGRAPH} ::= [sws_flags=@var{flags};] @var{FILTERCHAIN} [;@var{FILTERGRAPH}]
@end example
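
To make the grammar concrete (an illustrative string, not part of this patch),
the graph

@example
sws_flags=bicubic; [in] scale=640:360, hflip [out]
@end example

is a single FILTERCHAIN made of two FILTERs joined by a comma, with the link
labels @var{[in]} and @var{[out]} and a leading @code{sws_flags} declaration.
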
@@ -4156,7 +4149,7 @@ Alternatively, the options can be specified as a flat string:
@var{layout}[:@var{nb_frames}[:@var{margin}[:@var{padding}]]]

For example, produce 8x8 PNG tiles of all keyframes (@option{-skip_frame
For example, produce 8×8 PNG tiles of all keyframes (@option{-skip_frame
nokey}) in a movie:
@example
ffmpeg -skip_frame nokey -i file.avi -vf 'scale=128:72,tile=8x8' -an -vsync 0 keyframes%03d.png
@@ -5550,7 +5543,7 @@ Activate unsafe mode: do not fail if segments have a different format.
The filter has @var{v}+@var{a} outputs: first @var{v} video outputs, then
@var{a} audio outputs.

There are @var{n}x(@var{v}+@var{a}) inputs: first the inputs for the first
There are @var{n}×(@var{v}+@var{a}) inputs: first the inputs for the first
segment, in the same order as the outputs, then the inputs for the second
segment, etc.

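As a usage sketch (file names are placeholders), concatenating two files that
each contribute one video and one audio stream (@var{n}=2, @var{v}=1,
@var{a}=1):

@example
ffmpeg -i part1.mp4 -i part2.mp4 -filter_complex '[0:v][0:a][1:v][1:a] concat=n=2:v=1:a=1 [v][a]' -map '[v]' -map '[a]' output.mp4
@end example
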
@@ -24,7 +24,7 @@ instructions. To enable using OpenJPEG in FFmpeg, pass @code{--enable-libopenjp
@file{./configure}.


@section OpenCORE, VisualOn, and Fraunhofer libraries
@section OpenCORE and VisualOn libraries

Spun off Google Android sources, OpenCore, VisualOn and Fraunhofer
libraries provide encoders for a number of audio codecs.
@@ -32,14 +32,9 @@ libraries provide encoders for a number of audio codecs.
@float NOTE
OpenCORE and VisualOn libraries are under the Apache License 2.0
(see @url{http://www.apache.org/licenses/LICENSE-2.0} for details), which is
incompatible to the LGPL version 2.1 and GPL version 2. You have to
incompatible with the LGPL version 2.1 and GPL version 2. You have to
upgrade FFmpeg's license to LGPL version 3 (or if you have enabled
GPL components, GPL version 3) by passing @code{--enable-version3} to configure in
order to use it.

The Fraunhofer AAC library is licensed under a license incompatible to the GPL
and is not known to be compatible to the LGPL. Therefore, you have to pass
@code{--enable-nonfree} to configure to use it.
GPL components, GPL version 3) to use it.
@end float

@subsection OpenCORE AMR

@@ -24,7 +24,7 @@ a mail for every change to every issue.
The subscription URL for the ffmpeg-trac list is:
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
The URL of the web interface of the tracker is:
http(s)://trac.ffmpeg.org
http(s)://ffmpeg.org/trac/ffmpeg

Type:
-----

@@ -18,23 +18,6 @@ enabled muxers.

A description of some of the currently available muxers follows.

@anchor{aiff}
@section aiff

Audio Interchange File Format muxer.

It accepts the following options:

@table @option
@item write_id3v2
Enable ID3v2 tags writing when set to 1. Default is 0 (disabled).

@item id3v2_version
Select ID3v2 version to write. Currently only version 3 and 4 (aka.
ID3v2.3 and ID3v2.4) are supported. The default is version 4.

@end table

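As a sketch (the file names and tag value are placeholders), writing ID3v2.3
tags into an AIFF file would look like:

@example
ffmpeg -i input.wav -write_id3v2 1 -id3v2_version 3 -metadata title="Example" output.aif
@end example
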
@anchor{crc}
@section crc

@@ -728,11 +711,10 @@ Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
@end example

To attach a picture to an mp3 file select both the audio and the picture stream
with @code{map}:
Attach a picture to an mp3:
@example
ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
-metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
ffmpeg -i input.mp3 -i cover.png -c copy -metadata:s:v title="Album cover"
-metadata:s:v comment="Cover (Front)" out.mp3
@end example

@c man end MUXERS

@@ -51,9 +51,8 @@ The toolchain provided with Xcode is sufficient to build the basic
unacelerated code.

Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
@url{https://github.com/FFmpeg/gas-preprocessor} or
@url{http://github.com/yuvi/gas-preprocessor} to build the optimized
assembler functions. Put the Perl script somewhere
assembler functions. Just download the Perl script and put it somewhere
in your PATH, FFmpeg's configure will pick it up automatically.

Mac OS X on amd64 and x86 requires @command{yasm} to build most of the
@@ -115,7 +114,7 @@ wrapper.
You will need the following prerequisites:

@itemize
@item @uref{http://download.videolan.org/pub/contrib/c99-to-c89/, C99-to-C89 Converter & Wrapper}
@item @uref{https://github.com/libav/c99-to-c89/, C99-to-C89 Converter & Wrapper}
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
@item @uref{http://www.mingw.org/, MSYS}
@item @uref{http://yasm.tortall.net/, YASM}

@@ -121,7 +121,7 @@ INF: while(<$inf>) {
$chapters{$chapter_name} .= postprocess($chapter) if ($chapter_name);

# start new chapter
$chapter_name = $1, push (@chapters_sequence, $chapter_name) unless $skipping;
$chapter_name = $1, push (@chapters_sequence, $chapter_name);
$chapters{$chapter_name} = "" unless exists $chapters{$chapter_name};
$chapter = "";
$output = 1;
@@ -169,7 +169,7 @@ INF: while(<$inf>) {
} elsif ($ended =~ /^(?:example|smallexample|display)$/) {
$shift = "";
$_ = ""; # need a paragraph break
} elsif ($ended =~ /^(?:itemize|enumerate|(?:multi|[fv])?table)$/) {
} elsif ($ended =~ /^(?:itemize|enumerate|[fv]?table)$/) {
$_ = "\n=back\n";
$ic = pop @icstack;
} else {
@@ -269,7 +269,7 @@ INF: while(<$inf>) {
$endw = "enumerate";
};

/^\@((?:multi|[fv])?table)\s+(\@[a-z]+)/ and do {
/^\@([fv]?table)\s+(\@[a-z]+)/ and do {
push @endwstack, $endw;
push @icstack, $ic;
$endw = $1;
@@ -278,7 +278,6 @@ INF: while(<$inf>) {
$ic =~ s/\@(?:code|kbd)/C/;
$ic =~ s/\@(?:dfn|var|emph|cite|i)/I/;
$ic =~ s/\@(?:file)/F/;
$ic =~ s/\@(?:columnfractions)//;
$_ = "\n=over 4\n";
};

@@ -289,21 +288,6 @@ INF: while(<$inf>) {
$_ = ""; # need a paragraph break
};

/^\@item\s+(.*\S)\s*$/ and $endw eq "multitable" and do {
my $columns = $1;
$columns =~ s/\@tab/ : /;

$_ = "\n=item B<". $columns .">\n";
};

/^\@tab\s+(.*\S)\s*$/ and $endw eq "multitable" and do {
my $columns = $1;
$columns =~ s/\@tab/ : /;

$_ = " : ". $columns;
$chapter =~ s/\n+\s+$//;
};

/^\@itemx?\s*(.+)?$/ and do {
if (defined $1) {
# Entity escapes prevent munging by the <> processing below.
@@ -377,7 +361,6 @@ sub postprocess
s/\(?\@xref\{(?:[^\}]*)\}(?:[^.<]|(?:<[^<>]*>))*\.\)?//g;
s/\s+\(\@pxref\{(?:[^\}]*)\}\)//g;
s/;\s+\@pxref\{(?:[^\}]*)\}//g;
s/\@ref\{(?:[^,\}]*,)(?:[^,\}]*,)([^,\}]*).*\}/$1/g;
s/\@ref\{([^\}]*)\}/$1/g;
s/\@noindent\s*//g;
s/\@refill//g;

ffmpeg.c
@@ -152,8 +152,6 @@ static struct termios oldtty;
|
||||
static int restore_tty;
|
||||
#endif
|
||||
|
||||
static void free_input_threads(void);
|
||||
|
||||
|
||||
/* sub2video hack:
|
||||
Convert subtitles to video with alpha to insert them in filter graphs.
|
||||
@@ -443,9 +441,6 @@ static void exit_program(void)
|
||||
av_freep(&output_streams[i]->logfile_prefix);
|
||||
av_freep(&output_streams[i]);
|
||||
}
|
||||
#if HAVE_PTHREADS
|
||||
free_input_threads();
|
||||
#endif
|
||||
for (i = 0; i < nb_input_files; i++) {
|
||||
avformat_close_input(&input_files[i]->ctx);
|
||||
av_freep(&input_files[i]);
|
||||
@@ -476,6 +471,7 @@ static void exit_program(void)
|
||||
if (received_sigterm) {
|
||||
av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
|
||||
(int) received_sigterm);
|
||||
exit (255);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -578,25 +574,6 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
||||
bsfc = bsfc->next;
|
||||
}
|
||||
|
||||
if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
|
||||
ost->last_mux_dts != AV_NOPTS_VALUE &&
|
||||
pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
|
||||
av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
|
||||
"%d:%d; previous: %"PRId64", current: %"PRId64"; ",
|
||||
ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
|
||||
if (exit_on_error) {
|
||||
av_log(NULL, AV_LOG_FATAL, "aborting.\n");
|
||||
exit(1);
|
||||
}
|
||||
av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
|
||||
"in incorrect timestamps in the output file.\n",
|
||||
ost->last_mux_dts + 1);
|
||||
pkt->dts = ost->last_mux_dts + 1;
|
||||
if (pkt->pts != AV_NOPTS_VALUE)
|
||||
pkt->pts = FFMAX(pkt->pts, pkt->dts);
|
||||
}
|
||||
ost->last_mux_dts = pkt->dts;
|
||||
|
||||
pkt->stream_index = ost->index;
|
||||
|
||||
if (debug_ts) {
|
||||
@@ -1015,19 +992,6 @@ static void do_video_stats(OutputStream *ost, int frame_size)
|
||||
}
|
||||
}
|
||||
|
||||
static void finish_output_stream(OutputStream *ost)
|
||||
{
|
||||
OutputFile *of = output_files[ost->file_index];
|
||||
int i;
|
||||
|
||||
ost->finished = 1;
|
||||
|
||||
if (of->shortest) {
|
||||
for (i = 0; i < of->ctx->nb_streams; i++)
|
||||
output_streams[of->ost_index + i]->finished = 1;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get and encode new output from any of the filtergraphs, without causing
|
||||
* activity.
|
||||
@@ -1691,7 +1655,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
|
||||
|
||||
if (!frame_sample_aspect->num)
|
||||
*frame_sample_aspect = ist->st->sample_aspect_ratio;
|
||||
if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed && !do_deinterlace) {
|
||||
if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {
|
||||
FrameBuffer *buf = decoded_frame->opaque;
|
||||
AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
|
||||
decoded_frame->data, decoded_frame->linesize,
|
||||
@@ -2136,12 +2100,6 @@ static int transcode_init(void)
|
||||
codec->time_base.num *= icodec->ticks_per_frame;
|
||||
}
|
||||
}
|
||||
if ( codec->codec_tag == AV_RL32("tmcd")
|
||||
&& icodec->time_base.num < icodec->time_base.den
|
||||
&& icodec->time_base.num > 0
|
||||
&& 121LL*icodec->time_base.num > icodec->time_base.den) {
|
||||
codec->time_base = icodec->time_base;
|
||||
}
|
||||
|
||||
if(ost->frame_rate.num)
|
||||
codec->time_base = av_inv_q(ost->frame_rate);
|
||||
@@ -2796,7 +2754,7 @@ static int process_input(int file_index)
|
||||
|
||||
if (ost->source_index == ifile->ist_index + i &&
|
||||
(ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
|
||||
finish_output_stream(ost);
|
||||
close_output_stream(ost);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3256,6 +3214,6 @@ int main(int argc, char **argv)
|
||||
printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
|
||||
}
|
||||
|
||||
exit(received_nb_signals ? 255 : 0);
|
||||
exit(0);
|
||||
return 0;
|
||||
}
|
||||
|
ffmpeg.h
@@ -300,8 +300,6 @@ typedef struct OutputStream {
|
||||
/* pts of the first frame encoded for this stream, used for limiting
|
||||
* recording time */
|
||||
int64_t first_pts;
|
||||
/* dts of the last packet sent to the muxer */
|
||||
int64_t last_mux_dts;
|
||||
AVBitStreamFilterContext *bitstream_filters;
|
||||
AVCodec *enc;
|
||||
int64_t max_frames;
|
||||
|
@@ -41,15 +41,12 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFo
|
||||
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
|
||||
int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
|
||||
enum AVPixelFormat best= AV_PIX_FMT_NONE;
|
||||
const enum AVPixelFormat mjpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
|
||||
const enum AVPixelFormat ljpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
|
||||
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
|
||||
|
||||
if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
|
||||
if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
|
||||
p = mjpeg_formats;
|
||||
p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
|
||||
} else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
|
||||
p =ljpeg_formats;
|
||||
p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
|
||||
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
|
||||
}
|
||||
}
|
||||
for (; *p != AV_PIX_FMT_NONE; p++) {
|
||||
|
@@ -1021,7 +1021,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
input_streams[source_index]->discard = 0;
|
||||
input_streams[source_index]->st->discard = AVDISCARD_NONE;
|
||||
}
|
||||
ost->last_mux_dts = AV_NOPTS_VALUE;
|
||||
|
||||
return ost;
|
||||
}
|
||||
@@ -1144,6 +1143,8 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
if (p) p++;
|
||||
}
|
||||
video_enc->rc_override_count = i;
|
||||
if (!video_enc->rc_initial_buffer_occupancy)
|
||||
video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
|
||||
video_enc->intra_dc_precision = intra_dc_precision - 8;
|
||||
|
||||
if (do_psnr)
|
||||
@@ -1154,11 +1155,9 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
if (do_pass) {
|
||||
if (do_pass & 1) {
|
||||
video_enc->flags |= CODEC_FLAG_PASS1;
|
||||
av_dict_set(&ost->opts, "flags", "+pass1", AV_DICT_APPEND);
|
||||
}
|
||||
if (do_pass & 2) {
|
||||
video_enc->flags |= CODEC_FLAG_PASS2;
|
||||
av_dict_set(&ost->opts, "flags", "+pass2", AV_DICT_APPEND);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2151,7 +2150,7 @@ static int opt_channel_layout(void *optctx, const char *opt, const char *arg)
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
|
||||
ret = opt_default_new(o, opt, layout_str);
|
||||
ret = opt_default(NULL, opt, layout_str);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@@ -328,14 +328,6 @@ static AVLFG random_state;
|
||||
|
||||
static FILE *logfile = NULL;
|
||||
|
||||
static void htmlstrip(char *s) {
|
||||
while (s && *s) {
|
||||
s += strspn(s, "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ,. ");
|
||||
if (*s)
|
||||
*s++ = '?';
|
||||
}
|
||||
}
|
||||
|
||||
static int64_t ffm_read_write_index(int fd)
|
||||
{
|
||||
uint8_t buf[8];
|
||||
@@ -1895,7 +1887,6 @@ static int http_parse_request(HTTPContext *c)
|
||||
send_error:
|
||||
c->http_error = 404;
|
||||
q = c->buffer;
|
||||
htmlstrip(msg);
|
||||
snprintf(q, c->buffer_size,
|
||||
"HTTP/1.0 404 Not Found\r\n"
|
||||
"Content-type: text/html\r\n"
|
||||
|
@@ -44,7 +44,7 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
|
||||
static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int *got_frame, AVPacket *avpkt)
|
||||
{
|
||||
int line = 0, ret;
|
||||
int line = 0;
|
||||
const int width = avctx->width;
|
||||
AVFrame *pic = avctx->coded_frame;
|
||||
uint16_t *y, *u, *v;
|
||||
@@ -65,8 +65,8 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
|
||||
pic->reference = 0;
|
||||
if ((ret = ff_get_buffer(avctx, pic)) < 0)
|
||||
return ret;
|
||||
if (ff_get_buffer(avctx, pic) < 0)
|
||||
return AVERROR_INVALIDDATA;;
|
||||
|
||||
y = (uint16_t *)pic->data[0];
|
||||
u = (uint16_t *)pic->data[1];
|
||||
|
libavcodec/4xm.c
@@ -328,12 +328,12 @@ static inline void mcdc(uint16_t *dst, const uint16_t *src, int log2w,
|
||||
}
|
||||
break;
|
||||
default:
|
||||
av_assert0(0);
|
||||
av_assert2(0);
|
||||
}
|
||||
}
|
||||
|
||||
static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
|
||||
int log2w, int log2h, int stride)
|
||||
static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
|
||||
int log2w, int log2h, int stride)
|
||||
{
|
||||
const int index = size2index[log2h][log2w];
|
||||
const int h = 1 << log2h;
|
||||
@@ -342,72 +342,57 @@ static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
|
||||
BLOCK_TYPE_VLC_BITS, 1);
|
||||
uint16_t *start = (uint16_t *)f->last_picture.data[0];
|
||||
uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w);
|
||||
int ret;
|
||||
|
||||
av_assert0(code >= 0 && code <= 6 && log2w >= 0);
|
||||
av_assert2(code >= 0 && code <= 6);
|
||||
|
||||
if (code == 0) {
|
||||
if (bytestream2_get_bytes_left(&f->g) < 1) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return;
|
||||
}
|
||||
src += f->mv[bytestream2_get_byteu(&f->g)];
|
||||
if (start > src || src > end) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return;
|
||||
}
|
||||
mcdc(dst, src, log2w, h, stride, 1, 0);
|
||||
} else if (code == 1) {
|
||||
log2h--;
|
||||
if ((ret = decode_p_block(f, dst, src, log2w, log2h, stride)) < 0)
|
||||
return ret;
|
||||
if ((ret = decode_p_block(f, dst + (stride << log2h),
|
||||
src + (stride << log2h),
|
||||
log2w, log2h, stride)) < 0)
|
||||
return ret;
|
||||
decode_p_block(f, dst, src, log2w, log2h, stride);
|
||||
decode_p_block(f, dst + (stride << log2h),
|
||||
src + (stride << log2h), log2w, log2h, stride);
|
||||
} else if (code == 2) {
|
||||
log2w--;
|
||||
if ((ret = decode_p_block(f, dst , src, log2w, log2h, stride)) < 0)
|
||||
return ret;
|
||||
if ((ret = decode_p_block(f, dst + (1 << log2w),
|
||||
src + (1 << log2w),
|
||||
log2w, log2h, stride)) < 0)
|
||||
return ret;
|
||||
decode_p_block(f, dst , src, log2w, log2h, stride);
|
||||
decode_p_block(f, dst + (1 << log2w),
|
||||
src + (1 << log2w), log2w, log2h, stride);
|
||||
} else if (code == 3 && f->version < 2) {
|
||||
if (start > src || src > end) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
mcdc(dst, src, log2w, h, stride, 1, 0);
|
||||
} else if (code == 4) {
|
||||
if (bytestream2_get_bytes_left(&f->g) < 1) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return;
|
||||
}
|
||||
src += f->mv[bytestream2_get_byteu(&f->g)];
|
||||
if (start > src || src > end) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return;
|
||||
}
|
||||
if (bytestream2_get_bytes_left(&f->g2) < 2){
|
||||
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return;
|
||||
}
|
||||
mcdc(dst, src, log2w, h, stride, 1, bytestream2_get_le16u(&f->g2));
|
||||
} else if (code == 5) {
|
||||
if (bytestream2_get_bytes_left(&f->g2) < 2) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (start > src || src > end) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return;
|
||||
}
|
||||
mcdc(dst, src, log2w, h, stride, 0, bytestream2_get_le16u(&f->g2));
|
||||
} else if (code == 6) {
|
||||
if (bytestream2_get_bytes_left(&f->g2) < 4) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return;
|
||||
}
|
||||
if (log2w) {
|
||||
dst[0] = bytestream2_get_le16u(&f->g2);
|
||||
@@ -417,7 +402,6 @@ static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
|
||||
dst[stride] = bytestream2_get_le16u(&f->g2);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
|
||||
@@ -430,20 +414,8 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
|
||||
const int stride = f->current_picture.linesize[0] >> 1;
|
||||
unsigned int bitstream_size, bytestream_size, wordstream_size, extra,
|
||||
bytestream_offset, wordstream_offset;
|
||||
int ret;
|
||||
|
||||
if (!f->last_picture.data[0]) {
|
||||
if ((ret = ff_get_buffer(f->avctx, &f->last_picture)) < 0) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return ret;
|
||||
}
|
||||
for (y=0; y<f->avctx->height; y++)
|
||||
memset(f->last_picture.data[0] + y*f->last_picture.linesize[0], 0, 2*f->avctx->width);
|
||||
}
|
||||
|
||||
if (f->version > 1) {
|
||||
if (length < 20)
|
||||
return AVERROR_INVALIDDATA;
|
||||
extra = 20;
|
||||
if (length < extra)
|
||||
return -1;
|
||||
@@ -463,7 +435,7 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
|
||||
extra > length - bytestream_size - bitstream_size - wordstream_size) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "lengths %d %d %d %d\n", bitstream_size, bytestream_size, wordstream_size,
|
||||
bitstream_size+ bytestream_size+ wordstream_size - length);
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
av_fast_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size,
|
||||
@@ -487,8 +459,7 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
|
||||
|
||||
for (y = 0; y < height; y += 8) {
|
||||
for (x = 0; x < width; x += 8)
|
||||
if ((ret = decode_p_block(f, dst + x, src + x, 3, 3, stride)) < 0)
|
||||
return ret;
|
||||
decode_p_block(f, dst + x, src + x, 3, 3, stride);
|
||||
src += 8 * stride;
|
||||
dst += 8 * stride;
|
||||
}
|
||||
@@ -596,20 +567,20 @@ static inline void idct_put(FourXContext *f, int x, int y)
|
||||
|
||||
static int decode_i_mb(FourXContext *f)
|
||||
{
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
f->dsp.clear_blocks(f->block[0]);
|
||||
|
||||
for (i = 0; i < 6; i++)
|
||||
if (decode_i_block(f, f->block[i]) < 0)
|
||||
return -1;
|
||||
if ((ret = decode_i_block(f, f->block[i])) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const uint8_t *read_huffman_tables(FourXContext *f,
|
||||
const uint8_t * const buf,
|
||||
int buf_size)
|
||||
const uint8_t * const buf, int buf_size)
|
||||
{
|
||||
int frequency[512] = { 0 };
|
||||
uint8_t flag[512];
|
||||
@@ -628,11 +599,8 @@ static const uint8_t *read_huffman_tables(FourXContext *f,
|
||||
for (;;) {
|
||||
int i;
|
||||
|
||||
if (ptr_end - ptr < FFMAX(end - start + 1, 0) + 1) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "invalid data in read_huffman_tables\n");
|
||||
if (start <= end && ptr_end - ptr < end - start + 1 + 1)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (i = start; i <= end; i++)
|
||||
frequency[i] = *ptr++;
|
||||
start = *ptr++;
|
||||
@@ -734,9 +702,9 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
|
||||
color[1] = bytestream2_get_le16u(&g3);
|
||||
|
||||
if (color[0] & 0x8000)
|
||||
av_log(f->avctx, AV_LOG_ERROR, "unk bit 1\n");
|
||||
av_log(NULL, AV_LOG_ERROR, "unk bit 1\n");
|
||||
if (color[1] & 0x8000)
|
||||
av_log(f->avctx, AV_LOG_ERROR, "unk bit 2\n");
|
||||
av_log(NULL, AV_LOG_ERROR, "unk bit 2\n");
|
||||
|
||||
color[2] = mix(color[0], color[1]);
|
||||
color[3] = mix(color[1], color[0]);
|
||||
@@ -758,17 +726,14 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
|
||||
|
||||
static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
|
||||
{
|
||||
int x, y;
|
||||
int x, y, ret;
|
||||
const int width = f->avctx->width;
|
||||
const int height = f->avctx->height;
|
||||
const unsigned int bitstream_size = AV_RL32(buf);
|
||||
unsigned int prestream_size;
|
||||
const uint8_t *prestream;
|
||||
|
||||
if (bitstream_size > (1 << 26))
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (length < bitstream_size + 12) {
|
||||
if (bitstream_size > (1<<26) || length < bitstream_size + 12) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "packet size too small\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
@@ -777,18 +742,17 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
|
||||
prestream = buf + bitstream_size + 12;
|
||||
|
||||
if (prestream_size + bitstream_size + 12 != length
|
||||
|| bitstream_size > (1 << 26)
|
||||
|| prestream_size > (1 << 26)) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d %d\n",
|
||||
prestream_size, bitstream_size, length);
|
||||
return -1;
|
||||
}
|
||||
|
||||
prestream = read_huffman_tables(f, prestream, prestream_size);
|
||||
if (!prestream) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "Error reading Huffman tables.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
prestream = read_huffman_tables(f, prestream, buf + length - prestream);
|
||||
if (!prestream)
|
||||
return -1;
|
||||
|
||||
init_get_bits(&f->gb, buf + 4, 8 * bitstream_size);
|
||||
|
||||
prestream_size = length + buf - prestream;
|
||||
@@ -807,8 +771,8 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
|
||||
|
||||
for (y = 0; y < height; y += 16) {
|
||||
for (x = 0; x < width; x += 16) {
|
||||
if (decode_i_mb(f) < 0)
|
||||
return -1;
|
||||
if ((ret = decode_i_mb(f)) < 0)
|
||||
return ret;
|
||||
|
||||
idct_put(f, x, y);
|
||||
}
|
||||
@@ -827,41 +791,33 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
||||
int buf_size = avpkt->size;
|
||||
FourXContext *const f = avctx->priv_data;
|
||||
AVFrame *picture = data;
|
||||
AVFrame *p, temp;
|
||||
int i, frame_4cc, frame_size;
|
||||
AVFrame *p;
|
||||
int i, frame_4cc, frame_size, ret;
|
||||
|
||||
if (buf_size < 20)
|
||||
if (buf_size < 12)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
av_assert0(avctx->width % 16 == 0 && avctx->height % 16 == 0);
|
||||
|
||||
if (buf_size < AV_RL32(buf + 4) + 8) {
|
||||
frame_4cc = AV_RL32(buf);
|
||||
if (buf_size != AV_RL32(buf + 4) + 8 || buf_size < 20)
|
||||
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n",
|
||||
buf_size, AV_RL32(buf + 4));
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
frame_4cc = AV_RL32(buf);
|
||||
|
||||
if (frame_4cc == AV_RL32("cfrm")) {
|
||||
int free_index = -1;
|
||||
int id, whole_size;
|
||||
const int data_size = buf_size - 20;
|
||||
const int id = AV_RL32(buf + 12);
|
||||
const int whole_size = AV_RL32(buf + 16);
|
||||
CFrameBuffer *cfrm;
|
||||
|
||||
if (f->version <= 1) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "cfrm in version %d\n", f->version);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
id = AV_RL32(buf + 12);
|
||||
whole_size = AV_RL32(buf + 16);
|
||||
|
||||
if (data_size < 0 || whole_size < 0) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "sizes invalid\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (f->version <= 1) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "cfrm in version %d\n", f->version);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
for (i = 0; i < CFRAME_BUFFER_COUNT; i++)
|
||||
if (f->cfrm[i].id && f->cfrm[i].id < avctx->frame_number)
|
||||
av_log(f->avctx, AV_LOG_ERROR, "lost c frame %d\n",
|
||||
@@ -888,7 +844,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
||||
// explicit check needed as memcpy below might not catch a NULL
|
||||
if (!cfrm->data) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "realloc failure\n");
|
||||
return -1;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
memcpy(cfrm->data + cfrm->size, buf + 20, data_size);
|
||||
@@ -902,9 +858,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
||||
av_log(f->avctx, AV_LOG_ERROR, "cframe id mismatch %d %d\n",
|
||||
id, avctx->frame_number);
|
||||
|
||||
if (f->version <= 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
cfrm->size = cfrm->id = 0;
|
||||
frame_4cc = AV_RL32("pfrm");
|
||||
} else
|
||||
@@ -914,9 +867,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
||||
frame_size = buf_size - 12;
|
||||
}
|
||||
|
||||
temp = f->current_picture;
|
||||
f->current_picture = f->last_picture;
|
||||
f->last_picture = temp;
|
||||
FFSWAP(AVFrame, f->current_picture, f->last_picture);
|
||||
|
||||
p = &f->current_picture;
|
||||
avctx->coded_frame = p;
|
||||
@@ -925,28 +876,38 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
||||
avctx->flags |= CODEC_FLAG_EMU_EDGE;
|
||||
|
||||
p->reference= 3;
|
||||
if (avctx->reget_buffer(avctx, p) < 0) {
|
||||
if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
|
||||
return -1;
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (frame_4cc == AV_RL32("ifr2")) {
|
||||
p->pict_type= AV_PICTURE_TYPE_I;
|
||||
if (decode_i2_frame(f, buf - 4, frame_size + 4) < 0) {
|
||||
if ((ret = decode_i2_frame(f, buf - 4, frame_size + 4)) < 0) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "decode i2 frame failed\n");
|
||||
return -1;
|
||||
return ret;
|
||||
}
|
||||
} else if (frame_4cc == AV_RL32("ifrm")) {
|
||||
p->pict_type= AV_PICTURE_TYPE_I;
|
||||
if (decode_i_frame(f, buf, frame_size) < 0) {
|
||||
if ((ret = decode_i_frame(f, buf, frame_size)) < 0) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "decode i frame failed\n");
|
||||
return -1;
|
||||
return ret;
|
||||
}
|
||||
} else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) {
|
||||
if (!f->last_picture.data[0]) {
|
||||
f->last_picture.reference = 3;
|
||||
if ((ret = ff_get_buffer(avctx, &f->last_picture)) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return ret;
|
||||
}
|
||||
for (i=0; i<avctx->height; i++)
|
||||
memset(f->last_picture.data[0] + i*f->last_picture.linesize[0], 0, 2*avctx->width);
|
||||
}
|
||||
|
||||
p->pict_type = AV_PICTURE_TYPE_P;
|
||||
if (decode_p_frame(f, buf, frame_size) < 0) {
|
||||
if ((ret = decode_p_frame(f, buf, frame_size)) < 0) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "decode p frame failed\n");
|
||||
return -1;
|
||||
return ret;
|
||||
}
|
||||
} else if (frame_4cc == AV_RL32("snd_")) {
|
||||
av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n",
|
||||
|
@@ -64,7 +64,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
||||
unsigned char *pixptr, *pixptr_end;
|
||||
unsigned int height = avctx->height; // Real image height
|
||||
unsigned int dlen, p, row;
|
||||
const unsigned char *lp, *dp, *ep;
|
||||
const unsigned char *lp, *dp;
|
||||
unsigned char count;
|
||||
unsigned int planes = c->planes;
|
||||
unsigned char *planemap = c->planemap;
|
||||
@@ -79,8 +79,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
||||
return -1;
|
||||
}
|
||||
|
||||
ep = encoded + buf_size;
|
||||
|
||||
/* Set data pointer after line lengths */
|
||||
dp = encoded + planes * (height << 1);
|
||||
|
||||
@@ -92,19 +90,19 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
||||
for (row = 0; row < height; row++) {
|
||||
pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
|
||||
pixptr_end = pixptr + c->pic.linesize[0];
|
||||
if (ep - lp < row * 2 + 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if(lp - encoded + row*2 + 1 >= buf_size)
|
||||
return -1;
|
||||
dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
|
||||
/* Decode a row of this plane */
|
||||
while (dlen > 0) {
|
||||
if (ep - dp <= 1)
|
||||
if (dp + 1 >= buf + buf_size)
|
||||
return -1;
|
||||
if ((count = *dp++) <= 127) {
|
||||
count++;
|
||||
dlen -= count + 1;
|
||||
if (pixptr + count * planes > pixptr_end)
|
||||
break;
|
||||
if (ep - dp < count)
|
||||
if (dp + count > buf + buf_size)
|
||||
return -1;
|
||||
while (count--) {
|
||||
*pixptr = *dp++;
|
||||
|
@@ -25,6 +25,7 @@ OBJS = allcodecs.o \
|
||||
fmtconvert.o \
|
||||
imgconvert.o \
|
||||
jrevdct.o \
|
||||
log2_tab.o \
|
||||
mathtables.o \
|
||||
options.o \
|
||||
parser.o \
|
||||
@@ -38,7 +39,6 @@ OBJS = allcodecs.o \
|
||||
# parts needed for many different codecs
|
||||
OBJS-$(CONFIG_AANDCTTABLES) += aandcttab.o
|
||||
OBJS-$(CONFIG_AC3DSP) += ac3dsp.o
|
||||
OBJS-$(CONFIG_AUDIO_FRAME_QUEUE) += audio_frame_queue.o
|
||||
OBJS-$(CONFIG_CRYSTALHD) += crystalhd.o
|
||||
OBJS-$(CONFIG_DCT) += dct.o dct32_fixed.o dct32_float.o
|
||||
OBJS-$(CONFIG_DWT) += dwt.o snow.o
|
||||
@@ -68,7 +68,6 @@ OBJS-$(CONFIG_MPEGVIDEOENC) += mpegvideo_enc.o mpeg12data.o \
|
||||
OBJS-$(CONFIG_RANGECODER) += rangecoder.o
|
||||
RDFT-OBJS-$(CONFIG_HARDCODED_TABLES) += sin_tables.o
|
||||
OBJS-$(CONFIG_RDFT) += rdft.o $(RDFT-OBJS-yes)
|
||||
OBJS-$(CONFIG_SHARED) += log2_tab.o
|
||||
OBJS-$(CONFIG_SINEWIN) += sinewin.o
|
||||
OBJS-$(CONFIG_VAAPI) += vaapi.o
|
||||
OBJS-$(CONFIG_VDPAU) += vdpau.o
|
||||
@@ -85,7 +84,8 @@ OBJS-$(CONFIG_AAC_DECODER) += aacdec.o aactab.o aacsbr.o aacps.o \
|
||||
OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o \
|
||||
aacpsy.o aactab.o \
|
||||
psymodel.o iirfilter.o \
|
||||
mpeg4audio.o kbdwin.o
|
||||
mpeg4audio.o kbdwin.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_AASC_DECODER) += aasc.o msrledec.o
|
||||
OBJS-$(CONFIG_AC3_DECODER) += ac3dec.o ac3dec_data.o ac3.o kbdwin.o
|
||||
OBJS-$(CONFIG_AC3_ENCODER) += ac3enc_float.o ac3enc.o ac3tab.o \
|
||||
@@ -317,7 +317,8 @@ OBJS-$(CONFIG_MVC1_DECODER) += mvcdec.o
|
||||
OBJS-$(CONFIG_MVC2_DECODER) += mvcdec.o
|
||||
OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
|
||||
OBJS-$(CONFIG_PAF_VIDEO_DECODER) += paf.o
|
||||
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += paf.o
|
||||
@@ -357,7 +358,8 @@ OBJS-$(CONFIG_R10K_ENCODER) += r210enc.o
|
||||
OBJS-$(CONFIG_R210_DECODER) += r210dec.o
|
||||
OBJS-$(CONFIG_R210_ENCODER) += r210enc.o
|
||||
OBJS-$(CONFIG_RA_144_DECODER) += ra144dec.o ra144.o celp_filters.o
|
||||
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o
|
||||
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_RA_288_DECODER) += ra288.o celp_filters.o
|
||||
OBJS-$(CONFIG_RALF_DECODER) += ralf.o
|
||||
OBJS-$(CONFIG_RAWVIDEO_DECODER) += rawdec.o
|
||||
@@ -658,39 +660,43 @@ OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
# external codec libraries
|
||||
OBJS-$(CONFIG_LIBAACPLUS_ENCODER) += libaacplus.o
|
||||
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
|
||||
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o
|
||||
OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o
|
||||
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBGSM_DECODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBGSM_MS_DECODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBGSM_MS_ENCODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBILBC_DECODER) += libilbc.o
|
||||
OBJS-$(CONFIG_LIBILBC_ENCODER) += libilbc.o
|
||||
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o
|
||||
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER) += libopencore-amr.o
|
||||
OBJS-$(CONFIG_LIBOPENJPEG_DECODER) += libopenjpegdec.o
|
||||
OBJS-$(CONFIG_LIBOPENJPEG_ENCODER) += libopenjpegenc.o
|
||||
OBJS-$(CONFIG_LIBOPUS_DECODER) += libopusdec.o libopus.o \
|
||||
vorbis_data.o
|
||||
OBJS-$(CONFIG_LIBOPUS_ENCODER) += libopusenc.o libopus.o \
|
||||
vorbis_data.o
|
||||
vorbis_data.o audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBSCHROEDINGER_DECODER) += libschroedingerdec.o \
|
||||
libschroedinger.o
|
||||
OBJS-$(CONFIG_LIBSCHROEDINGER_ENCODER) += libschroedingerenc.o \
|
||||
libschroedinger.o
|
||||
OBJS-$(CONFIG_LIBSPEEX_DECODER) += libspeexdec.o
|
||||
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o
|
||||
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBSTAGEFRIGHT_H264_DECODER)+= libstagefright.o
|
||||
OBJS-$(CONFIG_LIBTHEORA_ENCODER) += libtheoraenc.o
|
||||
OBJS-$(CONFIG_LIBTWOLAME_ENCODER) += libtwolame.o
|
||||
OBJS-$(CONFIG_LIBUTVIDEO_DECODER) += libutvideodec.o
|
||||
OBJS-$(CONFIG_LIBUTVIDEO_ENCODER) += libutvideoenc.o
|
||||
OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o
|
||||
OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBVO_AMRWBENC_ENCODER) += libvo-amrwbenc.o
|
||||
OBJS-$(CONFIG_LIBVORBIS_DECODER) += libvorbisdec.o
|
||||
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbisenc.o \
|
||||
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbisenc.o audio_frame_queue.o \
|
||||
vorbis_data.o vorbis_parser.o xiph.o
|
||||
OBJS-$(CONFIG_LIBVPX_DECODER) += libvpxdec.o
|
||||
OBJS-$(CONFIG_LIBVPX_ENCODER) += libvpxenc.o
|
||||
|
@@ -28,13 +28,13 @@
#include "parser.h"

typedef enum {
AAC_AC3_PARSE_ERROR_SYNC = -0x1030c0a,
AAC_AC3_PARSE_ERROR_BSID = -0x2030c0a,
AAC_AC3_PARSE_ERROR_SAMPLE_RATE = -0x3030c0a,
AAC_AC3_PARSE_ERROR_FRAME_SIZE = -0x4030c0a,
AAC_AC3_PARSE_ERROR_FRAME_TYPE = -0x5030c0a,
AAC_AC3_PARSE_ERROR_CRC = -0x6030c0a,
AAC_AC3_PARSE_ERROR_CHANNEL_CFG = -0x7030c0a,
AAC_AC3_PARSE_ERROR_SYNC = -1,
AAC_AC3_PARSE_ERROR_BSID = -2,
AAC_AC3_PARSE_ERROR_SAMPLE_RATE = -3,
AAC_AC3_PARSE_ERROR_FRAME_SIZE = -4,
AAC_AC3_PARSE_ERROR_FRAME_TYPE = -5,
AAC_AC3_PARSE_ERROR_CRC = -6,
AAC_AC3_PARSE_ERROR_CHANNEL_CFG = -7,
} AACAC3ParseError;

typedef struct AACAC3ParseContext {

@@ -710,7 +710,7 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx,
const float lambda)
{
int start = 0, i, w, w2, g;
int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels * (lambda / 120.f);
int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels;
float dists[128] = { 0 }, uplims[128];
float maxvals[128];
int fflag, minscaler;

@@ -113,10 +113,6 @@
static VLC vlc_scalefactors;
static VLC vlc_spectral[11];

static int output_configure(AACContext *ac,
uint8_t layout_map[MAX_ELEM_ID*4][3], int tags,
enum OCStatus oc_type, int get_new_frame);

#define overread_err "Input buffer exhausted before END element found\n"

static int count_channels(uint8_t (*layout)[3], int tags)
@@ -147,8 +143,6 @@ static av_cold int che_configure(AACContext *ac,
enum ChannelPosition che_pos,
int type, int id, int *channels)
{
if (*channels >= MAX_CHANNELS)
return AVERROR_INVALIDDATA;
if (che_pos) {
if (!ac->che[type][id]) {
if (!(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
@@ -190,9 +184,6 @@ static int frame_configure_elements(AVCodecContext *avctx)
}
}

if (!avctx->channels)
return 1;

/* get output buffer */
ac->frame.nb_samples = 2048;
if ((ret = ff_get_buffer(avctx, &ac->frame)) < 0) {
@@ -218,39 +209,28 @@ struct elem_to_channel {

static int assign_pair(struct elem_to_channel e2c_vec[MAX_ELEM_ID],
uint8_t (*layout_map)[3], int offset, uint64_t left,
uint64_t right, int pos)
uint64_t right, int pos)
{
if (layout_map[offset][0] == TYPE_CPE) {
e2c_vec[offset] = (struct elem_to_channel) {
.av_position = left | right,
.syn_ele = TYPE_CPE,
.elem_id = layout_map[offset][1],
.aac_position = pos
};
.av_position = left | right, .syn_ele = TYPE_CPE,
.elem_id = layout_map[offset ][1], .aac_position = pos };
return 1;
} else {
e2c_vec[offset] = (struct elem_to_channel) {
.av_position = left,
.syn_ele = TYPE_SCE,
.elem_id = layout_map[offset][1],
.aac_position = pos
};
e2c_vec[offset] = (struct elem_to_channel) {
.av_position = left, .syn_ele = TYPE_SCE,
.elem_id = layout_map[offset ][1], .aac_position = pos };
e2c_vec[offset + 1] = (struct elem_to_channel) {
.av_position = right,
.syn_ele = TYPE_SCE,
.elem_id = layout_map[offset + 1][1],
.aac_position = pos
};
.av_position = right, .syn_ele = TYPE_SCE,
.elem_id = layout_map[offset + 1][1], .aac_position = pos };
return 2;
}
}

static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos,
int *current)
{
static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos, int *current) {
int num_pos_channels = 0;
int first_cpe = 0;
int sce_parity = 0;
int first_cpe = 0;
int sce_parity = 0;
int i;
for (i = *current; i < tags; i++) {
if (layout_map[i][2] != pos)
@@ -264,7 +244,7 @@ static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos,
}
}
num_pos_channels += 2;
first_cpe = 1;
first_cpe = 1;
} else {
num_pos_channels++;
sce_parity ^= 1;
@@ -272,7 +252,7 @@ static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos,
}
if (sce_parity &&
((pos == AAC_CHANNEL_FRONT && first_cpe) || pos == AAC_CHANNEL_SIDE))
return -1;
return -1;
*current = i;
return num_pos_channels;
}
@@ -280,7 +260,7 @@ static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos,
static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
{
int i, n, total_non_cc_elements;
struct elem_to_channel e2c_vec[4 * MAX_ELEM_ID] = { { 0 } };
struct elem_to_channel e2c_vec[4*MAX_ELEM_ID] = {{ 0 }};
int num_front_channels, num_side_channels, num_back_channels;
uint64_t layout;

@@ -304,11 +284,8 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
|
||||
i = 0;
|
||||
if (num_front_channels & 1) {
|
||||
e2c_vec[i] = (struct elem_to_channel) {
|
||||
.av_position = AV_CH_FRONT_CENTER,
|
||||
.syn_ele = TYPE_SCE,
|
||||
.elem_id = layout_map[i][1],
|
||||
.aac_position = AAC_CHANNEL_FRONT
|
||||
};
|
||||
.av_position = AV_CH_FRONT_CENTER, .syn_ele = TYPE_SCE,
|
||||
.elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_FRONT };
|
||||
i++;
|
||||
num_front_channels--;
|
||||
}
|
||||
@@ -365,31 +342,22 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
|
||||
}
|
||||
if (num_back_channels) {
|
||||
e2c_vec[i] = (struct elem_to_channel) {
|
||||
.av_position = AV_CH_BACK_CENTER,
|
||||
.syn_ele = TYPE_SCE,
|
||||
.elem_id = layout_map[i][1],
|
||||
.aac_position = AAC_CHANNEL_BACK
|
||||
};
|
||||
.av_position = AV_CH_BACK_CENTER, .syn_ele = TYPE_SCE,
|
||||
.elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_BACK };
|
||||
i++;
|
||||
num_back_channels--;
|
||||
}
|
||||
|
||||
if (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
|
||||
e2c_vec[i] = (struct elem_to_channel) {
|
||||
.av_position = AV_CH_LOW_FREQUENCY,
|
||||
.syn_ele = TYPE_LFE,
|
||||
.elem_id = layout_map[i][1],
|
||||
.aac_position = AAC_CHANNEL_LFE
|
||||
};
|
||||
.av_position = AV_CH_LOW_FREQUENCY, .syn_ele = TYPE_LFE,
|
||||
.elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
|
||||
i++;
|
||||
}
|
||||
while (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
|
||||
e2c_vec[i] = (struct elem_to_channel) {
|
||||
.av_position = UINT64_MAX,
|
||||
.syn_ele = TYPE_LFE,
|
||||
.elem_id = layout_map[i][1],
|
||||
.aac_position = AAC_CHANNEL_LFE
|
||||
};
|
||||
.av_position = UINT64_MAX, .syn_ele = TYPE_LFE,
|
||||
.elem_id = layout_map[i][1], .aac_position = AAC_CHANNEL_LFE };
|
||||
i++;
|
||||
}
|
||||
|
||||
@@ -397,11 +365,12 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
|
||||
total_non_cc_elements = n = i;
|
||||
do {
|
||||
int next_n = 0;
|
||||
for (i = 1; i < n; i++)
|
||||
if (e2c_vec[i - 1].av_position > e2c_vec[i].av_position) {
|
||||
FFSWAP(struct elem_to_channel, e2c_vec[i - 1], e2c_vec[i]);
|
||||
for (i = 1; i < n; i++) {
|
||||
if (e2c_vec[i-1].av_position > e2c_vec[i].av_position) {
|
||||
FFSWAP(struct elem_to_channel, e2c_vec[i-1], e2c_vec[i]);
|
||||
next_n = i;
|
||||
}
|
||||
}
|
||||
n = next_n;
|
||||
} while (n > 0);
|
||||
|
||||
@@ -437,19 +406,16 @@ static void pop_output_configuration(AACContext *ac) {
|
||||
ac->oc[1] = ac->oc[0];
|
||||
ac->avctx->channels = ac->oc[1].channels;
|
||||
ac->avctx->channel_layout = ac->oc[1].channel_layout;
|
||||
output_configure(ac, ac->oc[1].layout_map, ac->oc[1].layout_map_tags,
|
||||
ac->oc[1].status, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Configure output channel order based on the current program
|
||||
* configuration element.
|
||||
* Configure output channel order based on the current program configuration element.
|
||||
*
|
||||
* @return Returns error status. 0 - OK, !0 - error
|
||||
*/
|
||||
static int output_configure(AACContext *ac,
|
||||
uint8_t layout_map[MAX_ELEM_ID * 4][3], int tags,
|
||||
uint8_t layout_map[MAX_ELEM_ID*4][3], int tags,
|
||||
enum OCStatus oc_type, int get_new_frame)
|
||||
{
|
||||
AVCodecContext *avctx = ac->avctx;
|
||||
@@ -521,40 +487,36 @@ static void flush(AVCodecContext *avctx)
|
||||
* @return Returns error status. 0 - OK, !0 - error
|
||||
*/
|
||||
static int set_default_channel_config(AVCodecContext *avctx,
|
||||
uint8_t (*layout_map)[3],
|
||||
int *tags,
|
||||
int channel_config)
|
||||
uint8_t (*layout_map)[3],
|
||||
int *tags,
|
||||
int channel_config)
|
||||
{
|
||||
if (channel_config < 1 || channel_config > 7) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"invalid default channel configuration (%d)\n",
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
|
||||
channel_config);
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
*tags = tags_per_config[channel_config];
|
||||
memcpy(layout_map, aac_channel_layout_map[channel_config - 1],
|
||||
*tags * sizeof(*layout_map));
|
||||
memcpy(layout_map, aac_channel_layout_map[channel_config-1], *tags * sizeof(*layout_map));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
|
||||
{
|
||||
/* For PCE based channel configurations map the channels solely based
|
||||
* on tags. */
|
||||
// For PCE based channel configurations map the channels solely based on tags.
|
||||
if (!ac->oc[1].m4ac.chan_config) {
|
||||
return ac->tag_che_map[type][elem_id];
|
||||
}
|
||||
// Allow single CPE stereo files to be signalled with mono configuration.
|
||||
if (!ac->tags_mapped && type == TYPE_CPE &&
|
||||
ac->oc[1].m4ac.chan_config == 1) {
|
||||
if (!ac->tags_mapped && type == TYPE_CPE && ac->oc[1].m4ac.chan_config == 1) {
|
||||
uint8_t layout_map[MAX_ELEM_ID*4][3];
|
||||
int layout_map_tags;
|
||||
push_output_configuration(ac);
|
||||
|
||||
av_log(ac->avctx, AV_LOG_DEBUG, "mono with CPE\n");
|
||||
|
||||
if (set_default_channel_config(ac->avctx, layout_map,
|
||||
&layout_map_tags, 2) < 0)
|
||||
if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
|
||||
2) < 0)
|
||||
return NULL;
|
||||
if (output_configure(ac, layout_map, layout_map_tags,
|
||||
OC_TRIAL_FRAME, 1) < 0)
|
||||
@@ -564,16 +526,15 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
|
||||
ac->oc[1].m4ac.ps = 0;
|
||||
}
|
||||
// And vice-versa
|
||||
if (!ac->tags_mapped && type == TYPE_SCE &&
|
||||
ac->oc[1].m4ac.chan_config == 2) {
|
||||
uint8_t layout_map[MAX_ELEM_ID * 4][3];
|
||||
if (!ac->tags_mapped && type == TYPE_SCE && ac->oc[1].m4ac.chan_config == 2) {
|
||||
uint8_t layout_map[MAX_ELEM_ID*4][3];
|
||||
int layout_map_tags;
|
||||
push_output_configuration(ac);
|
||||
|
||||
av_log(ac->avctx, AV_LOG_DEBUG, "stereo with SCE\n");
|
||||
|
||||
if (set_default_channel_config(ac->avctx, layout_map,
|
||||
&layout_map_tags, 1) < 0)
|
||||
if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
|
||||
1) < 0)
|
||||
return NULL;
|
||||
if (output_configure(ac, layout_map, layout_map_tags,
|
||||
OC_TRIAL_FRAME, 1) < 0)
|
||||
@@ -583,8 +544,7 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
|
||||
if (ac->oc[1].m4ac.sbr)
|
||||
ac->oc[1].m4ac.ps = -1;
|
||||
}
|
||||
/* For indexed channel configurations map the channels solely based
|
||||
* on position. */
|
||||
// For indexed channel configurations map the channels solely based on position.
|
||||
switch (ac->oc[1].m4ac.chan_config) {
|
||||
case 7:
|
||||
if (ac->tags_mapped == 3 && type == TYPE_CPE) {
|
||||
@@ -592,12 +552,9 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
|
||||
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
|
||||
}
|
||||
case 6:
|
||||
/* Some streams incorrectly code 5.1 audio as
|
||||
* SCE[0] CPE[0] CPE[1] SCE[1]
|
||||
* instead of
|
||||
* SCE[0] CPE[0] CPE[1] LFE[0].
|
||||
* If we seem to have encountered such a stream, transfer
|
||||
* the LFE[0] element to the SCE[1]'s mapping */
|
||||
/* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
|
||||
instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
|
||||
encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
|
||||
if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
|
||||
@@ -608,16 +565,13 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
|
||||
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
|
||||
}
|
||||
case 4:
|
||||
if (ac->tags_mapped == 2 &&
|
||||
ac->oc[1].m4ac.chan_config == 4 &&
|
||||
type == TYPE_SCE) {
|
||||
if (ac->tags_mapped == 2 && ac->oc[1].m4ac.chan_config == 4 && type == TYPE_SCE) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
|
||||
}
|
||||
case 3:
|
||||
case 2:
|
||||
if (ac->tags_mapped == (ac->oc[1].m4ac.chan_config != 2) &&
|
||||
type == TYPE_CPE) {
|
||||
if (ac->tags_mapped == (ac->oc[1].m4ac.chan_config != 2) && type == TYPE_CPE) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
|
||||
} else if (ac->oc[1].m4ac.chan_config == 2) {
|
||||
@@ -634,8 +588,7 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode an array of 4 bit element IDs, optionally interleaved with a
|
||||
* stereo/mono switching bit.
|
||||
* Decode an array of 4 bit element IDs, optionally interleaved with a stereo/mono switching bit.
|
||||
*
|
||||
* @param type speaker type/position for these channels
|
||||
*/
|
||||
@@ -677,8 +630,7 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
|
||||
uint8_t (*layout_map)[3],
|
||||
GetBitContext *gb)
|
||||
{
|
||||
int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc;
|
||||
int sampling_index;
|
||||
int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc, sampling_index;
|
||||
int comment_len;
|
||||
int tags;
|
||||
|
||||
@@ -686,9 +638,7 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
|
||||
|
||||
sampling_index = get_bits(gb, 4);
|
||||
if (m4ac->sampling_index != sampling_index)
|
||||
av_log(avctx, AV_LOG_WARNING,
|
||||
"Sample rate index in program config element does not "
|
||||
"match the sample rate index configured by the container.\n");
|
||||
av_log(avctx, AV_LOG_WARNING, "Sample rate index in program config element does not match the sample rate index configured by the container.\n");
|
||||
|
||||
num_front = get_bits(gb, 4);
|
||||
num_side = get_bits(gb, 4);
|
||||
@@ -729,7 +679,7 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
|
||||
comment_len = get_bits(gb, 8) * 8;
|
||||
if (get_bits_left(gb) < comment_len) {
|
||||
av_log(avctx, AV_LOG_ERROR, "decode_pce: " overread_err);
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
skip_bits_long(gb, comment_len);
|
||||
return tags;
|
||||
@@ -771,8 +721,7 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
|
||||
if (tags < 0)
|
||||
return tags;
|
||||
} else {
|
||||
if ((ret = set_default_channel_config(avctx, layout_map,
|
||||
&tags, channel_config)))
|
||||
if ((ret = set_default_channel_config(avctx, layout_map, &tags, channel_config)))
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -794,7 +743,7 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
|
||||
case AOT_ER_AAC_LTP:
|
||||
case AOT_ER_AAC_SCALABLE:
|
||||
case AOT_ER_AAC_LD:
|
||||
skip_bits(gb, 3); /* aacSectionDataResilienceFlag
|
||||
skip_bits(gb, 3); /* aacSectionDataResilienceFlag
|
||||
* aacScalefactorDataResilienceFlag
|
||||
* aacSpectralDataResilienceFlag
|
||||
*/
|
||||
@@ -824,24 +773,20 @@ static int decode_audio_specific_config(AACContext *ac,
|
||||
int sync_extension)
|
||||
{
|
||||
GetBitContext gb;
|
||||
int i, ret;
|
||||
int i;
|
||||
|
||||
av_dlog(avctx, "audio specific config size %d\n", bit_size >> 3);
|
||||
for (i = 0; i < bit_size >> 3; i++)
|
||||
av_dlog(avctx, "%02x ", data[i]);
|
||||
av_dlog(avctx, "\n");
|
||||
|
||||
if ((ret = init_get_bits(&gb, data, bit_size)) < 0)
|
||||
return ret;
|
||||
init_get_bits(&gb, data, bit_size);
|
||||
|
||||
if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size,
|
||||
sync_extension)) < 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size, sync_extension)) < 0)
|
||||
return -1;
|
||||
if (m4ac->sampling_index > 12) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"invalid sampling rate index %d\n",
|
||||
m4ac->sampling_index);
|
||||
return AVERROR_INVALIDDATA;
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", m4ac->sampling_index);
|
||||
return -1;
|
||||
}
|
||||
|
||||
skip_bits_long(&gb, i);
|
||||
@@ -850,23 +795,18 @@ static int decode_audio_specific_config(AACContext *ac,
|
||||
case AOT_AAC_MAIN:
|
||||
case AOT_AAC_LC:
|
||||
case AOT_AAC_LTP:
|
||||
if ((ret = decode_ga_specific_config(ac, avctx, &gb,
|
||||
m4ac, m4ac->chan_config)) < 0)
|
||||
return ret;
|
||||
if (decode_ga_specific_config(ac, avctx, &gb, m4ac, m4ac->chan_config))
|
||||
return -1;
|
||||
break;
|
||||
default:
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Audio object type %s%d is not supported.\n",
|
||||
m4ac->sbr == 1 ? "SBR+" : "",
|
||||
m4ac->object_type);
|
||||
return AVERROR(ENOSYS);
|
||||
av_log(avctx, AV_LOG_ERROR, "Audio object type %s%d is not supported.\n",
|
||||
m4ac->sbr == 1? "SBR+" : "", m4ac->object_type);
|
||||
return -1;
|
||||
}
|
||||
|
||||
av_dlog(avctx,
|
||||
"AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\n",
|
||||
av_dlog(avctx, "AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\n",
|
||||
m4ac->object_type, m4ac->chan_config, m4ac->sampling_index,
|
||||
m4ac->sample_rate, m4ac->sbr,
|
||||
m4ac->ps);
|
||||
m4ac->sample_rate, m4ac->sbr, m4ac->ps);
|
||||
|
||||
return get_bits_count(&gb);
|
||||
}
|
||||
@@ -924,18 +864,15 @@ static void reset_predictor_group(PredictorState *ps, int group_num)
|
||||
reset_predict_state(&ps[i]);
|
||||
}
|
||||
|
||||
#define AAC_INIT_VLC_STATIC(num, size) \
|
||||
INIT_VLC_STATIC(&vlc_spectral[num], 8, ff_aac_spectral_sizes[num], \
|
||||
ff_aac_spectral_bits[num], sizeof(ff_aac_spectral_bits[num][0]), \
|
||||
sizeof(ff_aac_spectral_bits[num][0]), \
|
||||
ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), \
|
||||
sizeof(ff_aac_spectral_codes[num][0]), \
|
||||
#define AAC_INIT_VLC_STATIC(num, size) \
|
||||
INIT_VLC_STATIC(&vlc_spectral[num], 8, ff_aac_spectral_sizes[num], \
|
||||
ff_aac_spectral_bits[num], sizeof( ff_aac_spectral_bits[num][0]), sizeof( ff_aac_spectral_bits[num][0]), \
|
||||
ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), sizeof(ff_aac_spectral_codes[num][0]), \
|
||||
size);
|
||||
|
||||
static av_cold int aac_decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
AACContext *ac = avctx->priv_data;
|
||||
int ret;
|
||||
|
||||
ac->avctx = avctx;
|
||||
ac->oc[1].m4ac.sample_rate = avctx->sample_rate;
|
||||
@@ -943,11 +880,10 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
|
||||
|
||||
if (avctx->extradata_size > 0) {
|
||||
if ((ret = decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
|
||||
avctx->extradata,
|
||||
avctx->extradata_size * 8,
|
||||
1)) < 0)
|
||||
return ret;
|
||||
if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
|
||||
avctx->extradata,
|
||||
avctx->extradata_size*8, 1) < 0)
|
||||
return -1;
|
||||
} else {
|
||||
int sr, i;
|
||||
uint8_t layout_map[MAX_ELEM_ID*4][3];
|
||||
@@ -978,11 +914,6 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
|
||||
}
|
||||
}
|
||||
|
||||
if (avctx->channels > MAX_CHANNELS) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Too many channels\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
AAC_INIT_VLC_STATIC( 0, 304);
|
||||
AAC_INIT_VLC_STATIC( 1, 270);
|
||||
AAC_INIT_VLC_STATIC( 2, 550);
|
||||
@@ -1005,14 +936,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
|
||||
|
||||
ff_aac_tableinit();
|
||||
|
||||
INIT_VLC_STATIC(&vlc_scalefactors, 7,
|
||||
FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
|
||||
ff_aac_scalefactor_bits,
|
||||
sizeof(ff_aac_scalefactor_bits[0]),
|
||||
sizeof(ff_aac_scalefactor_bits[0]),
|
||||
ff_aac_scalefactor_code,
|
||||
sizeof(ff_aac_scalefactor_code[0]),
|
||||
sizeof(ff_aac_scalefactor_code[0]),
|
||||
INIT_VLC_STATIC(&vlc_scalefactors,7,FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
|
||||
ff_aac_scalefactor_bits, sizeof(ff_aac_scalefactor_bits[0]), sizeof(ff_aac_scalefactor_bits[0]),
|
||||
ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
|
||||
352);
|
||||
|
||||
ff_mdct_init(&ac->mdct, 11, 1, 1.0 / (32768.0 * 1024.0));
|
||||
@@ -1046,7 +972,7 @@ static int skip_data_stream_element(AACContext *ac, GetBitContext *gb)
|
||||
|
||||
if (get_bits_left(gb) < 8 * count) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "skip_data_stream_element: "overread_err);
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
skip_bits_long(gb, 8 * count);
|
||||
return 0;
|
||||
@@ -1058,11 +984,9 @@ static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
|
||||
int sfb;
|
||||
if (get_bits1(gb)) {
|
||||
ics->predictor_reset_group = get_bits(gb, 5);
|
||||
if (ics->predictor_reset_group == 0 ||
|
||||
ics->predictor_reset_group > 30) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"Invalid Predictor Reset Group.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (ics->predictor_reset_group == 0 || ics->predictor_reset_group > 30) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "Invalid Predictor Reset Group.\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]); sfb++) {
|
||||
@@ -1131,8 +1055,7 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
|
||||
goto fail;
|
||||
}
|
||||
} else if (ac->oc[1].m4ac.object_type == AOT_AAC_LC) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"Prediction is not allowed in AAC-LC.\n");
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
|
||||
goto fail;
|
||||
} else {
|
||||
if ((ics->ltp.present = get_bits(gb, 1)))
|
||||
@@ -1143,8 +1066,7 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
|
||||
|
||||
if (ics->max_sfb > ics->num_swb) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"Number of scalefactor bands in group (%d) "
|
||||
"exceeds limit (%d).\n",
|
||||
"Number of scalefactor bands in group (%d) exceeds limit (%d).\n",
|
||||
ics->max_sfb, ics->num_swb);
|
||||
goto fail;
|
||||
}
|
||||
@@ -1177,20 +1099,20 @@ static int decode_band_types(AACContext *ac, enum BandType band_type[120],
|
||||
int sect_band_type = get_bits(gb, 4);
|
||||
if (sect_band_type == 12) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
do {
|
||||
sect_len_incr = get_bits(gb, bits);
|
||||
sect_end += sect_len_incr;
|
||||
if (get_bits_left(gb) < 0) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "decode_band_types: "overread_err);
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
if (sect_end > ics->max_sfb) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"Number of bands (%d) exceeds limit (%d).\n",
|
||||
sect_end, ics->max_sfb);
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
} while (sect_len_incr == (1 << bits) - 1);
|
||||
for (; k < sect_end; k++) {
|
||||
@@ -1228,8 +1150,7 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
|
||||
if (band_type[idx] == ZERO_BT) {
|
||||
for (; i < run_end; i++, idx++)
|
||||
sf[idx] = 0.;
|
||||
} else if ((band_type[idx] == INTENSITY_BT) ||
|
||||
(band_type[idx] == INTENSITY_BT2)) {
|
||||
} else if ((band_type[idx] == INTENSITY_BT) || (band_type[idx] == INTENSITY_BT2)) {
|
||||
for (; i < run_end; i++, idx++) {
|
||||
offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
|
||||
clipped_offset = av_clip(offset[2], -155, 100);
|
||||
@@ -1262,7 +1183,7 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
|
||||
if (offset[0] > 255U) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"Scalefactor (%d) out of range.\n", offset[0]);
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
sf[idx] = -ff_aac_pow2sf_tab[offset[0] - 100 + POW_SF2_ZERO];
|
||||
}
|
||||
@@ -1317,11 +1238,10 @@ static int decode_tns(AACContext *ac, TemporalNoiseShaping *tns,
|
||||
tns->length[w][filt] = get_bits(gb, 6 - 2 * is8);
|
||||
|
||||
if ((tns->order[w][filt] = get_bits(gb, 5 - 2 * is8)) > tns_max_order) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"TNS filter order %d is greater than maximum %d.\n",
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "TNS filter order %d is greater than maximum %d.\n",
|
||||
tns->order[w][filt], tns_max_order);
|
||||
tns->order[w][filt] = 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
if (tns->order[w][filt]) {
|
||||
tns->direction[w][filt] = get_bits1(gb);
|
||||
@@ -1350,9 +1270,7 @@ static void decode_mid_side_stereo(ChannelElement *cpe, GetBitContext *gb,
|
||||
{
|
||||
int idx;
|
||||
if (ms_present == 1) {
|
||||
for (idx = 0;
|
||||
idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb;
|
||||
idx++)
|
||||
for (idx = 0; idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb; idx++)
|
||||
cpe->ms_mask[idx] = get_bits1(gb);
|
||||
} else if (ms_present == 2) {
|
||||
memset(cpe->ms_mask, 1, sizeof(cpe->ms_mask[0]) * cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb);
|
||||
@@ -1451,8 +1369,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
|
||||
float *coef_base = coef;
|
||||
|
||||
for (g = 0; g < ics->num_windows; g++)
|
||||
memset(coef + g * 128 + offsets[ics->max_sfb], 0,
|
||||
sizeof(float) * (c - offsets[ics->max_sfb]));
|
||||
memset(coef + g * 128 + offsets[ics->max_sfb], 0, sizeof(float) * (c - offsets[ics->max_sfb]));
|
||||
|
||||
for (g = 0; g < ics->num_window_groups; g++) {
|
||||
unsigned g_len = ics->group_len[g];
|
||||
@@ -1607,7 +1524,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
|
||||
|
||||
if (b > 8) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "error in spectral data, ESC overflow\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
|
||||
SKIP_BITS(re, gb, b + 1);
|
||||
@@ -1722,20 +1639,14 @@ static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
|
||||
}
|
||||
|
||||
if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
|
||||
for (sfb = 0;
|
||||
sfb < ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index];
|
||||
sfb++) {
|
||||
for (k = sce->ics.swb_offset[sfb];
|
||||
k < sce->ics.swb_offset[sfb + 1];
|
||||
k++) {
|
||||
for (sfb = 0; sfb < ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]; sfb++) {
|
||||
for (k = sce->ics.swb_offset[sfb]; k < sce->ics.swb_offset[sfb + 1]; k++) {
|
||||
predict(&sce->predictor_state[k], &sce->coeffs[k],
|
||||
sce->ics.predictor_present &&
|
||||
sce->ics.prediction_used[sfb]);
|
||||
sce->ics.predictor_present && sce->ics.prediction_used[sfb]);
|
||||
}
|
||||
}
|
||||
if (sce->ics.predictor_reset_group)
|
||||
reset_predictor_group(sce->predictor_state,
|
||||
sce->ics.predictor_reset_group);
|
||||
reset_predictor_group(sce->predictor_state, sce->ics.predictor_reset_group);
|
||||
} else
|
||||
reset_all_predictors(sce->predictor_state);
|
||||
}
|
||||
@@ -1756,7 +1667,6 @@ static int decode_ics(AACContext *ac, SingleChannelElement *sce,
|
||||
IndividualChannelStream *ics = &sce->ics;
|
||||
float *out = sce->coeffs;
|
||||
int global_gain, pulse_present = 0;
|
||||
int ret;
|
||||
|
||||
/* This assignment is to silence a GCC warning about the variable being used
|
||||
* uninitialized when in fact it always is.
|
||||
@@ -1770,38 +1680,33 @@ static int decode_ics(AACContext *ac, SingleChannelElement *sce,
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if ((ret = decode_band_types(ac, sce->band_type,
|
||||
sce->band_type_run_end, gb, ics)) < 0)
|
||||
return ret;
|
||||
if ((ret = decode_scalefactors(ac, sce->sf, gb, global_gain, ics,
|
||||
sce->band_type, sce->band_type_run_end)) < 0)
|
||||
return ret;
|
||||
if (decode_band_types(ac, sce->band_type, sce->band_type_run_end, gb, ics) < 0)
|
||||
return -1;
|
||||
if (decode_scalefactors(ac, sce->sf, gb, global_gain, ics, sce->band_type, sce->band_type_run_end) < 0)
|
||||
return -1;
|
||||
|
||||
pulse_present = 0;
|
||||
if (!scale_flag) {
|
||||
if ((pulse_present = get_bits1(gb))) {
|
||||
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"Pulse tool not allowed in eight short sequence.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "Pulse tool not allowed in eight short sequence.\n");
|
||||
return -1;
|
||||
}
|
||||
if (decode_pulses(&pulse, gb, ics->swb_offset, ics->num_swb)) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR,
|
||||
"Pulse data corrupt or invalid.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "Pulse data corrupt or invalid.\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
if ((tns->present = get_bits1(gb)) && decode_tns(ac, tns, gb, ics))
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
if (get_bits1(gb)) {
|
||||
av_log_missing_feature(ac->avctx, "SSR", 1);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
}
|
||||
|
||||
if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present,
|
||||
&pulse, ics, sce->band_type) < 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present, &pulse, ics, sce->band_type) < 0)
|
||||
return -1;
|
||||
|
||||
if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN && !common_window)
|
||||
apply_prediction(ac, sce);
|
||||
@@ -1822,8 +1727,7 @@ static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
|
||||
for (g = 0; g < ics->num_window_groups; g++) {
|
||||
for (i = 0; i < ics->max_sfb; i++, idx++) {
|
||||
if (cpe->ms_mask[idx] &&
|
||||
cpe->ch[0].band_type[idx] < NOISE_BT &&
|
||||
cpe->ch[1].band_type[idx] < NOISE_BT) {
|
||||
cpe->ch[0].band_type[idx] < NOISE_BT && cpe->ch[1].band_type[idx] < NOISE_BT) {
|
||||
for (group = 0; group < ics->group_len[g]; group++) {
|
||||
ac->dsp.butterflies_float(ch0 + group * 128 + offsets[i],
|
||||
ch1 + group * 128 + offsets[i],
|
||||
@@ -1843,8 +1747,7 @@ static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
|
||||
* [1] mask is decoded from bitstream; [2] mask is all 1s;
|
||||
* [3] reserved for scalable AAC
|
||||
*/
|
||||
static void apply_intensity_stereo(AACContext *ac,
|
||||
ChannelElement *cpe, int ms_present)
|
||||
static void apply_intensity_stereo(AACContext *ac, ChannelElement *cpe, int ms_present)
|
||||
{
|
||||
const IndividualChannelStream *ics = &cpe->ch[1].ics;
|
||||
SingleChannelElement *sce1 = &cpe->ch[1];
|
||||
@@ -1855,8 +1758,7 @@ static void apply_intensity_stereo(AACContext *ac,
|
||||
float scale;
|
||||
for (g = 0; g < ics->num_window_groups; g++) {
|
||||
for (i = 0; i < ics->max_sfb;) {
|
||||
if (sce1->band_type[idx] == INTENSITY_BT ||
|
||||
sce1->band_type[idx] == INTENSITY_BT2) {
|
||||
if (sce1->band_type[idx] == INTENSITY_BT || sce1->band_type[idx] == INTENSITY_BT2) {
|
||||
const int bt_run_end = sce1->band_type_run_end[idx];
|
||||
for (; i < bt_run_end; i++, idx++) {
|
||||
c = -1 + 2 * (sce1->band_type[idx] - 14);
|
||||
@@ -1896,14 +1798,13 @@ static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
|
||||
i = cpe->ch[1].ics.use_kb_window[0];
|
||||
cpe->ch[1].ics = cpe->ch[0].ics;
|
||||
cpe->ch[1].ics.use_kb_window[1] = i;
|
||||
if (cpe->ch[1].ics.predictor_present &&
|
||||
(ac->oc[1].m4ac.object_type != AOT_AAC_MAIN))
|
||||
if (cpe->ch[1].ics.predictor_present && (ac->oc[1].m4ac.object_type != AOT_AAC_MAIN))
|
||||
if ((cpe->ch[1].ics.ltp.present = get_bits(gb, 1)))
|
||||
decode_ltp(&cpe->ch[1].ics.ltp, gb, cpe->ch[1].ics.max_sfb);
|
||||
ms_present = get_bits(gb, 2);
|
||||
if (ms_present == 3) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "ms_present = 3 is reserved.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
} else if (ms_present)
|
||||
decode_mid_side_stereo(cpe, gb, ms_present);
|
||||
}
|
||||
@@ -2764,8 +2665,7 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
|
||||
if (ac->force_dmono_mode >= 0)
|
||||
ac->dmono_mode = ac->force_dmono_mode;
|
||||
|
||||
if ((err = init_get_bits(&gb, buf, buf_size * 8)) < 0)
|
||||
return err;
|
||||
init_get_bits(&gb, buf, buf_size * 8);
|
||||
|
||||
if ((err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb, avpkt)) < 0)
|
||||
return err;
|
||||
@@ -2801,13 +2701,13 @@ static av_cold int aac_decode_close(AVCodecContext *avctx)
|
||||
#define LOAS_SYNC_WORD 0x2b7 ///< 11 bits LOAS sync word
|
||||
|
||||
struct LATMContext {
|
||||
AACContext aac_ctx; ///< containing AACContext
|
||||
int initialized; ///< initilized after a valid extradata was seen
|
||||
AACContext aac_ctx; ///< containing AACContext
|
||||
int initialized; ///< initialized after a valid extradata was seen
|
||||
|
||||
// parser data
|
||||
int audio_mux_version_A; ///< LATM syntax version
|
||||
int frame_length_type; ///< 0/1 variable/fixed frame length
|
||||
int frame_length; ///< frame length for fixed frame length
|
||||
int audio_mux_version_A; ///< LATM syntax version
|
||||
int frame_length_type; ///< 0/1 variable/fixed frame length
|
||||
int frame_length; ///< frame length for fixed frame length
|
||||
};
|
||||
|
||||
static inline uint32_t latm_get_value(GetBitContext *b)
|
||||
@@ -3015,8 +2915,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out,
|
||||
int muxlength, err;
|
||||
GetBitContext gb;
|
||||
|
||||
if ((err = init_get_bits(&gb, avpkt->data, avpkt->size * 8)) < 0)
|
||||
return err;
|
||||
init_get_bits(&gb, avpkt->data, avpkt->size * 8);
|
||||
|
||||
// check for LOAS sync word
|
||||
if (get_bits(&gb, 11) != LOAS_SYNC_WORD)
|
||||
|
@@ -517,7 +517,7 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,

/* add current frame to queue */
if (frame) {
if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
if ((ret = ff_af_queue_add(&s->afq, frame) < 0))
return ret;
}


@@ -1121,12 +1121,7 @@ static void sbr_dequant(SpectralBandReplication *sbr, int id_aac)
for (k = 0; k < sbr->n[sbr->data[0].bs_freq_res[e]]; k++) {
float temp1 = exp2f(sbr->data[0].env_facs[e][k] * alpha + 7.0f);
float temp2 = exp2f((pan_offset - sbr->data[1].env_facs[e][k]) * alpha);
float fac;
if (temp1 > 1E20) {
av_log(NULL, AV_LOG_ERROR, "envelope scalefactor overflow in dequant\n");
temp1 = 1;
}
fac = temp1 / (1.0f + temp2);
float fac = temp1 / (1.0f + temp2);
sbr->data[0].env_facs[e][k] = fac;
sbr->data[1].env_facs[e][k] = fac * temp2;
}
@@ -1135,12 +1130,7 @@ static void sbr_dequant(SpectralBandReplication *sbr, int id_aac)
for (k = 0; k < sbr->n_q; k++) {
float temp1 = exp2f(NOISE_FLOOR_OFFSET - sbr->data[0].noise_facs[e][k] + 1);
float temp2 = exp2f(12 - sbr->data[1].noise_facs[e][k]);
float fac;
if (temp1 > 1E20) {
av_log(NULL, AV_LOG_ERROR, "envelope scalefactor overflow in dequant\n");
temp1 = 1;
}
fac = temp1 / (1.0f + temp2);
float fac = temp1 / (1.0f + temp2);
sbr->data[0].noise_facs[e][k] = fac;
sbr->data[1].noise_facs[e][k] = fac * temp2;
}
@@ -1149,15 +1139,9 @@ static void sbr_dequant(SpectralBandReplication *sbr, int id_aac)
for (ch = 0; ch < (id_aac == TYPE_CPE) + 1; ch++) {
float alpha = sbr->data[ch].bs_amp_res ? 1.0f : 0.5f;
for (e = 1; e <= sbr->data[ch].bs_num_env; e++)
for (k = 0; k < sbr->n[sbr->data[ch].bs_freq_res[e]]; k++){
for (k = 0; k < sbr->n[sbr->data[ch].bs_freq_res[e]]; k++)
sbr->data[ch].env_facs[e][k] =
exp2f(alpha * sbr->data[ch].env_facs[e][k] + 6.0f);
if (sbr->data[ch].env_facs[e][k] > 1E20) {
av_log(NULL, AV_LOG_ERROR, "envelope scalefactor overflow in dequant\n");
sbr->data[ch].env_facs[e][k] = 1;
}
}

for (e = 1; e <= sbr->data[ch].bs_num_noise; e++)
for (k = 0; k < sbr->n_q; k++)
sbr->data[ch].noise_facs[e][k] =

@@ -60,7 +60,7 @@ static av_cold int aasc_decode_init(AVCodecContext *avctx)
}
break;
case 16:
avctx->pix_fmt = AV_PIX_FMT_RGB555LE;
avctx->pix_fmt = AV_PIX_FMT_RGB555;
break;
case 24:
avctx->pix_fmt = AV_PIX_FMT_BGR24;

@@ -292,7 +292,7 @@ static int parse_frame_header(AC3DecodeContext *s)
|
||||
return ff_eac3_parse_header(s);
|
||||
} else {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "E-AC-3 support not compiled in\n");
|
||||
return AVERROR(ENOSYS);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -787,12 +787,12 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
if (start_subband >= end_subband) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "invalid spectral extension "
|
||||
"range (%d >= %d)\n", start_subband, end_subband);
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
if (dst_start_freq >= src_start_freq) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "invalid spectral extension "
|
||||
"copy start bin (%d >= %d)\n", dst_start_freq, src_start_freq);
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
|
||||
s->spx_dst_start_freq = dst_start_freq;
|
||||
@@ -869,7 +869,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
|
||||
if (channel_mode < AC3_CHMODE_STEREO) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "coupling not allowed in mono or dual-mono\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* check for enhanced coupling */
|
||||
@@ -899,7 +899,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
if (cpl_start_subband >= cpl_end_subband) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "invalid coupling range (%d >= %d)\n",
|
||||
cpl_start_subband, cpl_end_subband);
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
s->start_freq[CPL_CH] = cpl_start_subband * 12 + 37;
|
||||
s->end_freq[CPL_CH] = cpl_end_subband * 12 + 37;
|
||||
@@ -921,7 +921,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
if (!blk) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "new coupling strategy must "
|
||||
"be present in block 0\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
} else {
|
||||
s->cpl_in_use[blk] = s->cpl_in_use[blk-1];
|
||||
}
|
||||
@@ -951,7 +951,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
} else if (!blk) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "new coupling coordinates must "
|
||||
"be present in block 0\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
/* channel not in coupling */
|
||||
@@ -1006,7 +1006,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
int bandwidth_code = get_bits(gbc, 6);
|
||||
if (bandwidth_code > 60) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "bandwidth code = %d > 60\n", bandwidth_code);
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
s->end_freq[ch] = bandwidth_code * 3 + 73;
|
||||
}
|
||||
@@ -1029,7 +1029,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
s->num_exp_groups[ch], s->dexps[ch][0],
|
||||
&s->dexps[ch][s->start_freq[ch]+!!ch])) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "exponent out-of-range\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
if (ch != CPL_CH && ch != s->lfe_ch)
|
||||
skip_bits(gbc, 2); /* skip gainrng */
|
||||
@@ -1049,7 +1049,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
} else if (!blk) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "new bit allocation info must "
|
||||
"be present in block 0\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1080,7 +1080,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
}
|
||||
} else if (!s->eac3 && !blk) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "new snr offsets must be present in block 0\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1119,7 +1119,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
} else if (!s->eac3 && !blk) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "new coupling leak info must "
|
||||
"be present in block 0\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
s->first_cpl_leak = 0;
|
||||
}
|
||||
@@ -1131,7 +1131,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
s->dba_mode[ch] = get_bits(gbc, 2);
|
||||
if (s->dba_mode[ch] == DBA_RESERVED) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "delta bit allocation strategy reserved\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
|
||||
}
|
||||
@@ -1172,7 +1172,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
s->dba_offsets[ch], s->dba_lengths[ch],
|
||||
s->dba_values[ch], s->mask[ch])) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "error in bit allocation\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
if (bit_alloc_stages[ch] > 0) {
|
||||
@@ -1291,7 +1291,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
|
||||
switch (err) {
|
||||
case AAC_AC3_PARSE_ERROR_SYNC:
|
||||
av_log(avctx, AV_LOG_ERROR, "frame sync error\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
case AAC_AC3_PARSE_ERROR_BSID:
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid bitstream id\n");
|
||||
break;
|
||||
@@ -1305,20 +1305,17 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
|
||||
/* skip frame if CRC is ok. otherwise use error concealment. */
|
||||
/* TODO: add support for substreams and dependent frames */
|
||||
if (s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) {
|
||||
av_log(avctx, AV_LOG_WARNING, "unsupported frame type : "
|
||||
av_log(avctx, AV_LOG_ERROR, "unsupported frame type : "
|
||||
"skipping frame\n");
|
||||
*got_frame_ptr = 0;
|
||||
return buf_size;
|
||||
return s->frame_size;
|
||||
} else {
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid frame type\n");
|
||||
}
|
||||
break;
|
||||
case AAC_AC3_PARSE_ERROR_CRC:
|
||||
case AAC_AC3_PARSE_ERROR_CHANNEL_CFG:
|
||||
default:
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid header\n");
|
||||
break;
|
||||
default: // Normal AVERROR do not try to recover.
|
||||
*got_frame_ptr = 0;
|
||||
return err;
|
||||
}
|
||||
} else {
|
||||
/* check that reported frame size fits in input buffer */
|
||||
@@ -1339,10 +1336,8 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
|
||||
if (!err) {
|
||||
avctx->sample_rate = s->sample_rate;
|
||||
avctx->bit_rate = s->bit_rate;
|
||||
}
|
||||
|
||||
/* channel config */
|
||||
if (!err || (s->channels && s->out_channels != s->channels)) {
|
||||
/* channel config */
|
||||
s->out_channels = s->channels;
|
||||
s->output_mode = s->channel_mode;
|
||||
if (s->lfe_on)
|
||||
@@ -1365,18 +1360,22 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
|
||||
s->fbw_channels == s->out_channels)) {
|
||||
set_downmix_coeffs(s);
|
||||
}
|
||||
} else if (!s->channels) {
|
||||
av_log(avctx, AV_LOG_ERROR, "unable to determine channel mode\n");
|
||||
} else if (!s->out_channels) {
|
||||
s->out_channels = avctx->channels;
|
||||
if (s->out_channels < s->channels)
|
||||
s->output_mode = s->out_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO;
|
||||
}
|
||||
if (avctx->channels != s->out_channels) {
|
||||
av_log(avctx, AV_LOG_ERROR, "channel number mismatching on damaged frame\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
avctx->channels = s->out_channels;
|
||||
|
||||
/* set audio service type based on bitstream mode for AC-3 */
|
||||
avctx->audio_service_type = s->bitstream_mode;
|
||||
if (s->bitstream_mode == 0x7 && s->channels > 1)
|
||||
avctx->audio_service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE;
|
||||
|
||||
/* get output buffer */
|
||||
avctx->channels = s->out_channels;
|
||||
s->frame.nb_samples = s->num_blocks * 256;
|
||||
if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
|
@@ -47,8 +47,13 @@ int avpriv_adx_decode_header(AVCodecContext *avctx, const uint8_t *buf,
return AVERROR_INVALIDDATA;
offset = AV_RB16(buf + 2) + 4;

if (offset < 6) {
av_log(avctx, AV_LOG_ERROR, "offset is prior data\n");
return AVERROR_INVALIDDATA;
}

/* if copyright string is within the provided data, validate it */
if (bufsize >= offset && offset >= 6 && memcmp(buf + offset - 6, "(c)CRI", 6))
if (bufsize >= offset && memcmp(buf + offset - 6, "(c)CRI", 6))
return AVERROR_INVALIDDATA;

/* check for encoding=3 block_size=18, sample_size=4 */

@@ -321,9 +321,6 @@ static int decode_element(AVCodecContext *avctx, void *data, int ch_index,
rice_history_mult[ch] = get_bits(&alac->gb, 3);
lpc_order[ch] = get_bits(&alac->gb, 5);

if (lpc_order[ch] >= alac->max_samples_per_frame)
return AVERROR_INVALIDDATA;

/* read the predictor table */
for (i = lpc_order[ch] - 1; i >= 0; i--)
lpc_coefs[ch][i] = get_sbits(&alac->gb, 16);
@@ -465,8 +462,9 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data,
}

channels = (element == TYPE_CPE) ? 2 : 1;
if (ch + channels > alac->channels ||
ff_alac_channel_layout_offsets[alac->channels - 1][ch] + channels > alac->channels) {
if ( ch + channels > alac->channels
|| ff_alac_channel_layout_offsets[alac->channels - 1][ch] + channels > alac->channels
) {
av_log(avctx, AV_LOG_ERROR, "invalid element channel count\n");
return AVERROR_INVALIDDATA;
}
@@ -548,8 +546,7 @@ static int alac_set_info(ALACContext *alac)
bytestream2_skipu(&gb, 12); // size:4, alac:4, version:4

alac->max_samples_per_frame = bytestream2_get_be32u(&gb);
if (!alac->max_samples_per_frame ||
alac->max_samples_per_frame > INT_MAX / sizeof(int32_t)) {
if (!alac->max_samples_per_frame || alac->max_samples_per_frame > INT_MAX) {
av_log(alac->avctx, AV_LOG_ERROR, "max samples per frame invalid: %u\n",
alac->max_samples_per_frame);
return AVERROR_INVALIDDATA;

@@ -275,7 +275,7 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch)
// generate warm-up samples
residual[0] = samples[0];
for (i = 1; i <= lpc.lpc_order; i++)
residual[i] = sign_extend(samples[i] - samples[i-1], s->write_sample_size);
residual[i] = samples[i] - samples[i-1];

// perform lpc on remaining samples
for (i = lpc.lpc_order + 1; i < s->frame_size; i++) {

@@ -285,7 +285,7 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
|
||||
GetBitContext gb;
|
||||
uint64_t ht_size;
|
||||
int i, config_offset;
|
||||
MPEG4AudioConfig m4ac = {0};
|
||||
MPEG4AudioConfig m4ac;
|
||||
ALSSpecificConfig *sconf = &ctx->sconf;
|
||||
AVCodecContext *avctx = ctx->avctx;
|
||||
uint32_t als_id, header_size, trailer_size;
|
||||
@@ -296,12 +296,12 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
|
||||
avctx->extradata_size * 8, 1);
|
||||
|
||||
if (config_offset < 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
|
||||
skip_bits_long(&gb, config_offset);
|
||||
|
||||
if (get_bits_left(&gb) < (30 << 3))
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
|
||||
// read the fixed items
|
||||
als_id = get_bits_long(&gb, 32);
|
||||
@@ -336,7 +336,7 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
|
||||
|
||||
// check for ALSSpecificConfig struct
|
||||
if (als_id != MKBETAG('A','L','S','\0'))
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
|
||||
ctx->cur_frame_length = sconf->frame_length;
|
||||
|
||||
@@ -351,7 +351,7 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
|
||||
int chan_pos_bits = av_ceil_log2(avctx->channels);
|
||||
int bits_needed = avctx->channels * chan_pos_bits + 7;
|
||||
if (get_bits_left(&gb) < bits_needed)
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
|
||||
if (!(sconf->chan_pos = av_malloc(avctx->channels * sizeof(*sconf->chan_pos))))
|
||||
return AVERROR(ENOMEM);
|
||||
@@ -377,7 +377,7 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
|
||||
// read fixed header and trailer sizes,
|
||||
// if size = 0xFFFFFFFF then there is no data field!
|
||||
if (get_bits_left(&gb) < 64)
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
|
||||
header_size = get_bits_long(&gb, 32);
|
||||
trailer_size = get_bits_long(&gb, 32);
|
||||
@@ -391,10 +391,10 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
|
||||
|
||||
// skip the header and trailer data
|
||||
if (get_bits_left(&gb) < ht_size)
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
|
||||
if (ht_size > INT32_MAX)
|
||||
return AVERROR_PATCHWELCOME;
|
||||
return -1;
|
||||
|
||||
skip_bits_long(&gb, ht_size);
|
||||
|
||||
@@ -402,7 +402,7 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
|
||||
// initialize CRC calculation
|
||||
if (sconf->crc_enabled) {
|
||||
if (get_bits_left(&gb) < 32)
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
|
||||
if (avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_CAREFUL)) {
|
||||
ctx->crc_table = av_crc_get_table(AV_CRC_32_IEEE_LE);
|
||||
@@ -646,7 +646,7 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
if (bd->block_length & (sub_blocks - 1)) {
|
||||
av_log(avctx, AV_LOG_WARNING,
|
||||
"Block length is not evenly divisible by the number of subblocks.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
|
||||
sb_length = bd->block_length >> log2_sub_blocks;
|
||||
@@ -983,13 +983,14 @@ static int read_block(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
*bd->shift_lsbs = 0;
|
||||
// read block type flag and read the samples accordingly
|
||||
if (get_bits1(gb)) {
|
||||
ret = read_var_block_data(ctx, bd);
|
||||
if ((ret = read_var_block_data(ctx, bd)) < 0)
|
||||
return ret;
|
||||
} else {
|
||||
if ((ret = read_const_block_data(ctx, bd)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -998,16 +999,12 @@ static int read_block(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
static int decode_block(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
{
|
||||
unsigned int smp;
|
||||
int ret = 0;
|
||||
|
||||
// read block type flag and read the samples accordingly
|
||||
if (*bd->const_block)
|
||||
decode_const_block_data(ctx, bd);
|
||||
else
|
||||
ret = decode_var_block_data(ctx, bd); // always return 0
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
else if (decode_var_block_data(ctx, bd))
|
||||
return -1;
|
||||
|
||||
// TODO: read RLSLMS extension data
|
||||
|
||||
@@ -1025,10 +1022,14 @@ static int read_decode_block(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if ((ret = read_block(ctx, bd)) < 0)
|
||||
ret = read_block(ctx, bd);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return decode_block(ctx, bd);
|
||||
ret = decode_block(ctx, bd);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -1054,7 +1055,6 @@ static int decode_blocks_ind(ALSDecContext *ctx, unsigned int ra_frame,
|
||||
unsigned int c, const unsigned int *div_blocks,
|
||||
unsigned int *js_blocks)
|
||||
{
|
||||
int ret;
|
||||
unsigned int b;
|
||||
ALSBlockData bd = { 0 };
|
||||
|
||||
@@ -1075,10 +1075,10 @@ static int decode_blocks_ind(ALSDecContext *ctx, unsigned int ra_frame,
|
||||
for (b = 0; b < ctx->num_blocks; b++) {
|
||||
bd.block_length = div_blocks[b];
|
||||
|
||||
if ((ret = read_decode_block(ctx, &bd)) < 0) {
|
||||
if (read_decode_block(ctx, &bd)) {
|
||||
// damaged block, write zero for the rest of the frame
|
||||
zero_remaining(b, ctx->num_blocks, div_blocks, bd.raw_samples);
|
||||
return ret;
|
||||
return -1;
|
||||
}
|
||||
bd.raw_samples += div_blocks[b];
|
||||
bd.ra_block = 0;
|
||||
@@ -1097,7 +1097,6 @@ static int decode_blocks(ALSDecContext *ctx, unsigned int ra_frame,
|
||||
ALSSpecificConfig *sconf = &ctx->sconf;
|
||||
unsigned int offset = 0;
|
||||
unsigned int b;
|
||||
int ret;
|
||||
ALSBlockData bd[2] = { { 0 } };
|
||||
|
||||
bd[0].ra_block = ra_frame;
|
||||
@@ -1139,9 +1138,12 @@ static int decode_blocks(ALSDecContext *ctx, unsigned int ra_frame,
|
||||
bd[0].raw_other = bd[1].raw_samples;
|
||||
bd[1].raw_other = bd[0].raw_samples;
|
||||
|
||||
if ((ret = read_decode_block(ctx, &bd[0])) < 0 ||
|
||||
(ret = read_decode_block(ctx, &bd[1])) < 0)
|
||||
goto fail;
|
||||
if(read_decode_block(ctx, &bd[0]) || read_decode_block(ctx, &bd[1])) {
|
||||
// damaged block, write zero for the rest of the frame
|
||||
zero_remaining(b, ctx->num_blocks, div_blocks, bd[0].raw_samples);
|
||||
zero_remaining(b, ctx->num_blocks, div_blocks, bd[1].raw_samples);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// reconstruct joint-stereo blocks
|
||||
if (bd[0].js_blocks) {
|
||||
@@ -1167,19 +1169,8 @@ static int decode_blocks(ALSDecContext *ctx, unsigned int ra_frame,
|
||||
sizeof(*ctx->raw_samples[c]) * sconf->max_order);
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
// damaged block, write zero for the rest of the frame
|
||||
zero_remaining(b, ctx->num_blocks, div_blocks, bd[0].raw_samples);
|
||||
zero_remaining(b, ctx->num_blocks, div_blocks, bd[1].raw_samples);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int als_weighting(GetBitContext *gb, int k, int off)
|
||||
{
|
||||
int idx = av_clip(decode_rice(gb, k) + off,
|
||||
0, FF_ARRAY_ELEMS(mcc_weightings) - 1);
|
||||
return mcc_weightings[idx];
|
||||
}
|
||||
|
||||
/** Read the channel data.
|
||||
*/
|
||||
@@ -1195,19 +1186,19 @@ static int read_channel_data(ALSDecContext *ctx, ALSChannelData *cd, int c)
|
||||
|
||||
if (current->master_channel >= channels) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "Invalid master channel.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (current->master_channel != c) {
|
||||
current->time_diff_flag = get_bits1(gb);
|
||||
current->weighting[0] = als_weighting(gb, 1, 16);
|
||||
current->weighting[1] = als_weighting(gb, 2, 14);
|
||||
current->weighting[2] = als_weighting(gb, 1, 16);
|
||||
current->weighting[0] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
|
||||
current->weighting[1] = mcc_weightings[av_clip(decode_rice(gb, 2) + 14, 0, 31)];
|
||||
current->weighting[2] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
|
||||
|
||||
if (current->time_diff_flag) {
|
||||
current->weighting[3] = als_weighting(gb, 1, 16);
|
||||
current->weighting[4] = als_weighting(gb, 1, 16);
|
||||
current->weighting[5] = als_weighting(gb, 1, 16);
|
||||
current->weighting[3] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
|
||||
current->weighting[4] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
|
||||
current->weighting[5] = mcc_weightings[av_clip(decode_rice(gb, 1) + 16, 0, 31)];
|
||||
|
||||
current->time_diff_sign = get_bits1(gb);
|
||||
current->time_diff_index = get_bits(gb, ctx->ltp_lag_length - 3) + 3;
|
||||
@@ -1220,7 +1211,7 @@ static int read_channel_data(ALSDecContext *ctx, ALSChannelData *cd, int c)
|
||||
|
||||
if (entries == channels) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "Damaged channel data.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
|
||||
align_get_bits(gb);
|
||||
@@ -1252,7 +1243,7 @@ static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd,
|
||||
|
||||
if (dep == channels) {
|
||||
av_log(ctx->avctx, AV_LOG_WARNING, "Invalid channel correlation.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
|
||||
bd->const_block = ctx->const_block + c;
|
||||
@@ -1323,8 +1314,8 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
|
||||
unsigned int div_blocks[32]; ///< block sizes.
|
||||
unsigned int c;
|
||||
unsigned int js_blocks[2];
|
||||
|
||||
uint32_t bs_info = 0;
|
||||
int ret;
|
||||
|
||||
// skip the size of the ra unit if present in the frame
|
||||
if (sconf->ra_flag == RA_FLAG_FRAMES && ra_frame)
|
||||
@@ -1355,15 +1346,13 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
|
||||
independent_bs = 1;
|
||||
|
||||
if (independent_bs) {
|
||||
ret = decode_blocks_ind(ctx, ra_frame, c,
|
||||
div_blocks, js_blocks);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (decode_blocks_ind(ctx, ra_frame, c, div_blocks, js_blocks))
|
||||
return -1;
|
||||
|
||||
independent_bs--;
|
||||
} else {
|
||||
ret = decode_blocks(ctx, ra_frame, c, div_blocks, js_blocks);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (decode_blocks(ctx, ra_frame, c, div_blocks, js_blocks))
|
||||
return -1;
|
||||
|
||||
c++;
|
||||
}
|
||||
@@ -1382,7 +1371,7 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
|
||||
for (c = 0; c < avctx->channels; c++)
|
||||
if (ctx->chan_data[c] < ctx->chan_data_buffer) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "Invalid channel data.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
|
||||
memset(reverted_channels, 0, sizeof(*reverted_channels) * avctx->channels);
|
||||
@@ -1394,11 +1383,6 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
|
||||
|
||||
for (b = 0; b < ctx->num_blocks; b++) {
|
||||
bd.block_length = div_blocks[b];
|
||||
if (bd.block_length <= 0) {
|
||||
av_log(ctx->avctx, AV_LOG_WARNING,
|
||||
"Invalid block length %d in channel data!\n", bd.block_length);
|
||||
continue;
|
||||
}
|
||||
|
||||
for (c = 0; c < avctx->channels; c++) {
|
||||
bd.const_block = ctx->const_block + c;
|
||||
@@ -1419,12 +1403,11 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
|
||||
return ret;
|
||||
}
|
||||
|
||||
for (c = 0; c < avctx->channels; c++) {
|
||||
ret = revert_channel_correlation(ctx, &bd, ctx->chan_data,
|
||||
reverted_channels, offset, c);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
for (c = 0; c < avctx->channels; c++)
|
||||
if (revert_channel_correlation(ctx, &bd, ctx->chan_data,
|
||||
reverted_channels, offset, c))
|
||||
return -1;
|
||||
|
||||
for (c = 0; c < avctx->channels; c++) {
|
||||
bd.const_block = ctx->const_block + c;
|
||||
bd.shift_lsbs = ctx->shift_lsbs + c;
|
||||
@@ -1629,30 +1612,30 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
unsigned int c;
|
||||
unsigned int channel_size;
|
||||
int num_buffers, ret;
|
||||
int num_buffers;
|
||||
ALSDecContext *ctx = avctx->priv_data;
|
||||
ALSSpecificConfig *sconf = &ctx->sconf;
|
||||
ctx->avctx = avctx;
|
||||
|
||||
if (!avctx->extradata) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Missing required ALS extradata.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((ret = read_specific_config(ctx)) < 0) {
|
||||
if (read_specific_config(ctx)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Reading ALSSpecificConfig failed.\n");
|
||||
goto fail;
|
||||
decode_end(avctx);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((ret = check_specific_config(ctx)) < 0) {
|
||||
goto fail;
|
||||
if (check_specific_config(ctx)) {
|
||||
decode_end(avctx);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (sconf->bgmc) {
|
||||
ret = ff_bgmc_init(avctx, &ctx->bgmc_lut, &ctx->bgmc_lut_status);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
}
|
||||
if (sconf->bgmc)
|
||||
ff_bgmc_init(avctx, &ctx->bgmc_lut, &ctx->bgmc_lut_status);
|
||||
|
||||
if (sconf->floating) {
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
|
||||
avctx->bits_per_raw_sample = 32;
|
||||
@@ -1687,8 +1670,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
!ctx->quant_cof_buffer || !ctx->lpc_cof_buffer ||
|
||||
!ctx->lpc_cof_reversed_buffer) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
// assign quantized parcor coefficient buffers
|
||||
@@ -1713,8 +1695,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
!ctx->use_ltp || !ctx->ltp_lag ||
|
||||
!ctx->ltp_gain || !ctx->ltp_gain_buffer) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
decode_end(avctx);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
for (c = 0; c < num_buffers; c++)
|
||||
@@ -1731,8 +1713,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
|
||||
if (!ctx->chan_data_buffer || !ctx->chan_data || !ctx->reverted_channels) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
decode_end(avctx);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
for (c = 0; c < num_buffers; c++)
|
||||
@@ -1752,8 +1734,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
// allocate previous raw sample buffer
|
||||
if (!ctx->prev_raw_samples || !ctx->raw_buffer|| !ctx->raw_samples) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
decode_end(avctx);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
// assign raw samples buffers
|
||||
@@ -1770,8 +1752,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
av_get_bytes_per_sample(avctx->sample_fmt));
|
||||
if (!ctx->crc_buffer) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Allocating buffer memory failed.\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
decode_end(avctx);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1781,10 +1763,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
avctx->coded_frame = &ctx->frame;
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
decode_end(avctx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
|
@@ -415,7 +415,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
switch(buf[0]) {
|
||||
case '0': case '1': case '2': case '3': case '4':
|
||||
case '5': case '6': case '7': case '8': case '9':
|
||||
if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args] < 6553)
|
||||
if (s->nb_args < MAX_NB_ARGS)
|
||||
s->args[s->nb_args] = FFMAX(s->args[s->nb_args], 0) * 10 + buf[0] - '0';
|
||||
break;
|
||||
case ';':
|
||||
|
@@ -144,11 +144,10 @@ function ff_put_pixels8_y2_armv6, export=1
|
||||
eor r7, r5, r7
|
||||
uadd8 r10, r10, r6
|
||||
and r7, r7, r12
|
||||
ldrc_pre ne, r6, r1, r2
|
||||
ldr_pre r6, r1, r2
|
||||
uadd8 r11, r11, r7
|
||||
strd_post r8, r9, r0, r2
|
||||
it ne
|
||||
ldrne r7, [r1, #4]
|
||||
ldr r7, [r1, #4]
|
||||
strd_post r10, r11, r0, r2
|
||||
bne 1b
|
||||
|
||||
@@ -197,10 +196,9 @@ function ff_put_pixels8_y2_no_rnd_armv6, export=1
|
||||
uhadd8 r9, r5, r7
|
||||
ldr r5, [r1, #4]
|
||||
uhadd8 r12, r4, r6
|
||||
ldrc_pre ne, r6, r1, r2
|
||||
ldr_pre r6, r1, r2
|
||||
uhadd8 r14, r5, r7
|
||||
it ne
|
||||
ldrne r7, [r1, #4]
|
||||
ldr r7, [r1, #4]
|
||||
stm r0, {r8,r9}
|
||||
add r0, r0, r2
|
||||
stm r0, {r12,r14}
|
||||
|
@@ -91,7 +91,7 @@ static void ff_h264dsp_init_neon(H264DSPContext *c, const int bit_depth, const i
|
||||
c->h264_idct_dc_add = ff_h264_idct_dc_add_neon;
|
||||
c->h264_idct_add16 = ff_h264_idct_add16_neon;
|
||||
c->h264_idct_add16intra = ff_h264_idct_add16intra_neon;
|
||||
if (chroma_format_idc <= 1)
|
||||
if (chroma_format_idc == 1)
|
||||
c->h264_idct_add8 = ff_h264_idct_add8_neon;
|
||||
c->h264_idct8_add = ff_h264_idct8_add_neon;
|
||||
c->h264_idct8_dc_add = ff_h264_idct8_dc_add_neon;
|
||||
|
@@ -41,10 +41,10 @@ function ff_scalarproduct_int16_neon, export=1
|
||||
|
||||
vpadd.s32 d16, d0, d1
|
||||
vpadd.s32 d17, d2, d3
|
||||
vpadd.s32 d18, d4, d5
|
||||
vpadd.s32 d19, d6, d7
|
||||
vpadd.s32 d10, d4, d5
|
||||
vpadd.s32 d11, d6, d7
|
||||
vpadd.s32 d0, d16, d17
|
||||
vpadd.s32 d1, d18, d19
|
||||
vpadd.s32 d1, d10, d11
|
||||
vpadd.s32 d2, d0, d1
|
||||
vpaddl.s32 d3, d2
|
||||
vmov.32 r0, d3[0]
|
||||
@@ -81,10 +81,10 @@ function ff_scalarproduct_and_madd_int16_neon, export=1
|
||||
|
||||
vpadd.s32 d16, d0, d1
|
||||
vpadd.s32 d17, d2, d3
|
||||
vpadd.s32 d18, d4, d5
|
||||
vpadd.s32 d19, d6, d7
|
||||
vpadd.s32 d10, d4, d5
|
||||
vpadd.s32 d11, d6, d7
|
||||
vpadd.s32 d0, d16, d17
|
||||
vpadd.s32 d1, d18, d19
|
||||
vpadd.s32 d1, d10, d11
|
||||
vpadd.s32 d2, d0, d1
|
||||
vpaddl.s32 d3, d2
|
||||
vmov.32 r0, d3[0]
|
||||
|
@@ -22,6 +22,7 @@
|
||||
#include "config.h"
|
||||
#include "libavutil/arm/asm.S"
|
||||
|
||||
#if HAVE_ARMV5TE_EXTERNAL
|
||||
function ff_prefetch_arm, export=1
|
||||
subs r2, r2, #1
|
||||
pld [r0]
|
||||
@@ -29,3 +30,4 @@ function ff_prefetch_arm, export=1
|
||||
bne ff_prefetch_arm
|
||||
bx lr
|
||||
endfunc
|
||||
#endif
|
||||
|
@@ -19,7 +19,7 @@
|
||||
*/
|
||||
|
||||
#include "libavutil/arm/cpu.h"
|
||||
#include "libavcodec/videodsp.h"
|
||||
#include <libavcodec/videodsp.h>
|
||||
#include "videodsp_arm.h"
|
||||
|
||||
void ff_prefetch_arm(uint8_t *mem, ptrdiff_t stride, int h);
|
||||
|
@@ -124,14 +124,14 @@ function ff_vp8_luma_dc_wht_armv6, export=1
|
||||
sbfx r1, r9, #3, #13
|
||||
sbfx r10, r4, #3, #13
|
||||
#else
|
||||
sxth r6, r8
|
||||
sxth r12, r7
|
||||
sxth r1, r9
|
||||
sxth r10, r4
|
||||
asr r6, #3 @ block[0][0]
|
||||
asr r12, #3 @ block[0][1]
|
||||
asr r1, #3 @ block[0][2]
|
||||
asr r10, #3 @ block[0][3]
|
||||
sxth r8, r8
|
||||
sxth r7, r7
|
||||
sxth r9, r9
|
||||
sxth r4, r4
|
||||
asr r8, #3 @ block[0][0]
|
||||
asr r7, #3 @ block[0][1]
|
||||
asr r9, #3 @ block[0][2]
|
||||
asr r4, #3 @ block[0][3]
|
||||
#endif
|
||||
|
||||
strh r6, [r0], #32
|
||||
|
@@ -282,11 +282,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
const int scale = avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
|
||||
int i;
|
||||
|
||||
if (avctx->extradata_size < 1) {
|
||||
av_log(avctx, AV_LOG_ERROR, "No extradata provided\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
ff_asv_common_init(avctx);
|
||||
init_vlcs(a);
|
||||
ff_init_scantable(a->dsp.idct_permutation, &a->scantable, ff_asv_scantab);
|
||||
|
@@ -165,10 +165,7 @@ static int decode_bytes(const uint8_t *input, uint8_t *out, int bytes)
|
||||
|
||||
off = (intptr_t)input & 3;
|
||||
buf = (const uint32_t *)(input - off);
|
||||
if (off)
|
||||
c = av_be2ne32((0x537F6103U >> (off * 8)) | (0x537F6103U << (32 - (off * 8))));
|
||||
else
|
||||
c = av_be2ne32(0x537F6103U);
|
||||
c = av_be2ne32((0x537F6103 >> (off * 8)) | (0x537F6103 << (32 - (off * 8))));
|
||||
bytes += 3 + off;
|
||||
for (i = 0; i < bytes / 4; i++)
|
||||
output[i] = c ^ buf[i];
|
||||
@@ -521,7 +518,7 @@ static int add_tonal_components(float *spectrum, int num_components,
|
||||
output = &spectrum[components[i].pos];
|
||||
|
||||
for (j = 0; j < components[i].num_coefs; j++)
|
||||
output[j] += input[j];
|
||||
output[i] += input[i];
|
||||
}
|
||||
|
||||
return last_pos;
|
||||
@@ -665,8 +662,8 @@ static int decode_channel_sound_unit(ATRAC3Context *q, GetBitContext *gb,
|
||||
|
||||
snd->num_components = decode_tonal_components(gb, snd->components,
|
||||
snd->bands_coded);
|
||||
if (snd->num_components < 0)
|
||||
return snd->num_components;
|
||||
if (snd->num_components == -1)
|
||||
return -1;
|
||||
|
||||
num_subbands = decode_spectrum(gb, snd->spectrum);
|
||||
|
||||
@@ -743,7 +740,7 @@ static int decode_frame(AVCodecContext *avctx, const uint8_t *databuf,
|
||||
|
||||
|
||||
/* set the bitstream reader at the start of the second Sound Unit*/
|
||||
init_get_bits(&q->gb, ptr1, (avctx->block_align - i) * 8);
|
||||
init_get_bits(&q->gb, ptr1, avctx->block_align * 8);
|
||||
|
||||
/* Fill the Weighting coeffs delay buffer */
|
||||
memmove(q->weighting_delay, &q->weighting_delay[2],
|
||||
@@ -952,11 +949,9 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
|
||||
|
||||
if (q->coding_mode == STEREO)
|
||||
av_log(avctx, AV_LOG_DEBUG, "Normal stereo detected.\n");
|
||||
else if (q->coding_mode == JOINT_STEREO) {
|
||||
if (avctx->channels != 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
else if (q->coding_mode == JOINT_STEREO)
|
||||
av_log(avctx, AV_LOG_DEBUG, "Joint stereo detected.\n");
|
||||
} else {
|
||||
else {
|
||||
av_log(avctx, AV_LOG_ERROR, "Unknown channel coding mode %x!\n",
|
||||
q->coding_mode);
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
@@ -283,7 +283,7 @@ int av_packet_split_side_data(AVPacket *pkt){
|
||||
for (i=0; ; i++){
|
||||
size= AV_RB32(p);
|
||||
av_assert0(size<=INT_MAX && p - pkt->data >= size);
|
||||
pkt->side_data[i].data = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
pkt->side_data[i].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
pkt->side_data[i].size = size;
|
||||
pkt->side_data[i].type = p[4]&127;
|
||||
if (!pkt->side_data[i].data)
|
||||
|
@@ -170,7 +170,7 @@ static void init_lengths(BinkContext *c, int width, int bw)
|
||||
*
|
||||
* @param c decoder context
|
||||
*/
|
||||
static av_cold int init_bundles(BinkContext *c)
|
||||
static av_cold void init_bundles(BinkContext *c)
|
||||
{
|
||||
int bw, bh, blocks;
|
||||
int i;
|
||||
@@ -181,12 +181,8 @@ static av_cold int init_bundles(BinkContext *c)
|
||||
|
||||
for (i = 0; i < BINKB_NB_SRC; i++) {
|
||||
c->bundle[i].data = av_malloc(blocks * 64);
|
||||
if (!c->bundle[i].data)
|
||||
return AVERROR(ENOMEM);
|
||||
c->bundle[i].data_end = c->bundle[i].data + blocks * 64;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -685,9 +681,6 @@ static int read_dct_coeffs(GetBitContext *gb, int32_t block[64], const uint8_t *
|
||||
}
|
||||
}
|
||||
|
||||
if (quant_idx >= 16)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
quant = quant_matrices[quant_idx];
|
||||
|
||||
block[0] = (block[0] * quant[0]) >> 11;
|
||||
@@ -1271,7 +1264,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
BinkContext * const c = avctx->priv_data;
|
||||
static VLC_TYPE table[16 * 128][2];
|
||||
static int binkb_initialised = 0;
|
||||
int i, ret;
|
||||
int i;
|
||||
int flags;
|
||||
|
||||
c->version = avctx->codec_tag >> 24;
|
||||
@@ -1306,10 +1299,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
ff_dsputil_init(&c->dsp, avctx);
|
||||
ff_binkdsp_init(&c->bdsp);
|
||||
|
||||
if ((ret = init_bundles(c)) < 0) {
|
||||
free_bundles(c);
|
||||
return ret;
|
||||
}
|
||||
init_bundles(c);
|
||||
|
||||
if (c->version == 'b') {
|
||||
if (!binkb_initialised) {
|
||||
|
@@ -140,7 +140,7 @@ static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame,
|
||||
mode += 1 + advance_mode;
|
||||
if (mode >= 4)
|
||||
mode -= 3;
|
||||
if (len <= 0 || FFABS(dst_end - dst) < len)
|
||||
if (FFABS(dst_end - dst) < len)
|
||||
return -1;
|
||||
switch (mode) {
|
||||
case 1:
|
||||
|
@@ -325,32 +325,6 @@ static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
|
||||
return p->eof;
|
||||
}
|
||||
|
||||
static av_always_inline unsigned int bytestream2_copy_bufferu(PutByteContext *p,
|
||||
GetByteContext *g,
|
||||
unsigned int size)
|
||||
{
|
||||
memcpy(p->buffer, g->buffer, size);
|
||||
p->buffer += size;
|
||||
g->buffer += size;
|
||||
return size;
|
||||
}
|
||||
|
||||
static av_always_inline unsigned int bytestream2_copy_buffer(PutByteContext *p,
|
||||
GetByteContext *g,
|
||||
unsigned int size)
|
||||
{
|
||||
int size2;
|
||||
|
||||
if (p->eof)
|
||||
return 0;
|
||||
size = FFMIN(g->buffer_end - g->buffer, size);
|
||||
size2 = FFMIN(p->buffer_end - p->buffer, size);
|
||||
if (size2 != size)
|
||||
p->eof = 1;
|
||||
|
||||
return bytestream2_copy_bufferu(p, g, size2);
|
||||
}
|
||||
|
||||
static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b,
|
||||
uint8_t *dst,
|
||||
unsigned int size)
|
||||
|
@@ -305,7 +305,7 @@ STOP_TIMER("get_cabac_bypass")
|
||||
|
||||
for(i=0; i<SIZE; i++){
|
||||
START_TIMER
|
||||
if( (r[i]&1) != get_cabac_noinline(&c, state) )
|
||||
if( (r[i]&1) != get_cabac(&c, state) )
|
||||
av_log(NULL, AV_LOG_ERROR, "CABAC failure at %d\n", i);
|
||||
STOP_TIMER("get_cabac")
|
||||
}
|
||||
|
@@ -604,9 +604,8 @@ static inline int decode_residual_inter(AVSContext *h)
|
||||
|
||||
/* get coded block pattern */
|
||||
int cbp = get_ue_golomb(&h->gb);
|
||||
|
||||
if (cbp > 63 || cbp < 0) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "illegal inter cbp %d\n", cbp);
|
||||
if (cbp > 63U) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "illegal inter cbp\n");
|
||||
return -1;
|
||||
}
|
||||
h->cbp = cbp_tab[cbp][1];
|
||||
@@ -676,7 +675,7 @@ static int decode_mb_i(AVSContext *h, int cbp_code)
|
||||
/* get coded block pattern */
|
||||
if (h->cur.f->pict_type == AV_PICTURE_TYPE_I)
|
||||
cbp_code = get_ue_golomb(gb);
|
||||
if (cbp_code > 63 || cbp_code < 0) {
|
||||
if (cbp_code > 63U) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "illegal intra cbp\n");
|
||||
return -1;
|
||||
}
|
||||
@@ -945,11 +944,6 @@ static int decode_pic(AVSContext *h)
|
||||
int ret;
|
||||
enum cavs_mb mb_type;
|
||||
|
||||
if (!h->top_qp) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "No sequence header decoded yet\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
skip_bits(&h->gb, 16);//bbv_dwlay
|
||||
if (h->stc == PIC_PB_START_CODE) {
|
||||
h->cur.f->pict_type = get_bits(&h->gb, 2) + AV_PICTURE_TYPE_I;
|
||||
@@ -979,8 +973,7 @@ static int decode_pic(AVSContext *h)
|
||||
if (h->cur.f->data[0])
|
||||
h->avctx->release_buffer(h->avctx, h->cur.f);
|
||||
|
||||
ret = ff_get_buffer(h->avctx, h->cur.f);
|
||||
if (ret < 0)
|
||||
if ((ret = ff_get_buffer(h->avctx, h->cur.f)) < 0)
|
||||
return ret;
|
||||
|
||||
if (!h->edge_emu_buffer) {
|
||||
|
@@ -296,9 +296,7 @@ static int cdg_decode_frame(AVCodecContext *avctx,
|
||||
inst = bytestream_get_byte(&buf);
|
||||
inst &= CDG_MASK;
|
||||
buf += 2; /// skipping 2 unneeded bytes
|
||||
|
||||
if (buf_size > CDG_HEADER_SIZE)
|
||||
bytestream_get_buffer(&buf, cdg_data, buf_size - CDG_HEADER_SIZE);
|
||||
bytestream_get_buffer(&buf, cdg_data, buf_size - CDG_HEADER_SIZE);
|
||||
|
||||
if ((command & CDG_MASK) == CDG_COMMAND) {
|
||||
switch (inst) {
|
||||
|
@@ -738,10 +738,10 @@ static int dca_parse_frame_header(DCAContext *s)
|
||||
s->lfe = get_bits(&s->gb, 2);
|
||||
s->predictor_history = get_bits(&s->gb, 1);
|
||||
|
||||
if (s->lfe > 2) {
|
||||
if (s->lfe == 3) {
|
||||
s->lfe = 0;
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Invalid LFE value: %d\n", s->lfe);
|
||||
return AVERROR_INVALIDDATA;
|
||||
av_log_ask_for_sample(s->avctx, "LFE is 3\n");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
/* TODO: check CRC */
|
||||
@@ -971,13 +971,7 @@ static int dca_subframe_header(DCAContext *s, int base_channel, int block_index)
|
||||
"Invalid channel mode %d\n", am);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (s->prim_channels > FF_ARRAY_ELEMS(dca_default_coeffs[0])) {
|
||||
av_log_ask_for_sample(s->avctx, "Downmixing %d channels",
|
||||
s->prim_channels);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
for (j = base_channel; j < s->prim_channels; j++) {
|
||||
for (j = base_channel; j < FFMIN(s->prim_channels, FF_ARRAY_ELEMS(dca_default_coeffs[am])); j++) {
|
||||
s->downmix_coef[j][0] = dca_default_coeffs[am][j][0];
|
||||
s->downmix_coef[j][1] = dca_default_coeffs[am][j][1];
|
||||
}
|
||||
@@ -1431,7 +1425,6 @@ static int dca_subsubframe(DCAContext *s, int base_channel, int block_index)
|
||||
#endif
|
||||
} else {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Didn't get subframe DSYNC\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -263,8 +263,6 @@ static int decode_wdlt(GetByteContext *gb, uint8_t *frame, int width, int height
|
||||
segments = bytestream2_get_le16(gb);
|
||||
}
|
||||
line_ptr = frame;
|
||||
if (frame_end - frame < width)
|
||||
return AVERROR_INVALIDDATA;
|
||||
frame += width;
|
||||
y++;
|
||||
while (segments--) {
|
||||
|
@@ -237,7 +237,7 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
|
||||
|
||||
avctx->pix_fmt = dirac_pix_fmt[!luma_offset][source->chroma_format];
|
||||
avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_x_shift, &chroma_y_shift);
|
||||
if ((source->width % (1<<chroma_x_shift)) || (source->height % (1<<chroma_y_shift))) {
|
||||
if (!(source->width % (1<<chroma_x_shift)) || !(source->height % (1<<chroma_y_shift))) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Dimensions must be a integer multiply of the chroma subsampling\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
@@ -28,7 +28,6 @@
|
||||
#ifndef AVCODEC_DIRAC_ARITH_H
|
||||
#define AVCODEC_DIRAC_ARITH_H
|
||||
|
||||
#include "libavutil/x86/asm.h"
|
||||
#include "bytestream.h"
|
||||
#include "get_bits.h"
|
||||
|
||||
@@ -135,7 +134,7 @@ static inline int dirac_get_arith_bit(DiracArith *c, int ctx)
|
||||
|
||||
range_times_prob = (c->range * prob_zero) >> 16;
|
||||
|
||||
#if HAVE_FAST_CMOV && HAVE_INLINE_ASM && HAVE_6REGS
|
||||
#if HAVE_FAST_CMOV && HAVE_INLINE_ASM
|
||||
low -= range_times_prob << 16;
|
||||
range -= range_times_prob;
|
||||
bit = 0;
|
||||
|
@@ -200,7 +200,6 @@ typedef struct DiracContext {
|
||||
|
||||
uint16_t *mctmp; /* buffer holding the MC data multipled by OBMC weights */
|
||||
uint8_t *mcscratch;
|
||||
int buffer_stride;
|
||||
|
||||
DECLARE_ALIGNED(16, uint8_t, obmc_weight)[3][MAX_BLOCKSIZE*MAX_BLOCKSIZE];
|
||||
|
||||
@@ -343,44 +342,22 @@ static int alloc_sequence_buffers(DiracContext *s)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
w = s->source.width;
|
||||
h = s->source.height;
|
||||
|
||||
/* fixme: allocate using real stride here */
|
||||
s->sbsplit = av_malloc_array(sbwidth, sbheight);
|
||||
s->blmotion = av_malloc_array(sbwidth, sbheight * 16 * sizeof(*s->blmotion));
|
||||
s->sbsplit = av_malloc(sbwidth * sbheight);
|
||||
s->blmotion = av_malloc(sbwidth * sbheight * 16 * sizeof(*s->blmotion));
|
||||
s->edge_emu_buffer_base = av_malloc((w+64)*MAX_BLOCKSIZE);
|
||||
|
||||
if (!s->sbsplit || !s->blmotion)
|
||||
s->mctmp = av_malloc((w+64+MAX_BLOCKSIZE) * (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
|
||||
s->mcscratch = av_malloc((w+64)*MAX_BLOCKSIZE);
|
||||
|
||||
if (!s->sbsplit || !s->blmotion || !s->mctmp || !s->mcscratch)
|
||||
return AVERROR(ENOMEM);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int alloc_buffers(DiracContext *s, int stride)
|
||||
{
|
||||
int w = s->source.width;
|
||||
int h = s->source.height;
|
||||
|
||||
av_assert0(stride >= w);
|
||||
stride += 64;
|
||||
|
||||
if (s->buffer_stride >= stride)
|
||||
return 0;
|
||||
s->buffer_stride = 0;
|
||||
|
||||
av_freep(&s->edge_emu_buffer_base);
|
||||
memset(s->edge_emu_buffer, 0, sizeof(s->edge_emu_buffer));
|
||||
av_freep(&s->mctmp);
|
||||
av_freep(&s->mcscratch);
|
||||
|
||||
s->edge_emu_buffer_base = av_malloc_array(stride, MAX_BLOCKSIZE);
|
||||
|
||||
s->mctmp = av_malloc_array((stride+MAX_BLOCKSIZE), (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
|
||||
s->mcscratch = av_malloc_array(stride, MAX_BLOCKSIZE);
|
||||
|
||||
if (!s->edge_emu_buffer_base || !s->mctmp || !s->mcscratch)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
s->buffer_stride = stride;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void free_sequence_buffers(DiracContext *s)
|
||||
{
|
||||
int i, j, k;
|
||||
@@ -404,7 +381,6 @@ static void free_sequence_buffers(DiracContext *s)
|
||||
av_freep(&s->plane[i].idwt_tmp);
|
||||
}
|
||||
|
||||
s->buffer_stride = 0;
|
||||
av_freep(&s->sbsplit);
|
||||
av_freep(&s->blmotion);
|
||||
av_freep(&s->edge_emu_buffer_base);
|
||||
@@ -1366,8 +1342,8 @@ static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5],
|
||||
motion_y >>= s->chroma_y_shift;
|
||||
}
|
||||
|
||||
mx = motion_x & ~(-1U << s->mv_precision);
|
||||
my = motion_y & ~(-1U << s->mv_precision);
|
||||
mx = motion_x & ~(-1 << s->mv_precision);
|
||||
my = motion_y & ~(-1 << s->mv_precision);
|
||||
motion_x >>= s->mv_precision;
|
||||
motion_y >>= s->mv_precision;
|
||||
/* normalize subpel coordinates to epel */
|
||||
@@ -1841,9 +1817,6 @@ static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int
|
||||
s->plane[1].stride = pic->avframe.linesize[1];
|
||||
s->plane[2].stride = pic->avframe.linesize[2];
|
||||
|
||||
if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
/* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
|
||||
if (dirac_decode_picture_header(s))
|
||||
return -1;
|
||||
|
@@ -406,7 +406,7 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
|
||||
*picture = ctx->picture;
|
||||
*got_frame = 1;
|
||||
return avpkt->size;
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
static av_cold int dnxhd_decode_close(AVCodecContext *avctx)
|
||||
|
@@ -235,7 +235,7 @@ static int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
|
||||
|
||||
static int dnxhd_init_rc(DNXHDEncContext *ctx)
|
||||
{
|
||||
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160*(ctx->m.avctx->qmax + 1)*sizeof(RCEntry), fail);
|
||||
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160*ctx->m.avctx->qmax*sizeof(RCEntry), fail);
|
||||
if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD)
|
||||
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_cmp, ctx->m.mb_num*sizeof(RCCMPEntry), fail);
|
||||
|
||||
@@ -629,35 +629,14 @@ static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx)
|
||||
static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
|
||||
{
|
||||
DNXHDEncContext *ctx = avctx->priv_data;
|
||||
int mb_y = jobnr, mb_x, x, y;
|
||||
int partial_last_row = (mb_y == ctx->m.mb_height - 1) &&
|
||||
((avctx->height >> ctx->interlaced) & 0xF);
|
||||
|
||||
int mb_y = jobnr, mb_x;
|
||||
ctx = ctx->thread[threadnr];
|
||||
if (ctx->cid_table->bit_depth == 8) {
|
||||
uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y<<4) * ctx->m.linesize);
|
||||
for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x, pix += 16) {
|
||||
unsigned mb = mb_y * ctx->m.mb_width + mb_x;
|
||||
int sum;
|
||||
int varc;
|
||||
|
||||
if (!partial_last_row && mb_x * 16 <= avctx->width - 16) {
|
||||
sum = ctx->m.dsp.pix_sum(pix, ctx->m.linesize);
|
||||
varc = ctx->m.dsp.pix_norm1(pix, ctx->m.linesize);
|
||||
} else {
|
||||
int bw = FFMIN(avctx->width - 16 * mb_x, 16);
|
||||
int bh = FFMIN((avctx->height >> ctx->interlaced) - 16 * mb_y, 16);
|
||||
sum = varc = 0;
|
||||
for (y = 0; y < bh; y++) {
|
||||
for (x = 0; x < bw; x++) {
|
||||
uint8_t val = pix[x + y * ctx->m.linesize];
|
||||
sum += val;
|
||||
varc += val * val;
|
||||
}
|
||||
}
|
||||
}
|
||||
varc = (varc - (((unsigned)sum * sum) >> 8) + 128) >> 8;
|
||||
|
||||
int sum = ctx->m.dsp.pix_sum(pix, ctx->m.linesize);
|
||||
int varc = (ctx->m.dsp.pix_norm1(pix, ctx->m.linesize) - (((unsigned)sum*sum)>>8)+128)>>8;
|
||||
ctx->mb_cmp[mb].value = varc;
|
||||
ctx->mb_cmp[mb].mb = mb;
|
||||
}
|
||||
|
@@ -212,7 +212,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
buf[803] = s->bits_per_component;
|
||||
write16(buf + 804, (s->bits_per_component == 10 || s->bits_per_component == 12) ?
|
||||
1 : 0); /* packing method */
|
||||
write32(buf + 808, HEADER_SIZE); /* data offset */
|
||||
|
||||
/* Image source information header */
|
||||
write32(buf + 1628, avctx->sample_aspect_ratio.num);
|
||||
|
@@ -128,30 +128,27 @@ static av_cold int cinvideo_decode_init(AVCodecContext *avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cin_apply_delta_data(const unsigned char *src, unsigned char *dst,
|
||||
int size)
|
||||
static void cin_apply_delta_data(const unsigned char *src, unsigned char *dst, int size)
|
||||
{
|
||||
while (size--)
|
||||
*dst++ += *src++;
|
||||
}
|
||||
|
||||
static int cin_decode_huffman(const unsigned char *src, int src_size,
|
||||
unsigned char *dst, int dst_size)
|
||||
static int cin_decode_huffman(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
|
||||
{
|
||||
int b, huff_code = 0;
|
||||
unsigned char huff_code_table[15];
|
||||
unsigned char *dst_cur = dst;
|
||||
unsigned char *dst_end = dst + dst_size;
|
||||
unsigned char *dst_cur = dst;
|
||||
unsigned char *dst_end = dst + dst_size;
|
||||
const unsigned char *src_end = src + src_size;
|
||||
|
||||
memcpy(huff_code_table, src, 15);
|
||||
src += 15;
|
||||
memcpy(huff_code_table, src, 15); src += 15;
|
||||
|
||||
while (src < src_end) {
|
||||
huff_code = *src++;
|
||||
if ((huff_code >> 4) == 15) {
|
||||
b = huff_code << 4;
|
||||
huff_code = *src++;
|
||||
b = huff_code << 4;
|
||||
huff_code = *src++;
|
||||
*dst_cur++ = b | (huff_code >> 4);
|
||||
} else
|
||||
*dst_cur++ = huff_code_table[huff_code >> 4];
|
||||
@@ -170,12 +167,11 @@ static int cin_decode_huffman(const unsigned char *src, int src_size,
|
||||
return dst_cur - dst;
|
||||
}
|
||||
|
||||
static int cin_decode_lzss(const unsigned char *src, int src_size,
|
||||
unsigned char *dst, int dst_size)
|
||||
static int cin_decode_lzss(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
|
||||
{
|
||||
uint16_t cmd;
|
||||
int i, sz, offset, code;
|
||||
unsigned char *dst_end = dst + dst_size, *dst_start = dst;
|
||||
unsigned char *dst_end = dst + dst_size, *dst_start = dst;
|
||||
const unsigned char *src_end = src + src_size;
|
||||
|
||||
while (src < src_end && dst < dst_end) {
|
||||
@@ -184,15 +180,13 @@ static int cin_decode_lzss(const unsigned char *src, int src_size,
|
||||
if (code & (1 << i)) {
|
||||
*dst++ = *src++;
|
||||
} else {
|
||||
cmd = AV_RL16(src);
|
||||
src += 2;
|
||||
cmd = AV_RL16(src); src += 2;
|
||||
offset = cmd >> 4;
|
||||
if ((int)(dst - dst_start) < offset + 1)
|
||||
if ((int) (dst - dst_start) < offset + 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
sz = (cmd & 0xF) + 2;
|
||||
/* don't use memcpy/memmove here as the decoding routine
|
||||
* (ab)uses buffer overlappings to repeat bytes in the
|
||||
* destination */
|
||||
/* don't use memcpy/memmove here as the decoding routine (ab)uses */
|
||||
/* buffer overlappings to repeat bytes in the destination */
|
||||
sz = FFMIN(sz, dst_end - dst);
|
||||
while (sz--) {
|
||||
*dst = *(dst - offset - 1);
|
||||
@@ -205,11 +199,10 @@ static int cin_decode_lzss(const unsigned char *src, int src_size,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cin_decode_rle(const unsigned char *src, int src_size,
|
||||
unsigned char *dst, int dst_size)
|
||||
static int cin_decode_rle(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
|
||||
{
|
||||
int len, code;
|
||||
unsigned char *dst_end = dst + dst_size;
|
||||
unsigned char *dst_end = dst + dst_size;
|
||||
const unsigned char *src_end = src + src_size;
|
||||
|
||||
while (src + 1 < src_end && dst < dst_end) {
|
||||
@@ -223,7 +216,7 @@ static int cin_decode_rle(const unsigned char *src, int src_size,
|
||||
av_log(NULL, AV_LOG_ERROR, "RLE overread\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
memcpy(dst, src, FFMIN3(len, dst_end - dst, src_end - src));
|
||||
memcpy(dst, src, FFMIN(len, dst_end - dst));
|
||||
src += len;
|
||||
}
|
||||
dst += len;
|
||||
@@ -235,16 +228,15 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
|
||||
void *data, int *got_frame,
|
||||
AVPacket *avpkt)
|
||||
{
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
CinVideoContext *cin = avctx->priv_data;
|
||||
int i, y, palette_type, palette_colors_count,
|
||||
bitmap_frame_type, bitmap_frame_size, res = 0;
|
||||
int i, y, palette_type, palette_colors_count, bitmap_frame_type, bitmap_frame_size, res = 0;
|
||||
|
||||
palette_type = buf[0];
|
||||
palette_colors_count = AV_RL16(buf + 1);
|
||||
bitmap_frame_type = buf[3];
|
||||
buf += 4;
|
||||
palette_type = buf[0];
|
||||
palette_colors_count = AV_RL16(buf+1);
|
||||
bitmap_frame_type = buf[3];
|
||||
buf += 4;
|
||||
|
||||
bitmap_frame_size = buf_size - 4;
|
||||
|
||||
@@ -255,50 +247,46 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
|
||||
if (palette_colors_count > 256)
|
||||
return AVERROR_INVALIDDATA;
|
||||
for (i = 0; i < palette_colors_count; ++i) {
|
||||
cin->palette[i] = 0xFFU << 24 | bytestream_get_le24(&buf);
|
||||
cin->palette[i] = 0xFFU << 24 | bytestream_get_le24(&buf);
|
||||
bitmap_frame_size -= 3;
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < palette_colors_count; ++i) {
|
||||
cin->palette[buf[0]] = 0xFFU << 24 | AV_RL24(buf + 1);
|
||||
cin->palette[buf[0]] = 0xFFU << 24 | AV_RL24(buf+1);
|
||||
buf += 4;
|
||||
bitmap_frame_size -= 4;
|
||||
}
|
||||
}
|
||||
|
||||
bitmap_frame_size = FFMIN(cin->bitmap_size, bitmap_frame_size);
|
||||
|
||||
/* note: the decoding routines below assumes that
|
||||
* surface.width = surface.pitch */
|
||||
/* note: the decoding routines below assumes that surface.width = surface.pitch */
|
||||
switch (bitmap_frame_type) {
|
||||
case 9:
|
||||
cin_decode_rle(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
break;
|
||||
case 34:
|
||||
cin_decode_rle(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
cin_apply_delta_data(cin->bitmap_table[CIN_PRE_BMP],
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
break;
|
||||
case 35:
|
||||
bitmap_frame_size = cin_decode_huffman(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_INT_BMP], cin->bitmap_size);
|
||||
cin_decode_rle(cin->bitmap_table[CIN_INT_BMP], bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
break;
|
||||
case 36:
|
||||
bitmap_frame_size = cin_decode_huffman(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_INT_BMP],
|
||||
cin->bitmap_size);
|
||||
cin->bitmap_table[CIN_INT_BMP], cin->bitmap_size);
|
||||
cin_decode_rle(cin->bitmap_table[CIN_INT_BMP], bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
cin_apply_delta_data(cin->bitmap_table[CIN_PRE_BMP],
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
break;
|
||||
case 37:
|
||||
cin_decode_huffman(buf, bitmap_frame_size,
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
break;
|
||||
case 38:
|
||||
res = cin_decode_lzss(buf, bitmap_frame_size,
|
||||
@@ -314,12 +302,12 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
|
||||
if (res < 0)
|
||||
return res;
|
||||
cin_apply_delta_data(cin->bitmap_table[CIN_PRE_BMP],
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
|
||||
break;
|
||||
}
|
||||
|
||||
cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
|
||||
if ((res = avctx->reget_buffer(avctx, &cin->frame)) < 0) {
|
||||
if ((res = avctx->reget_buffer(avctx, &cin->frame))) {
|
||||
av_log(cin->avctx, AV_LOG_ERROR, "failed to allocate a frame\n");
|
||||
return res;
|
||||
}
|
||||
@@ -328,11 +316,10 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
|
||||
cin->frame.palette_has_changed = 1;
|
||||
for (y = 0; y < cin->avctx->height; ++y)
|
||||
memcpy(cin->frame.data[0] + (cin->avctx->height - 1 - y) * cin->frame.linesize[0],
|
||||
cin->bitmap_table[CIN_CUR_BMP] + y * cin->avctx->width,
|
||||
cin->avctx->width);
|
||||
cin->bitmap_table[CIN_CUR_BMP] + y * cin->avctx->width,
|
||||
cin->avctx->width);
|
||||
|
||||
FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP],
|
||||
cin->bitmap_table[CIN_PRE_BMP]);
|
||||
FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_table[CIN_PRE_BMP]);
|
||||
|
||||
*got_frame = 1;
|
||||
*(AVFrame *)data = cin->frame;
|
||||
@@ -371,8 +358,8 @@ static av_cold int cinaudio_decode_init(AVCodecContext *avctx)
|
||||
static int cinaudio_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int *got_frame_ptr, AVPacket *avpkt)
|
||||
{
|
||||
const uint8_t *buf = avpkt->data;
|
||||
CinAudioContext *cin = avctx->priv_data;
|
||||
const uint8_t *buf = avpkt->data;
|
||||
CinAudioContext *cin = avctx->priv_data;
|
||||
const uint8_t *buf_end = buf + avpkt->size;
|
||||
int16_t *samples;
|
||||
int delta, ret;
|
||||
@@ -388,13 +375,13 @@ static int cinaudio_decode_frame(AVCodecContext *avctx, void *data,
|
||||
delta = cin->delta;
|
||||
if (cin->initial_decode_frame) {
|
||||
cin->initial_decode_frame = 0;
|
||||
delta = sign_extend(AV_RL16(buf), 16);
|
||||
buf += 2;
|
||||
*samples++ = delta;
|
||||
delta = sign_extend(AV_RL16(buf), 16);
|
||||
buf += 2;
|
||||
*samples++ = delta;
|
||||
}
|
||||
while (buf < buf_end) {
|
||||
delta += cinaudio_delta16_table[*buf++];
|
||||
delta = av_clip_int16(delta);
|
||||
delta += cinaudio_delta16_table[*buf++];
|
||||
delta = av_clip_int16(delta);
|
||||
*samples++ = delta;
|
||||
}
|
||||
cin->delta = delta;
|
||||
@@ -405,6 +392,7 @@ static int cinaudio_decode_frame(AVCodecContext *avctx, void *data,
|
||||
return avpkt->size;
|
||||
}
|
||||
|
||||
|
||||
AVCodec ff_dsicinvideo_decoder = {
|
||||
.name = "dsicinvideo",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
|
@@ -1922,7 +1922,7 @@ void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){
|
||||
|
||||
static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
|
||||
long i;
|
||||
for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
|
||||
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
|
||||
long a = *(long*)(src+i);
|
||||
long b = *(long*)(dst+i);
|
||||
*(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
|
||||
@@ -1947,7 +1947,7 @@ static void diff_bytes_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
|
||||
}
|
||||
}else
|
||||
#endif
|
||||
for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
|
||||
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
|
||||
long a = *(long*)(src1+i);
|
||||
long b = *(long*)(src2+i);
|
||||
*(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
|
||||
|
@@ -350,6 +350,11 @@ static av_cold int dvvideo_init_encoder(AVCodecContext *avctx)
|
||||
static const int vs_total_ac_bits = (100 * 4 + 68*2) * 5;
|
||||
static const int mb_area_start[5] = { 1, 6, 21, 43, 64 };
|
||||
|
||||
static inline int put_bits_left(PutBitContext* s)
|
||||
{
|
||||
return (s->buf_end - s->buf) * 8 - put_bits_count(s);
|
||||
}
|
||||
|
||||
#if CONFIG_SMALL
|
||||
/* Converts run and level (where level != 0) pair into VLC, returning bit size */
|
||||
static av_always_inline int dv_rl2vlc(int run, int level, int sign, uint32_t* vlc)
|
||||
|
@@ -255,12 +255,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
|
||||
case 5:
|
||||
c->pic.key_frame = !(compr & 1);
|
||||
c->pic.pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
|
||||
|
||||
if (!tmpptr && !c->pic.key_frame) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
for(j = 0; j < avctx->height; j++){
|
||||
if((compr & 1) && tmpptr){
|
||||
for(i = 0; i < avctx->width; i++)
|
||||
|
@@ -114,8 +114,8 @@ static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *
|
||||
int yoffset = ((buf[i] >> 4)) - 7;
|
||||
if (s->last_frame.data[0])
|
||||
cmv_motcomp(s->frame.data[0], s->frame.linesize[0],
|
||||
s->last_frame.data[0], s->last_frame.linesize[0],
|
||||
x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
|
||||
s->last_frame.data[0], s->last_frame.linesize[0],
|
||||
x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
|
||||
}
|
||||
i++;
|
||||
}
|
||||
@@ -123,7 +123,7 @@ static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *
|
||||
|
||||
static void cmv_process_header(CmvContext *s, const uint8_t *buf, const uint8_t *buf_end)
|
||||
{
|
||||
int pal_start, pal_count, i, fps;
|
||||
int pal_start, pal_count, i;
|
||||
|
||||
if(buf_end - buf < 16) {
|
||||
av_log(s->avctx, AV_LOG_WARNING, "truncated header\n");
|
||||
@@ -135,9 +135,8 @@ static void cmv_process_header(CmvContext *s, const uint8_t *buf, const uint8_t
|
||||
if (s->avctx->width!=s->width || s->avctx->height!=s->height)
|
||||
avcodec_set_dimensions(s->avctx, s->width, s->height);
|
||||
|
||||
fps = AV_RL16(&buf[10]);
|
||||
if (fps > 0)
|
||||
s->avctx->time_base = (AVRational){ 1, fps };
|
||||
s->avctx->time_base.num = 1;
|
||||
s->avctx->time_base.den = AV_RL16(&buf[10]);
|
||||
|
||||
pal_start = AV_RL16(&buf[12]);
|
||||
pal_count = AV_RL16(&buf[14]);
|
||||
|
@@ -276,21 +276,6 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
}
|
||||
|
||||
if (inter && !s->last_frame.data[0]) {
|
||||
int ret;
|
||||
av_log(avctx, AV_LOG_WARNING, "Missing reference frame.\n");
|
||||
s->last_frame.reference = 1;
|
||||
ret = ff_get_buffer(avctx, &s->last_frame);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
memset(s->last_frame.data[0], 0, s->last_frame.height *
|
||||
s->last_frame.linesize[0]);
|
||||
memset(s->last_frame.data[1], 0x80, s->last_frame.height / 2 *
|
||||
s->last_frame.linesize[1]);
|
||||
memset(s->last_frame.data[2], 0x80, s->last_frame.height / 2 *
|
||||
s->last_frame.linesize[2]);
|
||||
}
|
||||
|
||||
av_fast_padded_malloc(&s->bitstream_buf, &s->bitstream_buf_size,
|
||||
buf_end - buf);
|
||||
if (!s->bitstream_buf)
|
||||
|
@@ -924,12 +924,6 @@ void ff_er_frame_end(MpegEncContext *s)
|
||||
return;
|
||||
};
|
||||
|
||||
if ( s->picture_structure == PICT_FRAME
|
||||
&& s->current_picture.f.linesize[0] != s->current_picture_ptr->f.linesize[0]) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Error concealment not possible, frame not fully initialized\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (s->current_picture.f.motion_val[0] == NULL) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
|
||||
|
||||
|
@@ -447,10 +447,6 @@ static int read_extra_header(FFV1Context *f)
|
||||
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
|
||||
|
||||
f->version = get_symbol(c, state, 0);
|
||||
if (f->version < 2) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "Invalid version in global header\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (f->version > 2) {
|
||||
c->bytestream_end -= 4;
|
||||
f->minor_version = get_symbol(c, state, 0);
|
||||
@@ -528,7 +524,6 @@ static int read_header(FFV1Context *f)
|
||||
memset(state, 128, sizeof(state));
|
||||
|
||||
if (f->version < 2) {
|
||||
int chroma_planes, chroma_h_shift, chroma_v_shift, transparency, colorspace, bits_per_raw_sample;
|
||||
unsigned v= get_symbol(c, state, 0);
|
||||
if (v >= 2) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
|
||||
@@ -541,32 +536,15 @@ static int read_header(FFV1Context *f)
|
||||
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
|
||||
}
|
||||
|
||||
colorspace = get_symbol(c, state, 0); //YUV cs type
|
||||
bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
|
||||
chroma_planes = get_rac(c, state);
|
||||
chroma_h_shift = get_symbol(c, state, 0);
|
||||
chroma_v_shift = get_symbol(c, state, 0);
|
||||
transparency = get_rac(c, state);
|
||||
f->colorspace = get_symbol(c, state, 0); //YUV cs type
|
||||
|
||||
if (f->plane_count) {
|
||||
if ( colorspace != f->colorspace
|
||||
|| bits_per_raw_sample != f->avctx->bits_per_raw_sample
|
||||
|| chroma_planes != f->chroma_planes
|
||||
|| chroma_h_shift!= f->chroma_h_shift
|
||||
|| chroma_v_shift!= f->chroma_v_shift
|
||||
|| transparency != f->transparency) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
f->colorspace = colorspace;
|
||||
f->avctx->bits_per_raw_sample = bits_per_raw_sample;
|
||||
f->chroma_planes = chroma_planes;
|
||||
f->chroma_h_shift = chroma_h_shift;
|
||||
f->chroma_v_shift = chroma_v_shift;
|
||||
f->transparency = transparency;
|
||||
if (f->version > 0)
|
||||
f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
|
||||
|
||||
f->chroma_planes = get_rac(c, state);
|
||||
f->chroma_h_shift = get_symbol(c, state, 0);
|
||||
f->chroma_v_shift = get_symbol(c, state, 0);
|
||||
f->transparency = get_rac(c, state);
|
||||
f->plane_count = 2 + f->transparency;
|
||||
}
|
||||
|
||||
|
@@ -275,7 +275,7 @@ static av_always_inline int encode_line(FFV1Context *s, int w,
|
||||
int run_mode = 0;
|
||||
|
||||
if (s->ac) {
|
||||
if (c->bytestream_end - c->bytestream < w * 35) {
|
||||
if (c->bytestream_end - c->bytestream < w * 20) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
@@ -27,7 +27,7 @@ const int ff_flac_sample_rate_table[16] =
|
||||
8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000,
|
||||
0, 0, 0, 0 };
|
||||
|
||||
const int32_t ff_flac_blocksize_table[16] = {
|
||||
const int16_t ff_flac_blocksize_table[16] = {
|
||||
0, 192, 576<<0, 576<<1, 576<<2, 576<<3, 0, 0,
|
||||
256<<0, 256<<1, 256<<2, 256<<3, 256<<4, 256<<5, 256<<6, 256<<7
|
||||
};
|
||||
|
@@ -26,6 +26,6 @@
|
||||
|
||||
extern const int ff_flac_sample_rate_table[16];
|
||||
|
||||
extern const int32_t ff_flac_blocksize_table[16];
|
||||
extern const int16_t ff_flac_blocksize_table[16];
|
||||
|
||||
#endif /* AVCODEC_FLACDATA_H */
|
||||
|
@@ -389,12 +389,6 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
s->diff_start = get_bits(&gb, 8);
|
||||
s->diff_height = get_bits(&gb, 8);
|
||||
if (s->diff_start + s->diff_height > cur_blk_height) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Block parameters invalid: %d + %d > %d\n",
|
||||
s->diff_start, s->diff_height, cur_blk_height);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
av_log(avctx, AV_LOG_DEBUG,
|
||||
"%dx%d diff start %d height %d\n",
|
||||
i, j, s->diff_start, s->diff_height);
|
||||
|
@@ -387,11 +387,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
|
||||
if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk)
|
||||
break;
|
||||
byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
|
||||
if (!byte_run) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid byte run value.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (byte_run > 0) {
|
||||
palette_idx1 = bytestream2_get_byte(&g2);
|
||||
CHECK_PIXEL_PTR(byte_run);
|
||||
|
@@ -145,11 +145,6 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
enum AVPixelFormat pix_fmt;
|
||||
int ret;
|
||||
|
||||
if (buf_size < 4) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Packet is too short\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
header = AV_RL32(buf);
|
||||
version = header & 0xff;
|
||||
header_size = (header & (1<<30))? 8 : 4; /* bit 30 means pad to 8 bytes */
|
||||
@@ -221,7 +216,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
return ret;
|
||||
}
|
||||
|
||||
switch (version) {
|
||||
switch(version) {
|
||||
case 0:
|
||||
default:
|
||||
/* Fraps v0 is a reordered YUV420 */
|
||||
@@ -231,13 +226,13 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
buf32 = (const uint32_t*)buf;
|
||||
for (y = 0; y < avctx->height / 2; y++) {
|
||||
luma1 = (uint32_t*)&f->data[0][ y * 2 * f->linesize[0] ];
|
||||
luma2 = (uint32_t*)&f->data[0][ (y * 2 + 1) * f->linesize[0] ];
|
||||
cr = (uint32_t*)&f->data[1][ y * f->linesize[1] ];
|
||||
cb = (uint32_t*)&f->data[2][ y * f->linesize[2] ];
|
||||
for(x=0; x<avctx->width; x+=8) {
|
||||
buf32=(const uint32_t*)buf;
|
||||
for(y=0; y<avctx->height/2; y++){
|
||||
luma1=(uint32_t*)&f->data[0][ y*2*f->linesize[0] ];
|
||||
luma2=(uint32_t*)&f->data[0][ (y*2+1)*f->linesize[0] ];
|
||||
cr=(uint32_t*)&f->data[1][ y*f->linesize[1] ];
|
||||
cb=(uint32_t*)&f->data[2][ y*f->linesize[2] ];
|
||||
for(x=0; x<avctx->width; x+=8){
|
||||
*luma1++ = *buf32++;
|
||||
*luma1++ = *buf32++;
|
||||
*luma2++ = *buf32++;
|
||||
@@ -250,10 +245,10 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
|
||||
case 1:
|
||||
/* Fraps v1 is an upside-down BGR24 */
|
||||
for (y = 0; y < avctx->height; y++)
|
||||
memcpy(&f->data[0][ (avctx->height - y - 1) * f->linesize[0]],
|
||||
&buf[y * avctx->width * 3],
|
||||
3 * avctx->width);
|
||||
for(y=0; y<avctx->height; y++)
|
||||
memcpy(&f->data[0][ (avctx->height-y)*f->linesize[0] ],
|
||||
&buf[y*avctx->width*3],
|
||||
3*avctx->width);
|
||||
break;
|
||||
|
||||
case 2:
|
||||
|
@@ -2292,8 +2292,7 @@ static int pack_bitstream(G723_1_Context *p, unsigned char *frame, int size)
|
||||
if (p->cur_rate == RATE_6300) {
|
||||
info_bits = 0;
|
||||
put_bits(&pb, 2, info_bits);
|
||||
}else
|
||||
av_assert0(0);
|
||||
}
|
||||
|
||||
put_bits(&pb, 8, p->lsp_index[2]);
|
||||
put_bits(&pb, 8, p->lsp_index[1]);
|
||||
|
@@ -366,49 +366,25 @@ static inline int check_marker(GetBitContext *s, const char *msg)
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize GetBitContext.
|
||||
* @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes
|
||||
* larger than the actual read bits because some optimized bitstream
|
||||
* readers read 32 or 64 bit at once and could read over the end
|
||||
* Inititalize GetBitContext.
|
||||
* @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes larger than the actual read bits
|
||||
* because some optimized bitstream readers read 32 or 64 bit at once and could read over the end
|
||||
* @param bit_size the size of the buffer in bits
|
||||
* @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
|
||||
*/
|
||||
static inline int init_get_bits(GetBitContext *s, const uint8_t *buffer,
|
||||
int bit_size)
|
||||
static inline void init_get_bits(GetBitContext *s, const uint8_t *buffer,
|
||||
int bit_size)
|
||||
{
|
||||
int buffer_size;
|
||||
int ret = 0;
|
||||
|
||||
if (bit_size >= INT_MAX - 7 || bit_size < 0 || !buffer) {
|
||||
int buffer_size = (bit_size+7)>>3;
|
||||
if (buffer_size < 0 || bit_size < 0) {
|
||||
buffer_size = bit_size = 0;
|
||||
buffer = NULL;
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
buffer_size = (bit_size + 7) >> 3;
|
||||
|
||||
s->buffer = buffer;
|
||||
s->size_in_bits = bit_size;
|
||||
s->size_in_bits_plus8 = bit_size + 8;
|
||||
s->buffer_end = buffer + buffer_size;
|
||||
s->index = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize GetBitContext.
|
||||
* @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes
|
||||
* larger than the actual read bits because some optimized bitstream
|
||||
* readers read 32 or 64 bit at once and could read over the end
|
||||
* @param byte_size the size of the buffer in bytes
|
||||
* @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
|
||||
*/
|
||||
static inline int init_get_bits8(GetBitContext *s, const uint8_t *buffer,
|
||||
int byte_size)
|
||||
{
|
||||
if (byte_size > INT_MAX / 8 || byte_size < 0)
|
||||
byte_size = -1;
|
||||
return init_get_bits(s, buffer, byte_size * 8);
|
||||
}
|
||||
|
||||
static inline void align_get_bits(GetBitContext *s)
|
||||
|
@@ -65,8 +65,8 @@ typedef struct GifState {
|
||||
int stored_img_size;
|
||||
int stored_bg_color;
|
||||
|
||||
/* LZW compatible decoder */
|
||||
GetByteContext gb;
|
||||
/* LZW compatible decoder */
|
||||
LZWState *lzw;
|
||||
|
||||
/* aux buffers */
|
||||
@@ -75,7 +75,6 @@ typedef struct GifState {
|
||||
|
||||
AVCodecContext *avctx;
|
||||
int keyframe;
|
||||
int keyframe_ok;
|
||||
int trans_color; /**< color value that is used instead of transparent color */
|
||||
} GifState;
|
||||
|
||||
@@ -119,7 +118,7 @@ static void gif_copy_img_rect(const uint32_t *src, uint32_t *dst,
|
||||
const uint32_t *src_px, *src_pr,
|
||||
*src_py = src + y_start,
|
||||
*dst_py = dst + y_start;
|
||||
const uint32_t *src_pb = src_py + h * linesize;
|
||||
const uint32_t *src_pb = src_py + t * linesize;
|
||||
uint32_t *dst_px;
|
||||
|
||||
for (; src_py < src_pb; src_py += linesize, dst_py += linesize) {
|
||||
@@ -144,11 +143,11 @@ static int gif_read_image(GifState *s)
|
||||
if (bytestream2_get_bytes_left(&s->gb) < 9)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
left = bytestream2_get_le16u(&s->gb);
|
||||
top = bytestream2_get_le16u(&s->gb);
|
||||
width = bytestream2_get_le16u(&s->gb);
|
||||
left = bytestream2_get_le16u(&s->gb);
|
||||
top = bytestream2_get_le16u(&s->gb);
|
||||
width = bytestream2_get_le16u(&s->gb);
|
||||
height = bytestream2_get_le16u(&s->gb);
|
||||
flags = bytestream2_get_byteu(&s->gb);
|
||||
flags = bytestream2_get_byteu(&s->gb);
|
||||
is_interleaved = flags & 0x40;
|
||||
has_local_palette = flags & 0x80;
|
||||
bits_per_pixel = (flags & 0x07) + 1;
|
||||
@@ -185,11 +184,8 @@ static int gif_read_image(GifState *s)
|
||||
|
||||
/* verify that all the image is inside the screen dimensions */
if (left + width > s->screen_width ||
top + height > s->screen_height ||
!width || !height) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid image dimensions.\n");
top + height > s->screen_height)
return AVERROR_INVALIDDATA;
}

/* process disposal method */
if (s->gce_prev_disposal == GCE_DISPOSAL_BACKGROUND) {
@@ -317,7 +313,7 @@ static int gif_read_extension(GifState *s)
if (bytestream2_get_bytes_left(&s->gb) < 5)
return AVERROR_INVALIDDATA;

gce_flags = bytestream2_get_byteu(&s->gb);
gce_flags = bytestream2_get_byteu(&s->gb);
bytestream2_skipu(&s->gb, 2); // delay during which the frame is shown
gce_transparent_index = bytestream2_get_byteu(&s->gb);
if (gce_flags & 0x01)
@@ -371,7 +367,7 @@ static int gif_read_header1(GifState *s)

/* read screen header */
s->transparent_color_index = -1;
s->screen_width = bytestream2_get_le16u(&s->gb);
s->screen_width = bytestream2_get_le16u(&s->gb);
s->screen_height = bytestream2_get_le16u(&s->gb);
if( (unsigned)s->screen_width > 32767
|| (unsigned)s->screen_height > 32767){
@@ -414,10 +410,10 @@ static int gif_read_header1(GifState *s)

static int gif_parse_next_image(GifState *s, int *got_picture)
{
int ret;
*got_picture = 1;
while (bytestream2_get_bytes_left(&s->gb) > 0) {
while (bytestream2_get_bytes_left(&s->gb)) {
int code = bytestream2_get_byte(&s->gb);
int ret;

av_dlog(s->avctx, "code=%02x '%c'\n", code, code);

@@ -462,9 +458,9 @@ static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, A

bytestream2_init(&s->gb, avpkt->data, avpkt->size);

s->picture.pts = avpkt->pts;
s->picture.pkt_pts = avpkt->pts;
s->picture.pkt_dts = avpkt->dts;
s->picture.pts = avpkt->pts;
s->picture.pkt_pts = avpkt->pts;
s->picture.pkt_dts = avpkt->dts;
s->picture.pkt_duration = avpkt->duration;

if (avpkt->size >= 6) {
@@ -475,8 +471,6 @@ static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, A
}

if (s->keyframe) {
s->keyframe_ok = 0;
s->gce_prev_disposal = GCE_DISPOSAL_NONE;
if ((ret = gif_read_header1(s)) < 0)
return ret;

@@ -494,13 +488,7 @@ static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, A

s->picture.pict_type = AV_PICTURE_TYPE_I;
s->picture.key_frame = 1;
s->keyframe_ok = 1;
} else {
if (!s->keyframe_ok) {
av_log(avctx, AV_LOG_ERROR, "cannot decode frame without keyframe\n");
return AVERROR_INVALIDDATA;
}

if ((ret = avctx->reget_buffer(avctx, &s->picture)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
@@ -516,7 +504,7 @@ static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, A
else if (*got_frame)
*picture = s->picture;

return bytestream2_tell(&s->gb);
return avpkt->size;
}

static av_cold int gif_decode_close(AVCodecContext *avctx)

@@ -291,11 +291,9 @@ static int h261_decode_mb(H261Context *h){
// Read mtype
h->mtype = get_vlc2(&s->gb, h261_mtype_vlc.table, H261_MTYPE_VLC_BITS, 2);
if (h->mtype < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid mtype index %d\n",
h->mtype);
av_log(s->avctx, AV_LOG_ERROR, "illegal mtype %d\n", h->mtype);
return SLICE_ERROR;
}
av_assert0(h->mtype < FF_ARRAY_ELEMS(ff_h261_mtype_map));
h->mtype = ff_h261_mtype_map[h->mtype];

// Read mquant

@@ -392,6 +392,7 @@ uint64_t time= rdtsc();
return buf_size;
}


retry:
if(s->divx_packed && s->bitstream_buffer_size){
int i;
@@ -406,20 +407,16 @@ retry:
}
}

if (s->bitstream_buffer_size && (s->divx_packed || buf_size < 20)) // divx 5.01+/xvid frame reorder
ret = init_get_bits8(&s->gb, s->bitstream_buffer,
s->bitstream_buffer_size);
else
ret = init_get_bits8(&s->gb, buf, buf_size);

if(s->bitstream_buffer_size && (s->divx_packed || buf_size<20)){ //divx 5.01+/xvid frame reorder
init_get_bits(&s->gb, s->bitstream_buffer, s->bitstream_buffer_size*8);
}else
init_get_bits(&s->gb, buf, buf_size*8);
s->bitstream_buffer_size=0;
if (ret < 0)
return ret;

if (!s->context_initialized)
// we need the idct permutaton for reading a custom matrix
if ((ret = ff_MPV_common_init(s)) < 0)
if (!s->context_initialized) {
if ((ret = ff_MPV_common_init(s)) < 0) //we need the idct permutaton for reading a custom matrix
return ret;
}

/* We need to set current_picture_ptr before reading the header,
* otherwise we cannot store anyting in there */
@@ -439,11 +436,8 @@ retry:
if(s->avctx->extradata_size && s->picture_number==0){
GetBitContext gb;

ret = init_get_bits8(&gb, s->avctx->extradata,
s->avctx->extradata_size);
if (ret < 0)
return ret;
ff_mpeg4_decode_picture_header(s, &gb);
init_get_bits(&gb, s->avctx->extradata, s->avctx->extradata_size*8);
ret = ff_mpeg4_decode_picture_header(s, &gb);
}
ret = ff_mpeg4_decode_picture_header(s, &s->gb);
} else if (CONFIG_H263I_DECODER && s->codec_id == AV_CODEC_ID_H263I) {
@@ -603,6 +597,17 @@ retry:
/* FIXME: By the way H263 decoder is evolving it should have */
/* an H263EncContext */

if ((!avctx->coded_width || !avctx->coded_height) && 0) {
ParseContext pc= s->parse_context; //FIXME move these demuxng hack to avformat

s->parse_context.buffer=0;
ff_MPV_common_end(s);
s->parse_context= pc;
avcodec_set_dimensions(avctx, s->width, s->height);

goto retry;
}

if (s->width != avctx->coded_width ||
s->height != avctx->coded_height ||
s->context_reinit) {
@@ -720,10 +725,10 @@ frame_end:
}

if(startcode_found){
av_fast_padded_mallocz(
av_fast_malloc(
&s->bitstream_buffer,
&s->allocated_bitstream_buffer_size,
buf_size - current_pos);
buf_size - current_pos + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->bitstream_buffer)
return AVERROR(ENOMEM);
memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos);

@@ -67,15 +67,9 @@ static const uint8_t div6[QP_MAX_NUM + 1] = {
};

static const enum AVPixelFormat hwaccel_pixfmt_list_h264_jpeg_420[] = {
#if CONFIG_H264_DXVA2_HWACCEL
AV_PIX_FMT_DXVA2_VLD,
#endif
#if CONFIG_H264_VAAPI_HWACCEL
AV_PIX_FMT_VAAPI_VLD,
#endif
#if CONFIG_H264_VDA_HWACCEL
AV_PIX_FMT_VDA_VLD,
#endif
AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_NONE
};
@@ -141,10 +135,10 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h)
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
{
MpegEncContext *const s = &h->s;
static const int8_t top[4] = { LEFT_DC_PRED8x8, 1, -1, -1 };
static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
static const int8_t top[7] = { LEFT_DC_PRED8x8, 1, -1, -1 };
static const int8_t left[7] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };

if (mode > 3U) {
if (mode > 6U) {
av_log(h->s.avctx, AV_LOG_ERROR,
"out of range intra chroma pred mode at %d %d\n",
s->mb_x, s->mb_y);
@@ -315,11 +309,10 @@ static inline int get_lowest_part_list_y(H264Context *h, Picture *pic, int n,
int height, int y_offset, int list)
{
int raw_my = h->mv_cache[list][scan8[n]][1];
int filter_height_up = (raw_my & 3) ? 2 : 0;
int filter_height_down = (raw_my & 3) ? 3 : 0;
int filter_height = (raw_my & 3) ? 2 : 0;
int full_my = (raw_my >> 2) + y_offset;
int top = full_my - filter_height_up;
int bottom = full_my + filter_height_down + height;
int top = full_my - filter_height;
int bottom = full_my + filter_height + height;

return FFMAX(abs(top), bottom);
}
@@ -1337,8 +1330,6 @@ int ff_h264_frame_start(H264Context *h)
int i;
const int pixel_shift = h->pixel_shift;

h->next_output_pic = NULL;

if (ff_MPV_frame_start(s, s->avctx) < 0)
return -1;
ff_er_frame_start(s);
@@ -1391,6 +1382,8 @@ int ff_h264_frame_start(H264Context *h)
s->current_picture_ptr->field_poc[0] =
s->current_picture_ptr->field_poc[1] = INT_MAX;

h->next_output_pic = NULL;

assert(s->current_picture_ptr->long_ref == 0);

return 0;
@@ -2176,7 +2169,6 @@ static void flush_change(H264Context *h)
h->sync= 0;
h->list_count = 0;
h->current_slice = 0;
h->mmco_reset = 1;
}

/* forget old pics after a seek */
@@ -2358,7 +2350,7 @@ static int field_end(H264Context *h, int in_setup)
* past end by one (callers fault) and resync_mb_y != 0
* causes problems for the first MB line, too.
*/
if (!FIELD_PICTURE && h->current_slice && !h->sps.new)
if (!FIELD_PICTURE)
ff_er_frame_end(s);

ff_MPV_frame_end(s);
@@ -2482,7 +2474,7 @@ static int h264_set_parameter_from_sps(H264Context *h)
return 0;
}

static enum PixelFormat get_pixel_format(H264Context *h, int force_callback)
static enum PixelFormat get_pixel_format(H264Context *h)
{
MpegEncContext *const s = &h->s;
switch (h->sps.bit_depth_luma) {
@@ -2544,17 +2536,11 @@ static enum PixelFormat get_pixel_format(H264Context *h, int force_callback)
return s->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ422P
: AV_PIX_FMT_YUV422P;
} else {
int i;
const enum AVPixelFormat * fmt = s->avctx->codec->pix_fmts ?
return s->avctx->get_format(s->avctx, s->avctx->codec->pix_fmts ?
s->avctx->codec->pix_fmts :
s->avctx->color_range == AVCOL_RANGE_JPEG ?
hwaccel_pixfmt_list_h264_jpeg_420 :
ff_hwaccel_pixfmt_list_420;

for (i=0; fmt[i] != AV_PIX_FMT_NONE; i++)
if (fmt[i] == s->avctx->pix_fmt && !force_callback)
return fmt[i];
return s->avctx->get_format(s->avctx, fmt);
ff_hwaccel_pixfmt_list_420);
}
break;
default:
@@ -2603,7 +2589,7 @@ static int h264_slice_header_init(H264Context *h, int reinit)
return ret;
}
} else {
if ((ret = ff_MPV_common_init(s)) < 0) {
if ((ret = ff_MPV_common_init(s) < 0)) {
av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_init() failed.\n");
return ret;
}
@@ -2720,12 +2706,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h->slice_type = slice_type;
h->slice_type_nos = slice_type & 3;

if (h->nal_unit_type == NAL_IDR_SLICE &&
h->slice_type_nos != AV_PICTURE_TYPE_I) {
av_log(h->s.avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
return AVERROR_INVALIDDATA;
}

// to make a few old functions happy, it's wrong though
s->pict_type = h->slice_type;

@@ -2784,8 +2764,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|| s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
|| h->cur_chroma_format_idc != h->sps.chroma_format_idc
|| av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio)));
if (h0->s.avctx->pix_fmt != get_pixel_format(h0, 0))
must_reinit = 1;


s->mb_width = h->sps.mb_width;
s->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
@@ -2822,7 +2801,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)

flush_change(h);

if ((ret = get_pixel_format(h, 1)) < 0)
if ((ret = get_pixel_format(h)) < 0)
return ret;
s->avctx->pix_fmt = ret;

@@ -2843,7 +2822,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
return -1;
}

if ((ret = get_pixel_format(h, 1)) < 0)
if ((ret = get_pixel_format(h)) < 0)
return ret;
s->avctx->pix_fmt = ret;

@@ -2927,7 +2906,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
assert(s0->current_picture_ptr->f.reference != DELAYED_PIC_REF);

/* Mark old field/frame as completed */
if (s0->current_picture_ptr->owner2 == s0) {
if (!last_pic_droppable && s0->current_picture_ptr->owner2 == s0) {
ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
last_pic_structure == PICT_BOTTOM_FIELD);
}
@@ -2936,7 +2915,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
/* Previous field is unmatched. Don't display it, but let it
* remain for reference if marked as such. */
if (last_pic_structure != PICT_FRAME) {
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}
@@ -2946,7 +2925,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
* different frame_nums. Consider this field first in
* pair. Throw away previous field except for reference
* purposes. */
if (last_pic_structure != PICT_FRAME) {
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
last_pic_structure == PICT_TOP_FIELD);
}
@@ -2987,21 +2966,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
av_log(h->s.avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
h->frame_num, h->prev_frame_num);
if (!h->sps.gaps_in_frame_num_allowed_flag)
for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
h->last_pocs[i] = INT_MIN;
if (ff_h264_frame_start(h) < 0) {
s0->first_field = 0;
if (ff_h264_frame_start(h) < 0)
return -1;
}
h->prev_frame_num++;
h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
s->current_picture_ptr->frame_num = h->prev_frame_num;
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 1);
if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 &&
s->avctx->err_recognition & AV_EF_EXPLODE)
return ret;
ff_generate_sliding_window_mmcos(h);
if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 &&
(s->avctx->err_recognition & AV_EF_EXPLODE))
return AVERROR_INVALIDDATA;
@@ -3134,7 +3106,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)

if (h->ref_count[0]-1 > max[0] || h->ref_count[1]-1 > max[1]){
av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow %u > %u or %u > %u\n", h->ref_count[0]-1, max[0], h->ref_count[1]-1, max[1]);
h->ref_count[0] = h->ref_count[1] = 0;
h->ref_count[0] = h->ref_count[1] = 1;
return AVERROR_INVALIDDATA;
}

@@ -3142,10 +3114,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h->list_count = 2;
else
h->list_count = 1;
} else {
h->list_count = 0;
h->ref_count[0] = h->ref_count[1] = 0;
}
} else
h->ref_count[1]= h->ref_count[0]= h->list_count= 0;

if (!default_ref_list_done)
ff_h264_fill_default_ref_list(h);
@@ -3182,15 +3152,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
}
}

// If frame-mt is enabled, only update mmco tables for the first slice
// in a field. Subsequent slices can temporarily clobber h->mmco_index
// or h->mmco, which will cause ref list mix-ups and decoding errors
// further down the line. This may break decoding if the first slice is
// corrupt, thus we only do this if frame-mt is enabled.
if (h->nal_ref_idc &&
ff_h264_decode_ref_pic_marking(h0, &s->gb,
!(s->avctx->active_thread_type & FF_THREAD_FRAME) ||
h0->current_slice == 0) < 0 &&
if (h->nal_ref_idc && ff_h264_decode_ref_pic_marking(h0, &s->gb) < 0 &&
(s->avctx->err_recognition & AV_EF_EXPLODE))
return AVERROR_INVALIDDATA;

@@ -3233,8 +3195,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
get_se_golomb(&s->gb); /* slice_qs_delta */

h->deblocking_filter = 1;
h->slice_alpha_c0_offset = 0;
h->slice_beta_offset = 0;
h->slice_alpha_c0_offset = 52;
h->slice_beta_offset = 52;
if (h->pps.deblocking_filter_parameters_present) {
tmp = get_ue_golomb_31(&s->gb);
if (tmp > 2) {
@@ -3247,12 +3209,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h->deblocking_filter ^= 1; // 1<->0

if (h->deblocking_filter) {
h->slice_alpha_c0_offset = get_se_golomb(&s->gb) * 2;
h->slice_beta_offset = get_se_golomb(&s->gb) * 2;
if (h->slice_alpha_c0_offset > 12 ||
h->slice_alpha_c0_offset < -12 ||
h->slice_beta_offset > 12 ||
h->slice_beta_offset < -12) {
h->slice_alpha_c0_offset += get_se_golomb(&s->gb) << 1;
h->slice_beta_offset += get_se_golomb(&s->gb) << 1;
if (h->slice_alpha_c0_offset > 104U ||
h->slice_beta_offset > 104U) {
av_log(s->avctx, AV_LOG_ERROR,
"deblocking filter parameters %d %d out of range\n",
h->slice_alpha_c0_offset, h->slice_beta_offset);
@@ -3289,7 +3249,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
}
}
}
h->qp_thresh = 15 -
h->qp_thresh = 15 + 52 -
FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) -
FFMAX3(0,
h->pps.chroma_qp_index_offset[0],
@@ -3363,7 +3323,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h->ref_count[0], h->ref_count[1],
s->qscale,
h->deblocking_filter,
h->slice_alpha_c0_offset, h->slice_beta_offset,
h->slice_alpha_c0_offset / 2 - 26, h->slice_beta_offset / 2 - 26,
h->use_weight,
h->use_weight == 1 && h->use_weight_chroma ? "c" : "",
h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
@@ -3916,8 +3876,6 @@ static int execute_decode_slices(H264Context *h, int context_count)
H264Context *hx;
int i;

av_assert0(s->mb_y < s->mb_height);

if (s->avctx->hwaccel ||
s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
return 0;
@@ -4040,7 +3998,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
s->workaround_bugs |= FF_BUG_TRUNCATED;

if (!(s->workaround_bugs & FF_BUG_TRUNCATED))
while (dst_length > 0 && ptr[dst_length - 1] == 0)
while(dst_length > 0 && ptr[dst_length - 1] == 0)
dst_length--;
bit_length = !dst_length ? 0
: (8 * dst_length -
@@ -4193,24 +4151,12 @@ again:
}
break;
case NAL_DPA:
if (s->flags2 & CODEC_FLAG2_CHUNKS) {
av_log(h->s.avctx, AV_LOG_ERROR,
"Decoding in chunks is not supported for "
"partitioned slices.\n");
return AVERROR(ENOSYS);
}

init_get_bits(&hx->s.gb, ptr, bit_length);
hx->intra_gb_ptr =
hx->inter_gb_ptr = NULL;

if ((err = decode_slice_header(hx, h)) < 0) {
/* make sure data_partitioning is cleared if it was set
* before, so we don't try decoding a slice without a valid
* slice header later */
h->s.data_partitioning = 0;
if ((err = decode_slice_header(hx, h)) < 0)
break;
}

hx->s.data_partitioning = 1;
break;
@@ -4280,10 +4226,9 @@ again:
context_count = 0;
}

if (err < 0) {
if (err < 0)
av_log(h->s.avctx, AV_LOG_ERROR, "decode_slice_header error\n");
h->ref_count[0] = h->ref_count[1] = h->list_count = 0;
} else if (err == 1) {
else if (err == 1) {
/* Slice could not be decoded in parallel mode, copy down
* NAL unit stuff to context 0 and restart. Note that
* rbsp_buffer is not transferred, but since we no longer
@@ -4336,9 +4281,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,

s->flags = avctx->flags;
s->flags2 = avctx->flags2;
/* reset data partitioning here, to ensure GetBitContexts from previous
* packets do not get used. */
s->data_partitioning = 0;

/* end of stream, output what is still in the buffers */
if (buf_size == 0) {

@@ -669,10 +669,9 @@ void ff_h264_remove_all_refs(H264Context *h);
*/
int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count);

int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb,
int first_slice);
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb);

int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice);
void ff_generate_sliding_window_mmcos(H264Context *h);

/**
* Check if the top & left blocks are available if needed & change the

@@ -709,7 +709,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
down the code */
if(h->slice_type_nos != AV_PICTURE_TYPE_I){
if(s->mb_skip_run==-1)
s->mb_skip_run= get_ue_golomb_long(&s->gb);
s->mb_skip_run= get_ue_golomb(&s->gb);

if (s->mb_skip_run--) {
if(FRAME_MBAFF && (s->mb_y&1) == 0){
@@ -771,10 +771,6 @@ decode_intra_mb:

// We assume these blocks are very rare so we do not optimize it.
align_get_bits(&s->gb);
if (get_bits_left(&s->gb) < mb_size) {
av_log(s->avctx, AV_LOG_ERROR, "Not enough data for an intra PCM block.\n");
return AVERROR_INVALIDDATA;
}

// The pixels are stored in the same order as levels in h->mb array.
for(x=0; x < mb_size; x++){

@@ -251,8 +251,8 @@ static av_always_inline void h264_filter_mb_fast_internal(H264Context *h,
int top_type= h->top_type;

int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
int a = 52 + h->slice_alpha_c0_offset - qp_bd_offset;
int b = 52 + h->slice_beta_offset - qp_bd_offset;
int a = h->slice_alpha_c0_offset - qp_bd_offset;
int b = h->slice_beta_offset - qp_bd_offset;

int mb_type = s->current_picture.f.mb_type[mb_xy];
int qp = s->current_picture.f.qscale_table[mb_xy];
@@ -712,8 +712,8 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
av_unused int dir;
int chroma = CHROMA && !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY));
int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
int a = 52 + h->slice_alpha_c0_offset - qp_bd_offset;
int b = 52 + h->slice_beta_offset - qp_bd_offset;
int a = h->slice_alpha_c0_offset - qp_bd_offset;
int b = h->slice_beta_offset - qp_bd_offset;

if (FRAME_MBAFF
// and current and left pair do not have the same interlaced type

@@ -154,7 +154,7 @@ pps:
goto fail;

/* prepend only to the first type 5 NAL unit of an IDR picture */
if (ctx->first_idr && (unit_type == 5 || unit_type == 7 || unit_type == 8)) {
if (ctx->first_idr && unit_type == 5) {
if ((ret=alloc_and_copy(poutbuf, poutbuf_size,
avctx->extradata, avctx->extradata_size,
buf, nal_size)) < 0)

@@ -267,9 +267,7 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){
}

if(sps->num_reorder_frames > 16U /*max_dec_frame_buffering || max_dec_frame_buffering > 16*/){
av_log(h->s.avctx, AV_LOG_ERROR, "Clipping illegal num_reorder_frames %d\n",
sps->num_reorder_frames);
sps->num_reorder_frames = 16;
av_log(h->s.avctx, AV_LOG_ERROR, "illegal num_reorder_frames %d\n", sps->num_reorder_frames);
return -1;
}
}
@@ -387,7 +385,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
}
sps->bit_depth_luma = get_ue_golomb(&s->gb) + 8;
sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8;
if (sps->bit_depth_luma > 14U || sps->bit_depth_chroma > 14U || sps->bit_depth_luma != sps->bit_depth_chroma) {
if (sps->bit_depth_luma > 14U || sps->bit_depth_chroma > 14U) {
av_log(h->s.avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
sps->bit_depth_luma, sps->bit_depth_chroma);
goto fail;

@@ -63,9 +63,7 @@ static int split_field_copy(Picture *dest, Picture *src,
return match;
}

static int build_def_list(Picture *def, int def_len,
Picture **in, int len, int is_long, int sel)
{
static int build_def_list(Picture *def, Picture **in, int len, int is_long, int sel){
int i[2]={0};
int index=0;

@@ -75,12 +73,10 @@ static int build_def_list(Picture *def, int def_len,
while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3))))
i[1]++;
if(i[0] < len){
av_assert0(index < def_len);
in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
split_field_copy(&def[index++], in[ i[0]++ ], sel , 1);
}
if(i[1] < len){
av_assert0(index < def_len);
in[ i[1] ]->pic_id= is_long ? i[1] : in[ i[1] ]->frame_num;
split_field_copy(&def[index++], in[ i[1]++ ], sel^3, 0);
}
@@ -128,12 +124,8 @@ int ff_h264_fill_default_ref_list(H264Context *h){
len= add_sorted(sorted , h->short_ref, h->short_ref_count, cur_poc, 1^list);
len+=add_sorted(sorted+len, h->short_ref, h->short_ref_count, cur_poc, 0^list);
av_assert0(len<=32);
len = build_def_list(h->default_ref_list[list], FF_ARRAY_ELEMS(h->default_ref_list[0]),
sorted, len, 0, s->picture_structure);
len += build_def_list(h->default_ref_list[list] + len,
FF_ARRAY_ELEMS(h->default_ref_list[0]) - len,
h->long_ref, 16, 1, s->picture_structure);

len= build_def_list(h->default_ref_list[list] , sorted , len, 0, s->picture_structure);
len+=build_def_list(h->default_ref_list[list]+len, h->long_ref, 16 , 1, s->picture_structure);
av_assert0(len<=32);

if(len < h->ref_count[list])
@@ -147,12 +139,8 @@ int ff_h264_fill_default_ref_list(H264Context *h){
FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
}
}else{
len = build_def_list(h->default_ref_list[0], FF_ARRAY_ELEMS(h->default_ref_list[0]),
h->short_ref, h->short_ref_count, 0, s->picture_structure);
len += build_def_list(h->default_ref_list[0] + len,
FF_ARRAY_ELEMS(h->default_ref_list[0]) - len,
h-> long_ref, 16, 1, s->picture_structure);

len = build_def_list(h->default_ref_list[0] , h->short_ref, h->short_ref_count, 0, s->picture_structure);
len+= build_def_list(h->default_ref_list[0]+len, h-> long_ref, 16 , 1, s->picture_structure);
av_assert0(len<=32);
if(len < h->ref_count[0])
memset(&h->default_ref_list[0][len], 0, sizeof(Picture)*(h->ref_count[0] - len));
@@ -299,10 +287,7 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
for(list=0; list<h->list_count; list++){
for(index= 0; index < h->ref_count[list]; index++){
if (!h->ref_list[list][index].f.data[0]) {
int i;
av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture, default is %d\n", h->default_ref_list[list][0].poc);
for (i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
h->last_pocs[i] = INT_MIN;
if (h->default_ref_list[list][0].f.data[0])
h->ref_list[list][index]= h->default_ref_list[list][0];
else
@@ -495,50 +480,22 @@ static void print_long_term(H264Context *h) {
}
}

static int check_opcodes(MMCO *mmco1, MMCO *mmco2, int n_mmcos)
{
int i;

for (i = 0; i < n_mmcos; i++) {
if (mmco1[i].opcode != mmco2[i].opcode)
return -1;
}

return 0;
}

int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
{
void ff_generate_sliding_window_mmcos(H264Context *h) {
MpegEncContext * const s = &h->s;
MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp;
int mmco_index = 0, i;

if (h->short_ref_count &&
h->long_ref_count + h->short_ref_count >= h->sps.ref_frame_count &&
!(FIELD_PICTURE && !s->first_field &&
s->current_picture_ptr->f.reference)) {
mmco[0].opcode = MMCO_SHORT2UNUSED;
mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num;
mmco_index = 1;
h->mmco_index= 0;
if(h->short_ref_count && h->long_ref_count + h->short_ref_count >= h->sps.ref_frame_count &&
!(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->f.reference)) {
h->mmco[0].opcode= MMCO_SHORT2UNUSED;
h->mmco[0].short_pic_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num;
h->mmco_index= 1;
if (FIELD_PICTURE) {
mmco[0].short_pic_num *= 2;
mmco[1].opcode = MMCO_SHORT2UNUSED;
mmco[1].short_pic_num = mmco[0].short_pic_num + 1;
mmco_index = 2;
h->mmco[0].short_pic_num *= 2;
h->mmco[1].opcode= MMCO_SHORT2UNUSED;
h->mmco[1].short_pic_num= h->mmco[0].short_pic_num + 1;
h->mmco_index= 2;
}
}

if (first_slice) {
h->mmco_index = mmco_index;
} else if (!first_slice && mmco_index >= 0 &&
(mmco_index != h->mmco_index ||
(i = check_opcodes(h->mmco, mmco_temp, mmco_index)))) {
av_log(h->s.avctx, AV_LOG_ERROR,
"Inconsistent MMCO state between slices [%d, %d, %d]\n",
mmco_index, h->mmco_index, i);
return AVERROR_INVALIDDATA;
}
return 0;
}

int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
@@ -562,7 +519,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
if(!pic){
if(mmco[i].opcode != MMCO_SHORT2LONG || !h->long_ref[mmco[i].long_arg]
|| h->long_ref[mmco[i].long_arg]->frame_num != frame_num) {
av_log(h->s.avctx, h->short_ref_count ? AV_LOG_ERROR : AV_LOG_DEBUG, "mmco: unref short failure\n");
av_log(h->s.avctx, AV_LOG_ERROR, "mmco: unref short failure\n");
err = AVERROR_INVALIDDATA;
}
continue;
@@ -699,7 +656,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
print_short_term(h);
print_long_term(h);

if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=2 + (s->picture_structure != PICT_FRAME) && s->current_picture_ptr->f.pict_type == AV_PICTURE_TYPE_I){
if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=1 + (s->picture_structure != PICT_FRAME) && s->current_picture_ptr->f.pict_type == AV_PICTURE_TYPE_I){
s->current_picture_ptr->sync |= 1;
if(!h->s.avctx->has_b_frames)
h->sync = 2;
@@ -708,86 +665,52 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
return (h->s.avctx->err_recognition & AV_EF_EXPLODE) ? err : 0;
}

int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb,
int first_slice)
{
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb){
MpegEncContext * const s = &h->s;
int i, ret;
MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp;
int mmco_index = 0;
int i;

if (h->nal_unit_type == NAL_IDR_SLICE){ // FIXME fields
s->broken_link = get_bits1(gb) - 1;
if (get_bits1(gb)){
mmco[0].opcode = MMCO_LONG;
mmco[0].long_arg = 0;
mmco_index = 1;
h->mmco_index= 0;
if(h->nal_unit_type == NAL_IDR_SLICE){ //FIXME fields
s->broken_link= get_bits1(gb) -1;
if(get_bits1(gb)){
h->mmco[0].opcode= MMCO_LONG;
h->mmco[0].long_arg= 0;
h->mmco_index= 1;
}
} else {
if (get_bits1(gb)) { // adaptive_ref_pic_marking_mode_flag
for (i = 0; i < MAX_MMCO_COUNT; i++) {
MMCOOpcode opcode = get_ue_golomb_31(gb);
}else{
if(get_bits1(gb)){ // adaptive_ref_pic_marking_mode_flag
for(i= 0; i<MAX_MMCO_COUNT; i++) {
MMCOOpcode opcode= get_ue_golomb_31(gb);

mmco[i].opcode = opcode;
if (opcode == MMCO_SHORT2UNUSED || opcode == MMCO_SHORT2LONG){
mmco[i].short_pic_num =
(h->curr_pic_num - get_ue_golomb(gb) - 1) &
(h->max_pic_num - 1);
#if 0
if (mmco[i].short_pic_num >= h->short_ref_count ||
h->short_ref[ mmco[i].short_pic_num ] == NULL){
av_log(s->avctx, AV_LOG_ERROR,
"illegal short ref in memory management control "
"operation %d\n", mmco);
h->mmco[i].opcode= opcode;
if(opcode==MMCO_SHORT2UNUSED || opcode==MMCO_SHORT2LONG){
h->mmco[i].short_pic_num= (h->curr_pic_num - get_ue_golomb(gb) - 1) & (h->max_pic_num - 1);
/* if(h->mmco[i].short_pic_num >= h->short_ref_count || h->short_ref[ h->mmco[i].short_pic_num ] == NULL){
av_log(s->avctx, AV_LOG_ERROR, "illegal short ref in memory management control operation %d\n", mmco);
return -1;
}*/
}
if(opcode==MMCO_SHORT2LONG || opcode==MMCO_LONG2UNUSED || opcode==MMCO_LONG || opcode==MMCO_SET_MAX_LONG){
unsigned int long_arg= get_ue_golomb_31(gb);
if(long_arg >= 32 || (long_arg >= 16 && !(opcode == MMCO_SET_MAX_LONG && long_arg == 16) && !(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){
av_log(h->s.avctx, AV_LOG_ERROR, "illegal long ref in memory management control operation %d\n", opcode);
return -1;
}
#endif
}
if (opcode == MMCO_SHORT2LONG || opcode == MMCO_LONG2UNUSED ||
opcode == MMCO_LONG || opcode == MMCO_SET_MAX_LONG) {
unsigned int long_arg = get_ue_golomb_31(gb);
if (long_arg >= 32 ||
(long_arg >= 16 && !(opcode == MMCO_SET_MAX_LONG &&
long_arg == 16) &&
!(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){
av_log(h->s.avctx, AV_LOG_ERROR,
"illegal long ref in memory management control "
"operation %d\n", opcode);
return -1;
}
mmco[i].long_arg = long_arg;
h->mmco[i].long_arg= long_arg;
}

if (opcode > (unsigned) MMCO_LONG){
av_log(h->s.avctx, AV_LOG_ERROR,
"illegal memory management control operation %d\n",
opcode);
if(opcode > (unsigned)MMCO_LONG){
av_log(h->s.avctx, AV_LOG_ERROR, "illegal memory management control operation %d\n", opcode);
return -1;
}
if (opcode == MMCO_END)
if(opcode == MMCO_END)
break;
}
mmco_index = i;
} else {
if (first_slice) {
ret = ff_generate_sliding_window_mmcos(h, first_slice);
if (ret < 0 && s->avctx->err_recognition & AV_EF_EXPLODE)
return ret;
}
mmco_index = -1;
h->mmco_index= i;
}else{
ff_generate_sliding_window_mmcos(h);
}
}

if (first_slice && mmco_index != -1) {
h->mmco_index = mmco_index;
} else if (!first_slice && mmco_index >= 0 &&
(mmco_index != h->mmco_index ||
(i = check_opcodes(h->mmco, mmco_temp, mmco_index)))) {
av_log(h->s.avctx, AV_LOG_ERROR,
"Inconsistent MMCO state between slices [%d, %d, %d]\n",
mmco_index, h->mmco_index, i);
return AVERROR_INVALIDDATA;
}

return 0;
}

@@ -63,13 +63,13 @@ void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_fo
c->h264_idct8_dc_add= FUNC(ff_h264_idct8_dc_add, depth);\
c->h264_idct_add16 = FUNC(ff_h264_idct_add16, depth);\
c->h264_idct8_add4 = FUNC(ff_h264_idct8_add4, depth);\
if (chroma_format_idc <= 1)\
if (chroma_format_idc == 1)\
c->h264_idct_add8 = FUNC(ff_h264_idct_add8, depth);\
else\
c->h264_idct_add8 = FUNC(ff_h264_idct_add8_422, depth);\
c->h264_idct_add16intra= FUNC(ff_h264_idct_add16intra, depth);\
c->h264_luma_dc_dequant_idct= FUNC(ff_h264_luma_dc_dequant_idct, depth);\
if (chroma_format_idc <= 1)\
if (chroma_format_idc == 1)\
c->h264_chroma_dc_dequant_idct= FUNC(ff_h264_chroma_dc_dequant_idct, depth);\
else\
c->h264_chroma_dc_dequant_idct= FUNC(ff_h264_chroma422_dc_dequant_idct, depth);\
@@ -90,20 +90,20 @@ void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_fo
c->h264_h_loop_filter_luma_intra= FUNC(h264_h_loop_filter_luma_intra, depth);\
c->h264_h_loop_filter_luma_mbaff_intra= FUNC(h264_h_loop_filter_luma_mbaff_intra, depth);\
c->h264_v_loop_filter_chroma= FUNC(h264_v_loop_filter_chroma, depth);\
if (chroma_format_idc <= 1)\
if (chroma_format_idc == 1)\
c->h264_h_loop_filter_chroma= FUNC(h264_h_loop_filter_chroma, depth);\
else\
c->h264_h_loop_filter_chroma= FUNC(h264_h_loop_filter_chroma422, depth);\
if (chroma_format_idc <= 1)\
if (chroma_format_idc == 1)\
c->h264_h_loop_filter_chroma_mbaff= FUNC(h264_h_loop_filter_chroma_mbaff, depth);\
else\
c->h264_h_loop_filter_chroma_mbaff= FUNC(h264_h_loop_filter_chroma422_mbaff, depth);\
c->h264_v_loop_filter_chroma_intra= FUNC(h264_v_loop_filter_chroma_intra, depth);\
if (chroma_format_idc <= 1)\
if (chroma_format_idc == 1)\
c->h264_h_loop_filter_chroma_intra= FUNC(h264_h_loop_filter_chroma_intra, depth);\
else\
c->h264_h_loop_filter_chroma_intra= FUNC(h264_h_loop_filter_chroma422_intra, depth);\
if (chroma_format_idc <= 1)\
if (chroma_format_idc == 1)\
c->h264_h_loop_filter_chroma_mbaff_intra= FUNC(h264_h_loop_filter_chroma_mbaff_intra, depth);\
else\
c->h264_h_loop_filter_chroma_mbaff_intra= FUNC(h264_h_loop_filter_chroma422_mbaff_intra, depth);\

@@ -480,7 +480,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth,
h->pred8x8l[TOP_DC_PRED ]= FUNCC(pred8x8l_top_dc , depth);\
h->pred8x8l[DC_128_PRED ]= FUNCC(pred8x8l_128_dc , depth);\
\
if (chroma_format_idc <= 1) {\
if (chroma_format_idc == 1) {\
h->pred8x8[VERT_PRED8x8 ]= FUNCC(pred8x8_vertical , depth);\
h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x8_horizontal , depth);\
} else {\
@@ -488,7 +488,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth,
h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x16_horizontal , depth);\
}\
if (codec_id != AV_CODEC_ID_VP8) {\
if (chroma_format_idc <= 1) {\
if (chroma_format_idc == 1) {\
h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x8_plane , depth);\
} else {\
h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x16_plane , depth);\
@@ -496,7 +496,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth,
} else\
h->pred8x8[PLANE_PRED8x8]= FUNCD(pred8x8_tm_vp8);\
if(codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP8){\
if (chroma_format_idc <= 1) {\
if (chroma_format_idc == 1) {\
h->pred8x8[DC_PRED8x8 ]= FUNCC(pred8x8_dc , depth);\
h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x8_left_dc , depth);\
h->pred8x8[TOP_DC_PRED8x8 ]= FUNCC(pred8x8_top_dc , depth);\
@@ -522,7 +522,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth,
h->pred8x8[DC_129_PRED8x8]= FUNCC(pred8x8_129_dc , depth);\
}\
}\
if (chroma_format_idc <= 1) {\
if (chroma_format_idc == 1) {\
h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x8_128_dc , depth);\
} else {\
h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x16_128_dc , depth);\
@@ -556,7 +556,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth,
h->pred4x4_add [ HOR_PRED ]= FUNCC(pred4x4_horizontal_add , depth);\
h->pred8x8l_add [VERT_PRED ]= FUNCC(pred8x8l_vertical_add , depth);\
h->pred8x8l_add [ HOR_PRED ]= FUNCC(pred8x8l_horizontal_add , depth);\
if (chroma_format_idc <= 1) {\
if (chroma_format_idc == 1) {\
h->pred8x8_add [VERT_PRED8x8]= FUNCC(pred8x8_vertical_add , depth);\
h->pred8x8_add [ HOR_PRED8x8]= FUNCC(pred8x8_horizontal_add , depth);\
} else {\

@@ -118,13 +118,12 @@ static void generate_joint_tables(HYuvContext *s)
for (i = y = 0; y < 256; y++) {
int len0 = s->len[0][y];
int limit = VLC_BITS - len0;
if(limit <= 0 || !len0)
if(limit <= 0)
continue;
for (u = 0; u < 256; u++) {
int len1 = s->len[p][u];
if (len1 > limit || !len1)
if (len1 > limit)
continue;
av_assert0(i < (1 << VLC_BITS));
len[i] = len0 + len1;
bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
symbols[i] = (y << 8) + u;
@@ -147,19 +146,18 @@ static void generate_joint_tables(HYuvContext *s)
for (i = 0, g = -16; g < 16; g++) {
int len0 = s->len[p0][g & 255];
int limit0 = VLC_BITS - len0;
if (limit0 < 2 || !len0)
if (limit0 < 2)
continue;
for (b = -16; b < 16; b++) {
int len1 = s->len[p1][b & 255];
int limit1 = limit0 - len1;
if (limit1 < 1 || !len1)
if (limit1 < 1)
continue;
code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
for (r = -16; r < 16; r++) {
int len2 = s->len[2][r & 255];
if (len2 > limit1 || !len2)
if (len2 > limit1)
continue;
av_assert0(i < (1 << VLC_BITS));
len[i] = len0 + len1 + len2;
bits[i] = (code << len2) + s->bits[2][r & 255];
if (s->decorrelate) {
@@ -184,7 +182,6 @@ static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
{
GetBitContext gb;
int i;
int ret;

init_get_bits(&gb, src, length * 8);

@@ -195,9 +192,8 @@ static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
return -1;
}
ff_free_vlc(&s->vlc[i]);
if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
s->bits[i], 4, 4, 0)) < 0)
return ret;
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
s->bits[i], 4, 4, 0);
}

generate_joint_tables(s);
@@ -209,7 +205,6 @@ static int read_old_huffman_tables(HYuvContext *s)
{
GetBitContext gb;
int i;
int ret;

init_get_bits(&gb, classic_shift_luma,
classic_shift_luma_table_size * 8);
@@ -233,9 +228,8 @@ static int read_old_huffman_tables(HYuvContext *s)

for (i = 0; i < 3; i++) {
ff_free_vlc(&s->vlc[i]);
if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
s->bits[i], 4, 4, 0)) < 0)
return ret;
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
s->bits[i], 4, 4, 0);
}

generate_joint_tables(s);

@@ -341,7 +341,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->pix_fmt = AV_PIX_FMT_RGB444;
} else if (avctx->codec_tag != MKTAG('D','E','E','P')) {
if (avctx->bits_per_coded_sample == 24) {
avctx->pix_fmt = AV_PIX_FMT_0BGR32;
avctx->pix_fmt = AV_PIX_FMT_RGB0;
} else if (avctx->bits_per_coded_sample == 32) {
avctx->pix_fmt = AV_PIX_FMT_BGR32;
} else {

@@ -451,10 +451,6 @@ static int bit_allocation(IMCContext *q, IMCChannel *chctx,
iacc += chctx->bandWidthT[i];
summa += chctx->bandWidthT[i] * chctx->flcoeffs4[i];
}

if (!iacc)
return AVERROR_INVALIDDATA;

chctx->bandWidthT[BANDS - 1] = 0;
summa = (summa * 0.5 - freebits) / iacc;

Some files were not shown because too many files have changed in this diff