Compare commits
250 Commits
release/2. ... n1.2.6
| SHA1 |
| --- |
| e63346f7e8 |
| ddcccababe |
| b580bae53a |
| 9085cdd677 |
| ab31a9ee4a |
| a57d29a50c |
| 11b14d0e63 |
| bb683ebdba |
| b3cc4bd18f |
| ca9d302519 |
| e925fd215f |
| 5e01cd3b69 |
| 2256b2a3c1 |
| 89c917fcd9 |
| 8a38deb789 |
| 4fdcb1e1b7 |
| b902ba478c |
| f0fabcc6ed |
| bd29764c61 |
| d81ccf1fb2 |
| 67d20495f5 |
| 5a0a156e88 |
| 884094deba |
| db20610c49 |
| 15a736483e |
| c8b90c7cd5 |
| 97978b7ae8 |
| 3f2efac0fe |
| b9058f58bd |
| 47faf347a0 |
| 3d1e4b7ca5 |
| a2bea0df0e |
| 483b2016bb |
| 271f5d68b9 |
| dee327b0e7 |
| e438fd3be9 |
| 3d380ffde9 |
| 84b100396e |
| 7855083443 |
| ca7f64e0ac |
| 2b26f8c6bf |
| 6e3697b985 |
| c93501687c |
| bb6f466794 |
| 08a319549a |
| d1da1c8384 |
| 8ded3738d1 |
| a2186a8054 |
| a92b73da99 |
| ab38b39059 |
| 5dde8ba59e |
| b841869477 |
| 3488e9e269 |
| 5a8d78ab7f |
| dea7f1c62e |
| c7027ae738 |
| 971b13752d |
| d0ed672484 |
| 751e684aae |
| 803ca5c349 |
| 252002aec1 |
| d805a51713 |
| 45dd7df83b |
| da82be0cc0 |
| b8b77aefe8 |
| 30147f14d4 |
| d6a705d778 |
| e24b33cd68 |
| 6f7fd2f589 |
| f974c54909 |
| 71b3235cea |
| dcd1acce1a |
| 5c502e5d41 |
| fa45feefad |
| 9f3135b30b |
| 7ba102d008 |
| 3e65caf5bc |
| af7cbdf470 |
| 414b377462 |
| 86d4d4b011 |
| 14f31df2cc |
| d04e78805a |
| 25c67b2165 |
| fd8af75109 |
| 93d720b040 |
| 720e2d4143 |
| 9195ef6f65 |
| 833dce3818 |
| a8b6721bed |
| 1fe734f4d3 |
| 311e58e478 |
| 970109deaf |
| eff0bf7def |
| 6559bb893f |
| 14e258d847 |
| ca47aec665 |
| 95e26d33d7 |
| f32051cd73 |
| c3ee5b4c36 |
| c17fd9f9ce |
| 0f3cdddf38 |
| a0d13f578b |
| 91e19ab930 |
| bb4126e250 |
| 9902eef0f5 |
| 6db67ac2a4 |
| 85eeab4d22 |
| 4431ee1896 |
| d2feaf2ba6 |
| 0f73cb454d |
| 82cfb8c2db |
| 0a55c882cb |
| c09acf9882 |
| a0779a2ee5 |
| b4f90013ab |
| 3a6fbb0c8e |
| 6a782e20d7 |
| 8cea63c48a |
| 8f87e75c6c |
| 06a927a6b5 |
| 3c523bdda8 |
| ece54c7085 |
| 7eee79ea2f |
| 458933fdb8 |
| 95d0baa952 |
| 32cff0d3f0 |
| ddce97c7b0 |
| 26617b47fa |
| b3fb651a8a |
| 2da5b69020 |
| b23642aa7e |
| eae4dfd4e8 |
| 6fb2c03e1a |
| 1d6b261a24 |
| 02f1ae5406 |
| aaac34f5e6 |
| 7858d261aa |
| f85c44264b |
| 025e286757 |
| 79ec45a455 |
| 09653c5444 |
| c788713589 |
| f637fa5d33 |
| dc1881ee11 |
| 24187a96ee |
| c87f3c3ab4 |
| 8f07c09b20 |
| 8e3581ab03 |
| 5d3cd041fc |
| 9865f19f15 |
| 90ed770192 |
| f9fa321734 |
| a0e328d28b |
| 848e5cfc0f |
| 9d8795e598 |
| 64d440a3b4 |
| 42586c8754 |
| 64d362fce7 |
| c55a09a8b6 |
| a94404457b |
| 364495a351 |
| f9c872622e |
| 9f2d73168f |
| 930d035e72 |
| 875649bfae |
| 70127070dd |
| d83ab33715 |
| 06190d75d0 |
| c9ea1f7fc5 |
| 47de8ccf42 |
| 72c1de649a |
| 5e3e67f977 |
| 221bbd002c |
| fffc9316da |
| 9a87ba0933 |
| d8538a1002 |
| 6cb33e0763 |
| 2e75c45593 |
| ba4cb43f0b |
| dc73774792 |
| ce11d9490c |
| 430ef8a716 |
| 627772d988 |
| 5ce57e0248 |
| 5effd65312 |
| cb340ecd7d |
| b88bea1d6c |
| 99036565ca |
| fcc6460dbf |
| 1065d4197e |
| fbb1af39e4 |
| 073bde2b1f |
| e4bb67bc50 |
| 59549b5ab7 |
| c0445df5b3 |
| c77b3737b9 |
| 04d6946600 |
| ae93d3405e |
| 054779625f |
| e22dd7fbd0 |
| 376a9f8e6e |
| 9af68f8d1f |
| f2361593ca |
| 96e6d4da37 |
| 735deda2cf |
| 5f64a7a625 |
| ffd28de388 |
| af589dd5e9 |
| fd60aeb55b |
| 79a3f364dd |
| 33d699a4e7 |
| f166a02b67 |
| c5948b472b |
| 7ee5e97c46 |
| 7ef2dbd239 |
| 524d0d2cfc |
| 91f04e7410 |
| 9f7baf139f |
| ec89046fa1 |
| cc0dd86580 |
| 0baa0a5a02 |
| 039f6921c2 |
| 7fa6db2545 |
| 840931e766 |
| 1ce3f736d2 |
| 4d0090f90a |
| f8d5f3dff5 |
| 298e03d102 |
| 54f3902393 |
| 3ca6253beb |
| 84df0dc40b |
| a857811c75 |
| 3d6219b6db |
| 0a53c7016f |
| f0dc5c0419 |
| df8c36265a |
| c351d8781d |
| d553a522b9 |
| 0b6d5f27c8 |
| 9ecfd7daa3 |
| 90f1aa38b6 |
| fc5c81ce0a |
| e820e3a259 |
| 17911d0a96 |
| 2ed7f5a670 |
| 7c6c5767eb |
| 0ec869527c |
| 0b198e38c5 |
| 1ea3248290 |
| bdd2db60c2 |
```diff
@@ -1,7 +1,7 @@
 Entries are sorted chronologically from oldest to youngest within each release,
 releases are sorted from youngest to oldest.
 
-version <next>:
+version 1.2:
 
 - VDPAU hardware acceleration through normal hwaccel
 - SRTP support
```
```diff
@@ -445,9 +445,9 @@ x86 Michael Niedermayer
 Releases
 ========
 
+1.2 Michael Niedermayer
 1.1 Michael Niedermayer
 1.0 Michael Niedermayer
-0.11 Michael Niedermayer
 
 
 
```
```diff
@@ -65,7 +65,7 @@ struct SwsContext *sws_opts;
 AVDictionary *swr_opts;
 AVDictionary *format_opts, *codec_opts, *resample_opts;
 
-const int this_year = 2013;
+const int this_year = 2014;
 
 static FILE *report_file;
 
@@ -190,13 +190,13 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
 void show_help_children(const AVClass *class, int flags);
 
 /**
- * Per-avtool specific help handler. Implemented in each
- * avtool, called by show_help().
+ * Per-fftool specific help handler. Implemented in each
+ * fftool, called by show_help().
 */
 void show_help_default(const char *opt, const char *arg);
 
 /**
- * Generic -h handler common to all avtools.
+ * Generic -h handler common to all fftools.
 */
 int show_help(void *optctx, const char *opt, const char *arg);
 
```
configure

```diff
@@ -140,10 +140,10 @@ Component options:
 --disable-fft disable FFT code
 
 Hardware accelerators:
---enable-dxva2 enable DXVA2 code
---enable-vaapi enable VAAPI code
+--disable-dxva2 disable DXVA2 code [autodetect]
+--disable-vaapi disable VAAPI code [autodetect]
 --enable-vda enable VDA code
---enable-vdpau enable VDPAU code
+--disable-vdpau disable VDPAU code [autodetect]
 
 Individual component options:
 --disable-everything disable all components listed below
@@ -184,11 +184,11 @@ Individual component options:
 
 External library support:
 --enable-avisynth enable reading of AVISynth script files [no]
---enable-bzlib enable bzlib [autodetect]
+--disable-bzlib disable bzlib [autodetect]
 --enable-fontconfig enable fontconfig
 --enable-frei0r enable frei0r video filtering
 --enable-gnutls enable gnutls [no]
---enable-iconv enable iconv [no]
+--disable-iconv disable iconv [autodetect]
 --enable-libaacplus enable AAC+ encoding via libaacplus [no]
 --enable-libass enable libass subtitles rendering [no]
 --enable-libbluray enable BluRay reading using libbluray [no]
@@ -235,7 +235,7 @@ External library support:
 --enable-openal enable OpenAL 1.1 capture support [no]
 --enable-openssl enable openssl [no]
 --enable-x11grab enable X11 grabbing [no]
---enable-zlib enable zlib [autodetect]
+--disable-zlib disable zlib [autodetect]
 
 Advanced options (experts only):
 --cross-prefix=PREFIX use PREFIX for compilation tools [$cross_prefix]
@@ -1075,6 +1075,26 @@ require_pkg_config(){
 add_extralibs $(get_safe ${pkg}_libs)
 }
 
+require_libfreetype(){
+log require_libfreetype "$@"
+pkg="freetype2"
+check_cmd $pkg_config --exists --print-errors $pkg \
+|| die "ERROR: $pkg not found"
+pkg_cflags=$($pkg_config --cflags $pkg)
+pkg_libs=$($pkg_config --libs $pkg)
+{
+echo "#include <ft2build.h>"
+echo "#include FT_FREETYPE_H"
+echo "long check_func(void) { return (long) FT_Init_FreeType; }"
+echo "int main(void) { return 0; }"
+} | check_ld "cc" $pkg_cflags $pkg_libs \
+&& set_safe ${pkg}_cflags $pkg_cflags \
+&& set_safe ${pkg}_libs $pkg_libs \
+|| die "ERROR: $pkg not found"
+add_cflags $(get_safe ${pkg}_cflags)
+add_extralibs $(get_safe ${pkg}_libs)
+}
+
 hostcc_o(){
 eval printf '%s\\n' $HOSTCC_O
 }
@@ -2169,6 +2189,9 @@ enable safe_bitstream_reader
 enable static
 enable swscale_alpha
 
+# Enable hwaccels by default.
+enable dxva2 vaapi vdpau
+
 # build settings
 SHFLAGS='-shared -Wl,-soname,$$(@F)'
 FFSERVERLDFLAGS=-Wl,-E
@@ -2633,7 +2656,9 @@ probe_cc(){
 unset _depflags _DEPCMD _DEPFLAGS
 _flags_filter=echo
 
-if $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
+if $_cc --version 2>&1 | grep -q '^GNU assembler'; then
+true # no-op to avoid reading stdin in following checks
+elif $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
 _type=llvm_gcc
 gcc_extra_ver=$(expr "$($_cc --version | head -n1)" : '.*\((.*)\)')
 _ident="llvm-gcc $($_cc -dumpversion) $gcc_extra_ver"
@@ -3873,7 +3898,6 @@ fi
 
 check_lib math.h sin -lm && LIBM="-lm"
 disabled crystalhd || check_lib libcrystalhd/libcrystalhd_if.h DtsCrystalHDVersion -lcrystalhd || disable crystalhd
-enabled vaapi && require vaapi va/va.h vaInitialize -lva
 
 atan2f_args=2
 ldexpf_args=2
@@ -3900,7 +3924,7 @@ enabled libfaac && require2 libfaac "stdint.h faac.h" faacEncGetVersion -lfaa
 enabled libfdk_aac && require libfdk_aac fdk-aac/aacenc_lib.h aacEncOpen -lfdk-aac
 flite_libs="-lflite_cmu_time_awb -lflite_cmu_us_awb -lflite_cmu_us_kal -lflite_cmu_us_kal16 -lflite_cmu_us_rms -lflite_cmu_us_slt -lflite_usenglish -lflite_cmulex -lflite"
 enabled libflite && require2 libflite "flite/flite.h" flite_init $flite_libs
-enabled libfreetype && require_pkg_config freetype2 "ft2build.h freetype/freetype.h" FT_Init_FreeType
+enabled libfreetype && require_libfreetype
 enabled libgsm && { for gsm_hdr in "gsm.h" "gsm/gsm.h"; do
 check_lib "${gsm_hdr}" gsm_create -lgsm && break;
 done || die "ERROR: libgsm not found"; }
@@ -4041,19 +4065,16 @@ require X11 X11/Xlib.h XOpenDisplay -lX11 &&
 require Xext X11/extensions/XShm.h XShmCreateImage -lXext &&
 require Xfixes X11/extensions/Xfixes.h XFixesGetCursorImage -lXfixes
 
-if ! disabled vaapi; then
-check_lib va/va.h vaInitialize -lva && {
-check_cpp_condition va/va_version.h "VA_CHECK_VERSION(0,32,0)" ||
-warn "Please upgrade to VA-API >= 0.32 if you would like full VA-API support.";
-} || disable vaapi
-fi
+enabled vaapi &&
+check_lib va/va.h vaInitialize -lva ||
+disable vaapi
 
 enabled vdpau &&
 check_cpp_condition vdpau/vdpau.h "defined VDP_DECODER_PROFILE_MPEG4_PART2_ASP" ||
 disable vdpau
 
 # Funny iconv installations are not unusual, so check it after all flags have been set
-enabled iconv && { check_func_headers iconv.h iconv || check_lib2 iconv.h iconv -liconv || die "ERROR: iconv not found"; }
+disabled iconv || check_func_headers iconv.h iconv || check_lib2 iconv.h iconv -liconv || disable iconv
 
 enabled debug && add_cflags -g"$debuglevel" && add_asflags -g"$debuglevel"
 enabled coverage && add_cflags "-fprofile-arcs -ftest-coverage" && add_ldflags "-fprofile-arcs -ftest-coverage"
```
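The configure hunks above share one theme: DXVA2, VAAPI, VDPAU, bzlib, iconv and zlib move from opt-in `--enable-*` switches to autodetection, where the feature starts enabled and is quietly disabled when its probe fails instead of aborting the build. A minimal standalone sketch of that pattern follows; the `enabled`, `disable` and `check_lib` helpers here are simplified stand-ins for the real configure functions, and the probed libraries are only examples.

```sh
#!/bin/sh
# Sketch of configure-style autodetection: features start enabled and are
# disabled when their probe fails, instead of failing the whole build.

FEATURES="zlib vdpau"   # features enabled by default (example list)

enabled(){
    case " $FEATURES " in
        *" $1 "*) return 0 ;;
        *)        return 1 ;;
    esac
}

disable(){
    new=""
    for f in $FEATURES; do
        [ "$f" = "$1" ] || new="$new $f"
    done
    FEATURES=$new
}

# Simplified stand-in for check_lib: compile and link a tiny program
# against the given header and library.
check_lib(){
    printf '#include <%s>\nint main(void){return 0;}\n' "$1" |
        cc -x c - "$2" -o /dev/null 2>/dev/null
}

# The autodetect pattern used by the hunks above.
enabled zlib  && { check_lib zlib.h -lz            || disable zlib;  }
enabled vdpau && { check_lib vdpau/vdpau.h -lvdpau || disable vdpau; }

echo "enabled after autodetection:$FEATURES"
```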
|
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
|
|||||||
# This could be handy for archiving the generated documentation or
|
# This could be handy for archiving the generated documentation or
|
||||||
# if some version control system is used.
|
# if some version control system is used.
|
||||||
|
|
||||||
PROJECT_NUMBER =
|
PROJECT_NUMBER = 1.2.6
|
||||||
|
|
||||||
# With the PROJECT_LOGO tag one can specify an logo or icon that is included
|
# With the PROJECT_LOGO tag one can specify an logo or icon that is included
|
||||||
# in the documentation. The maximum height of the logo should not exceed 55
|
# in the documentation. The maximum height of the logo should not exceed 55
|
||||||
|
```diff
@@ -1,7 +1,7 @@
 Release Notes
 =============
 
-* 0.10 "Freedom" January, 2012
+* 1.2 "Magic" March, 2013
 
 
 General notes
@@ -14,9 +14,3 @@ accepted. If you are experiencing issues with any formally released version of
 FFmpeg, please try git master to check if the issue still exists. If it does,
 make your report against the development code following the usual bug reporting
 guidelines.
-
-Of big interest to our Windows users, FFmpeg now supports building with the MSVC
-compiler. Since MSVC does not support C99 features used extensively by FFmpeg,
-this has been accomplished using a converter that turns C99 code to C89. See the
-platform-specific documentation for more detailed documentation on building
-FFmpeg with MSVC.
```
```diff
@@ -60,6 +60,78 @@ This decoder generates wave patterns according to predefined sequences. Its
 use is purely internal and the format of the data it accepts is not publicly
 documented.
 
+@section libcelt
+
+libcelt decoder wrapper
+
+libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
+Requires the presence of the libcelt headers and library during configuration.
+You need to explicitly configure the build with @code{--enable-libcelt}.
+
+@section libgsm
+
+libgsm decoder wrapper
+
+libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
+the presence of the libgsm headers and library during configuration. You need
+to explicitly configure the build with @code{--enable-libgsm}.
+
+This decoder supports both the ordinary GSM and the Microsoft variant.
+
+@section libilbc
+
+libilbc decoder wrapper
+
+libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
+audio codec. Requires the presence of the libilbc headers and library during
+configuration. You need to explicitly configure the build with
+@code{--enable-libilbc}.
+
+@subsection Options
+
+The following option is supported by the libilbc wrapper.
+
+@table @option
+@item enhance
+
+Enable the enhancement of the decoded audio when set to 1. The default
+value is 0 (disabled).
+
+@end table
+
+@section libopencore-amrnb
+
+libopencore-amrnb decoder wrapper
+
+libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
+Narrowband audio codec. Using it requires the presence of the
+libopencore-amrnb headers and library during configuration. You need to
+explicitly configure the build with @code{--enable-libopencore-amrnb}.
+
+An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
+without this library.
+
+@section libopencore-amrwb
+
+libopencore-amrwb decoder wrapper.
+
+libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
+Wideband audio codec. Using it requires the presence of the
+libopencore-amrwb headers and library during configuration. You need to
+explicitly configure the build with @code{--enable-libopencore-amrwb}.
+
+An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
+without this library.
+
+@section libopus
+
+libopus decoder wrapper.
+
+libopus allows libavcodec to decode the Opus Interactive Audio Codec.
+Requires the presence of the libopus headers and library during
+configuration. You need to explicitly configure the build with
+@code{--enable-libopus}.
+
 @c man end AUDIO DECODERS
 
 @chapter Subtitles Decoders
```
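Each wrapper decoder documented in the hunk above is compiled in only when the matching `--enable-lib*` switch is passed at configure time, and can then be forced at decode time with `-c:a`. A hypothetical end-to-end example follows; the file names are placeholders.

```sh
# Illustrative only: build with the libopus wrapper documented above,
# then force it when decoding an Opus file.
./configure --enable-libopus
make
./ffmpeg -c:a libopus -i input.opus output.wav
```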
@@ -25,6 +25,95 @@ enabled encoders.
|
|||||||
A description of some of the currently available audio encoders
|
A description of some of the currently available audio encoders
|
||||||
follows.
|
follows.
|
||||||
|
|
||||||
|
@anchor{aacenc}
|
||||||
|
@section aac
|
||||||
|
|
||||||
|
Advanced Audio Coding (AAC) encoder.
|
||||||
|
|
||||||
|
This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
|
||||||
|
low complexity (AAC-LC) profile is supported. To use this encoder, you must set
|
||||||
|
@option{strict} option to @samp{experimental} or lower.
|
||||||
|
|
||||||
|
As this encoder is experimental, unexpected behavior may exist from time to
|
||||||
|
time. For a more stable AAC encoder, see @ref{libvo-aacenc}. However, be warned
|
||||||
|
that it has a worse quality reported by some users.
|
||||||
|
|
||||||
|
@c Comment this out until somebody writes the respective documentation.
|
||||||
|
@c See also @ref{libfaac}, @ref{libaacplus}, and @ref{libfdk-aac-enc}.
|
||||||
|
|
||||||
|
@subsection Options
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
@item b
|
||||||
|
Set bit rate in bits/s. Setting this automatically activates constant bit rate
|
||||||
|
(CBR) mode.
|
||||||
|
|
||||||
|
@item q
|
||||||
|
Set quality for variable bit rate (VBR) mode. This option is valid only using
|
||||||
|
the @command{ffmpeg} command-line tool. For library interface users, use
|
||||||
|
@option{global_quality}.
|
||||||
|
|
||||||
|
@item stereo_mode
|
||||||
|
Set stereo encoding mode. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item auto
|
||||||
|
Automatically selected by the encoder.
|
||||||
|
|
||||||
|
@item ms_off
|
||||||
|
Disable middle/side encoding. This is the default.
|
||||||
|
|
||||||
|
@item ms_force
|
||||||
|
Force middle/side encoding.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item aac_coder
|
||||||
|
Set AAC encoder coding method. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item 0
|
||||||
|
FAAC-inspired method.
|
||||||
|
|
||||||
|
This method is a simplified reimplementation of the method used in FAAC, which
|
||||||
|
sets thresholds proportional to the band energies, and then decreases all the
|
||||||
|
thresholds with quantizer steps to find the appropriate quantization with
|
||||||
|
distortion below threshold band by band.
|
||||||
|
|
||||||
|
The quality of this method is comparable to the two loop searching method
|
||||||
|
descibed below, but somewhat a little better and slower.
|
||||||
|
|
||||||
|
@item 1
|
||||||
|
Average noise to mask ratio (ANMR) trellis-based solution.
|
||||||
|
|
||||||
|
This has a theoretic best quality out of all the coding methods, but at the
|
||||||
|
cost of the slowest speed.
|
||||||
|
|
||||||
|
@item 2
|
||||||
|
Two loop searching (TLS) method.
|
||||||
|
|
||||||
|
This method first sets quantizers depending on band thresholds and then tries
|
||||||
|
to find an optimal combination by adding or subtracting a specific value from
|
||||||
|
all quantizers and adjusting some individual quantizer a little.
|
||||||
|
|
||||||
|
This method produces similar quality with the FAAC method and is the default.
|
||||||
|
|
||||||
|
@item 3
|
||||||
|
Constant quantizer method.
|
||||||
|
|
||||||
|
This method sets a constant quantizer for all bands. This is the fastest of all
|
||||||
|
the methods, yet produces the worst quality.
|
||||||
|
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@subsection Tips and Tricks
|
||||||
|
|
||||||
|
According to some reports
|
||||||
|
(e.g. @url{http://d.hatena.ne.jp/kamedo2/20120729/1343545890}), setting the
|
||||||
|
@option{cutoff} option to 15000 Hz greatly improves the quality of the output
|
||||||
|
quality. As a result, we encourage you to do the same.
|
||||||
|
|
||||||
@section ac3 and ac3_fixed
|
@section ac3 and ac3_fixed
|
||||||
|
|
||||||
AC-3 audio encoders.
|
AC-3 audio encoders.
|
||||||
@@ -412,6 +501,279 @@ Selected by Encoder (default)
|
|||||||
|
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@section libmp3lame
|
||||||
|
|
||||||
|
LAME (Lame Ain't an MP3 Encoder) MP3 encoder wrapper
|
||||||
|
|
||||||
|
Requires the presence of the libmp3lame headers and library during
|
||||||
|
configuration. You need to explicitly configure the build with
|
||||||
|
@code{--enable-libmp3lame}.
|
||||||
|
|
||||||
|
@subsection Options
|
||||||
|
|
||||||
|
The following options are supported by the libmp3lame wrapper. The
|
||||||
|
@command{lame}-equivalent of the options are listed in parentheses.
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
@item b (@emph{-b})
|
||||||
|
Set bitrate expressed in bits/s for CBR. LAME @code{bitrate} is
|
||||||
|
expressed in kilobits/s.
|
||||||
|
|
||||||
|
@item q (@emph{-V})
|
||||||
|
Set constant quality setting for VBR. This option is valid only
|
||||||
|
using the @command{ffmpeg} command-line tool. For library interface
|
||||||
|
users, use @option{global_quality}.
|
||||||
|
|
||||||
|
@item compression_level (@emph{-q})
|
||||||
|
Set algorithm quality. Valid arguments are integers in the 0-9 range,
|
||||||
|
with 0 meaning highest quality but slowest, and 9 meaning fastest
|
||||||
|
while producing the worst quality.
|
||||||
|
|
||||||
|
@item reservoir
|
||||||
|
Enable use of bit reservoir when set to 1. Default value is 1. LAME
|
||||||
|
has this enabled by default, but can be overriden by use
|
||||||
|
@option{--nores} option.
|
||||||
|
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@section libopencore-amrnb
|
||||||
|
|
||||||
|
OpenCORE Adaptive Multi-Rate Narrowband encoder.
|
||||||
|
|
||||||
|
Requires the presence of the libopencore-amrnb headers and library during
|
||||||
|
configuration. You need to explicitly configure the build with
|
||||||
|
@code{--enable-libopencore-amrnb --enable-version3}.
|
||||||
|
|
||||||
|
This is a mono-only encoder. Officially it only supports 8000Hz sample rate,
|
||||||
|
but you can override it by setting @option{strict} to @samp{unofficial} or
|
||||||
|
lower.
|
||||||
|
|
||||||
|
@subsection Options
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
|
||||||
|
@item b
|
||||||
|
Set bitrate in bits per second. Only the following bitrates are supported,
|
||||||
|
otherwise libavcodec will round to the nearest valid bitrate.
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
@item 4750
|
||||||
|
@item 5150
|
||||||
|
@item 5900
|
||||||
|
@item 6700
|
||||||
|
@item 7400
|
||||||
|
@item 7950
|
||||||
|
@item 10200
|
||||||
|
@item 12200
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item dtx
|
||||||
|
Allow discontinuous transmission (generate comfort noise) when set to 1. The
|
||||||
|
default value is 0 (disabled).
|
||||||
|
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@section libtwolame
|
||||||
|
|
||||||
|
TwoLAME MP2 encoder wrapper
|
||||||
|
|
||||||
|
Requires the presence of the libtwolame headers and library during
|
||||||
|
configuration. You need to explicitly configure the build with
|
||||||
|
@code{--enable-libtwolame}.
|
||||||
|
|
||||||
|
@subsection Options
|
||||||
|
|
||||||
|
The following options are supported by the libtwolame wrapper. The
|
||||||
|
@command{twolame}-equivalent options follow the FFmpeg ones and are in
|
||||||
|
parentheses.
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
@item b (@emph{-b})
|
||||||
|
Set bitrate expressed in bits/s for CBR. @command{twolame} @option{b}
|
||||||
|
option is expressed in kilobits/s. Default value is 128k.
|
||||||
|
|
||||||
|
@item q (@emph{-V})
|
||||||
|
Set quality for experimental VBR support. Maximum value range is
|
||||||
|
from -50 to 50, useful range is from -10 to 10. The higher the
|
||||||
|
value, the better the quality. This option is valid only using the
|
||||||
|
@command{ffmpeg} command-line tool. For library interface users,
|
||||||
|
use @option{global_quality}.
|
||||||
|
|
||||||
|
@item mode (@emph{--mode})
|
||||||
|
Set the mode of the resulting audio. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item auto
|
||||||
|
Choose mode automatically based on the input. This is the default.
|
||||||
|
@item stereo
|
||||||
|
Stereo
|
||||||
|
@item joint_stereo
|
||||||
|
Joint stereo
|
||||||
|
@item dual_channel
|
||||||
|
Dual channel
|
||||||
|
@item mono
|
||||||
|
Mono
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item psymodel (@emph{--psyc-mode})
|
||||||
|
Set psychoacoustic model to use in encoding. The argument must be
|
||||||
|
an integer between -1 and 4, inclusive. The higher the value, the
|
||||||
|
better the quality. The default value is 3.
|
||||||
|
|
||||||
|
@item energy_levels (@emph{--energy})
|
||||||
|
Enable energy levels extensions when set to 1. The default value is
|
||||||
|
0 (disabled).
|
||||||
|
|
||||||
|
@item error_protection (@emph{--protect})
|
||||||
|
Enable CRC error protection when set to 1. The default value is 0
|
||||||
|
(disabled).
|
||||||
|
|
||||||
|
@item copyright (@emph{--copyright})
|
||||||
|
Set MPEG audio copyright flag when set to 1. The default value is 0
|
||||||
|
(disabled).
|
||||||
|
|
||||||
|
@item original (@emph{--original})
|
||||||
|
Set MPEG audio original flag when set to 1. The default value is 0
|
||||||
|
(disabled).
|
||||||
|
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@anchor{libvo-aacenc}
|
||||||
|
@section libvo-aacenc
|
||||||
|
|
||||||
|
VisualOn AAC encoder
|
||||||
|
|
||||||
|
Requires the presence of the libvo-aacenc headers and library during
|
||||||
|
configuration. You need to explicitly configure the build with
|
||||||
|
@code{--enable-libvo-aacenc --enable-version3}.
|
||||||
|
|
||||||
|
This encoder is considered to be worse than the
|
||||||
|
@ref{aacenc,,native experimental FFmpeg AAC encoder}, according to
|
||||||
|
multiple sources.
|
||||||
|
|
||||||
|
@subsection Options
|
||||||
|
|
||||||
|
The VisualOn AAC encoder only support encoding AAC-LC and up to 2
|
||||||
|
channels. It is also CBR-only.
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
|
||||||
|
@item b
|
||||||
|
Set bit rate in bits/s.
|
||||||
|
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@section libvo-amrwbenc
|
||||||
|
|
||||||
|
VisualOn Adaptive Multi-Rate Wideband encoder
|
||||||
|
|
||||||
|
Requires the presence of the libvo-amrwbenc headers and library during
|
||||||
|
configuration. You need to explicitly configure the build with
|
||||||
|
@code{--enable-libvo-amrwbenc --enable-version3}.
|
||||||
|
|
||||||
|
This is a mono-only encoder. Officially it only supports 16000Hz sample
|
||||||
|
rate, but you can override it by setting @option{strict} to
|
||||||
|
@samp{unofficial} or lower.
|
||||||
|
|
||||||
|
@subsection Options
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
|
||||||
|
@item b
|
||||||
|
Set bitrate in bits/s. Only the following bitrates are supported, otherwise
|
||||||
|
libavcodec will round to the nearest valid bitrate.
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item 6600
|
||||||
|
@item 8850
|
||||||
|
@item 12650
|
||||||
|
@item 14250
|
||||||
|
@item 15850
|
||||||
|
@item 18250
|
||||||
|
@item 19850
|
||||||
|
@item 23050
|
||||||
|
@item 23850
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item dtx
|
||||||
|
Allow discontinuous transmission (generate comfort noise) when set to 1. The
|
||||||
|
default value is 0 (disabled).
|
||||||
|
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@section libopus
|
||||||
|
|
||||||
|
libopus Opus Interactive Audio Codec encoder wrapper.
|
||||||
|
|
||||||
|
Requires the presence of the libopus headers and library during
|
||||||
|
configuration. You need to explicitly configure the build with
|
||||||
|
@code{--enable-libopus}.
|
||||||
|
|
||||||
|
@subsection Option Mapping
|
||||||
|
|
||||||
|
Most libopus options are modeled after the @command{opusenc} utility from
|
||||||
|
opus-tools. The following is an option mapping chart describing options
|
||||||
|
supported by the libopus wrapper, and their @command{opusenc}-equivalent
|
||||||
|
in parentheses.
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
|
||||||
|
@item b (@emph{bitrate})
|
||||||
|
Set the bit rate in bits/s. FFmpeg's @option{b} option is
|
||||||
|
expressed in bits/s, while @command{opusenc}'s @option{bitrate} in
|
||||||
|
kilobits/s.
|
||||||
|
|
||||||
|
@item vbr (@emph{vbr}, @emph{hard-cbr}, and @emph{cvbr})
|
||||||
|
Set VBR mode. The FFmpeg @option{vbr} option has the following
|
||||||
|
valid arguments, with the their @command{opusenc} equivalent options
|
||||||
|
in parentheses:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item off (@emph{hard-cbr})
|
||||||
|
Use constant bit rate encoding.
|
||||||
|
|
||||||
|
@item on (@emph{vbr})
|
||||||
|
Use variable bit rate encoding (the default).
|
||||||
|
|
||||||
|
@item constrained (@emph{cvbr})
|
||||||
|
Use constrained variable bit rate encoding.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item compression_level (@emph{comp})
|
||||||
|
Set encoding algorithm complexity. Valid options are integers in
|
||||||
|
the 0-10 range. 0 gives the fastest encodes but lower quality, while 10
|
||||||
|
gives the highest quality but slowest encoding. The default is 10.
|
||||||
|
|
||||||
|
@item frame_duration (@emph{framesize})
|
||||||
|
Set maximum frame size, or duration of a frame in milliseconds. The
|
||||||
|
argument must be exactly the following: 2.5, 5, 10, 20, 40, 60. Smaller
|
||||||
|
frame sizes achieve lower latency but less quality at a given bitrate.
|
||||||
|
Sizes greater than 20ms are only interesting at fairly low bitrates.
|
||||||
|
The default is 20ms.
|
||||||
|
|
||||||
|
@item packet_loss (@emph{expect-loss})
|
||||||
|
Set expected packet loss percentage. The default is 0.
|
||||||
|
|
||||||
|
@item application (N.A.)
|
||||||
|
Set intended application type. Valid options are listed below:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item voip
|
||||||
|
Favor improved speech intelligibility.
|
||||||
|
@item audio
|
||||||
|
Favor faithfulness to the input (the default).
|
||||||
|
@item lowdelay
|
||||||
|
Restrict to only the lowest delay modes.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item cutoff (N.A.)
|
||||||
|
Set cutoff bandwidth in Hz. The argument must be exactly one of the
|
||||||
|
following: 4000, 6000, 8000, 12000, or 20000, corresponding to
|
||||||
|
narrowband, mediumband, wideband, super wideband, and fullband
|
||||||
|
respectively. The default is 0 (cutoff disabled).
|
||||||
|
|
||||||
|
@end table
|
||||||
|
|
||||||
@c man end AUDIO ENCODERS
|
@c man end AUDIO ENCODERS
|
||||||
|
|
||||||
@chapter Video Encoders
|
@chapter Video Encoders
|
||||||
@@ -583,178 +945,318 @@ For more information about libvpx see:
|
|||||||
|
|
||||||
x264 H.264/MPEG-4 AVC encoder wrapper
|
x264 H.264/MPEG-4 AVC encoder wrapper
|
||||||
|
|
||||||
Requires the presence of the libx264 headers and library during
|
This encoder requires the presence of the libx264 headers and library
|
||||||
configuration. You need to explicitly configure the build with
|
during configuration. You need to explicitly configure the build with
|
||||||
@code{--enable-libx264}.
|
@code{--enable-libx264}.
|
||||||
|
|
||||||
x264 supports an impressive number of features, including 8x8 and 4x4 adaptive
|
libx264 supports an impressive number of features, including 8x8 and
|
||||||
spatial transform, adaptive B-frame placement, CAVLC/CABAC entropy coding,
|
4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC
|
||||||
interlacing (MBAFF), lossless mode, psy optimizations for detail retention
|
entropy coding, interlacing (MBAFF), lossless mode, psy optimizations
|
||||||
(adaptive quantization, psy-RD, psy-trellis).
|
for detail retention (adaptive quantization, psy-RD, psy-trellis).
|
||||||
|
|
||||||
The FFmpeg wrapper provides a mapping for most of them using global options
|
Many libx264 encoder options are mapped to FFmpeg global codec
|
||||||
that match those of the encoders and provides private options for the unique
|
options, while unique encoder options are provided through private
|
||||||
encoder options. Additionally an expert override is provided to directly pass
|
options. Additionally the @option{x264opts} and @option{x264-params}
|
||||||
a list of key=value tuples as accepted by x264_param_parse.
|
private options allows to pass a list of key=value tuples as accepted
|
||||||
|
by the libx264 @code{x264_param_parse} function.
|
||||||
|
|
||||||
@subsection Option Mapping
|
The x264 project website is at
|
||||||
|
@url{http://www.videolan.org/developers/x264.html}.
|
||||||
|
|
||||||
The following options are supported by the x264 wrapper, the x264-equivalent
|
@subsection Options
|
||||||
options follow the FFmpeg ones.
|
|
||||||
|
|
||||||
@multitable @columnfractions .2 .2
|
The following options are supported by the libx264 wrapper. The
|
||||||
@item b @tab bitrate
|
@command{x264}-equivalent options or values are listed in parentheses
|
||||||
FFmpeg @code{b} option is expressed in bits/s, x264 @code{bitrate} in kilobits/s.
|
for easy migration.
|
||||||
@item bf @tab bframes
|
|
||||||
Maximum number of B-frames.
|
To reduce the duplication of documentation, only the private options
|
||||||
@item g @tab keyint
|
and some others requiring special attention are documented here. For
|
||||||
Maximum GOP size.
|
the documentation of the undocumented generic options, see
|
||||||
@item qmin @tab qpmin
|
@ref{codec-options,,the Codec Options chapter}.
|
||||||
@item qmax @tab qpmax
|
|
||||||
@item qdiff @tab qpstep
|
To get a more accurate and extensive documentation of the libx264
|
||||||
@item qblur @tab qblur
|
options, invoke the command @command{x264 --full-help} or consult
|
||||||
@item qcomp @tab qcomp
|
the libx264 documentation.
|
||||||
@item refs @tab ref
|
|
||||||
@item sc_threshold @tab scenecut
|
|
||||||
@item trellis @tab trellis
|
|
||||||
@item nr @tab nr
|
|
||||||
Noise reduction.
|
|
||||||
@item me_range @tab merange
|
|
||||||
@item me_method @tab me
|
|
||||||
@item subq @tab subme
|
|
||||||
@item b_strategy @tab b-adapt
|
|
||||||
@item keyint_min @tab keyint-min
|
|
||||||
@item coder @tab cabac
|
|
||||||
Set coder to @code{ac} to use CABAC.
|
|
||||||
@item cmp @tab chroma-me
|
|
||||||
Set to @code{chroma} to use chroma motion estimation.
|
|
||||||
@item threads @tab threads
|
|
||||||
@item thread_type @tab sliced_threads
|
|
||||||
Set to @code{slice} to use sliced threading instead of frame threading.
|
|
||||||
@item flags -cgop @tab open-gop
|
|
||||||
Set @code{-cgop} to use recovery points to close GOPs.
|
|
||||||
@item rc_init_occupancy @tab vbv-init
|
|
||||||
Initial buffer occupancy.
|
|
||||||
@end multitable
|
|
||||||
|
|
||||||
@subsection Private Options
|
|
||||||
@table @option
|
@table @option
|
||||||
@item -preset @var{string}
|
@item b (@emph{bitrate})
|
||||||
Set the encoding preset (cf. x264 --fullhelp).
|
Set bitrate in bits/s. Note that FFmpeg's @option{b} option is
|
||||||
@item -tune @var{string}
|
expressed in bits/s, while @command{x264}'s @option{bitrate} is in
|
||||||
Tune the encoding params (cf. x264 --fullhelp).
|
kilobits/s.
|
||||||
@item -profile @var{string}
|
|
||||||
Set profile restrictions (cf. x264 --fullhelp).
|
@item bf (@emph{bframes})
|
||||||
@item -fastfirstpass @var{integer}
|
|
||||||
Use fast settings when encoding first pass.
|
@item g (@emph{keyint})
|
||||||
@item -crf @var{float}
|
|
||||||
Select the quality for constant quality mode.
|
@item qmax (@emph{qpmax})
|
||||||
@item -crf_max @var{float}
|
|
||||||
In CRF mode, prevents VBV from lowering quality beyond this point.
|
@item qmin (@emph{qpmin})
|
||||||
@item -qp @var{integer}
|
|
||||||
Constant quantization parameter rate control method.
|
@item qdiff (@emph{qpstep})
|
||||||
@item -aq-mode @var{integer}
|
|
||||||
AQ method
|
@item qblur (@emph{qblur})
|
||||||
|
|
||||||
|
@item qcomp (@emph{qcomp})
|
||||||
|
|
||||||
|
@item refs (@emph{ref})
|
||||||
|
|
||||||
|
@item sc_threshold (@emph{scenecut})
|
||||||
|
|
||||||
|
@item trellis (@emph{trellis})
|
||||||
|
|
||||||
|
@item nr (@emph{nr})
|
||||||
|
|
||||||
|
@item me_range (@emph{merange})
|
||||||
|
|
||||||
|
@item me_method (@emph{me})
|
||||||
|
Set motion estimation method. Possible values in the decreasing order
|
||||||
|
of speed:
|
||||||
|
|
||||||
Possible values:
|
|
||||||
@table @samp
|
@table @samp
|
||||||
@item none
|
@item dia (@emph{dia})
|
||||||
|
@item epzs (@emph{dia})
|
||||||
|
Diamond search with radius 1 (fastest). @samp{epzs} is an alias for
|
||||||
|
@samp{dia}.
|
||||||
|
@item hex (@emph{hex})
|
||||||
|
Hexagonal search with radius 2.
|
||||||
|
@item umh (@emph{umh})
|
||||||
|
Uneven multi-hexagon search.
|
||||||
|
@item esa (@emph{esa})
|
||||||
|
Exhaustive search.
|
||||||
|
@item tesa (@emph{tesa})
|
||||||
|
Hadamard exhaustive search (slowest).
|
||||||
|
@end table
|
||||||
|
|
||||||
@item variance
|
@item subq (@emph{subme})
|
||||||
|
|
||||||
|
@item b_strategy (@emph{b-adapt})
|
||||||
|
|
||||||
|
@item keyint_min (@emph{min-keyint})
|
||||||
|
|
||||||
|
@item coder
|
||||||
|
Set entropy encoder. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item ac
|
||||||
|
Enable CABAC.
|
||||||
|
|
||||||
|
@item vlc
|
||||||
|
Enable CAVLC and disable CABAC. It generates the same effect as
|
||||||
|
@command{x264}'s @option{--no-cabac} option.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item cmp
|
||||||
|
Set full pixel motion estimation comparation algorithm. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item chroma
|
||||||
|
Enable chroma in motion estimation.
|
||||||
|
|
||||||
|
@item sad
|
||||||
|
Ignore chroma in motion estimation. It generates the same effect as
|
||||||
|
@command{x264}'s @option{--no-chroma-me} option.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item threads (@emph{threads})
|
||||||
|
|
||||||
|
@item thread_type
|
||||||
|
Set multithreading technique. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item slice
|
||||||
|
Slice-based multithreading. It generates the same effect as
|
||||||
|
@command{x264}'s @option{--sliced-threads} option.
|
||||||
|
@item frame
|
||||||
|
Frame-based multithreading.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item flags
|
||||||
|
Set encoding flags. It can be used to disable closed GOP and enable
|
||||||
|
open GOP by setting it to @code{-cgop}. The result is similar to
|
||||||
|
the behavior of @command{x264}'s @option{--open-gop} option.
|
||||||
|
|
||||||
|
@item rc_init_occupancy (@emph{vbv-init})
|
||||||
|
|
||||||
|
@item preset (@emph{preset})
|
||||||
|
Set the encoding preset.
|
||||||
|
|
||||||
|
@item tune (@emph{tune})
|
||||||
|
Set tuning of the encoding params.
|
||||||
|
|
||||||
|
@item profile (@emph{profile})
|
||||||
|
Set profile restrictions.
|
||||||
|
|
||||||
|
@item fastfirstpass
|
||||||
|
Enable fast settings when encoding first pass, when set to 1. When set
|
||||||
|
to 0, it has the same effect of @command{x264}'s
|
||||||
|
@option{--slow-firstpass} option.
|
||||||
|
|
||||||
|
@item crf (@emph{crf})
|
||||||
|
Set the quality for constant quality mode.
|
||||||
|
|
||||||
|
@item crf_max (@emph{crf-max})
|
||||||
|
In CRF mode, prevents VBV from lowering quality beyond this point.
|
||||||
|
|
||||||
|
@item qp (@emph{qp})
|
||||||
|
Set constant quantization rate control method parameter.
|
||||||
|
|
||||||
|
@item aq-mode (@emph{aq-mode})
|
||||||
|
Set AQ method. Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item none (@emph{0})
|
||||||
|
Disabled.
|
||||||
|
|
||||||
|
@item variance (@emph{1})
|
||||||
Variance AQ (complexity mask).
|
Variance AQ (complexity mask).
|
||||||
@item autovariance
|
|
||||||
|
@item autovariance (@emph{2})
|
||||||
Auto-variance AQ (experimental).
|
Auto-variance AQ (experimental).
|
||||||
@end table
|
@end table
|
||||||
@item -aq-strength @var{float}
|
|
||||||
AQ strength, reduces blocking and blurring in flat and textured areas.
|
|
||||||
@item -psy @var{integer}
|
|
||||||
Use psychovisual optimizations.
|
|
||||||
@item -psy-rd @var{string}
|
|
||||||
Strength of psychovisual optimization, in <psy-rd>:<psy-trellis> format.
|
|
||||||
@item -rc-lookahead @var{integer}
|
|
||||||
Number of frames to look ahead for frametype and ratecontrol.
|
|
||||||
@item -weightb @var{integer}
|
|
||||||
Weighted prediction for B-frames.
|
|
||||||
@item -weightp @var{integer}
|
|
||||||
Weighted prediction analysis method.
|
|
||||||
|
|
||||||
Possible values:
|
@item aq-strength (@emph{aq-strength})
|
||||||
|
Set AQ strength, reduce blocking and blurring in flat and textured areas.
|
||||||
|
|
||||||
|
@item psy
|
||||||
|
Use psychovisual optimizations when set to 1. When set to 0, it has the
|
||||||
|
same effect as @command{x264}'s @option{--no-psy} option.
|
||||||
|
|
||||||
|
@item psy-rd (@emph{psy-rd})
|
||||||
|
Set strength of psychovisual optimization, in
|
||||||
|
@var{psy-rd}:@var{psy-trellis} format.
|
||||||
|
|
||||||
|
@item rc-lookahead (@emph{rc-lookahead})
|
||||||
|
Set number of frames to look ahead for frametype and ratecontrol.
|
||||||
|
|
||||||
|
@item weightb
|
||||||
|
Enable weighted prediction for B-frames when set to 1. When set to 0,
|
||||||
|
it has the same effect as @command{x264}'s @option{--no-weightb} option.
|
||||||
|
|
||||||
|
@item weightp (@emph{weightp})
|
||||||
|
Set weighted prediction method for P-frames. Possible values:
|
||||||
|
|
||||||
@table @samp
|
@table @samp
|
||||||
@item none
|
@item none (@emph{0})
|
||||||
|
Disabled
|
||||||
@item simple
|
@item simple (@emph{1})
|
||||||
|
Enable only weighted refs
|
||||||
@item smart
|
@item smart (@emph{2})
|
||||||
|
Enable both weighted refs and duplicates
|
||||||
@end table
|
@end table
|
||||||
@item -ssim @var{integer}
|
|
||||||
Calculate and print SSIM stats.
|
|
||||||
@item -intra-refresh @var{integer}
|
|
||||||
Use Periodic Intra Refresh instead of IDR frames.
|
|
||||||
@item -b-bias @var{integer}
|
|
||||||
Influences how often B-frames are used.
|
|
||||||
@item -b-pyramid @var{integer}
|
|
||||||
Keep some B-frames as references.
|
|
||||||
|
|
||||||
Possible values:
|
@item ssim (@emph{ssim})
|
||||||
|
Enable calculation and printing SSIM stats after the encoding.
|
||||||
|
|
||||||
|
@item intra-refresh (@emph{intra-refresh})
|
||||||
|
Enable the use of Periodic Intra Refresh instead of IDR frames when set
|
||||||
|
to 1.
|
||||||
|
|
||||||
|
@item b-bias (@emph{b-bias})
|
||||||
|
Set the influence on how often B-frames are used.
|
||||||
|
|
||||||
|
@item b-pyramid (@emph{b-pyramid})
|
||||||
|
Set method for keeping of some B-frames as references. Possible values:
|
||||||
|
|
||||||
@table @samp
|
@table @samp
|
||||||
@item none
|
@item none (@emph{none})
|
||||||
|
Disabled.
|
||||||
@item strict
|
@item strict (@emph{strict})
|
||||||
Strictly hierarchical pyramid.
|
Strictly hierarchical pyramid.
|
||||||
@item normal
|
@item normal (@emph{normal})
|
||||||
Non-strict (not Blu-ray compatible).
|
Non-strict (not Blu-ray compatible).
|
||||||
@end table
|
@end table
|
||||||
@item -mixed-refs @var{integer}
|
|
||||||
One reference per partition, as opposed to one reference per macroblock.
|
|
||||||
@item -8x8dct @var{integer}
|
|
||||||
High profile 8x8 transform.
|
|
||||||
@item -fast-pskip @var{integer}
|
|
||||||
@item -aud @var{integer}
|
|
||||||
Use access unit delimiters.
|
|
||||||
@item -mbtree @var{integer}
|
|
||||||
Use macroblock tree ratecontrol.
|
|
||||||
@item -deblock @var{string}
|
|
||||||
Loop filter parameters, in <alpha:beta> form.
|
|
||||||
@item -cplxblur @var{float}
|
|
||||||
Reduce fluctuations in QP (before curve compression).
|
|
||||||
@item -partitions @var{string}
|
|
||||||
A comma-separated list of partitions to consider, possible values: p8x8, p4x4, b8x8, i8x8, i4x4, none, all.
|
|
||||||
@item -direct-pred @var{integer}
|
|
||||||
Direct MV prediction mode
|
|
||||||
|
|
||||||
Possible values:
|
@item mixed-refs
|
||||||
|
Enable the use of one reference per partition, as opposed to one
|
||||||
|
reference per macroblock when set to 1. When set to 0, it has the
|
||||||
|
same effect as @command{x264}'s @option{--no-mixed-refs} option.
|
||||||
|
|
||||||
|
@item 8x8dct
|
||||||
|
Enable adaptive spatial transform (high profile 8x8 transform)
|
||||||
|
when set to 1. When set to 0, it has the same effect as
|
||||||
|
@command{x264}'s @option{--no-8x8dct} option.
|
||||||
|
|
||||||
|
@item fast-pskip
|
||||||
|
Enable early SKIP detection on P-frames when set to 1. When set
|
||||||
|
to 0, it has the same effect as @command{x264}'s
|
||||||
|
@option{--no-fast-pskip} option.
|
||||||
|
|
||||||
|
@item aud (@emph{aud})
|
||||||
|
Enable use of access unit delimiters when set to 1.
|
||||||
|
|
||||||
|
@item mbtree
|
||||||
|
Enable use macroblock tree ratecontrol when set to 1. When set
|
||||||
|
to 0, it has the same effect as @command{x264}'s
|
||||||
|
@option{--no-mbtree} option.
|
||||||
|
|
||||||
|
@item deblock (@emph{deblock})
|
||||||
|
Set loop filter parameters, in @var{alpha}:@var{beta} form.
|
||||||
|
|
||||||
|
@item cplxblur (@emph{cplxblur})
|
||||||
|
Set fluctuations reduction in QP (before curve compression).
|
||||||
|
|
||||||
|
@item partitions (@emph{partitions})
|
||||||
|
Set partitions to consider as a comma-separated list of. Possible
|
||||||
|
values in the list:
|
||||||
|
|
||||||
@table @samp
|
@table @samp
|
||||||
@item none
|
@item p8x8
|
||||||
|
8x8 P-frame partition.
|
||||||
@item spatial
|
@item p4x4
|
||||||
|
4x4 P-frame partition.
|
||||||
@item temporal
|
@item b8x8
|
||||||
|
4x4 B-frame partition.
|
||||||
@item auto
|
@item i8x8
|
||||||
|
8x8 I-frame partition.
|
||||||
@end table
|
@item i4x4
|
||||||
@item -slice-max-size @var{integer}
|
4x4 I-frame partition.
|
||||||
Limit the size of each slice in bytes.
|
(Enabling @samp{p4x4} requires @samp{p8x8} to be enabled. Enabling
|
||||||
@item -stats @var{string}
|
@samp{i8x8} requires adaptive spatial transform (@option{8x8dct}
|
||||||
Filename for 2 pass stats.
|
option) to be enabled.)
|
||||||
@item -nal-hrd @var{integer}
|
@item none (@emph{none})
|
||||||
Signal HRD information (requires vbv-bufsize; cbr not allowed in .mp4).
|
Do not consider any partitions.
|
||||||
|
@item all (@emph{all})
|
||||||
Possible values:
|
Consider every partition.
|
||||||
@table @samp
|
|
||||||
@item none
|
|
||||||
|
|
||||||
@item vbr
|
|
||||||
|
|
||||||
@item cbr
|
|
||||||
|
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@item x264opts @var{options}
|
@item direct-pred (@emph{direct})
|
||||||
Allow to set any x264 option, see @code{x264 --fullhelp} for a list.
|
Set direct MV prediction mode. Possible values:
|
||||||
|
|
||||||
@var{options} is a list of @var{key}=@var{value} couples separated by
|
@table @samp
|
||||||
|
@item none (@emph{none})
|
||||||
|
Disable MV prediction.
|
||||||
|
@item spatial (@emph{spatial})
|
||||||
|
Enable spatial predicting.
|
||||||
|
@item temporal (@emph{temporal})
|
||||||
|
Enable temporal predicting.
|
||||||
|
@item auto (@emph{auto})
|
||||||
|
Automatically decided.
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item slice-max-size (@emph{slice-max-size})
|
||||||
|
Set the limit of the size of each slice in bytes. If not specified
|
||||||
|
but RTP payload size (@option{ps}) is specified, that is used.
|
||||||
|
|
||||||
|
@item stats (@emph{stats})
|
||||||
|
Set the file name for multi-pass stats.
|
||||||
|
|
||||||
|
@item nal-hrd (@emph{nal-hrd})
|
||||||
|
Set signal HRD information (requires @option{vbv-bufsize} to be set).
|
||||||
|
Possible values:
|
||||||
|
|
||||||
|
@table @samp
|
||||||
|
@item none (@emph{none})
|
||||||
|
Disable HRD information signaling.
|
||||||
|
@item vbr (@emph{vbr})
|
||||||
|
Variable bit rate.
|
||||||
|
@item cbr (@emph{cbr})
|
||||||
|
Constant bit rate (not allowed in MP4 container).
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@item x264opts (N.A.)
|
||||||
|
Set any x264 option, see @command{x264 --fullhelp} for a list.
|
||||||
|
|
||||||
|
Argument is a list of @var{key}=@var{value} couples separated by
|
||||||
":". In @var{filter} and @var{psy-rd} options that use ":" as a separator
|
":". In @var{filter} and @var{psy-rd} options that use ":" as a separator
|
||||||
themselves, use "," instead. They accept it as well since long ago but this
|
themselves, use "," instead. They accept it as well since long ago but this
|
||||||
is kept undocumented for some reason.
|
is kept undocumented for some reason.
|
||||||
@@ -764,17 +1266,135 @@ For example to specify libx264 encoding options with @command{ffmpeg}:
|
|||||||
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
|
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
|
||||||
@end example
|
@end example
|
||||||
|
|
||||||
For more information about libx264 and the supported options see:
|
@item x264-params (N.A.)
|
||||||
@url{http://www.videolan.org/developers/x264.html}
|
Override the x264 configuration using a :-separated list of key=value
|
||||||
|
parameters.
|
||||||
|
|
||||||
@item -x264-params @var{string}
|
This option is functionally the same as the @option{x264opts}, but is
|
||||||
Override the x264 configuration using a :-separated list of key=value parameters.
|
duplicated for compability with the Libav fork.
|
||||||
|
|
||||||
|
For example to specify libx264 encoding options with @command{ffmpeg}:
|
||||||
@example
|
@example
|
||||||
-x264-params level=30:bframes=0:weightp=0:cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:no-fast-pskip=1:subq=6:8x8dct=0:trellis=0
|
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
|
||||||
|
cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
|
||||||
|
no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
|
||||||
@end example
|
@end example
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
Encoding avpresets for common usages are provided so they can be used with the
|
Encoding ffpresets for common usages are provided so they can be used with the
|
||||||
general presets system (e.g. passing the @code{-pre} option).
|
general presets system (e.g. passing the @option{pre} option).
|
||||||
|
|
||||||
|
+@section libxvid
+
+Xvid MPEG-4 Part 2 encoder wrapper.
+
+This encoder requires the presence of the libxvidcore headers and library
+during configuration. You need to explicitly configure the build with
+@code{--enable-libxvid --enable-gpl}.
+
+The native @code{mpeg4} encoder supports the MPEG-4 Part 2 format, so
+users can encode to this format without this library.
+
+@subsection Options
+
+The following options are supported by the libxvid wrapper. Some of
+the following options are listed but are not documented, and
+correspond to shared codec options. See @ref{codec-options,,the Codec
+Options chapter} for their documentation. The other shared options
+which are not listed have no effect for the libxvid encoder.
+
+@table @option
+@item b
+
+@item g
+
+@item qmin
+
+@item qmax
+
+@item mpeg_quant
+
+@item threads
+
+@item bf
+
+@item b_qfactor
+
+@item b_qoffset
+
+@item flags
+Set specific encoding flags. Possible values:
+
+@table @samp
+@item mv4
+Use four motion vector by macroblock.
+
+@item aic
+Enable high quality AC prediction.
+
+@item gray
+Only encode grayscale.
+
+@item gmc
+Enable the use of global motion compensation (GMC).
+
+@item qpel
+Enable quarter-pixel motion compensation.
+
+@item cgop
+Enable closed GOP.
+
+@item global_header
+Place global headers in extradata instead of every keyframe.
+
+@end table
+
+@item trellis
+
+@item me_method
+Set motion estimation method. Possible values in decreasing order of
+speed and increasing order of quality:
+
+@table @samp
+@item zero
+Use no motion estimation (default).
+
+@item phods
+@item x1
+@item log
+Enable advanced diamond zonal search for 16x16 blocks and half-pixel
+refinement for 16x16 blocks. @samp{x1} and @samp{log} are aliases for
+@samp{phods}.
+
+@item epzs
+Enable all of the things described above, plus advanced diamond zonal
+search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
+estimation on chroma planes.
+
+@item full
+Enable all of the things described above, plus extended 16x16 and 8x8
+blocks search.
+@end table
+
+@item mbd
+Set macroblock decision algorithm. Possible values in the increasing
+order of quality:
+
+@table @samp
+@item simple
+Use macroblock comparing function algorithm (default).
+
+@item bits
+Enable rate distortion-based half pixel and quarter pixel refinement for
+16x16 blocks.
+
+@item rd
+Enable all of the things described above, plus rate distortion-based
+half pixel and quarter pixel refinement for 8x8 blocks, and rate
+distortion-based search using square pattern.
+@end table
+
+@end table
+
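Tying the libxvid options above together, a usage sketch (added for illustration; bitrate and flag selection are illustrative, not recommendations):
@example
ffmpeg -i INPUT -c:v libxvid -b:v 1M -flags +mv4+aic -me_method full -mbd rd -trellis 1 OUTPUT.avi
@end example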
 @c man end VIDEO ENCODERS
 
@@ -17,6 +17,7 @@ the libavcodec library.
 
 @c man end DESCRIPTION
 
+@anchor{codec-options}
 @chapter Codec Options
 @c man begin CODEC OPTIONS
 
@@ -76,6 +76,9 @@ Enable RTP MP4A-LATM payload.
 Reduce the latency introduced by optional buffering
 @end table
 
+@item seek2any @var{integer} (@emph{input})
+Forces seeking to enable seek to any mode if set to 1. Default is 0.
+
 @item analyzeduration @var{integer} (@emph{input})
 Specify how many microseconds are analyzed to probe the input. A
 higher value will allow to detect more accurate information, but will
@@ -142,6 +145,12 @@ Use wallclock as timestamps.
 @item avoid_negative_ts @var{integer} (@emph{output})
 Shift timestamps to make them positive. 1 enables, 0 disables, default
 of -1 enables when required by target format.
+
+@item skip_initial_bytes @var{integer} (@emph{input})
+Set number initial bytes to skip. Default is 0.
+
+@item correct_ts_overflow @var{integer} (@emph{input})
+Correct single timestamp overflows if set to 1. Default is 1.
 @end table
 
 @c man end FORMAT OPTIONS
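A sketch of how the input-side options above are placed before @option{-i} (added for illustration; values and files are illustrative):
@example
ffmpeg -ss 30 -seek2any 1 -i INPUT OUTPUT.mkv
ffmpeg -skip_initial_bytes 64 -i INPUT -c copy OUTPUT
@end example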
@@ -3,10 +3,10 @@
 
 Filtering in FFmpeg is enabled through the libavfilter library.
 
-In libavfilter, it is possible for filters to have multiple inputs and
+In libavfilter, a filter can have multiple inputs and multiple
-multiple outputs.
+outputs.
-To illustrate the sorts of things that are possible, we can
+To illustrate the sorts of things that are possible, we consider the
-use a complex filter graph. For example, the following one:
+following filtergraph.
 
 @example
 input --> split ---------------------> overlay --> output
@@ -15,25 +15,32 @@ input --> split ---------------------> overlay --> output
 +-----> crop --> vflip -------+
 @end example
 
-splits the stream in two streams, sends one stream through the crop filter
+This filtergraph splits the input stream in two streams, sends one
-and the vflip filter before merging it back with the other stream by
+stream through the crop filter and the vflip filter before merging it
-overlaying it on top. You can use the following command to achieve this:
+back with the other stream by overlaying it on top. You can use the
+following command to achieve this:
 
 @example
-ffmpeg -i input -vf "[in] split [T1], [T2] overlay=0:H/2 [out]; [T1] crop=iw:ih/2:0:ih/2, vflip [T2]" output
+ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
 @end example
 
 The result will be that in output the top half of the video is mirrored
 onto the bottom half.
 
-Filters are loaded using the @var{-vf} or @var{-af} option passed to
+Filters in the same linear chain are separated by commas, and distinct
-@command{ffmpeg} or to @command{ffplay}. Filters in the same linear
+linear chains of filters are separated by semicolons. In our example,
-chain are separated by commas. In our example, @var{split,
+@var{crop,vflip} are in one linear chain, @var{split} and
-overlay} are in one linear chain, and @var{crop, vflip} are in
+@var{overlay} are separately in another. The points where the linear
-another. The points where the linear chains join are labeled by names
+chains join are labelled by names enclosed in square brackets. In the
-enclosed in square brackets. In our example, that is @var{[T1]} and
+example, the split filter generates two outputs that are associated to
-@var{[T2]}. The special labels @var{[in]} and @var{[out]} are the points
+the labels @var{[main]} and @var{[tmp]}.
-where video is input and output.
+
+The stream sent to the second output of @var{split}, labelled as
+@var{[tmp]}, is processed through the @var{crop} filter, which crops
+away the lower half part of the video, and then vertically flipped. The
+@var{overlay} filter takes in input the first unchanged output of the
+split filter (which was labelled as @var{[main]}), and overlay on its
+lower half the output generated by the @var{crop,vflip} filterchain.
 
 Some filters take in input a list of parameters: they are specified
 after the filter name and an equal sign, and are separated from each other
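A minimal sketch of the comma syntax for a single linear chain with per-filter parameters (added for illustration; the filter arguments are arbitrary):
@example
ffmpeg -i INPUT -vf "crop=iw/2:ih:0:0, scale=320:240" OUTPUT
@end example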
@@ -2030,7 +2037,7 @@ This expression is evaluated only once during the filter
 configuration.
 
 @item h, out_h
-Set the crop area width. It defaults to @code{ih}.
+Set the crop area height. It defaults to @code{ih}.
 This expression is evaluated only once during the filter
 configuration.
 
@@ -24,7 +24,7 @@ instructions. To enable using OpenJPEG in FFmpeg, pass @code{--enable-libopenjp
 @file{./configure}.
 
 
-@section OpenCORE and VisualOn libraries
+@section OpenCORE, VisualOn, and Fraunhofer libraries
 
 Spun off Google Android sources, OpenCore, VisualOn and Fraunhofer
 libraries provide encoders for a number of audio codecs.
@@ -32,9 +32,14 @@ libraries provide encoders for a number of audio codecs.
 @float NOTE
 OpenCORE and VisualOn libraries are under the Apache License 2.0
 (see @url{http://www.apache.org/licenses/LICENSE-2.0} for details), which is
-incompatible with the LGPL version 2.1 and GPL version 2. You have to
+incompatible to the LGPL version 2.1 and GPL version 2. You have to
 upgrade FFmpeg's license to LGPL version 3 (or if you have enabled
-GPL components, GPL version 3) to use it.
+GPL components, GPL version 3) by passing @code{--enable-version3} to configure in
+order to use it.
+
+The Fraunhofer AAC library is licensed under a license incompatible to the GPL
+and is not known to be compatible to the LGPL. Therefore, you have to pass
+@code{--enable-nonfree} to configure to use it.
 @end float
 
 @subsection OpenCORE AMR
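As a sketch of the licensing notes above (added for illustration), a configure line enabling the OpenCORE, VisualOn and Fraunhofer wrappers might look like this; the library selection is illustrative, and @code{--enable-nonfree} makes the resulting binary unredistributable:
@example
./configure --enable-gpl --enable-version3 --enable-nonfree \
            --enable-libopencore-amrnb --enable-libvo-aacenc --enable-libfdk-aac
@end example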
@@ -24,7 +24,7 @@ a mail for every change to every issue.
 The subscription URL for the ffmpeg-trac list is:
 http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
 The URL of the webinterface of the tracker is:
-http(s)://ffmpeg.org/trac/ffmpeg
+http(s)://trac.ffmpeg.org
 
 Type:
 -----
@@ -18,6 +18,23 @@ enabled muxers.
 
 A description of some of the currently available muxers follows.
 
+@anchor{aiff}
+@section aiff
+
+Audio Interchange File Format muxer.
+
+It accepts the following options:
+
+@table @option
+@item write_id3v2
+Enable ID3v2 tags writing when set to 1. Default is 0 (disabled).
+
+@item id3v2_version
+Select ID3v2 version to write. Currently only version 3 and 4 (aka.
+ID3v2.3 and ID3v2.4) are supported. The default is version 4.
+
+@end table
+
 @anchor{crc}
 @section crc
 
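A usage sketch of the AIFF muxer options above (added for illustration; the codec choice is illustrative):
@example
ffmpeg -i INPUT -c:a pcm_s16be -write_id3v2 1 -id3v2_version 3 OUTPUT.aif
@end example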
ffmpeg.c (12 changed lines)
@@ -162,6 +162,8 @@ static struct termios oldtty;
 static int restore_tty;
 #endif
 
+static void free_input_threads(void);
+
 /* sub2video hack:
 Convert subtitles to video with alpha to insert them in filter graphs.
@@ -457,6 +459,9 @@ static void exit_program(void)
 av_freep(&output_streams[i]->logfile_prefix);
 av_freep(&output_streams[i]);
 }
+#if HAVE_PTHREADS
+free_input_threads();
+#endif
 for (i = 0; i < nb_input_files; i++) {
 avformat_close_input(&input_files[i]->ctx);
 av_freep(&input_files[i]);
@@ -1905,7 +1910,10 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
 ist->st->codec->sample_rate;
 break;
 case AVMEDIA_TYPE_VIDEO:
-if (pkt->duration) {
+if (ist->framerate.num) {
+int64_t next_dts = av_rescale_q(ist->next_dts, AV_TIME_BASE_Q, av_inv_q(ist->framerate));
+ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), AV_TIME_BASE_Q);
+} else if (pkt->duration) {
 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
 } else if(ist->st->codec->time_base.num != 0) {
 int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
@@ -2208,6 +2216,8 @@ static int transcode_init(void)
 codec->time_base = icodec->time_base;
 }
 
+if (ist && !ost->frame_rate.num)
+ost->frame_rate = ist->framerate;
 if(ost->frame_rate.num)
 codec->time_base = av_inv_q(ost->frame_rate);
 
@@ -328,6 +328,14 @@ static AVLFG random_state;
 
 static FILE *logfile = NULL;
 
+static void htmlstrip(char *s) {
+while (s && *s) {
+s += strspn(s, "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ,. ");
+if (*s)
+*s++ = '?';
+}
+}
+
 static int64_t ffm_read_write_index(int fd)
 {
 uint8_t buf[8];
@@ -1887,6 +1895,7 @@ static int http_parse_request(HTTPContext *c)
 send_error:
 c->http_error = 404;
 q = c->buffer;
+htmlstrip(msg);
 snprintf(q, c->buffer_size,
 "HTTP/1.0 404 Not Found\r\n"
 "Content-type: text/html\r\n"
@@ -710,7 +710,7 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx,
 const float lambda)
 {
 int start = 0, i, w, w2, g;
-int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels;
+int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels * (lambda / 120.f);
 float dists[128] = { 0 }, uplims[128];
 float maxvals[128];
 int fflag, minscaler;
@@ -189,6 +189,9 @@ static int frame_configure_elements(AVCodecContext *avctx)
 }
 }
 
+if (!avctx->channels)
+return 1;
+
 /* get output buffer */
 ac->frame->nb_samples = 2048;
 if ((ret = ff_get_buffer(avctx, ac->frame)) < 0) {
@@ -429,6 +429,7 @@ static void hybrid_synthesis(PSDSPContext *dsp, float out[2][38][64],
 #define DECAY_SLOPE 0.05f
 /// Number of frequency bands that can be addressed by the parameter index, b(k)
 static const int NR_PAR_BANDS[] = { 20, 34 };
+static const int NR_IPDOPD_BANDS[] = { 11, 17 };
 /// Number of frequency bands that can be addressed by the sub subband index, k
 static const int NR_BANDS[] = { 71, 91 };
 /// Start frequency band for the all-pass filter decay slope
@@ -823,7 +824,8 @@ static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2
 h12 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][1];
 h21 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][2];
 h22 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][3];
-if (!PS_BASELINE && ps->enable_ipdopd && b < ps->nr_ipdopd_par) {
+if (!PS_BASELINE && ps->enable_ipdopd && b < NR_IPDOPD_BANDS[is34]) {
 //The spec say says to only run this smoother when enable_ipdopd
 //is set but the reference decoder appears to run it constantly
 float h11i, h12i, h21i, h22i;
@@ -274,7 +274,7 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch)
 // generate warm-up samples
 residual[0] = samples[0];
 for (i = 1; i <= lpc.lpc_order; i++)
-residual[i] = samples[i] - samples[i-1];
+residual[i] = sign_extend(samples[i] - samples[i-1], s->write_sample_size);
 
 // perform lpc on remaining samples
 for (i = lpc.lpc_order + 1; i < s->frame_size; i++) {
@@ -112,7 +112,7 @@ static void hscroll(AVCodecContext *avctx)
 AnsiContext *s = avctx->priv_data;
 int i;
 
-if (s->y < avctx->height - s->font_height) {
+if (s->y <= avctx->height - 2*s->font_height) {
 s->y += s->font_height;
 return;
 }
@@ -165,7 +165,7 @@ static void draw_char(AVCodecContext *avctx, int c)
 ff_draw_pc_font(s->frame.data[0] + s->y * s->frame.linesize[0] + s->x,
 s->frame.linesize[0], s->font, s->font_height, c, fg, bg);
 s->x += FONT_WIDTH;
-if (s->x >= avctx->width) {
+if (s->x > avctx->width - FONT_WIDTH) {
 s->x = 0;
 hscroll(avctx);
 }
@@ -239,6 +239,8 @@ static int execute_code(AVCodecContext * avctx, int c)
 default:
 av_log_ask_for_sample(avctx, "unsupported screen mode\n");
 }
+s->x = av_clip(s->x, 0, width - FONT_WIDTH);
+s->y = av_clip(s->y, 0, height - s->font_height);
 if (width != avctx->width || height != avctx->height) {
 if (s->frame.data[0])
 avctx->release_buffer(avctx, &s->frame);
@@ -335,6 +337,8 @@ static int execute_code(AVCodecContext * avctx, int c)
 av_log_ask_for_sample(avctx, "unsupported escape code\n");
 break;
 }
+s->x = av_clip(s->x, 0, avctx->width - FONT_WIDTH);
+s->y = av_clip(s->y, 0, avctx->height - s->font_height);
 return 0;
 }
 
@@ -415,7 +419,7 @@ static int decode_frame(AVCodecContext *avctx,
 switch(buf[0]) {
 case '0': case '1': case '2': case '3': case '4':
 case '5': case '6': case '7': case '8': case '9':
-if (s->nb_args < MAX_NB_ARGS)
+if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args] < 6553)
 s->args[s->nb_args] = FFMAX(s->args[s->nb_args], 0) * 10 + buf[0] - '0';
 break;
 case ';':
@@ -34,46 +34,44 @@ static inline int decode_blockcodes(int code1, int code2, int levels,
|
|||||||
{
|
{
|
||||||
int v0, v1, v2, v3, v4, v5;
|
int v0, v1, v2, v3, v4, v5;
|
||||||
|
|
||||||
__asm__ ("smmul %8, %14, %18 \n"
|
__asm__ ("smmul %0, %6, %10 \n"
|
||||||
"smmul %11, %15, %18 \n"
|
"smmul %3, %7, %10 \n"
|
||||||
"smlabb %14, %8, %17, %14 \n"
|
"smlabb %6, %0, %9, %6 \n"
|
||||||
"smlabb %15, %11, %17, %15 \n"
|
"smlabb %7, %3, %9, %7 \n"
|
||||||
"smmul %9, %8, %18 \n"
|
"smmul %1, %0, %10 \n"
|
||||||
"smmul %12, %11, %18 \n"
|
"smmul %4, %3, %10 \n"
|
||||||
"sub %14, %14, %16, lsr #1 \n"
|
"sub %6, %6, %8, lsr #1 \n"
|
||||||
"sub %15, %15, %16, lsr #1 \n"
|
"sub %7, %7, %8, lsr #1 \n"
|
||||||
"smlabb %8, %9, %17, %8 \n"
|
"smlabb %0, %1, %9, %0 \n"
|
||||||
"smlabb %11, %12, %17, %11 \n"
|
"smlabb %3, %4, %9, %3 \n"
|
||||||
"smmul %10, %9, %18 \n"
|
"smmul %2, %1, %10 \n"
|
||||||
"smmul %13, %12, %18 \n"
|
"smmul %5, %4, %10 \n"
|
||||||
"str %14, %0 \n"
|
"str %6, [%11, #0] \n"
|
||||||
"str %15, %4 \n"
|
"str %7, [%11, #16] \n"
|
||||||
"sub %8, %8, %16, lsr #1 \n"
|
"sub %0, %0, %8, lsr #1 \n"
|
||||||
"sub %11, %11, %16, lsr #1 \n"
|
"sub %3, %3, %8, lsr #1 \n"
|
||||||
"smlabb %9, %10, %17, %9 \n"
|
"smlabb %1, %2, %9, %1 \n"
|
||||||
"smlabb %12, %13, %17, %12 \n"
|
"smlabb %4, %5, %9, %4 \n"
|
||||||
"smmul %14, %10, %18 \n"
|
"smmul %6, %2, %10 \n"
|
||||||
"smmul %15, %13, %18 \n"
|
"smmul %7, %5, %10 \n"
|
||||||
"str %8, %1 \n"
|
"str %0, [%11, #4] \n"
|
||||||
"str %11, %5 \n"
|
"str %3, [%11, #20] \n"
|
||||||
"sub %9, %9, %16, lsr #1 \n"
|
"sub %1, %1, %8, lsr #1 \n"
|
||||||
"sub %12, %12, %16, lsr #1 \n"
|
"sub %4, %4, %8, lsr #1 \n"
|
||||||
"smlabb %10, %14, %17, %10 \n"
|
"smlabb %2, %6, %9, %2 \n"
|
||||||
"smlabb %13, %15, %17, %13 \n"
|
"smlabb %5, %7, %9, %5 \n"
|
||||||
"str %9, %2 \n"
|
"str %1, [%11, #8] \n"
|
||||||
"str %12, %6 \n"
|
"str %4, [%11, #24] \n"
|
||||||
"sub %10, %10, %16, lsr #1 \n"
|
"sub %2, %2, %8, lsr #1 \n"
|
||||||
"sub %13, %13, %16, lsr #1 \n"
|
"sub %5, %5, %8, lsr #1 \n"
|
||||||
"str %10, %3 \n"
|
"str %2, [%11, #12] \n"
|
||||||
"str %13, %7 \n"
|
"str %5, [%11, #28] \n"
|
||||||
: "=m"(values[0]), "=m"(values[1]),
|
: "=&r"(v0), "=&r"(v1), "=&r"(v2),
|
||||||
"=m"(values[2]), "=m"(values[3]),
|
|
||||||
"=m"(values[4]), "=m"(values[5]),
|
|
||||||
"=m"(values[6]), "=m"(values[7]),
|
|
||||||
"=&r"(v0), "=&r"(v1), "=&r"(v2),
|
|
||||||
"=&r"(v3), "=&r"(v4), "=&r"(v5),
|
"=&r"(v3), "=&r"(v4), "=&r"(v5),
|
||||||
"+&r"(code1), "+&r"(code2)
|
"+&r"(code1), "+&r"(code2)
|
||||||
: "r"(levels - 1), "r"(-levels), "r"(ff_inverse[levels]));
|
: "r"(levels - 1), "r"(-levels),
|
||||||
|
"r"(ff_inverse[levels]), "r"(values)
|
||||||
|
: "memory");
|
||||||
|
|
||||||
return code1 | code2;
|
return code1 | code2;
|
||||||
}
|
}
|
||||||
|
@@ -41,10 +41,10 @@ function ff_scalarproduct_int16_neon, export=1
|
|||||||
|
|
||||||
vpadd.s32 d16, d0, d1
|
vpadd.s32 d16, d0, d1
|
||||||
vpadd.s32 d17, d2, d3
|
vpadd.s32 d17, d2, d3
|
||||||
vpadd.s32 d10, d4, d5
|
vpadd.s32 d18, d4, d5
|
||||||
vpadd.s32 d11, d6, d7
|
vpadd.s32 d19, d6, d7
|
||||||
vpadd.s32 d0, d16, d17
|
vpadd.s32 d0, d16, d17
|
||||||
vpadd.s32 d1, d10, d11
|
vpadd.s32 d1, d18, d19
|
||||||
vpadd.s32 d2, d0, d1
|
vpadd.s32 d2, d0, d1
|
||||||
vpaddl.s32 d3, d2
|
vpaddl.s32 d3, d2
|
||||||
vmov.32 r0, d3[0]
|
vmov.32 r0, d3[0]
|
||||||
@@ -81,10 +81,10 @@ function ff_scalarproduct_and_madd_int16_neon, export=1
|
|||||||
|
|
||||||
vpadd.s32 d16, d0, d1
|
vpadd.s32 d16, d0, d1
|
||||||
vpadd.s32 d17, d2, d3
|
vpadd.s32 d17, d2, d3
|
||||||
vpadd.s32 d10, d4, d5
|
vpadd.s32 d18, d4, d5
|
||||||
vpadd.s32 d11, d6, d7
|
vpadd.s32 d19, d6, d7
|
||||||
vpadd.s32 d0, d16, d17
|
vpadd.s32 d0, d16, d17
|
||||||
vpadd.s32 d1, d10, d11
|
vpadd.s32 d1, d18, d19
|
||||||
vpadd.s32 d2, d0, d1
|
vpadd.s32 d2, d0, d1
|
||||||
vpaddl.s32 d3, d2
|
vpaddl.s32 d3, d2
|
||||||
vmov.32 r0, d3[0]
|
vmov.32 r0, d3[0]
|
||||||
|
@@ -661,8 +661,8 @@ static int decode_channel_sound_unit(ATRAC3Context *q, GetBitContext *gb,
 
 snd->num_components = decode_tonal_components(gb, snd->components,
 snd->bands_coded);
-if (snd->num_components == -1)
+if (snd->num_components < 0)
-return -1;
+return snd->num_components;
 
 num_subbands = decode_spectrum(gb, snd->spectrum);
 
@@ -286,7 +286,7 @@ int av_packet_split_side_data(AVPacket *pkt){
 for (i=0; ; i++){
 size= AV_RB32(p);
 av_assert0(size<=INT_MAX && p - pkt->data >= size);
-pkt->side_data[i].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
+pkt->side_data[i].data = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
 pkt->side_data[i].size = size;
 pkt->side_data[i].type = p[4]&127;
 if (!pkt->side_data[i].data)
@@ -117,6 +117,7 @@ typedef struct BinkContext {
|
|||||||
int version; ///< internal Bink file version
|
int version; ///< internal Bink file version
|
||||||
int has_alpha;
|
int has_alpha;
|
||||||
int swap_planes;
|
int swap_planes;
|
||||||
|
unsigned frame_num;
|
||||||
|
|
||||||
Bundle bundle[BINKB_NB_SRC]; ///< bundles for decoding all data types
|
Bundle bundle[BINKB_NB_SRC]; ///< bundles for decoding all data types
|
||||||
Tree col_high[16]; ///< trees for decoding high nibble in "colours" data type
|
Tree col_high[16]; ///< trees for decoding high nibble in "colours" data type
|
||||||
@@ -1207,6 +1208,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
|
|||||||
if (c->version >= 'i')
|
if (c->version >= 'i')
|
||||||
skip_bits_long(&gb, 32);
|
skip_bits_long(&gb, 32);
|
||||||
|
|
||||||
|
c->frame_num++;
|
||||||
|
|
||||||
for (plane = 0; plane < 3; plane++) {
|
for (plane = 0; plane < 3; plane++) {
|
||||||
plane_idx = (!plane || !c->swap_planes) ? plane : (plane ^ 3);
|
plane_idx = (!plane || !c->swap_planes) ? plane : (plane ^ 3);
|
||||||
|
|
||||||
@@ -1215,7 +1218,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
|
|||||||
return ret;
|
return ret;
|
||||||
} else {
|
} else {
|
||||||
if ((ret = binkb_decode_plane(c, &gb, plane_idx,
|
if ((ret = binkb_decode_plane(c, &gb, plane_idx,
|
||||||
!avctx->frame_number, !!plane)) < 0)
|
c->frame_num == 1, !!plane)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
if (get_bits_count(&gb) >= bits_count)
|
if (get_bits_count(&gb) >= bits_count)
|
||||||
@@ -1339,6 +1342,13 @@ static av_cold int decode_end(AVCodecContext *avctx)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void flush(AVCodecContext *avctx)
|
||||||
|
{
|
||||||
|
BinkContext * const c = avctx->priv_data;
|
||||||
|
|
||||||
|
c->frame_num = 0;
|
||||||
|
}
|
||||||
|
|
||||||
AVCodec ff_bink_decoder = {
|
AVCodec ff_bink_decoder = {
|
||||||
.name = "binkvideo",
|
.name = "binkvideo",
|
||||||
.type = AVMEDIA_TYPE_VIDEO,
|
.type = AVMEDIA_TYPE_VIDEO,
|
||||||
@@ -1348,5 +1358,6 @@ AVCodec ff_bink_decoder = {
|
|||||||
.close = decode_end,
|
.close = decode_end,
|
||||||
.decode = decode_frame,
|
.decode = decode_frame,
|
||||||
.long_name = NULL_IF_CONFIG_SMALL("Bink video"),
|
.long_name = NULL_IF_CONFIG_SMALL("Bink video"),
|
||||||
|
.flush = flush,
|
||||||
.capabilities = CODEC_CAP_DR1,
|
.capabilities = CODEC_CAP_DR1,
|
||||||
};
|
};
|
||||||
|
@@ -305,7 +305,15 @@ int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
|
|||||||
GET_DATA(buf[j].bits, bits, i, bits_wrap, bits_size);\
|
GET_DATA(buf[j].bits, bits, i, bits_wrap, bits_size);\
|
||||||
if (!(condition))\
|
if (!(condition))\
|
||||||
continue;\
|
continue;\
|
||||||
|
if (buf[j].bits > 3*nb_bits || buf[j].bits>32) {\
|
||||||
|
av_log(NULL, AV_LOG_ERROR, "Too long VLC in init_vlc\n");\
|
||||||
|
return -1;\
|
||||||
|
}\
|
||||||
GET_DATA(buf[j].code, codes, i, codes_wrap, codes_size);\
|
GET_DATA(buf[j].code, codes, i, codes_wrap, codes_size);\
|
||||||
|
if (buf[j].code >= (1LL<<buf[j].bits)) {\
|
||||||
|
av_log(NULL, AV_LOG_ERROR, "Invalid code in init_vlc\n");\
|
||||||
|
return -1;\
|
||||||
|
}\
|
||||||
if (flags & INIT_VLC_LE)\
|
if (flags & INIT_VLC_LE)\
|
||||||
buf[j].code = bitswap_32(buf[j].code);\
|
buf[j].code = bitswap_32(buf[j].code);\
|
||||||
else\
|
else\
|
||||||
|
@@ -175,7 +175,13 @@ static int decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
case C93_4X4_FROM_PREV:
|
case C93_4X4_FROM_PREV:
|
||||||
for (j = 0; j < 8; j += 4) {
|
for (j = 0; j < 8; j += 4) {
|
||||||
for (i = 0; i < 8; i += 4) {
|
for (i = 0; i < 8; i += 4) {
|
||||||
offset = bytestream2_get_le16(&gb);
|
int offset = bytestream2_get_le16(&gb);
|
||||||
|
int from_x = offset % WIDTH;
|
||||||
|
int from_y = offset / WIDTH;
|
||||||
|
if (block_type == C93_4X4_FROM_CURR && from_y == y+j &&
|
||||||
|
(FFABS(from_x - x-i) < 4 || FFABS(from_x - x-i) > WIDTH-4)) {
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
if ((ret = copy_block(avctx, &out[j*stride+i],
|
if ((ret = copy_block(avctx, &out[j*stride+i],
|
||||||
copy_from, offset, 4, stride)) < 0)
|
copy_from, offset, 4, stride)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
@@ -305,7 +305,7 @@ STOP_TIMER("get_cabac_bypass")
 
 for(i=0; i<SIZE; i++){
 START_TIMER
-if( (r[i]&1) != get_cabac(&c, state) )
+if( (r[i]&1) != get_cabac_noinline(&c, state) )
 av_log(NULL, AV_LOG_ERROR, "CABAC failure at %d\n", i);
 STOP_TIMER("get_cabac")
 }
@@ -300,7 +300,9 @@ static int cdg_decode_frame(AVCodecContext *avctx,
 inst = bytestream_get_byte(&buf);
 inst &= CDG_MASK;
 buf += 2; /// skipping 2 unneeded bytes
-bytestream_get_buffer(&buf, cdg_data, buf_size - CDG_HEADER_SIZE);
+if (buf_size > CDG_HEADER_SIZE)
+bytestream_get_buffer(&buf, cdg_data, buf_size - CDG_HEADER_SIZE);
 
 if ((command & CDG_MASK) == CDG_COMMAND) {
 switch (inst) {
@@ -236,7 +236,7 @@ static int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
 
 static int dnxhd_init_rc(DNXHDEncContext *ctx)
 {
-FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160*ctx->m.avctx->qmax*sizeof(RCEntry), fail);
+FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160*(ctx->m.avctx->qmax + 1)*sizeof(RCEntry), fail);
 if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD)
 FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_cmp, ctx->m.mb_num*sizeof(RCCMPEntry), fail);
 
@@ -212,6 +212,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 buf[803] = s->bits_per_component;
 write16(buf + 804, (s->bits_per_component == 10 || s->bits_per_component == 12) ?
 1 : 0); /* packing method */
+write32(buf + 808, HEADER_SIZE); /* data offset */
 
 /* Image source information header */
 write32(buf + 1628, avctx->sample_aspect_ratio.num);
@@ -1897,7 +1897,7 @@ void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){
|
|||||||
|
|
||||||
static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
|
static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
|
||||||
long i;
|
long i;
|
||||||
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
|
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
|
||||||
long a = *(long*)(src+i);
|
long a = *(long*)(src+i);
|
||||||
long b = *(long*)(dst+i);
|
long b = *(long*)(dst+i);
|
||||||
*(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
|
*(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
|
||||||
@@ -1922,7 +1922,7 @@ static void diff_bytes_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
|
|||||||
}
|
}
|
||||||
}else
|
}else
|
||||||
#endif
|
#endif
|
||||||
for(i=0; i<=w-sizeof(long); i+=sizeof(long)){
|
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
|
||||||
long a = *(long*)(src1+i);
|
long a = *(long*)(src1+i);
|
||||||
long b = *(long*)(src2+i);
|
long b = *(long*)(src2+i);
|
||||||
*(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
|
*(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
|
||||||
|
@@ -71,6 +71,11 @@ static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint
|
|||||||
case 4: // motion compensation
|
case 4: // motion compensation
|
||||||
x = (*mv) >> 4; if(x & 8) x = 8 - x;
|
x = (*mv) >> 4; if(x & 8) x = 8 - x;
|
||||||
y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
|
y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
|
||||||
|
if (i < -x || avctx->width - i - 4 < x ||
|
||||||
|
j < -y || avctx->height - j - 4 < y) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "MV %d %d out of bounds\n", x,y);
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
tmp2 += x + y*stride;
|
tmp2 += x + y*stride;
|
||||||
case 0: // skip
|
case 0: // skip
|
||||||
case 5: // skip in method 12
|
case 5: // skip in method 12
|
||||||
@@ -128,6 +133,11 @@ static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint
|
|||||||
case 0x80: // motion compensation
|
case 0x80: // motion compensation
|
||||||
x = (*mv) >> 4; if(x & 8) x = 8 - x;
|
x = (*mv) >> 4; if(x & 8) x = 8 - x;
|
||||||
y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
|
y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
|
||||||
|
if (i + 2*(k & 1) < -x || avctx->width - i - 2*(k & 1) - 2 < x ||
|
||||||
|
j + (k & 2) < -y || avctx->height - j - (k & 2) - 2 < y) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "MV %d %d out of bounds\n", x,y);
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
tmp2 += x + y*stride;
|
tmp2 += x + y*stride;
|
||||||
case 0x00: // skip
|
case 0x00: // skip
|
||||||
tmp[d + 0 ] = tmp2[0];
|
tmp[d + 0 ] = tmp2[0];
|
||||||
|
@@ -255,6 +255,11 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
calc_quant_matrix(s, buf[13]);
|
calc_quant_matrix(s, buf[13]);
|
||||||
buf += 16;
|
buf += 16;
|
||||||
|
|
||||||
|
if (width < 16 || height < 16) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "Dimensions too small\n");
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
if (avctx->width != width || avctx->height != height) {
|
if (avctx->width != width || avctx->height != height) {
|
||||||
if((width * height)/2048*7 > buf_end-buf)
|
if((width * height)/2048*7 > buf_end-buf)
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
|
@@ -762,6 +762,17 @@ void ff_er_frame_start(ERContext *s)
|
|||||||
s->error_occurred = 0;
|
s->error_occurred = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int er_supported(ERContext *s)
|
||||||
|
{
|
||||||
|
if(s->avctx->hwaccel ||
|
||||||
|
s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
|
||||||
|
!s->cur_pic ||
|
||||||
|
s->cur_pic->field_picture
|
||||||
|
)
|
||||||
|
return 0;
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Add a slice.
|
* Add a slice.
|
||||||
* @param endx x component of the last macroblock, can be -1
|
* @param endx x component of the last macroblock, can be -1
|
||||||
@@ -828,7 +839,7 @@ void ff_er_add_slice(ERContext *s, int startx, int starty,
|
|||||||
s->error_status_table[start_xy] |= VP_START;
|
s->error_status_table[start_xy] |= VP_START;
|
||||||
|
|
||||||
if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) &&
|
if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) &&
|
||||||
s->avctx->skip_top * s->mb_width < start_i) {
|
er_supported(s) && s->avctx->skip_top * s->mb_width < start_i) {
|
||||||
int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];
|
int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];
|
||||||
|
|
||||||
prev_status &= ~ VP_START;
|
prev_status &= ~ VP_START;
|
||||||
@@ -851,9 +862,7 @@ void ff_er_frame_end(ERContext *s)
|
|||||||
* though it should not crash if enabled. */
|
* though it should not crash if enabled. */
|
||||||
if (!s->avctx->err_recognition || s->error_count == 0 ||
|
if (!s->avctx->err_recognition || s->error_count == 0 ||
|
||||||
s->avctx->lowres ||
|
s->avctx->lowres ||
|
||||||
s->avctx->hwaccel ||
|
!er_supported(s) ||
|
||||||
s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
|
|
||||||
!s->cur_pic || s->cur_pic->field_picture ||
|
|
||||||
s->error_count == 3 * s->mb_width *
|
s->error_count == 3 * s->mb_width *
|
||||||
(s->avctx->skip_top + s->avctx->skip_bottom)) {
|
(s->avctx->skip_top + s->avctx->skip_bottom)) {
|
||||||
return;
|
return;
|
||||||
|
@@ -374,7 +374,7 @@ static void bl_intrp(EVRCContext *e, float *ex, float delay)
|
|||||||
int offset, i, coef_idx;
|
int offset, i, coef_idx;
|
||||||
int16_t t;
|
int16_t t;
|
||||||
|
|
||||||
offset = lrintf(fabs(delay));
|
offset = lrintf(delay);
|
||||||
|
|
||||||
t = (offset - delay + 0.5) * 8.0 + 0.5;
|
t = (offset - delay + 0.5) * 8.0 + 0.5;
|
||||||
if (t == 8) {
|
if (t == 8) {
|
||||||
@@ -640,7 +640,7 @@ static void postfilter(EVRCContext *e, float *in, const float *coeff,
|
|||||||
/* Short term postfilter */
|
/* Short term postfilter */
|
||||||
synthesis_filter(temp, wcoef2, e->postfilter_iir, length, out);
|
synthesis_filter(temp, wcoef2, e->postfilter_iir, length, out);
|
||||||
|
|
||||||
memcpy(e->postfilter_residual,
|
memmove(e->postfilter_residual,
|
||||||
e->postfilter_residual + length, ACB_SIZE * sizeof(float));
|
e->postfilter_residual + length, ACB_SIZE * sizeof(float));
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -714,7 +714,7 @@ static void frame_erasure(EVRCContext *e, float *samples)
|
|||||||
e->pitch[ACB_SIZE + j] = e->energy_vector[i];
|
e->pitch[ACB_SIZE + j] = e->energy_vector[i];
|
||||||
}
|
}
|
||||||
|
|
||||||
memcpy(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
|
memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
|
||||||
|
|
||||||
if (e->bitrate != RATE_QUANT && e->avg_acb_gain < 0.4) {
|
if (e->bitrate != RATE_QUANT && e->avg_acb_gain < 0.4) {
|
||||||
f = 0.1 * e->avg_fcb_gain;
|
f = 0.1 * e->avg_fcb_gain;
|
||||||
@@ -814,7 +814,7 @@ static int evrc_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
|
|
||||||
interpolate_delay(idelay, delay, e->prev_pitch_delay, i);
|
interpolate_delay(idelay, delay, e->prev_pitch_delay, i);
|
||||||
acb_excitation(e, e->pitch + ACB_SIZE, e->avg_acb_gain, idelay, subframe_size);
|
acb_excitation(e, e->pitch + ACB_SIZE, e->avg_acb_gain, idelay, subframe_size);
|
||||||
memcpy(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
|
memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -872,7 +872,7 @@ static int evrc_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
e->pitch[ACB_SIZE + j] = e->energy_vector[i];
|
e->pitch[ACB_SIZE + j] = e->energy_vector[i];
|
||||||
}
|
}
|
||||||
|
|
||||||
memcpy(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
|
memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
|
||||||
|
|
||||||
synthesis_filter(e->pitch + ACB_SIZE, ilpc,
|
synthesis_filter(e->pitch + ACB_SIZE, ilpc,
|
||||||
e->synthesis, subframe_size, tmp);
|
e->synthesis, subframe_size, tmp);
|
||||||
|
@@ -348,7 +348,8 @@ static int decode_block(AVCodecContext *avctx, void *tdata,
|
|||||||
const uint8_t *src;
|
const uint8_t *src;
|
||||||
int axmax = (avctx->width - (s->xmax + 1)) * 2 * s->desc->nb_components;
|
int axmax = (avctx->width - (s->xmax + 1)) * 2 * s->desc->nb_components;
|
||||||
int bxmin = s->xmin * 2 * s->desc->nb_components;
|
int bxmin = s->xmin * 2 * s->desc->nb_components;
|
||||||
int ret, i, x, buf_size = s->buf_size;
|
int i, x, buf_size = s->buf_size;
|
||||||
|
int av_unused ret;
|
||||||
|
|
||||||
line_offset = AV_RL64(s->table + jobnr * 8);
|
line_offset = AV_RL64(s->table + jobnr * 8);
|
||||||
// Check if the buffer has the required bytes needed from the offset
|
// Check if the buffer has the required bytes needed from the offset
|
||||||
|
@@ -446,6 +446,10 @@ static int read_extra_header(FFV1Context *f)
|
|||||||
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
|
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
|
||||||
|
|
||||||
f->version = get_symbol(c, state, 0);
|
f->version = get_symbol(c, state, 0);
|
||||||
|
if (f->version < 2) {
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "Invalid version in global header\n");
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
if (f->version > 2) {
|
if (f->version > 2) {
|
||||||
c->bytestream_end -= 4;
|
c->bytestream_end -= 4;
|
||||||
f->minor_version = get_symbol(c, state, 0);
|
f->minor_version = get_symbol(c, state, 0);
|
||||||
@@ -523,6 +527,7 @@ static int read_header(FFV1Context *f)
|
|||||||
memset(state, 128, sizeof(state));
|
memset(state, 128, sizeof(state));
|
||||||
|
|
||||||
if (f->version < 2) {
|
if (f->version < 2) {
|
||||||
|
int chroma_planes, chroma_h_shift, chroma_v_shift, transparency, colorspace, bits_per_raw_sample;
|
||||||
unsigned v= get_symbol(c, state, 0);
|
unsigned v= get_symbol(c, state, 0);
|
||||||
if (v >= 2) {
|
if (v >= 2) {
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
|
av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
|
||||||
@@ -535,15 +540,32 @@ static int read_header(FFV1Context *f)
|
|||||||
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
|
f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
|
||||||
}
|
}
|
||||||
|
|
||||||
f->colorspace = get_symbol(c, state, 0); //YUV cs type
|
colorspace = get_symbol(c, state, 0); //YUV cs type
|
||||||
|
bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
|
||||||
|
chroma_planes = get_rac(c, state);
|
||||||
|
chroma_h_shift = get_symbol(c, state, 0);
|
||||||
|
chroma_v_shift = get_symbol(c, state, 0);
|
||||||
|
transparency = get_rac(c, state);
|
||||||
|
|
||||||
if (f->version > 0)
|
if (f->plane_count) {
|
||||||
f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
|
if ( colorspace != f->colorspace
|
||||||
|
|| bits_per_raw_sample != f->avctx->bits_per_raw_sample
|
||||||
|
|| chroma_planes != f->chroma_planes
|
||||||
|
|| chroma_h_shift!= f->chroma_h_shift
|
||||||
|
|| chroma_v_shift!= f->chroma_v_shift
|
||||||
|
|| transparency != f->transparency) {
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
f->colorspace = colorspace;
|
||||||
|
f->avctx->bits_per_raw_sample = bits_per_raw_sample;
|
||||||
|
f->chroma_planes = chroma_planes;
|
||||||
|
f->chroma_h_shift = chroma_h_shift;
|
||||||
|
f->chroma_v_shift = chroma_v_shift;
|
||||||
|
f->transparency = transparency;
|
||||||
|
|
||||||
f->chroma_planes = get_rac(c, state);
|
|
||||||
f->chroma_h_shift = get_symbol(c, state, 0);
|
|
||||||
f->chroma_v_shift = get_symbol(c, state, 0);
|
|
||||||
f->transparency = get_rac(c, state);
|
|
||||||
f->plane_count = 2 + f->transparency;
|
f->plane_count = 2 + f->transparency;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -561,47 +583,32 @@ static int read_header(FFV1Context *f)
|
|||||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
|
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
|
||||||
case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
|
case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
|
||||||
case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
|
case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
|
||||||
default:
|
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
|
||||||
return AVERROR(ENOSYS);
|
|
||||||
}
|
}
|
||||||
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
|
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
|
||||||
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
|
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
|
||||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
|
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
|
||||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
|
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
|
||||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
|
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
|
||||||
default:
|
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
|
||||||
return AVERROR(ENOSYS);
|
|
||||||
}
|
}
|
||||||
} else if (f->avctx->bits_per_raw_sample == 9) {
|
} else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
|
||||||
f->packed_at_lsb = 1;
|
f->packed_at_lsb = 1;
|
||||||
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
||||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
|
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
|
||||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
|
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
|
||||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
|
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
|
||||||
default:
|
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
|
||||||
return AVERROR(ENOSYS);
|
|
||||||
}
|
}
|
||||||
} else if (f->avctx->bits_per_raw_sample == 10) {
|
} else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
|
||||||
f->packed_at_lsb = 1;
|
f->packed_at_lsb = 1;
|
||||||
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
||||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
|
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
|
||||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
|
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
|
||||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
|
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
|
||||||
default:
|
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
|
||||||
return AVERROR(ENOSYS);
|
|
||||||
}
|
}
|
||||||
} else {
|
} else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){
|
||||||
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
|
||||||
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
|
case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
|
||||||
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
|
case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
|
||||||
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
|
case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
|
||||||
default:
|
|
||||||
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
|
||||||
return AVERROR(ENOSYS);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else if (f->colorspace == 1) {
|
} else if (f->colorspace == 1) {
|
||||||
@@ -625,6 +632,10 @@ static int read_header(FFV1Context *f)
|
|||||||
av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
|
av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
|
||||||
return AVERROR(ENOSYS);
|
return AVERROR(ENOSYS);
|
||||||
}
|
}
|
||||||
|
if (f->avctx->pix_fmt == AV_PIX_FMT_NONE) {
|
||||||
|
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
|
||||||
|
return AVERROR(ENOSYS);
|
||||||
|
}
|
||||||
|
|
||||||
av_dlog(f->avctx, "%d %d %d\n",
|
av_dlog(f->avctx, "%d %d %d\n",
|
||||||
f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
|
f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
|
||||||
|
@@ -274,7 +274,7 @@ static av_always_inline int encode_line(FFV1Context *s, int w,
 int run_mode = 0;
 
 if (s->ac) {
-if (c->bytestream_end - c->bytestream < w * 20) {
+if (c->bytestream_end - c->bytestream < w * 35) {
 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
 return AVERROR_INVALIDDATA;
 }
@@ -27,7 +27,7 @@ const int ff_flac_sample_rate_table[16] =
 8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000,
 0, 0, 0, 0 };
 
-const int16_t ff_flac_blocksize_table[16] = {
+const int32_t ff_flac_blocksize_table[16] = {
 0, 192, 576<<0, 576<<1, 576<<2, 576<<3, 0, 0,
 256<<0, 256<<1, 256<<2, 256<<3, 256<<4, 256<<5, 256<<6, 256<<7
 };
@@ -26,6 +26,6 @@
 
 extern const int ff_flac_sample_rate_table[16];
 
-extern const int16_t ff_flac_blocksize_table[16];
+extern const int32_t ff_flac_blocksize_table[16];
 
 #endif /* AVCODEC_FLACDATA_H */
@@ -394,6 +394,10 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
 }
 s->diff_start = get_bits(&gb, 8);
 s->diff_height = get_bits(&gb, 8);
+if (s->diff_start + s->diff_height > cur_blk_height) {
+av_log(avctx, AV_LOG_ERROR, "Block parameters invalid\n");
+return AVERROR_INVALIDDATA;
+}
 av_log(avctx, AV_LOG_DEBUG,
 "%dx%d diff start %d height %d\n",
 i, j, s->diff_start, s->diff_height);
@@ -462,6 +462,7 @@ static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, A
 
 if (s->keyframe) {
 s->keyframe_ok = 0;
+s->gce_prev_disposal = GCE_DISPOSAL_NONE;
 if ((ret = gif_read_header1(s)) < 0)
 return ret;
 
@@ -1782,11 +1782,6 @@ int ff_h264_frame_start(H264Context *h)
         h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
     }
 
-    /* Some macroblocks can be accessed before they're available in case
-     * of lost slices, MBAFF or threading. */
-    memset(h->slice_table, -1,
-           (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
-
     // s->decode = (h->flags & CODEC_FLAG_PSNR) || !s->encoding ||
     //             h->cur_pic.f.reference /* || h->contains_intra */ || 1;
 
@@ -2584,6 +2579,7 @@ static void flush_change(H264Context *h)
     h->sync= 0;
     h->list_count = 0;
     h->current_slice = 0;
+    h->mmco_reset = 1;
 }
 
 /* forget old pics after a seek */
@@ -3114,7 +3110,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
     unsigned int pps_id;
     int num_ref_idx_active_override_flag, ret;
     unsigned int slice_type, tmp, i, j;
-    int default_ref_list_done = 0;
     int last_pic_structure, last_pic_droppable;
     int must_reinit;
     int needs_reinit = 0;
@@ -3154,12 +3149,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
     h->slice_type_fixed = 0;
 
     slice_type = golomb_to_pict_type[slice_type];
-    if (slice_type == AV_PICTURE_TYPE_I ||
-        (h0->current_slice != 0 &&
-         slice_type == h0->last_slice_type &&
-         !memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) {
-        default_ref_list_done = 1;
-    }
     h->slice_type = slice_type;
     h->slice_type_nos = slice_type & 3;
 
@@ -3219,7 +3208,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
                     || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height
                     || h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
                     || h->cur_chroma_format_idc != h->sps.chroma_format_idc
-                    || av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)));
+                    || av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio)
+                    || h->mb_width != h->sps.mb_width
+                    || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
+                    ));
     if (h0->avctx->pix_fmt != get_pixel_format(h0, 0))
         must_reinit = 1;
 
@@ -3337,7 +3329,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
     } else {
         /* Shorten frame num gaps so we don't have to allocate reference
          * frames just to throw them away */
-        if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
+        if (h->frame_num != h->prev_frame_num) {
             int unwrap_prev_frame_num = h->prev_frame_num;
             int max_frame_num = 1 << h->sps.log2_max_frame_num;
 
@@ -3364,7 +3356,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
             assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
 
             /* Mark old field/frame as completed */
-            if (!last_pic_droppable && h0->cur_pic_ptr->owner2 == h0) {
+            if (h0->cur_pic_ptr->owner2 == h0) {
                 ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
                                           last_pic_structure == PICT_BOTTOM_FIELD);
             }
@@ -3373,7 +3365,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
             if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
                 /* Previous field is unmatched. Don't display it, but let it
                  * remain for reference if marked as such. */
-                if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
+                if (last_pic_structure != PICT_FRAME) {
                     ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
                                               last_pic_structure == PICT_TOP_FIELD);
                 }
@@ -3383,7 +3375,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
                  * different frame_nums. Consider this field first in
                  * pair. Throw away previous field except for reference
                  * purposes. */
-                if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
+                if (last_pic_structure != PICT_FRAME) {
                     ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
                                               last_pic_structure == PICT_TOP_FIELD);
                 }
@@ -3419,7 +3411,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
             }
         }
 
-        while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 && !h0->first_field &&
+        while (h->frame_num != h->prev_frame_num && !h0->first_field &&
                h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
             Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
             av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
@@ -3498,6 +3490,15 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
         } else {
             release_unused_pictures(h, 0);
         }
+        /* Some macroblocks can be accessed before they're available in case
+         * of lost slices, MBAFF or threading. */
+        if (FIELD_PICTURE) {
+            for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
+                memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
+        } else {
+            memset(h->slice_table, -1,
+                   (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
+        }
     }
     if (h != h0 && (ret = clone_slice(h, h0)) < 0)
         return ret;
@@ -3590,9 +3591,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
         h->list_count = 0;
         h->ref_count[0] = h->ref_count[1] = 0;
     }
-    if (!default_ref_list_done)
+    if (slice_type != AV_PICTURE_TYPE_I &&
+        (h0->current_slice == 0 ||
+         slice_type != h0->last_slice_type ||
+         memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) {
         ff_h264_fill_default_ref_list(h);
+    }
 
     if (h->slice_type_nos != AV_PICTURE_TYPE_I &&
         ff_h264_decode_ref_pic_list_reordering(h) < 0) {
@@ -3775,6 +3779,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
 
     if (h->ref_count[0]) h->er.last_pic = &h->ref_list[0][0];
     if (h->ref_count[1]) h->er.next_pic = &h->ref_list[1][0];
+    h->er.ref_count = h->ref_count[0];
 
     if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
         av_log(h->avctx, AV_LOG_DEBUG,
@@ -4166,7 +4171,6 @@ static void er_add_slice(H264Context *h, int startx, int starty,
     if (CONFIG_ERROR_RESILIENCE) {
         ERContext *er = &h->er;
 
-        er->ref_count = h->ref_count[0];
         ff_er_add_slice(er, startx, starty, endx, endy, status);
     }
 }
@@ -549,9 +549,15 @@ static int decode_residual(H264Context *h, GetBitContext *gb, int16_t *block, in
             if(prefix<15){
                 level_code = (prefix<<suffix_length) + get_bits(gb, suffix_length);
             }else{
-                level_code = (15<<suffix_length) + get_bits(gb, prefix-3);
-                if(prefix>=16)
+                level_code = 15<<suffix_length;
+                if (prefix>=16) {
+                    if(prefix > 25+3){
+                        av_log(h->avctx, AV_LOG_ERROR, "Invalid level prefix\n");
+                        return AVERROR_INVALIDDATA;
+                    }
                     level_code += (1<<(prefix-3))-4096;
+                }
+                level_code += get_bits(gb, prefix-3);
             }
             mask= -(level_code&1);
             level_code= (((2+level_code)>>1) ^ mask) - mask;
@@ -706,7 +712,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h){
                down the code */
     if(h->slice_type_nos != AV_PICTURE_TYPE_I){
         if(h->mb_skip_run==-1)
-            h->mb_skip_run= get_ue_golomb(&h->gb);
+            h->mb_skip_run= get_ue_golomb_long(&h->gb);
 
         if (h->mb_skip_run--) {
             if(FRAME_MBAFF && (h->mb_y&1) == 0){
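A note on the CAVLC change above: the escape path now validates the level prefix before reading any suffix bits, so an oversized prefix is rejected instead of feeding bogus values into the shift. A minimal sketch of the corrected flow, assuming libavcodec's usual get_bits.h/error.h context; the function name is local to this example and is not the FFmpeg function itself (which also updates suffix_length and folds in the sign):

    static int escape_level_code(GetBitContext *gb, int prefix, int suffix_length)
    {
        int level_code = 15 << suffix_length;         /* base value once prefix >= 15 */
        if (prefix >= 16) {
            if (prefix > 25 + 3)                      /* bounded exactly as in the hunk */
                return AVERROR_INVALIDDATA;
            level_code += (1 << (prefix - 3)) - 4096; /* extended-range offset */
        }
        return level_code + get_bits(gb, prefix - 3); /* suffix read once, after validation */
    }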
@@ -543,7 +543,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
             if(!pic){
                 if(mmco[i].opcode != MMCO_SHORT2LONG || !h->long_ref[mmco[i].long_arg]
                    || h->long_ref[mmco[i].long_arg]->frame_num != frame_num) {
-                    av_log(h->avctx, AV_LOG_ERROR, "mmco: unref short failure\n");
+                    av_log(h->avctx, h->short_ref_count ? AV_LOG_ERROR : AV_LOG_DEBUG, "mmco: unref short failure\n");
                     err = AVERROR_INVALIDDATA;
                 }
                 continue;
@@ -586,6 +586,9 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
 
             if (h->long_ref[mmco[i].long_arg] != h->cur_pic_ptr) {
                 remove_long(h, mmco[i].long_arg, 0);
+                if (remove_short(h, h->cur_pic_ptr->frame_num, 0)) {
+                    av_log(h->avctx, AV_LOG_ERROR, "mmco: cannot assign current picture to short and long at the same time\n");
+                }
 
                 h->long_ref[ mmco[i].long_arg ]= h->cur_pic_ptr;
                 h->long_ref[ mmco[i].long_arg ]->long_ref=1;
@@ -680,7 +683,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
     print_short_term(h);
     print_long_term(h);
 
-    if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=1 + (h->picture_structure != PICT_FRAME) && h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
+    if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=2 + (h->picture_structure != PICT_FRAME) && h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
         h->cur_pic_ptr->sync |= 1;
         if(!h->avctx->has_b_frames)
             h->sync = 2;
@@ -693,7 +696,7 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb,
                                    int first_slice)
 {
     int i, ret;
-    MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp;
+    MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = mmco_temp;
     int mmco_index = 0;
 
     if (h->nal_unit_type == NAL_IDR_SLICE){ // FIXME fields
@@ -759,6 +762,7 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb,
     }
 
     if (first_slice && mmco_index != -1) {
+        memcpy(h->mmco, mmco_temp, sizeof(h->mmco));
         h->mmco_index = mmco_index;
     } else if (!first_slice && mmco_index >= 0 &&
                (mmco_index != h->mmco_index ||
@@ -31,9 +31,11 @@
     c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_ ## depth ## _c; \
     c->put_h264_chroma_pixels_tab[1] = put_h264_chroma_mc4_ ## depth ## _c; \
     c->put_h264_chroma_pixels_tab[2] = put_h264_chroma_mc2_ ## depth ## _c; \
+    c->put_h264_chroma_pixels_tab[3] = put_h264_chroma_mc1_ ## depth ## _c; \
     c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_ ## depth ## _c; \
     c->avg_h264_chroma_pixels_tab[1] = avg_h264_chroma_mc4_ ## depth ## _c; \
     c->avg_h264_chroma_pixels_tab[2] = avg_h264_chroma_mc2_ ## depth ## _c; \
+    c->avg_h264_chroma_pixels_tab[3] = avg_h264_chroma_mc1_ ## depth ## _c; \
 
 void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
 {
@@ -24,8 +24,8 @@
 typedef void (*h264_chroma_mc_func)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x, int y);
 
 typedef struct H264ChromaContext {
-    h264_chroma_mc_func put_h264_chroma_pixels_tab[3];
-    h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];
+    h264_chroma_mc_func put_h264_chroma_pixels_tab[4];
+    h264_chroma_mc_func avg_h264_chroma_pixels_tab[4];
 } H264ChromaContext;
 
 void ff_h264chroma_init(H264ChromaContext *c, int bit_depth);
@@ -24,6 +24,34 @@
 #include "bit_depth_template.c"
 
 #define H264_CHROMA_MC(OPNAME, OP)\
+static void FUNCC(OPNAME ## h264_chroma_mc1)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
+    pixel *dst = (pixel*)_dst;\
+    pixel *src = (pixel*)_src;\
+    const int A=(8-x)*(8-y);\
+    const int B=( x)*(8-y);\
+    const int C=(8-x)*( y);\
+    const int D=( x)*( y);\
+    int i;\
+    stride >>= sizeof(pixel)-1;\
+    \
+    av_assert2(x<8 && y<8 && x>=0 && y>=0);\
+    \
+    if(D){\
+        for(i=0; i<h; i++){\
+            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
+            dst+= stride;\
+            src+= stride;\
+        }\
+    }else{\
+        const int E= B+C;\
+        const int step= C ? stride : 1;\
+        for(i=0; i<h; i++){\
+            OP(dst[0], (A*src[0] + E*src[step+0]));\
+            dst+= stride;\
+            src+= stride;\
+        }\
+    }\
+}\
 static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
     pixel *dst = (pixel*)_dst;\
     pixel *src = (pixel*)_src;\
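The new mc1 kernel above is the usual 2x2 bilinear chroma filter reduced to one output pixel per row. A quick check of the weights (the OP macro is assumed to perform the final rounding and shift): A + B + C + D = (8-x)(8-y) + x(8-y) + (8-x)y + xy = 64, so each sample is a weighted average of the four neighbours with weights summing to 64. When x or y is zero, D vanishes, the code collapses the two surviving taps into E = B + C, and step picks the horizontal or vertical neighbour accordingly, which is the cheaper two-tap branch.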
@@ -804,8 +804,16 @@ static int decode_band(IVI45DecContext *ctx,
                 break;
 
             result = ivi_decode_blocks(&ctx->gb, band, tile, avctx);
-            if (result < 0 || ((get_bits_count(&ctx->gb) - pos) >> 3) != tile->data_size) {
-                av_log(avctx, AV_LOG_ERROR, "Corrupted tile data encountered!\n");
+            if (result < 0) {
+                av_log(avctx, AV_LOG_ERROR,
+                       "Corrupted tile data encountered!\n");
+                break;
+            }
+
+            if (((get_bits_count(&ctx->gb) - pos) >> 3) != tile->data_size) {
+                av_log(avctx, AV_LOG_ERROR,
+                       "Tile data_size mismatch!\n");
+                result = AVERROR_INVALIDDATA;
                 break;
             }
 
@@ -31,6 +31,7 @@
 #include "bytestream.h"
 #include "internal.h"
 #include "j2k.h"
+#include "libavutil/avassert.h"
 #include "libavutil/common.h"
 
 #define JP2_SIG_TYPE 0x6A502020
@@ -302,6 +303,10 @@ static int get_cox(J2kDecoderContext *s, J2kCodingStyle *c)
         c->log2_cblk_width = bytestream2_get_byteu(&s->g) + 2; // cblk width
         c->log2_cblk_height = bytestream2_get_byteu(&s->g) + 2; // cblk height
 
+        if (c->log2_cblk_width > 6 || c->log2_cblk_height > 6) {
+            return AVERROR_PATCHWELCOME;
+        }
+
         c->cblk_style = bytestream2_get_byteu(&s->g);
         if (c->cblk_style != 0){ // cblk style
             av_log(s->avctx, AV_LOG_WARNING, "extra cblk styles %X\n", c->cblk_style);
@@ -719,6 +724,9 @@ static int decode_cblk(J2kDecoderContext *s, J2kCodingStyle *codsty, J2kT1Contex
     int bpass_csty_symbol = J2K_CBLK_BYPASS & codsty->cblk_style;
     int vert_causal_ctx_csty_symbol = J2K_CBLK_VSC & codsty->cblk_style;
 
+    av_assert0(width <= J2K_MAX_CBLKW);
+    av_assert0(height <= J2K_MAX_CBLKH);
+
     for (y = 0; y < height+2; y++)
         memset(t1->flags[y], 0, (width+2)*sizeof(int));
 
@@ -142,6 +142,8 @@ static inline int ls_get_code_runterm(GetBitContext *gb, JLSState *state, int RI
         ret = ret >> 1;
     }
 
+    if(FFABS(ret) > 0xFFFF)
+        return -0x10000;
     /* update state */
     state->A[Q] += FFABS(ret) - RItype;
     ret *= state->twonear;
@@ -107,7 +107,7 @@ static int kmvc_decode_intra_8x8(KmvcContext * ctx, int w, int h)
                 val = bytestream2_get_byte(&ctx->g);
                 mx = val & 0xF;
                 my = val >> 4;
-                if ((l0x-mx) + 320*(l0y-my) < 0 || (l0x-mx) + 320*(l0y-my) > 316*196) {
+                if ((l0x-mx) + 320*(l0y-my) < 0 || (l0x-mx) + 320*(l0y-my) > 320*197 - 4) {
                     av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
                     return AVERROR_INVALIDDATA;
                 }
@@ -132,7 +132,7 @@ static int kmvc_decode_intra_8x8(KmvcContext * ctx, int w, int h)
                         val = bytestream2_get_byte(&ctx->g);
                         mx = val & 0xF;
                         my = val >> 4;
-                        if ((l1x-mx) + 320*(l1y-my) < 0 || (l1x-mx) + 320*(l1y-my) > 318*198) {
+                        if ((l1x-mx) + 320*(l1y-my) < 0 || (l1x-mx) + 320*(l1y-my) > 320*199 - 2) {
                             av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
                             return AVERROR_INVALIDDATA;
                         }
@@ -207,7 +207,7 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, int w, int h)
                 val = bytestream2_get_byte(&ctx->g);
                 mx = (val & 0xF) - 8;
                 my = (val >> 4) - 8;
-                if ((l0x+mx) + 320*(l0y+my) < 0 || (l0x+mx) + 320*(l0y+my) > 318*198) {
+                if ((l0x+mx) + 320*(l0y+my) < 0 || (l0x+mx) + 320*(l0y+my) > 320*197 - 4) {
                     av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
                     return AVERROR_INVALIDDATA;
                 }
@@ -232,7 +232,7 @@ static int kmvc_decode_inter_8x8(KmvcContext * ctx, int w, int h)
                         val = bytestream2_get_byte(&ctx->g);
                         mx = (val & 0xF) - 8;
                         my = (val >> 4) - 8;
-                        if ((l1x+mx) + 320*(l1y+my) < 0 || (l1x+mx) + 320*(l1y+my) > 318*198) {
+                        if ((l1x+mx) + 320*(l1y+my) < 0 || (l1x+mx) + 320*(l1y+my) > 320*199 - 2) {
                             av_log(ctx->avctx, AV_LOG_ERROR, "Invalid MV\n");
                             return AVERROR_INVALIDDATA;
                         }
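The new KMVC bounds read as "the whole sampled block must stay inside the decoder's internal frame". Assuming the fixed 320-byte-wide, 200-line work buffers this decoder uses (an assumption; the buffers themselves are not shown in these hunks), a 4x4 read starting at offset o touches bytes up to o + 3*320 + 3, so o may be at most 320*200 - 3*320 - 4 = 320*197 - 4; a 2x2 read touches up to o + 320 + 1, giving the 320*199 - 2 limit. The replaced constants (316*196, 318*198) were rough approximations that did not line up with the actual block footprint.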
@@ -42,6 +42,7 @@
 #include <stdlib.h>
 
 #include "libavutil/mem.h"
+#include "libavutil/pixdesc.h"
 #include "avcodec.h"
 #include "bytestream.h"
 #include "internal.h"
@@ -491,6 +492,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     unsigned int max_basesize = FFALIGN(avctx->width, 4) *
                                 FFALIGN(avctx->height, 4);
     unsigned int max_decomp_size;
+    int subsample_h, subsample_v;
 
     avcodec_get_frame_defaults(&c->pic);
     if (avctx->extradata_size < 8) {
@@ -517,6 +519,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
         max_decomp_size = max_basesize * 2;
         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
         av_log(avctx, AV_LOG_DEBUG, "Image type is YUV 4:2:2.\n");
+        if (avctx->width % 4) {
+            return AVERROR_INVALIDDATA;
+        }
         break;
     case IMGTYPE_RGB24:
         c->decomp_size = basesize * 3;
@@ -547,6 +552,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
         return AVERROR_INVALIDDATA;
     }
 
+    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &subsample_h, &subsample_v);
+    if (avctx->width % (1<<subsample_h) || avctx->height % (1<<subsample_v)) {
+        return AVERROR_INVALIDDATA;
+    }
+
     /* Detect compression method */
     c->compression = (int8_t)avctx->extradata[5];
     switch (avctx->codec_id) {
@@ -380,7 +380,7 @@ static const AVOption libopus_options[] = {
     { "voip", "Favor improved speech intelligibility", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_VOIP }, 0, 0, FLAGS, "application" },
     { "audio", "Favor faithfulness to the input", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_AUDIO }, 0, 0, FLAGS, "application" },
     { "lowdelay", "Restrict to only the lowest delay modes", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_RESTRICTED_LOWDELAY }, 0, 0, FLAGS, "application" },
-    { "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 10.0 }, 2.5, 60.0, FLAGS },
+    { "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 20.0 }, 2.5, 60.0, FLAGS },
     { "packet_loss", "Expected packet loss percentage", OFFSET(packet_loss), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 100, FLAGS },
     { "vbr", "Variable bit rate mode", OFFSET(vbr), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 2, FLAGS, "vbr" },
     { "off", "Use constant bit rate", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "vbr" },
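The default frame_duration for the libopus wrapper moves from 10 ms to 20 ms, which is libopus's own default frame size. A caller that relied on the old value can still request it explicitly; a minimal sketch, assuming an encoder context "ctx" and codec "codec" already set up elsewhere (names are hypothetical, not from this changeset):

    /* Pin the previous 10 ms frame duration when opening the encoder. */
    AVDictionary *opts = NULL;
    av_dict_set(&opts, "frame_duration", "10", 0);
    if (avcodec_open2(ctx, codec, &opts) < 0) {
        /* handle the error */
    }
    av_dict_free(&opts);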
@@ -175,7 +175,7 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
                       frame->pict_type == AV_PICTURE_TYPE_P ? X264_TYPE_P :
                       frame->pict_type == AV_PICTURE_TYPE_B ? X264_TYPE_B :
                       X264_TYPE_AUTO;
-        if (x4->params.b_tff != frame->top_field_first) {
+        if (x4->params.b_interlaced && x4->params.b_tff != frame->top_field_first) {
             x4->params.b_tff = frame->top_field_first;
             x264_encoder_reconfig(x4->enc, &x4->params);
         }
@@ -329,7 +329,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
         s->first_picture = 0;
     }
 
-    if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
+    if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
         if (s->progressive) {
             av_log_ask_for_sample(s->avctx, "progressively coded interlaced pictures not supported\n");
             return AVERROR_INVALIDDATA;
@@ -747,7 +747,9 @@ static void handle_rstn(MJpegDecodeContext *s, int nb_components)
 
         i = 8 + ((-get_bits_count(&s->gb)) & 7);
         /* skip RSTn */
-        if (s->restart_count == 0 && show_bits(&s->gb, i) == (1 << i) - 1) {
+        if (s->restart_count == 0) {
+            if( show_bits(&s->gb, i) == (1 << i) - 1
+                || show_bits(&s->gb, i) == 0xFF) {
             int pos = get_bits_count(&s->gb);
             align_get_bits(&s->gb);
             while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
@@ -757,6 +759,7 @@ static void handle_rstn(MJpegDecodeContext *s, int nb_components)
                     s->last_dc[i] = 1024;
             } else
                 skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
+            }
         }
     }
 }
@@ -771,6 +774,12 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int p
     int resync_mb_y = 0;
     int resync_mb_x = 0;
 
+    if (s->nb_components != 3 && s->nb_components != 4)
+        return AVERROR_INVALIDDATA;
+    if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
+        return AVERROR_INVALIDDATA;
+
+
     s->restart_count = s->restart_interval;
 
     av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size,
@@ -1587,8 +1596,6 @@ int ff_mjpeg_find_marker(MJpegDecodeContext *s,
     int t = 0, b = 0;
     PutBitContext pb;
 
-    s->cur_scan++;
-
     /* find marker */
     while (src + t < buf_end) {
         uint8_t x = src[t++];
@@ -1636,6 +1643,7 @@ int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     MJpegDecodeContext *s = avctx->priv_data;
     const uint8_t *buf_end, *buf_ptr;
     const uint8_t *unescaped_buf_ptr;
+    int hshift, vshift;
     int unescaped_buf_size;
     int start_code;
     int i, index;
@@ -1651,7 +1659,7 @@ int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                                           &unescaped_buf_size);
         /* EOF */
         if (start_code < 0) {
-            goto the_end;
+            break;
         } else if (unescaped_buf_size > (1U<<28)) {
             av_log(avctx, AV_LOG_ERROR, "MJPEG packet 0x%x too big (0x%x/0x%x), corrupt data?\n",
                    start_code, unescaped_buf_size, buf_size);
@@ -1761,6 +1769,7 @@ eoi_parser:
 
             goto the_end;
         case SOS:
+            s->cur_scan++;
             if ((ret = ff_mjpeg_decode_sos(s, NULL, NULL)) < 0 &&
                 (avctx->err_recognition & AV_EF_EXPLODE))
                 goto fail;
@@ -1790,7 +1799,7 @@ eoi_parser:
                (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
         }
     }
-    if (s->got_picture) {
+    if (s->got_picture && s->cur_scan) {
         av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
         goto eoi_parser;
     }
@@ -1814,6 +1823,9 @@ the_end:
     }
     if (s->upscale_v) {
         uint8_t *dst = &((uint8_t *)s->picture_ptr->data[s->upscale_v])[(s->height - 1) * s->linesize[s->upscale_v]];
+        int w;
+        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
+        w = s->width >> hshift;
         av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
                    avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
                    avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
@@ -1822,16 +1834,16 @@ the_end:
             uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[s->upscale_v])[i / 2 * s->linesize[s->upscale_v]];
             uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[s->upscale_v])[(i + 1) / 2 * s->linesize[s->upscale_v]];
             if (src1 == src2) {
-                memcpy(dst, src1, s->width);
+                memcpy(dst, src1, w);
             } else {
-                for (index = 0; index < s->width; index++)
+                for (index = 0; index < w; index++)
                     dst[index] = (src1[index] + src2[index]) >> 1;
             }
             dst -= s->linesize[s->upscale_v];
         }
     }
     if (s->flipped && (s->avctx->flags & CODEC_FLAG_EMU_EDGE)) {
-        int hshift, vshift, j;
+        int j;
         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
         for (index=0; index<4; index++) {
             uint8_t *dst = s->picture_ptr->data[index];
@@ -104,6 +104,9 @@ static int mm_decode_intra(MmContext * s, int half_horiz, int half_vert)
         if (half_horiz)
             run_length *=2;
 
+        if (run_length > s->avctx->width - x)
+            return AVERROR_INVALIDDATA;
+
         if (color) {
             memset(s->frame.data[0] + y*s->frame.linesize[0] + x, color, run_length);
             if (half_vert)
@@ -151,6 +154,8 @@ static int mm_decode_inter(MmContext * s, int half_horiz, int half_vert)
             int replace_array = bytestream2_get_byte(&s->gb);
             for(j=0; j<8; j++) {
                 int replace = (replace_array >> (7-j)) & 1;
+                if (x + half_horiz >= s->avctx->width)
+                    return AVERROR_INVALIDDATA;
                 if (replace) {
                     int color = bytestream2_get_byte(&data_ptr);
                     s->frame.data[0][y*s->frame.linesize[0] + x] = color;
|
@@ -1266,7 +1266,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
|
|||||||
s1->save_width != s->width ||
|
s1->save_width != s->width ||
|
||||||
s1->save_height != s->height ||
|
s1->save_height != s->height ||
|
||||||
s1->save_aspect_info != s->aspect_ratio_info ||
|
s1->save_aspect_info != s->aspect_ratio_info ||
|
||||||
s1->save_progressive_seq != s->progressive_sequence ||
|
(s1->save_progressive_seq != s->progressive_sequence && (s->height&31)) ||
|
||||||
0)
|
0)
|
||||||
{
|
{
|
||||||
|
|
||||||
@@ -2277,7 +2277,8 @@ static int decode_chunks(AVCodecContext *avctx,
|
|||||||
buf_ptr = avpriv_mpv_find_start_code(buf_ptr, buf_end, &start_code);
|
buf_ptr = avpriv_mpv_find_start_code(buf_ptr, buf_end, &start_code);
|
||||||
if (start_code > 0x1ff) {
|
if (start_code > 0x1ff) {
|
||||||
if (s2->pict_type != AV_PICTURE_TYPE_B || avctx->skip_frame <= AVDISCARD_DEFAULT) {
|
if (s2->pict_type != AV_PICTURE_TYPE_B || avctx->skip_frame <= AVDISCARD_DEFAULT) {
|
||||||
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)) {
|
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
|
||||||
|
!avctx->hwaccel) {
|
||||||
int i;
|
int i;
|
||||||
av_assert0(avctx->thread_count > 1);
|
av_assert0(avctx->thread_count > 1);
|
||||||
|
|
||||||
@@ -2337,7 +2338,8 @@ static int decode_chunks(AVCodecContext *avctx,
|
|||||||
s2->intra_dc_precision= 3;
|
s2->intra_dc_precision= 3;
|
||||||
s2->intra_matrix[0]= 1;
|
s2->intra_matrix[0]= 1;
|
||||||
}
|
}
|
||||||
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) && s->slice_count) {
|
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
|
||||||
|
!avctx->hwaccel && s->slice_count) {
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
avctx->execute(avctx, slice_decode_thread,
|
avctx->execute(avctx, slice_decode_thread,
|
||||||
@@ -2504,7 +2506,8 @@ static int decode_chunks(AVCodecContext *avctx,
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)) {
|
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
|
||||||
|
!avctx->hwaccel) {
|
||||||
int threshold = (s2->mb_height * s->slice_count +
|
int threshold = (s2->mb_height * s->slice_count +
|
||||||
s2->slice_context_count / 2) /
|
s2->slice_context_count / 2) /
|
||||||
s2->slice_context_count;
|
s2->slice_context_count;
|
||||||
|
@@ -2102,7 +2102,7 @@ static inline int hpel_motion_lowres(MpegEncContext *s,
|
|||||||
int motion_x, int motion_y)
|
int motion_x, int motion_y)
|
||||||
{
|
{
|
||||||
const int lowres = s->avctx->lowres;
|
const int lowres = s->avctx->lowres;
|
||||||
const int op_index = FFMIN(lowres, 2);
|
const int op_index = FFMIN(lowres, 3);
|
||||||
const int s_mask = (2 << lowres) - 1;
|
const int s_mask = (2 << lowres) - 1;
|
||||||
int emu = 0;
|
int emu = 0;
|
||||||
int sx, sy;
|
int sx, sy;
|
||||||
@@ -2155,7 +2155,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
|
|||||||
int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
|
int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
|
||||||
uvsx, uvsy;
|
uvsx, uvsy;
|
||||||
const int lowres = s->avctx->lowres;
|
const int lowres = s->avctx->lowres;
|
||||||
const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
|
const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
|
||||||
const int block_s = 8>>lowres;
|
const int block_s = 8>>lowres;
|
||||||
const int s_mask = (2 << lowres) - 1;
|
const int s_mask = (2 << lowres) - 1;
|
||||||
const int h_edge_pos = s->h_edge_pos >> lowres;
|
const int h_edge_pos = s->h_edge_pos >> lowres;
|
||||||
@@ -2221,7 +2221,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
|
|||||||
ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
|
ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
|
||||||
ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
|
ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
|
||||||
|
|
||||||
if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
|
if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
|
||||||
(unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
|
(unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
|
||||||
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
|
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
|
||||||
linesize >> field_based, 17, 17 + field_based,
|
linesize >> field_based, 17, 17 + field_based,
|
||||||
@@ -2261,11 +2261,12 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
|
|||||||
pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
|
pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
|
||||||
|
|
||||||
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
|
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
|
||||||
|
int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
|
||||||
uvsx = (uvsx << 2) >> lowres;
|
uvsx = (uvsx << 2) >> lowres;
|
||||||
uvsy = (uvsy << 2) >> lowres;
|
uvsy = (uvsy << 2) >> lowres;
|
||||||
if (h >> s->chroma_y_shift) {
|
if (hc) {
|
||||||
pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
|
pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
|
||||||
pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
|
pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// FIXME h261 lowres loop filter
|
// FIXME h261 lowres loop filter
|
||||||
@@ -2278,7 +2279,7 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
|
|||||||
int mx, int my)
|
int mx, int my)
|
||||||
{
|
{
|
||||||
const int lowres = s->avctx->lowres;
|
const int lowres = s->avctx->lowres;
|
||||||
const int op_index = FFMIN(lowres, 2);
|
const int op_index = FFMIN(lowres, 3);
|
||||||
const int block_s = 8 >> lowres;
|
const int block_s = 8 >> lowres;
|
||||||
const int s_mask = (2 << lowres) - 1;
|
const int s_mask = (2 << lowres) - 1;
|
||||||
const int h_edge_pos = s->h_edge_pos >> lowres + 1;
|
const int h_edge_pos = s->h_edge_pos >> lowres + 1;
|
||||||
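Raising the op_index cap from 2 to 3 in these lowres helpers fits the chroma-table change earlier in this series: at lowres 3 the block size is 8 >> 3 = 1 pixel, so index 3 of the put/avg chroma tables is now reachable, and that is exactly the slot the new *_h264_chroma_mc1 kernels fill. This reading assumes these pix_op tables are the H264Chroma tables extended above; the table wiring itself is outside these hunks.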
|
@@ -812,7 +812,8 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
|
|||||||
s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
|
s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
|
if( s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
|
||||||
|
|| !ref_picture[0]){
|
||||||
ref_picture = s->current_picture_ptr->f.data;
|
ref_picture = s->current_picture_ptr->f.data;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -826,8 +827,8 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
|
|||||||
for(i=0; i<2; i++){
|
for(i=0; i<2; i++){
|
||||||
uint8_t ** ref2picture;
|
uint8_t ** ref2picture;
|
||||||
|
|
||||||
if(s->picture_structure == s->field_select[dir][i] + 1
|
if((s->picture_structure == s->field_select[dir][i] + 1
|
||||||
|| s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
|
|| s->pict_type == AV_PICTURE_TYPE_B || s->first_field) && ref_picture[0]){
|
||||||
ref2picture= ref_picture;
|
ref2picture= ref_picture;
|
||||||
}else{
|
}else{
|
||||||
ref2picture = s->current_picture_ptr->f.data;
|
ref2picture = s->current_picture_ptr->f.data;
|
||||||
@@ -856,6 +857,9 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
|
|||||||
pix_op = s->dsp.avg_pixels_tab;
|
pix_op = s->dsp.avg_pixels_tab;
|
||||||
}
|
}
|
||||||
}else{
|
}else{
|
||||||
|
if (!ref_picture[0]) {
|
||||||
|
ref_picture = s->current_picture_ptr->f.data;
|
||||||
|
}
|
||||||
for(i=0; i<2; i++){
|
for(i=0; i<2; i++){
|
||||||
mpeg_motion(s, dest_y, dest_cb, dest_cr,
|
mpeg_motion(s, dest_y, dest_cb, dest_cr,
|
||||||
s->picture_structure != i+1,
|
s->picture_structure != i+1,
|
||||||
|
@@ -34,6 +34,7 @@
 
 #include "avcodec.h"
 #include "msrledec.h"
+#include "libavutil/imgutils.h"
 
 typedef struct MsrleContext {
     AVCodecContext *avctx;
@@ -112,7 +113,7 @@ static int msrle_decode_frame(AVCodecContext *avctx,
 
     /* FIXME how to correctly detect RLE ??? */
     if (avctx->height * istride == avpkt->size) { /* assume uncompressed */
-        int linesize = (avctx->width * avctx->bits_per_coded_sample + 7) / 8;
+        int linesize = av_image_get_linesize(avctx->pix_fmt, avctx->width, 0);
         uint8_t *ptr = s->frame.data[0];
         uint8_t *buf = avpkt->data + (avctx->height-1)*istride;
         int i, j;
@@ -58,7 +58,7 @@ enum MSV1Mode{
 };
 
 #define SKIP_PREFIX 0x8400
-#define SKIPS_MAX 0x0FFF
+#define SKIPS_MAX 0x03FF
 #define MKRGB555(in, off) ((in[off] << 10) | (in[off + 1] << 5) | (in[off + 2]))
 
 static const int remap[16] = { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15 };
@@ -235,8 +235,10 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
     if(next == END_NOT_FOUND){
         void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
 
-        if(!new_buffer)
+        if(!new_buffer) {
+            pc->index = 0;
             return AVERROR(ENOMEM);
+        }
         pc->buffer = new_buffer;
         memcpy(&pc->buffer[pc->index], *buf, *buf_size);
         pc->index += *buf_size;
@@ -249,9 +251,11 @@ int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_s
     /* append to buffer */
     if(pc->index){
         void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
-        if(!new_buffer)
+        if(!new_buffer) {
+            pc->overread_index =
+            pc->index = 0;
             return AVERROR(ENOMEM);
+        }
         pc->buffer = new_buffer;
         if (next > -FF_INPUT_BUFFER_PADDING_SIZE)
             memcpy(&pc->buffer[pc->index], *buf,
@@ -30,7 +30,7 @@
 static void add_bytes_l2_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
 {
     long i;
-    for (i = 0; i <= w - sizeof(long); i += sizeof(long)) {
+    for (i = 0; i <= w - (int)sizeof(long); i += sizeof(long)) {
         long a = *(long *)(src1 + i);
         long b = *(long *)(src2 + i);
         *(long *)(dst + i) = ((a & pb_7f) + (b & pb_7f)) ^ ((a ^ b) & pb_80);
@@ -163,6 +163,8 @@ int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s)
     if (s->maxval >= 256) {
         if (avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
             avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
+            if (s->maxval != 65535)
+                avctx->pix_fmt = AV_PIX_FMT_GRAY16;
         } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
             avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
         } else if (avctx->pix_fmt == AV_PIX_FMT_YUV420P && s->maxval < 65536) {
@@ -912,8 +912,6 @@ void ff_thread_flush(AVCodecContext *avctx)
     if (fctx->prev_thread) {
         if (fctx->prev_thread != &fctx->threads[0])
             update_context_from_thread(fctx->threads[0].avctx, fctx->prev_thread->avctx, 0);
-        if (avctx->codec->flush)
-            avctx->codec->flush(fctx->threads[0].avctx);
     }
 
     fctx->next_decoding = fctx->next_finished = 0;
@@ -925,6 +923,9 @@ void ff_thread_flush(AVCodecContext *avctx)
         p->got_frame = 0;
 
         release_delayed_buffers(p);
+
+        if (avctx->codec->flush)
+            avctx->codec->flush(fctx->threads[0].avctx);
     }
 }
 
@@ -767,7 +767,8 @@ static int synthfilt_build_sb_samples (QDM2Context *q, GetBitContext *gb, int le
     int type34_first;
     float type34_div = 0;
     float type34_predictor;
-    float samples[10], sign_bits[16];
+    float samples[10];
+    int sign_bits[16] = {0};
 
     if (length == 0) {
         // If no data use noise
@@ -67,7 +67,7 @@ static void qtrle_decode_1bpp(QtrleContext *s, int row_ptr, int lines_to_change)
      * line' at the beginning. Since we always interpret it as 'go to next line'
      * in the decoding loop (which makes code simpler/faster), the first line
      * would not be counted, so we count one more.
-     * See: https://ffmpeg.org/trac/ffmpeg/ticket/226
+     * See: https://trac.ffmpeg.org/ticket/226
      * In the following decoding loop, row_ptr will be the position of the
      * current row. */
 
@@ -84,7 +84,7 @@ static void rpza_decode_stream(RpzaContext *s)
     unsigned short *pixels = (unsigned short *)s->frame.data[0];
 
     int row_ptr = 0;
-    int pixel_ptr = 0;
+    int pixel_ptr = -4;
     int block_ptr;
     int pixel_x, pixel_y;
     int total_blocks;
@@ -140,6 +140,7 @@ static void rpza_decode_stream(RpzaContext *s)
         colorA = AV_RB16 (&s->buf[stream_ptr]);
         stream_ptr += 2;
         while (n_blocks--) {
+            ADVANCE_BLOCK()
             block_ptr = row_ptr + pixel_ptr;
             for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                 for (pixel_x = 0; pixel_x < 4; pixel_x++){
@@ -148,7 +149,6 @@ static void rpza_decode_stream(RpzaContext *s)
                 }
                 block_ptr += row_inc;
             }
-            ADVANCE_BLOCK();
         }
         break;
 
@@ -187,6 +187,7 @@ static void rpza_decode_stream(RpzaContext *s)
         if (s->size - stream_ptr < n_blocks * 4)
             return;
         while (n_blocks--) {
+            ADVANCE_BLOCK();
            block_ptr = row_ptr + pixel_ptr;
            for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                index = s->buf[stream_ptr++];
@@ -197,7 +198,6 @@ static void rpza_decode_stream(RpzaContext *s)
                 }
                 block_ptr += row_inc;
             }
-            ADVANCE_BLOCK();
         }
         break;
 
@@ -205,6 +205,7 @@ static void rpza_decode_stream(RpzaContext *s)
     case 0x00:
         if (s->size - stream_ptr < 16)
             return;
+        ADVANCE_BLOCK();
         block_ptr = row_ptr + pixel_ptr;
         for (pixel_y = 0; pixel_y < 4; pixel_y++) {
             for (pixel_x = 0; pixel_x < 4; pixel_x++){
@@ -218,7 +219,6 @@ static void rpza_decode_stream(RpzaContext *s)
             }
             block_ptr += row_inc;
         }
-        ADVANCE_BLOCK();
         break;
 
         /* Unknown opcode */
@@ -739,6 +739,11 @@ static int process_frame_obj(SANMVideoContext *ctx)
     w = bytestream2_get_le16u(&ctx->gb);
     h = bytestream2_get_le16u(&ctx->gb);
 
+    if (!w || !h) {
+        av_log(ctx->avctx, AV_LOG_ERROR, "dimensions are invalid\n");
+        return AVERROR_INVALIDDATA;
+    }
+
     if (ctx->width < left + w || ctx->height < top + h) {
         if (av_image_check_size(FFMAX(left + w, ctx->width),
                                 FFMAX(top + h, ctx->height), 0, ctx->avctx) < 0)
@@ -424,7 +424,7 @@ static int shorten_decode_frame(AVCodecContext *avctx, void *data,
         void *tmp_ptr;
         s->max_framesize = 8192; // should hopefully be enough for the first header
         tmp_ptr = av_fast_realloc(s->bitstream, &s->allocated_bitstream_size,
-                                  s->max_framesize);
+                                  s->max_framesize + FF_INPUT_BUFFER_PADDING_SIZE);
         if (!tmp_ptr) {
             av_log(avctx, AV_LOG_ERROR, "error allocating bitstream buffer\n");
             return AVERROR(ENOMEM);
@@ -437,7 +437,7 @@ static int shorten_decode_frame(AVCodecContext *avctx, void *data,
     buf_size = FFMIN(buf_size, s->max_framesize - s->bitstream_size);
     input_buf_size = buf_size;
 
-    if (s->bitstream_index + s->bitstream_size + buf_size >
+    if (s->bitstream_index + s->bitstream_size + buf_size + FF_INPUT_BUFFER_PADDING_SIZE >
         s->allocated_bitstream_size) {
         memmove(s->bitstream, &s->bitstream[s->bitstream_index],
                 s->bitstream_size);
@@ -694,7 +694,7 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data,
                 }
                 val |= h[3].values[res] << 8;
                 pred[1] += sign_extend(val, 16);
-                *samples++ = av_clip_int16(pred[1]);
+                *samples++ = pred[1];
             } else {
                 if(vlc[0].table)
                     res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3);
@@ -715,7 +715,7 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data,
                 }
                 val |= h[1].values[res] << 8;
                 pred[0] += sign_extend(val, 16);
-                *samples++ = av_clip_int16(pred[0]);
+                *samples++ = pred[0];
             }
         }
     } else { //8-bit data
|
@@ -315,7 +315,8 @@ static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer
|
|||||||
if(!sliced && !offset_dst)
|
if(!sliced && !offset_dst)
|
||||||
dst -= src_x;
|
dst -= src_x;
|
||||||
src_x=0;
|
src_x=0;
|
||||||
}else if(src_x + b_w > w){
|
}
|
||||||
|
if(src_x + b_w > w){
|
||||||
b_w = w - src_x;
|
b_w = w - src_x;
|
||||||
}
|
}
|
||||||
if(src_y<0){
|
if(src_y<0){
|
||||||
@@ -324,7 +325,8 @@ static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer
|
|||||||
if(!sliced && !offset_dst)
|
if(!sliced && !offset_dst)
|
||||||
dst -= src_y*dst_stride;
|
dst -= src_y*dst_stride;
|
||||||
src_y=0;
|
src_y=0;
|
||||||
}else if(src_y + b_h> h){
|
}
|
||||||
|
if(src_y + b_h> h){
|
||||||
b_h = h - src_y;
|
b_h = h - src_y;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -204,7 +204,8 @@ static const char *read_ts(const char *buf, int *ts_start, int *ts_end,
                "%*[ ]X1:%u X2:%u Y1:%u Y2:%u",
                &hs, &ms, &ss, ts_start, &he, &me, &se, ts_end,
                x1, x2, y1, y2);
-    buf += strcspn(buf, "\n") + 1;
+    buf += strcspn(buf, "\n");
+    buf += !!*buf;
     if (c >= 8) {
         *ts_start = 100*(ss + 60*(ms + 60*hs)) + *ts_start/10;
         *ts_end = 100*(se + 60*(me + 60*he)) + *ts_end /10;
@@ -790,8 +790,8 @@ static int svq3_decode_slice_header(AVCodecContext *avctx)
 header ^ s->watermark_key);
 }
 if (length > 0) {
-memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
+memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
 &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
 }
 skip_bits_long(&h->gb, 0);
 }
@@ -732,11 +732,9 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
 return AVERROR_INVALIDDATA;
 }

-if (s->ti.bps != avctx->bits_per_raw_sample) {
-avctx->bits_per_raw_sample = s->ti.bps;
-if ((ret = set_bps_params(avctx)) < 0)
-return ret;
-}
+avctx->bits_per_raw_sample = s->ti.bps;
+if ((ret = set_bps_params(avctx)) < 0)
+return ret;
 if (s->ti.sample_rate != avctx->sample_rate) {
 avctx->sample_rate = s->ti.sample_rate;
 set_sample_rate_params(avctx);
@@ -948,14 +948,14 @@ static av_cold int decode_init(AVCodecContext *avctx)
 if (!l->Y1_base || !l->Y2_base || !l->U1_base ||
 !l->V1_base || !l->U2_base || !l->V2_base ||
 !l->last || !l->clast) {
-av_freep(l->Y1_base);
-av_freep(l->Y2_base);
-av_freep(l->U1_base);
-av_freep(l->U2_base);
-av_freep(l->V1_base);
-av_freep(l->V2_base);
-av_freep(l->last);
-av_freep(l->clast);
+av_freep(&l->Y1_base);
+av_freep(&l->Y2_base);
+av_freep(&l->U1_base);
+av_freep(&l->U2_base);
+av_freep(&l->V1_base);
+av_freep(&l->V2_base);
+av_freep(&l->last);
+av_freep(&l->clast);
 return AVERROR(ENOMEM);
 }
 l->Y1 = l->Y1_base + l->y_stride * 4 + 4;
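
The truemotion2 hunk above changes av_freep(l->Y1_base) to av_freep(&l->Y1_base): av_freep() expects the address of the pointer so it can both free the buffer and reset the field to NULL, whereas the old calls handed it the buffer itself. A small stand-alone sketch of the same helper pattern (a local function for illustration, not FFmpeg's implementation):

    #include <stdlib.h>

    /* Takes the ADDRESS of a pointer, frees what it points to, and NULLs it,
     * which makes accidental double frees and dangling uses much harder. */
    static void freep(void *arg)
    {
        void **ptr = (void **)arg;
        free(*ptr);
        *ptr = NULL;
    }

    /* usage: freep(&l->Y1_base);
     * Passing l->Y1_base itself (as the old code did) would reinterpret the
     * first bytes of the pixel buffer as a pointer and leave the field set. */
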
@@ -474,7 +474,7 @@ static int video_get_buffer(AVCodecContext *s, AVFrame *pic)

 buf->linesize[i] = picture.linesize[i];

-buf->base[i] = av_malloc(size[i] + 16); //FIXME 16
+buf->base[i] = av_malloc(size[i] + 16 + STRIDE_ALIGN - 1); //FIXME 16
 if (buf->base[i] == NULL)
 return AVERROR(ENOMEM);

@@ -1647,10 +1647,17 @@ static int add_metadata_from_side_data(AVCodecContext *avctx, AVFrame *frame)
 if (!side_metadata)
 goto end;
 end = side_metadata + size;
+if (size && end[-1])
+return AVERROR_INVALIDDATA;
 while (side_metadata < end) {
 const uint8_t *key = side_metadata;
 const uint8_t *val = side_metadata + strlen(key) + 1;
-int ret = av_dict_set(ff_frame_get_metadatap(frame), key, val, 0);
+int ret;
+
+if (val >= end)
+return AVERROR_INVALIDDATA;
+
+ret = av_dict_set(ff_frame_get_metadatap(frame), key, val, 0);
 if (ret < 0)
 break;
 side_metadata = val + strlen(val) + 1;
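
The add_metadata_from_side_data() hunk adds two bounds checks so that a malformed metadata blob (no terminating NUL, or a key without a value) can no longer make strlen() read past the side-data buffer. A rough standalone equivalent of that validation, using plain C types instead of FFmpeg's AVFrame/AVDictionary:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* The blob must be a sequence of NUL-terminated key/value strings and must
     * itself end in a NUL byte; otherwise reject it before calling strlen(). */
    static int parse_kv_blob(const uint8_t *data, size_t size)
    {
        const uint8_t *p = data, *end = data + size;

        if (size && end[-1])            /* last byte must be the final NUL */
            return -1;

        while (p < end) {
            const uint8_t *key = p;
            const uint8_t *val = key + strlen((const char *)key) + 1;

            if (val >= end)             /* key with no value: reject */
                return -1;

            printf("%s=%s\n", key, val);
            p = val + strlen((const char *)val) + 1;
        }
        return 0;
    }
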
@@ -1938,7 +1945,7 @@ static int recode_subtitle(AVCodecContext *avctx,
 goto end;
 }
 outpkt->size -= outl;
-outpkt->data[outpkt->size - 1] = '\0';
+memset(outpkt->data + outpkt->size, 0, outl);

 end:
 if (cd != (iconv_t)-1)
@@ -1969,6 +1976,16 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
 int did_split = av_packet_split_side_data(&tmp);
 //apply_param_change(avctx, &tmp);

+if (did_split) {
+/* FFMIN() prevents overflow in case the packet wasn't allocated with
+* proper padding.
+* If the side data is smaller than the buffer padding size, the
+* remaining bytes should have already been filled with zeros by the
+* original packet allocation anyway. */
+memset(tmp.data + tmp.size, 0,
+FFMIN(avpkt->size - tmp.size, FF_INPUT_BUFFER_PADDING_SIZE));
+}
+
 pkt_recoded = tmp;
 ret = recode_subtitle(avctx, &pkt_recoded, &tmp);
 if (ret < 0) {
@@ -468,7 +468,7 @@ static int encode_plane(AVCodecContext *avctx, uint8_t *src,
 * get the offset in bits and convert to bytes.
 */
 offset += write_huff_codes(dst + sstart * width, c->slice_bits,
-width * (send - sstart), width,
+width * height + 4, width,
 send - sstart, he) >> 3;

 slice_len = offset - slice_len;
@@ -525,8 +525,7 @@ static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,

 bytestream2_init_writer(&pb, dst, pkt->size);

-av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
-width * height + FF_INPUT_BUFFER_PADDING_SIZE);
+av_fast_padded_malloc(&c->slice_bits, &c->slice_bits_size, width * height + 4);

 if (!c->slice_bits) {
 av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
@@ -46,6 +46,9 @@ int ff_vaapi_render_picture(struct vaapi_context *vactx, VASurfaceID surface)
 VABufferID va_buffers[3];
 unsigned int n_va_buffers = 0;

+if (!vactx->pic_param_buf_id)
+return 0;
+
 vaUnmapBuffer(vactx->display, vactx->pic_param_buf_id);
 va_buffers[n_va_buffers++] = vactx->pic_param_buf_id;

@@ -212,7 +215,7 @@ int ff_vaapi_mpeg_end_frame(AVCodecContext *avctx)
 ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);

 finish:
-ff_vaapi_common_end_frame(avctx->priv_data);
+ff_vaapi_common_end_frame(avctx);
 return ret;
 }

@@ -580,6 +580,8 @@ int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
 {
 int pqindex, lowquant, status;

+v->field_mode = 0;
+v->fcm = 0;
 if (v->finterpflag)
 v->interpfrm = get_bits1(gb);
 if (!v->s.avctx->codec)
@@ -162,7 +162,7 @@ static int rle_unpack(const unsigned char *src, int src_len, int src_count,
 const unsigned char *ps;
 const unsigned char *ps_end;
 unsigned char *pd;
-int i, l;
+int i, j, l;
 unsigned char *dest_end = dest + dest_len;

 ps = src;
@@ -188,9 +188,9 @@ static int rle_unpack(const unsigned char *src, int src_len, int src_count,
 ps += l;
 pd += l;
 } else {
-if (dest_end - pd < i || ps_end - ps < 2)
+if (dest_end - pd < 2*l || ps_end - ps < 2)
 return ps - src;
-for (i = 0; i < l; i++) {
+for (j = 0; j < l; j++) {
 *pd++ = ps[0];
 *pd++ = ps[1];
 }
@@ -276,6 +276,11 @@ static int decode_hextile(VmncContext *c, uint8_t* dst, const uint8_t* src, int
 }
 xy = *src++;
 wh = *src++;
+if ( (xy >> 4) + (wh >> 4) + 1 > w - i
+|| (xy & 0xF) + (wh & 0xF)+1 > h - j) {
+av_log(c->avctx, AV_LOG_ERROR, "Rectangle outside picture\n");
+return AVERROR_INVALIDDATA;
+}
 paint_rect(dst2, xy >> 4, xy & 0xF, (wh>>4)+1, (wh & 0xF)+1, fg, bpp, stride);
 }
 }
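
The decode_hextile() hunk rejects subrectangles whose packed offset/size nibbles would paint outside the remaining (w - i) x (h - j) area of the current tile. The same check pulled out into a tiny helper for readability (a sketch, not the code the patch actually adds):

    /* xy packs the x offset in the high nibble and y in the low nibble;
     * wh packs (width - 1, height - 1) the same way. Returns nonzero if the
     * subrectangle fits inside the remaining area of the tile at (i, j). */
    static int subrect_fits(unsigned xy, unsigned wh, int w, int h, int i, int j)
    {
        int sx =  xy >> 4,       sy =  xy & 0xF;
        int sw = (wh >> 4) + 1,  sh = (wh & 0xF) + 1;

        return sx + sw <= w - i && sy + sh <= h - j;
    }
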
@@ -506,6 +506,10 @@ static int wma_decode_block(WMACodecContext *s)
 coef escape coding */
 total_gain = 1;
 for(;;) {
+if (get_bits_left(&s->gb) < 7) {
+av_log(s->avctx, AV_LOG_ERROR, "total_gain overread\n");
+return AVERROR_INVALIDDATA;
+}
 a = get_bits(&s->gb, 7);
 total_gain += a;
 if (a != 127)
@@ -127,8 +127,8 @@ typedef struct WmallDecodeCtx {

 int8_t mclms_order;
 int8_t mclms_scaling;
-int16_t mclms_coeffs[128];
-int16_t mclms_coeffs_cur[4];
+int16_t mclms_coeffs[WMALL_MAX_CHANNELS * WMALL_MAX_CHANNELS * 32];
+int16_t mclms_coeffs_cur[WMALL_MAX_CHANNELS * WMALL_MAX_CHANNELS];
 int16_t mclms_prevvalues[WMALL_MAX_CHANNELS * 2 * 32];
 int16_t mclms_updates[WMALL_MAX_CHANNELS * 2 * 32];
 int mclms_recent;
@@ -1572,7 +1572,8 @@ static int decode_packet(AVCodecContext *avctx, void *data,
 (frame_size = show_bits(gb, s->log2_frame_size)) &&
 frame_size <= remaining_bits(s, gb)) {
 save_bits(s, gb, frame_size, 0);
-s->packet_done = !decode_frame(s, data, got_frame_ptr);
+if (!s->packet_loss)
+s->packet_done = !decode_frame(s, data, got_frame_ptr);
 } else if (!s->len_prefix
 && s->num_saved_bits > get_bits_count(&s->gb)) {
 /** when the frames do not have a length prefix, we don't know
@@ -1764,7 +1764,7 @@ static av_cold void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
 const int high_bit_depth = bit_depth > 8;

 #if HAVE_SSE2_INLINE
-if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX) {
+if (!high_bit_depth && avctx->idct_algo == FF_IDCT_XVIDMMX && avctx->lowres == 0) {
 c->idct_put = ff_idct_xvid_sse2_put;
 c->idct_add = ff_idct_xvid_sse2_add;
 c->idct = ff_idct_xvid_sse2;
@@ -74,7 +74,7 @@ static av_always_inline av_const int64_t MUL64(int a, int b)
 static inline av_const int mid_pred(int a, int b, int c)
 {
 int i=b;
-__asm__ volatile(
+__asm__ (
 "cmp %2, %1 \n\t"
 "cmovg %1, %0 \n\t"
 "cmovg %2, %1 \n\t"
@@ -26,7 +26,7 @@
 #include "libavcodec/mpegvideo.h"
 #include "dsputil_mmx.h"

-#if HAVE_INLINE_ASM
+#if HAVE_MMX_INLINE

 static void dct_unquantize_h263_intra_mmx(MpegEncContext *s,
 int16_t *block, int n, int qscale)
@@ -574,11 +574,11 @@ static void denoise_dct_sse2(MpegEncContext *s, int16_t *block){
 );
 }

-#endif /* HAVE_INLINE_ASM */
+#endif /* HAVE_MMX_INLINE */

 av_cold void ff_MPV_common_init_x86(MpegEncContext *s)
 {
-#if HAVE_INLINE_ASM
+#if HAVE_MMX_INLINE
 int mm_flags = av_get_cpu_flags();

 if (mm_flags & AV_CPU_FLAG_MMX) {
@@ -596,5 +596,5 @@ av_cold void ff_MPV_common_init_x86(MpegEncContext *s)
 s->denoise_dct= denoise_dct_mmx;
 }
 }
-#endif /* HAVE_INLINE_ASM */
+#endif /* HAVE_MMX_INLINE */
 }
@@ -43,7 +43,7 @@ void ff_vp3_v_loop_filter_mmxext(uint8_t *src, int stride,
 void ff_vp3_h_loop_filter_mmxext(uint8_t *src, int stride,
 int *bounding_values);

-#if HAVE_INLINE_ASM
+#if HAVE_MMX_INLINE

 #define MOVQ_BFE(regd) \
 __asm__ volatile ( \
@@ -95,15 +95,15 @@ static void put_vp_no_rnd_pixels8_l2_mmx(uint8_t *dst, const uint8_t *a, const u
 :"memory");
 // STOP_TIMER("put_vp_no_rnd_pixels8_l2_mmx")
 }
-#endif /* HAVE_INLINE_ASM */
+#endif /* HAVE_MMX_INLINE */

 av_cold void ff_vp3dsp_init_x86(VP3DSPContext *c, int flags)
 {
 int cpuflags = av_get_cpu_flags();

-#if HAVE_INLINE_ASM
+#if HAVE_MMX_INLINE
 c->put_no_rnd_pixels_l2 = put_vp_no_rnd_pixels8_l2_mmx;
-#endif /* HAVE_INLINE_ASM */
+#endif /* HAVE_MMX_INLINE */

 #if ARCH_X86_32
 if (EXTERNAL_MMX(cpuflags)) {
@@ -244,6 +244,11 @@ static inline void xan_wc3_copy_pixel_run(XanContext *s, int x, int y,
 curframe_x = x;
 prevframe_index = (y + motion_y) * stride + x + motion_x;
 prevframe_x = x + motion_x;
+
+if (prev_palette_plane == palette_plane && FFABS(curframe_index - prevframe_index) < pixel_count) {
+return ;
+}
+
 while (pixel_count &&
 curframe_index < s->frame_size &&
 prevframe_index < s->frame_size) {
@@ -59,7 +59,7 @@ static int xbm_decode_frame(AVCodecContext *avctx, void *data,
 int number, len;

 ptr += strcspn(ptr, "#");
-if (sscanf(ptr, "#define %256s %u", name, &number) != 2) {
+if (sscanf(ptr, "#define %255s %u", name, &number) != 2) {
 av_log(avctx, AV_LOG_ERROR, "Unexpected preprocessor directive\n");
 return AVERROR_INVALIDDATA;
 }
@@ -757,6 +757,10 @@ static int v4l2_set_parameters(AVFormatContext *s1)
 standard.index = i;
 if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
 ret = AVERROR(errno);
+if (ret == AVERROR(EINVAL)) {
+tpf = &streamparm.parm.capture.timeperframe;
+break;
+}
 av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMSTD): %s\n", av_err2str(ret));
 return ret;
 }
@@ -985,6 +989,9 @@ static int v4l2_read_header(AVFormatContext *s1)
 if (codec_id == AV_CODEC_ID_RAWVIDEO)
 st->codec->codec_tag =
 avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
+else if (codec_id == AV_CODEC_ID_H264) {
+st->need_parsing = AVSTREAM_PARSE_HEADERS;
+}
 if (desired_format == V4L2_PIX_FMT_YVU420)
 st->codec->codec_tag = MKTAG('Y', 'V', '1', '2');
 else if (desired_format == V4L2_PIX_FMT_YVU410)
@@ -78,6 +78,7 @@ struct x11grab {
 int show_region; /**< set by a private option. */
 char *framerate; /**< Set by a private option. */

+Cursor c;
 Window region_win; /**< This is used by show_region option. */
 };

@@ -353,7 +354,6 @@ paint_mouse_pointer(XImage *image, struct x11grab *s)
 * Anyone who performs further investigation of the xlib API likely risks
 * permanent brain damage. */
 uint8_t *pix = image->data;
-Cursor c;
 Window w;
 XSetWindowAttributes attr;

@@ -361,9 +361,10 @@ paint_mouse_pointer(XImage *image, struct x11grab *s)
 if (image->bits_per_pixel != 24 && image->bits_per_pixel != 32)
 return;

-c = XCreateFontCursor(dpy, XC_left_ptr);
+if(!s->c)
+s->c = XCreateFontCursor(dpy, XC_left_ptr);
 w = DefaultRootWindow(dpy);
-attr.cursor = c;
+attr.cursor = s->c;
 XChangeWindowAttributes(dpy, w, CWCursor, &attr);

 xcim = XFixesGetCursorImage(dpy);
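
The x11grab hunks move the font cursor into the context and create it lazily, so paint_mouse_pointer() no longer allocates a fresh server-side cursor on every captured frame. A sketch of the same lazy-initialization pattern against plain Xlib (struct grab_ctx stands in for struct x11grab):

    #include <X11/Xlib.h>
    #include <X11/cursorfont.h>

    struct grab_ctx {
        Cursor c;   /* 0 (None) until the first frame needs it */
    };

    /* Create the cursor once and reuse it; the old per-frame
     * XCreateFontCursor() call leaked one cursor per captured frame. */
    static Cursor get_cached_cursor(struct grab_ctx *ctx, Display *dpy)
    {
        if (!ctx->c)
            ctx->c = XCreateFontCursor(dpy, XC_left_ptr);
        return ctx->c;
    }
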
@@ -117,6 +117,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
 ff_get_audio_buffer(inlink, AV_PERM_WRITE,
 insamples->audio->nb_samples);
 int ret;
+int len;

 if (!outsamples)
 return AVERROR(ENOMEM);
@@ -126,16 +127,20 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
 out = (int16_t *)outsamples->data[0];
 in = (int16_t *)insamples ->data[0];

+len = FFMIN(NUMTAPS, 2*insamples->audio->nb_samples);
 // copy part of new input and process with saved input
-memcpy(taps+NUMTAPS, in, NUMTAPS * sizeof(*taps));
-out = scalarproduct(taps, taps + NUMTAPS, out);
+memcpy(taps+NUMTAPS, in, len * sizeof(*taps));
+out = scalarproduct(taps, taps + len, out);

 // process current input
-endin = in + insamples->audio->nb_samples * 2 - NUMTAPS;
-scalarproduct(in, endin, out);
+if (2*insamples->audio->nb_samples >= NUMTAPS ){
+endin = in + insamples->audio->nb_samples * 2 - NUMTAPS;
+scalarproduct(in, endin, out);

 // save part of input for next round
 memcpy(taps, endin, NUMTAPS * sizeof(*taps));
+} else
+memmove(taps, taps + 2*insamples->audio->nb_samples, NUMTAPS * sizeof(*taps));

 ret = ff_filter_frame(outlink, outsamples);
 avfilter_unref_buffer(insamples);
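
The af_earwax hunk clamps how much of the incoming frame is copied into the filter's history when the frame holds fewer than NUMTAPS samples, and slides the saved taps instead of reading past the end of the input. A standalone sketch of that buffer handling (the NUMTAPS value and buffer layout are assumed here for illustration; taps must hold at least 2*NUMTAPS samples):

    #include <stdint.h>
    #include <string.h>

    #define NUMTAPS 64  /* assumed filter length for this sketch */

    /* nb_samples_x2 is the number of int16_t values in the frame
     * (stereo frames carry two per audio sample, as in the filter). */
    static void update_history(int16_t *taps, const int16_t *in, int nb_samples_x2)
    {
        int len = nb_samples_x2 < NUMTAPS ? nb_samples_x2 : NUMTAPS;

        /* append only as much new input as actually exists */
        memcpy(taps + NUMTAPS, in, len * sizeof(*taps));

        if (nb_samples_x2 >= NUMTAPS) {
            /* long frame: its tail becomes the new history */
            memcpy(taps, in + nb_samples_x2 - NUMTAPS, NUMTAPS * sizeof(*taps));
        } else {
            /* short frame: shift the existing history by what was consumed */
            memmove(taps, taps + nb_samples_x2, NUMTAPS * sizeof(*taps));
        }
    }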