Compare commits
375 Commits
| SHA1 |
| --- |
| 73bfb21672 |
| 86dac9d6b1 |
| 645a299132 |
| b8c5cd5b8d |
| e256506950 |
| 3cb8490066 |
| 8d0112d8fc |
| fdb05ff57b |
| 676dff8c54 |
| 95d6bd95b1 |
| c80c0b2637 |
| 190e8b4283 |
| 327b284b58 |
| c34ec56ea8 |
| 2f4c2cc6d3 |
| 9814419deb |
| facd212f4a |
| ec02afcb6e |
| 81f80d7b48 |
| cbda382526 |
| 03be529f39 |
| 07dcf5b05a |
| aef0c97b45 |
| aa3b63a6a5 |
| 9db3ed2583 |
| 9d49f4e081 |
| 7add0b0692 |
| 097417299a |
| 3a691185f7 |
| 6b4fc845f5 |
| 485f78be20 |
| 21ebfcdd43 |
| 5601f216bf |
| 0a3371f382 |
| a1fec9d141 |
| 2894f27761 |
| bbef5b0b86 |
| de84d683b2 |
| e9179e6673 |
| 633461d8bb |
| 77c20198bb |
| ad0c39c1f1 |
| 3eee7e0db6 |
| e2e66f2f99 |
| b41bc71143 |
| 1b9a62c357 |
| 93c9f1b97c |
| e76a5a9c28 |
| 9f650fb5fb |
| 9e835572f8 |
| d7a8d07fd8 |
| a2dc8dcb66 |
| 1beea3b859 |
| 0baea332cb |
| 396b47d2a7 |
| dec5586bc7 |
| c5587516ca |
| 94bd579319 |
| eb6d64edef |
| b69c7f20e8 |
| 31b697f19c |
| 798b3ed3fb |
| 62b0462e5f |
| 789f433bc6 |
| 3756b306a2 |
| ded9931d16 |
| c17da32ba2 |
| 9841654c15 |
| 42eaec076b |
| 473281193b |
| 11f98c83d1 |
| eb1aa871d4 |
| 6222ee068e |
| c5f52a9440 |
| 45509d0730 |
| e78e2dc6e1 |
| dc1cf5febb |
| 2c5898c1d3 |
| b9d2c2e987 |
| 24fefa73db |
| 9189ae8ff4 |
| 7520f1fb3a |
| 437e50b3d6 |
| b3e98715c0 |
| ed75a9a60e |
| 33d74e62c2 |
| 2fcfcd5c87 |
| 46dfd59ae1 |
| 07b98ea396 |
| b4f08f34a8 |
| c1e048dffe |
| ce35c10b88 |
| 3535f0867e |
| 6c5a57db94 |
| 961bbb98cf |
| e43872c3a9 |
| 251d66d746 |
| 26dddec466 |
| 03ea398227 |
| f6cfd160ca |
| 5e8b8e4b9d |
| 86e5769525 |
| d37e539d5e |
| 0b99a18ebc |
| aa17021e81 |
| d68ae96ea8 |
| 37800352bb |
| 29abba3572 |
| d44d87e172 |
| 0872359f9f |
| 135a09c46e |
| 1eb59e4d54 |
| a76198ed28 |
| 66b1f5894a |
| 91c3a829a4 |
| a2933aa59a |
| 857eaa45ba |
| 8df5b0250d |
| c633427e92 |
| 0ee7505e63 |
| 9bbe24d455 |
| 6e5dd43bf9 |
| 5e16809d49 |
| 7a6907fcc3 |
| 74e52afe8f |
| 8eaecab5f4 |
| c7b8f02940 |
| 8a9feeff9e |
| 2b8effcda3 |
| 306ee95088 |
| 02de44073a |
| 03dba25a40 |
| 50cb695bf1 |
| 93f919d0b4 |
| ce424e57d6 |
| 489c180451 |
| 56ac2cbd04 |
| 606bbd50b1 |
| 2cd5640bce |
| 9920a8a83f |
| f3c8a8b087 |
| 60f94f7084 |
| 7f7cf051ed |
| 3231e7ab64 |
| 552fe9b07f |
| 3ed4dc9228 |
| 4ede955d86 |
| 195fcbff2b |
| 96d1a8f014 |
| 0bf0de7185 |
| c58d7f9eb5 |
| 11a61dd0e2 |
| 5865d599c3 |
| d03fd2c805 |
| 946a106995 |
| fed28fe054 |
| 0a6d397911 |
| 081f4f5f56 |
| f953c3c234 |
| dcce698fd8 |
| 807b738840 |
| a507fea707 |
| 124ec8b130 |
| e1f0c41e1a |
| 8d7839fc7c |
| bbd632082b |
| e4fb53c73a |
| bd41211395 |
| e86074e6ef |
| 8da037af33 |
| 41e8591515 |
| a04bb8d6e7 |
| 1298aa8318 |
| e5fcc16a1f |
| fe461238d3 |
| b52952c6e9 |
| b65c290f7f |
| 437848e37a |
| 09e3fe79fc |
| e4b1cffade |
| 06f7e87e15 |
| 420f63984b |
| 91437631d7 |
| 0d6ed2f13f |
| a88236f3d5 |
| 02018a359e |
| 451bc8ee2f |
| ae7ea2eabf |
| 0bcf514198 |
| 1ee5e2ce3d |
| 6e83c26620 |
| a4de70df20 |
| eaf64192d8 |
| c074feed29 |
| 53c3abc108 |
| 6333c6c17d |
| 5fa56e6e62 |
| cf7f798984 |
| 244a58fff0 |
| addbaf1348 |
| 694b7cd873 |
| ecda9b90ec |
| 52254067b3 |
| af9b62654d |
| 80c268eaae |
| 8cd67ddde4 |
| c53effc41b |
| ede7388800 |
| 36d8914f1b |
| 146b187113 |
| 43d6764327 |
| 5123541913 |
| 01f9540320 |
| 00915d3cd2 |
| 58d7b835e3 |
| d16515ae5f |
| 3a6bc3e381 |
| e8ff797206 |
| 3ecbd911ff |
| 5e8eaa26b2 |
| 21d3e0ac9e |
| 744e7eea5d |
| d7dbc687e3 |
| 7997acee05 |
| 4dfd4399ea |
| f0d56d4f55 |
| 71a4a9183f |
| 80588930ff |
| bd2266975c |
| e68114868f |
| 4533e8b30a |
| a8dd7fe5b9 |
| 503322f97c |
| 574bf2ce4c |
| 1f2f6b247a |
| 724deeb7a8 |
| 62899d79a3 |
| 45283653a3 |
| 6af1793dd5 |
| 4310ba273d |
| ba8ab4e7ae |
| e0fcad7761 |
| 9455a023be |
| 03bda44689 |
| 8e8dc210cd |
| 3af977d4d6 |
| cb0d255e2f |
| d2ceca6e8b |
| e8411c3b6d |
| 3d5c38ca7d |
| 8713b8e3fb |
| 714dd84f20 |
| ebd74c42a2 |
| 0597f05237 |
| 227ac71a60 |
| d4e85004ec |
| 514917bf1c |
| 9ae26ef9c3 |
| b9be91c03d |
| d844179141 |
| 707cfea374 |
| ebefc92a3d |
| 11b3462aa3 |
| 2340ce6954 |
| 1f5678c5e4 |
| 9a2387cfb1 |
| 696a74e815 |
| 3938b67621 |
| eb89e1b98d |
| 05de372350 |
| b5f92f9603 |
| 42a8d32697 |
| 945a1b7ab9 |
| cf7bb6ceb1 |
| 5f390ee944 |
| b24e68c9c3 |
| 353ff5b4eb |
| 47c23f9392 |
| d51e0dd9b4 |
| f30bd7c215 |
| e459c80fdd |
| 15f122316e |
| b546b7a029 |
| 8f596a4599 |
| eea1c5daa0 |
| 2880de35b5 |
| 0143eb9109 |
| 93fe53da91 |
| 88ec8021ce |
| 8130449f32 |
| f741c39752 |
| af1bf7f277 |
| e770488b50 |
| 6294d84dca |
| a68d4f55de |
| fdcdfb36b7 |
| 238360665b |
| 10b8ae5b10 |
| 63b38d4a91 |
| 260f37ee4d |
| da32e3c398 |
| 6e3830cca2 |
| 6a35680195 |
| cb29eb144b |
| 2ce46a959c |
| 8ca0d74d59 |
| 1552b850db |
| 47e18e2e03 |
| 0162ad1a59 |
| fa73d13678 |
| 8401ed651b |
| fcaf78f5f9 |
| 8e5a021512 |
| a23748a804 |
| 6961f9f9f4 |
| 11dfd2a74a |
| f6c1fdba22 |
| f294748cbc |
| 40e6c148f5 |
| 9081b4dddd |
| 58e2d529b3 |
| b9550ea116 |
| 58bdec57fa |
| 7434225ed1 |
| c9f17afe4e |
| 6d33c8ec84 |
| 238532700e |
| ed9ab30f4c |
| 1fabe4ae8c |
| 4ccde2d356 |
| fb487e2f66 |
| be9074debc |
| fa29e89d41 |
| f6cb3827df |
| c40631baf7 |
| ce364088ef |
| ca14a2d0e4 |
| b77819afbc |
| dccac759d3 |
| 85b8b16917 |
| d946e4c1bc |
| 0c5e6c542f |
| d21bf0d27b |
| 03562c44c0 |
| ee21d270f8 |
| 9f7119b7fe |
| d375d6395c |
| b1432e905d |
| 1bccf68cae |
| 2eb15cdeef |
| b701e26a4e |
| ea1806ce65 |
| ff409c7d80 |
| c9be276f8b |
| d8e89a3726 |
| 7e8d27c851 |
| 7fd6c9fb49 |
| 175b53d051 |
| 7f604a048e |
| 7e513d85e8 |
| b6a0f5cde8 |
| 1147d39bca |
| 30e58e65e5 |
| af40847681 |
| fc53a09909 |
| 4c1e4bc6af |
| c6f2ad9f94 |
| cf4099161a |
| d791e8c990 |
| f1a2364ceb |
| b00444c0bb |
| 29b3ca743d |
| a32e45df82 |
| 7a2254ab26 |
| cb95c1935f |
@@ -65,7 +65,7 @@ struct SwsContext *sws_opts;
 SwrContext *swr_opts;
 AVDictionary *format_opts, *codec_opts;
 
-const int this_year = 2014;
+const int this_year = 2015;
 
 static FILE *report_file;
 
configure (vendored): 42 changed lines

@@ -797,6 +797,13 @@ check_ld(){
 check_cmd $ld $LDFLAGS $flags $(ld_o $TMPE) $TMPO $libs $extralibs
 }
 
+print_include(){
+hdr=$1
+test "${hdr%.h}" = "${hdr}" &&
+echo "#include $hdr" ||
+echo "#include <$hdr>"
+}
+
 check_code(){
 log check_code "$@"
 check=$1
@@ -805,7 +812,7 @@ check_code(){
 shift 3
 {
 for hdr in $headers; do
-echo "#include <$hdr>"
+print_include $hdr
 done
 echo "int main(void) { $code; return 0; }"
 } | check_$check "$@"
@@ -889,7 +896,7 @@ check_func_headers(){
 shift 2
 {
 for hdr in $headers; do
-echo "#include <$hdr>"
+print_include $hdr
 done
 for func in $funcs; do
 echo "long check_$func(void) { return (long) $func; }"
@@ -1053,6 +1060,26 @@ require_pkg_config(){
 add_extralibs $(get_safe ${pkg}_libs)
 }
 
+require_libfreetype(){
+log require_libfreetype "$@"
+pkg="freetype2"
+check_cmd $pkg_config --exists --print-errors $pkg \
+|| die "ERROR: $pkg not found"
+pkg_cflags=$($pkg_config --cflags $pkg)
+pkg_libs=$($pkg_config --libs $pkg)
+{
+echo "#include <ft2build.h>"
+echo "#include FT_FREETYPE_H"
+echo "long check_func(void) { return (long) FT_Init_FreeType; }"
+echo "int main(void) { return 0; }"
+} | check_ld "cc" $pkg_cflags $pkg_libs \
+&& set_safe ${pkg}_cflags $pkg_cflags \
+&& set_safe ${pkg}_libs $pkg_libs \
+|| die "ERROR: $pkg not found"
+add_cflags $(get_safe ${pkg}_cflags)
+add_extralibs $(get_safe ${pkg}_libs)
+}
+
 hostcc_o(){
 eval printf '%s\\n' $HOSTCC_O
 }
@@ -1338,6 +1365,7 @@ HAVE_LIST="
 alsa_asoundlib_h
 altivec_h
 arpa_inet_h
+as_object_arch
 asm_mod_q
 asm_mod_y
 asm_types_h
@@ -3537,6 +3565,11 @@ EOF
 
 enabled_all armv6t2 shared !pic && enable_pic
 
+# llvm's integrated assembler supports .object_arch from llvm 3.5
+[ "$objformat" = elf ] && check_as <<EOF && enable as_object_arch
+.object_arch armv4
+EOF
+
 elif enabled mips; then
 
 check_inline_asm loongson '"dmult.g $1, $2, $3"'
@@ -3664,6 +3697,7 @@ EOF
 fi
 
 check_ldflags -Wl,--as-needed
+check_ldflags -Wl,-z,noexecstack
 
 if check_func dlopen; then
 ldl=
@@ -3837,7 +3871,7 @@ enabled gnutls && require_pkg_config gnutls gnutls/gnutls.h gnutls_global_in
 enabled libiec61883 && require libiec61883 libiec61883/iec61883.h iec61883_cmp_connect -lraw1394 -lavc1394 -lrom1394 -liec61883
 enabled libaacplus && require "libaacplus >= 2.0.0" aacplus.h aacplusEncOpen -laacplus
 enabled libass && require_pkg_config libass ass/ass.h ass_library_init
-enabled libbluray && require libbluray libbluray/bluray.h bd_open -lbluray
+enabled libbluray && require_pkg_config libbluray libbluray/bluray.h bd_open
 enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 &&
 { check_lib celt/celt.h celt_decoder_create_custom -lcelt0 ||
 die "ERROR: libcelt must be installed and version must be >= 0.11.0."; }
@@ -3846,7 +3880,7 @@ enabled libfaac && require2 libfaac "stdint.h faac.h" faacEncGetVersion -lfaa
 enabled libfdk_aac && require libfdk_aac fdk-aac/aacenc_lib.h aacEncOpen -lfdk-aac
 flite_libs="-lflite_cmu_time_awb -lflite_cmu_us_awb -lflite_cmu_us_kal -lflite_cmu_us_kal16 -lflite_cmu_us_rms -lflite_cmu_us_slt -lflite_usenglish -lflite_cmulex -lflite"
 enabled libflite && require2 libflite "flite/flite.h" flite_init $flite_libs
-enabled libfreetype && require_pkg_config freetype2 "ft2build.h freetype/freetype.h" FT_Init_FreeType
+enabled libfreetype && require_libfreetype
 enabled libgsm && require libgsm gsm/gsm.h gsm_create -lgsm
 enabled libilbc && require libilbc ilbc.h WebRtcIlbcfix_InitDecode -lilbc
 enabled libmodplug && require libmodplug libmodplug/modplug.h ModPlug_Load -lmodplug
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
 # This could be handy for archiving the generated documentation or
 # if some version control system is used.
 
-PROJECT_NUMBER = 1.1.9
+PROJECT_NUMBER = 1.1.16
 
 # With the PROJECT_LOGO tag one can specify an logo or icon that is included
 # in the documentation. The maximum height of the logo should not exceed 55
@@ -1254,11 +1254,11 @@ ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
 You can put many streams of the same type in the output:
 
 @example
-ffmpeg -i test1.avi -i test2.avi -map 0:3 -map 0:2 -map 0:1 -map 0:0 -c copy test12.nut
+ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
 @end example
 
-The resulting output file @file{test12.avi} will contain first four streams from
-the input file in reverse order.
+The resulting output file @file{test12.nut} will contain the first four streams
+from the input files in reverse order.
 
 @item
 To force CBR video output:
@@ -51,8 +51,9 @@ The toolchain provided with Xcode is sufficient to build the basic
 unacelerated code.
 
 Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
+@url{https://github.com/FFmpeg/gas-preprocessor} or
 @url{http://github.com/yuvi/gas-preprocessor} to build the optimized
-assembler functions. Just download the Perl script and put it somewhere
+assembler functions. Put the Perl script somewhere
 in your PATH, FFmpeg's configure will pick it up automatically.
 
 Mac OS X on amd64 and x86 requires @command{yasm} to build most of the
|
36
ffmpeg.c
36
ffmpeg.c
@@ -578,6 +578,25 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
|||||||
bsfc = bsfc->next;
|
bsfc = bsfc->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
|
||||||
|
ost->last_mux_dts != AV_NOPTS_VALUE &&
|
||||||
|
pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
|
||||||
|
av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
|
||||||
|
"%d:%d; previous: %"PRId64", current: %"PRId64"; ",
|
||||||
|
ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
|
||||||
|
if (exit_on_error) {
|
||||||
|
av_log(NULL, AV_LOG_FATAL, "aborting.\n");
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
|
||||||
|
"in incorrect timestamps in the output file.\n",
|
||||||
|
ost->last_mux_dts + 1);
|
||||||
|
pkt->dts = ost->last_mux_dts + 1;
|
||||||
|
if (pkt->pts != AV_NOPTS_VALUE)
|
||||||
|
pkt->pts = FFMAX(pkt->pts, pkt->dts);
|
||||||
|
}
|
||||||
|
ost->last_mux_dts = pkt->dts;
|
||||||
|
|
||||||
pkt->stream_index = ost->index;
|
pkt->stream_index = ost->index;
|
||||||
|
|
||||||
if (debug_ts) {
|
if (debug_ts) {
|
||||||
@@ -996,6 +1015,19 @@ static void do_video_stats(OutputStream *ost, int frame_size)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void finish_output_stream(OutputStream *ost)
|
||||||
|
{
|
||||||
|
OutputFile *of = output_files[ost->file_index];
|
||||||
|
int i;
|
||||||
|
|
||||||
|
ost->finished = 1;
|
||||||
|
|
||||||
|
if (of->shortest) {
|
||||||
|
for (i = 0; i < of->ctx->nb_streams; i++)
|
||||||
|
output_streams[of->ost_index + i]->finished = 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get and encode new output from any of the filtergraphs, without causing
|
* Get and encode new output from any of the filtergraphs, without causing
|
||||||
* activity.
|
* activity.
|
||||||
@@ -1988,7 +2020,7 @@ static int transcode_init(void)
|
|||||||
AVCodecContext *codec;
|
AVCodecContext *codec;
|
||||||
OutputStream *ost;
|
OutputStream *ost;
|
||||||
InputStream *ist;
|
InputStream *ist;
|
||||||
char error[1024];
|
char error[1024] = {0};
|
||||||
int want_sdp = 1;
|
int want_sdp = 1;
|
||||||
|
|
||||||
/* init framerate emulation */
|
/* init framerate emulation */
|
||||||
@@ -2764,7 +2796,7 @@ static int process_input(int file_index)
|
|||||||
|
|
||||||
if (ost->source_index == ifile->ist_index + i &&
|
if (ost->source_index == ifile->ist_index + i &&
|
||||||
(ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
|
(ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
|
||||||
close_output_stream(ost);
|
finish_output_stream(ost);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ffmpeg.h: 2 changed lines

@@ -300,6 +300,8 @@ typedef struct OutputStream {
 /* pts of the first frame encoded for this stream, used for limiting
 * recording time */
 int64_t first_pts;
+/* dts of the last packet sent to the muxer */
+int64_t last_mux_dts;
 AVBitStreamFilterContext *bitstream_filters;
 AVCodec *enc;
 int64_t max_frames;
@@ -41,12 +41,15 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFo
 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
 int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
 enum AVPixelFormat best= AV_PIX_FMT_NONE;
+const enum AVPixelFormat mjpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
+const enum AVPixelFormat ljpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
+AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
+
 if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
 if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
-p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
+p = mjpeg_formats;
 } else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
-p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
-AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
+p =ljpeg_formats;
 }
 }
 for (; *p != AV_PIX_FMT_NONE; p++) {
@@ -1021,6 +1021,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
 input_streams[source_index]->discard = 0;
 input_streams[source_index]->st->discard = AVDISCARD_NONE;
 }
+ost->last_mux_dts = AV_NOPTS_VALUE;
 
 return ost;
 }
@@ -1824,7 +1825,8 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
 for (j = 0; j < nb_input_files; j++) {
 for (i = 0; i < input_files[j]->nb_streams; i++) {
 AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
-if (c->codec_type != AVMEDIA_TYPE_VIDEO)
+if (c->codec_type != AVMEDIA_TYPE_VIDEO ||
+!c->time_base.num)
 continue;
 fr = c->time_base.den * 1000 / c->time_base.num;
 if (fr == 25000) {
@@ -1938,6 +1940,10 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
 av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
 return AVERROR(EINVAL);
 }
 
+av_dict_copy(&o->g->codec_opts, codec_opts, 0);
+av_dict_copy(&o->g->format_opts, format_opts, 0);
+
 return 0;
 }
 
@@ -44,7 +44,7 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
 static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
 int *got_frame, AVPacket *avpkt)
 {
-int line = 0, ret;
+int line, ret;
 const int width = avctx->width;
 AVFrame *pic = avctx->coded_frame;
 uint16_t *y, *u, *v;
@@ -54,8 +54,8 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
 if (pic->data[0])
 avctx->release_buffer(avctx, pic);
 
-if (width == 1) {
-av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
+if (width <= 1 || avctx->height <= 0) {
+av_log(avctx, AV_LOG_ERROR, "Dimensions %dx%d not supported.\n", width, avctx->height);
 return AVERROR_INVALIDDATA;
 }
 if (avpkt->size < avctx->height * stride) {
@@ -68,45 +68,45 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
 if ((ret = ff_get_buffer(avctx, pic)) < 0)
 return ret;
 
-y = (uint16_t *)pic->data[0];
-u = (uint16_t *)pic->data[1];
-v = (uint16_t *)pic->data[2];
 line_end = avpkt->data + stride;
-while (line++ < avctx->height) {
-while (1) {
-uint32_t t = AV_RL32(src);
+for (line = 0; line < avctx->height; line++) {
+uint16_t y_temp[6] = {0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000};
+uint16_t u_temp[3] = {0x8000, 0x8000, 0x8000};
+uint16_t v_temp[3] = {0x8000, 0x8000, 0x8000};
+int x;
+y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+
+for (x = 0; x < width; x += 6) {
+uint32_t t;
+if (width - x < 6 || line_end - src < 16) {
+y = y_temp;
+u = u_temp;
+v = v_temp;
+}
+
+if (line_end - src < 4)
+break;
+
+t = AV_RL32(src);
 src += 4;
 *u++ = t << 6 & 0xFFC0;
 *y++ = t >> 4 & 0xFFC0;
 *v++ = t >> 14 & 0xFFC0;
 
-if (src >= line_end - 1) {
-*y = 0x80;
-src++;
-line_end += stride;
-y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
-u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
-v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+if (line_end - src < 4)
 break;
-}
 
 t = AV_RL32(src);
 src += 4;
 *y++ = t << 6 & 0xFFC0;
 *u++ = t >> 4 & 0xFFC0;
 *y++ = t >> 14 & 0xFFC0;
-if (src >= line_end - 2) {
-if (!(width & 1)) {
-*y = 0x80;
-src += 2;
-}
-line_end += stride;
-y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
-u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
-v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+if (line_end - src < 4)
 break;
-}
 
 t = AV_RL32(src);
 src += 4;
@@ -114,15 +114,8 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
 *y++ = t >> 4 & 0xFFC0;
 *u++ = t >> 14 & 0xFFC0;
 
-if (src >= line_end - 1) {
-*y = 0x80;
-src++;
-line_end += stride;
-y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
-u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
-v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+if (line_end - src < 4)
 break;
-}
 
 t = AV_RL32(src);
 src += 4;
@@ -130,18 +123,21 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
 *v++ = t >> 4 & 0xFFC0;
 *y++ = t >> 14 & 0xFFC0;
 
-if (src >= line_end - 2) {
-if (width & 1) {
-*y = 0x80;
-src += 2;
-}
-line_end += stride;
-y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
-u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
-v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+if (width - x < 6)
 break;
-}
 }
 
+if (x < width) {
+y = x + (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+u = x/2 + (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+v = x/2 + (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+memcpy(y, y_temp, sizeof(*y) * (width - x));
+memcpy(u, u_temp, sizeof(*u) * (width - x + 1) / 2);
+memcpy(v, v_temp, sizeof(*v) * (width - x + 1) / 2);
+}
+
+line_end += stride;
+src = line_end - stride;
 }
 
 *got_frame = 1;
@@ -57,9 +57,13 @@ static void to_meta_with_crop(AVCodecContext *avctx, AVFrame *p, int *dest)
 for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
 for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
 if(x < width && y < height) {
-/* build average over 2 pixels */
-luma = (src[(x + 0 + y * p->linesize[0])] +
-src[(x + 1 + y * p->linesize[0])]) / 2;
+if (x + 1 < width) {
+/* build average over 2 pixels */
+luma = (src[(x + 0 + y * p->linesize[0])] +
+src[(x + 1 + y * p->linesize[0])]) / 2;
+} else {
+luma = src[(x + y * p->linesize[0])];
+}
 /* write blocks as linear data now so they are suitable for elbg */
 dest[0] = luma;
 }
@@ -34,7 +34,7 @@ static int aac_sync(uint64_t state, AACAC3ParseContext *hdr_info,
 int size;
 union {
 uint64_t u64;
-uint8_t u8[8];
+uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
 } tmp;
 
 tmp.u64 = av_be2ne64(state);
@@ -166,7 +166,7 @@ static void put_audio_specific_config(AVCodecContext *avctx)
 PutBitContext pb;
 AACEncContext *s = avctx->priv_data;
 
-init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8);
+init_put_bits(&pb, avctx->extradata, avctx->extradata_size);
 put_bits(&pb, 5, 2); //object type - AAC-LC
 put_bits(&pb, 4, s->samplerate_index); //sample rate index
 put_bits(&pb, 4, s->channels);
@@ -147,7 +147,7 @@ static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
 int err;
 union {
 uint64_t u64;
-uint8_t u8[8];
+uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
 } tmp = { av_be2ne64(state) };
 AC3HeaderInfo hdr;
 GetBitContext gbc;
@@ -259,7 +259,7 @@ static void apply_channel_coupling(AC3EncodeContext *s)
 energy_cpl = energy[blk][CPL_CH][bnd];
 energy_ch = energy[blk][ch][bnd];
 blk1 = blk+1;
-while (!s->blocks[blk1].new_cpl_coords[ch] && blk1 < s->num_blocks) {
+while (blk1 < s->num_blocks && !s->blocks[blk1].new_cpl_coords[ch]) {
 if (s->blocks[blk1].cpl_in_use) {
 energy_cpl += energy[blk1][CPL_CH][bnd];
 energy_ch += energy[blk1][ch][bnd];
@@ -1387,7 +1387,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
 
 static const enum AVSampleFormat sample_fmts_s16[] = { AV_SAMPLE_FMT_S16,
 AV_SAMPLE_FMT_NONE };
-static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16,
+static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
 AV_SAMPLE_FMT_NONE };
 static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
 AV_SAMPLE_FMT_S16P,
@@ -550,7 +550,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
 case AV_CODEC_ID_ADPCM_IMA_QT:
 {
 PutBitContext pb;
-init_put_bits(&pb, dst, pkt_size * 8);
+init_put_bits(&pb, dst, pkt_size);
 
 for (ch = 0; ch < avctx->channels; ch++) {
 ADPCMChannelStatus *status = &c->status[ch];
@@ -558,10 +558,11 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
 put_bits(&pb, 7, status->step_index);
 if (avctx->trellis > 0) {
 uint8_t buf[64];
-adpcm_compress_trellis(avctx, &samples_p[ch][1], buf, status,
+adpcm_compress_trellis(avctx, &samples_p[ch][0], buf, status,
 64, 1);
 for (i = 0; i < 64; i++)
 put_bits(&pb, 4, buf[i ^ 1]);
+status->prev_sample = status->predictor;
 } else {
 for (i = 0; i < 64; i += 2) {
 int t1, t2;
@@ -579,7 +580,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
 case AV_CODEC_ID_ADPCM_SWF:
 {
 PutBitContext pb;
-init_put_bits(&pb, dst, pkt_size * 8);
+init_put_bits(&pb, dst, pkt_size);
 
 n = frame->nb_samples - 1;
 
@@ -47,13 +47,8 @@ int avpriv_adx_decode_header(AVCodecContext *avctx, const uint8_t *buf,
 return AVERROR_INVALIDDATA;
 offset = AV_RB16(buf + 2) + 4;
 
-if (offset < 6) {
-av_log(avctx, AV_LOG_ERROR, "offset is prior data\n");
-return AVERROR_INVALIDDATA;
-}
-
 /* if copyright string is within the provided data, validate it */
-if (bufsize >= offset && memcmp(buf + offset - 6, "(c)CRI", 6))
+if (bufsize >= offset && offset >= 6 && memcmp(buf + offset - 6, "(c)CRI", 6))
 return AVERROR_INVALIDDATA;
 
 /* check for encoding=3 block_size=18, sample_size=4 */
@@ -67,7 +67,7 @@ typedef struct AlacEncodeContext {
 int write_sample_size;
 int extra_bits;
 int32_t sample_buf[2][DEFAULT_FRAME_SIZE];
-int32_t predictor_buf[DEFAULT_FRAME_SIZE];
+int32_t predictor_buf[2][DEFAULT_FRAME_SIZE];
 int interlacing_shift;
 int interlacing_leftweight;
 PutBitContext pbctx;
@@ -254,13 +254,14 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch)
 {
 int i;
 AlacLPCContext lpc = s->lpc[ch];
+int32_t *residual = s->predictor_buf[ch];
 
 if (lpc.lpc_order == 31) {
-s->predictor_buf[0] = s->sample_buf[ch][0];
+residual[0] = s->sample_buf[ch][0];
 
 for (i = 1; i < s->frame_size; i++) {
-s->predictor_buf[i] = s->sample_buf[ch][i ] -
+residual[i] = s->sample_buf[ch][i ] -
 s->sample_buf[ch][i - 1];
 }
 
 return;
@@ -270,7 +271,6 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch)
 
 if (lpc.lpc_order > 0) {
 int32_t *samples = s->sample_buf[ch];
-int32_t *residual = s->predictor_buf;
 
 // generate warm-up samples
 residual[0] = samples[0];
@@ -314,11 +314,11 @@ static void alac_linear_predictor(AlacEncodeContext *s, int ch)
 }
 }
 
-static void alac_entropy_coder(AlacEncodeContext *s)
+static void alac_entropy_coder(AlacEncodeContext *s, int ch)
 {
 unsigned int history = s->rc.initial_history;
 int sign_modifier = 0, i, k;
-int32_t *samples = s->predictor_buf;
+int32_t *samples = s->predictor_buf[ch];
 
 for (i = 0; i < s->frame_size;) {
 int x;
@@ -395,6 +395,19 @@ static void write_element(AlacEncodeContext *s,
 init_sample_buffers(s, channels, samples);
 write_element_header(s, element, instance);
 
+// extract extra bits if needed
+if (s->extra_bits) {
+uint32_t mask = (1 << s->extra_bits) - 1;
+for (j = 0; j < channels; j++) {
+int32_t *extra = s->predictor_buf[j];
+int32_t *smp = s->sample_buf[j];
+for (i = 0; i < s->frame_size; i++) {
+extra[i] = smp[i] & mask;
+smp[i] >>= s->extra_bits;
+}
+}
+}
+
 if (channels == 2)
 alac_stereo_decorrelation(s);
 else
@@ -420,8 +433,7 @@ static void write_element(AlacEncodeContext *s,
 uint32_t mask = (1 << s->extra_bits) - 1;
 for (i = 0; i < s->frame_size; i++) {
 for (j = 0; j < channels; j++) {
-put_bits(pb, s->extra_bits, s->sample_buf[j][i] & mask);
-s->sample_buf[j][i] >>= s->extra_bits;
+put_bits(pb, s->extra_bits, s->predictor_buf[j][i] & mask);
 }
 }
 }
@@ -433,10 +445,11 @@ static void write_element(AlacEncodeContext *s,
 // TODO: determine when this will actually help. for now it's not used.
 if (prediction_type == 15) {
 // 2nd pass 1st order filter
+int32_t *residual = s->predictor_buf[channels];
 for (j = s->frame_size - 1; j > 0; j--)
-s->predictor_buf[j] -= s->predictor_buf[j - 1];
+residual[j] -= residual[j - 1];
 }
-alac_entropy_coder(s);
+alac_entropy_coder(s, i);
 }
 }
 }
@@ -285,7 +285,7 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
 GetBitContext gb;
 uint64_t ht_size;
 int i, config_offset;
-MPEG4AudioConfig m4ac;
+MPEG4AudioConfig m4ac = {0};
 ALSSpecificConfig *sconf = &ctx->sconf;
 AVCodecContext *avctx = ctx->avctx;
 uint32_t als_id, header_size, trailer_size;
@@ -132,12 +132,11 @@ function ff_put_pixels8_y2_armv6, export=1
 uhadd8 r9, r5, r7
 eor r11, r5, r7
 and r10, r10, r12
-ldrc_pre ne, r4, r1, r2
+ldr_pre r4, r1, r2
 uadd8 r8, r8, r10
 and r11, r11, r12
 uadd8 r9, r9, r11
-it ne
-ldrne r5, [r1, #4]
+ldr r5, [r1, #4]
 uhadd8 r10, r4, r6
 eor r6, r4, r6
 uhadd8 r11, r5, r7
@@ -194,10 +193,9 @@ function ff_put_pixels8_y2_no_rnd_armv6, export=1
 1:
 subs r3, r3, #2
 uhadd8 r8, r4, r6
-ldrc_pre ne, r4, r1, r2
+ldr_pre r4, r1, r2
 uhadd8 r9, r5, r7
-it ne
-ldrne r5, [r1, #4]
+ldr r5, [r1, #4]
 uhadd8 r12, r4, r6
 ldrc_pre ne, r6, r1, r2
 uhadd8 r14, r5, r7
@@ -91,7 +91,7 @@ static void ff_h264dsp_init_neon(H264DSPContext *c, const int bit_depth, const i
 c->h264_idct_dc_add = ff_h264_idct_dc_add_neon;
 c->h264_idct_add16 = ff_h264_idct_add16_neon;
 c->h264_idct_add16intra = ff_h264_idct_add16intra_neon;
-if (chroma_format_idc == 1)
+if (chroma_format_idc <= 1)
 c->h264_idct_add8 = ff_h264_idct_add8_neon;
 c->h264_idct8_add = ff_h264_idct8_add_neon;
 c->h264_idct8_dc_add = ff_h264_idct8_dc_add_neon;
@@ -325,6 +325,32 @@ static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
 return p->eof;
 }
 
+static av_always_inline unsigned int bytestream2_copy_bufferu(PutByteContext *p,
+GetByteContext *g,
+unsigned int size)
+{
+memcpy(p->buffer, g->buffer, size);
+p->buffer += size;
+g->buffer += size;
+return size;
+}
+
+static av_always_inline unsigned int bytestream2_copy_buffer(PutByteContext *p,
+GetByteContext *g,
+unsigned int size)
+{
+int size2;
+
+if (p->eof)
+return 0;
+size = FFMIN(g->buffer_end - g->buffer, size);
+size2 = FFMIN(p->buffer_end - p->buffer, size);
+if (size2 != size)
+p->eof = 1;
+
+return bytestream2_copy_bufferu(p, g, size2);
+}
+
 static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b,
 uint8_t *dst,
 unsigned int size)
@@ -269,7 +269,7 @@ static void cdg_scroll(CDGraphicsContext *cc, uint8_t *data,
 static int cdg_decode_frame(AVCodecContext *avctx,
 void *data, int *got_frame, AVPacket *avpkt)
 {
-const uint8_t *buf = avpkt->data;
+GetByteContext gb;
 int buf_size = avpkt->size;
 int ret;
 uint8_t command, inst;
@@ -286,19 +286,19 @@ static int cdg_decode_frame(AVCodecContext *avctx,
 return AVERROR(EINVAL);
 }
 
+bytestream2_init(&gb, avpkt->data, avpkt->size);
 
 ret = avctx->reget_buffer(avctx, &cc->frame);
 if (ret) {
 av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
 return ret;
 }
 
-command = bytestream_get_byte(&buf);
-inst = bytestream_get_byte(&buf);
+command = bytestream2_get_byte(&gb);
+inst = bytestream2_get_byte(&gb);
 inst &= CDG_MASK;
-buf += 2; /// skipping 2 unneeded bytes
-if (buf_size > CDG_HEADER_SIZE)
-bytestream_get_buffer(&buf, cdg_data, buf_size - CDG_HEADER_SIZE);
+bytestream2_skip(&gb, 2);
+bytestream2_get_buffer(&gb, cdg_data, sizeof(cdg_data));
 
 if ((command & CDG_MASK) == CDG_COMMAND) {
 switch (inst) {
@@ -357,11 +357,10 @@ static int cdg_decode_frame(AVCodecContext *avctx,
 *got_frame = 1;
 } else {
 *got_frame = 0;
-buf_size = 0;
 }
 
 *(AVFrame *) data = cc->frame;
-return buf_size;
+return avpkt->size;
 }
 
 static av_cold int cdg_decode_end(AVCodecContext *avctx)
@@ -28,6 +28,7 @@
 #ifndef AVCODEC_DIRAC_ARITH_H
 #define AVCODEC_DIRAC_ARITH_H
 
+#include "libavutil/x86/asm.h"
 #include "bytestream.h"
 #include "get_bits.h"
 
@@ -134,7 +135,7 @@ static inline int dirac_get_arith_bit(DiracArith *c, int ctx)
 
 range_times_prob = (c->range * prob_zero) >> 16;
 
-#if HAVE_FAST_CMOV && HAVE_INLINE_ASM
+#if HAVE_FAST_CMOV && HAVE_INLINE_ASM && HAVE_6REGS
 low -= range_times_prob << 16;
 range -= range_times_prob;
 bit = 0;
@@ -200,6 +200,7 @@ typedef struct DiracContext {
 
 uint16_t *mctmp; /* buffer holding the MC data multipled by OBMC weights */
 uint8_t *mcscratch;
+int buffer_stride;
 
 DECLARE_ALIGNED(16, uint8_t, obmc_weight)[3][MAX_BLOCKSIZE*MAX_BLOCKSIZE];
 
@@ -342,22 +343,44 @@ static int alloc_sequence_buffers(DiracContext *s)
 return AVERROR(ENOMEM);
 }
 
-w = s->source.width;
-h = s->source.height;
-
 /* fixme: allocate using real stride here */
-s->sbsplit = av_malloc(sbwidth * sbheight);
-s->blmotion = av_malloc(sbwidth * sbheight * 16 * sizeof(*s->blmotion));
-s->edge_emu_buffer_base = av_malloc((w+64)*MAX_BLOCKSIZE);
+s->sbsplit = av_malloc_array(sbwidth, sbheight);
+s->blmotion = av_malloc_array(sbwidth, sbheight * 16 * sizeof(*s->blmotion));
 
-s->mctmp = av_malloc((w+64+MAX_BLOCKSIZE) * (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
-s->mcscratch = av_malloc((w+64)*MAX_BLOCKSIZE);
-
-if (!s->sbsplit || !s->blmotion || !s->mctmp || !s->mcscratch)
+if (!s->sbsplit || !s->blmotion)
 return AVERROR(ENOMEM);
 return 0;
 }
 
+static int alloc_buffers(DiracContext *s, int stride)
+{
+int w = s->source.width;
+int h = s->source.height;
+
+av_assert0(stride >= w);
+stride += 64;
+
+if (s->buffer_stride >= stride)
+return 0;
+s->buffer_stride = 0;
+
+av_freep(&s->edge_emu_buffer_base);
+memset(s->edge_emu_buffer, 0, sizeof(s->edge_emu_buffer));
+av_freep(&s->mctmp);
+av_freep(&s->mcscratch);
+
+s->edge_emu_buffer_base = av_malloc_array(stride, MAX_BLOCKSIZE);
+
+s->mctmp = av_malloc_array((stride+MAX_BLOCKSIZE), (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
+s->mcscratch = av_malloc_array(stride, MAX_BLOCKSIZE);
+
+if (!s->edge_emu_buffer_base || !s->mctmp || !s->mcscratch)
+return AVERROR(ENOMEM);
+
+s->buffer_stride = stride;
+return 0;
+}
+
 static void free_sequence_buffers(DiracContext *s)
 {
 int i, j, k;
@@ -381,6 +404,7 @@ static void free_sequence_buffers(DiracContext *s)
 av_freep(&s->plane[i].idwt_tmp);
 }
 
+s->buffer_stride = 0;
 av_freep(&s->sbsplit);
 av_freep(&s->blmotion);
 av_freep(&s->edge_emu_buffer_base);
@@ -1342,8 +1366,8 @@ static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5],
 motion_y >>= s->chroma_y_shift;
 }
 
-mx = motion_x & ~(-1 << s->mv_precision);
-my = motion_y & ~(-1 << s->mv_precision);
+mx = motion_x & ~(-1U << s->mv_precision);
+my = motion_y & ~(-1U << s->mv_precision);
 motion_x >>= s->mv_precision;
 motion_y >>= s->mv_precision;
 /* normalize subpel coordinates to epel */
@@ -1817,6 +1841,9 @@ static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int
 s->plane[1].stride = pic->avframe.linesize[1];
 s->plane[2].stride = pic->avframe.linesize[2];
 
+if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0)
+return AVERROR(ENOMEM);
+
 /* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
 if (dirac_decode_picture_header(s))
 return -1;
@@ -39,6 +39,7 @@ typedef struct DNXHDContext {
 GetBitContext gb;
 int64_t cid; ///< compression id
 unsigned int width, height;
+enum AVPixelFormat pix_fmt;
 unsigned int mb_width, mb_height;
 uint32_t mb_scan_index[68]; /* max for 1080p */
 int cur_field; ///< current interlaced field
@@ -135,7 +136,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_si
 av_dlog(ctx->avctx, "width %d, height %d\n", ctx->width, ctx->height);
 
 if (buf[0x21] & 0x40) {
-ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
+ctx->pix_fmt = AV_PIX_FMT_YUV422P10;
 ctx->avctx->bits_per_raw_sample = 10;
 if (ctx->bit_depth != 10) {
 ff_dsputil_init(&ctx->dsp, ctx->avctx);
@@ -143,7 +144,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_si
 ctx->decode_dct_block = dnxhd_decode_dct_block_10;
 }
 } else {
-ctx->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
+ctx->pix_fmt = AV_PIX_FMT_YUV422P;
 ctx->avctx->bits_per_raw_sample = 8;
 if (ctx->bit_depth != 8) {
 ff_dsputil_init(&ctx->dsp, ctx->avctx);
@@ -381,9 +382,15 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 avctx->width, avctx->height, ctx->width, ctx->height);
 first_field = 1;
 }
+if (avctx->pix_fmt != AV_PIX_FMT_NONE && avctx->pix_fmt != ctx->pix_fmt) {
+av_log(avctx, AV_LOG_WARNING, "pix_fmt changed: %s -> %s\n",
+av_get_pix_fmt_name(avctx->pix_fmt), av_get_pix_fmt_name(ctx->pix_fmt));
+first_field = 1;
+}
+
 if (av_image_check_size(ctx->width, ctx->height, 0, avctx))
 return -1;
+avctx->pix_fmt = ctx->pix_fmt;
 avcodec_set_dimensions(avctx, ctx->width, ctx->height);
 
 if (first_field) {
@@ -45,8 +45,11 @@ static int dvdsub_parse(AVCodecParserContext *s,
 DVDSubParseContext *pc = s->priv_data;
 
 if (pc->packet_index == 0) {
-if (buf_size < 2)
-return 0;
+if (buf_size < 2 || AV_RB16(buf) && buf_size < 6) {
+if (buf_size)
+av_log(avctx, AV_LOG_DEBUG, "Parser input %d too small\n", buf_size);
+return buf_size;
+}
 pc->packet_len = AV_RB16(buf);
 if (pc->packet_len == 0) /* HD-DVD subpicture packet */
 pc->packet_len = AV_RB32(buf+2);
@@ -98,6 +98,12 @@ static int decode_rle(uint8_t *bitmap, int linesize, int w, int h,
     int x, y, len, color;
     uint8_t *d;

+    if (start >= buf_size)
+        return -1;
+
+    if (w <= 0 || h <= 0)
+        return -1;
+
     bit_len = (buf_size - start) * 8;
     init_get_bits(&gb, buf + start, bit_len);

@@ -339,10 +345,12 @@ static int decode_dvd_subtitles(DVDSubContext *ctx, AVSubtitle *sub_header,
             sub_header->rects[0] = av_mallocz(sizeof(AVSubtitleRect));
             sub_header->num_rects = 1;
             sub_header->rects[0]->pict.data[0] = bitmap;
-            decode_rle(bitmap, w * 2, w, (h + 1) / 2,
-                       buf, offset1, buf_size, is_8bit);
-            decode_rle(bitmap + w, w * 2, w, h / 2,
-                       buf, offset2, buf_size, is_8bit);
+            if (decode_rle(bitmap, w * 2, w, (h + 1) / 2,
+                           buf, offset1, buf_size, is_8bit) < 0)
+                goto fail;
+            if (decode_rle(bitmap + w, w * 2, w, h / 2,
+                           buf, offset2, buf_size, is_8bit) < 0)
+                goto fail;
             sub_header->rects[0]->pict.data[1] = av_mallocz(AVPALETTE_SIZE);
             if (is_8bit) {
                 if (yuv_palette == 0)
@@ -304,6 +304,11 @@ static av_cold int decode_init(AVCodecContext *avctx)

     avctx->pix_fmt = AV_PIX_FMT_PAL8;

+    if (avctx->width%4 || avctx->height%4) {
+        av_log(avctx, AV_LOG_ERROR, "dimensions are not a multiple of 4");
+        return AVERROR_INVALIDDATA;
+    }
+
     avcodec_get_frame_defaults(&c->pic);
     avcodec_get_frame_defaults(&c->prev);

@@ -29,6 +29,7 @@
  */

 #include "avcodec.h"
+#include "bytestream.h"
 #include "get_bits.h"
 #include "dsputil.h"
 #include "aandcttab.h"
@@ -139,6 +140,11 @@ static inline int decode_block_intra(MadContext *s, DCTELEM * block)
                 break;
             } else if (level != 0) {
                 i += run;
+                if (i > 63) {
+                    av_log(s->avctx, AV_LOG_ERROR,
+                           "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
+                    return -1;
+                }
                 j = scantable[i];
                 level = (level*quant_matrix[j]) >> 4;
                 level = (level-1)|1;
@@ -153,6 +159,11 @@ static inline int decode_block_intra(MadContext *s, DCTELEM * block)
                 run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);

                 i += run;
+                if (i > 63) {
+                    av_log(s->avctx, AV_LOG_ERROR,
+                           "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
+                    return -1;
+                }
                 j = scantable[i];
                 if (level < 0) {
                     level = -level;
@@ -164,10 +175,6 @@ static inline int decode_block_intra(MadContext *s, DCTELEM * block)
                     level = (level-1)|1;
                 }
             }
-            if (i > 63) {
-                av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
-                return -1;
-            }

             block[j] = level;
         }
@@ -232,32 +239,34 @@ static int decode_frame(AVCodecContext *avctx,
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    const uint8_t *buf_end = buf+buf_size;
     MadContext *s = avctx->priv_data;
+    GetByteContext gb;
     int width, height, ret;
     int chunk_type;
     int inter;

-    if (buf_size < 26) {
-        av_log(avctx, AV_LOG_ERROR, "Input buffer too small\n");
-        *got_frame = 0;
+    bytestream2_init(&gb, buf, buf_size);
+
+    chunk_type = bytestream2_get_le32(&gb);
+    inter      = (chunk_type == MADm_TAG || chunk_type == MADe_TAG);
+    bytestream2_skip(&gb, 10);
+
+    av_reduce(&avctx->time_base.num, &avctx->time_base.den,
+              bytestream2_get_le16(&gb), 1000, 1<<30);
+
+    width  = bytestream2_get_le16(&gb);
+    height = bytestream2_get_le16(&gb);
+    bytestream2_skip(&gb, 1);
+    calc_quant_matrix(s, bytestream2_get_byte(&gb));
+    bytestream2_skip(&gb, 2);
+
+    if (bytestream2_get_bytes_left(&gb) < 2) {
+        av_log(avctx, AV_LOG_ERROR, "Input data too small\n");
         return AVERROR_INVALIDDATA;
     }

-    chunk_type = AV_RL32(&buf[0]);
-    inter      = (chunk_type == MADm_TAG || chunk_type == MADe_TAG);
-    buf += 8;
-
-    av_reduce(&avctx->time_base.num, &avctx->time_base.den,
-              AV_RL16(&buf[6]), 1000, 1<<30);
-
-    width  = AV_RL16(&buf[8]);
-    height = AV_RL16(&buf[10]);
-    calc_quant_matrix(s, buf[13]);
-    buf += 16;
-
     if (avctx->width != width || avctx->height != height) {
-        if((width * height)/2048*7 > buf_end-buf)
+        if((width * height)/2048*7 > bytestream2_get_bytes_left(&gb))
             return AVERROR_INVALIDDATA;
         if ((ret = av_image_check_size(width, height, 0, avctx)) < 0)
             return ret;
@@ -292,13 +301,13 @@ static int decode_frame(AVCodecContext *avctx,
     }

     av_fast_padded_malloc(&s->bitstream_buf, &s->bitstream_buf_size,
-                          buf_end - buf);
+                          bytestream2_get_bytes_left(&gb));
     if (!s->bitstream_buf)
         return AVERROR(ENOMEM);
-    s->dsp.bswap16_buf(s->bitstream_buf, (const uint16_t*)buf, (buf_end-buf)/2);
-    memset((uint8_t*)s->bitstream_buf + (buf_end-buf), 0, FF_INPUT_BUFFER_PADDING_SIZE);
-    init_get_bits(&s->gb, s->bitstream_buf, 8*(buf_end-buf));
+    s->dsp.bswap16_buf(s->bitstream_buf, (const uint16_t *)(buf + bytestream2_tell(&gb)),
+                       bytestream2_get_bytes_left(&gb) / 2);
+    memset((uint8_t*)s->bitstream_buf + bytestream2_get_bytes_left(&gb), 0, FF_INPUT_BUFFER_PADDING_SIZE);
+    init_get_bits(&s->gb, s->bitstream_buf, 8*(bytestream2_get_bytes_left(&gb)));
     for (s->mb_y=0; s->mb_y < (avctx->height+15)/16; s->mb_y++)
         for (s->mb_x=0; s->mb_x < (avctx->width +15)/16; s->mb_x++)
             if(decode_mb(s, inter) < 0)
@@ -924,8 +924,8 @@ void ff_er_frame_end(MpegEncContext *s)
         return;
     };

-    if ( s->picture_structure == PICT_FRAME
-        && s->current_picture.f.linesize[0] != s->current_picture_ptr->f.linesize[0]) {
+    if (s->picture_structure == PICT_FRAME &&
+        s->current_picture.f.linesize[0] != s->current_picture_ptr->f.linesize[0]) {
         av_log(s->avctx, AV_LOG_ERROR, "Error concealment not possible, frame not fully initialized\n");
         return;
     }
@@ -247,7 +247,7 @@ static void put_line(uint8_t *dst, int size, int width, const int *runs)
     PutBitContext pb;
     int run, mode = ~0, pix_left = width, run_idx = 0;

-    init_put_bits(&pb, dst, size*8);
+    init_put_bits(&pb, dst, size);
     while(pix_left > 0){
         run = runs[run_idx++];
         mode = ~mode;
@@ -112,6 +112,7 @@ static void fft_ref(FFTComplex *tabr, FFTComplex *tab, int nbits)
     }
 }

+#if CONFIG_MDCT
 static void imdct_ref(FFTSample *out, FFTSample *in, int nbits)
 {
     int n = 1<<nbits;
@@ -146,8 +147,10 @@ static void mdct_ref(FFTSample *output, FFTSample *input, int nbits)
         output[k] = REF_SCALE(s, nbits - 1);
     }
 }
+#endif /* CONFIG_MDCT */

 #if CONFIG_FFT_FLOAT
+#if CONFIG_DCT
 static void idct_ref(FFTSample *output, FFTSample *input, int nbits)
 {
     int n = 1<<nbits;
@@ -180,6 +183,7 @@ static void dct_ref(FFTSample *output, FFTSample *input, int nbits)
         output[k] = s;
     }
 }
+#endif /* CONFIG_DCT */
 #endif


@@ -305,6 +309,7 @@ int main(int argc, char **argv)
     tab2 = av_malloc(fft_size * sizeof(FFTSample));

     switch (transform) {
+#if CONFIG_MDCT
     case TRANSFORM_MDCT:
         av_log(NULL, AV_LOG_INFO,"Scale factor is set to %f\n", scale);
         if (do_inverse)
@@ -313,6 +318,7 @@ int main(int argc, char **argv)
             av_log(NULL, AV_LOG_INFO,"MDCT");
         ff_mdct_init(m, fft_nbits, do_inverse, scale);
         break;
+#endif /* CONFIG_MDCT */
     case TRANSFORM_FFT:
         if (do_inverse)
             av_log(NULL, AV_LOG_INFO,"IFFT");
@@ -322,6 +328,7 @@ int main(int argc, char **argv)
         fft_ref_init(fft_nbits, do_inverse);
         break;
 #if CONFIG_FFT_FLOAT
+#if CONFIG_RDFT
     case TRANSFORM_RDFT:
         if (do_inverse)
             av_log(NULL, AV_LOG_INFO,"IDFT_C2R");
@@ -330,6 +337,8 @@ int main(int argc, char **argv)
         ff_rdft_init(r, fft_nbits, do_inverse ? IDFT_C2R : DFT_R2C);
         fft_ref_init(fft_nbits, do_inverse);
         break;
+#endif /* CONFIG_RDFT */
+#if CONFIG_DCT
     case TRANSFORM_DCT:
         if (do_inverse)
             av_log(NULL, AV_LOG_INFO,"DCT_III");
@@ -337,6 +346,7 @@ int main(int argc, char **argv)
             av_log(NULL, AV_LOG_INFO,"DCT_II");
         ff_dct_init(d, fft_nbits, do_inverse ? DCT_III : DCT_II);
         break;
+#endif /* CONFIG_DCT */
 #endif
     default:
         av_log(NULL, AV_LOG_ERROR, "Requested transform not supported\n");
@@ -355,6 +365,7 @@ int main(int argc, char **argv)
         av_log(NULL, AV_LOG_INFO,"Checking...\n");

         switch (transform) {
+#if CONFIG_MDCT
         case TRANSFORM_MDCT:
             if (do_inverse) {
                 imdct_ref((FFTSample *)tab_ref, (FFTSample *)tab1, fft_nbits);
@@ -368,6 +379,7 @@ int main(int argc, char **argv)
                 err = check_diff((FFTSample *)tab_ref, tab2, fft_size / 2, scale);
             }
             break;
+#endif /* CONFIG_MDCT */
         case TRANSFORM_FFT:
             memcpy(tab, tab1, fft_size * sizeof(FFTComplex));
             s->fft_permute(s, tab);
@@ -377,6 +389,7 @@ int main(int argc, char **argv)
             err = check_diff((FFTSample *)tab_ref, (FFTSample *)tab, fft_size * 2, 1.0);
             break;
 #if CONFIG_FFT_FLOAT
+#if CONFIG_RDFT
         case TRANSFORM_RDFT:
             fft_size_2 = fft_size >> 1;
             if (do_inverse) {
@@ -408,6 +421,8 @@ int main(int argc, char **argv)
                 err = check_diff((float *)tab_ref, (float *)tab2, fft_size, 1.0);
             }
             break;
+#endif /* CONFIG_RDFT */
+#if CONFIG_DCT
         case TRANSFORM_DCT:
             memcpy(tab, tab1, fft_size * sizeof(FFTComplex));
             d->dct_calc(d, (FFTSample *)tab);
@@ -418,6 +433,7 @@ int main(int argc, char **argv)
             }
             err = check_diff((float *)tab_ref, (float *)tab, fft_size, 1.0);
             break;
+#endif /* CONFIG_DCT */
 #endif
         }

@@ -469,19 +485,25 @@ int main(int argc, char **argv)
     }

     switch (transform) {
+#if CONFIG_MDCT
     case TRANSFORM_MDCT:
         ff_mdct_end(m);
         break;
+#endif /* CONFIG_MDCT */
     case TRANSFORM_FFT:
         ff_fft_end(s);
         break;
 #if CONFIG_FFT_FLOAT
+#if CONFIG_RDFT
     case TRANSFORM_RDFT:
         ff_rdft_end(r);
         break;
+#endif /* CONFIG_RDFT */
+#if CONFIG_DCT
     case TRANSFORM_DCT:
         ff_dct_end(d);
         break;
+#endif /* CONFIG_DCT */
 #endif
     }

@@ -541,31 +541,31 @@ static int read_header(FFV1Context *f)
             f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
     }

        colorspace          = get_symbol(c, state, 0); //YUV cs type
        bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
        chroma_planes       = get_rac(c, state);
        chroma_h_shift      = get_symbol(c, state, 0);
        chroma_v_shift      = get_symbol(c, state, 0);
        transparency        = get_rac(c, state);

        if (f->plane_count) {
-           if (   colorspace          != f->colorspace
-               || bits_per_raw_sample != f->avctx->bits_per_raw_sample
-               || chroma_planes       != f->chroma_planes
-               || chroma_h_shift!= f->chroma_h_shift
-               || chroma_v_shift!= f->chroma_v_shift
-               || transparency        != f->transparency) {
+           if (colorspace          != f->colorspace                 ||
+               bits_per_raw_sample != f->avctx->bits_per_raw_sample ||
+               chroma_planes       != f->chroma_planes              ||
+               chroma_h_shift      != f->chroma_h_shift             ||
+               chroma_v_shift      != f->chroma_v_shift             ||
+               transparency        != f->transparency) {
                av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
                return AVERROR_INVALIDDATA;
            }
        }

        f->colorspace                 = colorspace;
        f->avctx->bits_per_raw_sample = bits_per_raw_sample;
        f->chroma_planes              = chroma_planes;
        f->chroma_h_shift             = chroma_h_shift;
        f->chroma_v_shift             = chroma_v_shift;
        f->transparency               = transparency;

        f->plane_count    = 2 + f->transparency;
    }
@@ -655,7 +655,7 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
 handle_error:
     *poutbuf      = NULL;
     *poutbuf_size = 0;
-    return read_end - buf;
+    return buf_size ? read_end - buf : 0;
 }

 static int flac_parse_init(AVCodecParserContext *c)
@@ -467,10 +467,10 @@ static int decode_frame(FLACContext *s)
         ret = allocate_buffers(s);
         if (ret < 0)
             return ret;
-        ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);
         s->got_streaminfo = 1;
         dump_headers(s->avctx, (FLACStreaminfo *)s);
     }
+    ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->bps);

 //    dump_headers(s->avctx, (FLACStreaminfo *)s);

|
|||||||
if (buf_size < 5)
|
if (buf_size < 5)
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
init_put_bits(&pb, buf, buf_size * 8);
|
init_put_bits(&pb, buf, buf_size);
|
||||||
|
|
||||||
put_bits(&pb, 4, (s->block_width >> 4) - 1);
|
put_bits(&pb, 4, (s->block_width >> 4) - 1);
|
||||||
put_bits(&pb, 12, s->image_width);
|
put_bits(&pb, 12, s->image_width);
|
||||||
|
@@ -131,7 +131,7 @@ static int encode_bitstream(FlashSVContext *s, AVFrame *p, uint8_t *buf,
|
|||||||
int buf_pos, res;
|
int buf_pos, res;
|
||||||
int pred_blocks = 0;
|
int pred_blocks = 0;
|
||||||
|
|
||||||
init_put_bits(&pb, buf, buf_size * 8);
|
init_put_bits(&pb, buf, buf_size);
|
||||||
|
|
||||||
put_bits(&pb, 4, block_width / 16 - 1);
|
put_bits(&pb, 4, block_width / 16 - 1);
|
||||||
put_bits(&pb, 12, s->image_width);
|
put_bits(&pb, 12, s->image_width);
|
||||||
|
@@ -2292,7 +2292,8 @@ static int pack_bitstream(G723_1_Context *p, unsigned char *frame, int size)
|
|||||||
if (p->cur_rate == RATE_6300) {
|
if (p->cur_rate == RATE_6300) {
|
||||||
info_bits = 0;
|
info_bits = 0;
|
||||||
put_bits(&pb, 2, info_bits);
|
put_bits(&pb, 2, info_bits);
|
||||||
}
|
}else
|
||||||
|
av_assert0(0);
|
||||||
|
|
||||||
put_bits(&pb, 8, p->lsp_index[2]);
|
put_bits(&pb, 8, p->lsp_index[2]);
|
||||||
put_bits(&pb, 8, p->lsp_index[1]);
|
put_bits(&pb, 8, p->lsp_index[1]);
|
||||||
|
@@ -256,26 +256,21 @@ static int gif_read_image(GifState *s)
|
|||||||
case 1:
|
case 1:
|
||||||
y1 += 8;
|
y1 += 8;
|
||||||
ptr += linesize * 8;
|
ptr += linesize * 8;
|
||||||
if (y1 >= height) {
|
|
||||||
y1 = pass ? 2 : 4;
|
|
||||||
ptr = ptr1 + linesize * y1;
|
|
||||||
pass++;
|
|
||||||
}
|
|
||||||
break;
|
break;
|
||||||
case 2:
|
case 2:
|
||||||
y1 += 4;
|
y1 += 4;
|
||||||
ptr += linesize * 4;
|
ptr += linesize * 4;
|
||||||
if (y1 >= height) {
|
|
||||||
y1 = 1;
|
|
||||||
ptr = ptr1 + linesize;
|
|
||||||
pass++;
|
|
||||||
}
|
|
||||||
break;
|
break;
|
||||||
case 3:
|
case 3:
|
||||||
y1 += 2;
|
y1 += 2;
|
||||||
ptr += linesize * 2;
|
ptr += linesize * 2;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
while (y1 >= height) {
|
||||||
|
y1 = 4 >> pass;
|
||||||
|
ptr = ptr1 + linesize * y1;
|
||||||
|
pass++;
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
ptr += linesize;
|
ptr += linesize;
|
||||||
}
|
}
|
||||||
|
@@ -720,10 +720,10 @@ frame_end:
     }

     if(startcode_found){
-        av_fast_malloc(
+        av_fast_padded_mallocz(
             &s->bitstream_buffer,
             &s->allocated_bitstream_buffer_size,
-            buf_size - current_pos + FF_INPUT_BUFFER_PADDING_SIZE);
+            buf_size - current_pos);
         if (!s->bitstream_buffer)
             return AVERROR(ENOMEM);
         memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos);
@@ -141,10 +141,10 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h)
 int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
 {
     MpegEncContext *const s = &h->s;
-    static const int8_t top[7] = { LEFT_DC_PRED8x8, 1, -1, -1 };
-    static const int8_t left[7] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
+    static const int8_t top[4] = { LEFT_DC_PRED8x8, 1, -1, -1 };
+    static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };

-    if (mode > 6U) {
+    if (mode > 3U) {
         av_log(h->s.avctx, AV_LOG_ERROR,
                "out of range intra chroma pred mode at %d %d\n",
                s->mb_x, s->mb_y);
@@ -163,18 +163,18 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)

     if ((h->left_samples_available & 0x8080) != 0x8080) {
         mode = left[mode];
-        if (is_chroma && (h->left_samples_available & 0x8080)) {
-            // mad cow disease mode, aka MBAFF + constrained_intra_pred
-            mode = ALZHEIMER_DC_L0T_PRED8x8 +
-                   (!(h->left_samples_available & 0x8000)) +
-                   2 * (mode == DC_128_PRED8x8);
-        }
         if (mode < 0) {
             av_log(h->s.avctx, AV_LOG_ERROR,
                    "left block unavailable for requested intra mode at %d %d\n",
                    s->mb_x, s->mb_y);
             return -1;
         }
+        if (is_chroma && (h->left_samples_available & 0x8080)) {
+            // mad cow disease mode, aka MBAFF + constrained_intra_pred
+            mode = ALZHEIMER_DC_L0T_PRED8x8 +
+                   (!(h->left_samples_available & 0x8000)) +
+                   2 * (mode == DC_128_PRED8x8);
+        }
     }

     return mode;
@@ -1249,6 +1249,18 @@ static int decode_update_thread_context(AVCodecContext *dst,
         memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
         memset(h->pps_buffers, 0, sizeof(h->pps_buffers));

+        h->intra4x4_pred_mode= NULL;
+        h->non_zero_count = NULL;
+        h->slice_table_base = NULL;
+        h->slice_table = NULL;
+        h->cbp_table = NULL;
+        h->chroma_pred_mode_table = NULL;
+        memset(h->mvd_table, 0, sizeof(h->mvd_table));
+        h->direct_table = NULL;
+        h->list_counts = NULL;
+        h->mb2b_xy = NULL;
+        h->mb2br_xy = NULL;
+
         if (s1->context_initialized) {
             if (ff_h264_alloc_tables(h) < 0) {
                 av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
@@ -1337,6 +1349,8 @@ int ff_h264_frame_start(H264Context *h)
     int i;
     const int pixel_shift = h->pixel_shift;

+    h->next_output_pic = NULL;
+
     if (ff_MPV_frame_start(s, s->avctx) < 0)
         return -1;
     ff_er_frame_start(s);
@@ -1389,8 +1403,6 @@ int ff_h264_frame_start(H264Context *h)
     s->current_picture_ptr->field_poc[0] =
         s->current_picture_ptr->field_poc[1] = INT_MAX;

-    h->next_output_pic = NULL;
-
     assert(s->current_picture_ptr->long_ref == 0);

     return 0;
@@ -2450,12 +2462,6 @@ static int h264_set_parameter_from_sps(H264Context *h)
         if (s->avctx->has_b_frames < 2)
             s->avctx->has_b_frames = !s->low_delay;

-    if (h->sps.bit_depth_luma != h->sps.bit_depth_chroma) {
-        av_log_missing_feature(s->avctx,
-            "Different bit depth between chroma and luma", 1);
-        return AVERROR_PATCHWELCOME;
-    }
-
     if (s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
         h->cur_chroma_format_idc != h->sps.chroma_format_idc) {
         if (s->avctx->codec &&
@@ -2996,8 +3002,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
             if (!h->sps.gaps_in_frame_num_allowed_flag)
                 for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
                     h->last_pocs[i] = INT_MIN;
-            if (ff_h264_frame_start(h) < 0)
+            if (ff_h264_frame_start(h) < 0) {
+                s0->first_field = 0;
                 return -1;
+            }
             h->prev_frame_num++;
             h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
             s->current_picture_ptr->frame_num = h->prev_frame_num;
@@ -3920,6 +3928,8 @@ static int execute_decode_slices(H264Context *h, int context_count)
     H264Context *hx;
     int i;

+    av_assert0(s->mb_y < s->mb_height);
+
     if (s->avctx->hwaccel ||
         s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
         return 0;
@@ -4042,7 +4052,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
             s->workaround_bugs |= FF_BUG_TRUNCATED;

         if (!(s->workaround_bugs & FF_BUG_TRUNCATED))
-            while(dst_length > 0 && ptr[dst_length - 1] == 0)
+            while (dst_length > 0 && ptr[dst_length - 1] == 0)
                 dst_length--;
         bit_length = !dst_length ? 0
                                  : (8 * dst_length -
@@ -4195,12 +4205,24 @@ again:
                 }
                 break;
             case NAL_DPA:
+                if (s->flags2 & CODEC_FLAG2_CHUNKS) {
+                    av_log(h->s.avctx, AV_LOG_ERROR,
+                           "Decoding in chunks is not supported for "
+                           "partitioned slices.\n");
+                    return AVERROR(ENOSYS);
+                }
+
                 init_get_bits(&hx->s.gb, ptr, bit_length);
                 hx->intra_gb_ptr =
                 hx->inter_gb_ptr = NULL;

-                if ((err = decode_slice_header(hx, h)) < 0)
+                if ((err = decode_slice_header(hx, h)) < 0) {
+                    /* make sure data_partitioning is cleared if it was set
+                     * before, so we don't try decoding a slice without a valid
+                     * slice header later */
+                    h->s.data_partitioning = 0;
                     break;
+                }

                 hx->s.data_partitioning = 1;
                 break;
@@ -4270,9 +4292,10 @@ again:
                 context_count = 0;
             }

-        if (err < 0)
+        if (err < 0) {
             av_log(h->s.avctx, AV_LOG_ERROR, "decode_slice_header error\n");
-        else if (err == 1) {
+            h->ref_count[0] = h->ref_count[1] = h->list_count = 0;
+        } else if (err == 1) {
             /* Slice could not be decoded in parallel mode, copy down
              * NAL unit stuff to context 0 and restart. Note that
              * rbsp_buffer is not transferred, but since we no longer
@@ -4325,6 +4348,9 @@ static int decode_frame(AVCodecContext *avctx, void *data,

     s->flags = avctx->flags;
     s->flags2 = avctx->flags2;
+    /* reset data partitioning here, to ensure GetBitContexts from previous
+     * packets do not get used. */
+    s->data_partitioning = 0;

     /* end of stream, output what is still in the buffers */
     if (buf_size == 0) {
@@ -1712,7 +1712,7 @@ decode_cabac_residual_internal(H264Context *h, DCTELEM *block,
         \
         if( coeff_abs >= 15 ) { \
             int j = 0; \
-            while( get_cabac_bypass( CC ) ) { \
+            while (get_cabac_bypass(CC) && j < 30) { \
                 j++; \
             } \
         \
@@ -154,7 +154,7 @@ pps:
             goto fail;

         /* prepend only to the first type 5 NAL unit of an IDR picture */
-        if (ctx->first_idr && unit_type == 5) {
+        if (ctx->first_idr && (unit_type == 5 || unit_type == 7 || unit_type == 8)) {
             if ((ret=alloc_and_copy(poutbuf, poutbuf_size,
                                     avctx->extradata, avctx->extradata_size,
                                     buf, nal_size)) < 0)
|
|||||||
}
|
}
|
||||||
sps->bit_depth_luma = get_ue_golomb(&s->gb) + 8;
|
sps->bit_depth_luma = get_ue_golomb(&s->gb) + 8;
|
||||||
sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8;
|
sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8;
|
||||||
if (sps->bit_depth_luma > 14U || sps->bit_depth_chroma > 14U || sps->bit_depth_luma != sps->bit_depth_chroma) {
|
if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
|
||||||
|
sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14 ||
|
||||||
|
sps->bit_depth_luma != sps->bit_depth_chroma) {
|
||||||
av_log(h->s.avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
|
av_log(h->s.avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
|
||||||
sps->bit_depth_luma, sps->bit_depth_chroma);
|
sps->bit_depth_luma, sps->bit_depth_chroma);
|
||||||
goto fail;
|
goto fail;
|
||||||
|
@@ -63,7 +63,9 @@ static int split_field_copy(Picture *dest, Picture *src,
|
|||||||
return match;
|
return match;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int build_def_list(Picture *def, Picture **in, int len, int is_long, int sel){
|
static int build_def_list(Picture *def, int def_len,
|
||||||
|
Picture **in, int len, int is_long, int sel)
|
||||||
|
{
|
||||||
int i[2]={0};
|
int i[2]={0};
|
||||||
int index=0;
|
int index=0;
|
||||||
|
|
||||||
@@ -73,10 +75,12 @@ static int build_def_list(Picture *def, Picture **in, int len, int is_long, int
|
|||||||
while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3))))
|
while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3))))
|
||||||
i[1]++;
|
i[1]++;
|
||||||
if(i[0] < len){
|
if(i[0] < len){
|
||||||
|
av_assert0(index < def_len);
|
||||||
in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
|
in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
|
||||||
split_field_copy(&def[index++], in[ i[0]++ ], sel , 1);
|
split_field_copy(&def[index++], in[ i[0]++ ], sel , 1);
|
||||||
}
|
}
|
||||||
if(i[1] < len){
|
if(i[1] < len){
|
||||||
|
av_assert0(index < def_len);
|
||||||
in[ i[1] ]->pic_id= is_long ? i[1] : in[ i[1] ]->frame_num;
|
in[ i[1] ]->pic_id= is_long ? i[1] : in[ i[1] ]->frame_num;
|
||||||
split_field_copy(&def[index++], in[ i[1]++ ], sel^3, 0);
|
split_field_copy(&def[index++], in[ i[1]++ ], sel^3, 0);
|
||||||
}
|
}
|
||||||
@@ -124,8 +128,12 @@ int ff_h264_fill_default_ref_list(H264Context *h){
|
|||||||
len= add_sorted(sorted , h->short_ref, h->short_ref_count, cur_poc, 1^list);
|
len= add_sorted(sorted , h->short_ref, h->short_ref_count, cur_poc, 1^list);
|
||||||
len+=add_sorted(sorted+len, h->short_ref, h->short_ref_count, cur_poc, 0^list);
|
len+=add_sorted(sorted+len, h->short_ref, h->short_ref_count, cur_poc, 0^list);
|
||||||
av_assert0(len<=32);
|
av_assert0(len<=32);
|
||||||
len= build_def_list(h->default_ref_list[list] , sorted , len, 0, s->picture_structure);
|
len = build_def_list(h->default_ref_list[list], FF_ARRAY_ELEMS(h->default_ref_list[0]),
|
||||||
len+=build_def_list(h->default_ref_list[list]+len, h->long_ref, 16 , 1, s->picture_structure);
|
sorted, len, 0, s->picture_structure);
|
||||||
|
len += build_def_list(h->default_ref_list[list] + len,
|
||||||
|
FF_ARRAY_ELEMS(h->default_ref_list[0]) - len,
|
||||||
|
h->long_ref, 16, 1, s->picture_structure);
|
||||||
|
|
||||||
av_assert0(len<=32);
|
av_assert0(len<=32);
|
||||||
|
|
||||||
if(len < h->ref_count[list])
|
if(len < h->ref_count[list])
|
||||||
@@ -139,8 +147,12 @@ int ff_h264_fill_default_ref_list(H264Context *h){
|
|||||||
FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
|
FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
|
||||||
}
|
}
|
||||||
}else{
|
}else{
|
||||||
len = build_def_list(h->default_ref_list[0] , h->short_ref, h->short_ref_count, 0, s->picture_structure);
|
len = build_def_list(h->default_ref_list[0], FF_ARRAY_ELEMS(h->default_ref_list[0]),
|
||||||
len+= build_def_list(h->default_ref_list[0]+len, h-> long_ref, 16 , 1, s->picture_structure);
|
h->short_ref, h->short_ref_count, 0, s->picture_structure);
|
||||||
|
len += build_def_list(h->default_ref_list[0] + len,
|
||||||
|
FF_ARRAY_ELEMS(h->default_ref_list[0]) - len,
|
||||||
|
h-> long_ref, 16, 1, s->picture_structure);
|
||||||
|
|
||||||
av_assert0(len<=32);
|
av_assert0(len<=32);
|
||||||
if(len < h->ref_count[0])
|
if(len < h->ref_count[0])
|
||||||
memset(&h->default_ref_list[0][len], 0, sizeof(Picture)*(h->ref_count[0] - len));
|
memset(&h->default_ref_list[0][len], 0, sizeof(Picture)*(h->ref_count[0] - len));
|
||||||
|
@@ -223,6 +223,12 @@ int ff_h264_decode_sei(H264Context *h){
         if(s->avctx->debug&FF_DEBUG_STARTCODE)
             av_log(h->s.avctx, AV_LOG_DEBUG, "SEI %d len:%d\n", type, size);

+        if (size > get_bits_left(&s->gb) / 8) {
+            av_log(s->avctx, AV_LOG_ERROR, "SEI type %d truncated at %d\n",
+                   type, get_bits_left(&s->gb));
+            return AVERROR_INVALIDDATA;
+        }
+
         switch(type){
         case SEI_TYPE_PIC_TIMING: // Picture timing SEI
             if(decode_picture_timing(h) < 0)
|
|||||||
c->h264_idct8_dc_add= FUNC(ff_h264_idct8_dc_add, depth);\
|
c->h264_idct8_dc_add= FUNC(ff_h264_idct8_dc_add, depth);\
|
||||||
c->h264_idct_add16 = FUNC(ff_h264_idct_add16, depth);\
|
c->h264_idct_add16 = FUNC(ff_h264_idct_add16, depth);\
|
||||||
c->h264_idct8_add4 = FUNC(ff_h264_idct8_add4, depth);\
|
c->h264_idct8_add4 = FUNC(ff_h264_idct8_add4, depth);\
|
||||||
if (chroma_format_idc == 1)\
|
if (chroma_format_idc <= 1)\
|
||||||
c->h264_idct_add8 = FUNC(ff_h264_idct_add8, depth);\
|
c->h264_idct_add8 = FUNC(ff_h264_idct_add8, depth);\
|
||||||
else\
|
else\
|
||||||
c->h264_idct_add8 = FUNC(ff_h264_idct_add8_422, depth);\
|
c->h264_idct_add8 = FUNC(ff_h264_idct_add8_422, depth);\
|
||||||
c->h264_idct_add16intra= FUNC(ff_h264_idct_add16intra, depth);\
|
c->h264_idct_add16intra= FUNC(ff_h264_idct_add16intra, depth);\
|
||||||
c->h264_luma_dc_dequant_idct= FUNC(ff_h264_luma_dc_dequant_idct, depth);\
|
c->h264_luma_dc_dequant_idct= FUNC(ff_h264_luma_dc_dequant_idct, depth);\
|
||||||
if (chroma_format_idc == 1)\
|
if (chroma_format_idc <= 1)\
|
||||||
c->h264_chroma_dc_dequant_idct= FUNC(ff_h264_chroma_dc_dequant_idct, depth);\
|
c->h264_chroma_dc_dequant_idct= FUNC(ff_h264_chroma_dc_dequant_idct, depth);\
|
||||||
else\
|
else\
|
||||||
c->h264_chroma_dc_dequant_idct= FUNC(ff_h264_chroma422_dc_dequant_idct, depth);\
|
c->h264_chroma_dc_dequant_idct= FUNC(ff_h264_chroma422_dc_dequant_idct, depth);\
|
||||||
@@ -90,20 +90,20 @@ void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_fo
|
|||||||
c->h264_h_loop_filter_luma_intra= FUNC(h264_h_loop_filter_luma_intra, depth);\
|
c->h264_h_loop_filter_luma_intra= FUNC(h264_h_loop_filter_luma_intra, depth);\
|
||||||
c->h264_h_loop_filter_luma_mbaff_intra= FUNC(h264_h_loop_filter_luma_mbaff_intra, depth);\
|
c->h264_h_loop_filter_luma_mbaff_intra= FUNC(h264_h_loop_filter_luma_mbaff_intra, depth);\
|
||||||
c->h264_v_loop_filter_chroma= FUNC(h264_v_loop_filter_chroma, depth);\
|
c->h264_v_loop_filter_chroma= FUNC(h264_v_loop_filter_chroma, depth);\
|
||||||
if (chroma_format_idc == 1)\
|
if (chroma_format_idc <= 1)\
|
||||||
c->h264_h_loop_filter_chroma= FUNC(h264_h_loop_filter_chroma, depth);\
|
c->h264_h_loop_filter_chroma= FUNC(h264_h_loop_filter_chroma, depth);\
|
||||||
else\
|
else\
|
||||||
c->h264_h_loop_filter_chroma= FUNC(h264_h_loop_filter_chroma422, depth);\
|
c->h264_h_loop_filter_chroma= FUNC(h264_h_loop_filter_chroma422, depth);\
|
||||||
if (chroma_format_idc == 1)\
|
if (chroma_format_idc <= 1)\
|
||||||
c->h264_h_loop_filter_chroma_mbaff= FUNC(h264_h_loop_filter_chroma_mbaff, depth);\
|
c->h264_h_loop_filter_chroma_mbaff= FUNC(h264_h_loop_filter_chroma_mbaff, depth);\
|
||||||
else\
|
else\
|
||||||
c->h264_h_loop_filter_chroma_mbaff= FUNC(h264_h_loop_filter_chroma422_mbaff, depth);\
|
c->h264_h_loop_filter_chroma_mbaff= FUNC(h264_h_loop_filter_chroma422_mbaff, depth);\
|
||||||
c->h264_v_loop_filter_chroma_intra= FUNC(h264_v_loop_filter_chroma_intra, depth);\
|
c->h264_v_loop_filter_chroma_intra= FUNC(h264_v_loop_filter_chroma_intra, depth);\
|
||||||
if (chroma_format_idc == 1)\
|
if (chroma_format_idc <= 1)\
|
||||||
c->h264_h_loop_filter_chroma_intra= FUNC(h264_h_loop_filter_chroma_intra, depth);\
|
c->h264_h_loop_filter_chroma_intra= FUNC(h264_h_loop_filter_chroma_intra, depth);\
|
||||||
else\
|
else\
|
||||||
c->h264_h_loop_filter_chroma_intra= FUNC(h264_h_loop_filter_chroma422_intra, depth);\
|
c->h264_h_loop_filter_chroma_intra= FUNC(h264_h_loop_filter_chroma422_intra, depth);\
|
||||||
if (chroma_format_idc == 1)\
|
if (chroma_format_idc <= 1)\
|
||||||
c->h264_h_loop_filter_chroma_mbaff_intra= FUNC(h264_h_loop_filter_chroma_mbaff_intra, depth);\
|
c->h264_h_loop_filter_chroma_mbaff_intra= FUNC(h264_h_loop_filter_chroma_mbaff_intra, depth);\
|
||||||
else\
|
else\
|
||||||
c->h264_h_loop_filter_chroma_mbaff_intra= FUNC(h264_h_loop_filter_chroma422_mbaff_intra, depth);\
|
c->h264_h_loop_filter_chroma_mbaff_intra= FUNC(h264_h_loop_filter_chroma422_mbaff_intra, depth);\
|
||||||
|
@@ -480,7 +480,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth,
     h->pred8x8l[TOP_DC_PRED ]= FUNCC(pred8x8l_top_dc , depth);\
     h->pred8x8l[DC_128_PRED ]= FUNCC(pred8x8l_128_dc , depth);\
     \
-    if (chroma_format_idc == 1) {\
+    if (chroma_format_idc <= 1) {\
         h->pred8x8[VERT_PRED8x8 ]= FUNCC(pred8x8_vertical , depth);\
         h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x8_horizontal , depth);\
     } else {\
@@ -488,7 +488,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth,
         h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x16_horizontal , depth);\
     }\
     if (codec_id != AV_CODEC_ID_VP8) {\
-        if (chroma_format_idc == 1) {\
+        if (chroma_format_idc <= 1) {\
             h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x8_plane , depth);\
         } else {\
             h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x16_plane , depth);\
@@ -496,7 +496,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth,
     } else\
         h->pred8x8[PLANE_PRED8x8]= FUNCD(pred8x8_tm_vp8);\
     if(codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP8){\
-        if (chroma_format_idc == 1) {\
+        if (chroma_format_idc <= 1) {\
             h->pred8x8[DC_PRED8x8 ]= FUNCC(pred8x8_dc , depth);\
             h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x8_left_dc , depth);\
             h->pred8x8[TOP_DC_PRED8x8 ]= FUNCC(pred8x8_top_dc , depth);\
@@ -522,7 +522,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth,
             h->pred8x8[DC_129_PRED8x8]= FUNCC(pred8x8_129_dc , depth);\
         }\
     }\
-    if (chroma_format_idc == 1) {\
+    if (chroma_format_idc <= 1) {\
         h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x8_128_dc , depth);\
     } else {\
         h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x16_128_dc , depth);\
@@ -556,7 +556,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth,
     h->pred4x4_add [ HOR_PRED ]= FUNCC(pred4x4_horizontal_add , depth);\
     h->pred8x8l_add [VERT_PRED ]= FUNCC(pred8x8l_vertical_add , depth);\
     h->pred8x8l_add [ HOR_PRED ]= FUNCC(pred8x8l_horizontal_add , depth);\
-    if (chroma_format_idc == 1) {\
+    if (chroma_format_idc <= 1) {\
         h->pred8x8_add [VERT_PRED8x8]= FUNCC(pred8x8_vertical_add , depth);\
         h->pred8x8_add [ HOR_PRED8x8]= FUNCC(pred8x8_horizontal_add , depth);\
     } else {\
@@ -107,11 +107,13 @@ static int read_len_table(uint8_t *dst, GetBitContext *gb)
     return 0;
 }

-static void generate_joint_tables(HYuvContext *s)
+static int generate_joint_tables(HYuvContext *s)
 {
     uint16_t symbols[1 << VLC_BITS];
     uint16_t bits[1 << VLC_BITS];
     uint8_t len[1 << VLC_BITS];
+    int ret;
+
     if (s->bitstream_bpp < 24) {
         int p, i, y, u;
         for (p = 0; p < 3; p++) {
@@ -133,8 +135,9 @@ static void generate_joint_tables(HYuvContext *s)
                 }
             }
             ff_free_vlc(&s->vlc[3 + p]);
-            ff_init_vlc_sparse(&s->vlc[3 + p], VLC_BITS, i, len, 1, 1,
-                               bits, 2, 2, symbols, 2, 2, 0);
+            if ((ret = ff_init_vlc_sparse(&s->vlc[3 + p], VLC_BITS, i, len, 1, 1,
+                                          bits, 2, 2, symbols, 2, 2, 0)) < 0)
+                return ret;
         }
     } else {
         uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
@@ -176,31 +179,34 @@ static void generate_joint_tables(HYuvContext *s)
             }
         }
         ff_free_vlc(&s->vlc[3]);
-        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
+        if ((ret = init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1,
+                            bits, 2, 2, 0)) < 0)
+            return ret;
     }
+    return 0;
 }

 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
 {
     GetBitContext gb;
-    int i;
-    int ret;
+    int i, ret;

-    init_get_bits(&gb, src, length * 8);
+    if ((ret = init_get_bits(&gb, src, length * 8)) < 0)
+        return ret;

     for (i = 0; i < 3; i++) {
-        if (read_len_table(s->len[i], &gb) < 0)
-            return -1;
-        if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0) {
-            return -1;
-        }
+        if ((ret = read_len_table(s->len[i], &gb)) < 0)
+            return ret;
+        if ((ret = ff_huffyuv_generate_bits_table(s->bits[i], s->len[i])) < 0)
+            return ret;
         ff_free_vlc(&s->vlc[i]);
         if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
                             s->bits[i], 4, 4, 0)) < 0)
             return ret;
     }

-    generate_joint_tables(s);
+    if ((ret = generate_joint_tables(s)) < 0)
+        return ret;

     return (get_bits_count(&gb) + 7) / 8;
 }
@@ -208,18 +214,19 @@ static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
 static int read_old_huffman_tables(HYuvContext *s)
 {
     GetBitContext gb;
-    int i;
-    int ret;
+    int i, ret;

-    init_get_bits(&gb, classic_shift_luma,
-                  classic_shift_luma_table_size * 8);
-    if (read_len_table(s->len[0], &gb) < 0)
-        return -1;
+    if ((ret = init_get_bits(&gb, classic_shift_luma,
+                             classic_shift_luma_table_size * 8)) < 0)
+        return ret;
+    if ((ret = read_len_table(s->len[0], &gb)) < 0)
+        return ret;

-    init_get_bits(&gb, classic_shift_chroma,
-                  classic_shift_chroma_table_size * 8);
-    if (read_len_table(s->len[1], &gb) < 0)
-        return -1;
+    if ((ret = init_get_bits(&gb, classic_shift_chroma,
+                             classic_shift_chroma_table_size * 8)) < 0)
+        return ret;
+    if ((ret = read_len_table(s->len[1], &gb)) < 0)
+        return ret;

     for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
     for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
@@ -238,7 +245,8 @@ static int read_old_huffman_tables(HYuvContext *s)
         return ret;
     }

-    generate_joint_tables(s);
+    if ((ret = generate_joint_tables(s)) < 0)
+        return ret;

     return 0;
 }
@@ -246,6 +254,7 @@ static int read_old_huffman_tables(HYuvContext *s)
 static av_cold int decode_init(AVCodecContext *avctx)
 {
     HYuvContext *s = avctx->priv_data;
+    int ret;

     ff_huffyuv_common_init(avctx);
     memset(s->vlc, 0, 3 * sizeof(VLC));
@@ -281,9 +290,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
         s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
         s->context = ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

-        if ( read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
-                                 avctx->extradata_size - 4) < 0)
-            return AVERROR_INVALIDDATA;
+        if ((ret = read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
+                                       avctx->extradata_size - 4)) < 0)
+            return ret;
     }else{
         switch (avctx->bits_per_coded_sample & 7) {
         case 1:
@@ -310,8 +319,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
         s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
         s->context = 0;

-        if (read_old_huffman_tables(s) < 0)
-            return AVERROR_INVALIDDATA;
+        if ((ret = read_old_huffman_tables(s)) < 0)
+            return ret;
     }

     switch (s->bitstream_bpp) {
@@ -341,13 +350,16 @@ static av_cold int decode_init(AVCodecContext *avctx)
         av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
         return AVERROR_INVALIDDATA;
     }
-    if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P && avctx->width%4) {
-        av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 this colorspace and predictor\n");
+    if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P &&
+        avctx->width % 4) {
+        av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 "
+               "for this combination of colorspace and predictor type.\n");
         return AVERROR_INVALIDDATA;
     }
-    if (ff_huffyuv_alloc_temp(s)) {
+
+    if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
         ff_huffyuv_common_end(s);
-        return AVERROR(ENOMEM);
+        return ret;
     }

     return 0;
@@ -356,24 +368,24 @@ static av_cold int decode_init(AVCodecContext *avctx)
 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
 {
     HYuvContext *s = avctx->priv_data;
-    int i;
+    int i, ret;

     avctx->coded_frame= &s->picture;
-    if (ff_huffyuv_alloc_temp(s)) {
+    if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
         ff_huffyuv_common_end(s);
-        return AVERROR(ENOMEM);
+        return ret;
     }

     for (i = 0; i < 6; i++)
         s->vlc[i].table = NULL;

     if (s->version == 2) {
-        if (read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
-                                avctx->extradata_size) < 0)
-            return AVERROR_INVALIDDATA;
+        if ((ret = read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
+                                       avctx->extradata_size)) < 0)
+            return ret;
     } else {
-        if (read_old_huffman_tables(s) < 0)
-            return AVERROR_INVALIDDATA;
+        if ((ret = read_old_huffman_tables(s)) < 0)
+            return ret;
     }

     return 0;
@@ -532,14 +544,15 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     if (s->context) {
         table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
         if (table_size < 0)
-            return AVERROR_INVALIDDATA;
+            return table_size;
     }

     if ((unsigned)(buf_size-table_size) >= INT_MAX / 8)
         return AVERROR_INVALIDDATA;

-    init_get_bits(&s->gb, s->bitstream_buffer+table_size,
-                  (buf_size-table_size) * 8);
+    if ((ret = init_get_bits(&s->gb, s->bitstream_buffer + table_size,
+                             (buf_size - table_size) * 8)) < 0)
+        return ret;

     fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
     fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
@@ -832,9 +832,9 @@ static int decode_frame(AVCodecContext *avctx,
         break;
     case 4:
         bytestream2_init(&gb, buf, buf_size);
-        if (avctx->codec_tag == MKTAG('R','G','B','8'))
+        if (avctx->codec_tag == MKTAG('R','G','B','8') && avctx->pix_fmt == AV_PIX_FMT_RGB32)
             decode_rgb8(&gb, s->frame.data[0], avctx->width, avctx->height, s->frame.linesize[0]);
-        else if (avctx->codec_tag == MKTAG('R','G','B','N'))
+        else if (avctx->codec_tag == MKTAG('R','G','B','N') && avctx->pix_fmt == AV_PIX_FMT_RGB444)
             decode_rgbn(&gb, s->frame.data[0], avctx->width, avctx->height, s->frame.linesize[0]);
         else
             return unsupported(avctx);
@@ -94,7 +94,7 @@ typedef struct Indeo3DecodeContext {

 int16_t width, height;
 uint32_t frame_num; ///< current frame number (zero-based)
-uint32_t data_size; ///< size of the frame data in bytes
+int data_size; ///< size of the frame data in bytes
 uint16_t frame_flags; ///< frame properties
 uint8_t cb_offset; ///< needed for selecting VQ tables
 uint8_t buf_sel; ///< active frame buffer: 0 - primary, 1 -secondary

@@ -906,7 +906,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
 GetByteContext gb;
 const uint8_t *bs_hdr;
 uint32_t frame_num, word2, check_sum, data_size;
-uint32_t y_offset, u_offset, v_offset, starts[3], ends[3];
+int y_offset, u_offset, v_offset;
+uint32_t starts[3], ends[3];
 uint16_t height, width;
 int i, j;

@@ -987,7 +988,8 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
 ctx->y_data_size = ends[0] - starts[0];
 ctx->v_data_size = ends[1] - starts[1];
 ctx->u_data_size = ends[2] - starts[2];
-if (FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
+if (FFMIN3(y_offset, v_offset, u_offset) < 0 ||
+FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
 FFMIN3(y_offset, v_offset, u_offset) < gb.buffer - bs_hdr + 16 ||
 FFMIN3(ctx->y_data_size, ctx->v_data_size, ctx->u_data_size) <= 0) {
 av_log(avctx, AV_LOG_ERROR, "One of the y/u/v offsets is invalid\n");
@@ -207,6 +207,11 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, void *
 x += stride;
 }

+if (x >= w) {
+av_log(NULL, AV_LOG_ERROR, "run overflow\n");
+return;
+}

 /* decode run termination value */
 Rb = R(last, x);
 RItype = (FFABS(Ra - Rb) <= state->near) ? 1 : 0;
@@ -40,6 +40,14 @@ typedef struct JvContext {
 static av_cold int decode_init(AVCodecContext *avctx)
 {
 JvContext *s = avctx->priv_data;

+if (!avctx->width || !avctx->height ||
+(avctx->width & 7) || (avctx->height & 7)) {
+av_log(avctx, AV_LOG_ERROR, "Invalid video dimensions: %dx%d\n",
+avctx->width, avctx->height);
+return AVERROR(EINVAL);
+}

 avctx->pix_fmt = AV_PIX_FMT_PAL8;
 ff_dsputil_init(&s->dsp, avctx);
 return 0;
@@ -199,7 +199,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
 case AV_CODEC_ID_MSZH:
 switch (c->compression) {
 case COMP_MSZH:
-if (c->imgtype == IMGTYPE_RGB24 && len == width * height * 3) {
+if (c->imgtype == IMGTYPE_RGB24 && len == width * height * 3 ||
+c->imgtype == IMGTYPE_YUV111 && len == width * height * 3) {
 ;
 } else if (c->flags & FLAG_MULTITHREAD) {
 mthread_inlen = AV_RL32(encoded);
@@ -191,6 +191,7 @@ static int mp3lame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
 MPADecodeHeader hdr;
 int len, ret, ch;
 int lame_result;
+uint32_t h;

 if (frame) {
 switch (avctx->sample_fmt) {

@@ -246,7 +247,12 @@ static int mp3lame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
 determine the frame size. */
 if (s->buffer_index < 4)
 return 0;
-if (avpriv_mpegaudio_decode_header(&hdr, AV_RB32(s->buffer))) {
+h = AV_RB32(s->buffer);
+if (ff_mpa_check_header(h) < 0) {
+av_log(avctx, AV_LOG_ERROR, "Invalid mp3 header at start of buffer\n");
+return AVERROR_BUG;
+}
+if (avpriv_mpegaudio_decode_header(&hdr, h)) {
 av_log(avctx, AV_LOG_ERROR, "free format output not supported\n");
 return -1;
 }
@@ -362,7 +362,8 @@ static int oggvorbis_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
 avctx->delay = duration;
 av_assert0(!s->afq.remaining_delay);
 s->afq.frames->duration += duration;
-s->afq.frames->pts -= duration;
+if (s->afq.frames->pts != AV_NOPTS_VALUE)
+s->afq.frames->pts -= duration;
 s->afq.remaining_samples += duration;
 }
 ff_af_queue_remove(&s->afq, duration, &avpkt->pts, &avpkt->duration);
@@ -89,7 +89,7 @@ static void ff_acelp_interpolatef_mips(float *out, const float *in,
 "addu %[p_filter_coeffs_m], %[p_filter_coeffs_m], %[prec] \n\t"
 "madd.s %[v],%[v],%[in_val_m], %[fc_val_m] \n\t"

-: [v] "=&f" (v),[p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
+: [v] "+&f" (v),[p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
 [p_filter_coeffs_p] "+r" (p_filter_coeffs_p),
 [in_val_p] "=&f" (in_val_p), [in_val_m] "=&f" (in_val_m),
 [fc_val_p] "=&f" (fc_val_p), [fc_val_m] "=&f" (fc_val_m),
@@ -211,7 +211,7 @@ int ff_mjpeg_decode_dht(MJpegDecodeContext *s)

 int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
 {
-int len, nb_components, i, width, height, pix_fmt_id;
+int len, nb_components, i, width, height, bits, pix_fmt_id;
 int h_count[MAX_COMPONENTS];
 int v_count[MAX_COMPONENTS];

@@ -220,14 +220,14 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)

 /* XXX: verify len field validity */
 len = get_bits(&s->gb, 16);
-s->bits = get_bits(&s->gb, 8);
+bits = get_bits(&s->gb, 8);

 if (s->pegasus_rct)
-s->bits = 9;
-if (s->bits == 9 && !s->pegasus_rct)
+bits = 9;
+if (bits == 9 && !s->pegasus_rct)
 s->rct = 1; // FIXME ugly

-if (s->bits != 8 && !s->lossless) {
+if (bits != 8 && !s->lossless) {
 av_log(s->avctx, AV_LOG_ERROR, "only 8 bits/component accepted\n");
 return -1;
 }

@@ -259,7 +259,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
 return AVERROR_INVALIDDATA;
 }
 }
-if (s->ls && !(s->bits <= 8 || nb_components == 1)) {
+if (s->ls && !(bits <= 8 || nb_components == 1)) {
 av_log_missing_feature(s->avctx,
 "For JPEG-LS anything except <= 8 bits/component"
 " or 16-bit gray", 0);

@@ -307,12 +307,14 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)

 /* if different size, realloc/alloc picture */
 if ( width != s->width || height != s->height
+|| bits != s->bits
 || memcmp(s->h_count, h_count, sizeof(h_count[0])*nb_components)
 || memcmp(s->v_count, v_count, sizeof(v_count[0])*nb_components)) {
 av_freep(&s->qscale_table);

 s->width = width;
 s->height = height;
+s->bits = bits;
 memcpy(s->h_count, h_count, sizeof(h_count));
 memcpy(s->v_count, v_count, sizeof(v_count));
 s->interlaced = 0;

@@ -437,9 +439,12 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
 }
 if (s->ls) {
 s->upscale_h = s->upscale_v = 0;
-if (s->nb_components > 1)
+if (s->nb_components == 3) {
 s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
-else if (s->bits <= 8)
+} else if (s->nb_components != 1) {
+av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
+return AVERROR_PATCHWELCOME;
+} else if (s->bits <= 8)
 s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
 else
 s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;

@@ -1074,12 +1079,17 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,

 if (s->interlaced && s->bottom_field)
 block_offset += linesize[c] >> 1;
-ptr = data[c] + block_offset;
+if ( 8*(h * mb_x + x) < s->width
+&& 8*(v * mb_y + y) < s->height) {
+ptr = data[c] + block_offset;
+} else
+ptr = NULL;
 if (!s->progressive) {
-if (copy_mb)
-mjpeg_copy_block(ptr, reference_data[c] + block_offset,
-linesize[c], s->avctx->lowres);
-else {
+if (copy_mb) {
+if (ptr)
+mjpeg_copy_block(ptr, reference_data[c] + block_offset,
+linesize[c], s->avctx->lowres);
+} else {
 s->dsp.clear_block(s->block);
 if (decode_block(s, s->block, i,
 s->dc_index[i], s->ac_index[i],

@@ -1088,7 +1098,9 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
 "error y=%d x=%d\n", mb_y, mb_x);
 return AVERROR_INVALIDDATA;
 }
-s->dsp.idct_put(ptr, linesize[c], s->block);
+if (ptr) {
+s->dsp.idct_put(ptr, linesize[c], s->block);
+}
 }
 } else {
 int block_idx = s->block_stride[c] * (v * mb_y + y) +

@@ -1140,7 +1152,7 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
 }

 if (!Al) {
-s->coefs_finished[c] |= (1LL << (se + 1)) - (1LL << ss);
+s->coefs_finished[c] |= (2LL << se) - (1LL << ss);
 last_scan = !~s->coefs_finished[c];
 }

@@ -1434,6 +1446,8 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
 }

 if (id == AV_RL32("LJIF")) {
+int rgb = s->rgb;
+int pegasus_rct = s->pegasus_rct;
 if (s->avctx->debug & FF_DEBUG_PICT_INFO)
 av_log(s->avctx, AV_LOG_INFO,
 "Pegasus lossless jpeg header found\n");

@@ -1443,17 +1457,27 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
 skip_bits(&s->gb, 16); /* unknown always 0? */
 switch (get_bits(&s->gb, 8)) {
 case 1:
-s->rgb = 1;
-s->pegasus_rct = 0;
+rgb = 1;
+pegasus_rct = 0;
 break;
 case 2:
-s->rgb = 1;
-s->pegasus_rct = 1;
+rgb = 1;
+pegasus_rct = 1;
 break;
 default:
 av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace\n");
 }

 len -= 9;
+if (s->got_picture)
+if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
+av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
+goto out;
+}

+s->rgb = rgb;
+s->pegasus_rct = pegasus_rct;

 goto out;
 }

@@ -1617,6 +1641,10 @@ int ff_mjpeg_find_marker(MJpegDecodeContext *s,
 put_bits(&pb, 8, x);
 if (x == 0xFF) {
 x = src[b++];
+if (x & 0x80) {
+av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
+x &= 0x7f;
+}
 put_bits(&pb, 7, x);
 bit_count--;
 }
@@ -60,6 +60,13 @@ static av_cold int mm_decode_init(AVCodecContext *avctx)

 avctx->pix_fmt = AV_PIX_FMT_PAL8;

+if (!avctx->width || !avctx->height ||
+(avctx->width & 1) || (avctx->height & 1)) {
+av_log(avctx, AV_LOG_ERROR, "Invalid video dimensions: %dx%d\n",
+avctx->width, avctx->height);
+return AVERROR(EINVAL);
+}

 avcodec_get_frame_defaults(&s->frame);
 s->frame.reference = 3;

@@ -109,7 +116,7 @@ static int mm_decode_intra(MmContext * s, int half_horiz, int half_vert)

 if (color) {
 memset(s->frame.data[0] + y*s->frame.linesize[0] + x, color, run_length);
-if (half_vert)
+if (half_vert && y + half_vert < s->avctx->height)
 memset(s->frame.data[0] + (y+1)*s->frame.linesize[0] + x, color, run_length);
 }
 x+= run_length;
@@ -81,6 +81,15 @@ static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
 return sign_extend(val, 5 + shift);
 }

+#define check_scantable_index(ctx, x) \
+do { \
+if ((x) > 63) { \
+av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
+ctx->mb_x, ctx->mb_y); \
+return AVERROR_INVALIDDATA; \
+} \
+} while (0) \

 static inline int mpeg1_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n)
 {
 int level, dc, diff, i, j, run;

@@ -112,6 +121,7 @@ static inline int mpeg1_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
 break;
 } else if (level != 0) {
 i += run;
+check_scantable_index(s, i);
 j = scantable[i];
 level = (level * qscale * quant_matrix[j]) >> 4;
 level = (level - 1) | 1;

@@ -128,6 +138,7 @@ static inline int mpeg1_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
 level = SHOW_UBITS(re, &s->gb, 8) ; LAST_SKIP_BITS(re, &s->gb, 8);
 }
 i += run;
+check_scantable_index(s, i);
 j = scantable[i];
 if (level < 0) {
 level = -level;

@@ -139,10 +150,6 @@ static inline int mpeg1_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
 level = (level - 1) | 1;
 }
 }
-if (i > 63) {
-av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
-return -1;
-}

 block[j] = level;
 }

@@ -267,6 +274,7 @@ static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *bloc

 if (level != 0) {
 i += run;
+check_scantable_index(s, i);
 j = scantable[i];
 level = ((level * 2 + 1) * qscale) >> 1;
 level = (level - 1) | 1;

@@ -283,6 +291,7 @@ static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *bloc
 level = SHOW_UBITS(re, &s->gb, 8) ; SKIP_BITS(re, &s->gb, 8);
 }
 i += run;
+check_scantable_index(s, i);
 j = scantable[i];
 if (level < 0) {
 level = -level;

@@ -348,6 +357,7 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, DCTELEM *block

 if (level != 0) {
 i += run;
+check_scantable_index(s, i);
 j = scantable[i];
 level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
 level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);

@@ -359,6 +369,7 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, DCTELEM *block
 level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);

 i += run;
+check_scantable_index(s, i);
 j = scantable[i];
 if (level < 0) {
 level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;

@@ -367,10 +378,6 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, DCTELEM *block
 level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
 }
 }
-if (i > 63) {
-av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
-return -1;
-}

 mismatch ^= level;
 block[j] = level;

@@ -422,6 +429,7 @@ static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,

 if (level != 0) {
 i += run;
+check_scantable_index(s, i);
 j = scantable[i];
 level = ((level * 2 + 1) * qscale) >> 1;
 level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);

@@ -433,6 +441,7 @@ static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
 level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);

 i += run;
+check_scantable_index(s, i);
 j = scantable[i];
 if (level < 0) {
 level = ((-level * 2 + 1) * qscale) >> 1;

@@ -499,6 +508,7 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
 break;
 } else if (level != 0) {
 i += run;
+check_scantable_index(s, i);
 j = scantable[i];
 level = (level * qscale * quant_matrix[j]) >> 4;
 level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);

@@ -509,6 +519,7 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
 UPDATE_CACHE(re, &s->gb);
 level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
 i += run;
+check_scantable_index(s, i);
 j = scantable[i];
 if (level < 0) {
 level = (-level * qscale * quant_matrix[j]) >> 4;

@@ -517,10 +528,6 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
 level = (level * qscale * quant_matrix[j]) >> 4;
 }
 }
-if (i > 63) {
-av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
-return -1;
-}

 mismatch ^= level;
 block[j] = level;

@@ -540,10 +547,10 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
 */
 static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n)
 {
-int level, dc, diff, j, run;
+int level, dc, diff, i, j, run;
 int component;
 RLTable *rl;
-uint8_t * scantable = s->intra_scantable.permutated;
+uint8_t * const scantable = s->intra_scantable.permutated;
 const uint16_t *quant_matrix;
 const int qscale = s->qscale;

@@ -562,6 +569,7 @@ static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *bloc
 dc += diff;
 s->last_dc[component] = dc;
 block[0] = dc << (3 - s->intra_dc_precision);
+i = 0;
 if (s->intra_vlc_format)
 rl = &ff_rl_mpeg2;
 else

@@ -577,8 +585,9 @@ static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *bloc
 if (level == 127) {
 break;
 } else if (level != 0) {
-scantable += run;
-j = *scantable;
+i += run;
+check_scantable_index(s, i);
+j = scantable[i];
 level = (level * qscale * quant_matrix[j]) >> 4;
 level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
 LAST_SKIP_BITS(re, &s->gb, 1);

@@ -587,8 +596,9 @@ static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *bloc
 run = SHOW_UBITS(re, &s->gb, 6) + 1; LAST_SKIP_BITS(re, &s->gb, 6);
 UPDATE_CACHE(re, &s->gb);
 level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
-scantable += run;
-j = *scantable;
+i += run;
+check_scantable_index(s, i);
+j = scantable[i];
 if (level < 0) {
 level = (-level * qscale * quant_matrix[j]) >> 4;
 level = -level;

@@ -602,7 +612,7 @@ static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *bloc
 CLOSE_READER(re, &s->gb);
 }

-s->block_last_index[n] = scantable - s->intra_scantable.permutated;
+s->block_last_index[n] = i;
 return 0;
 }
@@ -25,6 +25,8 @@
 */

 //#define DEBUG
+#include "libavutil/common.h"

 #include "avcodec.h"
 #include "mpegaudio.h"
 #include "mpegaudiodata.h"

@@ -46,6 +48,8 @@ int avpriv_mpegaudio_decode_header(MPADecodeHeader *s, uint32_t header)
 s->layer = 4 - ((header >> 17) & 3);
 /* extract frequency */
 sample_rate_index = (header >> 10) & 3;
+if (sample_rate_index >= FF_ARRAY_ELEMS(avpriv_mpa_freq_tab))
+sample_rate_index = 0;
 sample_rate = avpriv_mpa_freq_tab[sample_rate_index] >> (s->lsf + mpeg25);
 sample_rate_index += 3 * (s->lsf + mpeg25);
 s->sample_rate_index = sample_rate_index;
@@ -1034,6 +1034,9 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
 {
 int i, err = 0;

+if (!s->context_initialized)
+return AVERROR(EINVAL);

 if (s->slice_context_count > 1) {
 for (i = 0; i < s->slice_context_count; i++) {
 free_duplicate_context(s->thread_context[i]);

@@ -1062,8 +1065,8 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
 s->mb_height = (s->height + 15) / 16;

 if ((s->width || s->height) &&
-av_image_check_size(s->width, s->height, 0, s->avctx))
-return AVERROR_INVALIDDATA;
+(err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
+goto fail;

 if ((err = init_context_frame(s)))
 goto fail;

@@ -1079,7 +1082,7 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
 }

 for (i = 0; i < nb_slices; i++) {
-if (init_duplicate_context(s->thread_context[i], s) < 0)
+if ((err = init_duplicate_context(s->thread_context[i], s)) < 0)
 goto fail;
 s->thread_context[i]->start_mb_y =
 (s->mb_height * (i) + nb_slices / 2) / nb_slices;

@@ -1468,7 +1471,11 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 return i;
 }
 s->last_picture_ptr = &s->picture[i];

 s->last_picture_ptr->f.key_frame = 0;
+s->last_picture_ptr->f.reference = 3;
+s->last_picture_ptr->f.pict_type = AV_PICTURE_TYPE_P;

 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
 s->last_picture_ptr = NULL;
 return -1;

@@ -1494,6 +1501,9 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 }
 s->next_picture_ptr = &s->picture[i];
 s->next_picture_ptr->f.key_frame = 0;
+s->next_picture_ptr->f.reference = 3;
+s->next_picture_ptr->f.pict_type = AV_PICTURE_TYPE_P;

 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
 s->next_picture_ptr = NULL;
 return -1;
@@ -910,7 +910,7 @@ int ff_h261_get_picture_format(int width, int height);


 /* rv10.c */
-void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number);
+int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number);
 int ff_rv_decode_dc(MpegEncContext *s, int n);
 void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number);
@@ -411,18 +411,18 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
 switch(avctx->codec_id) {
 case AV_CODEC_ID_MPEG1VIDEO:
 case AV_CODEC_ID_MPEG2VIDEO:
-avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
+avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
 break;
 case AV_CODEC_ID_MPEG4:
 case AV_CODEC_ID_MSMPEG4V1:
 case AV_CODEC_ID_MSMPEG4V2:
 case AV_CODEC_ID_MSMPEG4V3:
 if (avctx->rc_max_rate >= 15000000) {
-avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000);
+avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
 } else if(avctx->rc_max_rate >= 2000000) {
-avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000L) * (320- 80) / (15000000 - 2000000);
+avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
 } else if(avctx->rc_max_rate >= 384000) {
-avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000L) * ( 80- 40) / ( 2000000 - 384000);
+avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
 } else
 avctx->rc_buffer_size = 40;
 avctx->rc_buffer_size *= 16384;

@@ -3447,8 +3447,11 @@ static int encode_picture(MpegEncContext *s, int picture_number)
 ff_msmpeg4_encode_picture_header(s, picture_number);
 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
 ff_mpeg4_encode_picture_header(s, picture_number);
-else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
-ff_rv10_encode_picture_header(s, picture_number);
+else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
+ret = ff_rv10_encode_picture_header(s, picture_number);
+if (ret < 0)
+return ret;
+}
 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
 ff_rv20_encode_picture_header(s, picture_number);
 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
@@ -315,7 +315,7 @@ static void encode_block(NellyMoserEncodeContext *s, unsigned char *output, int

 apply_mdct(s);

-init_put_bits(&pb, output, output_size * 8);
+init_put_bits(&pb, output, output_size);

 i = 0;
 for (band = 0; band < NELLY_BANDS; band++) {
@@ -98,11 +98,10 @@ static const AVOption options[]={
 {"hex", "hex motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = ME_HEX }, INT_MIN, INT_MAX, V|E, "me_method" },
 {"umh", "umh motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = ME_UMH }, INT_MIN, INT_MAX, V|E, "me_method" },
 {"iter", "iter motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = ME_ITER }, INT_MIN, INT_MAX, V|E, "me_method" },
-{"extradata_size", NULL, OFFSET(extradata_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
 {"time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, INT_MIN, INT_MAX},
 {"g", "set the group of picture (GOP) size", OFFSET(gop_size), AV_OPT_TYPE_INT, {.i64 = 12 }, INT_MIN, INT_MAX, V|E},
-{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|D|E},
-{"ac", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|D|E},
+{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
+{"ac", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
 {"cutoff", "set cutoff bandwidth", OFFSET(cutoff), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
 {"frame_size", NULL, OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
 {"frame_number", NULL, OFFSET(frame_number), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
@@ -212,6 +212,13 @@ static int parse_picture_segment(AVCodecContext *avctx,
 /* Decode rle bitmap length, stored size includes width/height data */
 rle_bitmap_len = bytestream_get_be24(&buf) - 2*2;

+if (buf_size > rle_bitmap_len) {
+av_log(avctx, AV_LOG_ERROR,
+"Buffer dimension %d larger than the expected RLE data %d\n",
+buf_size, rle_bitmap_len);
+return AVERROR_INVALIDDATA;
+}

 /* Get bitmap dimensions from data */
 width = bytestream_get_be16(&buf);
 height = bytestream_get_be16(&buf);

@@ -222,11 +229,6 @@ static int parse_picture_segment(AVCodecContext *avctx,
 return -1;
 }

-if (buf_size > rle_bitmap_len) {
-av_log(avctx, AV_LOG_ERROR, "too much RLE data\n");
-return AVERROR_INVALIDDATA;
-}

 ctx->pictures[picture_id].w = width;
 ctx->pictures[picture_id].h = height;
@@ -567,6 +567,12 @@ static int decode_frame(AVCodecContext *avctx,
 case MKTAG('I', 'H', 'D', 'R'):
 if (length != 13)
 goto fail;

+if (s->state & PNG_IDAT) {
+av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n");
+goto fail;
+}

 s->width = bytestream2_get_be32(&s->gb);
 s->height = bytestream2_get_be32(&s->gb);
 if(av_image_check_size(s->width, s->height, 0, avctx)){

@@ -634,7 +640,7 @@ static int decode_frame(AVCodecContext *avctx,
 } else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
 s->color_type == PNG_COLOR_TYPE_PALETTE) {
 avctx->pix_fmt = AV_PIX_FMT_PAL8;
-} else if (s->bit_depth == 1) {
+} else if (s->bit_depth == 1 && s->bits_per_pixel == 1) {
 avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
 } else if (s->bit_depth == 8 &&
 s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {

@@ -841,9 +847,10 @@ static int decode_frame(AVCodecContext *avctx,
 int i, j;
 uint8_t *pd = s->current_picture->data[0];
 uint8_t *pd_last = s->last_picture->data[0];
+int ls = FFMIN(av_image_get_linesize(s->current_picture->format, s->width, 0), s->width * s->bpp);

 for(j=0; j < s->height; j++) {
-for(i=0; i < s->width * s->bpp; i++) {
+for(i=0; i < ls; i++) {
 pd[i] += pd_last[i];
 }
 pd += s->image_linesize;
@@ -1005,7 +1005,7 @@ void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth, const int chrom
 if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
 if (bit_depth == 8) {
 c->h264_idct_add = ff_h264_idct_add_altivec;
-if (chroma_format_idc == 1)
+if (chroma_format_idc <= 1)
 c->h264_idct_add8 = ff_h264_idct_add8_altivec;
 c->h264_idct_add16 = ff_h264_idct_add16_altivec;
 c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
@@ -303,7 +303,7 @@ static int encode_slice_plane(AVCodecContext *avctx, int mb_count,
 }

 blocks_per_slice = mb_count << (2 - chroma);
-init_put_bits(&pb, buf, buf_size << 3);
+init_put_bits(&pb, buf, buf_size);

 encode_dc_coeffs(&pb, blocks, blocks_per_slice, qmat);
 encode_ac_coeffs(avctx, &pb, blocks, blocks_per_slice, qmat);

@@ -455,6 +455,11 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic,
 num_cblocks, plane_factor,
 qmat);
 total_size += sizes[i];
+if (put_bits_left(pb) < 0) {
+av_log(avctx, AV_LOG_ERROR, "Serious underevaluation of"
+"required buffer size");
+return AVERROR_BUFFER_TOO_SMALL;
+}
 }
 return total_size;
 }

@@ -753,9 +758,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
 avctx->coded_frame->key_frame = 1;

-pkt_size = ctx->frame_size_upper_bound + FF_MIN_BUFFER_SIZE;
+pkt_size = ctx->frame_size_upper_bound;

-if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
+if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size + FF_MIN_BUFFER_SIZE)) < 0)
 return ret;

 orig_buf = pkt->data;

@@ -831,8 +836,10 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 bytestream_put_byte(&buf, slice_hdr_size << 3);
 slice_hdr = buf;
 buf += slice_hdr_size - 1;
-init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)) * 8);
-encode_slice(avctx, pic, &pb, sizes, x, y, q, mbs_per_slice);
+init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)));
+ret = encode_slice(avctx, pic, &pb, sizes, x, y, q, mbs_per_slice);
+if (ret < 0)
+return ret;

 bytestream_put_byte(&slice_hdr, q);
 slice_size = slice_hdr_size + sizes[ctx->num_planes - 1];
@@ -163,7 +163,7 @@ static void qpeg_decode_inter(QpegContext *qctx, uint8_t *dst,

 /* check motion vector */
 if ((me_x + filled < 0) || (me_x + me_w + filled > width) ||
-(height - me_y - me_h < 0) || (height - me_y > orig_height) ||
+(height - me_y - me_h < 0) || (height - me_y >= orig_height) ||
 (filled + me_w > width) || (height - me_h < 0))
 av_log(NULL, AV_LOG_ERROR, "Bogus motion vector (%i,%i), block size %ix%i at %i,%i\n",
 me_x, me_y, me_w, me_h, filled, height);
@@ -942,6 +942,8 @@ static int roq_encode_init(AVCodecContext *avctx)

 av_lfg_init(&enc->randctx, 1);

+enc->avctx = avctx;

 enc->framesSinceKeyframe = 0;
 if ((avctx->width & 0xf) || (avctx->height & 0xf)) {
 av_log(avctx, AV_LOG_ERROR, "Dimensions must be divisible by 16\n");
@@ -28,7 +28,7 @@
 #include "mpegvideo.h"
 #include "put_bits.h"

-void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
+int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
 {
 int full_frame= 0;

@@ -48,12 +48,17 @@ void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
 /* if multiple packets per frame are sent, the position at which
 to display the macroblocks is coded here */
 if(!full_frame){
+if (s->mb_width * s->mb_height >= (1U << 12)) {
+av_log_missing_feature(s->avctx, "Encoding frames with 4096 macroblocks or more", 0);
+return AVERROR(ENOSYS);
+}
 put_bits(&s->pb, 6, 0); /* mb_x */
 put_bits(&s->pb, 6, 0); /* mb_y */
 put_bits(&s->pb, 12, s->mb_width * s->mb_height);
 }

 put_bits(&s->pb, 3, 0); /* ignored */
+return 0;
 }

 FF_MPV_GENERIC_CLASS(rv10)
@@ -28,6 +28,7 @@

 typedef struct SgiState {
 AVFrame picture;
+AVCodecContext *avctx;
 unsigned int width;
 unsigned int height;
 unsigned int depth;

@@ -40,15 +41,16 @@ typedef struct SgiState {
 * Expand an RLE row into a channel.
 * @param s the current image state
 * @param out_buf Points to one line after the output buffer.
-* @param out_end end of line in output buffer
+* @param len length of out_buf in bytes
 * @param pixelstride pixel stride of input buffer
 * @return size of output in bytes, -1 if buffer overflows
 */
 static int expand_rle_row(SgiState *s, uint8_t *out_buf,
-uint8_t *out_end, int pixelstride)
+int len, int pixelstride)
 {
 unsigned char pixel, count;
 unsigned char *orig = out_buf;
+uint8_t *out_end = out_buf + len;

 while (out_buf < out_end) {
 if (bytestream2_get_bytes_left(&s->g) < 1)

@@ -59,7 +61,10 @@ static int expand_rle_row(SgiState *s, uint8_t *out_buf,
 }

 /* Check for buffer overflow. */
-if(out_buf + pixelstride * (count-1) >= out_end) return -1;
+if (out_end - out_buf <= pixelstride * (count - 1)) {
+av_log(s->avctx, AV_LOG_ERROR, "Invalid pixel count.\n");
+return AVERROR_INVALIDDATA;
+}

 if (pixel & 0x80) {
 while (count--) {

@@ -103,7 +108,7 @@ static int read_rle_sgi(uint8_t *out_buf, SgiState *s)
 dest_row -= s->linesize;
 start_offset = bytestream2_get_be32(&g_table);
 bytestream2_seek(&s->g, start_offset, SEEK_SET);
-if (expand_rle_row(s, dest_row + z, dest_row + s->width*s->depth,
+if (expand_rle_row(s, dest_row + z, s->width*s->depth,
 s->depth) != s->width) {
 return AVERROR_INVALIDDATA;
 }

@@ -244,6 +249,8 @@ static int decode_frame(AVCodecContext *avctx,
 static av_cold int sgi_init(AVCodecContext *avctx){
 SgiState *s = avctx->priv_data;

+s->avctx = avctx;

 avcodec_get_frame_defaults(&s->picture);
 avctx->coded_frame = &s->picture;
@@ -69,7 +69,7 @@ typedef struct SmcContext {
 row_ptr += stride * 4; \
 } \
 total_blocks--; \
-if (total_blocks < 0) \
+if (total_blocks < !!n_blocks) \
 { \
 av_log(s->avctx, AV_LOG_INFO, "warning: block counter just went negative (this should not happen)\n"); \
 return; \
@@ -650,7 +650,10 @@ static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, i
 if(v){
 v= 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
 v+=get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3*ff_quant3bA[t&0xFF]]);
+if ((uint16_t)v != v) {
+av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
+v = 1;
+}
 xc->x=x;
 (xc++)->coeff= v;
 }

@@ -660,6 +663,10 @@ static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, i
 else run= INT_MAX;
 v= 2*(get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
 v+=get_rac(&s->c, &b->state[0][16 + 1 + 3]);
+if ((uint16_t)v != v) {
+av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
+v = 1;
+}

 xc->x=x;
 (xc++)->coeff= v;

@@ -156,7 +156,7 @@ static int decode_q_branch(SnowContext *s, int level, int x, int y){
 int l = left->color[0];
 int cb= left->color[1];
 int cr= left->color[2];
-int ref = 0;
+unsigned ref = 0;
 int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
 int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx));
 int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my));
@@ -61,6 +61,10 @@ typedef struct SVQ1Context {
 DSPContext dsp;
 GetBitContext gb;
 AVFrame *cur, *prev;

+uint8_t *pkt_swapped;
+int pkt_swapped_allocated;

 int width;
 int height;
 int frame_code;

@@ -496,7 +500,7 @@ static int svq1_decode_delta_block(AVCodecContext *avctx, DSPContext *dsp,
 return result;
 }

-static void svq1_parse_string(GetBitContext *bitbuf, uint8_t *out)
+static void svq1_parse_string(GetBitContext *bitbuf, uint8_t out[257])
 {
 uint8_t seed;
 int i;

@@ -508,6 +512,7 @@ static void svq1_parse_string(GetBitContext *bitbuf, uint8_t *out)
 out[i] = get_bits(bitbuf, 8) ^ seed;
 seed = string_table[out[i] ^ seed];
 }
+out[i] = 0;
 }

 static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)

@@ -550,12 +555,12 @@ static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)
 }

 if ((s->frame_code ^ 0x10) >= 0x50) {
-uint8_t msg[256];
+uint8_t msg[257];

 svq1_parse_string(bitbuf, msg);

 av_log(avctx, AV_LOG_INFO,
-"embedded message: \"%s\"\n", (char *)msg);
+"embedded message: \"%s\"\n", ((char *)msg) + 1);
 }

 skip_bits(bitbuf, 2);

@@ -628,7 +633,24 @@ static int svq1_decode_frame(AVCodecContext *avctx, void *data,

 /* swap some header bytes (why?) */
 if (s->frame_code != 0x20) {
-uint32_t *src = (uint32_t *)(buf + 4);
+uint32_t *src;

+if (buf_size < 9 * 4) {
+av_log(avctx, AV_LOG_ERROR, "Input packet too small\n");
+return AVERROR_INVALIDDATA;
+}

+av_fast_padded_malloc(&s->pkt_swapped, &s->pkt_swapped_allocated,
+buf_size);
+if (!s->pkt_swapped)
+return AVERROR(ENOMEM);

+memcpy(s->pkt_swapped, buf, buf_size);
+buf = s->pkt_swapped;
+init_get_bits(&s->gb, buf, buf_size * 8);
+skip_bits(&s->gb, 22);

+src = (uint32_t *)(s->pkt_swapped + 4);

 if (buf_size < 36)
 return AVERROR_INVALIDDATA;

@@ -804,6 +826,8 @@ static av_cold int svq1_decode_end(AVCodecContext *avctx)
 avctx->release_buffer(avctx, s->prev);
 avcodec_free_frame(&s->cur);
 avcodec_free_frame(&s->prev);
+av_freep(&s->pkt_swapped);
+s->pkt_swapped_allocated = 0;

 return 0;
 }
|
@@ -409,6 +409,7 @@ static void av_always_inline horizontal_fill(unsigned int bpp, uint8_t* dst,
|
|||||||
static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
|
static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
|
||||||
const uint8_t *src, int size, int lines)
|
const uint8_t *src, int size, int lines)
|
||||||
{
|
{
|
||||||
|
PutByteContext pb;
|
||||||
int c, line, pixels, code;
|
int c, line, pixels, code;
|
||||||
const uint8_t *ssrc = src;
|
const uint8_t *ssrc = src;
|
||||||
int width = ((s->width * s->bpp) + 7) >> 3;
|
int width = ((s->width * s->bpp) + 7) >> 3;
|
||||||
@@ -479,6 +480,18 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
|
|||||||
av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
|
av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
for (line = 0; line < lines; line++) {
|
||||||
|
pixels = ff_lzw_decode(s->lzw, dst, width);
|
||||||
|
if (pixels < width) {
|
||||||
|
av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
|
||||||
|
pixels, width);
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
|
||||||
|
horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0);
|
||||||
|
dst += stride;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
if (s->compr == TIFF_CCITT_RLE || s->compr == TIFF_G3
|
if (s->compr == TIFF_CCITT_RLE || s->compr == TIFF_G3
|
||||||
|| s->compr == TIFF_G4) {
|
|| s->compr == TIFF_G4) {
|
||||||
@@ -520,15 +533,24 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
|
|||||||
av_free(src2);
|
av_free(src2);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bytestream2_init(&s->gb, src, size);
|
||||||
|
bytestream2_init_writer(&pb, dst, stride * lines);
|
||||||
|
|
||||||
for (line = 0; line < lines; line++) {
|
for (line = 0; line < lines; line++) {
|
||||||
if (src - ssrc > size) {
|
if (src - ssrc > size) {
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
|
av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
|
||||||
|
break;
|
||||||
|
bytestream2_seek_p(&pb, stride * line, SEEK_SET);
|
||||||
switch (s->compr) {
|
switch (s->compr) {
|
||||||
case TIFF_RAW:
|
case TIFF_RAW:
|
||||||
if (ssrc + size - src < width)
|
if (ssrc + size - src < width)
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
if (!s->fill_order) {
|
if (!s->fill_order) {
|
||||||
horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
|
horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
|
||||||
dst, 1, src, 0, width, 0);
|
dst, 1, src, 0, width, 0);
|
||||||
@@ -572,16 +594,6 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case TIFF_LZW:
|
|
||||||
pixels = ff_lzw_decode(s->lzw, dst, width);
|
|
||||||
if (pixels < width) {
|
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
|
|
||||||
pixels, width);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
|
|
||||||
horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0);
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
dst += stride;
|
dst += stride;
|
||||||
}
|
}
|
||||||
@@ -593,6 +605,14 @@ static int init_image(TiffContext *s)
|
|||||||
int i, ret;
|
int i, ret;
|
||||||
uint32_t *pal;
|
uint32_t *pal;
|
||||||
|
|
||||||
|
// make sure there is no aliasing in the following switch
|
||||||
|
if (s->bpp >= 100 || s->bppcount >= 10) {
|
||||||
|
av_log(s->avctx, AV_LOG_ERROR,
|
||||||
|
"Unsupported image parameters: bpp=%d, bppcount=%d\n",
|
||||||
|
s->bpp, s->bppcount);
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
switch (s->bpp * 10 + s->bppcount) {
|
switch (s->bpp * 10 + s->bppcount) {
|
||||||
case 11:
|
case 11:
|
||||||
if (!s->palette_is_set) {
|
if (!s->palette_is_set) {
|
||||||
@@ -655,7 +675,8 @@ static int init_image(TiffContext *s)
|
|||||||
static int tiff_decode_tag(TiffContext *s)
|
static int tiff_decode_tag(TiffContext *s)
|
||||||
{
|
{
|
||||||
unsigned tag, type, count, off, value = 0;
|
unsigned tag, type, count, off, value = 0;
|
||||||
int i, j, k, pos, start;
|
int i, start;
|
||||||
|
int j, k, pos;
|
||||||
int ret;
|
int ret;
|
||||||
uint32_t *pal;
|
uint32_t *pal;
|
||||||
double *dp;
|
double *dp;
|
||||||
@@ -707,13 +728,13 @@ static int tiff_decode_tag(TiffContext *s)
|
|||||||
s->height = value;
|
s->height = value;
|
||||||
break;
|
break;
|
||||||
case TIFF_BPP:
|
case TIFF_BPP:
|
||||||
s->bppcount = count;
|
if (count > 4U) {
|
||||||
if (count > 4) {
|
|
||||||
av_log(s->avctx, AV_LOG_ERROR,
|
av_log(s->avctx, AV_LOG_ERROR,
|
||||||
"This format is not supported (bpp=%d, %d components)\n",
|
"This format is not supported (bpp=%d, %d components)\n",
|
||||||
s->bpp, count);
|
value, count);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
s->bppcount = count;
|
||||||
if (count == 1)
|
if (count == 1)
|
||||||
s->bpp = value;
|
s->bpp = value;
|
||||||
else {
|
else {
|
||||||
@@ -797,27 +818,17 @@ static int tiff_decode_tag(TiffContext *s)
|
|||||||
if (s->strips == 1)
|
if (s->strips == 1)
|
||||||
s->rps = s->height;
|
s->rps = s->height;
|
||||||
s->sot = type;
|
s->sot = type;
|
||||||
if (s->strippos > bytestream2_size(&s->gb)) {
|
|
||||||
av_log(s->avctx, AV_LOG_ERROR,
|
|
||||||
"Tag referencing position outside the image\n");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
break;
|
break;
|
||||||
case TIFF_STRIP_SIZE:
|
case TIFF_STRIP_SIZE:
|
||||||
if (count == 1) {
|
if (count == 1) {
|
||||||
s->stripsizesoff = 0;
|
s->stripsizesoff = 0;
|
||||||
s->stripsize = value;
|
s->stripsize = value;
|
||||||
s->strips = 1;
|
s->strips = 1;
|
||||||
} else {
|
} else {
|
||||||
s->stripsizesoff = off;
|
s->stripsizesoff = off;
|
||||||
}
|
}
|
||||||
s->strips = count;
|
s->strips = count;
|
||||||
s->sstype = type;
|
s->sstype = type;
|
||||||
if (s->stripsizesoff > bytestream2_size(&s->gb)) {
|
|
||||||
av_log(s->avctx, AV_LOG_ERROR,
|
|
||||||
"Tag referencing position outside the image\n");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
break;
|
break;
|
||||||
case TIFF_TILE_BYTE_COUNTS:
|
case TIFF_TILE_BYTE_COUNTS:
|
||||||
case TIFF_TILE_LENGTH:
|
case TIFF_TILE_LENGTH:
|
||||||
@@ -854,11 +865,13 @@ static int tiff_decode_tag(TiffContext *s)
|
|||||||
}
|
}
|
||||||
s->fill_order = value - 1;
|
s->fill_order = value - 1;
|
||||||
break;
|
break;
|
||||||
case TIFF_PAL:
|
case TIFF_PAL: {
|
||||||
pal = (uint32_t *) s->palette;
|
pal = (uint32_t *) s->palette;
|
||||||
off = type_sizes[type];
|
off = type_sizes[type];
|
||||||
if (count / 3 > 256 || bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
|
if (count / 3 > 256 ||
|
||||||
|
bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
off = (type_sizes[type] - 1) << 3;
|
off = (type_sizes[type] - 1) << 3;
|
||||||
for (k = 2; k >= 0; k--) {
|
for (k = 2; k >= 0; k--) {
|
||||||
for (i = 0; i < count / 3; i++) {
|
for (i = 0; i < count / 3; i++) {
|
||||||
@@ -870,6 +883,7 @@ static int tiff_decode_tag(TiffContext *s)
|
|||||||
}
|
}
|
||||||
s->palette_is_set = 1;
|
s->palette_is_set = 1;
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
case TIFF_PLANAR:
|
case TIFF_PLANAR:
|
||||||
if (value == 2) {
|
if (value == 2) {
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "Planar format is not supported\n");
|
av_log(s->avctx, AV_LOG_ERROR, "Planar format is not supported\n");
|
||||||
@@ -1016,6 +1030,13 @@ static int tiff_decode_tag(TiffContext *s)
|
|||||||
av_log(s->avctx, AV_LOG_DEBUG, "Unknown or unsupported tag %d/0X%0X\n",
|
av_log(s->avctx, AV_LOG_DEBUG, "Unknown or unsupported tag %d/0X%0X\n",
|
||||||
tag, tag);
|
tag, tag);
|
||||||
}
|
}
|
||||||
|
if (s->bpp > 64U) {
|
||||||
|
av_log(s->avctx, AV_LOG_ERROR,
|
||||||
|
"This format is not supported (bpp=%d, %d components)\n",
|
||||||
|
s->bpp, count);
|
||||||
|
s->bpp = 0;
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
bytestream2_seek(&s->gb, start, SEEK_SET);
|
bytestream2_seek(&s->gb, start, SEEK_SET);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@@ -1119,12 +1140,14 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
if (s->stripsizesoff) {
|
if (s->stripsizesoff) {
|
||||||
if (s->stripsizesoff >= (unsigned)avpkt->size)
|
if (s->stripsizesoff >= (unsigned)avpkt->size)
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff, avpkt->size - s->stripsizesoff);
|
bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
|
||||||
|
avpkt->size - s->stripsizesoff);
|
||||||
}
|
}
|
||||||
if (s->strippos) {
|
if (s->strippos) {
|
||||||
if (s->strippos >= (unsigned)avpkt->size)
|
if (s->strippos >= (unsigned)avpkt->size)
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
bytestream2_init(&stripdata, avpkt->data + s->strippos, avpkt->size - s->strippos);
|
bytestream2_init(&stripdata, avpkt->data + s->strippos,
|
||||||
|
avpkt->size - s->strippos);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (s->rps <= 0) {
|
if (s->rps <= 0) {
|
||||||
@@ -1134,12 +1157,12 @@ static int decode_frame(AVCodecContext *avctx,
|
|||||||
|
|
||||||
for (i = 0; i < s->height; i += s->rps) {
|
for (i = 0; i < s->height; i += s->rps) {
|
||||||
if (s->stripsizesoff)
|
if (s->stripsizesoff)
|
||||||
ssize = tget(&stripsizes, s->sstype, s->le);
|
ssize = tget(&stripsizes, s->sstype, le);
|
||||||
else
|
else
|
||||||
ssize = s->stripsize;
|
ssize = s->stripsize;
|
||||||
|
|
||||||
if (s->strippos)
|
if (s->strippos)
|
||||||
soff = tget(&stripdata, s->sot, s->le);
|
soff = tget(&stripdata, s->sot, le);
|
||||||
else
|
else
|
||||||
soff = s->stripoff;
|
soff = s->stripoff;
|
||||||
|
|
||||||
|
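The `init_image()` hunk above rejects oversized `bpp`/`bppcount` values before dispatching on the composite key `s->bpp * 10 + s->bppcount`, since unbounded values could alias a different case of that key. A small self-contained illustration of why the bound matters; the function name and case values here are made up for the example.

```c
#include <stdio.h>

/* Dispatch key is bpp * 10 + bppcount, so e.g. bpp=11,count=13 (key 123)
 * would collide with bpp=12,count=3 (also 123) unless both ranges are
 * bounded before the switch. */
static int pick_format(int bpp, int bppcount)
{
    if (bpp >= 100 || bppcount >= 10)   /* reject before the keys can alias */
        return -1;

    switch (bpp * 10 + bppcount) {
    case 11:  return 1;   /*  1 bpp, 1 component  */
    case 81:  return 8;   /*  8 bpp, 1 component  */
    case 243: return 24;  /* 24 bpp, 3 components */
    default:  return -1;
    }
}

int main(void)
{
    printf("%d %d\n", pick_format(8, 1), pick_format(121, 1));
    return 0;
}
```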
@@ -183,6 +183,12 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
     int i;
     int w_align = 1;
     int h_align = 1;
+    AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->pix_fmt);
+
+    if (desc) {
+        w_align = 1 << desc->log2_chroma_w;
+        h_align = 1 << desc->log2_chroma_h;
+    }

     switch (s->pix_fmt) {
     case AV_PIX_FMT_YUV420P:
@@ -234,13 +240,15 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
     case AV_PIX_FMT_GBRP12BE:
     case AV_PIX_FMT_GBRP14LE:
     case AV_PIX_FMT_GBRP14BE:
+    case AV_PIX_FMT_GBRP16LE:
+    case AV_PIX_FMT_GBRP16BE:
         w_align = 16; //FIXME assume 16 pixel per macroblock
         h_align = 16 * 2; // interlaced needs 2 macroblocks height
         break;
     case AV_PIX_FMT_YUV411P:
     case AV_PIX_FMT_UYYVYY411:
         w_align = 32;
-        h_align = 8;
+        h_align = 16 * 2;
         break;
     case AV_PIX_FMT_YUV410P:
         if (s->codec_id == AV_CODEC_ID_SVQ1) {
@@ -261,6 +269,10 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
             w_align = 4;
             h_align = 4;
         }
+        if (s->codec_id == AV_CODEC_ID_JV) {
+            w_align = 8;
+            h_align = 8;
+        }
         break;
     case AV_PIX_FMT_BGR24:
         if ((s->codec_id == AV_CODEC_ID_MSZH) ||
@@ -270,8 +282,6 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
         }
         break;
     default:
-        w_align = 1;
-        h_align = 1;
         break;
     }

@@ -540,6 +550,11 @@ void ff_init_buffer_info(AVCodecContext *s, AVFrame *frame)

 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame)
 {
+    if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
+        if (av_image_check_size(avctx->width, avctx->height, 0, avctx))
+            return AVERROR_INVALIDDATA;
+    }
+
     ff_init_buffer_info(avctx, frame);

     return avctx->get_buffer(avctx, frame);
@@ -2698,6 +2713,11 @@ int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf)
     ret = av_bprint_finalize(buf, &str);
     if (ret < 0)
         return ret;
+    if (!av_bprint_is_complete(buf)) {
+        av_free(str);
+        return AVERROR(ENOMEM);
+    }
+
     avctx->extradata = str;
     /* Note: the string is NUL terminated (so extradata can be read as a
      * string), but the ending character is not accounted in the size (in

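The `avcodec_align_dimensions2()` hunk seeds the alignment from the pixel format's chroma subsampling (`1 << log2_chroma_w/h`) before the per-format switch refines it. A short sketch of that arithmetic with the 4:2:0 factors hard-coded instead of queried through `av_pix_fmt_desc_get()`:

```c
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    /* 4:2:0 halves the chroma planes in both directions:
     * log2_chroma_w = 1, log2_chroma_h = 1. */
    int log2_chroma_w = 1, log2_chroma_h = 1;

    /* Minimum alignment so the chroma dimensions stay integral. */
    int w_align = 1 << log2_chroma_w;   /* 2 */
    int h_align = 1 << log2_chroma_h;   /* 2 */

    int width = 1919, height = 1081;
    printf("%dx%d -> %dx%d\n", width, height,
           ALIGN_UP(width, w_align), ALIGN_UP(height, h_align));
    return 0;
}
```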
@@ -212,6 +212,8 @@ static void restore_median(uint8_t *src, int step, int stride,
         slice_height = ((((slice + 1) * height) / slices) & cmask) -
                        slice_start;

+        if (!slice_height)
+            continue;
         bsrc = src + slice_start * stride;

         // first line - left neighbour prediction
@@ -222,7 +224,7 @@ static void restore_median(uint8_t *src, int step, int stride,
             A = bsrc[i];
         }
         bsrc += stride;
-        if (slice_height == 1)
+        if (slice_height <= 1)
             continue;
         // second line - first element has top prediction, the rest uses median
         C = bsrc[-stride];
@@ -267,6 +269,8 @@ static void restore_median_il(uint8_t *src, int step, int stride,
         slice_height = ((((slice + 1) * height) / slices) & cmask) -
                        slice_start;
         slice_height >>= 1;
+        if (!slice_height)
+            continue;

         bsrc = src + slice_start * stride;

@@ -282,7 +286,7 @@ static void restore_median_il(uint8_t *src, int step, int stride,
             A = bsrc[stride + i];
         }
         bsrc += stride2;
-        if (slice_height == 1)
+        if (slice_height <= 1)
             continue;
         // second line - first element has top prediction, the rest uses median
         C = bsrc[-stride2];

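The `restore_median()` hunks skip slices whose computed height is zero; with more slices than image rows the integer partition genuinely produces empty slices, which the added `if (!slice_height) continue;` guards against. A plain-C illustration of that partition (the `cmask` masking used for interlaced content is omitted here):

```c
#include <stdio.h>

int main(void)
{
    int height = 5, slices = 8;

    for (int slice = 0; slice < slices; slice++) {
        int slice_start  = slice * height / slices;
        int slice_height = (slice + 1) * height / slices - slice_start;
        /* Several slices come out empty when slices > height. */
        printf("slice %d: start=%d height=%d\n",
               slice, slice_start, slice_height);
    }
    return 0;
}
```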
@@ -340,8 +340,12 @@ static int vmd_decode(VmdVideoContext *s)
                    len = rle_unpack(gb.buffer, &dp[ofs],
                                     len, bytestream2_get_bytes_left(&gb),
                                     frame_width - ofs);
-                else
+                else {
+                    if (ofs + len > frame_width ||
+                        bytestream2_get_bytes_left(&gb) < len)
+                        return AVERROR_INVALIDDATA;
                    bytestream2_get_buffer(&gb, &dp[ofs], len);
+                }
                bytestream2_skip(&gb, len);
            } else {
                /* interframe pixel copy */

@@ -398,6 +398,10 @@ static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb,
     return sign ? ~ret : ret;

 error:
+    ret = get_bits_left(gb);
+    if (ret <= 0) {
+        av_log(ctx->avctx, AV_LOG_ERROR, "Too few bits (%d) left\n", ret);
+    }
     *last = 1;
     return 0;
 }

@@ -382,9 +382,9 @@ int ff_wma_end(AVCodecContext *avctx)
     }
     for (i = 0; i < 2; i++) {
         ff_free_vlc(&s->coef_vlc[i]);
-        av_free(s->run_table[i]);
-        av_free(s->level_table[i]);
-        av_free(s->int_table[i]);
+        av_freep(&s->run_table[i]);
+        av_freep(&s->level_table[i]);
+        av_freep(&s->int_table[i]);
     }

     return 0;

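The `ff_wma_end()` hunk replaces `av_free()` with `av_freep()`, which also clears the pointer stored in the context so repeated cleanup cannot double-free or read a stale address. A tiny standalone equivalent of the pattern; `freep` below is a local stand-in for illustration, not libavutil's implementation.

```c
#include <stdlib.h>

/* Free the buffer *and* clear the owner's pointer, so repeated cleanup
 * calls and later NULL checks behave predictably. */
static void freep(void *ptr)
{
    void **p = ptr;
    free(*p);
    *p = NULL;
}

int main(void)
{
    int *table = malloc(16 * sizeof(*table));
    freep(&table);   /* table == NULL afterwards        */
    freep(&table);   /* harmless: free(NULL) is a no-op */
    return table != NULL;
}
```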
@@ -419,6 +419,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
             offset &= ~3;
             if (offset > s->sfb_offsets[i][band - 1])
                 s->sfb_offsets[i][band++] = offset;
+
+            if (offset >= subframe_len)
+                break;
         }
         s->sfb_offsets[i][band - 1] = subframe_len;
         s->num_sfb[i] = band - 1;

@@ -187,7 +187,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
     if (EXTERNAL_MMX(mm_flags)) {
         h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_8_mmx;
         h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_8_mmx;
-        if (chroma_format_idc == 1) {
+        if (chroma_format_idc <= 1) {
             h->pred8x8 [VERT_PRED8x8 ] = ff_pred8x8_vertical_8_mmx;
             h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_8_mmx;
         }
@@ -196,7 +196,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
             h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_mmx;
             h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_mmx;
         } else {
-            if (chroma_format_idc == 1)
+            if (chroma_format_idc <= 1)
                 h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_8_mmx;
             if (codec_id == AV_CODEC_ID_SVQ3) {
                 if (mm_flags & AV_CPU_FLAG_CMOV)
@@ -212,7 +212,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
     if (EXTERNAL_MMXEXT(mm_flags)) {
         h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_8_mmxext;
         h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_8_mmxext;
-        if (chroma_format_idc == 1)
+        if (chroma_format_idc <= 1)
             h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_horizontal_8_mmxext;
         h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_8_mmxext;
         h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_8_mmxext;
@@ -237,7 +237,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
             h->pred4x4 [HOR_UP_PRED ] = ff_pred4x4_horizontal_up_8_mmxext;
         }
         if (codec_id == AV_CODEC_ID_SVQ3 || codec_id == AV_CODEC_ID_H264) {
-            if (chroma_format_idc == 1) {
+            if (chroma_format_idc <= 1) {
                 h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_8_mmxext;
                 h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_8_mmxext;
             }
@@ -249,7 +249,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
             h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_mmxext;
             h->pred4x4 [VERT_PRED ] = ff_pred4x4_vertical_vp8_8_mmxext;
         } else {
-            if (chroma_format_idc == 1)
+            if (chroma_format_idc <= 1)
                 h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_8_mmxext;
             if (codec_id == AV_CODEC_ID_SVQ3) {
                 h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_svq3_8_mmxext;
@@ -276,7 +276,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
             h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_sse2;
             h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_sse2;
         } else {
-            if (chroma_format_idc == 1)
+            if (chroma_format_idc <= 1)
                 h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_8_sse2;
             if (codec_id == AV_CODEC_ID_SVQ3) {
                 h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_svq3_8_sse2;
@@ -291,7 +291,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
     if (EXTERNAL_SSSE3(mm_flags)) {
         h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_8_ssse3;
         h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_8_ssse3;
-        if (chroma_format_idc == 1)
+        if (chroma_format_idc <= 1)
             h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_8_ssse3;
         h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_8_ssse3;
         h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_8_ssse3;
@@ -307,7 +307,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
             h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_ssse3;
             h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_ssse3;
         } else {
-            if (chroma_format_idc == 1)
+            if (chroma_format_idc <= 1)
                 h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_8_ssse3;
             if (codec_id == AV_CODEC_ID_SVQ3) {
                 h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_svq3_8_ssse3;
@@ -323,7 +323,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
             h->pred4x4[DC_PRED ] = ff_pred4x4_dc_10_mmxext;
             h->pred4x4[HOR_UP_PRED ] = ff_pred4x4_horizontal_up_10_mmxext;

-            if (chroma_format_idc == 1)
+            if (chroma_format_idc <= 1)
                 h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_mmxext;

             h->pred8x8l[DC_128_PRED ] = ff_pred8x8l_128_dc_10_mmxext;
@@ -342,7 +342,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
             h->pred4x4[VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_10_sse2;
             h->pred4x4[HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_10_sse2;

-            if (chroma_format_idc == 1) {
+            if (chroma_format_idc <= 1) {
                 h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_sse2;
                 h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_10_sse2;
                 h->pred8x8[PLANE_PRED8x8 ] = ff_pred8x8_plane_10_sse2;

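The intra-prediction init hunks widen `chroma_format_idc == 1` to `<= 1`, so the monochrome case (idc 0) goes through the same table setup as 4:2:0; presumably those function pointers must still be populated even when no chroma is coded. The sketch below only shows the idc-to-sampling mapping defined by the H.264 spec, which is what the comparison is keyed on:

```c
#include <stdio.h>

/* chroma_format_idc as signalled in the H.264 SPS. */
static const char *chroma_name(int idc)
{
    switch (idc) {
    case 0:  return "monochrome";
    case 1:  return "4:2:0";
    case 2:  return "4:2:2";
    case 3:  return "4:4:4";
    default: return "invalid";
    }
}

int main(void)
{
    for (int idc = 0; idc <= 3; idc++)
        /* "<= 1" groups monochrome with 4:2:0; "== 1" would exclude it. */
        printf("idc %d (%s): matches 'chroma_format_idc <= 1': %s\n",
               idc, chroma_name(idc), idc <= 1 ? "yes" : "no");
    return 0;
}
```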
@@ -213,7 +213,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
 #if HAVE_YASM
     int mm_flags = av_get_cpu_flags();

-    if (chroma_format_idc == 1 && EXTERNAL_MMXEXT(mm_flags))
+    if (chroma_format_idc <= 1 && EXTERNAL_MMXEXT(mm_flags))
         c->h264_loop_filter_strength = ff_h264_loop_filter_strength_mmxext;

     if (bit_depth == 8) {
@@ -225,7 +225,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,

             c->h264_idct_add16 = ff_h264_idct_add16_8_mmx;
             c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx;
-            if (chroma_format_idc == 1)
+            if (chroma_format_idc <= 1)
                 c->h264_idct_add8 = ff_h264_idct_add8_8_mmx;
             c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmx;
             if (mm_flags & AV_CPU_FLAG_CMOV)
@@ -236,13 +236,13 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
                 c->h264_idct8_dc_add = ff_h264_idct8_dc_add_8_mmxext;
                 c->h264_idct_add16 = ff_h264_idct_add16_8_mmxext;
                 c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmxext;
-                if (chroma_format_idc == 1)
+                if (chroma_format_idc <= 1)
                     c->h264_idct_add8 = ff_h264_idct_add8_8_mmxext;
                 c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmxext;

                 c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_8_mmxext;
                 c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_8_mmxext;
-                if (chroma_format_idc == 1) {
+                if (chroma_format_idc <= 1) {
                     c->h264_h_loop_filter_chroma = ff_deblock_h_chroma_8_mmxext;
                     c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma_intra_8_mmxext;
                 }
@@ -265,7 +265,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,

             c->h264_idct_add16 = ff_h264_idct_add16_8_sse2;
             c->h264_idct8_add4 = ff_h264_idct8_add4_8_sse2;
-            if (chroma_format_idc == 1)
+            if (chroma_format_idc <= 1)
                 c->h264_idct_add8 = ff_h264_idct_add8_8_sse2;
             c->h264_idct_add16intra = ff_h264_idct_add16intra_8_sse2;
             c->h264_luma_dc_dequant_idct = ff_h264_luma_dc_dequant_idct_sse2;
@@ -310,7 +310,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
             c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_sse2;

             c->h264_idct_add16 = ff_h264_idct_add16_10_sse2;
-            if (chroma_format_idc == 1)
+            if (chroma_format_idc <= 1)
                 c->h264_idct_add8 = ff_h264_idct_add8_10_sse2;
             c->h264_idct_add16intra = ff_h264_idct_add16intra_10_sse2;
 #if HAVE_ALIGNED_STACK
@@ -350,7 +350,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
             c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_avx;

             c->h264_idct_add16 = ff_h264_idct_add16_10_avx;
-            if (chroma_format_idc == 1)
+            if (chroma_format_idc <= 1)
                 c->h264_idct_add8 = ff_h264_idct_add8_10_avx;
             c->h264_idct_add16intra = ff_h264_idct_add16intra_10_avx;
 #if HAVE_ALIGNED_STACK

@@ -344,7 +344,7 @@ DECLARE_ASM_CONST(16, int32_t, walkenIdctRounders)[] = {
     "movdqa %%xmm6, 4*16("dct") \n\t" \
     "movdqa "SREG2", 7*16("dct") \n\t"

-inline void ff_idct_xvid_sse2(short *block)
+av_extern_inline void ff_idct_xvid_sse2(short *block)
 {
     __asm__ volatile(
     "movq "MANGLE(m127)", %%mm0 \n\t"

@@ -129,8 +129,8 @@ static void mlp_filter_channel_x86(int32_t *state, const int32_t *coeff,
         FIRMUL (ff_mlp_firorder_6, 0x14 )
         FIRMUL (ff_mlp_firorder_5, 0x10 )
         FIRMUL (ff_mlp_firorder_4, 0x0c )
-        FIRMULREG(ff_mlp_firorder_3, 0x08,10)
-        FIRMULREG(ff_mlp_firorder_2, 0x04, 9)
+        FIRMUL (ff_mlp_firorder_3, 0x08 )
+        FIRMUL (ff_mlp_firorder_2, 0x04 )
         FIRMULREG(ff_mlp_firorder_1, 0x00, 8)
         LABEL_MANGLE(ff_mlp_firorder_0)":\n\t"
         "jmp *%6 \n\t"
@@ -159,8 +159,6 @@ static void mlp_filter_channel_x86(int32_t *state, const int32_t *coeff,
         : /* 4*/"r"((x86_reg)mask), /* 5*/"r"(firjump),
           /* 6*/"r"(iirjump) , /* 7*/"c"(filter_shift)
         , /* 8*/"r"((int64_t)coeff[0])
-        , /* 9*/"r"((int64_t)coeff[1])
-        , /*10*/"r"((int64_t)coeff[2])
         : "rax", "rdx", "rsi"
 #else /* ARCH_X86_32 */
         /* 3*/"+m"(blocksize)

@@ -216,7 +216,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
         "psubusw "MM"1, "MM"4 \n\t"
         "packuswb "MM"4, "MM"4 \n\t"
 #if COMPILE_TEMPLATE_SSE2
-        "packuswb "MM"4, "MM"4 \n\t"
+        "packsswb "MM"4, "MM"4 \n\t"
 #endif
         "movd "MM"4, %0 \n\t" // *overflow
         : "=g" (*overflow)

@@ -418,11 +418,16 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
     }

     /* parse header */
+    if (len < 1)
+        return AVERROR_INVALIDDATA;
     c->flags = buf[0];
     buf++; len--;
     if (c->flags & ZMBV_KEYFRAME) {
         void *decode_intra = NULL;
         c->decode_intra= NULL;
+
+        if (len < 6)
+            return AVERROR_INVALIDDATA;
         hi_ver = buf[0];
         lo_ver = buf[1];
         c->comp = buf[2];

@@ -31,6 +31,7 @@

 #include "libavformat/avformat.h"
 #include "libavformat/internal.h"
+#include "libavutil/time.h"
 #include "libavutil/opt.h"

 #define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)
@@ -47,6 +48,7 @@ typedef struct PulseData {
     pa_simple *s;
     int64_t pts;
     int64_t frame_duration;
+    int wallclock;
 } PulseData;

 static pa_sample_format_t codec_id_to_pulse_format(int codec_id) {
@@ -141,6 +143,8 @@ static int pulse_read_packet(AVFormatContext *s, AVPacket *pkt)

     if (pd->pts == AV_NOPTS_VALUE) {
         pd->pts = -latency;
+        if (pd->wallclock)
+            pd->pts += av_gettime();
     }

     pkt->pts = pd->pts;
@@ -168,6 +172,7 @@ static const AVOption options[] = {
     { "channels", "number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, D },
     { "frame_size", "number of bytes per frame", OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, D },
     { "fragment_size", "buffering size, affects latency and cpu usage", OFFSET(fragment_size), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, D },
+    { "wallclock", "set the initial pts using the current time", OFFSET(wallclock), AV_OPT_TYPE_INT, {.i64 = 1}, -1, 1, D },
     { NULL },
 };

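The pulse hunks add a `wallclock` option that seeds the first pts with the current time minus the reported capture latency, instead of starting at `-latency` alone. A hedged sketch of that arithmetic in plain C; `gettimeofday` stands in for `av_gettime()`, which also returns microseconds.

```c
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

/* Microseconds since the Unix epoch, the same unit av_gettime() uses. */
static int64_t now_us(void)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
}

int main(void)
{
    int64_t latency_us = 20000;   /* e.g. 20 ms reported by the source */
    int     wallclock  = 1;       /* the new option, enabled by default */

    /* First packet: the samples being delivered now were captured
     * `latency_us` ago, so the stream starts slightly in the past. */
    int64_t pts = -latency_us;
    if (wallclock)
        pts += now_us();

    printf("initial pts: %lld us\n", (long long)pts);
    return 0;
}
```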
@@ -196,7 +196,7 @@ static av_cold int channelmap_init(AVFilterContext *ctx, const char *args)
             s->map[i].out_channel_idx = i;
             break;
         case MAP_ONE_STR:
-            if (!get_channel(&mapping, &in_ch, ',')) {
+            if (get_channel(&mapping, &in_ch, ',') < 0) {
                 av_log(ctx, AV_LOG_ERROR, err);
                 ret = AVERROR(EINVAL);
                 goto fail;

@@ -275,6 +275,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
         }
     }

+    emms_c();
+
     if (buf != out_buf)
         avfilter_unref_buffer(buf);

@@ -298,13 +298,6 @@ static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref)
     }

     res = av_expr_eval(select->expr, select->var_values, NULL);
-    av_log(inlink->dst, AV_LOG_DEBUG,
-           "n:%d pts:%d t:%f pos:%d key:%d",
-           (int)select->var_values[VAR_N],
-           (int)select->var_values[VAR_PTS],
-           select->var_values[VAR_T],
-           (int)select->var_values[VAR_POS],
-           (int)select->var_values[VAR_KEY]);

     switch (inlink->type) {
     case AVMEDIA_TYPE_VIDEO:

Some files were not shown because too many files have changed in this diff.