Compare commits
178 Commits
SHA1:
5bed920971 705e89d75f ef688e7425 02d1efdd5b 469cb61193 a642be972d 80ddf7889e 4be63111d1
6626a7df53 ab434bf0d0 52b18c1fde 0b2b8ab979 65bf4c9c45 7c40a0449b 811a504c6b 75211f2b8c
f6687bbb64 1400f1a1e4 1ea5bbc594 f5955d9f6f e14564b926 0f5a0a4155 7acfa7758c 56b6909b39
c6f59b95c5 d61c6ebccf b9a287f237 6407800521 6c62098827 a23d6ea1e4 e492818d89 1ca25bc387
057051b848 71fee2ab1e 7d3e217623 2ac6b573a4 7f8846405e 81bcf9454e 5a3c8f95d5 358e4081ed
6baaaa0174 9e3e11a348 1d20d975aa e67491a2a4 e1a86b1433 5310da7e83 4eede1fca2 b7765d00f9
5479e08cc4 d0249f1c2e 108ca6fad1 5bee21d724 1f8bf163e4 7e35c50b81 e835ce83e2 00bf66785f
e0e4250421 901682ff78 5af78cc98d 59f22ef91a 5393a5600d 077beee465 02d3ad8609 b48cf5412b
5f3fa5f930 0e1bb99f26 d2c1a8dc2d 5a97a5291a f6b50924a5 a55c274f51 eaa9d2cd6b d3bec24739
3ef1538121 47e462eecc f3d1670606 9547034f91 62c9beda0c 0e68b6ddce 75e88db330 6baa549249
22561bc0e9 8a4464514f 85e94a30ee 3445bec6fc c8dace2728 9bcb84810f 54e19092fd 3d67f52f9d
bfd586577c 5589549c1d 5c316acaa0 f4fb841ad1 c2d11275f7 b54c155f5b ea2d44503f 59f7d583a3
fb876e4572 c2d2bf1d6b 302094e1d2 8d55c2441c d7e7e12abc a856623e87 348cd84fc8 62de693a17
33769e908d 1a28948eb3 01050448cf edc00dea02 8d0631c8fa 1135928903 6f3bc92c29 bd531038e8
90da0cb60e 3049d5b9b3 43c6b45a53 68a0477bc0 ccf0cd967d 002ad7cd39 397fafad23 30f0cd2f1e
4d6d8d9ae9 9348514a67 17704500fb 2338eda8d8 6a0633e961 16dc41de27 ab471e17e4 3be8aeb14e
b48e251360 65a4b90840 59956a5957 d4a08e560d dacac91973 d39400fed7 07174ed841 e7475335b1
722bfe4e7c cc8ab98656 d7cff9f8e8 9bfda9df71 0a837b6317 c3c1db7c56 21ca4ab944 c749bec8c3
a95306e2d7 ed12d1ecad 05ed9b7005 76477c3843 ccc4219558 9d60f608af 6a4803a6a9 c3b67720f9
1c373456f6 9636266cbd dc3349024a 66a3112100 72eca26bf9 e44d56b18d 71e00caeab 7a2ee770f5
fadebd256e 3dab6e5429 bc182a6aca fbde7b2d0a 58baa367d6 ca2e3f1131 ebd3aa429c ddb0317154
606aa3baee 36dac6da41 9202824e1b 0135dd73bb c01be297ce 42bd6d9cf6 79013a59c0 c1555ae4b6
a557005417 8069b44ebf

@@ -2,6 +2,9 @@ Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version <next>:
- Fix a crash on windows platforms related to automatic stack alignment
  in libavresample
- Fix memleaks in the ogg demuxer. Related to CVE-2012-2882


version 1.1:
@@ -60,7 +63,9 @@ version 1.1:
- support building on the Plan 9 operating system
- kerndeint filter ported from MPlayer
- histeq filter ported from VirtualDub
- Megalux Frame demuxer
- 012v decoder
- Improved AVC Intra decoding support


version 1.0:
@@ -349,7 +354,6 @@ easier to use. The changes are:
- Simple segmenting muxer
- Indeo 4 decoder
- SMJPEG demuxer
- Megalux Frame demuxer


version 0.8:

@@ -1851,7 +1851,7 @@ static int alloc_buffer(FrameBuffer **pool, AVCodecContext *s, FrameBuffer **pbuf
/* XXX this shouldn't be needed, but some tests break without this line
 * those decoders are buggy and need to be fixed.
 * the following tests fail:
 * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
 * cdgraphics, ansi
 */
memset(buf->base[0], 128, ret);

configure
@@ -789,8 +789,8 @@ check_ld(){
|
||||
log check_ld "$@"
|
||||
type=$1
|
||||
shift 1
|
||||
flags=$(filter_out '-l*' $@)
|
||||
libs=$(filter '-l*' $@)
|
||||
flags=$(filter_out '-l*|*.so' $@)
|
||||
libs=$(filter '-l*|*.so' $@)
|
||||
check_$type $($cflags_filter $flags) || return
|
||||
flags=$($ldflags_filter $flags)
|
||||
libs=$($ldflags_filter $libs)
|
||||
@@ -1343,6 +1343,8 @@ HAVE_LIST="
|
||||
asm_types_h
|
||||
attribute_may_alias
|
||||
attribute_packed
|
||||
cdio_paranoia_h
|
||||
cdio_paranoia_paranoia_h
|
||||
clock_gettime
|
||||
closesocket
|
||||
cmov
|
||||
@@ -1464,6 +1466,7 @@ HAVE_LIST="
|
||||
CONFIG_EXTRA="
|
||||
aandcttables
|
||||
ac3dsp
|
||||
audio_frame_queue
|
||||
error_resilience
|
||||
gcrypt
|
||||
golomb
|
||||
@@ -1631,7 +1634,7 @@ mpegvideoenc_select="mpegvideo"
|
||||
|
||||
# decoders / encoders
|
||||
aac_decoder_select="mdct sinewin"
|
||||
aac_encoder_select="mdct sinewin"
|
||||
aac_encoder_select="audio_frame_queue mdct sinewin"
|
||||
aac_latm_decoder_select="aac_decoder aac_latm_parser"
|
||||
ac3_decoder_select="mdct ac3dsp ac3_parser"
|
||||
ac3_encoder_select="mdct ac3dsp"
|
||||
@@ -1716,13 +1719,13 @@ msmpeg4v3_decoder_select="h263_decoder"
|
||||
msmpeg4v3_encoder_select="h263_encoder"
|
||||
mss2_decoder_select="vc1_decoder"
|
||||
nellymoser_decoder_select="mdct sinewin"
|
||||
nellymoser_encoder_select="mdct sinewin"
|
||||
nellymoser_encoder_select="audio_frame_queue mdct sinewin"
|
||||
nuv_decoder_select="lzo"
|
||||
png_decoder_select="zlib"
|
||||
png_encoder_select="zlib"
|
||||
qcelp_decoder_select="lsp"
|
||||
qdm2_decoder_select="mdct rdft mpegaudiodsp"
|
||||
ra_144_encoder_select="lpc"
|
||||
ra_144_encoder_select="audio_frame_queue lpc"
|
||||
ralf_decoder_select="golomb"
|
||||
rv10_decoder_select="h263_decoder"
|
||||
rv10_encoder_select="h263_encoder"
|
||||
@@ -1823,7 +1826,9 @@ vc1_parser_select="error_resilience mpegvideo"
|
||||
libaacplus_encoder_deps="libaacplus"
|
||||
libcelt_decoder_deps="libcelt"
|
||||
libfaac_encoder_deps="libfaac"
|
||||
libfaac_encoder_select="audio_frame_queue"
|
||||
libfdk_aac_encoder_deps="libfdk_aac"
|
||||
libfdk_aac_encoder_select="audio_frame_queue"
|
||||
libgsm_decoder_deps="libgsm"
|
||||
libgsm_encoder_deps="libgsm"
|
||||
libgsm_ms_decoder_deps="libgsm"
|
||||
@@ -1832,24 +1837,30 @@ libilbc_decoder_deps="libilbc"
|
||||
libilbc_encoder_deps="libilbc"
|
||||
libmodplug_demuxer_deps="libmodplug"
|
||||
libmp3lame_encoder_deps="libmp3lame"
|
||||
libmp3lame_encoder_select="audio_frame_queue"
|
||||
libopencore_amrnb_decoder_deps="libopencore_amrnb"
|
||||
libopencore_amrnb_encoder_deps="libopencore_amrnb"
|
||||
libopencore_amrnb_encoder_select="audio_frame_queue"
|
||||
libopencore_amrwb_decoder_deps="libopencore_amrwb"
|
||||
libopenjpeg_decoder_deps="libopenjpeg"
|
||||
libopenjpeg_encoder_deps="libopenjpeg"
|
||||
libopus_decoder_deps="libopus"
|
||||
libopus_encoder_deps="libopus"
|
||||
libopus_encoder_select="audio_frame_queue"
|
||||
libschroedinger_decoder_deps="libschroedinger"
|
||||
libschroedinger_encoder_deps="libschroedinger"
|
||||
libspeex_decoder_deps="libspeex"
|
||||
libspeex_encoder_deps="libspeex"
|
||||
libspeex_encoder_select="audio_frame_queue"
|
||||
libstagefright_h264_decoder_deps="libstagefright_h264"
|
||||
libtheora_encoder_deps="libtheora"
|
||||
libtwolame_encoder_deps="libtwolame"
|
||||
libvo_aacenc_encoder_deps="libvo_aacenc"
|
||||
libvo_aacenc_encoder_select="audio_frame_queue"
|
||||
libvo_amrwbenc_encoder_deps="libvo_amrwbenc"
|
||||
libvorbis_decoder_deps="libvorbis"
|
||||
libvorbis_encoder_deps="libvorbis"
|
||||
libvorbis_encoder_select="audio_frame_queue"
|
||||
libvpx_decoder_deps="libvpx"
|
||||
libvpx_encoder_deps="libvpx"
|
||||
libx264_encoder_deps="libx264"
|
||||
@@ -3076,7 +3087,7 @@ check_64bit(){
|
||||
}
|
||||
|
||||
case "$arch" in
|
||||
alpha|ia64)
|
||||
aarch64|alpha|ia64)
|
||||
spic=$shared
|
||||
;;
|
||||
mips)
|
||||
@@ -3817,7 +3828,6 @@ enabled libiec61883 && require libiec61883 libiec61883/iec61883.h iec61883_cmp_c
|
||||
enabled libaacplus && require "libaacplus >= 2.0.0" aacplus.h aacplusEncOpen -laacplus
|
||||
enabled libass && require_pkg_config libass ass/ass.h ass_library_init
|
||||
enabled libbluray && require libbluray libbluray/bluray.h bd_open -lbluray
|
||||
enabled libcdio && require2 libcdio "cdio/cdda.h cdio/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio
|
||||
enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 &&
|
||||
{ check_lib celt/celt.h celt_decoder_create_custom -lcelt0 ||
|
||||
die "ERROR: libcelt must be installed and version must be >= 0.11.0."; }
|
||||
@@ -3916,7 +3926,7 @@ rsync --help 2> /dev/null | grep -q 'contimeout' && enable rsync_contimeout || d
|
||||
check_header linux/fb.h
|
||||
check_header linux/videodev.h
|
||||
check_header linux/videodev2.h
|
||||
check_struct linux/videodev2.h "struct v4l2_frmivalenum" discrete
|
||||
check_code cc linux/videodev2.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_safe struct_v4l2_frmivalenum_discrete
|
||||
|
||||
check_header sys/videoio.h
|
||||
|
||||
@@ -3954,6 +3964,9 @@ enabled jack_indev && check_lib2 jack/jack.h jack_client_open -ljack && check_fu
|
||||
|
||||
enabled_any sndio_indev sndio_outdev && check_lib2 sndio.h sio_open -lsndio
|
||||
|
||||
if enabled libcdio; then
|
||||
check_lib2 "cdio/cdda.h cdio/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio || check_lib2 "cdio/paranoia/cdda.h cdio/paranoia/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio
|
||||
fi
|
||||
|
||||
enabled x11grab &&
|
||||
require X11 X11/Xlib.h XOpenDisplay -lX11 &&
|
||||
@@ -4090,6 +4103,7 @@ elif enabled gcc; then
|
||||
check_optflags -fno-tree-vectorize
|
||||
check_cflags -Werror=implicit-function-declaration
|
||||
check_cflags -Werror=missing-prototypes
|
||||
check_cflags -Werror=return-type
|
||||
check_cflags -Werror=vla
|
||||
elif enabled llvm_gcc; then
|
||||
check_cflags -mllvm -stack-alignment=16
|
||||
@@ -4098,6 +4112,7 @@ elif enabled clang; then
|
||||
check_cflags -Qunused-arguments
|
||||
check_cflags -Werror=implicit-function-declaration
|
||||
check_cflags -Werror=missing-prototypes
|
||||
check_cflags -Werror=return-type
|
||||
elif enabled armcc; then
|
||||
# 2523: use of inline assembler is deprecated
|
||||
add_cflags -W${armcc_opt},--diag_suppress=2523
|
||||
|
@@ -132,30 +132,30 @@ API changes, most recent first:
2012-03-26 - a67d9cf - lavfi 2.66.100
Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.

2012-xx-xx - xxxxxxx - lavu 52.2.1 - avstring.h
2012-12-29 - d8fd06c - lavu 52.3.0 - avstring.h
Add av_basename() and av_dirname().

2012-11-10 - 5980f5dd - lavu 52.2.0 - audioconvert.h
2012-11-11 - 5980f5d - lavu 52.2.0 - audioconvert.h
Rename audioconvert.h to channel_layout.h. audioconvert.h is now deprecated.

2012-10-26 - dfde8a34 - lavu 52.1.0 - intmath.h
Add av_ctz() for trailing zero bit count.
2012-11-05 - dfde8a3 - lavu 52.1.0 - intmath.h
Add av_ctz() for trailing zero bit count

2012-10-18 - a893655b - lavu 51.45.0 - error.h
Add AVERROR_EXPERIMENTAL.
2012-10-21 - a893655 - lavu 51.45.0 - error.h
Add AVERROR_EXPERIMENTAL

2012-10-12 - d2fcb356 - lavu 51.44.0 - pixdesc.h
2012-10-12 - d2fcb35 - lavu 51.44.0 - pixdesc.h
Add functions for accessing pixel format descriptors.
Accessing the av_pix_fmt_descriptors array directly is now
deprecated.

2012-10-11 - 9a92aea2 - lavu 51.43.0 - aes.h, md5.h, sha.h, tree.h
2012-10-11 - 9a92aea - lavu 51.43.0 - aes.h, md5.h, sha.h, tree.h
Add functions for allocating the opaque contexts for the algorithms,

2012-10-10 - b522000e - lavf 54.18.0 - avio.h
2012-10-10 - b522000 - lavf 54.18.0 - avio.h
Add avio_closep to complement avio_close.

2012-10-06 - 78071a14 - lavu 51.42.0 - pixfmt.h
2012-10-08 - 78071a1 - lavu 51.42.0 - pixfmt.h
Rename PixelFormat to AVPixelFormat and all PIX_FMT_* to AV_PIX_FMT_*.
To provide backwards compatibility, PixelFormat is now #defined as
AVPixelFormat.

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER =
PROJECT_NUMBER = 1.1.3

# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55

@@ -1,7 +1,7 @@
Release Notes
=============

* 0.10 "Freedom" January, 2012
* 1.1 "Fire Flower" January, 2013


General notes
@@ -20,3 +20,6 @@ compiler. Since MSVC does not support C99 features used extensively by FFmpeg,
this has been accomplished using a converter that turns C99 code to C89. See the
platform-specific documentation for more detailed documentation on building
FFmpeg with MSVC.

The used output sample format for several audio decoders has changed, make
sure you always check/use AVCodecContext.sample_fmt or AVFrame.format.

@@ -314,7 +314,7 @@ int main (int argc, char **argv)
if (audio_stream) {
const char *fmt;

if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt) < 0))
if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt)) < 0)
goto end;
printf("Play the output audio file with the command:\n"
"ffplay -f %s -ac %d -ar %d %s\n",

@@ -200,7 +200,7 @@ int main(int argc, char **argv)
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
} while (t < 10);

if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt) < 0))
if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
goto end;
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
"ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",

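The two example fixes above correct the same C operator-precedence slip: in `if ((ret = f(...) < 0))` the `<` binds before the `=`, so `ret` ends up holding 0 or 1 rather than the negative error code. A minimal standalone illustration of the difference, using a hypothetical helper name:

```c
#include <stdio.h>

/* Hypothetical helper that fails with a negative error code. */
static int do_work(void)
{
    return -42;
}

int main(void)
{
    int ret;

    /* Buggy form: '<' is evaluated first, so ret is assigned the
     * comparison result (1) and the real error code is lost. */
    if ((ret = do_work() < 0))
        printf("buggy form: ret = %d\n", ret);   /* prints 1 */

    /* Fixed form: assign first, then compare, so ret keeps -42. */
    if ((ret = do_work()) < 0)
        printf("fixed form: ret = %d\n", ret);   /* prints -42 */

    return 0;
}
```
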
@@ -711,10 +711,11 @@ Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
@end example

Attach a picture to an mp3:
To attach a picture to an mp3 file select both the audio and the picture stream
with @code{map}:
@example
ffmpeg -i input.mp3 -i cover.png -c copy -metadata:s:v title="Album cover"
-metadata:s:v comment="Cover (Front)" out.mp3
ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
-metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
@end example

@c man end MUXERS

@@ -114,7 +114,7 @@ wrapper.
You will need the following prerequisites:

@itemize
@item @uref{https://github.com/libav/c99-to-c89/, C99-to-C89 Converter & Wrapper}
@item @uref{http://download.videolan.org/pub/contrib/c99-to-c89/, C99-to-C89 Converter & Wrapper}
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
@item @uref{http://www.mingw.org/, MSYS}
@item @uref{http://yasm.tortall.net/, YASM}

ffmpeg.c
@@ -471,7 +471,6 @@ static void exit_program(void)
if (received_sigterm) {
av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
(int) received_sigterm);
exit (255);
}
}

@@ -2100,6 +2099,12 @@ static int transcode_init(void)
codec->time_base.num *= icodec->ticks_per_frame;
}
}
if ( codec->codec_tag == AV_RL32("tmcd")
&& icodec->time_base.num < icodec->time_base.den
&& icodec->time_base.num > 0
&& 121LL*icodec->time_base.num > icodec->time_base.den) {
codec->time_base = icodec->time_base;
}

if(ost->frame_rate.num)
codec->time_base = av_inv_q(ost->frame_rate);
@@ -3214,6 +3219,6 @@ int main(int argc, char **argv)
printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
}

exit(0);
exit(received_nb_signals ? 255 : 0);
return 0;
}

@@ -1143,8 +1143,6 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
if (p) p++;
}
video_enc->rc_override_count = i;
if (!video_enc->rc_initial_buffer_occupancy)
video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
video_enc->intra_dc_precision = intra_dc_precision - 8;

if (do_psnr)
@@ -1155,9 +1153,11 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
if (do_pass) {
if (do_pass & 1) {
video_enc->flags |= CODEC_FLAG_PASS1;
av_dict_set(&ost->opts, "flags", "+pass1", AV_DICT_APPEND);
}
if (do_pass & 2) {
video_enc->flags |= CODEC_FLAG_PASS2;
av_dict_set(&ost->opts, "flags", "+pass2", AV_DICT_APPEND);
}
}

@@ -2150,7 +2150,7 @@ static int opt_channel_layout(void *optctx, const char *opt, const char *arg)
return AVERROR(EINVAL);
}
snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
ret = opt_default(NULL, opt, layout_str);
ret = opt_default_new(o, opt, layout_str);
if (ret < 0)
return ret;

@@ -44,7 +44,7 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
int line = 0;
int line = 0, ret;
const int width = avctx->width;
AVFrame *pic = avctx->coded_frame;
uint16_t *y, *u, *v;
@@ -65,8 +65,8 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
}

pic->reference = 0;
if (ff_get_buffer(avctx, pic) < 0)
return AVERROR_INVALIDDATA;;
if ((ret = ff_get_buffer(avctx, pic)) < 0)
return ret;

y = (uint16_t *)pic->data[0];
u = (uint16_t *)pic->data[1];

@@ -39,6 +39,7 @@ OBJS = allcodecs.o \
|
||||
# parts needed for many different codecs
|
||||
OBJS-$(CONFIG_AANDCTTABLES) += aandcttab.o
|
||||
OBJS-$(CONFIG_AC3DSP) += ac3dsp.o
|
||||
OBJS-$(CONFIG_AUDIO_FRAME_QUEUE) += audio_frame_queue.o
|
||||
OBJS-$(CONFIG_CRYSTALHD) += crystalhd.o
|
||||
OBJS-$(CONFIG_DCT) += dct.o dct32_fixed.o dct32_float.o
|
||||
OBJS-$(CONFIG_DWT) += dwt.o snow.o
|
||||
@@ -84,8 +85,7 @@ OBJS-$(CONFIG_AAC_DECODER) += aacdec.o aactab.o aacsbr.o aacps.o \
|
||||
OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o \
|
||||
aacpsy.o aactab.o \
|
||||
psymodel.o iirfilter.o \
|
||||
mpeg4audio.o kbdwin.o \
|
||||
audio_frame_queue.o
|
||||
mpeg4audio.o kbdwin.o
|
||||
OBJS-$(CONFIG_AASC_DECODER) += aasc.o msrledec.o
|
||||
OBJS-$(CONFIG_AC3_DECODER) += ac3dec.o ac3dec_data.o ac3.o kbdwin.o
|
||||
OBJS-$(CONFIG_AC3_ENCODER) += ac3enc_float.o ac3enc.o ac3tab.o \
|
||||
@@ -317,8 +317,7 @@ OBJS-$(CONFIG_MVC1_DECODER) += mvcdec.o
|
||||
OBJS-$(CONFIG_MVC2_DECODER) += mvcdec.o
|
||||
OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
|
||||
OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
|
||||
OBJS-$(CONFIG_PAF_VIDEO_DECODER) += paf.o
|
||||
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += paf.o
|
||||
@@ -358,8 +357,7 @@ OBJS-$(CONFIG_R10K_ENCODER) += r210enc.o
|
||||
OBJS-$(CONFIG_R210_DECODER) += r210dec.o
|
||||
OBJS-$(CONFIG_R210_ENCODER) += r210enc.o
|
||||
OBJS-$(CONFIG_RA_144_DECODER) += ra144dec.o ra144.o celp_filters.o
|
||||
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o
|
||||
OBJS-$(CONFIG_RA_288_DECODER) += ra288.o celp_filters.o
|
||||
OBJS-$(CONFIG_RALF_DECODER) += ralf.o
|
||||
OBJS-$(CONFIG_RAWVIDEO_DECODER) += rawdec.o
|
||||
@@ -660,43 +658,39 @@ OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
# external codec libraries
|
||||
OBJS-$(CONFIG_LIBAACPLUS_ENCODER) += libaacplus.o
|
||||
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
|
||||
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o
|
||||
OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o
|
||||
OBJS-$(CONFIG_LIBGSM_DECODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBGSM_MS_DECODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBGSM_MS_ENCODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBILBC_DECODER) += libilbc.o
|
||||
OBJS-$(CONFIG_LIBILBC_ENCODER) += libilbc.o
|
||||
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o
|
||||
OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER) += libopencore-amr.o
|
||||
OBJS-$(CONFIG_LIBOPENJPEG_DECODER) += libopenjpegdec.o
|
||||
OBJS-$(CONFIG_LIBOPENJPEG_ENCODER) += libopenjpegenc.o
|
||||
OBJS-$(CONFIG_LIBOPUS_DECODER) += libopusdec.o libopus.o \
|
||||
vorbis_data.o
|
||||
OBJS-$(CONFIG_LIBOPUS_ENCODER) += libopusenc.o libopus.o \
|
||||
vorbis_data.o audio_frame_queue.o
|
||||
vorbis_data.o
|
||||
OBJS-$(CONFIG_LIBSCHROEDINGER_DECODER) += libschroedingerdec.o \
|
||||
libschroedinger.o
|
||||
OBJS-$(CONFIG_LIBSCHROEDINGER_ENCODER) += libschroedingerenc.o \
|
||||
libschroedinger.o
|
||||
OBJS-$(CONFIG_LIBSPEEX_DECODER) += libspeexdec.o
|
||||
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o
|
||||
OBJS-$(CONFIG_LIBSTAGEFRIGHT_H264_DECODER)+= libstagefright.o
|
||||
OBJS-$(CONFIG_LIBTHEORA_ENCODER) += libtheoraenc.o
|
||||
OBJS-$(CONFIG_LIBTWOLAME_ENCODER) += libtwolame.o
|
||||
OBJS-$(CONFIG_LIBUTVIDEO_DECODER) += libutvideodec.o
|
||||
OBJS-$(CONFIG_LIBUTVIDEO_ENCODER) += libutvideoenc.o
|
||||
OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o \
|
||||
audio_frame_queue.o
|
||||
OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o
|
||||
OBJS-$(CONFIG_LIBVO_AMRWBENC_ENCODER) += libvo-amrwbenc.o
|
||||
OBJS-$(CONFIG_LIBVORBIS_DECODER) += libvorbisdec.o
|
||||
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbisenc.o audio_frame_queue.o \
|
||||
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbisenc.o \
|
||||
vorbis_data.o vorbis_parser.o xiph.o
|
||||
OBJS-$(CONFIG_LIBVPX_DECODER) += libvpxdec.o
|
||||
OBJS-$(CONFIG_LIBVPX_ENCODER) += libvpxenc.o
|
||||
|
@@ -914,6 +914,11 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
}
}

if (avctx->channels > MAX_CHANNELS) {
av_log(avctx, AV_LOG_ERROR, "Too many channels\n");
return AVERROR_INVALIDDATA;
}

AAC_INIT_VLC_STATIC( 0, 304);
AAC_INIT_VLC_STATIC( 1, 270);
AAC_INIT_VLC_STATIC( 2, 550);

@@ -517,7 +517,7 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,

/* add current frame to queue */
if (frame) {
if ((ret = ff_af_queue_add(&s->afq, frame) < 0))
if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
return ret;
}

@@ -60,7 +60,7 @@ static av_cold int aasc_decode_init(AVCodecContext *avctx)
}
break;
case 16:
avctx->pix_fmt = AV_PIX_FMT_RGB555;
avctx->pix_fmt = AV_PIX_FMT_RGB555LE;
break;
case 24:
avctx->pix_fmt = AV_PIX_FMT_BGR24;

@@ -22,7 +22,6 @@
#include "config.h"
#include "libavutil/arm/asm.S"

#if HAVE_ARMV5TE_EXTERNAL
function ff_prefetch_arm, export=1
subs r2, r2, #1
pld [r0]
@@ -30,4 +29,3 @@ function ff_prefetch_arm, export=1
bne ff_prefetch_arm
bx lr
endfunc
#endif

@@ -19,7 +19,7 @@
*/

#include "libavutil/arm/cpu.h"
#include <libavcodec/videodsp.h>
#include "libavcodec/videodsp.h"
#include "videodsp_arm.h"

void ff_prefetch_arm(uint8_t *mem, ptrdiff_t stride, int h);

@@ -124,14 +124,14 @@ function ff_vp8_luma_dc_wht_armv6, export=1
|
||||
sbfx r1, r9, #3, #13
|
||||
sbfx r10, r4, #3, #13
|
||||
#else
|
||||
sxth r8, r8
|
||||
sxth r7, r7
|
||||
sxth r9, r9
|
||||
sxth r4, r4
|
||||
asr r8, #3 @ block[0][0]
|
||||
asr r7, #3 @ block[0][1]
|
||||
asr r9, #3 @ block[0][2]
|
||||
asr r4, #3 @ block[0][3]
|
||||
sxth r6, r8
|
||||
sxth r12, r7
|
||||
sxth r1, r9
|
||||
sxth r10, r4
|
||||
asr r6, #3 @ block[0][0]
|
||||
asr r12, #3 @ block[0][1]
|
||||
asr r1, #3 @ block[0][2]
|
||||
asr r10, #3 @ block[0][3]
|
||||
#endif
|
||||
|
||||
strh r6, [r0], #32
|
||||
|
@@ -518,7 +518,7 @@ static int add_tonal_components(float *spectrum, int num_components,
output = &spectrum[components[i].pos];

for (j = 0; j < components[i].num_coefs; j++)
output[i] += input[i];
output[j] += input[j];
}

return last_pos;

@@ -237,7 +237,7 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,

avctx->pix_fmt = dirac_pix_fmt[!luma_offset][source->chroma_format];
avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_x_shift, &chroma_y_shift);
if (!(source->width % (1<<chroma_x_shift)) || !(source->height % (1<<chroma_y_shift))) {
if ((source->width % (1<<chroma_x_shift)) || (source->height % (1<<chroma_y_shift))) {
av_log(avctx, AV_LOG_ERROR, "Dimensions must be a integer multiply of the chroma subsampling\n");
return AVERROR_INVALIDDATA;
}

@@ -629,14 +629,35 @@ static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx)
|
||||
static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
|
||||
{
|
||||
DNXHDEncContext *ctx = avctx->priv_data;
|
||||
int mb_y = jobnr, mb_x;
|
||||
int mb_y = jobnr, mb_x, x, y;
|
||||
int partial_last_row = (mb_y == ctx->m.mb_height - 1) &&
|
||||
((avctx->height >> ctx->interlaced) & 0xF);
|
||||
|
||||
ctx = ctx->thread[threadnr];
|
||||
if (ctx->cid_table->bit_depth == 8) {
|
||||
uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y<<4) * ctx->m.linesize);
|
||||
for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x, pix += 16) {
|
||||
unsigned mb = mb_y * ctx->m.mb_width + mb_x;
|
||||
int sum = ctx->m.dsp.pix_sum(pix, ctx->m.linesize);
|
||||
int varc = (ctx->m.dsp.pix_norm1(pix, ctx->m.linesize) - (((unsigned)sum*sum)>>8)+128)>>8;
|
||||
int sum;
|
||||
int varc;
|
||||
|
||||
if (!partial_last_row && mb_x * 16 <= avctx->width - 16) {
|
||||
sum = ctx->m.dsp.pix_sum(pix, ctx->m.linesize);
|
||||
varc = ctx->m.dsp.pix_norm1(pix, ctx->m.linesize);
|
||||
} else {
|
||||
int bw = FFMIN(avctx->width - 16 * mb_x, 16);
|
||||
int bh = FFMIN((avctx->height >> ctx->interlaced) - 16 * mb_y, 16);
|
||||
sum = varc = 0;
|
||||
for (y = 0; y < bh; y++) {
|
||||
for (x = 0; x < bw; x++) {
|
||||
uint8_t val = pix[x + y * ctx->m.linesize];
|
||||
sum += val;
|
||||
varc += val * val;
|
||||
}
|
||||
}
|
||||
}
|
||||
varc = (varc - (((unsigned)sum * sum) >> 8) + 128) >> 8;
|
||||
|
||||
ctx->mb_cmp[mb].value = varc;
|
||||
ctx->mb_cmp[mb].mb = mb;
|
||||
}
|
||||
|
@@ -924,6 +924,12 @@ void ff_er_frame_end(MpegEncContext *s)
return;
};

if ( s->picture_structure == PICT_FRAME
&& s->current_picture.f.linesize[0] != s->current_picture_ptr->f.linesize[0]) {
av_log(s->avctx, AV_LOG_ERROR, "Error concealment not possible, frame not fully initialized\n");
return;
}

if (s->current_picture.f.motion_val[0] == NULL) {
av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");

@@ -245,10 +245,10 @@ static int decode_frame(AVCodecContext *avctx,

case 1:
/* Fraps v1 is an upside-down BGR24 */
for(y=0; y<avctx->height; y++)
memcpy(&f->data[0][ (avctx->height-y)*f->linesize[0] ],
&buf[y*avctx->width*3],
3*avctx->width);
for(y=0; y<avctx->height; y++)
memcpy(&f->data[0][ (avctx->height - y -1) * f->linesize[0]],
&buf[y*avctx->width*3],
3*avctx->width);
break;

case 2:

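The Fraps v1 change above fixes an off-by-one in the bottom-up copy: indexing the destination with `avctx->height - y` writes one row past the last valid row on the first iteration, while `avctx->height - y - 1` targets the correct final row. A self-contained sketch of the corrected flip (the function and buffer names are hypothetical):

```c
#include <stdint.h>
#include <string.h>

/* Copy a bottom-up BGR24 source image into a top-down destination.
 * dst_linesize may exceed width * 3 because of row padding. */
static void copy_flipped_bgr24(uint8_t *dst, int dst_linesize,
                               const uint8_t *src, int width, int height)
{
    int y;

    for (y = 0; y < height; y++) {
        /* Source row y belongs at destination row (height - y - 1);
         * using (height - y) would overrun the buffer when y == 0. */
        memcpy(dst + (height - y - 1) * dst_linesize,
               src + y * width * 3,
               3 * width);
    }
}
```
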
@@ -366,25 +366,49 @@ static inline int check_marker(GetBitContext *s, const char *msg)
|
||||
}
|
||||
|
||||
/**
|
||||
* Inititalize GetBitContext.
|
||||
* @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes larger than the actual read bits
|
||||
* because some optimized bitstream readers read 32 or 64 bit at once and could read over the end
|
||||
* Initialize GetBitContext.
|
||||
* @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes
|
||||
* larger than the actual read bits because some optimized bitstream
|
||||
* readers read 32 or 64 bit at once and could read over the end
|
||||
* @param bit_size the size of the buffer in bits
|
||||
* @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
|
||||
*/
|
||||
static inline void init_get_bits(GetBitContext *s, const uint8_t *buffer,
|
||||
int bit_size)
|
||||
static inline int init_get_bits(GetBitContext *s, const uint8_t *buffer,
|
||||
int bit_size)
|
||||
{
|
||||
int buffer_size = (bit_size+7)>>3;
|
||||
if (buffer_size < 0 || bit_size < 0) {
|
||||
int buffer_size;
|
||||
int ret = 0;
|
||||
|
||||
if (bit_size >= INT_MAX - 7 || bit_size < 0 || !buffer) {
|
||||
buffer_size = bit_size = 0;
|
||||
buffer = NULL;
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
buffer_size = (bit_size + 7) >> 3;
|
||||
|
||||
s->buffer = buffer;
|
||||
s->size_in_bits = bit_size;
|
||||
s->size_in_bits_plus8 = bit_size + 8;
|
||||
s->buffer_end = buffer + buffer_size;
|
||||
s->index = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize GetBitContext.
|
||||
* @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes
|
||||
* larger than the actual read bits because some optimized bitstream
|
||||
* readers read 32 or 64 bit at once and could read over the end
|
||||
* @param byte_size the size of the buffer in bytes
|
||||
* @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
|
||||
*/
|
||||
static inline int init_get_bits8(GetBitContext *s, const uint8_t *buffer,
|
||||
int byte_size)
|
||||
{
|
||||
if (byte_size > INT_MAX / 8 || byte_size < 0)
|
||||
byte_size = -1;
|
||||
return init_get_bits(s, buffer, byte_size * 8);
|
||||
}
|
||||
|
||||
static inline void align_get_bits(GetBitContext *s)
|
||||
|
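The get_bits.h hunk above makes init_get_bits() report invalid sizes through its return value and adds the byte-sized wrapper init_get_bits8(), so callers are expected to check the result instead of assuming initialization always succeeds. A minimal sketch of that calling pattern, assuming only the prototypes shown in the hunk (the surrounding function is hypothetical):

```c
/* Parse some bitstream header from a raw buffer; sketch only. */
static int parse_header(const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    int ret;

    /* Fails with AVERROR_INVALIDDATA if buf is NULL or the size would
     * overflow when converted to bits. */
    if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
        return ret;

    /* ... read fields with get_bits()/skip_bits() ... */

    return 0;
}
```
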
@@ -118,7 +118,7 @@ static void gif_copy_img_rect(const uint32_t *src, uint32_t *dst,
const uint32_t *src_px, *src_pr,
*src_py = src + y_start,
*dst_py = dst + y_start;
const uint32_t *src_pb = src_py + t * linesize;
const uint32_t *src_pb = src_py + h * linesize;
uint32_t *dst_px;

for (; src_py < src_pb; src_py += linesize, dst_py += linesize) {

@@ -309,10 +309,11 @@ static inline int get_lowest_part_list_y(H264Context *h, Picture *pic, int n,
|
||||
int height, int y_offset, int list)
|
||||
{
|
||||
int raw_my = h->mv_cache[list][scan8[n]][1];
|
||||
int filter_height = (raw_my & 3) ? 2 : 0;
|
||||
int filter_height_up = (raw_my & 3) ? 2 : 0;
|
||||
int filter_height_down = (raw_my & 3) ? 3 : 0;
|
||||
int full_my = (raw_my >> 2) + y_offset;
|
||||
int top = full_my - filter_height;
|
||||
int bottom = full_my + filter_height + height;
|
||||
int top = full_my - filter_height_up;
|
||||
int bottom = full_my + filter_height_down + height;
|
||||
|
||||
return FFMAX(abs(top), bottom);
|
||||
}
|
||||
@@ -2350,7 +2351,7 @@ static int field_end(H264Context *h, int in_setup)
|
||||
* past end by one (callers fault) and resync_mb_y != 0
|
||||
* causes problems for the first MB line, too.
|
||||
*/
|
||||
if (!FIELD_PICTURE)
|
||||
if (!FIELD_PICTURE && h->current_slice && !h->sps.new)
|
||||
ff_er_frame_end(s);
|
||||
|
||||
ff_MPV_frame_end(s);
|
||||
@@ -2474,7 +2475,7 @@ static int h264_set_parameter_from_sps(H264Context *h)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static enum PixelFormat get_pixel_format(H264Context *h)
|
||||
static enum PixelFormat get_pixel_format(H264Context *h, int force_callback)
|
||||
{
|
||||
MpegEncContext *const s = &h->s;
|
||||
switch (h->sps.bit_depth_luma) {
|
||||
@@ -2536,11 +2537,17 @@ static enum PixelFormat get_pixel_format(H264Context *h)
|
||||
return s->avctx->color_range == AVCOL_RANGE_JPEG ? AV_PIX_FMT_YUVJ422P
|
||||
: AV_PIX_FMT_YUV422P;
|
||||
} else {
|
||||
return s->avctx->get_format(s->avctx, s->avctx->codec->pix_fmts ?
|
||||
int i;
|
||||
const enum AVPixelFormat * fmt = s->avctx->codec->pix_fmts ?
|
||||
s->avctx->codec->pix_fmts :
|
||||
s->avctx->color_range == AVCOL_RANGE_JPEG ?
|
||||
hwaccel_pixfmt_list_h264_jpeg_420 :
|
||||
ff_hwaccel_pixfmt_list_420);
|
||||
ff_hwaccel_pixfmt_list_420;
|
||||
|
||||
for (i=0; fmt[i] != AV_PIX_FMT_NONE; i++)
|
||||
if (fmt[i] == s->avctx->pix_fmt && !force_callback)
|
||||
return fmt[i];
|
||||
return s->avctx->get_format(s->avctx, fmt);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
@@ -2589,7 +2596,7 @@ static int h264_slice_header_init(H264Context *h, int reinit)
|
||||
return ret;
|
||||
}
|
||||
} else {
|
||||
if ((ret = ff_MPV_common_init(s) < 0)) {
|
||||
if ((ret = ff_MPV_common_init(s)) < 0) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_init() failed.\n");
|
||||
return ret;
|
||||
}
|
||||
@@ -2764,7 +2771,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
|| s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma
|
||||
|| h->cur_chroma_format_idc != h->sps.chroma_format_idc
|
||||
|| av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio)));
|
||||
|
||||
if (h0->s.avctx->pix_fmt != get_pixel_format(h0, 0))
|
||||
must_reinit = 1;
|
||||
|
||||
s->mb_width = h->sps.mb_width;
|
||||
s->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
|
||||
@@ -2801,7 +2809,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
|
||||
flush_change(h);
|
||||
|
||||
if ((ret = get_pixel_format(h)) < 0)
|
||||
if ((ret = get_pixel_format(h, 1)) < 0)
|
||||
return ret;
|
||||
s->avctx->pix_fmt = ret;
|
||||
|
||||
@@ -2822,7 +2830,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((ret = get_pixel_format(h)) < 0)
|
||||
if ((ret = get_pixel_format(h, 1)) < 0)
|
||||
return ret;
|
||||
s->avctx->pix_fmt = ret;
|
||||
|
||||
@@ -2966,6 +2974,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
|
||||
av_log(h->s.avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
|
||||
h->frame_num, h->prev_frame_num);
|
||||
if (!h->sps.gaps_in_frame_num_allowed_flag)
|
||||
for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
|
||||
h->last_pocs[i] = INT_MIN;
|
||||
if (ff_h264_frame_start(h) < 0)
|
||||
return -1;
|
||||
h->prev_frame_num++;
|
||||
@@ -2973,7 +2984,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
s->current_picture_ptr->frame_num = h->prev_frame_num;
|
||||
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
|
||||
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 1);
|
||||
ff_generate_sliding_window_mmcos(h);
|
||||
if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 &&
|
||||
s->avctx->err_recognition & AV_EF_EXPLODE)
|
||||
return ret;
|
||||
if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 &&
|
||||
(s->avctx->err_recognition & AV_EF_EXPLODE))
|
||||
return AVERROR_INVALIDDATA;
|
||||
@@ -3152,7 +3165,15 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
|
||||
}
|
||||
}
|
||||
|
||||
if (h->nal_ref_idc && ff_h264_decode_ref_pic_marking(h0, &s->gb) < 0 &&
|
||||
// If frame-mt is enabled, only update mmco tables for the first slice
|
||||
// in a field. Subsequent slices can temporarily clobber h->mmco_index
|
||||
// or h->mmco, which will cause ref list mix-ups and decoding errors
|
||||
// further down the line. This may break decoding if the first slice is
|
||||
// corrupt, thus we only do this if frame-mt is enabled.
|
||||
if (h->nal_ref_idc &&
|
||||
ff_h264_decode_ref_pic_marking(h0, &s->gb,
|
||||
!(s->avctx->active_thread_type & FF_THREAD_FRAME) ||
|
||||
h0->current_slice == 0) < 0 &&
|
||||
(s->avctx->err_recognition & AV_EF_EXPLODE))
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
|
@@ -669,9 +669,10 @@ void ff_h264_remove_all_refs(H264Context *h);
*/
int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count);

int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb);
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb,
int first_slice);

void ff_generate_sliding_window_mmcos(H264Context *h);
int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice);

/**
* Check if the top & left blocks are available if needed & change the

@@ -385,7 +385,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
}
sps->bit_depth_luma = get_ue_golomb(&s->gb) + 8;
sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8;
if (sps->bit_depth_luma > 14U || sps->bit_depth_chroma > 14U) {
if (sps->bit_depth_luma > 14U || sps->bit_depth_chroma > 14U || sps->bit_depth_luma != sps->bit_depth_chroma) {
av_log(h->s.avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
sps->bit_depth_luma, sps->bit_depth_chroma);
goto fail;

@@ -287,7 +287,10 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
|
||||
for(list=0; list<h->list_count; list++){
|
||||
for(index= 0; index < h->ref_count[list]; index++){
|
||||
if (!h->ref_list[list][index].f.data[0]) {
|
||||
int i;
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture, default is %d\n", h->default_ref_list[list][0].poc);
|
||||
for (i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
|
||||
h->last_pocs[i] = INT_MIN;
|
||||
if (h->default_ref_list[list][0].f.data[0])
|
||||
h->ref_list[list][index]= h->default_ref_list[list][0];
|
||||
else
|
||||
@@ -480,22 +483,50 @@ static void print_long_term(H264Context *h) {
|
||||
}
|
||||
}
|
||||
|
||||
void ff_generate_sliding_window_mmcos(H264Context *h) {
|
||||
MpegEncContext * const s = &h->s;
|
||||
static int check_opcodes(MMCO *mmco1, MMCO *mmco2, int n_mmcos)
|
||||
{
|
||||
int i;
|
||||
|
||||
h->mmco_index= 0;
|
||||
if(h->short_ref_count && h->long_ref_count + h->short_ref_count >= h->sps.ref_frame_count &&
|
||||
!(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->f.reference)) {
|
||||
h->mmco[0].opcode= MMCO_SHORT2UNUSED;
|
||||
h->mmco[0].short_pic_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num;
|
||||
h->mmco_index= 1;
|
||||
for (i = 0; i < n_mmcos; i++) {
|
||||
if (mmco1[i].opcode != mmco2[i].opcode)
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
|
||||
{
|
||||
MpegEncContext * const s = &h->s;
|
||||
MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp;
|
||||
int mmco_index = 0, i;
|
||||
|
||||
if (h->short_ref_count &&
|
||||
h->long_ref_count + h->short_ref_count >= h->sps.ref_frame_count &&
|
||||
!(FIELD_PICTURE && !s->first_field &&
|
||||
s->current_picture_ptr->f.reference)) {
|
||||
mmco[0].opcode = MMCO_SHORT2UNUSED;
|
||||
mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num;
|
||||
mmco_index = 1;
|
||||
if (FIELD_PICTURE) {
|
||||
h->mmco[0].short_pic_num *= 2;
|
||||
h->mmco[1].opcode= MMCO_SHORT2UNUSED;
|
||||
h->mmco[1].short_pic_num= h->mmco[0].short_pic_num + 1;
|
||||
h->mmco_index= 2;
|
||||
mmco[0].short_pic_num *= 2;
|
||||
mmco[1].opcode = MMCO_SHORT2UNUSED;
|
||||
mmco[1].short_pic_num = mmco[0].short_pic_num + 1;
|
||||
mmco_index = 2;
|
||||
}
|
||||
}
|
||||
|
||||
if (first_slice) {
|
||||
h->mmco_index = mmco_index;
|
||||
} else if (!first_slice && mmco_index >= 0 &&
|
||||
(mmco_index != h->mmco_index ||
|
||||
(i = check_opcodes(h->mmco, mmco_temp, mmco_index)))) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR,
|
||||
"Inconsistent MMCO state between slices [%d, %d, %d]\n",
|
||||
mmco_index, h->mmco_index, i);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
|
||||
@@ -665,52 +696,86 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
|
||||
return (h->s.avctx->err_recognition & AV_EF_EXPLODE) ? err : 0;
|
||||
}
|
||||
|
||||
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb){
|
||||
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb,
|
||||
int first_slice)
|
||||
{
|
||||
MpegEncContext * const s = &h->s;
|
||||
int i;
|
||||
int i, ret;
|
||||
MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp;
|
||||
int mmco_index = 0;
|
||||
|
||||
h->mmco_index= 0;
|
||||
if(h->nal_unit_type == NAL_IDR_SLICE){ //FIXME fields
|
||||
s->broken_link= get_bits1(gb) -1;
|
||||
if(get_bits1(gb)){
|
||||
h->mmco[0].opcode= MMCO_LONG;
|
||||
h->mmco[0].long_arg= 0;
|
||||
h->mmco_index= 1;
|
||||
if (h->nal_unit_type == NAL_IDR_SLICE){ // FIXME fields
|
||||
s->broken_link = get_bits1(gb) - 1;
|
||||
if (get_bits1(gb)){
|
||||
mmco[0].opcode = MMCO_LONG;
|
||||
mmco[0].long_arg = 0;
|
||||
mmco_index = 1;
|
||||
}
|
||||
}else{
|
||||
if(get_bits1(gb)){ // adaptive_ref_pic_marking_mode_flag
|
||||
for(i= 0; i<MAX_MMCO_COUNT; i++) {
|
||||
MMCOOpcode opcode= get_ue_golomb_31(gb);
|
||||
} else {
|
||||
if (get_bits1(gb)) { // adaptive_ref_pic_marking_mode_flag
|
||||
for (i = 0; i < MAX_MMCO_COUNT; i++) {
|
||||
MMCOOpcode opcode = get_ue_golomb_31(gb);
|
||||
|
||||
h->mmco[i].opcode= opcode;
|
||||
if(opcode==MMCO_SHORT2UNUSED || opcode==MMCO_SHORT2LONG){
|
||||
h->mmco[i].short_pic_num= (h->curr_pic_num - get_ue_golomb(gb) - 1) & (h->max_pic_num - 1);
|
||||
/* if(h->mmco[i].short_pic_num >= h->short_ref_count || h->short_ref[ h->mmco[i].short_pic_num ] == NULL){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "illegal short ref in memory management control operation %d\n", mmco);
|
||||
return -1;
|
||||
}*/
|
||||
}
|
||||
if(opcode==MMCO_SHORT2LONG || opcode==MMCO_LONG2UNUSED || opcode==MMCO_LONG || opcode==MMCO_SET_MAX_LONG){
|
||||
unsigned int long_arg= get_ue_golomb_31(gb);
|
||||
if(long_arg >= 32 || (long_arg >= 16 && !(opcode == MMCO_SET_MAX_LONG && long_arg == 16) && !(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "illegal long ref in memory management control operation %d\n", opcode);
|
||||
mmco[i].opcode = opcode;
|
||||
if (opcode == MMCO_SHORT2UNUSED || opcode == MMCO_SHORT2LONG){
|
||||
mmco[i].short_pic_num =
|
||||
(h->curr_pic_num - get_ue_golomb(gb) - 1) &
|
||||
(h->max_pic_num - 1);
|
||||
#if 0
|
||||
if (mmco[i].short_pic_num >= h->short_ref_count ||
|
||||
h->short_ref[ mmco[i].short_pic_num ] == NULL){
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"illegal short ref in memory management control "
|
||||
"operation %d\n", mmco);
|
||||
return -1;
|
||||
}
|
||||
h->mmco[i].long_arg= long_arg;
|
||||
#endif
|
||||
}
|
||||
if (opcode == MMCO_SHORT2LONG || opcode == MMCO_LONG2UNUSED ||
|
||||
opcode == MMCO_LONG || opcode == MMCO_SET_MAX_LONG) {
|
||||
unsigned int long_arg = get_ue_golomb_31(gb);
|
||||
if (long_arg >= 32 ||
|
||||
(long_arg >= 16 && !(opcode == MMCO_SET_MAX_LONG &&
|
||||
long_arg == 16) &&
|
||||
!(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR,
|
||||
"illegal long ref in memory management control "
|
||||
"operation %d\n", opcode);
|
||||
return -1;
|
||||
}
|
||||
mmco[i].long_arg = long_arg;
|
||||
}
|
||||
|
||||
if(opcode > (unsigned)MMCO_LONG){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "illegal memory management control operation %d\n", opcode);
|
||||
if (opcode > (unsigned) MMCO_LONG){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR,
|
||||
"illegal memory management control operation %d\n",
|
||||
opcode);
|
||||
return -1;
|
||||
}
|
||||
if(opcode == MMCO_END)
|
||||
if (opcode == MMCO_END)
|
||||
break;
|
||||
}
|
||||
h->mmco_index= i;
|
||||
}else{
|
||||
ff_generate_sliding_window_mmcos(h);
|
||||
mmco_index = i;
|
||||
} else {
|
||||
if (first_slice) {
|
||||
ret = ff_generate_sliding_window_mmcos(h, first_slice);
|
||||
if (ret < 0 && s->avctx->err_recognition & AV_EF_EXPLODE)
|
||||
return ret;
|
||||
}
|
||||
mmco_index = -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (first_slice && mmco_index != -1) {
|
||||
h->mmco_index = mmco_index;
|
||||
} else if (!first_slice && mmco_index >= 0 &&
|
||||
(mmco_index != h->mmco_index ||
|
||||
(i = check_opcodes(h->mmco, mmco_temp, mmco_index)))) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR,
|
||||
"Inconsistent MMCO state between slices [%d, %d, %d]\n",
|
||||
mmco_index, h->mmco_index, i);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -118,12 +118,13 @@ static void generate_joint_tables(HYuvContext *s)
|
||||
for (i = y = 0; y < 256; y++) {
|
||||
int len0 = s->len[0][y];
|
||||
int limit = VLC_BITS - len0;
|
||||
if(limit <= 0)
|
||||
if(limit <= 0 || !len0)
|
||||
continue;
|
||||
for (u = 0; u < 256; u++) {
|
||||
int len1 = s->len[p][u];
|
||||
if (len1 > limit)
|
||||
if (len1 > limit || !len1)
|
||||
continue;
|
||||
av_assert0(i < (1 << VLC_BITS));
|
||||
len[i] = len0 + len1;
|
||||
bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
|
||||
symbols[i] = (y << 8) + u;
|
||||
@@ -146,18 +147,19 @@ static void generate_joint_tables(HYuvContext *s)
|
||||
for (i = 0, g = -16; g < 16; g++) {
|
||||
int len0 = s->len[p0][g & 255];
|
||||
int limit0 = VLC_BITS - len0;
|
||||
if (limit0 < 2)
|
||||
if (limit0 < 2 || !len0)
|
||||
continue;
|
||||
for (b = -16; b < 16; b++) {
|
||||
int len1 = s->len[p1][b & 255];
|
||||
int limit1 = limit0 - len1;
|
||||
if (limit1 < 1)
|
||||
if (limit1 < 1 || !len1)
|
||||
continue;
|
||||
code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
|
||||
for (r = -16; r < 16; r++) {
|
||||
int len2 = s->len[2][r & 255];
|
||||
if (len2 > limit1)
|
||||
if (len2 > limit1 || !len2)
|
||||
continue;
|
||||
av_assert0(i < (1 << VLC_BITS));
|
||||
len[i] = len0 + len1 + len2;
|
||||
bits[i] = (code << len2) + s->bits[2][r & 255];
|
||||
if (s->decorrelate) {
|
||||
@@ -182,6 +184,7 @@ static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
|
||||
{
|
||||
GetBitContext gb;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
init_get_bits(&gb, src, length * 8);
|
||||
|
||||
@@ -192,8 +195,9 @@ static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
|
||||
return -1;
|
||||
}
|
||||
ff_free_vlc(&s->vlc[i]);
|
||||
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
|
||||
s->bits[i], 4, 4, 0);
|
||||
if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
|
||||
s->bits[i], 4, 4, 0)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
generate_joint_tables(s);
|
||||
@@ -205,6 +209,7 @@ static int read_old_huffman_tables(HYuvContext *s)
|
||||
{
|
||||
GetBitContext gb;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
init_get_bits(&gb, classic_shift_luma,
|
||||
classic_shift_luma_table_size * 8);
|
||||
@@ -228,8 +233,9 @@ static int read_old_huffman_tables(HYuvContext *s)
|
||||
|
||||
for (i = 0; i < 3; i++) {
|
||||
ff_free_vlc(&s->vlc[i]);
|
||||
init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
|
||||
s->bits[i], 4, 4, 0);
|
||||
if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
|
||||
s->bits[i], 4, 4, 0)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
generate_joint_tables(s);
|
||||
|
@@ -341,7 +341,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->pix_fmt = AV_PIX_FMT_RGB444;
} else if (avctx->codec_tag != MKTAG('D','E','E','P')) {
if (avctx->bits_per_coded_sample == 24) {
avctx->pix_fmt = AV_PIX_FMT_RGB0;
avctx->pix_fmt = AV_PIX_FMT_0BGR32;
} else if (avctx->bits_per_coded_sample == 32) {
avctx->pix_fmt = AV_PIX_FMT_BGR32;
} else {

@@ -16,9 +16,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/


#include <libavutil/avassert.h>
#include <libavutil/mathematics.h>
#include "libavutil/avassert.h"
#include "libavutil/mathematics.h"
#include "libavutil/attributes.h"
#include "kbdwin.h"

@@ -199,7 +199,7 @@ static int Faac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,

/* add current frame to the queue */
if (frame) {
if ((ret = ff_af_queue_add(&s->afq, frame) < 0))
if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
return ret;
}

@@ -334,7 +334,7 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
in_buf.bufElSizes = &in_buffer_element_size;

/* add current frame to the queue */
if ((ret = ff_af_queue_add(&s->afq, frame) < 0))
if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
return ret;
}

@@ -237,7 +237,7 @@ static int mp3lame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,

/* add current frame to the queue */
if (frame) {
if ((ret = ff_af_queue_add(&s->afq, frame) < 0))
if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
return ret;
}

@@ -50,41 +50,6 @@ static int amr_decode_fix_avctx(AVCodecContext *avctx)
|
||||
#include <opencore-amrnb/interf_dec.h>
|
||||
#include <opencore-amrnb/interf_enc.h>
|
||||
|
||||
/* Common code for fixed and float version*/
|
||||
typedef struct AMR_bitrates {
|
||||
int rate;
|
||||
enum Mode mode;
|
||||
} AMR_bitrates;
|
||||
|
||||
/* Match desired bitrate */
|
||||
static int get_bitrate_mode(int bitrate, void *log_ctx)
|
||||
{
|
||||
/* make the correspondance between bitrate and mode */
|
||||
static const AMR_bitrates rates[] = {
|
||||
{ 4750, MR475 }, { 5150, MR515 }, { 5900, MR59 }, { 6700, MR67 },
|
||||
{ 7400, MR74 }, { 7950, MR795 }, { 10200, MR102 }, { 12200, MR122 }
|
||||
};
|
||||
int i, best = -1, min_diff = 0;
|
||||
char log_buf[200];
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
if (rates[i].rate == bitrate)
|
||||
return rates[i].mode;
|
||||
if (best < 0 || abs(rates[i].rate - bitrate) < min_diff) {
|
||||
best = i;
|
||||
min_diff = abs(rates[i].rate - bitrate);
|
||||
}
|
||||
}
|
||||
/* no bitrate matching exactly, log a warning */
|
||||
snprintf(log_buf, sizeof(log_buf), "bitrate not supported: use one of ");
|
||||
for (i = 0; i < 8; i++)
|
||||
av_strlcatf(log_buf, sizeof(log_buf), "%.2fk, ", rates[i].rate / 1000.f);
|
||||
av_strlcatf(log_buf, sizeof(log_buf), "using %.2fk", rates[best].rate / 1000.f);
|
||||
av_log(log_ctx, AV_LOG_WARNING, "%s\n", log_buf);
|
||||
|
||||
return best;
|
||||
}
|
||||
|
||||
typedef struct AMRContext {
|
||||
AVClass *av_class;
|
||||
AVFrame frame;
|
||||
@@ -97,15 +62,7 @@ typedef struct AMRContext {
|
||||
AudioFrameQueue afq;
|
||||
} AMRContext;
|
||||
|
||||
static const AVOption options[] = {
|
||||
{ "dtx", "Allow DTX (generate comfort noise)", offsetof(AMRContext, enc_dtx), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static const AVClass class = {
|
||||
"libopencore_amrnb", av_default_item_name, options, LIBAVUTIL_VERSION_INT
|
||||
};
|
||||
|
||||
#if CONFIG_LIBOPENCORE_AMRNB_DECODER
|
||||
static av_cold int amr_nb_decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
AMRContext *s = avctx->priv_data;
|
||||
@@ -186,6 +143,52 @@ AVCodec ff_libopencore_amrnb_decoder = {
|
||||
.capabilities = CODEC_CAP_DR1,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("OpenCORE AMR-NB (Adaptive Multi-Rate Narrow-Band)"),
|
||||
};
|
||||
#endif /* CONFIG_LIBOPENCORE_AMRNB_DECODER */
|
||||
|
||||
#if CONFIG_LIBOPENCORE_AMRNB_ENCODER
|
||||
/* Common code for fixed and float version*/
|
||||
typedef struct AMR_bitrates {
|
||||
int rate;
|
||||
enum Mode mode;
|
||||
} AMR_bitrates;
|
||||
|
||||
/* Match desired bitrate */
|
||||
static int get_bitrate_mode(int bitrate, void *log_ctx)
|
||||
{
|
||||
/* make the correspondance between bitrate and mode */
|
||||
static const AMR_bitrates rates[] = {
|
||||
{ 4750, MR475 }, { 5150, MR515 }, { 5900, MR59 }, { 6700, MR67 },
|
||||
{ 7400, MR74 }, { 7950, MR795 }, { 10200, MR102 }, { 12200, MR122 }
|
||||
};
|
||||
int i, best = -1, min_diff = 0;
|
||||
char log_buf[200];
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
if (rates[i].rate == bitrate)
|
||||
return rates[i].mode;
|
||||
if (best < 0 || abs(rates[i].rate - bitrate) < min_diff) {
|
||||
best = i;
|
||||
min_diff = abs(rates[i].rate - bitrate);
|
||||
}
|
||||
}
|
||||
/* no bitrate matching exactly, log a warning */
|
||||
snprintf(log_buf, sizeof(log_buf), "bitrate not supported: use one of ");
|
||||
for (i = 0; i < 8; i++)
|
||||
av_strlcatf(log_buf, sizeof(log_buf), "%.2fk, ", rates[i].rate / 1000.f);
|
||||
av_strlcatf(log_buf, sizeof(log_buf), "using %.2fk", rates[best].rate / 1000.f);
|
||||
av_log(log_ctx, AV_LOG_WARNING, "%s\n", log_buf);
|
||||
|
||||
return best;
|
||||
}
|
||||
|
||||
static const AVOption options[] = {
|
||||
{ "dtx", "Allow DTX (generate comfort noise)", offsetof(AMRContext, enc_dtx), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static const AVClass class = {
|
||||
"libopencore_amrnb", av_default_item_name, options, LIBAVUTIL_VERSION_INT
|
||||
};
|
||||
|
||||
static av_cold int amr_nb_encode_init(AVCodecContext *avctx)
|
||||
{
|
||||
@@ -261,7 +264,7 @@ static int amr_nb_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
if (frame->nb_samples < avctx->frame_size - avctx->delay)
s->enc_last_frame = -1;
}
if ((ret = ff_af_queue_add(&s->afq, frame) < 0)) {
if ((ret = ff_af_queue_add(&s->afq, frame)) < 0) {
av_freep(&flush_buf);
return ret;
}
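This hunk, like the Speex, AAC, Vorbis, Nellymoser, RA-144, SVQ1, TIFF and init_pts changes further down, fixes the same operator-precedence slip: in `if ((ret = ff_af_queue_add(&s->afq, frame) < 0))` the `< 0` comparison binds before the assignment, so ret receives 0 or 1 instead of the error code. A tiny standalone demonstration of the difference (fails() is a made-up stand-in for a call that returns a negative error, not patch code):

#include <stdio.h>

static int fails(void) { return -42; }   /* stand-in for a failing call */

int main(void)
{
    int ret;

    ret = (fails() < 0);          /* comparison first: ret == 1, error code lost */
    printf("buggy form:   ret = %d\n", ret);

    if ((ret = fails()) < 0)      /* assignment first, then compare: ret == -42 */
        printf("correct form: ret = %d\n", ret);
    return 0;
}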
@@ -304,11 +307,12 @@ AVCodec ff_libopencore_amrnb_encoder = {
.long_name = NULL_IF_CONFIG_SMALL("OpenCORE AMR-NB (Adaptive Multi-Rate Narrow-Band)"),
.priv_class = &class,
};
#endif /* CONFIG_LIBOPENCORE_AMRNB_ENCODER */

#endif
#endif /* CONFIG_LIBOPENCORE_AMRNB */

/* -----------AMR wideband ------------*/
#if CONFIG_LIBOPENCORE_AMRWB
#if CONFIG_LIBOPENCORE_AMRWB_DECODER

#include <opencore-amrwb/dec_if.h>
#include <opencore-amrwb/if_rom.h>
@@ -392,4 +396,4 @@ AVCodec ff_libopencore_amrwb_decoder = {
.long_name = NULL_IF_CONFIG_SMALL("OpenCORE AMR-WB (Adaptive Multi-Rate Wide-Band)"),
};

#endif /* CONFIG_LIBOPENCORE_AMRWB */
#endif /* CONFIG_LIBOPENCORE_AMRWB_DECODER */
@@ -288,7 +288,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
speex_encode_stereo_int(samples, s->header.frame_size, &s->bits);
|
||||
speex_encode_int(s->enc_state, samples, &s->bits);
|
||||
s->pkt_frame_count++;
|
||||
if ((ret = ff_af_queue_add(&s->afq, frame) < 0))
|
||||
if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
|
||||
return ret;
|
||||
} else {
|
||||
/* handle end-of-stream */
|
||||
|
@@ -157,7 +157,7 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
samples = (VO_PBYTE)frame->data[0];
|
||||
}
|
||||
/* add current frame to the queue */
|
||||
if ((ret = ff_af_queue_add(&s->afq, frame) < 0))
|
||||
if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -305,7 +305,7 @@ static int oggvorbis_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
av_log(avctx, AV_LOG_ERROR, "error in vorbis_analysis_wrote()\n");
|
||||
return vorbis_error_to_averror(ret);
|
||||
}
|
||||
if ((ret = ff_af_queue_add(&s->afq, frame) < 0))
|
||||
if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
|
||||
return ret;
|
||||
} else {
|
||||
if (!s->eof)
|
||||
|
@@ -335,7 +335,7 @@ static av_cold int X264_init(AVCodecContext *avctx)
|
||||
x4->params.rc.f_rf_constant_max = x4->crf_max;
|
||||
}
|
||||
|
||||
if (avctx->rc_buffer_size && avctx->rc_initial_buffer_occupancy &&
|
||||
if (avctx->rc_buffer_size && avctx->rc_initial_buffer_occupancy > 0 &&
|
||||
(avctx->rc_initial_buffer_occupancy <= avctx->rc_buffer_size)) {
|
||||
x4->params.rc.f_vbv_buffer_init =
|
||||
(float)avctx->rc_initial_buffer_occupancy / avctx->rc_buffer_size;
|
||||
@@ -695,6 +695,7 @@ static const AVCodecDefault x264_defaults[] = {
|
||||
{ "threads", AV_STRINGIFY(X264_THREADS_AUTO) },
|
||||
{ "thread_type", "0" },
|
||||
{ "flags", "+cgop" },
|
||||
{ "rc_init_occupancy","-1" },
|
||||
{ NULL },
|
||||
};
|
||||
|
||||
|
@@ -57,6 +57,13 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
|
||||
max_pkt_size += mb_width * mb_height * 3 * 4
|
||||
* s->mjpeg_hsample[0] * s->mjpeg_vsample[0];
|
||||
}
|
||||
|
||||
if (!s->edge_emu_buffer &&
|
||||
(ret = ff_mpv_frame_size_alloc(s, pict->linesize[0])) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "failed to allocate context scratch buffers.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = ff_alloc_packet2(avctx, pkt, max_pkt_size)) < 0)
|
||||
return ret;
|
||||
|
||||
|
@@ -126,7 +126,7 @@ uint64_t ff_truehd_layout(int chanmap)
|
||||
|
||||
int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb)
|
||||
{
|
||||
int ratebits;
|
||||
int ratebits, channel_arrangement;
|
||||
uint16_t checksum;
|
||||
|
||||
av_assert1(get_bits_count(gb) == 0);
|
||||
@@ -157,7 +157,10 @@ int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb)
|
||||
|
||||
skip_bits(gb, 11);
|
||||
|
||||
mh->channels_mlp = get_bits(gb, 5);
|
||||
mh->channel_arrangement=
|
||||
channel_arrangement = get_bits(gb, 5);
|
||||
mh->channels_mlp = mlp_channels[channel_arrangement];
|
||||
mh->channel_layout_mlp = ff_mlp_layout[channel_arrangement];
|
||||
} else if (mh->stream_type == 0xba) {
|
||||
mh->group1_bits = 24; // TODO: Is this information actually conveyed anywhere?
|
||||
mh->group2_bits = 0;
|
||||
@@ -168,11 +171,16 @@ int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb)
|
||||
|
||||
skip_bits(gb, 8);
|
||||
|
||||
mh->channels_thd_stream1 = get_bits(gb, 5);
|
||||
mh->channel_arrangement=
|
||||
channel_arrangement = get_bits(gb, 5);
|
||||
mh->channels_thd_stream1 = truehd_channels(channel_arrangement);
|
||||
mh->channel_layout_thd_stream1 = ff_truehd_layout(channel_arrangement);
|
||||
|
||||
skip_bits(gb, 2);
|
||||
|
||||
mh->channels_thd_stream2 = get_bits(gb, 13);
|
||||
channel_arrangement = get_bits(gb, 13);
|
||||
mh->channels_thd_stream2 = truehd_channels(channel_arrangement);
|
||||
mh->channel_layout_thd_stream2 = ff_truehd_layout(channel_arrangement);
|
||||
} else
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
@@ -322,16 +330,16 @@ static int mlp_parse(AVCodecParserContext *s,
|
||||
if(!avctx->channels || !avctx->channel_layout) {
|
||||
if (mh.stream_type == 0xbb) {
|
||||
/* MLP stream */
|
||||
avctx->channels = mlp_channels[mh.channels_mlp];
|
||||
avctx->channel_layout = ff_mlp_layout[mh.channels_mlp];
|
||||
avctx->channels = mh.channels_mlp;
|
||||
avctx->channel_layout = mh.channel_layout_mlp;
|
||||
} else { /* mh.stream_type == 0xba */
|
||||
/* TrueHD stream */
|
||||
if (mh.channels_thd_stream2) {
|
||||
avctx->channels = truehd_channels(mh.channels_thd_stream2);
|
||||
avctx->channel_layout = ff_truehd_layout(mh.channels_thd_stream2);
|
||||
avctx->channels = mh.channels_thd_stream2;
|
||||
avctx->channel_layout = mh.channel_layout_thd_stream2;
|
||||
} else {
|
||||
avctx->channels = truehd_channels(mh.channels_thd_stream1);
|
||||
avctx->channel_layout = ff_truehd_layout(mh.channels_thd_stream1);
|
||||
avctx->channels = mh.channels_thd_stream1;
|
||||
avctx->channel_layout = mh.channel_layout_thd_stream1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -39,9 +39,13 @@ typedef struct MLPHeaderInfo
|
||||
int group1_samplerate; ///< Sample rate of first substream
|
||||
int group2_samplerate; ///< Sample rate of second substream (MLP only)
|
||||
|
||||
int channels_mlp; ///< Channel arrangement for MLP streams
|
||||
int channels_thd_stream1; ///< Channel arrangement for substream 1 of TrueHD streams (5.1)
|
||||
int channels_thd_stream2; ///< Channel arrangement for substream 2 of TrueHD streams (7.1)
|
||||
int channel_arrangement;
|
||||
int channels_mlp; ///< Channel count for MLP streams
|
||||
int channels_thd_stream1; ///< Channel count for substream 1 of TrueHD streams ("6-channel presentation")
|
||||
int channels_thd_stream2; ///< Channel count for substream 2 of TrueHD streams ("8-channel presentation")
|
||||
uint64_t channel_layout_mlp; ///< Channel layout for MLP streams
|
||||
uint64_t channel_layout_thd_stream1; ///< Channel layout for substream 1 of TrueHD streams ("6-channel presentation")
|
||||
uint64_t channel_layout_thd_stream2; ///< Channel layout for substream 2 of TrueHD streams ("8-channel presentation")
|
||||
|
||||
int access_unit_size; ///< Number of samples per coded frame
|
||||
int access_unit_size_pow2; ///< Next power of two above number of samples per frame
|
||||
|
@@ -28,6 +28,7 @@
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "libavutil/channel_layout.h"
|
||||
#include "get_bits.h"
|
||||
#include "internal.h"
|
||||
#include "libavutil/crc.h"
|
||||
@@ -56,6 +57,8 @@ typedef struct SubStream {
|
||||
uint8_t max_matrix_channel;
|
||||
/// For each channel output by the matrix, the output channel to map it to
|
||||
uint8_t ch_assign[MAX_CHANNELS];
|
||||
/// The channel layout for this substream
|
||||
uint64_t ch_layout;
|
||||
|
||||
/// Channel coding parameters for channels in the substream
|
||||
ChannelParams channel_params[MAX_CHANNELS];
|
||||
@@ -149,6 +152,36 @@ typedef struct MLPDecodeContext {
MLPDSPContext dsp;
} MLPDecodeContext;

static const uint64_t thd_channel_order[] = {
AV_CH_FRONT_LEFT, AV_CH_FRONT_RIGHT, // LR
AV_CH_FRONT_CENTER, // C
AV_CH_LOW_FREQUENCY, // LFE
AV_CH_SIDE_LEFT, AV_CH_SIDE_RIGHT, // LRs
AV_CH_TOP_FRONT_LEFT, AV_CH_TOP_FRONT_RIGHT, // LRvh
AV_CH_FRONT_LEFT_OF_CENTER, AV_CH_FRONT_RIGHT_OF_CENTER, // LRc
AV_CH_BACK_LEFT, AV_CH_BACK_RIGHT, // LRrs
AV_CH_BACK_CENTER, // Cs
AV_CH_TOP_CENTER, // Ts
AV_CH_SURROUND_DIRECT_LEFT, AV_CH_SURROUND_DIRECT_RIGHT, // LRsd
AV_CH_WIDE_LEFT, AV_CH_WIDE_RIGHT, // LRw
AV_CH_TOP_FRONT_CENTER, // Cvh
AV_CH_LOW_FREQUENCY_2, // LFE2
};

static uint64_t thd_channel_layout_extract_channel(uint64_t channel_layout,
int index)
{
int i;

if (av_get_channel_layout_nb_channels(channel_layout) <= index)
return 0;

for (i = 0; i < FF_ARRAY_ELEMS(thd_channel_order); i++)
if (channel_layout & thd_channel_order[i] && !index--)
return thd_channel_order[i];
return 0;
}
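The helper above pulls the index-th channel that is actually present out of a layout mask while preserving the TrueHD presentation order; absent channels are skipped without consuming the index. A toy standalone version of the same scan (the channel bits and table here are made up for the demo and are not the libavutil constants):

#include <stdio.h>
#include <stdint.h>

#define CH_FL  0x01
#define CH_FR  0x02
#define CH_C   0x04
#define CH_LFE 0x08

static const uint64_t demo_order[] = { CH_FL, CH_FR, CH_C, CH_LFE };

static uint64_t extract_channel(uint64_t layout, int index)
{
    size_t i;
    for (i = 0; i < sizeof(demo_order) / sizeof(demo_order[0]); i++)
        if (layout & demo_order[i] && !index--)   /* only count channels present in the mask */
            return demo_order[i];
    return 0;
}

int main(void)
{
    uint64_t layout = CH_FL | CH_FR | CH_LFE;     /* a 2.1 layout for the demo */
    int i;

    for (i = 0; i < 3; i++)
        printf("slot %d -> channel bit 0x%02x\n", i, (unsigned)extract_channel(layout, i));
    return 0;
}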
|
||||
static VLC huff_vlc[3];
|
||||
|
||||
/** Initialize static data, constant between all invocations of the codec. */
|
||||
@@ -328,31 +361,32 @@ static int read_major_sync(MLPDecodeContext *m, GetBitContext *gb)
|
||||
for (substr = 0; substr < MAX_SUBSTREAMS; substr++)
|
||||
m->substream[substr].restart_seen = 0;
|
||||
|
||||
if (mh.stream_type == 0xbb) {
|
||||
/* MLP stream */
|
||||
m->avctx->channel_layout = ff_mlp_layout[mh.channels_mlp];
|
||||
} else { /* mh.stream_type == 0xba */
|
||||
/* TrueHD stream */
|
||||
if (mh.channels_thd_stream2) {
|
||||
m->avctx->channel_layout = ff_truehd_layout(mh.channels_thd_stream2);
|
||||
} else {
|
||||
m->avctx->channel_layout = ff_truehd_layout(mh.channels_thd_stream1);
|
||||
}
|
||||
if (m->avctx->channels<=2 && m->avctx->channel_layout == AV_CH_LAYOUT_MONO && m->max_decoded_substream == 1) {
|
||||
/* Set the layout for each substream. When there's more than one, the first
|
||||
* substream is Stereo. Subsequent substreams' layouts are indicated in the
|
||||
* major sync. */
|
||||
if (m->avctx->codec_id == AV_CODEC_ID_MLP) {
|
||||
if ((substr = (mh.num_substreams > 1)))
|
||||
m->substream[0].ch_layout = AV_CH_LAYOUT_STEREO;
|
||||
m->substream[substr].ch_layout = mh.channel_layout_mlp;
|
||||
} else {
|
||||
if ((substr = (mh.num_substreams > 1)))
|
||||
m->substream[0].ch_layout = AV_CH_LAYOUT_STEREO;
|
||||
if (mh.num_substreams > 2)
|
||||
if (mh.channel_layout_thd_stream2)
|
||||
m->substream[2].ch_layout = mh.channel_layout_thd_stream2;
|
||||
else
|
||||
m->substream[2].ch_layout = mh.channel_layout_thd_stream1;
|
||||
m->substream[substr].ch_layout = mh.channel_layout_thd_stream1;
|
||||
|
||||
if (m->avctx->channels<=2 && m->substream[substr].ch_layout == AV_CH_LAYOUT_MONO && m->max_decoded_substream == 1) {
|
||||
av_log(m->avctx, AV_LOG_DEBUG, "Mono stream with 2 substreams, ignoring 2nd\n");
|
||||
m->max_decoded_substream = 0;
|
||||
if (m->avctx->channels==2)
|
||||
m->avctx->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
}
|
||||
if (m->avctx->channels &&
|
||||
!m->avctx->request_channels && !m->avctx->request_channel_layout &&
|
||||
av_get_channel_layout_nb_channels(m->avctx->channel_layout) != m->avctx->channels) {
|
||||
m->avctx->channel_layout = 0;
|
||||
av_log_ask_for_sample(m->avctx, "Unknown channel layout.");
|
||||
}
|
||||
}
|
||||
|
||||
m->needs_reordering = mh.channels_mlp >= 18 && mh.channels_mlp <= 20;
|
||||
m->needs_reordering = mh.channel_arrangement >= 18 && mh.channel_arrangement <= 20;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -460,6 +494,12 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
|
||||
|
||||
for (ch = 0; ch <= s->max_matrix_channel; ch++) {
|
||||
int ch_assign = get_bits(gbp, 6);
|
||||
if (m->avctx->codec_id == AV_CODEC_ID_TRUEHD) {
|
||||
uint64_t channel = thd_channel_layout_extract_channel(s->ch_layout,
|
||||
ch_assign);
|
||||
ch_assign = av_get_channel_layout_channel_index(s->ch_layout,
|
||||
channel);
|
||||
}
|
||||
if (ch_assign > s->max_matrix_channel) {
|
||||
av_log_ask_for_sample(m->avctx,
|
||||
"Assignment of matrix channel %d to invalid output channel %d.\n",
|
||||
@@ -481,20 +521,6 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
|
||||
FFSWAP(int, s->ch_assign[3], s->ch_assign[5]);
|
||||
}
|
||||
}
|
||||
if (m->avctx->codec_id == AV_CODEC_ID_TRUEHD &&
|
||||
(m->avctx->channel_layout == AV_CH_LAYOUT_7POINT1 ||
|
||||
m->avctx->channel_layout == AV_CH_LAYOUT_7POINT1_WIDE)) {
|
||||
FFSWAP(int, s->ch_assign[4], s->ch_assign[6]);
|
||||
FFSWAP(int, s->ch_assign[5], s->ch_assign[7]);
|
||||
} else if (m->avctx->codec_id == AV_CODEC_ID_TRUEHD &&
|
||||
(m->avctx->channel_layout == AV_CH_LAYOUT_6POINT1 ||
|
||||
m->avctx->channel_layout == (AV_CH_LAYOUT_6POINT1 | AV_CH_TOP_CENTER) ||
|
||||
m->avctx->channel_layout == (AV_CH_LAYOUT_6POINT1 | AV_CH_TOP_FRONT_CENTER))) {
|
||||
int i = s->ch_assign[6];
|
||||
s->ch_assign[6] = s->ch_assign[5];
|
||||
s->ch_assign[5] = s->ch_assign[4];
|
||||
s->ch_assign[4] = i;
|
||||
}
|
||||
|
||||
checksum = ff_mlp_restart_checksum(buf, get_bits_count(gbp) - start_count);
|
||||
|
||||
@@ -524,10 +550,9 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
|
||||
cp->huff_lsbs = 24;
|
||||
}
|
||||
|
||||
if (substr == m->max_decoded_substream &&
|
||||
m->avctx->channels != s->max_matrix_channel + 1) {
|
||||
m->avctx->channels = s->max_matrix_channel + 1;
|
||||
m->avctx->channel_layout = 0;
|
||||
if (substr == m->max_decoded_substream) {
|
||||
m->avctx->channels = s->max_matrix_channel + 1;
|
||||
m->avctx->channel_layout = s->ch_layout;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -720,6 +745,7 @@ static int read_channel_params(MLPDecodeContext *m, unsigned int substr,
|
||||
|
||||
if (cp->huff_lsbs > 24) {
|
||||
av_log(m->avctx, AV_LOG_ERROR, "Invalid huff_lsbs.\n");
|
||||
cp->huff_lsbs = 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
|
@@ -21,6 +21,7 @@
|
||||
|
||||
#include <stdarg.h>
|
||||
#include "avcodec.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "ass_split.h"
|
||||
@@ -87,15 +88,18 @@ static av_cold int mov_text_encode_init(AVCodecContext *avctx)
|
||||
static void mov_text_text_cb(void *priv, const char *text, int len)
|
||||
{
|
||||
MovTextContext *s = priv;
|
||||
av_assert0(s->end >= s->ptr);
|
||||
av_strlcpy(s->ptr, text, FFMIN(s->end - s->ptr, len + 1));
|
||||
s->ptr += len;
|
||||
s->ptr += FFMIN(s->end - s->ptr, len);
|
||||
}
|
||||
|
||||
static void mov_text_new_line_cb(void *priv, int forced)
|
||||
{
|
||||
MovTextContext *s = priv;
|
||||
av_assert0(s->end >= s->ptr);
|
||||
av_strlcpy(s->ptr, "\n", FFMIN(s->end - s->ptr, 2));
|
||||
s->ptr++;
|
||||
if (s->end > s->ptr)
|
||||
s->ptr++;
|
||||
}
|
||||
|
||||
static const ASSCodesCallbacks mov_text_callbacks = {
|
||||
|
@@ -2001,8 +2001,6 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx,
|
||||
|
||||
width = get_bits(&s->gb, 12);
|
||||
height = get_bits(&s->gb, 12);
|
||||
if (width <= 0 || height <= 0)
|
||||
return -1;
|
||||
s->aspect_ratio_info = get_bits(&s->gb, 4);
|
||||
if (s->aspect_ratio_info == 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
|
||||
@@ -2312,6 +2310,11 @@ static int decode_chunks(AVCodecContext *avctx,
|
||||
break;
|
||||
|
||||
case PICTURE_START_CODE:
|
||||
if (s2->width <= 0 || s2->height <= 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "%dx%d is invalid\n", s2->width, s2->height);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if(s->tmpgexs){
|
||||
s2->intra_dc_precision= 3;
|
||||
s2->intra_matrix[0]= 1;
|
||||
|
@@ -145,6 +145,9 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
{
|
||||
MpegEncContext *s = avctx->priv_data;
|
||||
|
||||
if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && avctx->height > 2800)
|
||||
avctx->thread_count = 1;
|
||||
|
||||
if(ff_MPV_encode_init(avctx) < 0)
|
||||
return -1;
|
||||
|
||||
@@ -180,6 +183,19 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
}
|
||||
}
|
||||
|
||||
if ((avctx->width & 0xFFF) == 0 && (avctx->height & 0xFFF) == 1) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Width / Height is invalid for MPEG2\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
if (s->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
|
||||
if ((avctx->width & 0xFFF) == 0 || (avctx->height & 0xFFF) == 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Width or Height are not allowed to be multiplies of 4096\n"
|
||||
"add '-strict %d' if you want to use them anyway.\n", FF_COMPLIANCE_UNOFFICIAL);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
s->drop_frame_timecode = s->drop_frame_timecode || !!(avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE);
|
||||
if (s->drop_frame_timecode)
|
||||
s->tc.flags |= AV_TIMECODE_FLAG_DROPFRAME;
|
||||
@@ -227,8 +243,8 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
|
||||
/* mpeg1 header repeated every gop */
|
||||
put_header(s, SEQ_START_CODE);
|
||||
|
||||
put_sbits(&s->pb, 12, s->width );
|
||||
put_sbits(&s->pb, 12, s->height);
|
||||
put_sbits(&s->pb, 12, s->width & 0xFFF);
|
||||
put_sbits(&s->pb, 12, s->height & 0xFFF);
|
||||
|
||||
for(i=1; i<15; i++){
|
||||
float error= aspect_ratio;
|
||||
|
@@ -1653,7 +1653,7 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
|
||||
buf += sx + sy * stride;
|
||||
ex -= sx;
|
||||
f = ((ey - sy) << 16) / ex;
|
||||
for(x= 0; x <= ex; x++){
|
||||
for (x = 0; x <= ex; x++) {
|
||||
y = (x * f) >> 16;
|
||||
fr = (x * f) & 0xFFFF;
|
||||
buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
|
||||
|
@@ -731,7 +731,7 @@ typedef struct MpegEncContext {

#define REBASE_PICTURE(pic, new_ctx, old_ctx) (pic ? \
(pic >= old_ctx->picture && pic < old_ctx->picture+old_ctx->picture_count ?\
&new_ctx->picture[pic - old_ctx->picture] : pic - (Picture*)old_ctx + (Picture*)new_ctx)\
&new_ctx->picture[pic - old_ctx->picture] : (Picture*) ((uint8_t*)pic - (uint8_t*)old_ctx + (uint8_t*)new_ctx))\
: NULL)

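The REBASE_PICTURE change replaces element-based pointer arithmetic with byte arithmetic: `pic - (Picture*)old_ctx` counts in sizeof(Picture) units and is only meaningful when both pointers lie in the same Picture array, whereas rebasing through uint8_t* moves the pointer by its byte offset from one context block to the other. A small standalone sketch of the distinction (Demo is an invented type, not FFmpeg's Picture):

#include <stdio.h>
#include <stdint.h>

typedef struct Demo { char pad[64]; } Demo;

int main(void)
{
    char old_block[1024], new_block[1024];
    Demo *pic = (Demo *)(old_block + 200);   /* points 200 bytes into the old block */

    /* byte-based rebase: lands exactly 200 bytes into the new block */
    Demo *rebased = (Demo *)((uint8_t *)pic - (uint8_t *)old_block + (uint8_t *)new_block);

    printf("offset in old block: %ld\n", (long)((char *)pic     - old_block));
    printf("offset in new block: %ld\n", (long)((char *)rebased - new_block));

    /* Element-based arithmetic, pic - (Demo *)old_block, would count in
     * 64-byte Demo units and only works when pic really points into an
     * array of Demo starting at old_block. */
    return 0;
}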
/* mpegvideo_enc common options */
|
||||
|
@@ -144,8 +144,7 @@ static int msrle_decode_8_16_24_32(AVCodecContext *avctx, AVPicture *pic,
|
||||
if(p1 == 0) { //Escape code
|
||||
p2 = bytestream2_get_byte(gb);
|
||||
if(p2 == 0) { //End-of-line
|
||||
output = pic->data[0] + (--line) * pic->linesize[0];
|
||||
if (line < 0) {
|
||||
if (--line < 0) {
|
||||
if (bytestream2_get_be16(gb) == 1) { // end-of-picture
|
||||
return 0;
|
||||
} else {
|
||||
@@ -155,6 +154,7 @@ static int msrle_decode_8_16_24_32(AVCodecContext *avctx, AVPicture *pic,
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
output = pic->data[0] + line * pic->linesize[0];
|
||||
pos = 0;
|
||||
continue;
|
||||
} else if(p2 == 1) { //End-of-picture
|
||||
|
@@ -397,7 +397,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
if (frame->nb_samples >= NELLY_BUF_LEN)
|
||||
s->last_frame = 1;
|
||||
}
|
||||
if ((ret = ff_af_queue_add(&s->afq, frame) < 0))
|
||||
if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
|
||||
return ret;
|
||||
} else {
|
||||
memset(s->buf + NELLY_BUF_LEN, 0, NELLY_SAMPLES * sizeof(*s->buf));
|
||||
|
@@ -189,7 +189,7 @@ void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w
|
||||
if(bpp >= 2) g = dst[1];\
|
||||
if(bpp >= 3) b = dst[2];\
|
||||
if(bpp >= 4) a = dst[3];\
|
||||
for(; i < size; i+=bpp) {\
|
||||
for(; i <= size - bpp; i+=bpp) {\
|
||||
dst[i+0] = r = op(r, src[i+0], last[i+0]);\
|
||||
if(bpp == 1) continue;\
|
||||
dst[i+1] = g = op(g, src[i+1], last[i+1]);\
|
||||
@@ -205,13 +205,9 @@ void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w
|
||||
else if(bpp == 2) UNROLL1(2, op)\
|
||||
else if(bpp == 3) UNROLL1(3, op)\
|
||||
else if(bpp == 4) UNROLL1(4, op)\
|
||||
else {\
|
||||
for (; i < size; i += bpp) {\
|
||||
int j;\
|
||||
for (j = 0; j < bpp; j++)\
|
||||
dst[i+j] = op(dst[i+j-bpp], src[i+j], last[i+j]);\
|
||||
}\
|
||||
}
|
||||
for (; i < size; i++) {\
|
||||
dst[i] = op(dst[i-bpp], src[i], last[i]);\
|
||||
}\
|
||||
|
||||
/* NOTE: 'dst' can be equal to 'last' */
|
||||
static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
|
||||
|
@@ -244,12 +244,10 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
color_type = PNG_COLOR_TYPE_RGB;
|
||||
break;
|
||||
case AV_PIX_FMT_RGBA:
|
||||
avctx->bits_per_coded_sample = 32;
|
||||
bit_depth = 8;
|
||||
color_type = PNG_COLOR_TYPE_RGB_ALPHA;
|
||||
break;
|
||||
case AV_PIX_FMT_RGB24:
|
||||
avctx->bits_per_coded_sample = 24;
|
||||
bit_depth = 8;
|
||||
color_type = PNG_COLOR_TYPE_RGB;
|
||||
break;
|
||||
@@ -258,7 +256,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
color_type = PNG_COLOR_TYPE_GRAY;
|
||||
break;
|
||||
case AV_PIX_FMT_GRAY8:
|
||||
avctx->bits_per_coded_sample = 0x28;
|
||||
bit_depth = 8;
|
||||
color_type = PNG_COLOR_TYPE_GRAY;
|
||||
break;
|
||||
@@ -267,12 +264,10 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
color_type = PNG_COLOR_TYPE_GRAY_ALPHA;
|
||||
break;
|
||||
case AV_PIX_FMT_MONOBLACK:
|
||||
avctx->bits_per_coded_sample =
|
||||
bit_depth = 1;
|
||||
color_type = PNG_COLOR_TYPE_GRAY;
|
||||
break;
|
||||
case AV_PIX_FMT_PAL8:
|
||||
avctx->bits_per_coded_sample =
|
||||
bit_depth = 8;
|
||||
color_type = PNG_COLOR_TYPE_PALETTE;
|
||||
break;
|
||||
@@ -437,6 +432,23 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
static av_cold int png_enc_init(AVCodecContext *avctx){
|
||||
PNGEncContext *s = avctx->priv_data;
|
||||
|
||||
switch(avctx->pix_fmt) {
|
||||
case AV_PIX_FMT_RGBA:
|
||||
avctx->bits_per_coded_sample = 32;
|
||||
break;
|
||||
case AV_PIX_FMT_RGB24:
|
||||
avctx->bits_per_coded_sample = 24;
|
||||
break;
|
||||
case AV_PIX_FMT_GRAY8:
|
||||
avctx->bits_per_coded_sample = 0x28;
|
||||
break;
|
||||
case AV_PIX_FMT_MONOBLACK:
|
||||
avctx->bits_per_coded_sample = 1;
|
||||
break;
|
||||
case AV_PIX_FMT_PAL8:
|
||||
avctx->bits_per_coded_sample = 8;
|
||||
}
|
||||
|
||||
avcodec_get_frame_defaults(&s->picture);
|
||||
avctx->coded_frame= &s->picture;
|
||||
ff_dsputil_init(&s->dsp, avctx);
|
||||
|
@@ -1258,7 +1258,7 @@ static void qdm2_decode_super_block (QDM2Context *q)
|
||||
for (i = 0; packet_bytes > 0; i++) {
|
||||
int j;
|
||||
|
||||
if (i>=FF_ARRAY_ELEMS(q->sub_packet_list_A)) {
|
||||
if (i >= FF_ARRAY_ELEMS(q->sub_packet_list_A)) {
|
||||
SAMPLES_NEEDED_2("too many packet bytes");
|
||||
return;
|
||||
}
|
||||
|
@@ -56,22 +56,16 @@ typedef struct QtrleContext {
|
||||
static void qtrle_decode_1bpp(QtrleContext *s, int row_ptr, int lines_to_change)
|
||||
{
|
||||
int rle_code;
|
||||
int pixel_ptr = 0;
|
||||
int pixel_ptr;
|
||||
int row_inc = s->frame.linesize[0];
|
||||
unsigned char pi0, pi1; /* 2 8-pixel values */
|
||||
unsigned char *rgb = s->frame.data[0];
|
||||
int pixel_limit = s->frame.linesize[0] * s->avctx->height;
|
||||
int skip;
|
||||
/* skip & 0x80 appears to mean 'start a new line', which can be interpreted
|
||||
* as 'go to next line' during the decoding of a frame but is 'go to first
|
||||
* line' at the beginning. Since we always interpret it as 'go to next line'
|
||||
* in the decoding loop (which makes code simpler/faster), the first line
|
||||
* would not be counted, so we count one more.
|
||||
* See: https://ffmpeg.org/trac/ffmpeg/ticket/226
|
||||
* In the following decoding loop, row_ptr will be the position of the
|
||||
* _next_ row. */
|
||||
lines_to_change++;
|
||||
|
||||
row_ptr -= row_inc;
|
||||
pixel_ptr = row_ptr;
|
||||
lines_to_change++;
|
||||
while (lines_to_change) {
|
||||
skip = bytestream2_get_byte(&s->g);
|
||||
rle_code = (signed char)bytestream2_get_byte(&s->g);
|
||||
@@ -79,8 +73,8 @@ static void qtrle_decode_1bpp(QtrleContext *s, int row_ptr, int lines_to_change)
|
||||
break;
|
||||
if(skip & 0x80) {
|
||||
lines_to_change--;
|
||||
pixel_ptr = row_ptr + 2 * (skip & 0x7f);
|
||||
row_ptr += row_inc;
|
||||
pixel_ptr = row_ptr + 2 * (skip & 0x7f);
|
||||
} else
|
||||
pixel_ptr += 2 * skip;
|
||||
CHECK_PIXEL_PTR(0); /* make sure pixel_ptr is positive */
|
||||
|
@@ -536,7 +536,7 @@ static int ra144_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
for (; i < frame->nb_samples; i++)
|
||||
ractx->curr_block[i] = samples[i] >> 2;
|
||||
|
||||
if ((ret = ff_af_queue_add(&ractx->afq, frame) < 0))
|
||||
if ((ret = ff_af_queue_add(&ractx->afq, frame)) < 0)
|
||||
return ret;
|
||||
} else
|
||||
ractx->last_frame = 1;
|
||||
|
@@ -171,9 +171,10 @@ static av_cold int roq_decode_init(AVCodecContext *avctx)
|
||||
|
||||
s->avctx = avctx;
|
||||
|
||||
if (avctx->width%16 || avctx->height%16) {
|
||||
av_log_ask_for_sample(avctx, "dimensions not being a multiple of 16 are unsupported\n");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
if (avctx->width % 16 || avctx->height % 16) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Dimensions must be a multiple of 16\n");
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
s->width = avctx->width;
|
||||
|
@@ -187,7 +187,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
|
||||
for(i = !mb_x; i < 2; i++, C += 4){
|
||||
int ij = i + (j >> 1);
|
||||
loc_lim = 0;
|
||||
if(cur_cbp && (1 << ij))
|
||||
if (cur_cbp & (1 << ij))
|
||||
loc_lim = cur_lim;
|
||||
else if(!i && left_cbp & (1 << (ij + 1)))
|
||||
loc_lim = left_lim;
|
||||
@@ -229,7 +229,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
|
||||
for(i = 0; i < 2; i++, C += 4){
|
||||
int ij = i + (j >> 1);
|
||||
loc_lim = 0;
|
||||
if(r->cbp_chroma[mb_pos] && (1 << ij))
|
||||
if (r->cbp_chroma[mb_pos] & (1 << ij))
|
||||
loc_lim = cur_lim;
|
||||
else if(!j && top_cbp & (1 << (ij + 2)))
|
||||
loc_lim = top_lim;
|
||||
|
@@ -26,6 +26,7 @@
|
||||
#include "bytestream.h"
|
||||
#include "internal.h"
|
||||
#include "libavutil/bswap.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavcodec/dsputil.h"
|
||||
#include "sanm_data.h"
|
||||
|
||||
@@ -415,6 +416,11 @@ static int old_codec37(SANMVideoContext *ctx, int top,
|
||||
flags = bytestream2_get_byte(&ctx->gb);
|
||||
bytestream2_skip(&ctx->gb, 3);
|
||||
|
||||
if (decoded_size > ctx->height * stride - left - top * stride) {
|
||||
decoded_size = ctx->height * stride - left - top * stride;
|
||||
av_log(ctx->avctx, AV_LOG_WARNING, "decoded size is too large\n");
|
||||
}
|
||||
|
||||
ctx->rotate_code = 0;
|
||||
|
||||
if (((seq & 1) || !(flags & 1)) && (compr && compr != 2))
|
||||
@@ -638,6 +644,11 @@ static int old_codec47(SANMVideoContext *ctx, int top,
|
||||
decoded_size = bytestream2_get_le32(&ctx->gb);
|
||||
bytestream2_skip(&ctx->gb, 8);
|
||||
|
||||
if (decoded_size > ctx->height * stride - left - top * stride) {
|
||||
decoded_size = ctx->height * stride - left - top * stride;
|
||||
av_log(ctx->avctx, AV_LOG_WARNING, "decoded size is too large\n");
|
||||
}
|
||||
|
||||
if (skip & 1)
|
||||
bytestream2_skip(&ctx->gb, 0x8080);
|
||||
if (!seq) {
|
||||
@@ -716,8 +727,11 @@ static int process_frame_obj(SANMVideoContext *ctx)
|
||||
h = bytestream2_get_le16u(&ctx->gb);
|
||||
|
||||
if (ctx->width < left + w || ctx->height < top + h) {
|
||||
ctx->avctx->width = FFMAX(left + w, ctx->width);
|
||||
ctx->avctx->height = FFMAX(top + h, ctx->height);
|
||||
if (av_image_check_size(FFMAX(left + w, ctx->width),
|
||||
FFMAX(top + h, ctx->height), 0, ctx->avctx) < 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
avcodec_set_dimensions(ctx->avctx, FFMAX(left + w, ctx->width),
|
||||
FFMAX(top + h, ctx->height));
|
||||
init_sizes(ctx, left + w, top + h);
|
||||
if (init_buffers(ctx)) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "error resizing buffers\n");
|
||||
|
@@ -343,6 +343,7 @@ static int read_header(ShortenContext *s)
|
||||
s->channels = get_uint(s, CHANSIZE);
|
||||
if (s->channels <= 0 || s->channels > MAX_CHANNELS) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "too many channels: %d\n", s->channels);
|
||||
s->channels = 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
s->avctx->channels = s->channels;
|
||||
|
@@ -538,7 +538,7 @@ static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
int i, ret;
|
||||
|
||||
if ((ret = ff_alloc_packet2(avctx, pkt, s->y_block_width * s->y_block_height *
|
||||
MAX_MB_BYTES*3 + FF_MIN_BUFFER_SIZE) < 0))
|
||||
MAX_MB_BYTES*3 + FF_MIN_BUFFER_SIZE)) < 0)
|
||||
return ret;
|
||||
|
||||
if (avctx->pix_fmt != AV_PIX_FMT_YUV410P) {
|
||||
@@ -547,7 +547,7 @@ static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
}
|
||||
|
||||
if (!s->current_picture.data[0]) {
|
||||
if ((ret = ff_get_buffer(avctx, &s->current_picture) < 0) ||
|
||||
if ((ret = ff_get_buffer(avctx, &s->current_picture))< 0 ||
|
||||
(ret = ff_get_buffer(avctx, &s->last_picture)) < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
@@ -40,7 +40,7 @@ static uint8_t *advance_line(uint8_t *start, uint8_t *line,
|
||||
return line + interleave * stride;
|
||||
} else {
|
||||
*y = (*y + 1) & (interleave - 1);
|
||||
if (*y) {
|
||||
if (*y && *y < h) {
|
||||
return start + *y * stride;
|
||||
} else {
|
||||
return NULL;
|
||||
|
@@ -212,10 +212,12 @@ static char *doubles2str(double *dp, int count, const char *sep)
|
||||
{
|
||||
int i;
|
||||
char *ap, *ap0;
|
||||
int component_len;
|
||||
uint64_t component_len;
|
||||
if (!sep) sep = ", ";
|
||||
component_len = 15 + strlen(sep);
|
||||
ap = av_malloc(component_len * count);
|
||||
component_len = 15LL + strlen(sep);
|
||||
if (count >= (INT_MAX - 1)/component_len)
|
||||
return NULL;
|
||||
ap = av_malloc(component_len * count + 1);
|
||||
if (!ap)
|
||||
return NULL;
|
||||
ap0 = ap;
|
||||
@@ -236,14 +238,22 @@ static char *shorts2str(int16_t *sp, int count, const char *sep)
|
||||
{
|
||||
int i;
|
||||
char *ap, *ap0;
|
||||
uint64_t component_len;
|
||||
if (!sep) sep = ", ";
|
||||
ap = av_malloc((5 + strlen(sep)) * count);
|
||||
component_len = 7LL + strlen(sep);
|
||||
if (count >= (INT_MAX - 1)/component_len)
|
||||
return NULL;
|
||||
ap = av_malloc(component_len * count + 1);
|
||||
if (!ap)
|
||||
return NULL;
|
||||
ap0 = ap;
|
||||
ap[0] = '\0';
|
||||
for (i = 0; i < count; i++) {
|
||||
int l = snprintf(ap, 5 + strlen(sep), "%d%s", sp[i], sep);
|
||||
unsigned l = snprintf(ap, component_len, "%d%s", sp[i], sep);
|
||||
if (l >= component_len) {
|
||||
av_free(ap0);
|
||||
return NULL;
|
||||
}
|
||||
ap += l;
|
||||
}
|
||||
ap0[strlen(ap0) - strlen(sep)] = '\0';
|
||||
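The doubles2str()/shorts2str() hunks above widen component_len to 64 bits and bound count before the multiplication, so the size handed to av_malloc() cannot wrap around for attacker-controlled element counts. The guard in isolation, as a standalone sketch (plain malloc() instead of av_malloc(); the 15-byte component width is just the worst case assumed in the hunk):

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Size a buffer for `count` components plus separators and a trailing NUL,
 * refusing instead of wrapping when the multiplication would overflow. */
static char *alloc_component_buf(int count, const char *sep)
{
    uint64_t component_len = 15LL + strlen(sep);

    if (count <= 0 || count >= (INT_MAX - 1) / component_len)
        return NULL;                            /* would overflow, bail out */
    return malloc(component_len * count + 1);   /* product proven to fit */
}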
@@ -878,7 +888,7 @@ static int tiff_decode_tag(TiffContext *s)
|
||||
s->fax_opts = value;
|
||||
break;
|
||||
#define ADD_METADATA(count, name, sep)\
|
||||
if (ret = add_metadata(count, type, name, sep, s) < 0) {\
|
||||
if ((ret = add_metadata(count, type, name, sep, s)) < 0) {\
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
|
||||
return ret;\
|
||||
}
|
||||
|
@@ -1018,6 +1018,9 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
|
||||
&& avctx->bit_rate>0 && avctx->bit_rate<1000) {
|
||||
av_log(avctx, AV_LOG_WARNING, "Bitrate %d is extreemly low, did you mean %dk\n", avctx->bit_rate, avctx->bit_rate);
|
||||
}
|
||||
|
||||
if (!avctx->rc_initial_buffer_occupancy)
|
||||
avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3 / 4;
|
||||
}
|
||||
|
||||
avctx->pts_correction_num_faulty_pts =
|
||||
@@ -1764,7 +1767,7 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
|
||||
av_log(avctx, AV_LOG_DEBUG, "skip %d samples due to side data\n",
|
||||
avctx->internal->skip_samples);
|
||||
}
|
||||
if (avctx->internal->skip_samples) {
|
||||
if (avctx->internal->skip_samples && *got_frame_ptr) {
|
||||
if(frame->nb_samples <= avctx->internal->skip_samples){
|
||||
*got_frame_ptr = 0;
|
||||
avctx->internal->skip_samples -= frame->nb_samples;
|
||||
@@ -1806,7 +1809,7 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
|
||||
* extended_data are doing it correctly */
|
||||
if (*got_frame_ptr) {
|
||||
planar = av_sample_fmt_is_planar(frame->format);
|
||||
channels = av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
channels = frame->channels;
|
||||
if (!(planar && channels > AV_NUM_DATA_POINTERS))
|
||||
frame->extended_data = frame->data;
|
||||
} else {
|
||||
|
@@ -1149,8 +1149,12 @@ static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
|
||||
*dmv_x = get_bits(gb, v->k_x);
|
||||
*dmv_y = get_bits(gb, v->k_y);
|
||||
if (v->numref) {
|
||||
*pred_flag = *dmv_y & 1;
|
||||
*dmv_y = (*dmv_y + *pred_flag) >> 1;
|
||||
if (pred_flag) {
|
||||
*pred_flag = *dmv_y & 1;
|
||||
*dmv_y = (*dmv_y + *pred_flag) >> 1;
|
||||
} else {
|
||||
*dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
@@ -1177,7 +1181,7 @@ static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
|
||||
*dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
|
||||
} else
|
||||
*dmv_y = 0;
|
||||
if (v->numref)
|
||||
if (v->numref && pred_flag)
|
||||
*pred_flag = index1 & 1;
|
||||
}
|
||||
}
|
||||
|
@@ -195,37 +195,41 @@ static void vorbis_free(vorbis_context *vc)
|
||||
av_freep(&vc->channel_residues);
|
||||
av_freep(&vc->saved);
|
||||
|
||||
for (i = 0; i < vc->residue_count; i++)
|
||||
av_free(vc->residues[i].classifs);
|
||||
if (vc->residues)
|
||||
for (i = 0; i < vc->residue_count; i++)
|
||||
av_free(vc->residues[i].classifs);
|
||||
av_freep(&vc->residues);
|
||||
av_freep(&vc->modes);
|
||||
|
||||
ff_mdct_end(&vc->mdct[0]);
|
||||
ff_mdct_end(&vc->mdct[1]);
|
||||
|
||||
for (i = 0; i < vc->codebook_count; ++i) {
|
||||
av_free(vc->codebooks[i].codevectors);
|
||||
ff_free_vlc(&vc->codebooks[i].vlc);
|
||||
}
|
||||
if (vc->codebooks)
|
||||
for (i = 0; i < vc->codebook_count; ++i) {
|
||||
av_free(vc->codebooks[i].codevectors);
|
||||
ff_free_vlc(&vc->codebooks[i].vlc);
|
||||
}
|
||||
av_freep(&vc->codebooks);
|
||||
|
||||
for (i = 0; i < vc->floor_count; ++i) {
|
||||
if (vc->floors[i].floor_type == 0) {
|
||||
av_free(vc->floors[i].data.t0.map[0]);
|
||||
av_free(vc->floors[i].data.t0.map[1]);
|
||||
av_free(vc->floors[i].data.t0.book_list);
|
||||
av_free(vc->floors[i].data.t0.lsp);
|
||||
} else {
|
||||
av_free(vc->floors[i].data.t1.list);
|
||||
if (vc->floors)
|
||||
for (i = 0; i < vc->floor_count; ++i) {
|
||||
if (vc->floors[i].floor_type == 0) {
|
||||
av_free(vc->floors[i].data.t0.map[0]);
|
||||
av_free(vc->floors[i].data.t0.map[1]);
|
||||
av_free(vc->floors[i].data.t0.book_list);
|
||||
av_free(vc->floors[i].data.t0.lsp);
|
||||
} else {
|
||||
av_free(vc->floors[i].data.t1.list);
|
||||
}
|
||||
}
|
||||
}
|
||||
av_freep(&vc->floors);
|
||||
|
||||
for (i = 0; i < vc->mapping_count; ++i) {
|
||||
av_free(vc->mappings[i].magnitude);
|
||||
av_free(vc->mappings[i].angle);
|
||||
av_free(vc->mappings[i].mux);
|
||||
}
|
||||
if (vc->mappings)
|
||||
for (i = 0; i < vc->mapping_count; ++i) {
|
||||
av_free(vc->mappings[i].magnitude);
|
||||
av_free(vc->mappings[i].angle);
|
||||
av_free(vc->mappings[i].mux);
|
||||
}
|
||||
av_freep(&vc->mappings);
|
||||
}
|
||||
|
||||
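The vorbis_free() changes guard each per-element loop with a check that the parent array was allocated at all, so tearing down a context that failed partway through header parsing no longer walks NULL tables. The shape of that pattern as a standalone sketch (Item is an invented type; plain free() stands in for av_free/av_freep):

#include <stdlib.h>

typedef struct Item { void *payload; } Item;

static void free_items(Item **items, int count)
{
    if (*items) {                   /* only visit elements if the array exists */
        int i;
        for (i = 0; i < count; i++)
            free((*items)[i].payload);
    }
    free(*items);                   /* free(NULL) is a harmless no-op */
    *items = NULL;                  /* like av_freep(): leave no dangling pointer */
}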
@@ -1651,6 +1655,45 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
|
||||
|
||||
av_dlog(NULL, "packet length %d \n", buf_size);
|
||||
|
||||
if (*buf == 1 && buf_size > 7) {
|
||||
init_get_bits(gb, buf+1, buf_size*8 - 8);
|
||||
vorbis_free(vc);
|
||||
if ((ret = vorbis_parse_id_hdr(vc))) {
|
||||
av_log(avccontext, AV_LOG_ERROR, "Id header corrupt.\n");
|
||||
vorbis_free(vc);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (vc->audio_channels > 8)
|
||||
avccontext->channel_layout = 0;
|
||||
else
|
||||
avccontext->channel_layout = ff_vorbis_channel_layouts[vc->audio_channels - 1];
|
||||
|
||||
avccontext->channels = vc->audio_channels;
|
||||
avccontext->sample_rate = vc->audio_samplerate;
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
if (*buf == 3 && buf_size > 7) {
|
||||
av_log(avccontext, AV_LOG_DEBUG, "Ignoring comment header\n");
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
if (*buf == 5 && buf_size > 7 && vc->channel_residues && !vc->modes) {
|
||||
init_get_bits(gb, buf+1, buf_size*8 - 8);
|
||||
if ((ret = vorbis_parse_setup_hdr(vc))) {
|
||||
av_log(avccontext, AV_LOG_ERROR, "Setup header corrupt.\n");
|
||||
vorbis_free(vc);
|
||||
return ret;
|
||||
}
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
if (!vc->channel_residues || !vc->modes) {
|
||||
av_log(avccontext, AV_LOG_ERROR, "Data packet before valid headers\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* get output buffer */
|
||||
vc->frame.nb_samples = vc->blocksize[1] / 2;
|
||||
if ((ret = ff_get_buffer(avccontext, &vc->frame)) < 0) {
|
||||
|
@@ -281,15 +281,15 @@ static av_cold int vp3_decode_end(AVCodecContext *avctx)
|
||||
Vp3DecodeContext *s = avctx->priv_data;
|
||||
int i;
|
||||
|
||||
av_free(s->superblock_coding);
|
||||
av_free(s->all_fragments);
|
||||
av_free(s->coded_fragment_list[0]);
|
||||
av_free(s->dct_tokens_base);
|
||||
av_free(s->superblock_fragments);
|
||||
av_free(s->macroblock_coding);
|
||||
av_free(s->motion_val[0]);
|
||||
av_free(s->motion_val[1]);
|
||||
av_free(s->edge_emu_buffer);
|
||||
av_freep(&s->superblock_coding);
|
||||
av_freep(&s->all_fragments);
|
||||
av_freep(&s->coded_fragment_list[0]);
|
||||
av_freep(&s->dct_tokens_base);
|
||||
av_freep(&s->superblock_fragments);
|
||||
av_freep(&s->macroblock_coding);
|
||||
av_freep(&s->motion_val[0]);
|
||||
av_freep(&s->motion_val[1]);
|
||||
av_freep(&s->edge_emu_buffer);
|
||||
|
||||
if (avctx->internal->is_copy)
|
||||
return 0;
|
||||
@@ -2339,6 +2339,8 @@ static av_cold int theora_decode_init(AVCodecContext *avctx)
|
||||
}
|
||||
|
||||
for(i=0;i<3;i++) {
|
||||
if (header_len[i] <= 0)
|
||||
continue;
|
||||
init_get_bits(&gb, header_start[i], header_len[i] * 8);
|
||||
|
||||
ptype = get_bits(&gb, 8);
|
||||
|
@@ -537,6 +537,12 @@ static int vqa_decode_chunk(VqaContext *s)
|
||||
bytestream2_seek(&s->gb, cbp0_chunk, SEEK_SET);
|
||||
chunk_size = bytestream2_get_be32(&s->gb);
|
||||
|
||||
if (chunk_size > MAX_CODEBOOK_SIZE - s->next_codebook_buffer_index) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "cbp0 chunk too large (%u bytes)\n",
|
||||
chunk_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* accumulate partial codebook */
|
||||
bytestream2_get_buffer(&s->gb, &s->next_codebook_buffer[s->next_codebook_buffer_index],
|
||||
chunk_size);
|
||||
@@ -560,6 +566,12 @@ static int vqa_decode_chunk(VqaContext *s)
|
||||
bytestream2_seek(&s->gb, cbpz_chunk, SEEK_SET);
|
||||
chunk_size = bytestream2_get_be32(&s->gb);
|
||||
|
||||
if (chunk_size > MAX_CODEBOOK_SIZE - s->next_codebook_buffer_index) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "cbpz chunk too large (%u bytes)\n",
|
||||
chunk_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* accumulate partial codebook */
|
||||
bytestream2_get_buffer(&s->gb, &s->next_codebook_buffer[s->next_codebook_buffer_index],
|
||||
chunk_size);
|
||||
|
@@ -1785,6 +1785,7 @@ static av_always_inline void gmc(uint8_t *dst, uint8_t *src,
|
||||
}
|
||||
}
|
||||
|
||||
#if CONFIG_VIDEODSP
|
||||
#if HAVE_YASM
|
||||
#if ARCH_X86_32
|
||||
static void gmc_mmx(uint8_t *dst, uint8_t *src,
|
||||
@@ -1814,6 +1815,7 @@ static void gmc_mmx(uint8_t *dst, uint8_t *src,
|
||||
width, height, &ff_emulated_edge_mc_8);
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#endif /* HAVE_INLINE_ASM */
|
||||
|
||||
@@ -2518,7 +2520,7 @@ static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags)
|
||||
c->scalarproduct_float = ff_scalarproduct_float_sse;
|
||||
c->butterflies_float_interleave = ff_butterflies_float_interleave_sse;
|
||||
|
||||
#if HAVE_INLINE_ASM
|
||||
#if HAVE_INLINE_ASM && CONFIG_VIDEODSP
|
||||
c->gmc = gmc_sse;
|
||||
#endif
|
||||
#endif /* HAVE_YASM */
|
||||
|
@@ -625,7 +625,11 @@ DEBLOCK_LUMA v, 16
|
||||
%define t5 m11
|
||||
%define mask0 m12
|
||||
%define mask1p m13
|
||||
%if WIN64
|
||||
%define mask1q [rsp]
|
||||
%else
|
||||
%define mask1q [rsp-24]
|
||||
%endif
|
||||
%define mpb_0 m14
|
||||
%define mpb_1 m15
|
||||
%else
|
||||
@@ -644,7 +648,11 @@ DEBLOCK_LUMA v, 16
|
||||
;-----------------------------------------------------------------------------
|
||||
; void deblock_v_luma_intra( uint8_t *pix, int stride, int alpha, int beta )
|
||||
;-----------------------------------------------------------------------------
|
||||
%if WIN64
|
||||
cglobal deblock_%1_luma_intra_8, 4,6,16,0x10
|
||||
%else
|
||||
cglobal deblock_%1_luma_intra_8, 4,6,16,ARCH_X86_64*0x50-0x50
|
||||
%endif
|
||||
lea r4, [r1*4]
|
||||
lea r5, [r1*3] ; 3*stride
|
||||
dec r2d ; alpha-1
|
||||
|
@@ -45,6 +45,11 @@ static av_cold int xan_decode_init(AVCodecContext *avctx)
|
||||
|
||||
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
|
||||
|
||||
if (avctx->height < 8) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid frame height: %d.\n", avctx->height);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
s->buffer_size = avctx->width * avctx->height;
|
||||
s->y_buffer = av_malloc(s->buffer_size);
|
||||
if (!s->y_buffer)
|
||||
@@ -212,6 +217,10 @@ static int xan_decode_chroma(AVCodecContext *avctx, unsigned chroma_off)
|
||||
U += s->pic.linesize[1];
|
||||
V += s->pic.linesize[2];
|
||||
}
|
||||
if (avctx->height & 1) {
|
||||
memcpy(U, U - s->pic.linesize[1], avctx->width >> 1);
|
||||
memcpy(V, V - s->pic.linesize[2], avctx->width >> 1);
|
||||
}
|
||||
} else {
|
||||
uint8_t *U2 = U + s->pic.linesize[1];
|
||||
uint8_t *V2 = V + s->pic.linesize[2];
|
||||
@@ -236,6 +245,12 @@ static int xan_decode_chroma(AVCodecContext *avctx, unsigned chroma_off)
|
||||
U2 += s->pic.linesize[1] * 2;
|
||||
V2 += s->pic.linesize[2] * 2;
|
||||
}
|
||||
if (avctx->height & 3) {
|
||||
int lines = ((avctx->height + 1) >> 1) - (avctx->height >> 2) * 2;
|
||||
|
||||
memcpy(U, U - lines * s->pic.linesize[1], lines * s->pic.linesize[1]);
|
||||
memcpy(V, V - lines * s->pic.linesize[2], lines * s->pic.linesize[2]);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@@ -39,6 +39,7 @@ typedef struct YopDecContext {
|
||||
|
||||
uint8_t *low_nibble;
|
||||
uint8_t *srcptr;
|
||||
uint8_t *src_end;
|
||||
uint8_t *dstptr;
|
||||
uint8_t *dstbuf;
|
||||
} YopDecContext;
|
||||
@@ -88,8 +89,8 @@ static av_cold int yop_decode_init(AVCodecContext *avctx)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!avctx->extradata) {
|
||||
av_log(avctx, AV_LOG_ERROR, "extradata missing\n");
|
||||
if (avctx->extradata_size < 3) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Missing or incomplete extradata.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
@@ -123,8 +124,13 @@ static av_cold int yop_decode_close(AVCodecContext *avctx)
|
||||
* @param s codec context
|
||||
* @param tag the tag that was in the nibble
|
||||
*/
|
||||
static void yop_paint_block(YopDecContext *s, int tag)
|
||||
static int yop_paint_block(YopDecContext *s, int tag)
|
||||
{
|
||||
if (s->src_end - s->srcptr < paint_lut[tag][3]) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Packet too small.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
s->dstptr[0] = s->srcptr[0];
|
||||
s->dstptr[1] = s->srcptr[paint_lut[tag][0]];
|
||||
s->dstptr[s->frame.linesize[0]] = s->srcptr[paint_lut[tag][1]];
|
||||
@@ -132,6 +138,7 @@ static void yop_paint_block(YopDecContext *s, int tag)
|
||||
|
||||
// The number of src bytes consumed is in the last part of the lut entry.
|
||||
s->srcptr += paint_lut[tag][3];
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -184,6 +191,11 @@ static int yop_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
int ret, i, x, y;
|
||||
uint32_t *palette;
|
||||
|
||||
if (avpkt->size < 4 + 3 * s->num_pal_colors) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Packet too small.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (s->frame.data[0])
|
||||
avctx->release_buffer(avctx, &s->frame);
|
||||
|
||||
@@ -201,6 +213,7 @@ static int yop_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
s->dstbuf = s->frame.data[0];
|
||||
s->dstptr = s->frame.data[0];
|
||||
s->srcptr = avpkt->data + 4;
|
||||
s->src_end = avpkt->data + avpkt->size;
|
||||
s->low_nibble = NULL;
|
||||
|
||||
is_odd_frame = avpkt->data[0];
|
||||
@@ -231,7 +244,9 @@ static int yop_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
tag = yop_get_next_nibble(s);
|
||||
|
||||
if (tag != 0xf) {
|
||||
yop_paint_block(s, tag);
|
||||
ret = yop_paint_block(s, tag);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
} else {
|
||||
tag = yop_get_next_nibble(s);
|
||||
ret = yop_copy_previous_block(s, tag);
|
||||
|
@@ -429,6 +429,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
|
||||
c->fmt = buf[3];
|
||||
c->bw = buf[4];
|
||||
c->bh = buf[5];
|
||||
c->decode_intra = NULL;
|
||||
c->decode_xor = NULL;
|
||||
|
||||
buf += 6;
|
||||
len -= 6;
|
||||
|
@@ -23,8 +23,15 @@
|
||||
* libcdio CD grabbing
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#if HAVE_CDIO_PARANOIA_H
|
||||
#include <cdio/cdda.h>
|
||||
#include <cdio/paranoia.h>
|
||||
#elif HAVE_CDIO_PARANOIA_PARANOIA_H
|
||||
#include <cdio/paranoia/cdda.h>
|
||||
#include <cdio/paranoia/paranoia.h>
|
||||
#endif
|
||||
|
||||
#include "libavutil/log.h"
|
||||
#include "libavutil/mem.h"
|
||||
|
@@ -96,7 +96,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
static int config_props(AVFilterLink *inlink)
|
||||
{
|
||||
KerndeintContext *kerndeint = inlink->dst->priv;
|
||||
const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];
|
||||
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
|
||||
int ret;
|
||||
|
||||
kerndeint->vsub = desc->log2_chroma_h;
|
||||
|
@@ -369,7 +369,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
static av_cold int init(AVFilterContext *ctx, const char *args)
|
||||
{
|
||||
YADIFContext *yadif = ctx->priv;
|
||||
static const char *shorthand[] = { "mode", "parity", "enable", NULL };
|
||||
static const char *shorthand[] = { "mode", "parity", "deint", NULL };
|
||||
int ret;
|
||||
|
||||
yadif->csp = NULL;
|
||||
|
@@ -248,7 +248,7 @@ static int color_config_props(AVFilterLink *inlink)
|
||||
if (av_image_check_size(test->w, test->h, 0, ctx) < 0)
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
if (ret = config_props(inlink) < 0)
|
||||
if ((ret = config_props(inlink)) < 0)
|
||||
return ret;
|
||||
|
||||
av_log(ctx, AV_LOG_VERBOSE, "color:0x%02x%02x%02x%02x\n",
|
||||
|
@@ -376,8 +376,8 @@ static int dv_write_header(AVFormatContext *s)
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (tcr)
|
||||
return av_timecode_init_from_string(&dvc->tc, rate, tcr->value, s);
|
||||
if (tcr && av_timecode_init_from_string(&dvc->tc, rate, tcr->value, s) >= 0)
|
||||
return 0;
|
||||
return av_timecode_init(&dvc->tc, rate, 0, 0, s);
|
||||
}
|
||||
|
||||
|
@@ -713,13 +713,13 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
st = s->streams[i];
|
||||
if (stream_type == FLV_STREAM_TYPE_AUDIO) {
|
||||
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
|
||||
flv_same_audio_codec(st->codec, flags)) {
|
||||
(s->audio_codec_id || flv_same_audio_codec(st->codec, flags))) {
|
||||
break;
|
||||
}
|
||||
} else
|
||||
if (stream_type == FLV_STREAM_TYPE_VIDEO) {
|
||||
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
|
||||
flv_same_video_codec(st->codec, flags)) {
|
||||
(s->video_codec_id || flv_same_video_codec(st->codec, flags))) {
|
||||
break;
|
||||
}
|
||||
} else if (stream_type == FLV_STREAM_TYPE_DATA) {
|
||||
|
@@ -21,6 +21,9 @@
|
||||
|
||||
#include "matroska.h"
|
||||
|
||||
/* If you add a tag here that is not in ff_codec_bmp_tags[]
|
||||
or ff_codec_wav_tags[], add it also to additional_audio_tags[]
|
||||
or additional_video_tags[] in matroskaenc.c */
|
||||
const CodecTags ff_mkv_codec_tags[]={
|
||||
{"A_AAC" , AV_CODEC_ID_AAC},
|
||||
{"A_AC3" , AV_CODEC_ID_AC3},
|
||||
@@ -111,7 +114,7 @@ const char * const ff_matroska_video_stereo_mode[MATROSKA_VIDEO_STEREO_MODE_COUN
|
||||
"bottom_top",
|
||||
"top_bottom",
|
||||
"checkerboard_rl",
|
||||
"checkerboard_lr"
|
||||
"checkerboard_lr",
|
||||
"row_interleaved_rl",
|
||||
"row_interleaved_lr",
|
||||
"col_interleaved_rl",
|
||||
|
@@ -1881,6 +1881,7 @@ static int matroska_deliver_packet(MatroskaDemuxContext *matroska,
|
||||
*/
|
||||
static void matroska_clear_queue(MatroskaDemuxContext *matroska)
|
||||
{
|
||||
matroska->prev_pkt = NULL;
|
||||
if (matroska->packets) {
|
||||
int n;
|
||||
for (n = 0; n < matroska->num_packets; n++) {
|
||||
@@ -2388,7 +2389,6 @@ static int matroska_read_seek(AVFormatContext *s, int stream_index,
|
||||
avio_seek(s->pb, st->index_entries[st->nb_index_entries-1].pos, SEEK_SET);
|
||||
matroska->current_id = 0;
|
||||
while ((index = av_index_search_timestamp(st, timestamp, flags)) < 0) {
|
||||
matroska->prev_pkt = NULL;
|
||||
matroska_clear_queue(matroska);
|
||||
if (matroska_parse_cluster(matroska) < 0)
|
||||
break;
|
||||
|
@@ -1353,6 +1353,33 @@ static int mkv_query_codec(enum AVCodecID codec_id, int std_compliance)
|
||||
return 0;
|
||||
}
|
||||
|
||||
const AVCodecTag additional_audio_tags[] = {
|
||||
{ AV_CODEC_ID_ALAC, 0XFFFFFFFF },
|
||||
{ AV_CODEC_ID_EAC3, 0XFFFFFFFF },
|
||||
{ AV_CODEC_ID_MLP, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_OPUS, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_PCM_S16BE, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_PCM_S24BE, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_PCM_S32BE, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_QDM2, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_RA_144, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_RA_288, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_COOK, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_TRUEHD, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_TTA, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_WAVPACK, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_NONE, 0xFFFFFFFF }
|
||||
};
|
||||
|
||||
const AVCodecTag additional_video_tags[] = {
|
||||
{ AV_CODEC_ID_PRORES, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_RV10, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_RV20, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_RV30, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_RV40, 0xFFFFFFFF },
|
||||
{ AV_CODEC_ID_NONE, 0xFFFFFFFF }
|
||||
};
|
||||
|
||||
#if CONFIG_MATROSKA_MUXER
|
||||
AVOutputFormat ff_matroska_muxer = {
|
||||
.name = "matroska",
|
||||
@@ -1369,6 +1396,10 @@ AVOutputFormat ff_matroska_muxer = {
|
||||
.write_trailer = mkv_write_trailer,
|
||||
.flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS |
|
||||
AVFMT_TS_NONSTRICT,
|
||||
.codec_tag = (const AVCodecTag* const []){
|
||||
ff_codec_bmp_tags, ff_codec_wav_tags,
|
||||
additional_audio_tags, additional_video_tags, 0
|
||||
},
|
||||
.subtitle_codec = AV_CODEC_ID_SSA,
|
||||
.query_codec = mkv_query_codec,
|
||||
};
|
||||
@@ -1405,5 +1436,8 @@ AVOutputFormat ff_matroska_audio_muxer = {
|
||||
.write_packet = mkv_write_packet,
|
||||
.write_trailer = mkv_write_trailer,
|
||||
.flags = AVFMT_GLOBALHEADER | AVFMT_TS_NONSTRICT,
|
||||
.codec_tag = (const AVCodecTag* const []){
|
||||
ff_codec_wav_tags, additional_audio_tags, 0
|
||||
},
|
||||
};
|
||||
#endif
|
||||
|
@@ -696,6 +696,9 @@ static int mov_read_chan(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
if (atom.size < 16)
|
||||
return 0;
|
||||
|
||||
/* skip version and flags */
|
||||
avio_skip(pb, 4);
|
||||
|
||||
ff_mov_read_chan(c->fc, pb, st, atom.size - 4);
|
||||
|
||||
return 0;
|
||||
|
@@ -1128,13 +1128,17 @@ static int mov_write_video_tag(AVIOContext *pb, MOVTrack *track)
|
||||
mov_write_avcc_tag(pb, track);
|
||||
if(track->mode == MODE_IPOD)
|
||||
mov_write_uuid_tag_ipod(pb);
|
||||
} else if (track->enc->field_order != AV_FIELD_UNKNOWN)
|
||||
mov_write_fiel_tag(pb, track);
|
||||
else if (track->enc->codec_id == AV_CODEC_ID_VC1 && track->vos_len > 0)
|
||||
} else if (track->enc->codec_id == AV_CODEC_ID_VC1 && track->vos_len > 0)
|
||||
mov_write_dvc1_tag(pb, track);
|
||||
else if (track->vos_len > 0)
|
||||
mov_write_glbl_tag(pb, track);
|
||||
|
||||
if (track->enc->codec_id != AV_CODEC_ID_H264 &&
|
||||
track->enc->codec_id != AV_CODEC_ID_MPEG4 &&
|
||||
track->enc->codec_id != AV_CODEC_ID_DNXHD)
|
||||
if (track->enc->field_order != AV_FIELD_UNKNOWN)
|
||||
mov_write_fiel_tag(pb, track);
|
||||
|
||||
if (track->enc->sample_aspect_ratio.den && track->enc->sample_aspect_ratio.num &&
|
||||
track->enc->sample_aspect_ratio.den != track->enc->sample_aspect_ratio.num) {
|
||||
mov_write_pasp_tag(pb, track);
|
||||
@@ -1147,7 +1151,12 @@ static int mov_write_tmcd_tag(AVIOContext *pb, MOVTrack *track)
|
||||
{
|
||||
int64_t pos = avio_tell(pb);
|
||||
int frame_duration = av_rescale(track->timescale, track->enc->time_base.num, track->enc->time_base.den);
|
||||
int nb_frames = (track->timescale + frame_duration/2) / frame_duration;
|
||||
int nb_frames = 1.0/av_q2d(track->enc->time_base) + 0.5;
|
||||
|
||||
if (nb_frames > 255) {
|
||||
av_log(NULL, AV_LOG_ERROR, "fps %d is too large\n", nb_frames);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
avio_wb32(pb, 0); /* size */
|
||||
ffio_wfourcc(pb, "tmcd"); /* Data format */
|
||||
|
@@ -504,7 +504,7 @@ static int mpegps_read_packet(AVFormatContext *s,
|
||||
if(st->discard >= AVDISCARD_ALL)
|
||||
goto skip;
|
||||
if (startcode >= 0xa0 && startcode <= 0xaf) {
|
||||
if (lpcm_header_len == 6) {
|
||||
if (lpcm_header_len == 6 && st->codec->codec_id == AV_CODEC_ID_MLP) {
|
||||
if (len < 6)
|
||||
goto skip;
|
||||
avio_skip(s->pb, 6);
|
||||
|
@@ -395,7 +395,7 @@ int avformat_write_header(AVFormatContext *s, AVDictionary **options)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = init_pts(s) < 0))
|
||||
if ((ret = init_pts(s)) < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
@@ -490,13 +490,12 @@ static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt)
|
||||
*/
|
||||
static inline int split_write_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
{
|
||||
int ret;
|
||||
AVPacket spkt = *pkt;
|
||||
int ret, did_split;
|
||||
|
||||
av_packet_split_side_data(&spkt);
|
||||
ret = s->oformat->write_packet(s, &spkt);
|
||||
spkt.data = NULL;
|
||||
av_destruct_packet(&spkt);
|
||||
did_split = av_packet_split_side_data(pkt);
|
||||
ret = s->oformat->write_packet(s, pkt);
|
||||
if (did_split)
|
||||
av_packet_merge_side_data(pkt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -102,6 +102,7 @@ static int ogg_restore(AVFormatContext *s, int discard)
|
||||
av_free(ogg->streams[i].buf);
|
||||
|
||||
avio_seek(bc, ost->pos, SEEK_SET);
|
||||
ogg->page_pos = -1;
|
||||
ogg->curidx = ost->curidx;
|
||||
ogg->nstreams = ost->nstreams;
|
||||
ogg->streams = av_realloc(ogg->streams,
|
||||
@@ -146,6 +147,7 @@ static int ogg_reset(AVFormatContext *s)
|
||||
}
|
||||
}
|
||||
|
||||
ogg->page_pos = -1;
|
||||
ogg->curidx = -1;
|
||||
|
||||
return 0;
|
||||
@@ -183,6 +185,9 @@ static int ogg_replace_stream(AVFormatContext *s, uint32_t serial)
|
||||
|
||||
os = &ogg->streams[0];
|
||||
|
||||
os->serial = serial;
|
||||
return 0;
|
||||
|
||||
buf = os->buf;
|
||||
bufsize = os->bufsize;
|
||||
codec = os->codec;
|
||||
@@ -297,6 +302,12 @@ static int ogg_read_page(AVFormatContext *s, int *sid)
|
||||
sync[(sp + 2) & 3] == 'g' && sync[(sp + 3) & 3] == 'S')
|
||||
break;
|
||||
|
||||
if(!i && bc->seekable && ogg->page_pos > 0) {
|
||||
memset(sync, 0, 4);
|
||||
avio_seek(bc, ogg->page_pos+4, SEEK_SET);
|
||||
ogg->page_pos = -1;
|
||||
}
|
||||
|
||||
c = avio_r8(bc);
|
||||
|
||||
if (url_feof(bc))
|
||||
@@ -335,6 +346,7 @@ static int ogg_read_page(AVFormatContext *s, int *sid)
|
||||
}
|
||||
|
||||
os = ogg->streams + idx;
|
||||
ogg->page_pos =
|
||||
os->page_pos = avio_tell(bc) - 27;
|
||||
|
||||
if (os->psize > 0)
|
||||
@@ -559,6 +571,7 @@ static int ogg_get_length(AVFormatContext *s)
|
||||
|
||||
ogg_save(s);
|
||||
avio_seek(s->pb, end, SEEK_SET);
|
||||
ogg->page_pos = -1;
|
||||
|
||||
while (!ogg_read_page(s, &i)) {
|
||||
if (ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0 &&
|
||||
@@ -599,6 +612,23 @@ static int ogg_get_length(AVFormatContext *s)
     return 0;
 }

+static int ogg_read_close(AVFormatContext *s)
+{
+    struct ogg *ogg = s->priv_data;
+    int i;
+
+    for (i = 0; i < ogg->nstreams; i++) {
+        av_free(ogg->streams[i].buf);
+        if (ogg->streams[i].codec &&
+            ogg->streams[i].codec->cleanup) {
+            ogg->streams[i].codec->cleanup(s, i);
+        }
+        av_free(ogg->streams[i].private);
+    }
+    av_free(ogg->streams);
+    return 0;
+}
+
 static int ogg_read_header(AVFormatContext *s)
 {
     struct ogg *ogg = s->priv_data;
@@ -722,19 +752,6 @@ retry:
     return psize;
 }

-static int ogg_read_close(AVFormatContext *s)
-{
-    struct ogg *ogg = s->priv_data;
-    int i;
-
-    for (i = 0; i < ogg->nstreams; i++) {
-        av_free(ogg->streams[i].buf);
-        av_free(ogg->streams[i].private);
-    }
-    av_free(ogg->streams);
-    return 0;
-}
-
 static int64_t ogg_read_timestamp(AVFormatContext *s, int stream_index,
                                   int64_t *pos_arg, int64_t pos_limit)
 {
@@ -55,6 +55,7 @@ struct ogg_codec {
      * Number of expected headers
      */
     int nb_header;
+    void (*cleanup)(AVFormatContext *s, int idx);
 };

 struct ogg_stream {
@@ -99,6 +100,7 @@ struct ogg {
     int nstreams;
     int headers;
     int curidx;
+    int64_t page_pos; ///< file offset of the current page
     struct ogg_state *state;
 };

@@ -192,6 +192,16 @@ fixup_vorbis_headers(AVFormatContext * as, struct oggvorbis_private *priv,
     return offset;
 }

+static void vorbis_cleanup(AVFormatContext *s, int idx)
+{
+    struct ogg *ogg = s->priv_data;
+    struct ogg_stream *os = ogg->streams + idx;
+    struct oggvorbis_private *priv = os->private;
+    int i;
+    if (os->private)
+        for (i = 0; i < 3; i++)
+            av_freep(&priv->packet[i]);
+}

 static int
 vorbis_header (AVFormatContext * s, int idx)
@@ -373,5 +383,6 @@ const struct ogg_codec ff_vorbis_codec = {
     .magicsize = 7,
     .header = vorbis_header,
     .packet = vorbis_packet,
+    .cleanup= vorbis_cleanup,
     .nb_header = 3,
 };

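The cleanup hook introduced in these hunks lets each ogg codec free its own private allocations (here the three buffered Vorbis header packets) when the demuxer closes, with ogg_read_close calling the hook before the generic per-stream frees. A rough sketch of the callback pattern with illustrative types, not the real libavformat structures:

#include <stdio.h>
#include <stdlib.h>

struct stream {
    void *priv;                          /* codec-private data */
    void (*cleanup)(struct stream *st);  /* optional per-codec hook */
};

static void vorbis_like_cleanup(struct stream *st)
{
    free(st->priv);                      /* codec-specific teardown */
    st->priv = NULL;
}

static void close_all(struct stream *streams, int nstreams)
{
    for (int i = 0; i < nstreams; i++) {
        if (streams[i].cleanup)
            streams[i].cleanup(&streams[i]);   /* hook runs first */
        free(streams[i].priv);                 /* then the generic frees */
    }
}

int main(void)
{
    struct stream st = { malloc(16), vorbis_like_cleanup };
    close_all(&st, 1);
    printf("closed\n");
    return 0;
}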
@@ -817,7 +817,13 @@ int ff_read_riff_info(AVFormatContext *s, int64_t size)
         chunk_code = avio_rl32(pb);
         chunk_size = avio_rl32(pb);

         if (url_feof(pb)) {
             if (chunk_code || chunk_size) {
                 av_log(s, AV_LOG_WARNING, "INFO subchunk truncated\n");
                 return AVERROR_INVALIDDATA;
             }
             break;
         }
         if (chunk_size > end || end - chunk_size < cur || chunk_size == UINT_MAX) {
             avio_seek(pb, -9, SEEK_CUR);
             chunk_code = avio_rl32(pb);
@@ -356,11 +356,11 @@ int ff_amf_tag_size(const uint8_t *data, const uint8_t *data_end)
             data++;
             break;
         }
-        if (data + size >= data_end || data + size < data)
+        if (size < 0 || size >= data_end - data)
             return -1;
         data += size;
         t = ff_amf_tag_size(data, data_end);
-        if (t < 0 || data + t >= data_end)
+        if (t < 0 || t >= data_end - data)
             return -1;
         data += t;
     }
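The pattern in this and the following AMF hunks replaces checks of the form "data + size >= data_end" with "size >= data_end - data". Adding an attacker-controlled size to a pointer can wrap past the end of the buffer's address range (undefined behaviour), whereas comparing the size against the bytes actually remaining cannot. A small illustration of the safe form (the helper name is made up):

#include <stdint.h>
#include <stdio.h>

/* Safe bounds check: never forms an out-of-range pointer. */
static int have_bytes(const uint8_t *p, const uint8_t *end, int64_t need)
{
    return need >= 0 && need <= end - p;
}

int main(void)
{
    uint8_t buf[16];
    const uint8_t *p = buf, *end = buf + sizeof(buf);

    printf("need 8:   %d\n", have_bytes(p, end, 8));     /* 1 */
    printf("need 999: %d\n", have_bytes(p, end, 999));   /* 0 */
    printf("negative: %d\n", have_bytes(p, end, -1));    /* 0 */
    return 0;
}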
@@ -389,7 +389,7 @@ int ff_amf_get_field_value(const uint8_t *data, const uint8_t *data_end,
         int size = bytestream_get_be16(&data);
         if (!size)
             break;
-        if (data + size >= data_end || data + size < data)
+        if (size < 0 || size >= data_end - data)
             return -1;
         data += size;
         if (size == namelen && !memcmp(data-size, name, namelen)) {
@@ -410,7 +410,7 @@ int ff_amf_get_field_value(const uint8_t *data, const uint8_t *data_end,
             return 0;
         }
         len = ff_amf_tag_size(data, data_end);
-        if (len < 0 || data + len >= data_end || data + len < data)
+        if (len < 0 || len >= data_end - data)
             return -1;
         data += len;
     }
@@ -440,7 +440,7 @@ static const char* rtmp_packet_type(int type)

 static void ff_amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *data_end)
 {
-    int size;
+    unsigned int size;
     char buf[1024];

     if (data >= data_end)
@@ -459,7 +459,7 @@ static void ff_amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *d
         } else {
             size = bytestream_get_be32(&data);
         }
-        size = FFMIN(size, 1023);
+        size = FFMIN(size, sizeof(buf) - 1);
         memcpy(buf, data, size);
         buf[size] = 0;
         av_log(ctx, AV_LOG_DEBUG, " string '%s'\n", buf);
@@ -472,22 +472,21 @@ static void ff_amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *d
     case AMF_DATA_TYPE_OBJECT:
         av_log(ctx, AV_LOG_DEBUG, " {\n");
         for (;;) {
-            int size = bytestream_get_be16(&data);
             int t;
-            memcpy(buf, data, size);
-            buf[size] = 0;
+            size = bytestream_get_be16(&data);
+            av_strlcpy(buf, data, FFMIN(sizeof(buf), size + 1));
             if (!size) {
                 av_log(ctx, AV_LOG_DEBUG, " }\n");
                 data++;
                 break;
             }
-            if (data + size >= data_end || data + size < data)
+            if (size >= data_end - data)
                 return;
             data += size;
             av_log(ctx, AV_LOG_DEBUG, " %s: ", buf);
             ff_amf_tag_contents(ctx, data, data_end);
             t = ff_amf_tag_size(data, data_end);
-            if (t < 0 || data + t >= data_end)
+            if (t < 0 || t >= data_end - data)
                 return;
             data += t;
         }

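The string handling fixes above (clamping to sizeof(buf) - 1 and switching to av_strlcpy) follow the same rule: never copy more than the destination can hold, and always NUL-terminate. A minimal bounded-copy helper in plain C, written out because strlcpy is not part of ISO C (assumes dst_size > 0):

#include <stdio.h>
#include <string.h>

static void copy_bounded(char *dst, size_t dst_size, const char *src, size_t src_len)
{
    size_t n = src_len < dst_size - 1 ? src_len : dst_size - 1;  /* clamp to capacity */
    memcpy(dst, src, n);
    dst[n] = '\0';                                               /* always terminate */
}

int main(void)
{
    char buf[8];
    copy_bounded(buf, sizeof(buf), "averylongfieldname", 18);
    printf("'%s'\n", buf);   /* prints 'averylo': truncated but terminated */
    return 0;
}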
@@ -1989,7 +1989,7 @@ static int handle_invoke(URLContext *s, RTMPPacket *pkt)
         !memcmp(pkt->data, "\002\000\007publish", 10) ||
         !memcmp(pkt->data, "\002\000\010_checkbw", 11) ||
         !memcmp(pkt->data, "\002\000\014createStream", 15)) {
-        if (ret = send_invoke_response(s, pkt) < 0)
+        if ((ret = send_invoke_response(s, pkt)) < 0)
             return ret;
     }

@@ -19,7 +19,7 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */

-#include <libavutil/opt.h>
+#include "libavutil/opt.h"
 #include "avformat.h"

 #include "rtp.h"
@@ -107,8 +107,8 @@ int ff_rtp_get_payload_type(AVFormatContext *fmt,
     /* static payload type */
     for (i = 0; AVRtpPayloadTypes[i].pt >= 0; ++i)
         if (AVRtpPayloadTypes[i].codec_id == codec->codec_id) {
-            if (codec->codec_id == AV_CODEC_ID_H263 && (!fmt ||
-                !fmt->oformat->priv_class ||
+            if (codec->codec_id == AV_CODEC_ID_H263 && (!fmt || !fmt->oformat ||
+                !fmt->oformat->priv_class || !fmt->priv_data ||
                 !av_opt_flag_is_set(fmt->priv_data, "rtpflags", "rfc2190")))
                 continue;
             /* G722 has 8000 as nominal rate even if the sample rate is 16000,
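The extra !fmt->oformat and !fmt->priv_data tests above follow the usual defensive pattern for chained pointer access: check every link with short-circuiting && before dereferencing the next one, so a NULL anywhere in the chain falls back to the default path instead of crashing. A tiny sketch with invented struct names:

#include <stdio.h>

struct oformat { const void *priv_class; };
struct fmtctx  { const struct oformat *oformat; void *priv_data; };

/* 1 only when every link needed later is non-NULL. */
static int can_query_rtpflags(const struct fmtctx *fmt)
{
    return fmt && fmt->oformat && fmt->oformat->priv_class && fmt->priv_data;
}

int main(void)
{
    struct fmtctx partial = { NULL, NULL };
    printf("%d %d\n", can_query_rtpflags(NULL), can_query_rtpflags(&partial));  /* 0 0 */
    return 0;
}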
@@ -64,7 +64,7 @@ int ff_rtp_chain_mux_open(AVFormatContext **out, AVFormatContext *s,
     /* Get the payload type from the codec */
     if (st->id < RTP_PT_PRIVATE)
         rtpctx->streams[0]->id =
-            ff_rtp_get_payload_type(rtpctx, st->codec, idx);
+            ff_rtp_get_payload_type(s, st->codec, idx);
     else
         rtpctx->streams[0]->id = st->id;

@@ -31,14 +31,14 @@

 static const uint8_t *avc_mp4_find_startcode(const uint8_t *start, const uint8_t *end, int nal_length_size)
 {
-    int res = 0;
+    unsigned int res = 0;

     if (end - start < nal_length_size)
         return NULL;
     while (nal_length_size--)
         res = (res << 8) | *start++;

-    if (start + res > end || res < 0 || start + res < start)
+    if (res > end - start)
         return NULL;

     return start + res;
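The avc_mp4_find_startcode change above is the same idea applied to MP4-style NAL units: the length prefix is read into an unsigned accumulator and then compared against the bytes remaining, instead of being added to the pointer first. A hedged re-sketch of that logic as a standalone helper (next_nal is an invented name, not the FFmpeg function):

#include <stdint.h>
#include <stdio.h>

static const uint8_t *next_nal(const uint8_t *start, const uint8_t *end, int nal_length_size)
{
    unsigned int res = 0;               /* unsigned: no sign games after (res << 8) */

    if (end - start < nal_length_size)
        return NULL;
    while (nal_length_size--)
        res = (res << 8) | *start++;    /* big-endian length prefix */

    if (res > (size_t)(end - start))    /* compare against remaining bytes */
        return NULL;
    return start + res;                 /* position just past this NAL */
}

int main(void)
{
    const uint8_t buf[] = { 0, 0, 0, 3, 0xAA, 0xBB, 0xCC };  /* 4-byte prefix, 3 payload bytes */
    const uint8_t *p = next_nal(buf, buf + sizeof(buf), 4);
    printf("next NAL at offset %ld\n", p ? (long)(p - buf) : -1L);  /* 7 */
    return 0;
}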
@@ -100,6 +100,8 @@ static int srt_read_header(AVFormatContext *s)
         pts = get_pts(&ptr, &duration, &x1, &y1, &x2, &y2);
         if (pts != AV_NOPTS_VALUE) {
             int len = buf.len - (ptr - buf.str);
+            if (len <= 0)
+                continue;
             sub = ff_subtitles_queue_insert(&srt->q, ptr, len, 0);
             if (!sub) {
                 res = AVERROR(ENOMEM);
Some files were not shown because too many files have changed in this diff.