Merge branch 'master' into oldabi

* master: (172 commits)
  Check mmap() return against correct value Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
  vorbisdec: Employ proper printf format specifiers for uint_fast32_t.
  Support fourcc MMJP.
  Support fourcc XVIX.
  Support fourcc M263.
  Support fourcc auv2.
  Fix indentation.
  Support PARSER_FLAG_COMPLETE_FRAMES for h261 and h263 parsers.
  ffplay: avoid SIGFPE exception in SDL_DisplayYUVOverlay
  avi: try to synchronize the points in time of the starts of streams after seeking. Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
  Add flag to force demuxers to sort more strictly by dts. This enables non interleaved AVI mode for example. Players that are picky on strict interleaving can set this. Patches to only switch to non interleaved AVI mode when the index is not strictly correctly interleaved are welcome. Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
  applehttp: Don't export variant_bitrate if it isn't known
  crypto: Use av_freep instead of av_free
  CrystalHD: Add AVOption to configure hardware downscaling.
  Check for malloc failures in fraps decoder.
  Use av_fast_malloc instead of av_realloc in fraps decoder.
  general.texi: document libcelt decoder.
  Fix some passing argument from incompatible pointer type warnings. Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
  configure: Add missing libm library dependencies to .pc files.
  oggdec: reindent after 8f3eebd6
  ...

Conflicts:
	libavcodec/version.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
This commit is contained in:
Michael Niedermayer
2011-04-25 02:47:47 +02:00
132 changed files with 2209 additions and 754 deletions

View File

@@ -9,6 +9,11 @@ version <next>:
- mpeg2 aspect ratio detection fixed - mpeg2 aspect ratio detection fixed
- libxvid aspect pickiness fixed - libxvid aspect pickiness fixed
- Frame multithreaded decoding - Frame multithreaded decoding
- Lots of deprecated API cruft removed
version 0.7_beta1:
- WebM support in Matroska de/muxer - WebM support in Matroska de/muxer
- low overhead Ogg muxing - low overhead Ogg muxing
- MMS-TCP support - MMS-TCP support

View File

@@ -93,6 +93,7 @@ tools/%.o: tools/%.c
$(CC) $(CPPFLAGS) $(CFLAGS) -c $(CC_O) $< $(CC) $(CPPFLAGS) $(CFLAGS) -c $(CC_O) $<
-include $(wildcard tools/*.d) -include $(wildcard tools/*.d)
-include $(wildcard tests/*.d)
ffplay.o: CFLAGS += $(SDL_CFLAGS) ffplay.o: CFLAGS += $(SDL_CFLAGS)
@@ -290,7 +291,7 @@ fate: $(FATE)
$(FATE): ffmpeg$(EXESUF) $(FATE_UTILS:%=tests/%$(HOSTEXESUF)) $(FATE): ffmpeg$(EXESUF) $(FATE_UTILS:%=tests/%$(HOSTEXESUF))
@echo "TEST $(@:fate-%=%)" @echo "TEST $(@:fate-%=%)"
$(Q)$(SRC_PATH)/tests/fate-run.sh $@ "$(SAMPLES)" "$(TARGET_EXEC)" "$(TARGET_PATH)" '$(CMD)' '$(CMP)' '$(REF)' '$(FUZZ)' '$(THREADS)' $(Q)$(SRC_PATH)/tests/fate-run.sh $@ "$(SAMPLES)" "$(TARGET_EXEC)" "$(TARGET_PATH)" '$(CMD)' '$(CMP)' '$(REF)' '$(FUZZ)' '$(THREADS)' '$(THREAD_TYPE)'
fate-list: fate-list:
@printf '%s\n' $(sort $(FATE)) @printf '%s\n' $(sort $(FATE))

View File

@@ -79,12 +79,8 @@ void uninit_opts(void)
av_freep(&sws_opts); av_freep(&sws_opts);
#endif #endif
for (i = 0; i < opt_name_count; i++) { for (i = 0; i < opt_name_count; i++) {
//opt_values are only stored for codec-specific options in which case av_freep(&opt_names[i]);
//both the name and value are dup'd av_freep(&opt_values[i]);
if (opt_values[i]) {
av_freep(&opt_names[i]);
av_freep(&opt_values[i]);
}
} }
av_freep(&opt_names); av_freep(&opt_names);
av_freep(&opt_values); av_freep(&opt_values);
@@ -157,6 +153,66 @@ static const OptionDef* find_option(const OptionDef *po, const char *name){
return po; return po;
} }
#if defined(_WIN32) && !defined(__MINGW32CE__)
/* Will be leaked on exit */
static char** win32_argv_utf8 = NULL;
static int win32_argc = 0;
/**
* Prepare command line arguments for executable.
* For Windows - perform wide-char to UTF-8 conversion.
* Input arguments should be main() function arguments.
* @param argc_ptr Arguments number (including executable)
* @param argv_ptr Arguments list.
*/
static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr)
{
char *argstr_flat;
wchar_t **argv_w;
int i, buffsize = 0, offset = 0;
if (win32_argv_utf8) {
*argc_ptr = win32_argc;
*argv_ptr = win32_argv_utf8;
return;
}
win32_argc = 0;
argv_w = CommandLineToArgvW(GetCommandLineW(), &win32_argc);
if (win32_argc <= 0 || !argv_w)
return;
/* determine the UTF-8 buffer size (including NULL-termination symbols) */
for (i = 0; i < win32_argc; i++)
buffsize += WideCharToMultiByte(CP_UTF8, 0, argv_w[i], -1,
NULL, 0, NULL, NULL);
win32_argv_utf8 = av_mallocz(sizeof(char*) * (win32_argc + 1) + buffsize);
argstr_flat = (char*)win32_argv_utf8 + sizeof(char*) * (win32_argc + 1);
if (win32_argv_utf8 == NULL) {
LocalFree(argv_w);
return;
}
for (i = 0; i < win32_argc; i++) {
win32_argv_utf8[i] = &argstr_flat[offset];
offset += WideCharToMultiByte(CP_UTF8, 0, argv_w[i], -1,
&argstr_flat[offset],
buffsize - offset, NULL, NULL);
}
win32_argv_utf8[i] = NULL;
LocalFree(argv_w);
*argc_ptr = win32_argc;
*argv_ptr = win32_argv_utf8;
}
#else
static inline void prepare_app_arguments(int *argc_ptr, char ***argv_ptr)
{
/* nothing to do */
}
#endif /* WIN32 && !__MINGW32CE__ */
void parse_options(int argc, char **argv, const OptionDef *options, void parse_options(int argc, char **argv, const OptionDef *options,
void (* parse_arg_function)(const char*)) void (* parse_arg_function)(const char*))
{ {
@@ -164,6 +220,9 @@ void parse_options(int argc, char **argv, const OptionDef *options,
int optindex, handleoptions=1; int optindex, handleoptions=1;
const OptionDef *po; const OptionDef *po;
/* perform system-dependent conversions for arguments list */
prepare_app_arguments(&argc, &argv);
/* parse options */ /* parse options */
optindex = 1; optindex = 1;
while (optindex < argc) { while (optindex < argc) {
@@ -235,6 +294,23 @@ int opt_default(const char *opt, const char *arg){
int ret= 0; int ret= 0;
const AVOption *o= NULL; const AVOption *o= NULL;
int opt_types[]={AV_OPT_FLAG_VIDEO_PARAM, AV_OPT_FLAG_AUDIO_PARAM, 0, AV_OPT_FLAG_SUBTITLE_PARAM, 0}; int opt_types[]={AV_OPT_FLAG_VIDEO_PARAM, AV_OPT_FLAG_AUDIO_PARAM, 0, AV_OPT_FLAG_SUBTITLE_PARAM, 0};
AVCodec *p = NULL;
AVOutputFormat *oformat = NULL;
while ((p = av_codec_next(p))) {
AVClass *c = p->priv_class;
if (c && av_find_opt(&c, opt, NULL, 0, 0))
break;
}
if (p)
goto out;
while ((oformat = av_oformat_next(oformat))) {
const AVClass *c = oformat->priv_class;
if (c && av_find_opt(&c, opt, NULL, 0, 0))
break;
}
if (oformat)
goto out;
for(type=0; *avcodec_opts && type<AVMEDIA_TYPE_NB && ret>= 0; type++){ for(type=0; *avcodec_opts && type<AVMEDIA_TYPE_NB && ret>= 0; type++){
const AVOption *o2 = av_find_opt(avcodec_opts[0], opt, NULL, opt_types[type], opt_types[type]); const AVOption *o2 = av_find_opt(avcodec_opts[0], opt, NULL, opt_types[type], opt_types[type]);
@@ -252,39 +328,25 @@ int opt_default(const char *opt, const char *arg){
ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_VIDEO], opt+1, arg, 1, &o); ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_VIDEO], opt+1, arg, 1, &o);
else if(opt[0] == 's' && avcodec_opts[AVMEDIA_TYPE_SUBTITLE]) else if(opt[0] == 's' && avcodec_opts[AVMEDIA_TYPE_SUBTITLE])
ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_SUBTITLE], opt+1, arg, 1, &o); ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_SUBTITLE], opt+1, arg, 1, &o);
if (ret >= 0)
opt += 1;
} }
if (o && ret < 0) { if (o && ret < 0) {
fprintf(stderr, "Invalid value '%s' for option '%s'\n", arg, opt); fprintf(stderr, "Invalid value '%s' for option '%s'\n", arg, opt);
exit(1); exit(1);
} }
if (!o) { if (!o) {
AVCodec *p = NULL; fprintf(stderr, "Unrecognized option '%s'\n", opt);
AVOutputFormat *oformat = NULL; exit(1);
while ((p=av_codec_next(p))){
AVClass *c= p->priv_class;
if(c && av_find_opt(&c, opt, NULL, 0, 0))
break;
}
if (!p) {
while ((oformat = av_oformat_next(oformat))) {
const AVClass *c = oformat->priv_class;
if (c && av_find_opt(&c, opt, NULL, 0, 0))
break;
}
}
if(!p && !oformat){
fprintf(stderr, "Unrecognized option '%s'\n", opt);
exit(1);
}
} }
out:
// av_log(NULL, AV_LOG_ERROR, "%s:%s: %f 0x%0X\n", opt, arg, av_get_double(avcodec_opts, opt, NULL), (int)av_get_int(avcodec_opts, opt, NULL)); // av_log(NULL, AV_LOG_ERROR, "%s:%s: %f 0x%0X\n", opt, arg, av_get_double(avcodec_opts, opt, NULL), (int)av_get_int(avcodec_opts, opt, NULL));
//FIXME we should always use avcodec_opts, ... for storing options so there will not be any need to keep track of what i set over this
opt_values= av_realloc(opt_values, sizeof(void*)*(opt_name_count+1)); opt_values= av_realloc(opt_values, sizeof(void*)*(opt_name_count+1));
opt_values[opt_name_count]= o ? NULL : av_strdup(arg); opt_values[opt_name_count] = av_strdup(arg);
opt_names= av_realloc(opt_names, sizeof(void*)*(opt_name_count+1)); opt_names= av_realloc(opt_names, sizeof(void*)*(opt_name_count+1));
opt_names[opt_name_count++]= o ? o->name : av_strdup(opt); opt_names[opt_name_count++] = av_strdup(opt);
if ((*avcodec_opts && avcodec_opts[0]->debug) || (avformat_opts && avformat_opts->debug)) if ((*avcodec_opts && avcodec_opts[0]->debug) || (avformat_opts && avformat_opts->debug))
av_log_set_level(AV_LOG_DEBUG); av_log_set_level(AV_LOG_DEBUG);
@@ -358,16 +420,22 @@ void set_context_opts(void *ctx, void *opts_ctx, int flags, AVCodec *codec)
for(i=0; i<opt_name_count; i++){ for(i=0; i<opt_name_count; i++){
char buf[256]; char buf[256];
const AVOption *opt; const AVOption *opt;
const char *str= av_get_string(opts_ctx, opt_names[i], &opt, buf, sizeof(buf)); const char *str;
/* if an option with name opt_names[i] is present in opts_ctx then str is non-NULL */ if (priv_ctx) {
if(str && ((opt->flags & flags) == flags)) if (av_find_opt(priv_ctx, opt_names[i], NULL, flags, flags)) {
av_set_string3(ctx, opt_names[i], str, 1, NULL); if (av_set_string3(priv_ctx, opt_names[i], opt_values[i], 0, NULL) < 0) {
/* We need to use a differnt system to pass options to the private context because fprintf(stderr, "Invalid value '%s' for option '%s'\n",
it is not known which codec and thus context kind that will be when parsing options opt_names[i], opt_values[i]);
we thus use opt_values directly instead of opts_ctx */ exit(1);
if(!str && priv_ctx) { }
if (av_find_opt(priv_ctx, opt_names[i], NULL, flags, flags)) } else
av_set_string3(priv_ctx, opt_names[i], opt_values[i], 0, NULL); goto global;
} else {
global:
str = av_get_string(opts_ctx, opt_names[i], &opt, buf, sizeof(buf));
/* if an option with name opt_names[i] is present in opts_ctx then str is non-NULL */
if (str && ((opt->flags & flags) == flags))
av_set_string3(ctx, opt_names[i], str, 1, NULL);
} }
} }
} }

28
configure vendored
View File

@@ -161,6 +161,7 @@ Configuration options:
External library support: External library support:
--enable-avisynth enable reading of AVISynth script files [no] --enable-avisynth enable reading of AVISynth script files [no]
--enable-bzlib enable bzlib [autodetect] --enable-bzlib enable bzlib [autodetect]
--enable-libcelt enable CELT/Opus decoding via libcelt [no]
--enable-frei0r enable frei0r video filtering --enable-frei0r enable frei0r video filtering
--enable-libopencore-amrnb enable AMR-NB de/encoding via libopencore-amrnb [no] --enable-libopencore-amrnb enable AMR-NB de/encoding via libopencore-amrnb [no]
--enable-libopencore-amrwb enable AMR-WB decoding via libopencore-amrwb [no] --enable-libopencore-amrwb enable AMR-WB decoding via libopencore-amrwb [no]
@@ -929,6 +930,7 @@ CONFIG_LIST="
h264pred h264pred
hardcoded_tables hardcoded_tables
huffman huffman
libcelt
libdc1394 libdc1394
libdirac libdirac
libfaac libfaac
@@ -1126,6 +1128,7 @@ HAVE_LIST="
ten_operands ten_operands
termios_h termios_h
threads threads
trunc
truncf truncf
vfp_args vfp_args
VirtualAlloc VirtualAlloc
@@ -1245,7 +1248,7 @@ mdct_select="fft"
rdft_select="fft" rdft_select="fft"
# decoders / encoders / hardware accelerators # decoders / encoders / hardware accelerators
aac_decoder_select="mdct rdft sinewin" aac_decoder_select="mdct sinewin"
aac_encoder_select="mdct sinewin" aac_encoder_select="mdct sinewin"
aac_latm_decoder_select="aac_decoder aac_latm_parser" aac_latm_decoder_select="aac_decoder aac_latm_parser"
ac3_decoder_select="mdct ac3dsp ac3_parser" ac3_decoder_select="mdct ac3dsp ac3_parser"
@@ -1393,6 +1396,7 @@ vdpau_deps="vdpau_vdpau_h vdpau_vdpau_x11_h"
h264_parser_select="golomb h264dsp h264pred" h264_parser_select="golomb h264dsp h264pred"
# external libraries # external libraries
libcelt_decoder_deps="libcelt"
libdirac_decoder_deps="libdirac !libschroedinger" libdirac_decoder_deps="libdirac !libschroedinger"
libdirac_encoder_deps="libdirac" libdirac_encoder_deps="libdirac"
libfaac_encoder_deps="libfaac" libfaac_encoder_deps="libfaac"
@@ -2870,7 +2874,7 @@ for thread in $THREADS_LIST; do
fi fi
done done
check_lib math.h sin -lm check_lib math.h sin -lm && LIBM="-lm"
disabled crystalhd || check_lib libcrystalhd/libcrystalhd_if.h DtsCrystalHDVersion -lcrystalhd || disable crystalhd disabled crystalhd || check_lib libcrystalhd/libcrystalhd_if.h DtsCrystalHDVersion -lcrystalhd || disable crystalhd
enabled vaapi && require vaapi va/va.h vaInitialize -lva enabled vaapi && require vaapi va/va.h vaInitialize -lva
@@ -2884,10 +2888,12 @@ check_mathfunc lrint
check_mathfunc lrintf check_mathfunc lrintf
check_mathfunc round check_mathfunc round
check_mathfunc roundf check_mathfunc roundf
check_mathfunc trunc
check_mathfunc truncf check_mathfunc truncf
# these are off by default, so fail if requested and not available # these are off by default, so fail if requested and not available
enabled avisynth && require2 vfw32 "windows.h vfw.h" AVIFileInit -lavifil32 enabled avisynth && require2 vfw32 "windows.h vfw.h" AVIFileInit -lavifil32
enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0
enabled frei0r && { check_header frei0r.h || die "ERROR: frei0r.h header not found"; } enabled frei0r && { check_header frei0r.h || die "ERROR: frei0r.h header not found"; }
enabled libdirac && require_pkg_config dirac \ enabled libdirac && require_pkg_config dirac \
"libdirac_decoder/dirac_parser.h libdirac_encoder/dirac_encoder.h" \ "libdirac_decoder/dirac_parser.h libdirac_encoder/dirac_encoder.h" \
@@ -2957,7 +2963,14 @@ check_cpp_condition vfw.h "WM_CAP_DRIVER_CONNECT > WM_USER" && enable vfwcap_def
check_header dev/ic/bt8xx.h check_header dev/ic/bt8xx.h
check_header sndio.h check_header sndio.h
check_header sys/soundcard.h if check_struct sys/soundcard.h audio_buf_info bytes; then
enable_safe sys/soundcard.h
else
check_cc -D__BSD_VISIBLE -D__XSI_VISIBLE <<EOF && add_cppflags -D__BSD_VISIBLE -D__XSI_VISIBLE && enable_safe sys/soundcard.h
#include <sys/soundcard.h>
audio_buf_info abc;
EOF
fi
check_header soundcard.h check_header soundcard.h
enabled_any alsa_indev alsa_outdev && check_lib2 alsa/asoundlib.h snd_pcm_htimestamp -lasound enabled_any alsa_indev alsa_outdev && check_lib2 alsa/asoundlib.h snd_pcm_htimestamp -lasound
@@ -3167,6 +3180,7 @@ echo "threading support ${thread_type-no}"
echo "SDL support ${sdl-no}" echo "SDL support ${sdl-no}"
echo "Sun medialib support ${mlib-no}" echo "Sun medialib support ${mlib-no}"
echo "AVISynth enabled ${avisynth-no}" echo "AVISynth enabled ${avisynth-no}"
echo "libcelt enabled ${libcelt-no}"
echo "frei0r enabled ${frei0r-no}" echo "frei0r enabled ${frei0r-no}"
echo "libdc1394 support ${libdc1394-no}" echo "libdc1394 support ${libdc1394-no}"
echo "libdirac enabled ${libdirac-no}" echo "libdirac enabled ${libdirac-no}"
@@ -3471,10 +3485,10 @@ Cflags: -I\${includedir}
EOF EOF
} }
pkgconfig_generate libavutil "FFmpeg utility library" "$LIBAVUTIL_VERSION" pkgconfig_generate libavutil "FFmpeg utility library" "$LIBAVUTIL_VERSION" "$LIBM"
pkgconfig_generate libavcodec "FFmpeg codec library" "$LIBAVCODEC_VERSION" "$extralibs" pkgconfig_generate libavcodec "FFmpeg codec library" "$LIBAVCODEC_VERSION" "$extralibs $LIBM" "libavutil = $LIBAVUTIL_VERSION"
pkgconfig_generate libavformat "FFmpeg container format library" "$LIBAVFORMAT_VERSION" "$extralibs" "libavcodec = $LIBAVCODEC_VERSION" pkgconfig_generate libavformat "FFmpeg container format library" "$LIBAVFORMAT_VERSION" "$extralibs" "libavcodec = $LIBAVCODEC_VERSION"
pkgconfig_generate libavdevice "FFmpeg device handling library" "$LIBAVDEVICE_VERSION" "$extralibs" "libavformat = $LIBAVFORMAT_VERSION" pkgconfig_generate libavdevice "FFmpeg device handling library" "$LIBAVDEVICE_VERSION" "$extralibs" "libavformat = $LIBAVFORMAT_VERSION"
pkgconfig_generate libavfilter "FFmpeg video filtering library" "$LIBAVFILTER_VERSION" "$extralibs" pkgconfig_generate libavfilter "FFmpeg video filtering library" "$LIBAVFILTER_VERSION" "$extralibs"
pkgconfig_generate libpostproc "FFmpeg post processing library" "$LIBPOSTPROC_VERSION" pkgconfig_generate libpostproc "FFmpeg post processing library" "$LIBPOSTPROC_VERSION" "" "libavutil = $LIBAVUTIL_VERSION"
pkgconfig_generate libswscale "FFmpeg image rescaling library" "$LIBSWSCALE_VERSION" "" "libavutil = $LIBAVUTIL_VERSION" pkgconfig_generate libswscale "FFmpeg image rescaling library" "$LIBSWSCALE_VERSION" "$LIBM" "libavutil = $LIBAVUTIL_VERSION"

View File

@@ -1,17 +1,26 @@
Never assume the API of libav* to be stable unless at least 1 week has passed since Never assume the API of libav* to be stable unless at least 1 month has passed
the last major version increase. since the last major version increase.
The last version increases were: The last version increases were:
libavcodec: ? libavcodec: 2011-04-18
libavdevice: ? libavdevice: 2011-04-18
libavfilter: 2009-10-18 libavfilter: 2011-04-18
libavformat: ? libavformat: 2011-04-18
libpostproc: ? libpostproc: 2011-04-18
libswscale: ? libswscale: 2011-04-18
libavutil: 2009-03-08 libavutil: 2011-04-18
API changes, most recent first: API changes, most recent first:
2011-04-21 - 94f7451 - lavc 53.1.0 - avcodec.h
Add CODEC_CAP_SLICE_THREADS for codecs supporting sliced threading.
2011-04-15 - lavc 52.120.0 - avcodec.h
AVPacket structure got additional members for passing side information:
4de339e introduce side information for AVPacket
2d8591c make containers pass palette change in AVPacket
2011-04-12 - lavf 52.107.0 - avio.h 2011-04-12 - lavf 52.107.0 - avio.h
Avio cleanup, part II - deprecate the entire URLContext API: Avio cleanup, part II - deprecate the entire URLContext API:
175389c add avio_check as a replacement for url_exist 175389c add avio_check as a replacement for url_exist

View File

@@ -64,4 +64,15 @@ Note that the pattern must not necessarily contain "%d" or
ffmpeg -f image2 -i img.jpeg img.png ffmpeg -f image2 -i img.jpeg img.png
@end example @end example
@section applehttp
Apple HTTP Live Streaming demuxer.
This demuxer presents all AVStreams from all variant streams.
The id field is set to the bitrate variant index number. By setting
the discard flags on AVStreams (by pressing 'a' or 'v' in ffplay),
the caller can decide which variant streams to actually receive.
The total bitrate of the variant that the stream belongs to is
available in a metadata key named "variant_bitrate".
@c man end INPUT DEVICES @c man end INPUT DEVICES

View File

@@ -280,7 +280,7 @@ Just create an "input.avs" text file with this single line ...
@example @example
DirectShowSource("C:\path to your file\yourfile.asf") DirectShowSource("C:\path to your file\yourfile.asf")
@end example @end example
... and then feed that text file to FFmpeg: ... and then feed that text file to ffmpeg:
@example @example
ffmpeg -i input.avs ffmpeg -i input.avs
@end example @end example
@@ -348,7 +348,7 @@ ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
rm temp[12].[av] all.[av] rm temp[12].[av] all.[av]
@end example @end example
@section FFmpeg does not adhere to the -maxrate setting, some frames are bigger than maxrate/fps. @section The ffmpeg program does not respect the -maxrate setting, some frames are bigger than maxrate/fps.
Read the MPEG spec about video buffer verifier. Read the MPEG spec about video buffer verifier.

View File

@@ -1,8 +1,8 @@
\input texinfo @c -*- texinfo -*- \input texinfo @c -*- texinfo -*-
@settitle FFmpeg Documentation @settitle ffmpeg Documentation
@titlepage @titlepage
@center @titlefont{FFmpeg Documentation} @center @titlefont{ffmpeg Documentation}
@end titlepage @end titlepage
@top @top
@@ -22,17 +22,15 @@ ffmpeg [[infile options][@option{-i} @var{infile}]]... @{[outfile options] @var{
@chapter Description @chapter Description
@c man begin DESCRIPTION @c man begin DESCRIPTION
FFmpeg is a very fast video and audio converter. It can also grab from ffmpeg is a very fast video and audio converter that can also grab from
a live audio/video source. a live audio/video source. It can also convert between arbitrary sample
rates and resize video on the fly with a high quality polyphase filter.
The command line interface is designed to be intuitive, in the sense The command line interface is designed to be intuitive, in the sense
that FFmpeg tries to figure out all parameters that can possibly be that ffmpeg tries to figure out all parameters that can possibly be
derived automatically. You usually only have to specify the target derived automatically. You usually only have to specify the target
bitrate you want. bitrate you want.
FFmpeg can also convert from any sample rate to any other, and resize
video on the fly with a high quality polyphase filter.
As a general rule, options are applied to the next specified As a general rule, options are applied to the next specified
file. Therefore, order is important, and you can have the same file. Therefore, order is important, and you can have the same
option on the command line multiple times. Each occurrence is option on the command line multiple times. Each occurrence is
@@ -61,7 +59,7 @@ ffmpeg -r 1 -i input.m2v -r 24 output.avi
The format option may be needed for raw input files. The format option may be needed for raw input files.
By default, FFmpeg tries to convert as losslessly as possible: It By default ffmpeg tries to convert as losslessly as possible: It
uses the same audio and video parameters for the outputs as the one uses the same audio and video parameters for the outputs as the one
specified for the inputs. specified for the inputs.
@@ -495,7 +493,7 @@ Use 'frames' B-frames (supported for MPEG-1, MPEG-2 and MPEG-4).
macroblock decision macroblock decision
@table @samp @table @samp
@item 0 @item 0
FF_MB_DECISION_SIMPLE: Use mb_cmp (cannot change it yet in FFmpeg). FF_MB_DECISION_SIMPLE: Use mb_cmp (cannot change it yet in ffmpeg).
@item 1 @item 1
FF_MB_DECISION_BITS: Choose the one which needs the fewest bits. FF_MB_DECISION_BITS: Choose the one which needs the fewest bits.
@item 2 @item 2
@@ -877,22 +875,22 @@ It allows almost lossless encoding.
@section Video and Audio grabbing @section Video and Audio grabbing
FFmpeg can grab video and audio from devices given that you specify the input If you specify the input format and device then ffmpeg can grab video
format and device. and audio directly.
@example @example
ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg
@end example @end example
Note that you must activate the right video source and channel before Note that you must activate the right video source and channel before
launching FFmpeg with any TV viewer such as xawtv launching ffmpeg with any TV viewer such as xawtv
(@url{http://linux.bytesex.org/xawtv/}) by Gerd Knorr. You also (@url{http://linux.bytesex.org/xawtv/}) by Gerd Knorr. You also
have to set the audio recording levels correctly with a have to set the audio recording levels correctly with a
standard mixer. standard mixer.
@section X11 grabbing @section X11 grabbing
FFmpeg can grab the X11 display. Grab the X11 display with ffmpeg via
@example @example
ffmpeg -f x11grab -s cif -r 25 -i :0.0 /tmp/out.mpg ffmpeg -f x11grab -s cif -r 25 -i :0.0 /tmp/out.mpg
@@ -910,7 +908,7 @@ variable. 10 is the x-offset and 20 the y-offset for the grabbing.
@section Video and Audio file format conversion @section Video and Audio file format conversion
FFmpeg can use any supported file format and protocol as input: Any supported file format and protocol can serve as input to ffmpeg:
Examples: Examples:
@itemize @itemize
@@ -930,7 +928,7 @@ It will use the files:
The Y files use twice the resolution of the U and V files. They are The Y files use twice the resolution of the U and V files. They are
raw files, without header. They can be generated by all decent video raw files, without header. They can be generated by all decent video
decoders. You must specify the size of the image with the @option{-s} option decoders. You must specify the size of the image with the @option{-s} option
if FFmpeg cannot guess it. if ffmpeg cannot guess it.
@item @item
You can input from a raw YUV420P file: You can input from a raw YUV420P file:
@@ -1057,7 +1055,7 @@ file to which you want to add them.
@ignore @ignore
@setfilename ffmpeg @setfilename ffmpeg
@settitle FFmpeg video converter @settitle ffmpeg video converter
@c man begin SEEALSO @c man begin SEEALSO
ffplay(1), ffprobe(1), ffserver(1) and the FFmpeg HTML documentation ffplay(1), ffprobe(1), ffserver(1) and the FFmpeg HTML documentation

View File

@@ -1,8 +1,8 @@
\input texinfo @c -*- texinfo -*- \input texinfo @c -*- texinfo -*-
@settitle FFplay Documentation @settitle ffplay Documentation
@titlepage @titlepage
@center @titlefont{FFplay Documentation} @center @titlefont{ffplay Documentation}
@end titlepage @end titlepage
@top @top

View File

@@ -1,8 +1,8 @@
\input texinfo @c -*- texinfo -*- \input texinfo @c -*- texinfo -*-
@settitle FFprobe Documentation @settitle ffprobe Documentation
@titlepage @titlepage
@center @titlefont{FFprobe Documentation} @center @titlefont{ffprobe Documentation}
@end titlepage @end titlepage
@top @top
@@ -22,7 +22,7 @@ ffprobe [options] [@file{input_file}]
@chapter Description @chapter Description
@c man begin DESCRIPTION @c man begin DESCRIPTION
FFprobe gathers information from multimedia streams and prints it in ffprobe gathers information from multimedia streams and prints it in
human- and machine-readable fashion. human- and machine-readable fashion.
For example it can be used to check the format of the container used For example it can be used to check the format of the container used
@@ -33,7 +33,7 @@ If a filename is specified in input, ffprobe will try to open and
probe the file content. If the file cannot be opened or recognized as probe the file content. If the file cannot be opened or recognized as
a multimedia file, a positive exit code is returned. a multimedia file, a positive exit code is returned.
FFprobe may be employed both as a standalone application or in ffprobe may be employed both as a standalone application or in
combination with a textual filter, which may perform more combination with a textual filter, which may perform more
sophisticated processing, e.g. statistical processing or plotting. sophisticated processing, e.g. statistical processing or plotting.
@@ -41,7 +41,7 @@ Options are used to list some of the formats supported by ffprobe or
for specifying which information to display, and for setting how for specifying which information to display, and for setting how
ffprobe will show it. ffprobe will show it.
FFprobe output is designed to be easily parsable by a textual filter, ffprobe output is designed to be easily parsable by a textual filter,
and consists of one or more sections of the form: and consists of one or more sections of the form:
@example @example
[SECTION] [SECTION]
@@ -119,7 +119,7 @@ with name "STREAM".
@ignore @ignore
@setfilename ffprobe @setfilename ffprobe
@settitle FFprobe media prober @settitle ffprobe media prober
@c man begin SEEALSO @c man begin SEEALSO
ffmpeg(1), ffplay(1), ffserver(1) and the FFmpeg HTML documentation ffmpeg(1), ffplay(1), ffserver(1) and the FFmpeg HTML documentation

View File

@@ -1,8 +1,8 @@
\input texinfo @c -*- texinfo -*- \input texinfo @c -*- texinfo -*-
@settitle FFserver Documentation @settitle ffserver Documentation
@titlepage @titlepage
@center @titlefont{FFserver Documentation} @center @titlefont{ffserver Documentation}
@end titlepage @end titlepage
@top @top
@@ -22,12 +22,12 @@ ffserver [options]
@chapter Description @chapter Description
@c man begin DESCRIPTION @c man begin DESCRIPTION
FFserver is a streaming server for both audio and video. It supports ffserver is a streaming server for both audio and video. It supports
several live feeds, streaming from files and time shifting on live feeds several live feeds, streaming from files and time shifting on live feeds
(you can seek to positions in the past on each live feed, provided you (you can seek to positions in the past on each live feed, provided you
specify a big enough feed storage in ffserver.conf). specify a big enough feed storage in ffserver.conf).
FFserver runs in daemon mode by default; that is, it puts itself in ffserver runs in daemon mode by default; that is, it puts itself in
the background and detaches from its TTY, unless it is launched in the background and detaches from its TTY, unless it is launched in
debug mode or a NoDaemon option is specified in the configuration debug mode or a NoDaemon option is specified in the configuration
file. file.
@@ -39,7 +39,7 @@ information.
@section How does it work? @section How does it work?
FFserver receives prerecorded files or FFM streams from some ffmpeg ffserver receives prerecorded files or FFM streams from some ffmpeg
instance as input, then streams them over RTP/RTSP/HTTP. instance as input, then streams them over RTP/RTSP/HTTP.
An ffserver instance will listen on some port as specified in the An ffserver instance will listen on some port as specified in the
@@ -57,7 +57,7 @@ file.
@section Status stream @section Status stream
FFserver supports an HTTP interface which exposes the current status ffserver supports an HTTP interface which exposes the current status
of the server. of the server.
Simply point your browser to the address of the special status stream Simply point your browser to the address of the special status stream
@@ -249,8 +249,8 @@ For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}.
Use @file{configfile} instead of @file{/etc/ffserver.conf}. Use @file{configfile} instead of @file{/etc/ffserver.conf}.
@item -n @item -n
Enable no-launch mode. This option disables all the Launch directives Enable no-launch mode. This option disables all the Launch directives
within the various <Stream> sections. FFserver will not launch any within the various <Stream> sections. Since ffserver will not launch
ffmpeg instance, so you will have to launch them manually. any ffmpeg instances, you will have to launch them manually.
@item -d @item -d
Enable debug mode. This option increases log verbosity, directs log Enable debug mode. This option increases log verbosity, directs log
messages to stdout and causes ffserver to run in the foreground messages to stdout and causes ffserver to run in the foreground
@@ -261,7 +261,7 @@ rather than as a daemon.
@ignore @ignore
@setfilename ffserver @setfilename ffserver
@settitle FFserver video server @settitle ffserver video server
@c man begin SEEALSO @c man begin SEEALSO

View File

@@ -985,6 +985,39 @@ given coordinates @var{x}, @var{y}.
It accepts the following parameters: It accepts the following parameters:
@var{width}:@var{height}:@var{x}:@var{y}:@var{color}. @var{width}:@var{height}:@var{x}:@var{y}:@var{color}.
The parameters @var{width}, @var{height}, @var{x}, and @var{y} are
expressions containing the following constants:
@table @option
@item E, PI, PHI
the corresponding mathematical approximated values for e
(euler number), pi (greek PI), phi (golden ratio)
@item in_w, in_h
the input video width and height
@item iw, ih
same as @var{in_w} and @var{in_h}
@item out_w, out_h
the output width and height, that is the size of the padded area as
specified by the @var{width} and @var{height} expressions
@item ow, oh
same as @var{out_w} and @var{out_h}
@item x, y
x and y offsets as specified by the @var{x} and @var{y}
expressions, or NAN if not yet specified
@item a
input display aspect ratio, same as @var{iw} / @var{ih}
@item hsub, vsub
horizontal and vertical chroma subsample values. For example for the
pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
@end table
Follows the description of the accepted parameters. Follows the description of the accepted parameters.
@table @option @table @option
@@ -994,6 +1027,9 @@ Specify the size of the output image with the paddings added. If the
value for @var{width} or @var{height} is 0, the corresponding input size value for @var{width} or @var{height} is 0, the corresponding input size
is used for the output. is used for the output.
The @var{width} expression can reference the value set by the
@var{height} expression, and viceversa.
The default value of @var{width} and @var{height} is 0. The default value of @var{width} and @var{height} is 0.
@item x, y @item x, y
@@ -1001,6 +1037,9 @@ The default value of @var{width} and @var{height} is 0.
Specify the offsets where to place the input image in the padded area Specify the offsets where to place the input image in the padded area
with respect to the top/left border of the output image. with respect to the top/left border of the output image.
The @var{x} expression can reference the value set by the @var{y}
expression, and viceversa.
The default value of @var{x} and @var{y} is 0. The default value of @var{x} and @var{y} is 0.
@item color @item color
@@ -1012,13 +1051,29 @@ The default value of @var{color} is "black".
@end table @end table
For example: Some examples follow:
@example @example
# Add paddings with color "violet" to the input video. Output video # Add paddings with color "violet" to the input video. Output video
# size is 640x480, the top-left corner of the input video is placed at # size is 640x480, the top-left corner of the input video is placed at
# column 0, row 40. # column 0, row 40.
pad=640:480:0:40:violet pad=640:480:0:40:violet
# pad the input to get an output with dimensions increased bt 3/2,
# and put the input video at the center of the padded area
pad="3/2*iw:3/2*ih:(ow-iw)/2:(oh-ih)/2"
# pad the input to get a squared output with size equal to the maximum
# value between the input width and height, and put the input video at
# the center of the padded area
pad="max(iw\,ih):ow:(ow-iw)/2:(oh-ih)/2"
# pad the input to get a final w/h ratio of 16:9
pad="ih*16/9:ih:(ow-iw)/2:(oh-ih)/2"
# double output size and put the input video in the bottom-right
# corner of the output padded area
pad="2*iw:2*ih:ow-iw:oh-ih"
@end example @end example
@section pixdesctest @section pixdesctest

View File

@@ -96,7 +96,7 @@ library:
@tab Only embedded audio is decoded. @tab Only embedded audio is decoded.
@item FLI/FLC/FLX animation @tab @tab X @item FLI/FLC/FLX animation @tab @tab X
@tab .fli/.flc files @tab .fli/.flc files
@item Flash Video (FLV) @tab @tab X @item Flash Video (FLV) @tab X @tab X
@tab Macromedia Flash video files @tab Macromedia Flash video files
@item framecrc testing format @tab X @tab @item framecrc testing format @tab X @tab
@item FunCom ISS @tab @tab X @item FunCom ISS @tab @tab X
@@ -598,6 +598,8 @@ following image formats are supported:
@item Atrac 3 @tab @tab X @item Atrac 3 @tab @tab X
@item Bink Audio @tab @tab X @item Bink Audio @tab @tab X
@tab Used in Bink and Smacker files in many games. @tab Used in Bink and Smacker files in many games.
@item CELT (Opus) @tab @tab E
@tab decoding supported through external library libcelt
@item Delphine Software International CIN audio @tab @tab X @item Delphine Software International CIN audio @tab @tab X
@tab Codec used in Delphine Software International games. @tab Codec used in Delphine Software International games.
@item COOK @tab @tab X @item COOK @tab @tab X
@@ -789,14 +791,6 @@ to configure.
BSD make will not build FFmpeg, you need to install and use GNU Make BSD make will not build FFmpeg, you need to install and use GNU Make
(@file{gmake}). (@file{gmake}).
@subsubsection FreeBSD
FreeBSD will not compile out-of-the-box due to broken system headers.
Passing @code{--extra-cflags=-D__BSD_VISIBLE} to configure will work
around the problem. This may have unexpected sideeffects, so use it at
your own risk. If you care about FreeBSD, please make an attempt at
getting the system headers fixed.
@subsection (Open)Solaris @subsection (Open)Solaris
GNU Make is required to build FFmpeg, so you have to invoke (@file{gmake}), GNU Make is required to build FFmpeg, so you have to invoke (@file{gmake}),

View File

@@ -1,5 +1,5 @@
/* /*
* FFmpeg main * ffmpeg main
* Copyright (c) 2000-2003 Fabrice Bellard * Copyright (c) 2000-2003 Fabrice Bellard
* *
* This file is part of FFmpeg. * This file is part of FFmpeg.
@@ -85,7 +85,7 @@
#include "libavutil/avassert.h" #include "libavutil/avassert.h"
const char program_name[] = "FFmpeg"; const char program_name[] = "ffmpeg";
const int program_birth_year = 2000; const int program_birth_year = 2000;
/* select an input stream for an output stream */ /* select an input stream for an output stream */
@@ -209,7 +209,7 @@ static int do_hex_dump = 0;
static int do_pkt_dump = 0; static int do_pkt_dump = 0;
static int do_psnr = 0; static int do_psnr = 0;
static int do_pass = 0; static int do_pass = 0;
static char *pass_logfilename_prefix = NULL; static const char *pass_logfilename_prefix;
static int audio_stream_copy = 0; static int audio_stream_copy = 0;
static int video_stream_copy = 0; static int video_stream_copy = 0;
static int subtitle_stream_copy = 0; static int subtitle_stream_copy = 0;
@@ -235,7 +235,7 @@ static int audio_volume = 256;
static int exit_on_error = 0; static int exit_on_error = 0;
static int using_stdin = 0; static int using_stdin = 0;
static int verbose = 1; static int verbose = 1;
static int daemon = 0; static int run_as_daemon = 0;
static int thread_count= 1; static int thread_count= 1;
static int q_pressed = 0; static int q_pressed = 0;
static int64_t video_size = 0; static int64_t video_size = 0;
@@ -289,10 +289,6 @@ typedef struct AVOutputStream {
float frame_aspect_ratio; float frame_aspect_ratio;
/* full frame size of first frame */
int original_height;
int original_width;
/* forced key frames */ /* forced key frames */
int64_t *forced_kf_pts; int64_t *forced_kf_pts;
int forced_kf_count; int forced_kf_count;
@@ -445,7 +441,7 @@ static void term_exit(void)
{ {
av_log(NULL, AV_LOG_QUIET, ""); av_log(NULL, AV_LOG_QUIET, "");
#if HAVE_TERMIOS_H #if HAVE_TERMIOS_H
if(!daemon) if(!run_as_daemon)
tcsetattr (0, TCSANOW, &oldtty); tcsetattr (0, TCSANOW, &oldtty);
#endif #endif
} }
@@ -463,7 +459,7 @@ sigterm_handler(int sig)
static void term_init(void) static void term_init(void)
{ {
#if HAVE_TERMIOS_H #if HAVE_TERMIOS_H
if(!daemon){ if(!run_as_daemon){
struct termios tty; struct termios tty;
tcgetattr (0, &tty); tcgetattr (0, &tty);
@@ -500,7 +496,7 @@ static int read_key(void)
struct timeval tv; struct timeval tv;
fd_set rfds; fd_set rfds;
if(daemon) if(run_as_daemon)
return -1; return -1;
FD_ZERO(&rfds); FD_ZERO(&rfds);
@@ -1145,8 +1141,8 @@ static void do_video_out(AVFormatContext *s,
AVFrame *in_picture, AVFrame *in_picture,
int *frame_size) int *frame_size)
{ {
int nb_frames, i, ret; int nb_frames, i, ret, resample_changed;
AVFrame *final_picture, *formatted_picture, *resampling_dst, *padding_src; AVFrame *final_picture, *formatted_picture, *resampling_dst;
AVCodecContext *enc, *dec; AVCodecContext *enc, *dec;
double sync_ipts; double sync_ipts;
@@ -1191,26 +1187,26 @@ static void do_video_out(AVFormatContext *s,
formatted_picture = in_picture; formatted_picture = in_picture;
final_picture = formatted_picture; final_picture = formatted_picture;
padding_src = formatted_picture;
resampling_dst = &ost->pict_tmp; resampling_dst = &ost->pict_tmp;
if ( ost->resample_height != ist->st->codec->height resample_changed = ost->resample_width != dec->width ||
|| ost->resample_width != ist->st->codec->width ost->resample_height != dec->height ||
|| (ost->resample_pix_fmt!= ist->st->codec->pix_fmt) ) { ost->resample_pix_fmt != dec->pix_fmt;
fprintf(stderr,"Input Stream #%d.%d frame size changed to %dx%d, %s\n", ist->file_index, ist->index, ist->st->codec->width, ist->st->codec->height,avcodec_get_pix_fmt_name(ist->st->codec->pix_fmt)); if (resample_changed) {
av_log(NULL, AV_LOG_INFO,
"Input stream #%d.%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
ist->file_index, ist->index,
ost->resample_width, ost->resample_height, avcodec_get_pix_fmt_name(ost->resample_pix_fmt),
dec->width , dec->height , avcodec_get_pix_fmt_name(dec->pix_fmt));
if(!ost->video_resample) if(!ost->video_resample)
ffmpeg_exit(1); ffmpeg_exit(1);
} }
#if !CONFIG_AVFILTER #if !CONFIG_AVFILTER
if (ost->video_resample) { if (ost->video_resample) {
padding_src = NULL;
final_picture = &ost->pict_tmp; final_picture = &ost->pict_tmp;
if( ost->resample_height != ist->st->codec->height if (resample_changed) {
|| ost->resample_width != ist->st->codec->width
|| (ost->resample_pix_fmt!= ist->st->codec->pix_fmt) ) {
/* initialize a new scaler context */ /* initialize a new scaler context */
sws_freeContext(ost->img_resample_ctx); sws_freeContext(ost->img_resample_ctx);
sws_flags = av_get_int(sws_opts, "sws_flags", NULL); sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
@@ -1495,7 +1491,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
int ret, i; int ret, i;
int got_picture; int got_picture;
AVFrame picture; AVFrame picture;
void *buffer_to_free; void *buffer_to_free = NULL;
static unsigned int samples_size= 0; static unsigned int samples_size= 0;
AVSubtitle subtitle, *subtitle_to_free; AVSubtitle subtitle, *subtitle_to_free;
int64_t pkt_pts = AV_NOPTS_VALUE; int64_t pkt_pts = AV_NOPTS_VALUE;
@@ -1596,6 +1592,8 @@ static int output_packet(AVInputStream *ist, int ist_index,
ist->st->codec->time_base.den; ist->st->codec->time_base.den;
} }
avpkt.size = 0; avpkt.size = 0;
buffer_to_free = NULL;
pre_process_video_frame(ist, (AVPicture *)&picture, &buffer_to_free);
break; break;
case AVMEDIA_TYPE_SUBTITLE: case AVMEDIA_TYPE_SUBTITLE:
ret = avcodec_decode_subtitle2(ist->st->codec, ret = avcodec_decode_subtitle2(ist->st->codec,
@@ -1630,12 +1628,6 @@ static int output_packet(AVInputStream *ist, int ist_index,
avpkt.size = 0; avpkt.size = 0;
} }
buffer_to_free = NULL;
if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
pre_process_video_frame(ist, (AVPicture *)&picture,
&buffer_to_free);
}
#if CONFIG_AVFILTER #if CONFIG_AVFILTER
if(ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO){ if(ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
for(i=0;i<nb_ostreams;i++) { for(i=0;i<nb_ostreams;i++) {
@@ -2288,9 +2280,9 @@ static int transcode(AVFormatContext **output_files,
fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\n"); fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\n");
ffmpeg_exit(1); ffmpeg_exit(1);
} }
ost->video_resample = (codec->width != icodec->width || ost->video_resample = codec->width != icodec->width ||
codec->height != icodec->height || codec->height != icodec->height ||
(codec->pix_fmt != icodec->pix_fmt)); codec->pix_fmt != icodec->pix_fmt;
if (ost->video_resample) { if (ost->video_resample) {
#if !CONFIG_AVFILTER #if !CONFIG_AVFILTER
avcodec_get_frame_defaults(&ost->pict_tmp); avcodec_get_frame_defaults(&ost->pict_tmp);
@@ -2312,9 +2304,6 @@ static int transcode(AVFormatContext **output_files,
fprintf(stderr, "Cannot get resampling context\n"); fprintf(stderr, "Cannot get resampling context\n");
ffmpeg_exit(1); ffmpeg_exit(1);
} }
ost->original_height = icodec->height;
ost->original_width = icodec->width;
#endif #endif
codec->bits_per_raw_sample= frame_bits_per_raw_sample; codec->bits_per_raw_sample= frame_bits_per_raw_sample;
} }
@@ -2340,7 +2329,7 @@ static int transcode(AVFormatContext **output_files,
break; break;
} }
/* two pass mode */ /* two pass mode */
if (ost->encoding_needed && if (ost->encoding_needed && codec->codec_id != CODEC_ID_H264 &&
(codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) { (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
char logfilename[1024]; char logfilename[1024];
FILE *f; FILE *f;
@@ -4277,6 +4266,12 @@ static void log_callback_null(void* ptr, int level, const char* fmt, va_list vl)
{ {
} }
static void opt_passlogfile(const char *arg)
{
pass_logfilename_prefix = arg;
opt_default("passlogfile", arg);
}
static const OptionDef options[] = { static const OptionDef options[] = {
/* main options */ /* main options */
#include "cmdutils_common_opts.h" #include "cmdutils_common_opts.h"
@@ -4350,7 +4345,7 @@ static const OptionDef options[] = {
{ "sameq", OPT_BOOL | OPT_VIDEO, {(void*)&same_quality}, { "sameq", OPT_BOOL | OPT_VIDEO, {(void*)&same_quality},
"use same quantizer as source (implies VBR)" }, "use same quantizer as source (implies VBR)" },
{ "pass", HAS_ARG | OPT_FUNC2 | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" }, { "pass", HAS_ARG | OPT_FUNC2 | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
{ "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" }, { "passlogfile", HAS_ARG | OPT_VIDEO, {(void*)&opt_passlogfile}, "select two pass log file name prefix", "prefix" },
{ "deinterlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_deinterlace}, { "deinterlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_deinterlace},
"deinterlace pictures" }, "deinterlace pictures" },
{ "psnr", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_psnr}, "calculate PSNR of compressed frames" }, { "psnr", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_psnr}, "calculate PSNR of compressed frames" },
@@ -4421,7 +4416,7 @@ int main(int argc, char **argv)
av_log_set_flags(AV_LOG_SKIP_REPEATED); av_log_set_flags(AV_LOG_SKIP_REPEATED);
if(argc>1 && !strcmp(argv[1], "-d")){ if(argc>1 && !strcmp(argv[1], "-d")){
daemon=1; run_as_daemon=1;
verbose=-1; verbose=-1;
av_log_set_callback(log_callback_null); av_log_set_callback(log_callback_null);
argc--; argc--;

View File

@@ -1,5 +1,5 @@
/* /*
* FFplay : Simple Media Player based on the FFmpeg libraries * ffplay : Simple Media Player based on the FFmpeg libraries
* Copyright (c) 2003 Fabrice Bellard * Copyright (c) 2003 Fabrice Bellard
* *
* This file is part of FFmpeg. * This file is part of FFmpeg.
@@ -56,7 +56,7 @@
#include <unistd.h> #include <unistd.h>
#include <assert.h> #include <assert.h>
const char program_name[] = "FFplay"; const char program_name[] = "ffplay";
const int program_birth_year = 2003; const int program_birth_year = 2003;
//#define DEBUG //#define DEBUG
@@ -206,7 +206,6 @@ typedef struct VideoState {
struct SwsContext *img_convert_ctx; struct SwsContext *img_convert_ctx;
#endif #endif
// QETimer *video_timer;
char filename[1024]; char filename[1024];
int width, height, xleft, ytop; int width, height, xleft, ytop;
@@ -753,8 +752,8 @@ static void video_image_display(VideoState *is)
} }
rect.x = is->xleft + x; rect.x = is->xleft + x;
rect.y = is->ytop + y; rect.y = is->ytop + y;
rect.w = width; rect.w = FFMAX(width, 1);
rect.h = height; rect.h = FFMAX(height, 1);
SDL_DisplayYUVOverlay(vp->bmp, &rect); SDL_DisplayYUVOverlay(vp->bmp, &rect);
} else { } else {
#if 0 #if 0
@@ -1064,7 +1063,7 @@ static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_by
} }
/* pause or resume the video */ /* pause or resume the video */
static void stream_pause(VideoState *is) static void stream_toggle_pause(VideoState *is)
{ {
if (is->paused) { if (is->paused) {
is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts; is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
@@ -1804,14 +1803,13 @@ static int video_thread(void *arg)
{ {
VideoState *is = arg; VideoState *is = arg;
AVFrame *frame= avcodec_alloc_frame(); AVFrame *frame= avcodec_alloc_frame();
int64_t pts_int; int64_t pts_int, pos;
double pts; double pts;
int ret; int ret;
#if CONFIG_AVFILTER #if CONFIG_AVFILTER
AVFilterGraph *graph = avfilter_graph_alloc(); AVFilterGraph *graph = avfilter_graph_alloc();
AVFilterContext *filt_out = NULL; AVFilterContext *filt_out = NULL;
int64_t pos;
if ((ret = configure_video_filters(graph, is, vfilters)) < 0) if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
goto the_end; goto the_end;
@@ -1845,6 +1843,7 @@ static int video_thread(void *arg)
} }
#else #else
ret = get_video_frame(is, frame, &pts_int, &pkt); ret = get_video_frame(is, frame, &pts_int, &pkt);
pos = pkt.pos;
#endif #endif
if (ret < 0) goto the_end; if (ret < 0) goto the_end;
@@ -1854,10 +1853,8 @@ static int video_thread(void *arg)
pts = pts_int*av_q2d(is->video_st->time_base); pts = pts_int*av_q2d(is->video_st->time_base);
#if CONFIG_AVFILTER
ret = output_picture(is, frame, pts, pos); ret = output_picture(is, frame, pts, pos);
#else #if !CONFIG_AVFILTER
ret = output_picture(is, frame, pts, pkt.pos);
av_free_packet(&pkt); av_free_packet(&pkt);
#endif #endif
if (ret < 0) if (ret < 0)
@@ -1865,7 +1862,7 @@ static int video_thread(void *arg)
if (step) if (step)
if (cur_stream) if (cur_stream)
stream_pause(cur_stream); stream_toggle_pause(cur_stream);
} }
the_end: the_end:
#if CONFIG_AVFILTER #if CONFIG_AVFILTER
@@ -1944,7 +1941,7 @@ static int subtitle_thread(void *arg)
av_free_packet(pkt); av_free_packet(pkt);
// if (step) // if (step)
// if (cur_stream) // if (cur_stream)
// stream_pause(cur_stream); // stream_toggle_pause(cur_stream);
} }
the_end: the_end:
return 0; return 0;
@@ -2713,7 +2710,7 @@ static void toggle_full_screen(void)
static void toggle_pause(void) static void toggle_pause(void)
{ {
if (cur_stream) if (cur_stream)
stream_pause(cur_stream); stream_toggle_pause(cur_stream);
step = 0; step = 0;
} }
@@ -2722,7 +2719,7 @@ static void step_to_next_frame(void)
if (cur_stream) { if (cur_stream) {
/* if the stream is paused unpause it, then step */ /* if the stream is paused unpause it, then step */
if (cur_stream->paused) if (cur_stream->paused)
stream_pause(cur_stream); stream_toggle_pause(cur_stream);
} }
step = 1; step = 1;
} }

View File

@@ -1,5 +1,5 @@
/* /*
* FFprobe : Simple Media Prober based on the FFmpeg libraries * ffprobe : Simple Media Prober based on the FFmpeg libraries
* Copyright (c) 2007-2010 Stefano Sabatini * Copyright (c) 2007-2010 Stefano Sabatini
* *
* This file is part of FFmpeg. * This file is part of FFmpeg.
@@ -28,7 +28,7 @@
#include "libavdevice/avdevice.h" #include "libavdevice/avdevice.h"
#include "cmdutils.h" #include "cmdutils.h"
const char program_name[] = "FFprobe"; const char program_name[] = "ffprobe";
const int program_birth_year = 2007; const int program_birth_year = 2007;
static int do_show_format = 0; static int do_show_format = 0;

View File

@@ -59,7 +59,7 @@
#include "cmdutils.h" #include "cmdutils.h"
const char program_name[] = "FFserver"; const char program_name[] = "ffserver";
const int program_birth_year = 2000; const int program_birth_year = 2000;
static const OptionDef options[]; static const OptionDef options[];

View File

@@ -141,7 +141,7 @@ OBJS-$(CONFIG_FFV1_DECODER) += ffv1.o rangecoder.o
OBJS-$(CONFIG_FFV1_ENCODER) += ffv1.o rangecoder.o OBJS-$(CONFIG_FFV1_ENCODER) += ffv1.o rangecoder.o
OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o
OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o
OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o
OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
OBJS-$(CONFIG_FLASHSV_ENCODER) += flashsvenc.o OBJS-$(CONFIG_FLASHSV_ENCODER) += flashsvenc.o
@@ -393,9 +393,9 @@ OBJS-$(CONFIG_VCR1_ENCODER) += vcr1.o
OBJS-$(CONFIG_VMDAUDIO_DECODER) += vmdav.o OBJS-$(CONFIG_VMDAUDIO_DECODER) += vmdav.o
OBJS-$(CONFIG_VMDVIDEO_DECODER) += vmdav.o OBJS-$(CONFIG_VMDVIDEO_DECODER) += vmdav.o
OBJS-$(CONFIG_VMNC_DECODER) += vmnc.o OBJS-$(CONFIG_VMNC_DECODER) += vmnc.o
OBJS-$(CONFIG_VORBIS_DECODER) += vorbis_dec.o vorbis.o \ OBJS-$(CONFIG_VORBIS_DECODER) += vorbisdec.o vorbis.o \
vorbis_data.o xiph.o vorbis_data.o xiph.o
OBJS-$(CONFIG_VORBIS_ENCODER) += vorbis_enc.o vorbis.o \ OBJS-$(CONFIG_VORBIS_ENCODER) += vorbisenc.o vorbis.o \
vorbis_data.o vorbis_data.o
OBJS-$(CONFIG_VP3_DECODER) += vp3.o vp3dsp.o OBJS-$(CONFIG_VP3_DECODER) += vp3.o vp3dsp.o
OBJS-$(CONFIG_VP5_DECODER) += vp5.o vp56.o vp56data.o vp56dsp.o \ OBJS-$(CONFIG_VP5_DECODER) += vp5.o vp56.o vp56data.o vp56dsp.o \
@@ -554,6 +554,7 @@ OBJS-$(CONFIG_WEBM_MUXER) += xiph.o mpeg4audio.o \
mpegaudiodata.o mpegaudiodata.o
# external codec libraries # external codec libraries
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
OBJS-$(CONFIG_LIBDIRAC_DECODER) += libdiracdec.o OBJS-$(CONFIG_LIBDIRAC_DECODER) += libdiracdec.o
OBJS-$(CONFIG_LIBDIRAC_ENCODER) += libdiracenc.o libdirac_libschro.o OBJS-$(CONFIG_LIBDIRAC_ENCODER) += libdiracenc.o libdirac_libschro.o
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o

View File

@@ -31,6 +31,7 @@
***********************************/ ***********************************/
#include <float.h> #include <float.h>
#include <math.h>
#include "avcodec.h" #include "avcodec.h"
#include "put_bits.h" #include "put_bits.h"
#include "aac.h" #include "aac.h"

View File

@@ -180,9 +180,8 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
* @return Returns error status. 0 - OK, !0 - error * @return Returns error status. 0 - OK, !0 - error
*/ */
static av_cold int che_configure(AACContext *ac, static av_cold int che_configure(AACContext *ac,
enum ChannelPosition che_pos[4][MAX_ELEM_ID], enum ChannelPosition che_pos[4][MAX_ELEM_ID],
int type, int id, int type, int id, int *channels)
int *channels)
{ {
if (che_pos[type][id]) { if (che_pos[type][id]) {
if (!ac->che[type][id] && !(ac->che[type][id] = av_mallocz(sizeof(ChannelElement)))) if (!ac->che[type][id] && !(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
@@ -212,9 +211,9 @@ static av_cold int che_configure(AACContext *ac,
* @return Returns error status. 0 - OK, !0 - error * @return Returns error status. 0 - OK, !0 - error
*/ */
static av_cold int output_configure(AACContext *ac, static av_cold int output_configure(AACContext *ac,
enum ChannelPosition che_pos[4][MAX_ELEM_ID], enum ChannelPosition che_pos[4][MAX_ELEM_ID],
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID], enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
int channel_config, enum OCStatus oc_type) int channel_config, enum OCStatus oc_type)
{ {
AVCodecContext *avctx = ac->avctx; AVCodecContext *avctx = ac->avctx;
int i, type, channels = 0, ret; int i, type, channels = 0, ret;
@@ -231,7 +230,7 @@ static av_cold int output_configure(AACContext *ac,
return ret; return ret;
} }
memset(ac->tag_che_map, 0, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0])); memset(ac->tag_che_map, 0, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
avctx->channel_layout = aac_channel_layout[channel_config - 1]; avctx->channel_layout = aac_channel_layout[channel_config - 1];
} else { } else {
@@ -346,8 +345,8 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
* @return Returns error status. 0 - OK, !0 - error * @return Returns error status. 0 - OK, !0 - error
*/ */
static av_cold int set_default_channel_config(AVCodecContext *avctx, static av_cold int set_default_channel_config(AVCodecContext *avctx,
enum ChannelPosition new_che_pos[4][MAX_ELEM_ID], enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
int channel_config) int channel_config)
{ {
if (channel_config < 1 || channel_config > 7) { if (channel_config < 1 || channel_config > 7) {
av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n", av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
@@ -464,6 +463,11 @@ static int decode_audio_specific_config(AACContext *ac,
GetBitContext gb; GetBitContext gb;
int i; int i;
av_dlog(avctx, "extradata size %d\n", avctx->extradata_size);
for (i = 0; i < avctx->extradata_size; i++)
av_dlog(avctx, "%02x ", avctx->extradata[i]);
av_dlog(avctx, "\n");
init_get_bits(&gb, data, data_size * 8); init_get_bits(&gb, data, data_size * 8);
if ((i = ff_mpeg4audio_get_config(m4ac, data, data_size)) < 0) if ((i = ff_mpeg4audio_get_config(m4ac, data, data_size)) < 0)
@@ -490,6 +494,10 @@ static int decode_audio_specific_config(AACContext *ac,
return -1; return -1;
} }
av_dlog(avctx, "AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\n",
m4ac->object_type, m4ac->chan_config, m4ac->sampling_index,
m4ac->sample_rate, m4ac->sbr, m4ac->ps);
return get_bits_count(&gb); return get_bits_count(&gb);
} }
@@ -1240,7 +1248,7 @@ static av_always_inline float flt16_trunc(float pf)
static av_always_inline void predict(PredictorState *ps, float *coef, static av_always_inline void predict(PredictorState *ps, float *coef,
float sf_scale, float inv_sf_scale, float sf_scale, float inv_sf_scale,
int output_enable) int output_enable)
{ {
const float a = 0.953125; // 61.0 / 64 const float a = 0.953125; // 61.0 / 64
const float alpha = 0.90625; // 29.0 / 32 const float alpha = 0.90625; // 29.0 / 32

View File

@@ -606,8 +606,10 @@ static int aac_encode_frame(AVCodecContext *avctx,
} }
frame_bits = put_bits_count(&s->pb); frame_bits = put_bits_count(&s->pb);
if (frame_bits <= 6144 * avctx->channels - 3) if (frame_bits <= 6144 * avctx->channels - 3) {
s->psy.bitres.bits = frame_bits / avctx->channels;
break; break;
}
s->lambda *= avctx->bit_rate * 1024.0f / avctx->sample_rate / frame_bits; s->lambda *= avctx->bit_rate * 1024.0f / avctx->sample_rate / frame_bits;

View File

@@ -30,7 +30,6 @@
/*********************************** /***********************************
* TODOs: * TODOs:
* thresholds linearization after their modifications for attaining given bitrate
* try other bitrate controlling mechanism (maybe use ratecontrol.c?) * try other bitrate controlling mechanism (maybe use ratecontrol.c?)
* control quality for quality-based output * control quality for quality-based output
**********************************/ **********************************/
@@ -41,10 +40,51 @@
*/ */
#define PSY_3GPP_THR_SPREAD_HI 1.5f // spreading factor for low-to-hi threshold spreading (15 dB/Bark) #define PSY_3GPP_THR_SPREAD_HI 1.5f // spreading factor for low-to-hi threshold spreading (15 dB/Bark)
#define PSY_3GPP_THR_SPREAD_LOW 3.0f // spreading factor for hi-to-low threshold spreading (30 dB/Bark) #define PSY_3GPP_THR_SPREAD_LOW 3.0f // spreading factor for hi-to-low threshold spreading (30 dB/Bark)
/* spreading factor for low-to-hi energy spreading, long block, > 22kbps/channel (20dB/Bark) */
#define PSY_3GPP_EN_SPREAD_HI_L1 2.0f
/* spreading factor for low-to-hi energy spreading, long block, <= 22kbps/channel (15dB/Bark) */
#define PSY_3GPP_EN_SPREAD_HI_L2 1.5f
/* spreading factor for low-to-hi energy spreading, short block (15 dB/Bark) */
#define PSY_3GPP_EN_SPREAD_HI_S 1.5f
/* spreading factor for hi-to-low energy spreading, long block (30dB/Bark) */
#define PSY_3GPP_EN_SPREAD_LOW_L 3.0f
/* spreading factor for hi-to-low energy spreading, short block (20dB/Bark) */
#define PSY_3GPP_EN_SPREAD_LOW_S 2.0f
#define PSY_3GPP_RPEMIN 0.01f #define PSY_3GPP_RPEMIN 0.01f
#define PSY_3GPP_RPELEV 2.0f #define PSY_3GPP_RPELEV 2.0f
#define PSY_3GPP_C1 3.0f /* log2(8) */
#define PSY_3GPP_C2 1.3219281f /* log2(2.5) */
#define PSY_3GPP_C3 0.55935729f /* 1 - C2 / C1 */
#define PSY_SNR_1DB 7.9432821e-1f /* -1dB */
#define PSY_SNR_25DB 3.1622776e-3f /* -25dB */
#define PSY_3GPP_SAVE_SLOPE_L -0.46666667f
#define PSY_3GPP_SAVE_SLOPE_S -0.36363637f
#define PSY_3GPP_SAVE_ADD_L -0.84285712f
#define PSY_3GPP_SAVE_ADD_S -0.75f
#define PSY_3GPP_SPEND_SLOPE_L 0.66666669f
#define PSY_3GPP_SPEND_SLOPE_S 0.81818181f
#define PSY_3GPP_SPEND_ADD_L -0.35f
#define PSY_3GPP_SPEND_ADD_S -0.26111111f
#define PSY_3GPP_CLIP_LO_L 0.2f
#define PSY_3GPP_CLIP_LO_S 0.2f
#define PSY_3GPP_CLIP_HI_L 0.95f
#define PSY_3GPP_CLIP_HI_S 0.75f
#define PSY_3GPP_AH_THR_LONG 0.5f
#define PSY_3GPP_AH_THR_SHORT 0.63f
enum {
PSY_3GPP_AH_NONE,
PSY_3GPP_AH_INACTIVE,
PSY_3GPP_AH_ACTIVE
};
#define PSY_3GPP_BITS_TO_PE(bits) ((bits) * 1.18f)
/* LAME psy model constants */ /* LAME psy model constants */
#define PSY_LAME_FIR_LEN 21 ///< LAME psy model FIR order #define PSY_LAME_FIR_LEN 21 ///< LAME psy model FIR order
#define AAC_BLOCK_SIZE_LONG 1024 ///< long block size #define AAC_BLOCK_SIZE_LONG 1024 ///< long block size
@@ -60,9 +100,15 @@
* information for single band used by 3GPP TS26.403-inspired psychoacoustic model * information for single band used by 3GPP TS26.403-inspired psychoacoustic model
*/ */
typedef struct AacPsyBand{ typedef struct AacPsyBand{
float energy; ///< band energy float energy; ///< band energy
float thr; ///< energy threshold float thr; ///< energy threshold
float thr_quiet; ///< threshold in quiet float thr_quiet; ///< threshold in quiet
float nz_lines; ///< number of non-zero spectral lines
float active_lines; ///< number of active spectral lines
float pe; ///< perceptual entropy
float pe_const; ///< constant part of the PE calculation
float norm_fac; ///< normalization factor for linearization
int avoid_holes; ///< hole avoidance flag
}AacPsyBand; }AacPsyBand;
/** /**
@@ -97,6 +143,15 @@ typedef struct AacPsyCoeffs{
* 3GPP TS26.403-inspired psychoacoustic model specific data * 3GPP TS26.403-inspired psychoacoustic model specific data
*/ */
typedef struct AacPsyContext{ typedef struct AacPsyContext{
int chan_bitrate; ///< bitrate per channel
int frame_bits; ///< average bits per frame
int fill_level; ///< bit reservoir fill level
struct {
float min; ///< minimum allowed PE for bit factor calculation
float max; ///< maximum allowed PE for bit factor calculation
float previous; ///< allowed PE of the previous frame
float correction; ///< PE correction factor
} pe;
AacPsyCoeffs psy_coef[2][64]; AacPsyCoeffs psy_coef[2][64];
AacPsyChannel *ch; AacPsyChannel *ch;
}AacPsyContext; }AacPsyContext;
@@ -235,16 +290,33 @@ static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
AacPsyContext *pctx; AacPsyContext *pctx;
float bark; float bark;
int i, j, g, start; int i, j, g, start;
float prev, minscale, minath; float prev, minscale, minath, minsnr, pe_min;
const int chan_bitrate = ctx->avctx->bit_rate / ctx->avctx->channels;
const int bandwidth = ctx->avctx->cutoff ? ctx->avctx->cutoff : ctx->avctx->sample_rate / 2;
const float num_bark = calc_bark((float)bandwidth);
ctx->model_priv_data = av_mallocz(sizeof(AacPsyContext)); ctx->model_priv_data = av_mallocz(sizeof(AacPsyContext));
pctx = (AacPsyContext*) ctx->model_priv_data; pctx = (AacPsyContext*) ctx->model_priv_data;
pctx->chan_bitrate = chan_bitrate;
pctx->frame_bits = chan_bitrate * AAC_BLOCK_SIZE_LONG / ctx->avctx->sample_rate;
pctx->pe.min = 8.0f * AAC_BLOCK_SIZE_LONG * bandwidth / (ctx->avctx->sample_rate * 2.0f);
pctx->pe.max = 12.0f * AAC_BLOCK_SIZE_LONG * bandwidth / (ctx->avctx->sample_rate * 2.0f);
ctx->bitres.size = 6144 - pctx->frame_bits;
ctx->bitres.size -= ctx->bitres.size % 8;
pctx->fill_level = ctx->bitres.size;
minath = ath(3410, ATH_ADD); minath = ath(3410, ATH_ADD);
for (j = 0; j < 2; j++) { for (j = 0; j < 2; j++) {
AacPsyCoeffs *coeffs = pctx->psy_coef[j]; AacPsyCoeffs *coeffs = pctx->psy_coef[j];
const uint8_t *band_sizes = ctx->bands[j]; const uint8_t *band_sizes = ctx->bands[j];
float line_to_frequency = ctx->avctx->sample_rate / (j ? 256.f : 2048.0f); float line_to_frequency = ctx->avctx->sample_rate / (j ? 256.f : 2048.0f);
float avg_chan_bits = chan_bitrate / ctx->avctx->sample_rate * (j ? 128.0f : 1024.0f);
/* reference encoder uses 2.4% here instead of 60% like the spec says */
float bark_pe = 0.024f * PSY_3GPP_BITS_TO_PE(avg_chan_bits) / num_bark;
float en_spread_low = j ? PSY_3GPP_EN_SPREAD_LOW_S : PSY_3GPP_EN_SPREAD_LOW_L;
/* High energy spreading for long blocks <= 22kbps/channel and short blocks are the same. */
float en_spread_hi = (j || (chan_bitrate <= 22.0f)) ? PSY_3GPP_EN_SPREAD_HI_S : PSY_3GPP_EN_SPREAD_HI_L1;
i = 0; i = 0;
prev = 0.0; prev = 0.0;
for (g = 0; g < ctx->num_bands[j]; g++) { for (g = 0; g < ctx->num_bands[j]; g++) {
@@ -258,6 +330,11 @@ static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
float bark_width = coeffs[g+1].barks - coeffs->barks; float bark_width = coeffs[g+1].barks - coeffs->barks;
coeff->spread_low[0] = pow(10.0, -bark_width * PSY_3GPP_THR_SPREAD_LOW); coeff->spread_low[0] = pow(10.0, -bark_width * PSY_3GPP_THR_SPREAD_LOW);
coeff->spread_hi [0] = pow(10.0, -bark_width * PSY_3GPP_THR_SPREAD_HI); coeff->spread_hi [0] = pow(10.0, -bark_width * PSY_3GPP_THR_SPREAD_HI);
coeff->spread_low[1] = pow(10.0, -bark_width * en_spread_low);
coeff->spread_hi [1] = pow(10.0, -bark_width * en_spread_hi);
pe_min = bark_pe * bark_width;
minsnr = pow(2.0f, pe_min / band_sizes[g]) - 1.5f;
coeff->min_snr = av_clipf(1.0f / minsnr, PSY_SNR_25DB, PSY_SNR_1DB);
} }
start = 0; start = 0;
for (g = 0; g < ctx->num_bands[j]; g++) { for (g = 0; g < ctx->num_bands[j]; g++) {
@@ -385,6 +462,97 @@ static FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx,
return wi; return wi;
} }
/* 5.6.1.2 "Calculation of Bit Demand" */
static int calc_bit_demand(AacPsyContext *ctx, float pe, int bits, int size,
int short_window)
{
const float bitsave_slope = short_window ? PSY_3GPP_SAVE_SLOPE_S : PSY_3GPP_SAVE_SLOPE_L;
const float bitsave_add = short_window ? PSY_3GPP_SAVE_ADD_S : PSY_3GPP_SAVE_ADD_L;
const float bitspend_slope = short_window ? PSY_3GPP_SPEND_SLOPE_S : PSY_3GPP_SPEND_SLOPE_L;
const float bitspend_add = short_window ? PSY_3GPP_SPEND_ADD_S : PSY_3GPP_SPEND_ADD_L;
const float clip_low = short_window ? PSY_3GPP_CLIP_LO_S : PSY_3GPP_CLIP_LO_L;
const float clip_high = short_window ? PSY_3GPP_CLIP_HI_S : PSY_3GPP_CLIP_HI_L;
float clipped_pe, bit_save, bit_spend, bit_factor, fill_level;
ctx->fill_level += ctx->frame_bits - bits;
ctx->fill_level = av_clip(ctx->fill_level, 0, size);
fill_level = av_clipf((float)ctx->fill_level / size, clip_low, clip_high);
clipped_pe = av_clipf(pe, ctx->pe.min, ctx->pe.max);
bit_save = (fill_level + bitsave_add) * bitsave_slope;
assert(bit_save <= 0.3f && bit_save >= -0.05000001f);
bit_spend = (fill_level + bitspend_add) * bitspend_slope;
assert(bit_spend <= 0.5f && bit_spend >= -0.1f);
/* The bit factor graph in the spec is obviously incorrect.
* bit_spend + ((bit_spend - bit_spend))...
* The reference encoder subtracts everything from 1, but also seems incorrect.
* 1 - bit_save + ((bit_spend + bit_save))...
* Hopefully below is correct.
*/
bit_factor = 1.0f - bit_save + ((bit_spend - bit_save) / (ctx->pe.max - ctx->pe.min)) * (clipped_pe - ctx->pe.min);
/* NOTE: The reference encoder attempts to center pe max/min around the current pe. */
ctx->pe.max = FFMAX(pe, ctx->pe.max);
ctx->pe.min = FFMIN(pe, ctx->pe.min);
return FFMIN(ctx->frame_bits * bit_factor, ctx->frame_bits + size - bits);
}
static float calc_pe_3gpp(AacPsyBand *band)
{
float pe, a;
band->pe = 0.0f;
band->pe_const = 0.0f;
band->active_lines = 0.0f;
if (band->energy > band->thr) {
a = log2f(band->energy);
pe = a - log2f(band->thr);
band->active_lines = band->nz_lines;
if (pe < PSY_3GPP_C1) {
pe = pe * PSY_3GPP_C3 + PSY_3GPP_C2;
a = a * PSY_3GPP_C3 + PSY_3GPP_C2;
band->active_lines *= PSY_3GPP_C3;
}
band->pe = pe * band->nz_lines;
band->pe_const = a * band->nz_lines;
}
return band->pe;
}
static float calc_reduction_3gpp(float a, float desired_pe, float pe,
float active_lines)
{
float thr_avg, reduction;
thr_avg = powf(2.0f, (a - pe) / (4.0f * active_lines));
reduction = powf(2.0f, (a - desired_pe) / (4.0f * active_lines)) - thr_avg;
return FFMAX(reduction, 0.0f);
}
static float calc_reduced_thr_3gpp(AacPsyBand *band, float min_snr,
float reduction)
{
float thr = band->thr;
if (band->energy > thr) {
thr = powf(thr, 0.25f) + reduction;
thr = powf(thr, 4.0f);
/* This deviates from the 3GPP spec to match the reference encoder.
* It performs min(thr_reduced, max(thr, energy/min_snr)) only for bands
* that have hole avoidance on (active or inactive). It always reduces the
* threshold of bands with hole avoidance off.
*/
if (thr > band->energy * min_snr && band->avoid_holes != PSY_3GPP_AH_NONE) {
thr = FFMAX(band->thr, band->energy * min_snr);
band->avoid_holes = PSY_3GPP_AH_ACTIVE;
}
}
return thr;
}
/** /**
* Calculate band thresholds as suggested in 3GPP TS26.403 * Calculate band thresholds as suggested in 3GPP TS26.403
*/ */
@@ -395,37 +563,167 @@ static void psy_3gpp_analyze(FFPsyContext *ctx, int channel,
AacPsyChannel *pch = &pctx->ch[channel]; AacPsyChannel *pch = &pctx->ch[channel];
int start = 0; int start = 0;
int i, w, g; int i, w, g;
const int num_bands = ctx->num_bands[wi->num_windows == 8]; float desired_bits, desired_pe, delta_pe, reduction, spread_en[128] = {0};
const uint8_t *band_sizes = ctx->bands[wi->num_windows == 8]; float a = 0.0f, active_lines = 0.0f, norm_fac = 0.0f;
AacPsyCoeffs *coeffs = pctx->psy_coef[wi->num_windows == 8]; float pe = pctx->chan_bitrate > 32000 ? 0.0f : FFMAX(50.0f, 100.0f - pctx->chan_bitrate * 100.0f / 32000.0f);
const int num_bands = ctx->num_bands[wi->num_windows == 8];
const uint8_t *band_sizes = ctx->bands[wi->num_windows == 8];
AacPsyCoeffs *coeffs = pctx->psy_coef[wi->num_windows == 8];
const float avoid_hole_thr = wi->num_windows == 8 ? PSY_3GPP_AH_THR_SHORT : PSY_3GPP_AH_THR_LONG;
//calculate energies, initial thresholds and related values - 5.4.2 "Threshold Calculation" //calculate energies, initial thresholds and related values - 5.4.2 "Threshold Calculation"
for (w = 0; w < wi->num_windows*16; w += 16) { for (w = 0; w < wi->num_windows*16; w += 16) {
for (g = 0; g < num_bands; g++) { for (g = 0; g < num_bands; g++) {
AacPsyBand *band = &pch->band[w+g]; AacPsyBand *band = &pch->band[w+g];
float form_factor = 0.0f;
band->energy = 0.0f; band->energy = 0.0f;
for (i = 0; i < band_sizes[g]; i++) for (i = 0; i < band_sizes[g]; i++) {
band->energy += coefs[start+i] * coefs[start+i]; band->energy += coefs[start+i] * coefs[start+i];
band->thr = band->energy * 0.001258925f; form_factor += sqrtf(fabs(coefs[start+i]));
start += band_sizes[g]; }
band->thr = band->energy * 0.001258925f;
band->nz_lines = form_factor / powf(band->energy / band_sizes[g], 0.25f);
start += band_sizes[g];
} }
} }
//modify thresholds and energies - spread, threshold in quiet, pre-echo control //modify thresholds and energies - spread, threshold in quiet, pre-echo control
for (w = 0; w < wi->num_windows*16; w += 16) { for (w = 0; w < wi->num_windows*16; w += 16) {
AacPsyBand *bands = &pch->band[w]; AacPsyBand *bands = &pch->band[w];
//5.4.2.3 "Spreading" & 5.4.3 "Spreaded Energy Calculation" //5.4.2.3 "Spreading" & 5.4.3 "Spreaded Energy Calculation"
for (g = 1; g < num_bands; g++) spread_en[0] = bands[0].energy;
bands[g].thr = FFMAX(bands[g].thr, bands[g-1].thr * coeffs[g].spread_hi[0]); for (g = 1; g < num_bands; g++) {
for (g = num_bands - 2; g >= 0; g--) bands[g].thr = FFMAX(bands[g].thr, bands[g-1].thr * coeffs[g].spread_hi[0]);
bands[g].thr = FFMAX(bands[g].thr, bands[g+1].thr * coeffs[g].spread_low[0]); spread_en[w+g] = FFMAX(bands[g].energy, spread_en[w+g-1] * coeffs[g].spread_hi[1]);
}
for (g = num_bands - 2; g >= 0; g--) {
bands[g].thr = FFMAX(bands[g].thr, bands[g+1].thr * coeffs[g].spread_low[0]);
spread_en[w+g] = FFMAX(spread_en[w+g], spread_en[w+g+1] * coeffs[g].spread_low[1]);
}
//5.4.2.4 "Threshold in quiet" //5.4.2.4 "Threshold in quiet"
for (g = 0; g < num_bands; g++) { for (g = 0; g < num_bands; g++) {
AacPsyBand *band = &bands[g]; AacPsyBand *band = &bands[g];
band->thr_quiet = band->thr = FFMAX(band->thr, coeffs[g].ath); band->thr_quiet = band->thr = FFMAX(band->thr, coeffs[g].ath);
//5.4.2.5 "Pre-echo control" //5.4.2.5 "Pre-echo control"
if (!(wi->window_type[0] == LONG_STOP_SEQUENCE || (wi->window_type[1] == LONG_START_SEQUENCE && !w))) if (!(wi->window_type[0] == LONG_STOP_SEQUENCE || (wi->window_type[1] == LONG_START_SEQUENCE && !w)))
band->thr = FFMAX(PSY_3GPP_RPEMIN*band->thr, FFMIN(band->thr, band->thr = FFMAX(PSY_3GPP_RPEMIN*band->thr, FFMIN(band->thr,
PSY_3GPP_RPELEV*pch->prev_band[w+g].thr_quiet)); PSY_3GPP_RPELEV*pch->prev_band[w+g].thr_quiet));
/* 5.6.1.3.1 "Prepatory steps of the perceptual entropy calculation" */
pe += calc_pe_3gpp(band);
a += band->pe_const;
active_lines += band->active_lines;
/* 5.6.1.3.3 "Selection of the bands for avoidance of holes" */
if (spread_en[w+g] * avoid_hole_thr > band->energy || coeffs[g].min_snr > 1.0f)
band->avoid_holes = PSY_3GPP_AH_NONE;
else
band->avoid_holes = PSY_3GPP_AH_INACTIVE;
}
}
/* 5.6.1.3.2 "Calculation of the desired perceptual entropy" */
ctx->pe[channel] = pe;
desired_bits = calc_bit_demand(pctx, pe, ctx->bitres.bits, ctx->bitres.size, wi->num_windows == 8);
desired_pe = PSY_3GPP_BITS_TO_PE(desired_bits);
/* NOTE: PE correction is kept simple. During initial testing it had very
* little effect on the final bitrate. Probably a good idea to come
* back and do more testing later.
*/
if (ctx->bitres.bits > 0)
desired_pe *= av_clipf(pctx->pe.previous / PSY_3GPP_BITS_TO_PE(ctx->bitres.bits),
0.85f, 1.15f);
pctx->pe.previous = PSY_3GPP_BITS_TO_PE(desired_bits);
if (desired_pe < pe) {
/* 5.6.1.3.4 "First Estimation of the reduction value" */
for (w = 0; w < wi->num_windows*16; w += 16) {
reduction = calc_reduction_3gpp(a, desired_pe, pe, active_lines);
pe = 0.0f;
a = 0.0f;
active_lines = 0.0f;
for (g = 0; g < num_bands; g++) {
AacPsyBand *band = &pch->band[w+g];
band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction);
/* recalculate PE */
pe += calc_pe_3gpp(band);
a += band->pe_const;
active_lines += band->active_lines;
}
}
/* 5.6.1.3.5 "Second Estimation of the reduction value" */
for (i = 0; i < 2; i++) {
float pe_no_ah = 0.0f, desired_pe_no_ah;
active_lines = a = 0.0f;
for (w = 0; w < wi->num_windows*16; w += 16) {
for (g = 0; g < num_bands; g++) {
AacPsyBand *band = &pch->band[w+g];
if (band->avoid_holes != PSY_3GPP_AH_ACTIVE) {
pe_no_ah += band->pe;
a += band->pe_const;
active_lines += band->active_lines;
}
}
}
desired_pe_no_ah = FFMAX(desired_pe - (pe - pe_no_ah), 0.0f);
if (active_lines > 0.0f)
reduction += calc_reduction_3gpp(a, desired_pe_no_ah, pe_no_ah, active_lines);
pe = 0.0f;
for (w = 0; w < wi->num_windows*16; w += 16) {
for (g = 0; g < num_bands; g++) {
AacPsyBand *band = &pch->band[w+g];
if (active_lines > 0.0f)
band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction);
pe += calc_pe_3gpp(band);
band->norm_fac = band->active_lines / band->thr;
norm_fac += band->norm_fac;
}
}
delta_pe = desired_pe - pe;
if (fabs(delta_pe) > 0.05f * desired_pe)
break;
}
if (pe < 1.15f * desired_pe) {
/* 6.6.1.3.6 "Final threshold modification by linearization" */
norm_fac = 1.0f / norm_fac;
for (w = 0; w < wi->num_windows*16; w += 16) {
for (g = 0; g < num_bands; g++) {
AacPsyBand *band = &pch->band[w+g];
if (band->active_lines > 0.5f) {
float delta_sfb_pe = band->norm_fac * norm_fac * delta_pe;
float thr = band->thr;
thr *= powf(2.0f, delta_sfb_pe / band->active_lines);
if (thr > coeffs[g].min_snr * band->energy && band->avoid_holes == PSY_3GPP_AH_INACTIVE)
thr = FFMAX(band->thr, coeffs[g].min_snr * band->energy);
band->thr = thr;
}
}
}
} else {
/* 5.6.1.3.7 "Further perceptual entropy reduction" */
g = num_bands;
while (pe > desired_pe && g--) {
for (w = 0; w < wi->num_windows*16; w+= 16) {
AacPsyBand *band = &pch->band[w+g];
if (band->avoid_holes != PSY_3GPP_AH_NONE && coeffs[g].min_snr < PSY_SNR_1DB) {
coeffs[g].min_snr = PSY_SNR_1DB;
band->thr = band->energy * PSY_SNR_1DB;
pe += band->active_lines * 1.5f - band->pe;
}
}
}
/* TODO: allow more holes (unused without mid/side) */
} }
} }

View File

@@ -35,6 +35,7 @@
#include <stdint.h> #include <stdint.h>
#include <float.h> #include <float.h>
#include <math.h>
#define ENVELOPE_ADJUSTMENT_OFFSET 2 #define ENVELOPE_ADJUSTMENT_OFFSET 2
#define NOISE_FLOOR_OFFSET 6.0f #define NOISE_FLOOR_OFFSET 6.0f

View File

@@ -750,6 +750,7 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
case CODEC_ID_ADPCM_EA_R1: case CODEC_ID_ADPCM_EA_R1:
case CODEC_ID_ADPCM_EA_R2: case CODEC_ID_ADPCM_EA_R2:
case CODEC_ID_ADPCM_EA_R3: case CODEC_ID_ADPCM_EA_R3:
case CODEC_ID_ADPCM_EA_XAS:
max_channels = 6; max_channels = 6;
break; break;
} }

View File

@@ -365,6 +365,7 @@ void avcodec_register_all(void)
REGISTER_ENCDEC (XSUB, xsub); REGISTER_ENCDEC (XSUB, xsub);
/* external libraries */ /* external libraries */
REGISTER_DECODER (LIBCELT, libcelt);
REGISTER_ENCDEC (LIBDIRAC, libdirac); REGISTER_ENCDEC (LIBDIRAC, libdirac);
REGISTER_ENCODER (LIBFAAC, libfaac); REGISTER_ENCODER (LIBFAAC, libfaac);
REGISTER_ENCDEC (LIBGSM, libgsm); REGISTER_ENCDEC (LIBGSM, libgsm);

View File

@@ -122,7 +122,7 @@ endfunc
function fft_pass_neon function fft_pass_neon
push {r4,lr} push {r4,lr}
movrel lr, coefs + 24 movrel lr, coefs+24
vld1.16 {d30}, [lr,:64] vld1.16 {d30}, [lr,:64]
lsl r12, r2, #3 lsl r12, r2, #3
vmov d31, d30 vmov d31, d30

View File

@@ -186,7 +186,7 @@ static int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){
obuf[i] = c ^ buf[i]; obuf[i] = c ^ buf[i];
if (off) if (off)
av_log(NULL,AV_LOG_DEBUG,"Offset of %d not handled, post sample on ffmpeg-dev.\n",off); av_log_ask_for_sample(NULL, "Offset of %d not handled.\n", off);
return off; return off;
} }

View File

@@ -342,6 +342,7 @@ enum CodecID {
CODEC_ID_BINKAUDIO_DCT, CODEC_ID_BINKAUDIO_DCT,
CODEC_ID_AAC_LATM, CODEC_ID_AAC_LATM,
CODEC_ID_QDMC, CODEC_ID_QDMC,
CODEC_ID_CELT,
/* subtitle codecs */ /* subtitle codecs */
CODEC_ID_DVD_SUBTITLE= 0x17000, CODEC_ID_DVD_SUBTITLE= 0x17000,
@@ -700,6 +701,10 @@ typedef struct RcOverride{
* Codec supports frame-level multithreading. * Codec supports frame-level multithreading.
*/ */
#define CODEC_CAP_FRAME_THREADS 0x1000 #define CODEC_CAP_FRAME_THREADS 0x1000
/**
* Codec supports slice-based (or partition-based) multithreading.
*/
#define CODEC_CAP_SLICE_THREADS 0x2000
//The following defines may change, don't expect compatibility if you use them. //The following defines may change, don't expect compatibility if you use them.
#define MB_TYPE_INTRA4x4 0x0001 #define MB_TYPE_INTRA4x4 0x0001
@@ -3702,7 +3707,7 @@ int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h);
enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt); enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);
#if LIBAVCODEC_VERSION_MAJOR < 53 #if FF_API_THREAD_INIT
/** /**
* @deprecated Set s->thread_count before calling avcodec_open() instead of calling this. * @deprecated Set s->thread_count before calling avcodec_open() instead of calling this.
*/ */
@@ -4319,7 +4324,7 @@ void av_log_missing_feature(void *avc, const char *feature, int want_sample);
* a pointer to an AVClass struct * a pointer to an AVClass struct
* @param[in] msg string containing an optional message, or NULL if no message * @param[in] msg string containing an optional message, or NULL if no message
*/ */
void av_log_ask_for_sample(void *avc, const char *msg); void av_log_ask_for_sample(void *avc, const char *msg, ...);
/** /**
* Register the hardware accelerator hwaccel. * Register the hardware accelerator hwaccel.

View File

@@ -1136,7 +1136,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
switch (q->subpacket[s].cookversion) { switch (q->subpacket[s].cookversion) {
case MONO: case MONO:
if (q->nb_channels != 1) { if (q->nb_channels != 1) {
av_log(avctx,AV_LOG_ERROR,"Container channels != 1, report sample!\n"); av_log_ask_for_sample(avctx, "Container channels != 1.\n");
return -1; return -1;
} }
av_log(avctx,AV_LOG_DEBUG,"MONO\n"); av_log(avctx,AV_LOG_DEBUG,"MONO\n");
@@ -1150,7 +1150,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
break; break;
case JOINT_STEREO: case JOINT_STEREO:
if (q->nb_channels != 2) { if (q->nb_channels != 2) {
av_log(avctx,AV_LOG_ERROR,"Container channels != 2, report sample!\n"); av_log_ask_for_sample(avctx, "Container channels != 2.\n");
return -1; return -1;
} }
av_log(avctx,AV_LOG_DEBUG,"JOINT_STEREO\n"); av_log(avctx,AV_LOG_DEBUG,"JOINT_STEREO\n");
@@ -1188,7 +1188,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
break; break;
default: default:
av_log(avctx,AV_LOG_ERROR,"Unknown Cook version, report sample!\n"); av_log_ask_for_sample(avctx, "Unknown Cook version.\n");
return -1; return -1;
break; break;
} }
@@ -1205,7 +1205,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
/* Try to catch some obviously faulty streams, othervise it might be exploitable */ /* Try to catch some obviously faulty streams, othervise it might be exploitable */
if (q->subpacket[s].total_subbands > 53) { if (q->subpacket[s].total_subbands > 53) {
av_log(avctx,AV_LOG_ERROR,"total_subbands > 53, report sample!\n"); av_log_ask_for_sample(avctx, "total_subbands > 53\n");
return -1; return -1;
} }
@@ -1215,7 +1215,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
} }
if (q->subpacket[s].subbands > 50) { if (q->subpacket[s].subbands > 50) {
av_log(avctx,AV_LOG_ERROR,"subbands > 50, report sample!\n"); av_log_ask_for_sample(avctx, "subbands > 50\n");
return -1; return -1;
} }
q->subpacket[s].gains1.now = q->subpacket[s].gain_1; q->subpacket[s].gains1.now = q->subpacket[s].gain_1;
@@ -1226,7 +1226,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
q->num_subpackets++; q->num_subpackets++;
s++; s++;
if (s > MAX_SUBPACKETS) { if (s > MAX_SUBPACKETS) {
av_log(avctx,AV_LOG_ERROR,"Too many subpackets > 5, report file!\n"); av_log_ask_for_sample(avctx, "Too many subpackets > 5\n");
return -1; return -1;
} }
} }
@@ -1268,7 +1268,9 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
/* Try to catch some obviously faulty streams, othervise it might be exploitable */ /* Try to catch some obviously faulty streams, othervise it might be exploitable */
if ((q->samples_per_channel == 256) || (q->samples_per_channel == 512) || (q->samples_per_channel == 1024)) { if ((q->samples_per_channel == 256) || (q->samples_per_channel == 512) || (q->samples_per_channel == 1024)) {
} else { } else {
av_log(avctx,AV_LOG_ERROR,"unknown amount of samples_per_channel = %d, report sample!\n",q->samples_per_channel); av_log_ask_for_sample(avctx,
"unknown amount of samples_per_channel = %d\n",
q->samples_per_channel);
return -1; return -1;
} }

View File

@@ -87,6 +87,7 @@
#include "h264.h" #include "h264.h"
#include "libavutil/imgutils.h" #include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h" #include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
/** Timeout parameter passed to DtsProcOutput() in us */ /** Timeout parameter passed to DtsProcOutput() in us */
#define OUTPUT_PROC_TIMEOUT 50 #define OUTPUT_PROC_TIMEOUT 50
@@ -118,6 +119,7 @@ typedef struct OpaqueList {
} OpaqueList; } OpaqueList;
typedef struct { typedef struct {
AVClass *av_class;
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame pic; AVFrame pic;
HANDLE dev; HANDLE dev;
@@ -137,8 +139,20 @@ typedef struct {
OpaqueList *head; OpaqueList *head;
OpaqueList *tail; OpaqueList *tail;
/* Options */
uint32_t sWidth;
} CHDContext; } CHDContext;
static const AVOption options[] = {
{ "crystalhd_downscale_width",
"Turn on downscaling to the specified width",
offsetof(CHDContext, sWidth),
FF_OPT_TYPE_INT, 0, 0, UINT32_MAX,
AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM, },
{ NULL, },
};
/***************************************************************************** /*****************************************************************************
* Helper functions * Helper functions
@@ -434,6 +448,11 @@ static av_cold int init(AVCodecContext *avctx)
} }
format.mSubtype = subtype; format.mSubtype = subtype;
if (priv->sWidth) {
format.bEnableScaling = 1;
format.ScalingParams.sWidth = priv->sWidth;
}
/* Get a decoder instance */ /* Get a decoder instance */
av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n"); av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
// Initialize the Link and Decoder devices // Initialize the Link and Decoder devices
@@ -948,6 +967,13 @@ static int decode(AVCodecContext *avctx, void *data, int *data_size, AVPacket *a
#if CONFIG_H264_CRYSTALHD_DECODER #if CONFIG_H264_CRYSTALHD_DECODER
static AVClass h264_class = {
"h264_crystalhd",
av_default_item_name,
options,
LIBAVUTIL_VERSION_INT,
};
AVCodec ff_h264_crystalhd_decoder = { AVCodec ff_h264_crystalhd_decoder = {
.name = "h264_crystalhd", .name = "h264_crystalhd",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
@@ -960,10 +986,18 @@ AVCodec ff_h264_crystalhd_decoder = {
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (CrystalHD acceleration)"), .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (CrystalHD acceleration)"),
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE}, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE},
.priv_class = &h264_class,
}; };
#endif #endif
#if CONFIG_MPEG2_CRYSTALHD_DECODER #if CONFIG_MPEG2_CRYSTALHD_DECODER
static AVClass mpeg2_class = {
"mpeg2_crystalhd",
av_default_item_name,
options,
LIBAVUTIL_VERSION_INT,
};
AVCodec ff_mpeg2_crystalhd_decoder = { AVCodec ff_mpeg2_crystalhd_decoder = {
.name = "mpeg2_crystalhd", .name = "mpeg2_crystalhd",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
@@ -976,10 +1010,18 @@ AVCodec ff_mpeg2_crystalhd_decoder = {
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MPEG-2 Video (CrystalHD acceleration)"), .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 Video (CrystalHD acceleration)"),
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE}, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE},
.priv_class = &mpeg2_class,
}; };
#endif #endif
#if CONFIG_MPEG4_CRYSTALHD_DECODER #if CONFIG_MPEG4_CRYSTALHD_DECODER
static AVClass mpeg4_class = {
"mpeg4_crystalhd",
av_default_item_name,
options,
LIBAVUTIL_VERSION_INT,
};
AVCodec ff_mpeg4_crystalhd_decoder = { AVCodec ff_mpeg4_crystalhd_decoder = {
.name = "mpeg4_crystalhd", .name = "mpeg4_crystalhd",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
@@ -992,10 +1034,18 @@ AVCodec ff_mpeg4_crystalhd_decoder = {
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 (CrystalHD acceleration)"), .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 (CrystalHD acceleration)"),
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE}, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE},
.priv_class = &mpeg4_class,
}; };
#endif #endif
#if CONFIG_MSMPEG4_CRYSTALHD_DECODER #if CONFIG_MSMPEG4_CRYSTALHD_DECODER
static AVClass msmpeg4_class = {
"msmpeg4_crystalhd",
av_default_item_name,
options,
LIBAVUTIL_VERSION_INT,
};
AVCodec ff_msmpeg4_crystalhd_decoder = { AVCodec ff_msmpeg4_crystalhd_decoder = {
.name = "msmpeg4_crystalhd", .name = "msmpeg4_crystalhd",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
@@ -1008,10 +1058,18 @@ AVCodec ff_msmpeg4_crystalhd_decoder = {
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 Microsoft variant version 3 (CrystalHD acceleration)"), .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 Microsoft variant version 3 (CrystalHD acceleration)"),
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE}, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE},
.priv_class = &msmpeg4_class,
}; };
#endif #endif
#if CONFIG_VC1_CRYSTALHD_DECODER #if CONFIG_VC1_CRYSTALHD_DECODER
static AVClass vc1_class = {
"vc1_crystalhd",
av_default_item_name,
options,
LIBAVUTIL_VERSION_INT,
};
AVCodec ff_vc1_crystalhd_decoder = { AVCodec ff_vc1_crystalhd_decoder = {
.name = "vc1_crystalhd", .name = "vc1_crystalhd",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
@@ -1024,10 +1082,18 @@ AVCodec ff_vc1_crystalhd_decoder = {
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 (CrystalHD acceleration)"), .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 (CrystalHD acceleration)"),
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE}, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE},
.priv_class = &vc1_class,
}; };
#endif #endif
#if CONFIG_WMV3_CRYSTALHD_DECODER #if CONFIG_WMV3_CRYSTALHD_DECODER
static AVClass wmv3_class = {
"wmv3_crystalhd",
av_default_item_name,
options,
LIBAVUTIL_VERSION_INT,
};
AVCodec ff_wmv3_crystalhd_decoder = { AVCodec ff_wmv3_crystalhd_decoder = {
.name = "wmv3_crystalhd", .name = "wmv3_crystalhd",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
@@ -1040,5 +1106,6 @@ AVCodec ff_wmv3_crystalhd_decoder = {
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 (CrystalHD acceleration)"), .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 (CrystalHD acceleration)"),
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE}, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUYV422, PIX_FMT_NONE},
.priv_class = &wmv3_class,
}; };
#endif #endif

View File

@@ -869,6 +869,7 @@ AVCodec ff_dnxhd_encoder = {
dnxhd_encode_init, dnxhd_encode_init,
dnxhd_encode_picture, dnxhd_encode_picture,
dnxhd_encode_end, dnxhd_encode_end,
.capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_NONE}, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"), .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
.priv_class = &class, .priv_class = &class,

View File

@@ -1297,6 +1297,7 @@ AVCodec ff_dvvideo_encoder = {
sizeof(DVVideoContext), sizeof(DVVideoContext),
dvvideo_init_encoder, dvvideo_init_encoder,
dvvideo_encode_frame, dvvideo_encode_frame,
.capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts = (const enum PixelFormat[]) {PIX_FMT_YUV411P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, PIX_FMT_NONE}, .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_YUV411P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"), .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
}; };
@@ -1312,7 +1313,7 @@ AVCodec ff_dvvideo_decoder = {
NULL, NULL,
dvvideo_close, dvvideo_close,
dvvideo_decode_frame, dvvideo_decode_frame,
CODEC_CAP_DR1, CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
NULL, NULL,
.max_lowres = 3, .max_lowres = 3,
.long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"), .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),

View File

@@ -1795,7 +1795,7 @@ AVCodec ff_ffv1_decoder = {
NULL, NULL,
common_end, common_end,
decode_frame, decode_frame,
CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/, CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ | CODEC_CAP_SLICE_THREADS,
NULL, NULL,
.long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"), .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
}; };
@@ -1809,6 +1809,7 @@ AVCodec ff_ffv1_encoder = {
encode_init, encode_init,
encode_frame, encode_frame,
common_end, common_end,
.capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_NONE}, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"), .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
}; };

View File

@@ -22,6 +22,7 @@
#include "libavutil/crc.h" #include "libavutil/crc.h"
#include "flac.h" #include "flac.h"
#include "flacdata.h" #include "flacdata.h"
#include "vorbis.h"
static const int8_t sample_size_table[] = { 0, 8, 12, 0, 16, 20, 24, 0 }; static const int8_t sample_size_table[] = { 0, 8, 12, 0, 16, 20, 24, 0 };
@@ -54,6 +55,8 @@ int ff_flac_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb,
fi->ch_mode = get_bits(gb, 4); fi->ch_mode = get_bits(gb, 4);
if (fi->ch_mode < FLAC_MAX_CHANNELS) { if (fi->ch_mode < FLAC_MAX_CHANNELS) {
fi->channels = fi->ch_mode + 1; fi->channels = fi->ch_mode + 1;
if (fi->ch_mode <= 5)
avctx->channel_layout = ff_vorbis_channel_layouts[fi->ch_mode];
fi->ch_mode = FLAC_CHMODE_INDEPENDENT; fi->ch_mode = FLAC_CHMODE_INDEPENDENT;
} else if (fi->ch_mode <= FLAC_CHMODE_MID_SIDE) { } else if (fi->ch_mode <= FLAC_CHMODE_MID_SIDE) {
fi->channels = 2; fi->channels = 2;

View File

@@ -61,9 +61,9 @@
#define CHECK_PIXEL_PTR(n) \ #define CHECK_PIXEL_PTR(n) \
if (pixel_ptr + n > pixel_limit) { \ if (pixel_ptr + n > pixel_limit) { \
av_log (s->avctx, AV_LOG_INFO, "Problem: pixel_ptr >= pixel_limit (%d >= %d)\n", \ av_log (s->avctx, AV_LOG_ERROR, "Invalid pixel_ptr = %d > pixel_limit = %d\n", \
pixel_ptr + n, pixel_limit); \ pixel_ptr + n, pixel_limit); \
return -1; \ return AVERROR_INVALIDDATA; \
} \ } \
typedef struct FlicDecodeContext { typedef struct FlicDecodeContext {
@@ -181,6 +181,11 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
/* iterate through the chunks */ /* iterate through the chunks */
while ((frame_size > 0) && (num_chunks > 0)) { while ((frame_size > 0) && (num_chunks > 0)) {
chunk_size = AV_RL32(&buf[stream_ptr]); chunk_size = AV_RL32(&buf[stream_ptr]);
if (chunk_size > frame_size) {
av_log(avctx, AV_LOG_WARNING,
"Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size);
chunk_size = frame_size;
}
stream_ptr += 4; stream_ptr += 4;
chunk_type = AV_RL16(&buf[stream_ptr]); chunk_type = AV_RL16(&buf[stream_ptr]);
stream_ptr += 2; stream_ptr += 2;

View File

@@ -46,6 +46,7 @@ typedef struct FrapsContext{
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame; AVFrame frame;
uint8_t *tmpbuf; uint8_t *tmpbuf;
int tmpbuf_size;
DSPContext dsp; DSPContext dsp;
} FrapsContext; } FrapsContext;
@@ -272,7 +273,9 @@ static int decode_frame(AVCodecContext *avctx,
offs[planes] = buf_size; offs[planes] = buf_size;
for(i = 0; i < planes; i++){ for(i = 0; i < planes; i++){
is_chroma = !!i; is_chroma = !!i;
s->tmpbuf = av_realloc(s->tmpbuf, offs[i + 1] - offs[i] - 1024 + FF_INPUT_BUFFER_PADDING_SIZE); av_fast_malloc(&s->tmpbuf, &s->tmpbuf_size, offs[i + 1] - offs[i] - 1024 + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->tmpbuf)
return AVERROR(ENOMEM);
if(fraps2_decode_plane(s, f->data[i], f->linesize[i], avctx->width >> is_chroma, if(fraps2_decode_plane(s, f->data[i], f->linesize[i], avctx->width >> is_chroma,
avctx->height >> is_chroma, buf + offs[i], offs[i + 1] - offs[i], is_chroma, 1) < 0) { avctx->height >> is_chroma, buf + offs[i], offs[i + 1] - offs[i], is_chroma, 1) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i); av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i);
@@ -314,7 +317,9 @@ static int decode_frame(AVCodecContext *avctx,
} }
offs[planes] = buf_size; offs[planes] = buf_size;
for(i = 0; i < planes; i++){ for(i = 0; i < planes; i++){
s->tmpbuf = av_realloc(s->tmpbuf, offs[i + 1] - offs[i] - 1024 + FF_INPUT_BUFFER_PADDING_SIZE); av_fast_malloc(&s->tmpbuf, &s->tmpbuf_size, offs[i + 1] - offs[i] - 1024 + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->tmpbuf)
return AVERROR(ENOMEM);
if(fraps2_decode_plane(s, f->data[0] + i + (f->linesize[0] * (avctx->height - 1)), -f->linesize[0], if(fraps2_decode_plane(s, f->data[0] + i + (f->linesize[0] * (avctx->height - 1)), -f->linesize[0],
avctx->width, avctx->height, buf + offs[i], offs[i + 1] - offs[i], 0, 3) < 0) { avctx->width, avctx->height, buf + offs[i], offs[i + 1] - offs[i], 0, 3) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i); av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i);

View File

@@ -70,11 +70,15 @@ static int h261_parse(AVCodecParserContext *s,
ParseContext *pc = s->priv_data; ParseContext *pc = s->priv_data;
int next; int next;
next= h261_find_frame_end(pc,avctx, buf, buf_size); if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) { next = buf_size;
*poutbuf = NULL; } else {
*poutbuf_size = 0; next= h261_find_frame_end(pc,avctx, buf, buf_size);
return buf_size; if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
*poutbuf = NULL;
*poutbuf_size = 0;
return buf_size;
}
} }
*poutbuf = buf; *poutbuf = buf;
*poutbuf_size = buf_size; *poutbuf_size = buf_size;

View File

@@ -70,12 +70,16 @@ static int h263_parse(AVCodecParserContext *s,
ParseContext *pc = s->priv_data; ParseContext *pc = s->priv_data;
int next; int next;
next= ff_h263_find_frame_end(pc, buf, buf_size); if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
next = buf_size;
} else {
next= ff_h263_find_frame_end(pc, buf, buf_size);
if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) { if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
*poutbuf = NULL; *poutbuf = NULL;
*poutbuf_size = 0; *poutbuf_size = 0;
return buf_size; return buf_size;
}
} }
*poutbuf = buf; *poutbuf = buf;

View File

@@ -3467,7 +3467,9 @@ AVCodec ff_h264_decoder = {
NULL, NULL,
ff_h264_decode_end, ff_h264_decode_end,
decode_frame, decode_frame,
/*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_FRAME_THREADS, /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 | CODEC_CAP_DELAY |
CODEC_CAP_FRAME_THREADS |
CODEC_CAP_SLICE_THREADS,
.flush= flush_dpb, .flush= flush_dpb,
.long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"), .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy), .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),

View File

@@ -30,10 +30,72 @@
#include "avcodec.h" #include "avcodec.h"
#include "get_bits.h" #include "get_bits.h"
// TODO: masking bits
typedef enum {
MASK_NONE,
MASK_HAS_MASK,
MASK_HAS_TRANSPARENT_COLOR,
MASK_LASSO
} mask_type;
/**
* Gets the actual extra data after video preperties which contains
* the raw CMAP palette data beyond the IFF extra context.
*
* @param avctx the AVCodecContext where to extract raw palette data from
* @return pointer to raw CMAP palette data
*/
static av_always_inline uint8_t *get_palette_data(const AVCodecContext *const avctx) {
return avctx->extradata + AV_RB16(avctx->extradata);
}
/**
* Gets the size of CMAP palette data beyond the IFF extra context.
* Please note that any value < 2 of IFF extra context or
* raw extradata < 0 is considered as illegal extradata.
*
* @param avctx the AVCodecContext where to extract palette data size from
* @return size of raw palette data in bytes
*/
static av_always_inline int get_palette_size(const AVCodecContext *const avctx) {
return avctx->extradata_size - AV_RB16(avctx->extradata);
}
/**
* Gets the actual raw image data after video properties which
* contains the raw image data beyond the IFF extra context.
*
* @param avpkt the AVPacket where to extract raw image data from
* @return pointer to raw image data
*/
static av_always_inline uint8_t *get_image_data(const AVPacket *const avpkt) {
return avpkt->data + AV_RB16(avpkt->data);
}
/**
* Gets the size of raw image data beyond the IFF extra context.
* Please note that any value < 2 of either IFF extra context
* or raw image data is considered as an illegal packet.
*
* @param avpkt the AVPacket where to extract image data size from
* @return size of raw image data in bytes
*/
static av_always_inline int get_image_size(const AVPacket *const avpkt) {
return avpkt->size - AV_RB16(avpkt->data);
}
typedef struct { typedef struct {
AVFrame frame; AVFrame frame;
int planesize; int planesize;
uint8_t * planebuf; uint8_t * planebuf;
uint8_t * ham_buf; ///< temporary buffer for planar to chunky conversation
uint32_t *ham_palbuf; ///< HAM decode table
unsigned compression; ///< delta compression method used
unsigned bpp; ///< bits per plane to decode (differs from bits_per_coded_sample if HAM)
unsigned ham; ///< 0 if non-HAM or number of hold bits (6 for bpp > 6, 4 otherwise)
unsigned flags; ///< 1 for EHB, 0 is no extra half darkening
unsigned transparency; ///< TODO: transparency color index in palette
unsigned masking; ///< TODO: masking method used
int init; // 1 if buffer and palette data already initialized, 0 otherwise int init; // 1 if buffer and palette data already initialized, 0 otherwise
} IffContext; } IffContext;
@@ -122,6 +184,7 @@ static av_always_inline uint32_t gray2rgb(const uint32_t x) {
static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal) static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
{ {
int count, i; int count, i;
const uint8_t *const extradata = get_palette_data(avctx);
if (avctx->bits_per_coded_sample > 8) { if (avctx->bits_per_coded_sample > 8) {
av_log(avctx, AV_LOG_ERROR, "bit_per_coded_sample > 8 not supported\n"); av_log(avctx, AV_LOG_ERROR, "bit_per_coded_sample > 8 not supported\n");
@@ -130,10 +193,10 @@ static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
count = 1 << avctx->bits_per_coded_sample; count = 1 << avctx->bits_per_coded_sample;
// If extradata is smaller than actually needed, fill the remaining with black. // If extradata is smaller than actually needed, fill the remaining with black.
count = FFMIN(avctx->extradata_size / 3, count); count = FFMIN(get_palette_size(avctx) / 3, count);
if (count) { if (count) {
for (i=0; i < count; i++) { for (i=0; i < count; i++) {
pal[i] = 0xFF000000 | AV_RB24( avctx->extradata + i*3 ); pal[i] = 0xFF000000 | AV_RB24(extradata + i*3);
} }
} else { // Create gray-scale color palette for bps < 8 } else { // Create gray-scale color palette for bps < 8
count = 1 << avctx->bits_per_coded_sample; count = 1 << avctx->bits_per_coded_sample;
@@ -145,15 +208,123 @@ static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
return 0; return 0;
} }
/**
* Extracts the IFF extra context and updates internal
* decoder structures.
*
* @param avctx the AVCodecContext where to extract extra context to
* @param avpkt the AVPacket to extract extra context from or NULL to use avctx
* @return 0 in case of success, a negative error code otherwise
*/
static int extract_header(AVCodecContext *const avctx,
const AVPacket *const avpkt) {
const uint8_t *buf;
unsigned buf_size;
IffContext *s = avctx->priv_data;
if (avpkt) {
if (avpkt->size < 2)
return AVERROR_INVALIDDATA;
buf = avpkt->data;
buf_size = bytestream_get_be16(&buf);
if (buf_size <= 1 || get_image_size(avpkt) <= 1) {
av_log(avctx, AV_LOG_ERROR,
"Invalid image size received: %u -> image data offset: %d\n",
buf_size, get_image_size(avpkt));
return AVERROR_INVALIDDATA;
}
} else {
if (avctx->extradata_size < 2)
return AVERROR_INVALIDDATA;
buf = avctx->extradata;
buf_size = bytestream_get_be16(&buf);
if (buf_size <= 1 || get_palette_size(avctx) < 0) {
av_log(avctx, AV_LOG_ERROR,
"Invalid palette size received: %u -> palette data offset: %d\n",
buf_size, get_palette_size(avctx));
return AVERROR_INVALIDDATA;
}
}
if (buf_size > 8) {
s->compression = bytestream_get_byte(&buf);
s->bpp = bytestream_get_byte(&buf);
s->ham = bytestream_get_byte(&buf);
s->flags = bytestream_get_byte(&buf);
s->transparency = bytestream_get_be16(&buf);
s->masking = bytestream_get_byte(&buf);
if (s->masking == MASK_HAS_TRANSPARENT_COLOR) {
av_log(avctx, AV_LOG_ERROR, "Transparency not supported\n");
return AVERROR_PATCHWELCOME;
} else if (s->masking != MASK_NONE) {
av_log(avctx, AV_LOG_ERROR, "Masking not supported\n");
return AVERROR_PATCHWELCOME;
}
if (!s->bpp || s->bpp > 32) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
return AVERROR_INVALIDDATA;
} else if (s->ham >= 8) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u\n", s->ham);
return AVERROR_INVALIDDATA;
}
av_freep(&s->ham_buf);
av_freep(&s->ham_palbuf);
if (s->ham) {
int i, count = FFMIN(get_palette_size(avctx) / 3, 1 << s->ham);
const uint8_t *const extradata = get_palette_data(avctx);
s->ham_buf = av_malloc((s->planesize * 8) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->ham_buf)
return AVERROR(ENOMEM);
s->ham_palbuf = av_malloc((8 * (1 << s->ham) * sizeof (uint32_t)) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->ham_palbuf) {
av_freep(&s->ham_buf);
return AVERROR(ENOMEM);
}
if (count) { // HAM with color palette attached
// prefill with black and palette and set HAM take direct value mask to zero
memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));
for (i=0; i < count; i++) {
s->ham_palbuf[i*2+1] = AV_RL24(extradata + i*3);
}
count = 1 << s->ham;
} else { // HAM with grayscale color palette
count = 1 << s->ham;
for (i=0; i < count; i++) {
s->ham_palbuf[i*2] = 0; // take direct color value from palette
s->ham_palbuf[i*2+1] = av_le2ne32(gray2rgb((i * 255) >> s->ham));
}
}
for (i=0; i < count; i++) {
uint32_t tmp = i << (8 - s->ham);
tmp |= tmp >> s->ham;
s->ham_palbuf[(i+count)*2] = 0x00FFFF; // just modify blue color component
s->ham_palbuf[(i+count*2)*2] = 0xFFFF00; // just modify red color component
s->ham_palbuf[(i+count*3)*2] = 0xFF00FF; // just modify green color component
s->ham_palbuf[(i+count)*2+1] = tmp << 16;
s->ham_palbuf[(i+count*2)*2+1] = tmp;
s->ham_palbuf[(i+count*3)*2+1] = tmp << 8;
}
} else if (s->flags & 1) { // EHB (ExtraHalfBrite) color palette
av_log(avctx, AV_LOG_ERROR, "ExtraHalfBrite (EHB) mode not supported\n");
return AVERROR_PATCHWELCOME;
}
}
return 0;
}
static av_cold int decode_init(AVCodecContext *avctx) static av_cold int decode_init(AVCodecContext *avctx)
{ {
IffContext *s = avctx->priv_data; IffContext *s = avctx->priv_data;
int err; int err;
if (avctx->bits_per_coded_sample <= 8) { if (avctx->bits_per_coded_sample <= 8) {
avctx->pix_fmt = (avctx->bits_per_coded_sample < 8 || avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
avctx->extradata_size) ? PIX_FMT_PAL8 (avctx->extradata_size >= 2 && get_palette_size(avctx)) ? PIX_FMT_PAL8
: PIX_FMT_GRAY8; : PIX_FMT_GRAY8;
} else if (avctx->bits_per_coded_sample <= 32) { } else if (avctx->bits_per_coded_sample <= 32) {
avctx->pix_fmt = PIX_FMT_BGR32; avctx->pix_fmt = PIX_FMT_BGR32;
} else { } else {
@@ -167,6 +338,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
if (!s->planebuf) if (!s->planebuf)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
s->bpp = avctx->bits_per_coded_sample;
if ((err = extract_header(avctx, NULL)) < 0)
return err;
s->frame.reference = 1; s->frame.reference = 1;
return 0; return 0;
@@ -214,6 +389,39 @@ static void decodeplane32(uint32_t *dst, const uint8_t *buf, int buf_size, int p
} while (--buf_size); } while (--buf_size);
} }
#define DECODE_HAM_PLANE32(x) \
first = buf[x] << 1; \
second = buf[(x)+1] << 1; \
delta &= pal[first++]; \
delta |= pal[first]; \
dst[x] = delta; \
delta &= pal[second++]; \
delta |= pal[second]; \
dst[(x)+1] = delta
/**
* Converts one line of HAM6/8-encoded chunky buffer to 24bpp.
*
* @param dst the destination 24bpp buffer
* @param buf the source 8bpp chunky buffer
* @param pal the HAM decode table
* @param buf_size the plane size in bytes
*/
static void decode_ham_plane32(uint32_t *dst, const uint8_t *buf,
const uint32_t *const pal, unsigned buf_size)
{
uint32_t delta = 0;
do {
uint32_t first, second;
DECODE_HAM_PLANE32(0);
DECODE_HAM_PLANE32(2);
DECODE_HAM_PLANE32(4);
DECODE_HAM_PLANE32(6);
buf += 8;
dst += 8;
} while (--buf_size);
}
/** /**
* Decode one complete byterun1 encoded line. * Decode one complete byterun1 encoded line.
* *
@@ -250,11 +458,14 @@ static int decode_frame_ilbm(AVCodecContext *avctx,
AVPacket *avpkt) AVPacket *avpkt)
{ {
IffContext *s = avctx->priv_data; IffContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->size >= 2 ? get_image_data(avpkt) : NULL;
int buf_size = avpkt->size; const int buf_size = avpkt->size >= 2 ? get_image_size(avpkt) : 0;
const uint8_t *buf_end = buf+buf_size; const uint8_t *buf_end = buf+buf_size;
int y, plane, res; int y, plane, res;
if ((res = extract_header(avctx, avpkt)) < 0)
return res;
if (s->init) { if (s->init) {
if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) { if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
@@ -274,16 +485,26 @@ static int decode_frame_ilbm(AVCodecContext *avctx,
for(y = 0; y < avctx->height; y++ ) { for(y = 0; y < avctx->height; y++ ) {
uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ]; uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
memset(row, 0, avctx->width); memset(row, 0, avctx->width);
for (plane = 0; plane < avctx->bits_per_coded_sample && buf < buf_end; plane++) { for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane); decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
buf += s->planesize; buf += s->planesize;
} }
} }
} else if (s->ham) { // HAM to PIX_FMT_BGR32
for (y = 0; y < avctx->height; y++) {
uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
memset(s->ham_buf, 0, avctx->width);
for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
decodeplane8(s->ham_buf, buf, FFMIN(s->planesize, buf_end - buf), plane);
buf += s->planesize;
}
decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
}
} else { // PIX_FMT_BGR32 } else { // PIX_FMT_BGR32
for(y = 0; y < avctx->height; y++ ) { for(y = 0; y < avctx->height; y++ ) {
uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]]; uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
memset(row, 0, avctx->width << 2); memset(row, 0, avctx->width << 2);
for (plane = 0; plane < avctx->bits_per_coded_sample && buf < buf_end; plane++) { for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
decodeplane32((uint32_t *) row, buf, FFMIN(s->planesize, buf_end - buf), plane); decodeplane32((uint32_t *) row, buf, FFMIN(s->planesize, buf_end - buf), plane);
buf += s->planesize; buf += s->planesize;
} }
@@ -295,6 +516,13 @@ static int decode_frame_ilbm(AVCodecContext *avctx,
memcpy(row, buf, FFMIN(avctx->width, buf_end - buf)); memcpy(row, buf, FFMIN(avctx->width, buf_end - buf));
buf += avctx->width + (avctx->width % 2); // padding if odd buf += avctx->width + (avctx->width % 2); // padding if odd
} }
} else { // IFF-PBM: HAM to PIX_FMT_BGR32
for (y = 0; y < avctx->height; y++) {
uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
memcpy(s->ham_buf, buf, FFMIN(avctx->width, buf_end - buf));
buf += avctx->width + (avctx->width & 1); // padding if odd
decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, avctx->width);
}
} }
*data_size = sizeof(AVFrame); *data_size = sizeof(AVFrame);
@@ -307,11 +535,13 @@ static int decode_frame_byterun1(AVCodecContext *avctx,
AVPacket *avpkt) AVPacket *avpkt)
{ {
IffContext *s = avctx->priv_data; IffContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->size >= 2 ? get_image_data(avpkt) : NULL;
int buf_size = avpkt->size; const int buf_size = avpkt->size >= 2 ? get_image_size(avpkt) : 0;
const uint8_t *buf_end = buf+buf_size; const uint8_t *buf_end = buf+buf_size;
int y, plane, res; int y, plane, res;
if ((res = extract_header(avctx, avpkt)) < 0)
return res;
if (s->init) { if (s->init) {
if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) { if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
@@ -331,26 +561,42 @@ static int decode_frame_byterun1(AVCodecContext *avctx,
for(y = 0; y < avctx->height ; y++ ) { for(y = 0; y < avctx->height ; y++ ) {
uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ]; uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
memset(row, 0, avctx->width); memset(row, 0, avctx->width);
for (plane = 0; plane < avctx->bits_per_coded_sample; plane++) { for (plane = 0; plane < s->bpp; plane++) {
buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end); buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
decodeplane8(row, s->planebuf, s->planesize, plane); decodeplane8(row, s->planebuf, s->planesize, plane);
} }
} }
} else if (s->ham) { // HAM to PIX_FMT_BGR32
for (y = 0; y < avctx->height ; y++) {
uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
memset(s->ham_buf, 0, avctx->width);
for (plane = 0; plane < s->bpp; plane++) {
buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
decodeplane8(s->ham_buf, s->planebuf, s->planesize, plane);
}
decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
}
} else { //PIX_FMT_BGR32 } else { //PIX_FMT_BGR32
for(y = 0; y < avctx->height ; y++ ) { for(y = 0; y < avctx->height ; y++ ) {
uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]]; uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
memset(row, 0, avctx->width << 2); memset(row, 0, avctx->width << 2);
for (plane = 0; plane < avctx->bits_per_coded_sample; plane++) { for (plane = 0; plane < s->bpp; plane++) {
buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end); buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
decodeplane32((uint32_t *) row, s->planebuf, s->planesize, plane); decodeplane32((uint32_t *) row, s->planebuf, s->planesize, plane);
} }
} }
} }
} else { } else if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) { // IFF-PBM
for(y = 0; y < avctx->height ; y++ ) { for(y = 0; y < avctx->height ; y++ ) {
uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]]; uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
buf += decode_byterun(row, avctx->width, buf, buf_end); buf += decode_byterun(row, avctx->width, buf, buf_end);
} }
} else { // IFF-PBM: HAM to PIX_FMT_BGR32
for (y = 0; y < avctx->height ; y++) {
uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
buf += decode_byterun(s->ham_buf, avctx->width, buf, buf_end);
decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, avctx->width);
}
} }
*data_size = sizeof(AVFrame); *data_size = sizeof(AVFrame);
@@ -364,6 +610,8 @@ static av_cold int decode_end(AVCodecContext *avctx)
if (s->frame.data[0]) if (s->frame.data[0])
avctx->release_buffer(avctx, &s->frame); avctx->release_buffer(avctx, &s->frame);
av_freep(&s->planebuf); av_freep(&s->planebuf);
av_freep(&s->ham_buf);
av_freep(&s->ham_palbuf);
return 0; return 0;
} }

136
libavcodec/libcelt_dec.c Normal file
View File

@@ -0,0 +1,136 @@
/*
* Xiph CELT / Opus decoder using libcelt
* Copyright (c) 2011 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <celt/celt.h>
#include <celt/celt_header.h>
#include "avcodec.h"
#include "libavutil/intreadwrite.h"
struct libcelt_context {
CELTMode *mode;
CELTDecoder *dec;
int frame_bytes;
int discard;
};
static int ff_celt_error_to_averror(int err)
{
switch(err) {
case CELT_BAD_ARG: return AVERROR(EINVAL);
#ifdef CELT_BUFFER_TOO_SMALL
case CELT_BUFFER_TOO_SMALL: return AVERROR(ENOBUFS);
#endif
case CELT_INTERNAL_ERROR: return AVERROR(EFAULT);
case CELT_CORRUPTED_DATA: return AVERROR_INVALIDDATA;
case CELT_UNIMPLEMENTED: return AVERROR(ENOTSUP);
#ifdef ENOTRECOVERABLE
case CELT_INVALID_STATE: return AVERROR(ENOTRECOVERABLE);
#endif
case CELT_ALLOC_FAIL: return AVERROR(ENOMEM);
default: return AVERROR(EINVAL);
}
}
static int ff_celt_bitstream_version_hack(CELTMode *mode)
{
CELTHeader header = { .version_id = 0 };
celt_header_init(&header, mode, 960, 2);
return header.version_id;
}
static av_cold int libcelt_dec_init(AVCodecContext *c)
{
struct libcelt_context *celt = c->priv_data;
int err;
if (!c->channels || !c->frame_size ||
c->frame_size > INT_MAX / sizeof(int16_t) / c->channels)
return AVERROR(EINVAL);
celt->frame_bytes = c->frame_size * c->channels * sizeof(int16_t);
celt->mode = celt_mode_create(c->sample_rate, c->frame_size, &err);
if (!celt->mode)
return ff_celt_error_to_averror(err);
celt->dec = celt_decoder_create_custom(celt->mode, c->channels, &err);
if (!celt->dec) {
celt_mode_destroy(celt->mode);
return ff_celt_error_to_averror(err);
}
if (c->extradata_size >= 4) {
celt->discard = AV_RL32(c->extradata);
if (celt->discard < 0 || celt->discard >= c->frame_size) {
av_log(c, AV_LOG_WARNING,
"Invalid overlap (%d), ignored.\n", celt->discard);
celt->discard = 0;
}
celt->discard *= c->channels * sizeof(int16_t);
}
if(c->extradata_size >= 8) {
unsigned version = AV_RL32(c->extradata + 4);
unsigned lib_version = ff_celt_bitstream_version_hack(celt->mode);
if (version != lib_version)
av_log(c, AV_LOG_WARNING,
"CELT bitstream version 0x%x may be "
"improperly decoded by libcelt for version 0x%x.\n",
version, lib_version);
}
return 0;
}
static av_cold int libcelt_dec_close(AVCodecContext *c)
{
struct libcelt_context *celt = c->priv_data;
celt_decoder_destroy(celt->dec);
celt_mode_destroy(celt->mode);
return 0;
}
static int libcelt_dec_decode(AVCodecContext *c, void *pcm, int *pcm_size,
AVPacket *pkt)
{
struct libcelt_context *celt = c->priv_data;
int err;
if (*pcm_size < celt->frame_bytes)
return AVERROR(ENOBUFS);
err = celt_decode(celt->dec, pkt->data, pkt->size, pcm, c->frame_size);
if (err < 0)
return ff_celt_error_to_averror(err);
*pcm_size = celt->frame_bytes;
if (celt->discard) {
*pcm_size = celt->frame_bytes - celt->discard;
memmove(pcm, (char *)pcm + celt->discard, *pcm_size);
celt->discard = 0;
}
return pkt->size;
}
AVCodec ff_libcelt_decoder = {
.name = "libcelt",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_CELT,
.priv_data_size = sizeof(struct libcelt_context),
.init = libcelt_dec_init,
.close = libcelt_dec_close,
.decode = libcelt_dec_decode,
.capabilities = 0,
.long_name = NULL_IF_CONFIG_SMALL("Xiph CELT/Opus decoder using libcelt"),
};

View File

@@ -34,6 +34,10 @@ typedef struct Mp3AudioContext {
int stereo; int stereo;
uint8_t buffer[BUFFER_SIZE]; uint8_t buffer[BUFFER_SIZE];
int buffer_index; int buffer_index;
struct {
int *left;
int *right;
} s32_data;
} Mp3AudioContext; } Mp3AudioContext;
static av_cold int MP3lame_encode_init(AVCodecContext *avctx) static av_cold int MP3lame_encode_init(AVCodecContext *avctx)
@@ -69,9 +73,26 @@ static av_cold int MP3lame_encode_init(AVCodecContext *avctx)
avctx->frame_size = lame_get_framesize(s->gfp); avctx->frame_size = lame_get_framesize(s->gfp);
avctx->coded_frame= avcodec_alloc_frame(); if(!(avctx->coded_frame= avcodec_alloc_frame())) {
lame_close(s->gfp);
return AVERROR(ENOMEM);
}
avctx->coded_frame->key_frame= 1; avctx->coded_frame->key_frame= 1;
if(AV_SAMPLE_FMT_S32 == avctx->sample_fmt && s->stereo) {
int nelem = 2 * avctx->frame_size;
if(! (s->s32_data.left = av_malloc(nelem * sizeof(int)))) {
av_freep(&avctx->coded_frame);
lame_close(s->gfp);
return AVERROR(ENOMEM);
}
s->s32_data.right = s->s32_data.left + avctx->frame_size;
}
return 0; return 0;
err_close: err_close:
@@ -146,7 +167,45 @@ static int MP3lame_encode_frame(AVCodecContext *avctx,
/* lame 3.91 dies on '1-channel interleaved' data */ /* lame 3.91 dies on '1-channel interleaved' data */
if(data){ if(!data){
lame_result= lame_encode_flush(
s->gfp,
s->buffer + s->buffer_index,
BUFFER_SIZE - s->buffer_index
);
#if 2147483647 == INT_MAX
}else if(AV_SAMPLE_FMT_S32 == avctx->sample_fmt){
if (s->stereo) {
int32_t *rp = data;
int32_t *mp = rp + 2*avctx->frame_size;
int *wpl = s->s32_data.left;
int *wpr = s->s32_data.right;
while (rp < mp) {
*wpl++ = *rp++;
*wpr++ = *rp++;
}
lame_result = lame_encode_buffer_int(
s->gfp,
s->s32_data.left,
s->s32_data.right,
avctx->frame_size,
s->buffer + s->buffer_index,
BUFFER_SIZE - s->buffer_index
);
} else {
lame_result = lame_encode_buffer_int(
s->gfp,
data,
data,
avctx->frame_size,
s->buffer + s->buffer_index,
BUFFER_SIZE - s->buffer_index
);
}
#endif
}else{
if (s->stereo) { if (s->stereo) {
lame_result = lame_encode_buffer_interleaved( lame_result = lame_encode_buffer_interleaved(
s->gfp, s->gfp,
@@ -165,12 +224,6 @@ static int MP3lame_encode_frame(AVCodecContext *avctx,
BUFFER_SIZE - s->buffer_index BUFFER_SIZE - s->buffer_index
); );
} }
}else{
lame_result= lame_encode_flush(
s->gfp,
s->buffer + s->buffer_index,
BUFFER_SIZE - s->buffer_index
);
} }
if(lame_result < 0){ if(lame_result < 0){
@@ -206,6 +259,7 @@ static av_cold int MP3lame_encode_close(AVCodecContext *avctx)
{ {
Mp3AudioContext *s = avctx->priv_data; Mp3AudioContext *s = avctx->priv_data;
av_freep(&s->s32_data.left);
av_freep(&avctx->coded_frame); av_freep(&avctx->coded_frame);
lame_close(s->gfp); lame_close(s->gfp);
@@ -222,7 +276,11 @@ AVCodec ff_libmp3lame_encoder = {
MP3lame_encode_frame, MP3lame_encode_frame,
MP3lame_encode_close, MP3lame_encode_close,
.capabilities= CODEC_CAP_DELAY, .capabilities= CODEC_CAP_DELAY,
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,
#if 2147483647 == INT_MAX
AV_SAMPLE_FMT_S32,
#endif
AV_SAMPLE_FMT_NONE},
.supported_samplerates= sSampleRates, .supported_samplerates= sSampleRates,
.long_name= NULL_IF_CONFIG_SMALL("libmp3lame MP3 (MPEG audio layer 3)"), .long_name= NULL_IF_CONFIG_SMALL("libmp3lame MP3 (MPEG audio layer 3)"),
}; };

View File

@@ -59,7 +59,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
if (s->codec_api.SetParam(s->handle, VO_PID_AAC_ENCPARAM, &params) if (s->codec_api.SetParam(s->handle, VO_PID_AAC_ENCPARAM, &params)
!= VO_ERR_NONE) { != VO_ERR_NONE) {
av_log(avctx, AV_LOG_ERROR, "Unable to set encoding parameters\n"); av_log(avctx, AV_LOG_ERROR, "Unable to set encoding parameters\n");
return AVERROR_UNKNOWN; return AVERROR(EINVAL);
} }
for (index = 0; index < 16; index++) for (index = 0; index < 16; index++)
@@ -68,7 +68,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
if (index == 16) { if (index == 16) {
av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d\n", av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d\n",
avctx->sample_rate); avctx->sample_rate);
return AVERROR_NOTSUPP; return AVERROR(ENOSYS);
} }
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) { if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
avctx->extradata_size = 2; avctx->extradata_size = 2;
@@ -110,14 +110,14 @@ static int aac_encode_frame(AVCodecContext *avctx,
if (s->codec_api.GetOutputData(s->handle, &output, &output_info) if (s->codec_api.GetOutputData(s->handle, &output, &output_info)
!= VO_ERR_NONE) { != VO_ERR_NONE) {
av_log(avctx, AV_LOG_ERROR, "Unable to encode frame\n"); av_log(avctx, AV_LOG_ERROR, "Unable to encode frame\n");
return AVERROR_UNKNOWN; return AVERROR(EINVAL);
} }
return output.Length; return output.Length;
} }
AVCodec ff_libvo_aacenc_encoder = { AVCodec ff_libvo_aacenc_encoder = {
"libvo_aacenc", "libvo_aacenc",
CODEC_TYPE_AUDIO, AVMEDIA_TYPE_AUDIO,
CODEC_ID_AAC, CODEC_ID_AAC,
sizeof(AACContext), sizeof(AACContext),
aac_encode_init, aac_encode_init,

View File

@@ -119,7 +119,7 @@ static int amr_wb_encode_frame(AVCodecContext *avctx,
AVCodec ff_libvo_amrwbenc_encoder = { AVCodec ff_libvo_amrwbenc_encoder = {
"libvo_amrwbenc", "libvo_amrwbenc",
CODEC_TYPE_AUDIO, AVMEDIA_TYPE_AUDIO,
CODEC_ID_AMR_WB, CODEC_ID_AMR_WB,
sizeof(AMRWBContext), sizeof(AMRWBContext),
amr_wb_encode_init, amr_wb_encode_init,

View File

@@ -38,7 +38,9 @@ typedef struct X264Context {
const char *preset; const char *preset;
const char *tune; const char *tune;
const char *profile; const char *profile;
const char *level;
int fastfirstpass; int fastfirstpass;
const char *stats;
} X264Context; } X264Context;
static void X264_log(void *p, int level, const char *fmt, va_list args) static void X264_log(void *p, int level, const char *fmt, va_list args)
@@ -144,7 +146,8 @@ static int X264_frame(AVCodecContext *ctx, uint8_t *buf,
} }
x4->out_pic.key_frame = pic_out.b_keyframe; x4->out_pic.key_frame = pic_out.b_keyframe;
x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA; if (bufsize)
x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
return bufsize; return bufsize;
} }
@@ -162,6 +165,40 @@ static av_cold int X264_close(AVCodecContext *avctx)
return 0; return 0;
} }
/**
* Detect default settings and use default profile to avoid libx264 failure.
*/
static void check_default_settings(AVCodecContext *avctx)
{
X264Context *x4 = avctx->priv_data;
int score = 0;
score += x4->params.analyse.i_me_range == 0;
score += x4->params.rc.i_qp_step == 3;
score += x4->params.i_keyint_max == 12;
score += x4->params.rc.i_qp_min == 2;
score += x4->params.rc.i_qp_max == 31;
score += x4->params.rc.f_qcompress == 0.5;
score += fabs(x4->params.rc.f_ip_factor - 1.25) < 0.01;
score += fabs(x4->params.rc.f_pb_factor - 1.25) < 0.01;
score += x4->params.analyse.inter == 0 && x4->params.analyse.i_subpel_refine == 8;
if (score >= 5) {
av_log(avctx, AV_LOG_ERROR, "Default settings detected, using medium profile\n");
x4->preset = "medium";
if (avctx->bit_rate == 200*100)
avctx->crf = 23;
}
}
#define OPT_STR(opt, param) \
do { \
if (param && x264_param_parse(&x4->params, opt, param) < 0) { \
av_log(avctx, AV_LOG_ERROR, \
"bad value for '%s': '%s'\n", opt, param); \
return -1; \
} \
} while (0); \
static av_cold int X264_init(AVCodecContext *avctx) static av_cold int X264_init(AVCodecContext *avctx)
{ {
X264Context *x4 = avctx->priv_data; X264Context *x4 = avctx->priv_data;
@@ -248,26 +285,19 @@ static av_cold int X264_init(AVCodecContext *avctx)
x4->params.analyse.i_trellis = avctx->trellis; x4->params.analyse.i_trellis = avctx->trellis;
x4->params.analyse.i_noise_reduction = avctx->noise_reduction; x4->params.analyse.i_noise_reduction = avctx->noise_reduction;
if (avctx->level > 0)
x4->params.i_level_idc = avctx->level;
x4->params.rc.b_mb_tree = !!(avctx->flags2 & CODEC_FLAG2_MBTREE); x4->params.rc.b_mb_tree = !!(avctx->flags2 & CODEC_FLAG2_MBTREE);
x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor); x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor);
x4->params.rc.f_pb_factor = avctx->b_quant_factor; x4->params.rc.f_pb_factor = avctx->b_quant_factor;
x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset; x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset;
if (!x4->preset)
check_default_settings(avctx);
if (x4->preset || x4->tune) { if (x4->preset || x4->tune) {
if (x264_param_default_preset(&x4->params, x4->preset, x4->tune) < 0) if (x264_param_default_preset(&x4->params, x4->preset, x4->tune) < 0)
return -1; return -1;
} }
if (x4->fastfirstpass)
x264_param_apply_fastfirstpass(&x4->params);
if (x4->profile)
if (x264_param_apply_profile(&x4->params, x4->profile) < 0)
return -1;
x4->params.pf_log = X264_log; x4->params.pf_log = X264_log;
x4->params.p_log_private = avctx; x4->params.p_log_private = avctx;
x4->params.i_log_level = X264_LOG_DEBUG; x4->params.i_log_level = X264_LOG_DEBUG;
@@ -290,6 +320,8 @@ static av_cold int X264_init(AVCodecContext *avctx)
} }
} }
OPT_STR("stats", x4->stats);
// if neither crf nor cqp modes are selected we have to enable the RC // if neither crf nor cqp modes are selected we have to enable the RC
// we do it this way because we cannot check if the bitrate has been set // we do it this way because we cannot check if the bitrate has been set
if (!(avctx->crf || (avctx->cqp > -1))) if (!(avctx->crf || (avctx->cqp > -1)))
@@ -301,6 +333,15 @@ static av_cold int X264_init(AVCodecContext *avctx)
(float)avctx->rc_initial_buffer_occupancy / avctx->rc_buffer_size; (float)avctx->rc_initial_buffer_occupancy / avctx->rc_buffer_size;
} }
OPT_STR("level", x4->level);
if (x4->fastfirstpass)
x264_param_apply_fastfirstpass(&x4->params);
if (x4->profile)
if (x264_param_apply_profile(&x4->params, x4->profile) < 0)
return -1;
x4->params.i_width = avctx->width; x4->params.i_width = avctx->width;
x4->params.i_height = avctx->height; x4->params.i_height = avctx->height;
x4->params.vui.i_sar_width = avctx->sample_aspect_ratio.num; x4->params.vui.i_sar_width = avctx->sample_aspect_ratio.num;
@@ -325,7 +366,8 @@ static av_cold int X264_init(AVCodecContext *avctx)
x4->params.b_repeat_headers = 0; x4->params.b_repeat_headers = 0;
// update AVCodecContext with x264 parameters // update AVCodecContext with x264 parameters
avctx->has_b_frames = x4->params.i_bframe_pyramid ? 2 : !!x4->params.i_bframe; avctx->has_b_frames = x4->params.i_bframe ?
x4->params.i_bframe_pyramid ? 2 : 1 : 0;
avctx->bit_rate = x4->params.rc.i_bitrate*1000; avctx->bit_rate = x4->params.rc.i_bitrate*1000;
avctx->crf = x4->params.rc.f_rf_constant; avctx->crf = x4->params.rc.f_rf_constant;
@@ -360,6 +402,8 @@ static const AVOption options[] = {
{"tune", "Tune the encoding params", OFFSET(tune), FF_OPT_TYPE_STRING, 0, 0, 0, VE}, {"tune", "Tune the encoding params", OFFSET(tune), FF_OPT_TYPE_STRING, 0, 0, 0, VE},
{"fastfirstpass", "Use fast settings when encoding first pass", OFFSET(fastfirstpass), FF_OPT_TYPE_INT, 1, 0, 1, VE}, {"fastfirstpass", "Use fast settings when encoding first pass", OFFSET(fastfirstpass), FF_OPT_TYPE_INT, 1, 0, 1, VE},
{"profile", "Set profile restrictions", OFFSET(profile), FF_OPT_TYPE_STRING, 0, 0, 0, VE}, {"profile", "Set profile restrictions", OFFSET(profile), FF_OPT_TYPE_STRING, 0, 0, 0, VE},
{"level", "Specify level (as defined by Annex A)", OFFSET(level), FF_OPT_TYPE_STRING, 0, 0, 0, VE},
{"passlogfile", "Filename for 2 pass stats", OFFSET(stats), FF_OPT_TYPE_STRING, 0, 0, 0, VE},
{ NULL }, { NULL },
}; };

View File

@@ -248,7 +248,7 @@ static av_cold int decode_init(AVCodecContext *avctx){
break; break;
default: default:
l->lossy = AV_RL32(avctx->extradata + 8); l->lossy = AV_RL32(avctx->extradata + 8);
av_log(avctx, AV_LOG_INFO, "This is LOCO codec version %i, please upload file for study\n", version); av_log_ask_for_sample(avctx, "This is LOCO codec version %i.\n", version);
} }
l->mode = AV_RL32(avctx->extradata + 4); l->mode = AV_RL32(avctx->extradata + 4);

View File

@@ -797,6 +797,10 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, i
if (s->restart_interval && !s->restart_count) if (s->restart_interval && !s->restart_count)
s->restart_count = s->restart_interval; s->restart_count = s->restart_interval;
if(get_bits_count(&s->gb)>s->gb.size_in_bits){
av_log(s->avctx, AV_LOG_ERROR, "overread %d\n", get_bits_count(&s->gb) - s->gb.size_in_bits);
return -1;
}
for(i=0;i<nb_components;i++) { for(i=0;i<nb_components;i++) {
uint8_t *ptr; uint8_t *ptr;
int n, h, v, x, y, c, j; int n, h, v, x, y, c, j;

View File

@@ -43,7 +43,7 @@ static const uint8_t mlp_channels[32] = {
5, 6, 5, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 6, 5, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
}; };
static const uint64_t mlp_layout[32] = { const uint64_t ff_mlp_layout[32] = {
AV_CH_LAYOUT_MONO, AV_CH_LAYOUT_MONO,
AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_STEREO,
AV_CH_LAYOUT_2_1, AV_CH_LAYOUT_2_1,
@@ -107,7 +107,7 @@ static int truehd_channels(int chanmap)
return channels; return channels;
} }
static int64_t truehd_layout(int chanmap) int64_t ff_truehd_layout(int chanmap)
{ {
int layout = 0, i; int layout = 0, i;
@@ -316,19 +316,15 @@ static int mlp_parse(AVCodecParserContext *s,
if (mh.stream_type == 0xbb) { if (mh.stream_type == 0xbb) {
/* MLP stream */ /* MLP stream */
avctx->channels = mlp_channels[mh.channels_mlp]; avctx->channels = mlp_channels[mh.channels_mlp];
avctx->channel_layout = mlp_layout[mh.channels_mlp]; avctx->channel_layout = ff_mlp_layout[mh.channels_mlp];
} else { /* mh.stream_type == 0xba */ } else { /* mh.stream_type == 0xba */
/* TrueHD stream */ /* TrueHD stream */
if (mh.channels_thd_stream2) { if (mh.channels_thd_stream2) {
avctx->channels = truehd_channels(mh.channels_thd_stream2); avctx->channels = truehd_channels(mh.channels_thd_stream2);
avctx->channel_layout = truehd_layout(mh.channels_thd_stream2); avctx->channel_layout = ff_truehd_layout(mh.channels_thd_stream2);
} else { } else {
avctx->channels = truehd_channels(mh.channels_thd_stream1); avctx->channels = truehd_channels(mh.channels_thd_stream1);
avctx->channel_layout = truehd_layout(mh.channels_thd_stream1); avctx->channel_layout = ff_truehd_layout(mh.channels_thd_stream1);
}
if (av_get_channel_layout_nb_channels(avctx->channel_layout) != avctx->channels) {
avctx->channel_layout = 0;
av_log_ask_for_sample(avctx, "Unknown channel layout.");
} }
} }

View File

@@ -54,6 +54,9 @@ typedef struct MLPHeaderInfo
int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb); int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb);
int64_t ff_truehd_layout(int chanmap);
extern const uint64_t ff_mlp_layout[32];
#endif /* AVCODEC_MLP_PARSER_H */ #endif /* AVCODEC_MLP_PARSER_H */

View File

@@ -329,6 +329,23 @@ static int read_major_sync(MLPDecodeContext *m, GetBitContext *gb)
for (substr = 0; substr < MAX_SUBSTREAMS; substr++) for (substr = 0; substr < MAX_SUBSTREAMS; substr++)
m->substream[substr].restart_seen = 0; m->substream[substr].restart_seen = 0;
if (mh.stream_type == 0xbb) {
/* MLP stream */
m->avctx->channel_layout = ff_mlp_layout[mh.channels_mlp];
} else { /* mh.stream_type == 0xba */
/* TrueHD stream */
if (mh.channels_thd_stream2) {
m->avctx->channel_layout = ff_truehd_layout(mh.channels_thd_stream2);
} else {
m->avctx->channel_layout = ff_truehd_layout(mh.channels_thd_stream1);
}
if (m->avctx->channels &&
av_get_channel_layout_nb_channels(m->avctx->channel_layout) != m->avctx->channels) {
m->avctx->channel_layout = 0;
av_log_ask_for_sample(m->avctx, "Unknown channel layout.");
}
}
m->needs_reordering = mh.channels_mlp >= 18 && mh.channels_mlp <= 20; m->needs_reordering = mh.channels_mlp >= 18 && mh.channels_mlp <= 20;
return 0; return 0;

View File

@@ -2602,7 +2602,7 @@ AVCodec ff_mpeg2video_decoder = {
NULL, NULL,
mpeg_decode_end, mpeg_decode_end,
mpeg_decode_frame, mpeg_decode_frame,
CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY, CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.flush= flush, .flush= flush,
.max_lowres= 3, .max_lowres= 3,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"), .long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"),
@@ -2619,7 +2619,7 @@ AVCodec ff_mpegvideo_decoder = {
NULL, NULL,
mpeg_decode_end, mpeg_decode_end,
mpeg_decode_frame, mpeg_decode_frame,
CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY, CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.flush= flush, .flush= flush,
.max_lowres= 3, .max_lowres= 3,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"), .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),

View File

@@ -940,7 +940,7 @@ AVCodec ff_mpeg1video_encoder = {
MPV_encode_end, MPV_encode_end,
.supported_framerates= ff_frame_rate_tab+1, .supported_framerates= ff_frame_rate_tab+1,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY, .capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"), .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),
}; };
@@ -954,6 +954,6 @@ AVCodec ff_mpeg2video_encoder = {
MPV_encode_end, MPV_encode_end,
.supported_framerates= ff_frame_rate_tab+1, .supported_framerates= ff_frame_rate_tab+1,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE}, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY, .capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"), .long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"),
}; };

View File

@@ -1359,6 +1359,6 @@ AVCodec ff_mpeg4_encoder = {
MPV_encode_picture, MPV_encode_picture,
MPV_encode_end, MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY, .capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"), .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
}; };

View File

@@ -3802,6 +3802,7 @@ AVCodec ff_h263p_encoder = {
MPV_encode_init, MPV_encode_init,
MPV_encode_picture, MPV_encode_picture,
MPV_encode_end, MPV_encode_end,
.capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"), .long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
}; };

View File

@@ -1,89 +0,0 @@
/*
* AVOptions ABI compatibility wrapper
* Copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avcodec.h"
#include "opt.h"
#if LIBAVCODEC_VERSION_MAJOR < 53 && CONFIG_SHARED && HAVE_SYMVER
FF_SYMVER(const AVOption *, av_find_opt, (void *obj, const char *name, const char *unit, int mask, int flags), "LIBAVCODEC_52"){
return av_find_opt(obj, name, unit, mask, flags);
}
FF_SYMVER(int, av_set_string3, (void *obj, const char *name, const char *val, int alloc, const AVOption **o_out), "LIBAVCODEC_52"){
return av_set_string3(obj, name, val, alloc, o_out);
}
FF_SYMVER(const AVOption *, av_set_double, (void *obj, const char *name, double n), "LIBAVCODEC_52"){
return av_set_double(obj, name, n);
}
FF_SYMVER(const AVOption *, av_set_q, (void *obj, const char *name, AVRational n), "LIBAVCODEC_52"){
return av_set_q(obj, name, n);
}
FF_SYMVER(const AVOption *, av_set_int, (void *obj, const char *name, int64_t n), "LIBAVCODEC_52"){
return av_set_int(obj, name, n);
}
FF_SYMVER(double, av_get_double, (void *obj, const char *name, const AVOption **o_out), "LIBAVCODEC_52"){
return av_get_double(obj, name, o_out);
}
FF_SYMVER(AVRational, av_get_q, (void *obj, const char *name, const AVOption **o_out), "LIBAVCODEC_52"){
return av_get_q(obj, name, o_out);
}
FF_SYMVER(int64_t, av_get_int, (void *obj, const char *name, const AVOption **o_out), "LIBAVCODEC_52"){
return av_get_int(obj, name, o_out);
}
FF_SYMVER(const char *, av_get_string, (void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len), "LIBAVCODEC_52"){
return av_get_string(obj, name, o_out, buf, buf_len);
}
FF_SYMVER(const AVOption *, av_next_option, (void *obj, const AVOption *last), "LIBAVCODEC_52"){
return av_next_option(obj, last);
}
FF_SYMVER(int, av_opt_show2, (void *obj, void *av_log_obj, int req_flags, int rej_flags), "LIBAVCODEC_52"){
return av_opt_show2(obj, av_log_obj, req_flags, rej_flags);
}
FF_SYMVER(void, av_opt_set_defaults, (void *s), "LIBAVCODEC_52"){
return av_opt_set_defaults(s);
}
FF_SYMVER(void, av_opt_set_defaults2, (void *s, int mask, int flags), "LIBAVCODEC_52"){
return av_opt_set_defaults2(s, mask, flags);
}
#endif
#if FF_API_SET_STRING_OLD
const AVOption *av_set_string2(void *obj, const char *name, const char *val, int alloc){
const AVOption *o;
if (av_set_string3(obj, name, val, alloc, &o) < 0)
return NULL;
return o;
}
const AVOption *av_set_string(void *obj, const char *name, const char *val){
const AVOption *o;
if (av_set_string3(obj, name, val, 0, &o) < 0)
return NULL;
return o;
}
#endif
#if FF_API_OPT_SHOW
int av_opt_show(void *obj, void *av_log_obj){
return av_opt_show2(obj, av_log_obj,
AV_OPT_FLAG_ENCODING_PARAM|AV_OPT_FLAG_DECODING_PARAM, 0);
}
#endif

View File

@@ -71,7 +71,7 @@ static av_cold int pcm_encode_close(AVCodecContext *avctx)
* @param offset Sample value offset * @param offset Sample value offset
*/ */
#define ENCODE(type, endian, src, dst, n, shift, offset) \ #define ENCODE(type, endian, src, dst, n, shift, offset) \
samples_##type = (type*)src; \ samples_##type = (const type*) src; \
for(;n>0;n--) { \ for(;n>0;n--) { \
register type v = (*samples_##type++ >> shift) + offset; \ register type v = (*samples_##type++ >> shift) + offset; \
bytestream_put_##endian(&dst, v); \ bytestream_put_##endian(&dst, v); \

View File

@@ -877,7 +877,8 @@ static void validate_thread_parameters(AVCodecContext *avctx)
avctx->active_thread_type = 0; avctx->active_thread_type = 0;
} else if (frame_threading_supported && (avctx->thread_type & FF_THREAD_FRAME)) { } else if (frame_threading_supported && (avctx->thread_type & FF_THREAD_FRAME)) {
avctx->active_thread_type = FF_THREAD_FRAME; avctx->active_thread_type = FF_THREAD_FRAME;
} else if (avctx->thread_type & FF_THREAD_SLICE) { } else if (avctx->codec->capabilities & CODEC_CAP_SLICE_THREADS &&
avctx->thread_type & FF_THREAD_SLICE) {
avctx->active_thread_type = FF_THREAD_SLICE; avctx->active_thread_type = FF_THREAD_SLICE;
} }
} }

View File

@@ -61,6 +61,7 @@ const PixelFormatTag ff_raw_pix_fmt_tags[] = {
{ PIX_FMT_UYVY422, MKTAG('A', 'V', '1', 'x') }, /* Avid 1:1x */ { PIX_FMT_UYVY422, MKTAG('A', 'V', '1', 'x') }, /* Avid 1:1x */
{ PIX_FMT_UYVY422, MKTAG('A', 'V', 'u', 'p') }, { PIX_FMT_UYVY422, MKTAG('A', 'V', 'u', 'p') },
{ PIX_FMT_UYVY422, MKTAG('V', 'D', 'T', 'Z') }, /* SoftLab-NSK VideoTizer */ { PIX_FMT_UYVY422, MKTAG('V', 'D', 'T', 'Z') }, /* SoftLab-NSK VideoTizer */
{ PIX_FMT_UYVY422, MKTAG('a', 'u', 'v', '2') },
{ PIX_FMT_UYYVYY411, MKTAG('Y', '4', '1', '1') }, { PIX_FMT_UYYVYY411, MKTAG('Y', '4', '1', '1') },
{ PIX_FMT_GRAY8, MKTAG('G', 'R', 'E', 'Y') }, { PIX_FMT_GRAY8, MKTAG('G', 'R', 'E', 'Y') },
{ PIX_FMT_NV12, MKTAG('N', 'V', '1', '2') }, { PIX_FMT_NV12, MKTAG('N', 'V', '1', '2') },

View File

@@ -656,6 +656,8 @@ static int rv10_decode_frame(AVCodecContext *avctx,
const uint8_t *slices_hdr = NULL; const uint8_t *slices_hdr = NULL;
av_dlog(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size); av_dlog(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
s->flags = avctx->flags;
s->flags2 = avctx->flags2;
/* no supplementary picture */ /* no supplementary picture */
if (buf_size == 0) { if (buf_size == 0) {

View File

@@ -86,7 +86,6 @@ static int sp5x_decode_frame(AVCodecContext *avctx,
recoded[j++] = 0xFF; recoded[j++] = 0xFF;
recoded[j++] = 0xD9; recoded[j++] = 0xD9;
avctx->flags &= ~CODEC_FLAG_EMU_EDGE;
av_init_packet(&avpkt_recoded); av_init_packet(&avpkt_recoded);
avpkt_recoded.data = recoded; avpkt_recoded.data = recoded;
avpkt_recoded.size = j; avpkt_recoded.size = j;
@@ -121,6 +120,6 @@ AVCodec ff_amv_decoder = {
NULL, NULL,
ff_mjpeg_decode_end, ff_mjpeg_decode_end,
sp5x_decode_frame, sp5x_decode_frame,
CODEC_CAP_DR1, 0,
.long_name = NULL_IF_CONFIG_SMALL("AMV Video"), .long_name = NULL_IF_CONFIG_SMALL("AMV Video"),
}; };

View File

@@ -353,7 +353,7 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
s->flags = FLAG_KEYFRAME; s->flags = FLAG_KEYFRAME;
if (s->flags & FLAG_SPRITE) { if (s->flags & FLAG_SPRITE) {
av_log(s->avctx, AV_LOG_INFO, "SPRITE frame found, please report the sample to the developers\n"); av_log_ask_for_sample(s->avctx, "SPRITE frame found.\n");
/* FIXME header.width, height, xoffset and yoffset aren't initialized */ /* FIXME header.width, height, xoffset and yoffset aren't initialized */
#if 0 #if 0
s->w = header.width; s->w = header.width;
@@ -370,7 +370,7 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
if ((s->w < 213) && (s->h >= 176)) if ((s->w < 213) && (s->h >= 176))
{ {
s->flags |= FLAG_INTERPOLATED; s->flags |= FLAG_INTERPOLATED;
av_log(s->avctx, AV_LOG_INFO, "INTERPOLATION selected, please report the sample to the developers\n"); av_log_ask_for_sample(s->avctx, "INTERPOLATION selected.\n");
} }
} }
} }

View File

@@ -247,7 +247,7 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
if (s->is_float) if (s->is_float)
{ {
avctx->sample_fmt = AV_SAMPLE_FMT_FLT; avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
av_log(s->avctx, AV_LOG_ERROR, "Unsupported sample format. Please contact the developers.\n"); av_log_ask_for_sample(s->avctx, "Unsupported sample format.\n");
return -1; return -1;
} }
else switch(s->bps) { else switch(s->bps) {
@@ -256,7 +256,8 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
// case 3: avctx->sample_fmt = AV_SAMPLE_FMT_S24; break; // case 3: avctx->sample_fmt = AV_SAMPLE_FMT_S24; break;
case 4: avctx->sample_fmt = AV_SAMPLE_FMT_S32; break; case 4: avctx->sample_fmt = AV_SAMPLE_FMT_S32; break;
default: default:
av_log(s->avctx, AV_LOG_ERROR, "Invalid/unsupported sample format. Please contact the developers.\n"); av_log_ask_for_sample(s->avctx,
"Invalid/unsupported sample format.\n");
return -1; return -1;
} }

View File

@@ -234,7 +234,7 @@ static void memset_float(float *buf, float val, int size)
* be a multiple of four. * be a multiple of four.
* @return the LPC value * @return the LPC value
* *
* @todo reuse code from vorbis_dec.c: vorbis_floor0_decode * @todo reuse code from Vorbis decoder: vorbis_floor0_decode
*/ */
static float eval_lpc_spectrum(const float *lsp, float cos_val, int order) static float eval_lpc_spectrum(const float *lsp, float cos_val, int order)
{ {

View File

@@ -482,7 +482,7 @@ static void avcodec_get_subtitle_defaults(AVSubtitle *sub)
int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec) int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
{ {
int ret= -1; int ret = 0;
/* If there is a user-supplied mutex locking routine, call it. */ /* If there is a user-supplied mutex locking routine, call it. */
if (ff_lockmgr_cb) { if (ff_lockmgr_cb) {
@@ -493,11 +493,14 @@ int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
entangled_thread_counter++; entangled_thread_counter++;
if(entangled_thread_counter != 1){ if(entangled_thread_counter != 1){
av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n"); av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n");
ret = -1;
goto end; goto end;
} }
if(avctx->codec || !codec) if(avctx->codec || !codec) {
ret = AVERROR(EINVAL);
goto end; goto end;
}
if (codec->priv_data_size > 0) { if (codec->priv_data_size > 0) {
if(!avctx->priv_data){ if(!avctx->priv_data){
@@ -547,6 +550,7 @@ int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type
&& avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) { && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) {
av_log(avctx, AV_LOG_ERROR, "codec type or id mismatches\n"); av_log(avctx, AV_LOG_ERROR, "codec type or id mismatches\n");
ret = AVERROR(EINVAL);
goto free_and_end; goto free_and_end;
} }
avctx->frame_number = 0; avctx->frame_number = 0;
@@ -561,6 +565,7 @@ int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
if (avctx->codec->max_lowres < avctx->lowres) { if (avctx->codec->max_lowres < avctx->lowres) {
av_log(avctx, AV_LOG_ERROR, "The maximum value for lowres supported by the decoder is %d\n", av_log(avctx, AV_LOG_ERROR, "The maximum value for lowres supported by the decoder is %d\n",
avctx->codec->max_lowres); avctx->codec->max_lowres);
ret = AVERROR(EINVAL);
goto free_and_end; goto free_and_end;
} }
if (avctx->codec->sample_fmts && avctx->codec->encode) { if (avctx->codec->sample_fmts && avctx->codec->encode) {
@@ -570,6 +575,7 @@ int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
break; break;
if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) { if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
av_log(avctx, AV_LOG_ERROR, "Specified sample_fmt is not supported.\n"); av_log(avctx, AV_LOG_ERROR, "Specified sample_fmt is not supported.\n");
ret = AVERROR(EINVAL);
goto free_and_end; goto free_and_end;
} }
} }
@@ -1257,13 +1263,19 @@ void av_log_missing_feature(void *avc, const char *feature, int want_sample)
av_log(avc, AV_LOG_WARNING, "\n"); av_log(avc, AV_LOG_WARNING, "\n");
} }
void av_log_ask_for_sample(void *avc, const char *msg) void av_log_ask_for_sample(void *avc, const char *msg, ...)
{ {
va_list argument_list;
va_start(argument_list, msg);
if (msg) if (msg)
av_log(avc, AV_LOG_WARNING, "%s ", msg); av_vlog(avc, AV_LOG_WARNING, msg, argument_list);
av_log(avc, AV_LOG_WARNING, "If you want to help, upload a sample " av_log(avc, AV_LOG_WARNING, "If you want to help, upload a sample "
"of this file to ftp://upload.ffmpeg.org/MPlayer/incoming/ " "of this file to ftp://upload.ffmpeg.org/MPlayer/incoming/ "
"and contact the ffmpeg-devel mailing list.\n"); "and contact the ffmpeg-devel mailing list.\n");
va_end(argument_list);
} }
static AVHWAccel *first_hwaccel = NULL; static AVHWAccel *first_hwaccel = NULL;
@@ -1345,8 +1357,7 @@ void ff_thread_await_progress(AVFrame *f, int progress, int field)
#endif #endif
#if LIBAVCODEC_VERSION_MAJOR < 53 #if FF_API_THREAD_INIT
int avcodec_thread_init(AVCodecContext *s, int thread_count) int avcodec_thread_init(AVCodecContext *s, int thread_count)
{ {
s->thread_count = thread_count; s->thread_count = thread_count;

View File

@@ -52,7 +52,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
} }
if(avpkt->size > avctx->width * avctx->height * 8 / 3){ if(avpkt->size > avctx->width * avctx->height * 8 / 3){
av_log(avctx, AV_LOG_ERROR, "Probably padded data, need sample!\n"); av_log_ask_for_sample(avctx, "Probably padded data\n");
} }
pic->reference= 0; pic->reference= 0;

View File

@@ -21,8 +21,8 @@
#define AVCODEC_VERSION_H #define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 52 #define LIBAVCODEC_VERSION_MAJOR 52
#define LIBAVCODEC_VERSION_MINOR 119 #define LIBAVCODEC_VERSION_MINOR 120
#define LIBAVCODEC_VERSION_MICRO 1 #define LIBAVCODEC_VERSION_MICRO 0
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \ LIBAVCODEC_VERSION_MINOR, \
@@ -86,5 +86,11 @@
#ifndef FF_API_REQUEST_CHANNELS #ifndef FF_API_REQUEST_CHANNELS
#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 54) #define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 54)
#endif #endif
#ifndef FF_API_OPT_H
#define FF_API_OPT_H (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_THREAD_INIT
#define FF_API_THREAD_INIT (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#endif /* AVCODEC_VERSION_H */ #endif /* AVCODEC_VERSION_H */

View File

@@ -539,7 +539,7 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
rangemax = (1 << rangebits); rangemax = (1 << rangebits);
if (rangemax > vc->blocksize[1] / 2) { if (rangemax > vc->blocksize[1] / 2) {
av_log(vc->avccontext, AV_LOG_ERROR, av_log(vc->avccontext, AV_LOG_ERROR,
"Floor value is too large for blocksize: %d (%d)\n", "Floor value is too large for blocksize: %"PRIuFAST32" (%"PRIuFAST32")\n",
rangemax, vc->blocksize[1] / 2); rangemax, vc->blocksize[1] / 2);
return -1; return -1;
} }

View File

@@ -464,8 +464,6 @@ static void vqa_decode_chunk(VqaContext *s)
switch (s->vqa_version) { switch (s->vqa_version) {
case 1: case 1:
/* still need sample media for this case (only one game, "Legend of
* Kyrandia III : Malcolm's Revenge", is known to use this version) */
lobyte = s->decode_buffer[lobytes * 2]; lobyte = s->decode_buffer[lobytes * 2];
hibyte = s->decode_buffer[(lobytes * 2) + 1]; hibyte = s->decode_buffer[(lobytes * 2) + 1];
vector_index = ((hibyte << 8) | lobyte) >> 3; vector_index = ((hibyte << 8) | lobyte) >> 3;

View File

@@ -96,11 +96,13 @@ static int decode_frame(AVCodecContext *avctx,
else { else {
l->shift = 8 - (buf[2] >> 4); l->shift = 8 - (buf[2] >> 4);
if (l->shift > 4) { if (l->shift > 4) {
av_log(avctx, AV_LOG_ERROR, "Unknown WNV1 frame header value %i, please upload file for study\n", buf[2] >> 4); av_log_ask_for_sample(avctx, "Unknown WNV1 frame header value %i\n",
buf[2] >> 4);
l->shift = 4; l->shift = 4;
} }
if (l->shift < 1) { if (l->shift < 1) {
av_log(avctx, AV_LOG_ERROR, "Unknown WNV1 frame header value %i, please upload file for study\n", buf[2] >> 4); av_log_ask_for_sample(avctx, "Unknown WNV1 frame header value %i\n",
buf[2] >> 4);
l->shift = 1; l->shift = 1;
} }
} }

View File

@@ -26,6 +26,7 @@
#define _BSD_SOURCE 1 #define _BSD_SOURCE 1
#define _NETBSD_SOURCE #define _NETBSD_SOURCE
#define _XOPEN_SOURCE 600
#include "libavformat/avformat.h" #include "libavformat/avformat.h"
#if HAVE_DEV_BKTR_IOCTL_METEOR_H && HAVE_DEV_BKTR_IOCTL_BT848_H #if HAVE_DEV_BKTR_IOCTL_METEOR_H && HAVE_DEV_BKTR_IOCTL_BT848_H

View File

@@ -26,7 +26,7 @@
#include "libavutil/samplefmt.h" #include "libavutil/samplefmt.h"
#define LIBAVFILTER_VERSION_MAJOR 1 #define LIBAVFILTER_VERSION_MAJOR 1
#define LIBAVFILTER_VERSION_MINOR 79 #define LIBAVFILTER_VERSION_MINOR 77
#define LIBAVFILTER_VERSION_MICRO 0 #define LIBAVFILTER_VERSION_MICRO 0
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \

View File

@@ -116,7 +116,7 @@ static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
const_values[3]=y; const_values[3]=y;
for(x=0; x<w; x++){ for(x=0; x<w; x++){
const_values[2]=x; const_values[2]=x;
dst[x + y * dst_stride] = av_eval_expr(vf->priv->e[plane], dst[x + y * dst_stride] = av_expr_eval(vf->priv->e[plane],
const_values, vf); const_values, vf);
} }
} }
@@ -176,7 +176,7 @@ static int vf_open(vf_instance_t *vf, char *args){
plane==0 ? lum : (plane==1 ? cb : cr), plane==0 ? lum : (plane==1 ? cb : cr),
NULL NULL
}; };
res = av_parse_expr(&vf->priv->e[plane], eq[plane], const_names, NULL, NULL, func2_names, func2, 0, NULL); res = av_expr_parse(&vf->priv->e[plane], eq[plane], const_names, NULL, NULL, func2_names, func2, 0, NULL);
if (res < 0) { if (res < 0) {
mp_msg(MSGT_VFILTER, MSGL_ERR, "geq: error loading equation `%s'\n", eq[plane]); mp_msg(MSGT_VFILTER, MSGL_ERR, "geq: error loading equation `%s'\n", eq[plane]);

View File

@@ -69,7 +69,7 @@ static int config(struct vf_instance *vf,
double temp_val; double temp_val;
int res; int res;
res= av_parse_and_eval_expr(&temp_val, vf->priv->eq, const_names, const_values, NULL, NULL, NULL, NULL, NULL, 0, NULL); res= av_expr_parse_and_eval(&temp_val, vf->priv->eq, const_names, const_values, NULL, NULL, NULL, NULL, NULL, 0, NULL);
if (res < 0){ if (res < 0){
mp_msg(MSGT_VFILTER, MSGL_ERR, "qp: Error evaluating \"%s\" \n", vf->priv->eq); mp_msg(MSGT_VFILTER, MSGL_ERR, "qp: Error evaluating \"%s\" \n", vf->priv->eq);

View File

@@ -867,7 +867,6 @@ static void end_frame(AVFilterLink *inlink)
{ {
MPContext *m = inlink->dst->priv; MPContext *m = inlink->dst->priv;
AVFilterBufferRef *inpic = inlink->cur_buf; AVFilterBufferRef *inpic = inlink->cur_buf;
AVFilterLink *outlink = inlink->dst->outputs[0];
int i; int i;
double pts= MP_NOPTS_VALUE; double pts= MP_NOPTS_VALUE;
mp_image_t* mpi = new_mp_image(inpic->video->w, inpic->video->h); mp_image_t* mpi = new_mp_image(inpic->video->w, inpic->video->h);

View File

@@ -25,6 +25,8 @@
*/ */
#include "avfilter.h" #include "avfilter.h"
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "libavutil/colorspace.h" #include "libavutil/colorspace.h"
#include "libavutil/avassert.h" #include "libavutil/avassert.h"
@@ -32,6 +34,38 @@
#include "libavutil/parseutils.h" #include "libavutil/parseutils.h"
#include "drawutils.h" #include "drawutils.h"
static const char *var_names[] = {
"PI",
"PHI",
"E",
"in_w", "iw",
"in_h", "ih",
"out_w", "ow",
"out_h", "oh",
"x",
"y",
"a",
"hsub",
"vsub",
NULL
};
enum var_name {
VAR_PI,
VAR_PHI,
VAR_E,
VAR_IN_W, VAR_IW,
VAR_IN_H, VAR_IH,
VAR_OUT_W, VAR_OW,
VAR_OUT_H, VAR_OH,
VAR_X,
VAR_Y,
VAR_A,
VAR_HSUB,
VAR_VSUB,
VARS_NB
};
static int query_formats(AVFilterContext *ctx) static int query_formats(AVFilterContext *ctx)
{ {
static const enum PixelFormat pix_fmts[] = { static const enum PixelFormat pix_fmts[] = {
@@ -58,6 +92,11 @@ typedef struct {
int x, y; ///< offsets of the input area with respect to the padded area int x, y; ///< offsets of the input area with respect to the padded area
int in_w, in_h; ///< width and height for the padded input video, which has to be aligned to the chroma values in order to avoid chroma issues int in_w, in_h; ///< width and height for the padded input video, which has to be aligned to the chroma values in order to avoid chroma issues
char w_expr[256]; ///< width expression string
char h_expr[256]; ///< height expression string
char x_expr[256]; ///< width expression string
char y_expr[256]; ///< height expression string
uint8_t color[4]; ///< color expressed either in YUVA or RGBA colorspace for the padding area uint8_t color[4]; ///< color expressed either in YUVA or RGBA colorspace for the padding area
uint8_t *line[4]; uint8_t *line[4];
int line_step[4]; int line_step[4];
@@ -70,18 +109,18 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
PadContext *pad = ctx->priv; PadContext *pad = ctx->priv;
char color_string[128] = "black"; char color_string[128] = "black";
av_strlcpy(pad->w_expr, "iw", sizeof(pad->w_expr));
av_strlcpy(pad->h_expr, "ih", sizeof(pad->h_expr));
av_strlcpy(pad->x_expr, "0" , sizeof(pad->w_expr));
av_strlcpy(pad->y_expr, "0" , sizeof(pad->h_expr));
if (args) if (args)
sscanf(args, "%d:%d:%d:%d:%s", &pad->w, &pad->h, &pad->x, &pad->y, color_string); sscanf(args, "%255[^:]:%255[^:]:%255[^:]:%255[^:]:%255s",
pad->w_expr, pad->h_expr, pad->x_expr, pad->y_expr, color_string);
if (av_parse_color(pad->color, color_string, -1, ctx) < 0) if (av_parse_color(pad->color, color_string, -1, ctx) < 0)
return AVERROR(EINVAL); return AVERROR(EINVAL);
/* sanity check params */
if (pad->w < 0 || pad->h < 0) {
av_log(ctx, AV_LOG_ERROR, "Negative size values are not acceptable.\n");
return AVERROR(EINVAL);
}
return 0; return 0;
} }
@@ -102,11 +141,64 @@ static int config_input(AVFilterLink *inlink)
PadContext *pad = ctx->priv; PadContext *pad = ctx->priv;
const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format]; const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format];
uint8_t rgba_color[4]; uint8_t rgba_color[4];
int is_packed_rgba; int ret, is_packed_rgba;
double var_values[VARS_NB], res;
char *expr;
pad->hsub = pix_desc->log2_chroma_w; pad->hsub = pix_desc->log2_chroma_w;
pad->vsub = pix_desc->log2_chroma_h; pad->vsub = pix_desc->log2_chroma_h;
var_values[VAR_PI] = M_PI;
var_values[VAR_PHI] = M_PHI;
var_values[VAR_E] = M_E;
var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
var_values[VAR_A] = (float) inlink->w / inlink->h;
var_values[VAR_HSUB] = 1<<pad->hsub;
var_values[VAR_VSUB] = 2<<pad->vsub;
/* evaluate width and height */
av_expr_parse_and_eval(&res, (expr = pad->w_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx);
pad->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
if ((ret = av_expr_parse_and_eval(&res, (expr = pad->h_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto eval_fail;
pad->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
/* evaluate the width again, as it may depend on the evaluated output height */
if ((ret = av_expr_parse_and_eval(&res, (expr = pad->w_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto eval_fail;
pad->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
/* evaluate x and y */
av_expr_parse_and_eval(&res, (expr = pad->x_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx);
pad->x = var_values[VAR_X] = res;
if ((ret = av_expr_parse_and_eval(&res, (expr = pad->y_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto eval_fail;
pad->y = var_values[VAR_Y] = res;
/* evaluate x again, as it may depend on the evaluated y value */
if ((ret = av_expr_parse_and_eval(&res, (expr = pad->x_expr),
var_names, var_values,
NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
goto eval_fail;
pad->x = var_values[VAR_X] = res;
/* sanity check params */
if (pad->w < 0 || pad->h < 0 || pad->x < 0 || pad->y < 0) {
av_log(ctx, AV_LOG_ERROR, "Negative values are not acceptable.\n");
return AVERROR(EINVAL);
}
if (!pad->w) if (!pad->w)
pad->w = inlink->w; pad->w = inlink->w;
if (!pad->h) if (!pad->h)
@@ -140,6 +232,12 @@ static int config_input(AVFilterLink *inlink)
} }
return 0; return 0;
eval_fail:
av_log(NULL, AV_LOG_ERROR,
"Error when evaluating the expression '%s'\n", expr);
return ret;
} }
static int config_output(AVFilterLink *outlink) static int config_output(AVFilterLink *outlink)

View File

@@ -24,7 +24,7 @@ OBJS-$(CONFIG_AC3_DEMUXER) += ac3dec.o rawdec.o
OBJS-$(CONFIG_AC3_MUXER) += rawenc.o OBJS-$(CONFIG_AC3_MUXER) += rawenc.o
OBJS-$(CONFIG_ADTS_MUXER) += adtsenc.o OBJS-$(CONFIG_ADTS_MUXER) += adtsenc.o
OBJS-$(CONFIG_AEA_DEMUXER) += aea.o pcm.o OBJS-$(CONFIG_AEA_DEMUXER) += aea.o pcm.o
OBJS-$(CONFIG_AIFF_DEMUXER) += aiffdec.o riff.o pcm.o OBJS-$(CONFIG_AIFF_DEMUXER) += aiffdec.o riff.o pcm.o caf.o
OBJS-$(CONFIG_AIFF_MUXER) += aiffenc.o riff.o OBJS-$(CONFIG_AIFF_MUXER) += aiffenc.o riff.o
OBJS-$(CONFIG_AMR_DEMUXER) += amr.o OBJS-$(CONFIG_AMR_DEMUXER) += amr.o
OBJS-$(CONFIG_AMR_MUXER) += amr.o OBJS-$(CONFIG_AMR_MUXER) += amr.o
@@ -164,6 +164,7 @@ OBJS-$(CONFIG_NUT_DEMUXER) += nutdec.o nut.o riff.o
OBJS-$(CONFIG_NUT_MUXER) += nutenc.o nut.o riff.o OBJS-$(CONFIG_NUT_MUXER) += nutenc.o nut.o riff.o
OBJS-$(CONFIG_NUV_DEMUXER) += nuv.o riff.o OBJS-$(CONFIG_NUV_DEMUXER) += nuv.o riff.o
OBJS-$(CONFIG_OGG_DEMUXER) += oggdec.o \ OBJS-$(CONFIG_OGG_DEMUXER) += oggdec.o \
oggparsecelt.o \
oggparsedirac.o \ oggparsedirac.o \
oggparseflac.o \ oggparseflac.o \
oggparseogm.o \ oggparseogm.o \
@@ -316,6 +317,7 @@ OBJS+= avio.o aviobuf.o
OBJS-$(CONFIG_APPLEHTTP_PROTOCOL) += applehttpproto.o OBJS-$(CONFIG_APPLEHTTP_PROTOCOL) += applehttpproto.o
OBJS-$(CONFIG_CONCAT_PROTOCOL) += concat.o OBJS-$(CONFIG_CONCAT_PROTOCOL) += concat.o
OBJS-$(CONFIG_CRYPTO_PROTOCOL) += crypto.o
OBJS-$(CONFIG_FILE_PROTOCOL) += file.o OBJS-$(CONFIG_FILE_PROTOCOL) += file.o
OBJS-$(CONFIG_GOPHER_PROTOCOL) += gopher.o OBJS-$(CONFIG_GOPHER_PROTOCOL) += gopher.o
OBJS-$(CONFIG_HTTP_PROTOCOL) += http.o httpauth.o OBJS-$(CONFIG_HTTP_PROTOCOL) += http.o httpauth.o

View File

@@ -23,6 +23,7 @@
#include "avformat.h" #include "avformat.h"
#include "pcm.h" #include "pcm.h"
#include "aiff.h" #include "aiff.h"
#include "caf.h"
#define AIFF 0 #define AIFF 0
#define AIFF_C_VERSION1 0xA2805140 #define AIFF_C_VERSION1 0xA2805140
@@ -253,6 +254,11 @@ static int aiff_read_header(AVFormatContext *s,
st->codec->extradata_size = size; st->codec->extradata_size = size;
avio_read(pb, st->codec->extradata, size); avio_read(pb, st->codec->extradata, size);
break; break;
case MKTAG('C','H','A','N'):
if (size < 12)
return AVERROR_INVALIDDATA;
ff_read_chan_chunk(s, size, st->codec);
break;
default: /* Jump */ default: /* Jump */
if (size & 1) /* Always even aligned */ if (size & 1) /* Always even aligned */
size++; size++;

View File

@@ -237,6 +237,7 @@ void av_register_all(void)
/* protocols */ /* protocols */
REGISTER_PROTOCOL (APPLEHTTP, applehttp); REGISTER_PROTOCOL (APPLEHTTP, applehttp);
REGISTER_PROTOCOL (CONCAT, concat); REGISTER_PROTOCOL (CONCAT, concat);
REGISTER_PROTOCOL (CRYPTO, crypto);
REGISTER_PROTOCOL (FILE, file); REGISTER_PROTOCOL (FILE, file);
REGISTER_PROTOCOL (GOPHER, gopher); REGISTER_PROTOCOL (GOPHER, gopher);
REGISTER_PROTOCOL (HTTP, http); REGISTER_PROTOCOL (HTTP, http);

View File

@@ -27,6 +27,8 @@
#define _XOPEN_SOURCE 600 #define _XOPEN_SOURCE 600
#include "libavutil/avstring.h" #include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "avformat.h" #include "avformat.h"
#include "internal.h" #include "internal.h"
#include <unistd.h> #include <unistd.h>
@@ -47,9 +49,17 @@
* one anonymous toplevel variant for this, to maintain the structure. * one anonymous toplevel variant for this, to maintain the structure.
*/ */
enum KeyType {
KEY_NONE,
KEY_AES_128,
};
struct segment { struct segment {
int duration; int duration;
char url[MAX_URL_SIZE]; char url[MAX_URL_SIZE];
char key[MAX_URL_SIZE];
enum KeyType key_type;
uint8_t iv[16];
}; };
/* /*
@@ -77,6 +87,9 @@ struct variant {
int needed, cur_needed; int needed, cur_needed;
int cur_seq_no; int cur_seq_no;
int64_t last_load_time; int64_t last_load_time;
char key_url[MAX_URL_SIZE];
uint8_t key[16];
}; };
typedef struct AppleHTTPContext { typedef struct AppleHTTPContext {
@@ -160,10 +173,35 @@ static void handle_variant_args(struct variant_info *info, const char *key,
} }
} }
struct key_info {
char uri[MAX_URL_SIZE];
char method[10];
char iv[35];
};
static void handle_key_args(struct key_info *info, const char *key,
int key_len, char **dest, int *dest_len)
{
if (!strncmp(key, "METHOD=", key_len)) {
*dest = info->method;
*dest_len = sizeof(info->method);
} else if (!strncmp(key, "URI=", key_len)) {
*dest = info->uri;
*dest_len = sizeof(info->uri);
} else if (!strncmp(key, "IV=", key_len)) {
*dest = info->iv;
*dest_len = sizeof(info->iv);
}
}
static int parse_playlist(AppleHTTPContext *c, const char *url, static int parse_playlist(AppleHTTPContext *c, const char *url,
struct variant *var, AVIOContext *in) struct variant *var, AVIOContext *in)
{ {
int ret = 0, duration = 0, is_segment = 0, is_variant = 0, bandwidth = 0; int ret = 0, duration = 0, is_segment = 0, is_variant = 0, bandwidth = 0;
enum KeyType key_type = KEY_NONE;
uint8_t iv[16] = "";
int has_iv = 0;
char key[MAX_URL_SIZE];
char line[1024]; char line[1024];
const char *ptr; const char *ptr;
int close_in = 0; int close_in = 0;
@@ -192,6 +230,19 @@ static int parse_playlist(AppleHTTPContext *c, const char *url,
ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_variant_args, ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_variant_args,
&info); &info);
bandwidth = atoi(info.bandwidth); bandwidth = atoi(info.bandwidth);
} else if (av_strstart(line, "#EXT-X-KEY:", &ptr)) {
struct key_info info = {{0}};
ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_key_args,
&info);
key_type = KEY_NONE;
has_iv = 0;
if (!strcmp(info.method, "AES-128"))
key_type = KEY_AES_128;
if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) {
ff_hex_to_data(iv, info.iv + 2);
has_iv = 1;
}
av_strlcpy(key, info.uri, sizeof(key));
} else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) { } else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) {
if (!var) { if (!var) {
var = new_variant(c, 0, url, NULL); var = new_variant(c, 0, url, NULL);
@@ -242,6 +293,15 @@ static int parse_playlist(AppleHTTPContext *c, const char *url,
goto fail; goto fail;
} }
seg->duration = duration; seg->duration = duration;
seg->key_type = key_type;
if (has_iv) {
memcpy(seg->iv, iv, sizeof(iv));
} else {
int seq = var->start_seq_no + var->n_segments;
memset(seg->iv, 0, sizeof(seg->iv));
AV_WB32(seg->iv + 12, seq);
}
ff_make_absolute_url(seg->key, sizeof(seg->key), url, key);
ff_make_absolute_url(seg->url, sizeof(seg->url), url, line); ff_make_absolute_url(seg->url, sizeof(seg->url), url, line);
dynarray_add(&var->segments, &var->n_segments, seg); dynarray_add(&var->segments, &var->n_segments, seg);
is_segment = 0; is_segment = 0;
@@ -257,6 +317,50 @@ fail:
return ret; return ret;
} }
static int open_input(struct variant *var)
{
struct segment *seg = var->segments[var->cur_seq_no - var->start_seq_no];
if (seg->key_type == KEY_NONE) {
return ffurl_open(&var->input, seg->url, AVIO_RDONLY);
} else if (seg->key_type == KEY_AES_128) {
char iv[33], key[33], url[MAX_URL_SIZE];
int ret;
if (strcmp(seg->key, var->key_url)) {
URLContext *uc;
if (ffurl_open(&uc, seg->key, AVIO_RDONLY) == 0) {
if (ffurl_read_complete(uc, var->key, sizeof(var->key))
!= sizeof(var->key)) {
av_log(NULL, AV_LOG_ERROR, "Unable to read key file %s\n",
seg->key);
}
ffurl_close(uc);
} else {
av_log(NULL, AV_LOG_ERROR, "Unable to open key file %s\n",
seg->key);
}
av_strlcpy(var->key_url, seg->key, sizeof(var->key_url));
}
ff_data_to_hex(iv, seg->iv, sizeof(seg->iv), 0);
ff_data_to_hex(key, var->key, sizeof(var->key), 0);
iv[32] = key[32] = '\0';
if (strstr(seg->url, "://"))
snprintf(url, sizeof(url), "crypto+%s", seg->url);
else
snprintf(url, sizeof(url), "crypto:%s", seg->url);
if ((ret = ffurl_alloc(&var->input, url, AVIO_RDONLY)) < 0)
return ret;
av_set_string3(var->input->priv_data, "key", key, 0, NULL);
av_set_string3(var->input->priv_data, "iv", iv, 0, NULL);
if ((ret = ffurl_connect(var->input)) < 0) {
ffurl_close(var->input);
var->input = NULL;
return ret;
}
return 0;
}
return AVERROR(ENOSYS);
}
static int read_data(void *opaque, uint8_t *buf, int buf_size) static int read_data(void *opaque, uint8_t *buf, int buf_size)
{ {
struct variant *v = opaque; struct variant *v = opaque;
@@ -367,6 +471,7 @@ static int applehttp_read_header(AVFormatContext *s, AVFormatParameters *ap)
for (i = 0; i < c->n_variants; i++) { for (i = 0; i < c->n_variants; i++) {
struct variant *v = c->variants[i]; struct variant *v = c->variants[i];
AVInputFormat *in_fmt = NULL; AVInputFormat *in_fmt = NULL;
char bitrate_str[20];
if (v->n_segments == 0) if (v->n_segments == 0)
continue; continue;
@@ -393,6 +498,7 @@ static int applehttp_read_header(AVFormatContext *s, AVFormatParameters *ap)
if (ret < 0) if (ret < 0)
goto fail; goto fail;
v->stream_offset = stream_offset; v->stream_offset = stream_offset;
snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);
/* Create new AVStreams for each stream in this variant */ /* Create new AVStreams for each stream in this variant */
for (j = 0; j < v->ctx->nb_streams; j++) { for (j = 0; j < v->ctx->nb_streams; j++) {
AVStream *st = av_new_stream(s, i); AVStream *st = av_new_stream(s, i);
@@ -401,6 +507,9 @@ static int applehttp_read_header(AVFormatContext *s, AVFormatParameters *ap)
goto fail; goto fail;
} }
avcodec_copy_context(st->codec, v->ctx->streams[j]->codec); avcodec_copy_context(st->codec, v->ctx->streams[j]->codec);
if (v->bandwidth)
av_metadata_set2(&st->metadata, "variant_bitrate", bitrate_str,
0);
} }
stream_offset += v->ctx->nb_streams; stream_offset += v->ctx->nb_streams;
} }

View File

@@ -1258,27 +1258,6 @@ static int asf_read_seek(AVFormatContext *s, int stream_index, int64_t pts, int
/* find the position */ /* find the position */
pos = st->index_entries[index].pos; pos = st->index_entries[index].pos;
// various attempts to find key frame have failed so far
// asf_reset_header(s);
// avio_seek(s->pb, pos, SEEK_SET);
// key_pos = pos;
// for(i=0;i<16;i++){
// pos = avio_tell(s->pb);
// if (av_read_frame(s, &pkt) < 0){
// av_log(s, AV_LOG_INFO, "seek failed\n");
// return -1;
// }
// asf_st = s->streams[stream_index]->priv_data;
// pos += st->parser->frame_offset;
//
// if (pkt.size > b) {
// b = pkt.size;
// key_pos = pos;
// }
//
// av_free_packet(&pkt);
// }
/* do the seek */ /* do the seek */
av_log(s, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos); av_log(s, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos);
avio_seek(s->pb, pos, SEEK_SET); avio_seek(s->pb, pos, SEEK_SET);

View File

@@ -104,6 +104,7 @@ struct AVFormatContext;
* service_provider -- name of the service provider in broadcasting. * service_provider -- name of the service provider in broadcasting.
* title -- name of the work. * title -- name of the work.
* track -- number of this work in the set, can be in form current/total. * track -- number of this work in the set, can be in form current/total.
* variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of
*/ */
#define AV_METADATA_MATCH_CASE 1 #define AV_METADATA_MATCH_CASE 1
@@ -788,6 +789,7 @@ typedef struct AVFormatContext {
#define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container #define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container
#define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled #define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled
#define AVFMT_FLAG_RTP_HINT 0x0040 ///< Add RTP hinting to the output file #define AVFMT_FLAG_RTP_HINT 0x0040 ///< Add RTP hinting to the output file
#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down)
int loop_input; int loop_input;

View File

@@ -54,6 +54,8 @@ typedef struct AVIStream {
AVFormatContext *sub_ctx; AVFormatContext *sub_ctx;
AVPacket sub_pkt; AVPacket sub_pkt;
uint8_t *sub_buffer; uint8_t *sub_buffer;
int64_t seek_pos;
} AVIStream; } AVIStream;
typedef struct { typedef struct {
@@ -727,7 +729,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
if(!avi->index_loaded && pb->seekable) if(!avi->index_loaded && pb->seekable)
avi_load_index(s); avi_load_index(s);
avi->index_loaded = 1; avi->index_loaded = 1;
avi->non_interleaved |= guess_ni_flag(s); avi->non_interleaved |= guess_ni_flag(s) | (s->flags & AVFMT_FLAG_SORT_DTS);
for(i=0; i<s->nb_streams; i++){ for(i=0; i<s->nb_streams; i++){
AVStream *st = s->streams[i]; AVStream *st = s->streams[i];
if(st->nb_index_entries) if(st->nb_index_entries)
@@ -988,6 +990,12 @@ resync:
ast->packet_size= 0; ast->packet_size= 0;
} }
if(!avi->non_interleaved && ast->seek_pos > pkt->pos){
av_free_packet(pkt);
goto resync;
}
ast->seek_pos= 0;
return size; return size;
} }
@@ -1253,7 +1261,7 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
AVIContext *avi = s->priv_data; AVIContext *avi = s->priv_data;
AVStream *st; AVStream *st;
int i, index; int i, index;
int64_t pos; int64_t pos, pos_min;
AVIStream *ast; AVIStream *ast;
if (!avi->index_loaded) { if (!avi->index_loaded) {
@@ -1290,6 +1298,7 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
return 0; return 0;
} }
pos_min= pos;
for(i = 0; i < s->nb_streams; i++) { for(i = 0; i < s->nb_streams; i++) {
AVStream *st2 = s->streams[i]; AVStream *st2 = s->streams[i];
AVIStream *ast2 = st2->priv_data; AVIStream *ast2 = st2->priv_data;
@@ -1313,21 +1322,13 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
flags | AVSEEK_FLAG_BACKWARD); flags | AVSEEK_FLAG_BACKWARD);
if(index<0) if(index<0)
index=0; index=0;
ast2->seek_pos= st2->index_entries[index].pos;
if(!avi->non_interleaved){ pos_min= FFMIN(pos_min,ast2->seek_pos);
while(index>0 && st2->index_entries[index].pos > pos)
index--;
while(index+1 < st2->nb_index_entries && st2->index_entries[index].pos < pos)
index++;
}
// av_log(s, AV_LOG_DEBUG, "%"PRId64" %d %"PRId64"\n", timestamp, index, st2->index_entries[index].timestamp);
/* extract the current frame number */
ast2->frame_offset = st2->index_entries[index].timestamp; ast2->frame_offset = st2->index_entries[index].timestamp;
} }
/* do the seek */ /* do the seek */
avio_seek(s->pb, pos, SEEK_SET); avio_seek(s->pb, pos_min, SEEK_SET);
avi->stream_index= -1; avi->stream_index= -1;
return 0; return 0;
} }

View File

@@ -334,7 +334,7 @@ int ffurl_write(URLContext *h, const unsigned char *buf, int size)
if (h->max_packet_size && size > h->max_packet_size) if (h->max_packet_size && size > h->max_packet_size)
return AVERROR(EIO); return AVERROR(EIO);
return retry_transfer_wrapper(h, buf, size, size, h->prot->url_write); return retry_transfer_wrapper(h, buf, size, size, (void*)h->prot->url_write);
} }
int64_t ffurl_seek(URLContext *h, int64_t pos, int whence) int64_t ffurl_seek(URLContext *h, int64_t pos, int whence)

View File

@@ -855,7 +855,7 @@ int ffio_fdopen(AVIOContext **s, URLContext *h)
if (ffio_init_context(*s, buffer, buffer_size, if (ffio_init_context(*s, buffer, buffer_size,
(h->flags & AVIO_WRONLY || h->flags & AVIO_RDWR), h, (h->flags & AVIO_WRONLY || h->flags & AVIO_RDWR), h,
ffurl_read, ffurl_write, ffurl_seek) < 0) { (void*)ffurl_read, (void*)ffurl_write, (void*)ffurl_seek) < 0) {
av_free(buffer); av_free(buffer);
av_freep(s); av_freep(s);
return AVERROR(EIO); return AVERROR(EIO);

View File

@@ -56,3 +56,61 @@ const AVCodecTag ff_codec_caf_tags[] = {
/*{ MPEG4TwinVQ MKBETAG('t','w','v','q') },*/ /*{ MPEG4TwinVQ MKBETAG('t','w','v','q') },*/
{ CODEC_ID_NONE, 0 }, { CODEC_ID_NONE, 0 },
}; };
typedef struct CafChannelLayout {
int64_t channel_layout;
uint32_t layout_tag;
} CafChannelLayout;
static const CafChannelLayout caf_channel_layout[] = {
{ AV_CH_LAYOUT_MONO, (100<<16) | 1}, //< kCAFChannelLayoutTag_Mono
{ AV_CH_LAYOUT_STEREO, (101<<16) | 2}, //< kCAFChannelLayoutTag_Stereo
{ AV_CH_LAYOUT_STEREO, (102<<16) | 2}, //< kCAFChannelLayoutTag_StereoHeadphones
{ AV_CH_LAYOUT_2_1, (131<<16) | 3}, //< kCAFChannelLayoutTag_ITU_2_1
{ AV_CH_LAYOUT_2_2, (132<<16) | 4}, //< kCAFChannelLayoutTag_ITU_2_2
{ AV_CH_LAYOUT_QUAD, (108<<16) | 4}, //< kCAFChannelLayoutTag_Quadraphonic
{ AV_CH_LAYOUT_SURROUND, (113<<16) | 3}, //< kCAFChannelLayoutTag_MPEG_3_0_A
{ AV_CH_LAYOUT_4POINT0, (115<<16) | 4}, //< kCAFChannelLayoutTag_MPEG_4_0_A
{ AV_CH_LAYOUT_5POINT0_BACK, (117<<16) | 5}, //< kCAFChannelLayoutTag_MPEG_5_0_A
{ AV_CH_LAYOUT_5POINT0, (117<<16) | 5}, //< kCAFChannelLayoutTag_MPEG_5_0_A
{ AV_CH_LAYOUT_5POINT1_BACK, (121<<16) | 6}, //< kCAFChannelLayoutTag_MPEG_5_1_A
{ AV_CH_LAYOUT_5POINT1, (121<<16) | 6}, //< kCAFChannelLayoutTag_MPEG_5_1_A
{ AV_CH_LAYOUT_7POINT1, (128<<16) | 8}, //< kCAFChannelLayoutTag_MPEG_7_1_C
{ AV_CH_LAYOUT_7POINT1_WIDE, (126<<16) | 8}, //< kCAFChannelLayoutTag_MPEG_7_1_A
{ AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY, (133<<16) | 3}, //< kCAFChannelLayoutTag_DVD_4
{ AV_CH_LAYOUT_2_1|AV_CH_LOW_FREQUENCY, (134<<16) | 4}, //< kCAFChannelLayoutTag_DVD_5
{ AV_CH_LAYOUT_2_2|AV_CH_LOW_FREQUENCY, (135<<16) | 4}, //< kCAFChannelLayoutTag_DVD_6
{ AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY, (136<<16) | 4}, //< kCAFChannelLayoutTag_DVD_10
{ AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY, (137<<16) | 5}, //< kCAFChannelLayoutTag_DVD_11
{ 0, 0},
};
void ff_read_chan_chunk(AVFormatContext *s, int64_t size, AVCodecContext *codec)
{
uint32_t layout_tag;
AVIOContext *pb = s->pb;
const CafChannelLayout *caf_layout = caf_channel_layout;
if (size != 12) {
// Channel descriptions not implemented
av_log_ask_for_sample(s, "Unimplemented channel layout.\n");
avio_skip(pb, size);
return;
}
layout_tag = avio_rb32(pb);
if (layout_tag == 0x10000) { //< kCAFChannelLayoutTag_UseChannelBitmap
codec->channel_layout = avio_rb32(pb);
avio_skip(pb, 4);
return;
}
while (caf_layout->channel_layout) {
if (layout_tag == caf_layout->layout_tag) {
codec->channel_layout = caf_layout->channel_layout;
break;
}
caf_layout++;
}
if (!codec->channel_layout)
av_log(s, AV_LOG_WARNING, "Unknown channel layout.\n");
avio_skip(pb, 8);
}

View File

@@ -27,8 +27,11 @@
#ifndef AVFORMAT_CAF_H #ifndef AVFORMAT_CAF_H
#define AVFORMAT_CAF_H #define AVFORMAT_CAF_H
#include "avformat.h"
#include "internal.h" #include "internal.h"
extern const AVCodecTag ff_codec_caf_tags[]; extern const AVCodecTag ff_codec_caf_tags[];
void ff_read_chan_chunk(AVFormatContext *s, int64_t size, AVCodecContext *codec);
#endif /* AVFORMAT_CAF_H */ #endif /* AVFORMAT_CAF_H */

View File

@@ -257,10 +257,16 @@ static int read_header(AVFormatContext *s,
read_info_chunk(s, size); read_info_chunk(s, size);
break; break;
case MKBETAG('c','h','a','n'):
if (size < 12)
return AVERROR_INVALIDDATA;
ff_read_chan_chunk(s, size, st->codec);
break;
default: default:
#define _(x) ((x) >= ' ' ? (x) : ' ') #define _(x) ((x) >= ' ' ? (x) : ' ')
av_log(s, AV_LOG_WARNING, "skipping CAF chunk: %08X (%c%c%c%c)\n", av_log(s, AV_LOG_WARNING, "skipping CAF chunk: %08X (%c%c%c%c), size %"PRId64"\n",
tag, _(tag>>24), _((tag>>16)&0xFF), _((tag>>8)&0xFF), _(tag&0xFF)); tag, _(tag>>24), _((tag>>16)&0xFF), _((tag>>8)&0xFF), _(tag&0xFF), size);
#undef _ #undef _
case MKBETAG('f','r','e','e'): case MKBETAG('f','r','e','e'):
if (size < 0) if (size < 0)

170
libavformat/crypto.c Normal file
View File

@@ -0,0 +1,170 @@
/*
* Decryption protocol handler
* Copyright (c) 2011 Martin Storsjo
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
#include "libavutil/aes.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "internal.h"
#include "url.h"
#define MAX_BUFFER_BLOCKS 150
#define BLOCKSIZE 16
typedef struct {
const AVClass *class;
URLContext *hd;
uint8_t inbuffer [BLOCKSIZE*MAX_BUFFER_BLOCKS],
outbuffer[BLOCKSIZE*MAX_BUFFER_BLOCKS];
uint8_t *outptr;
int indata, indata_used, outdata;
int eof;
uint8_t *key;
int keylen;
uint8_t *iv;
int ivlen;
struct AVAES *aes;
} CryptoContext;
#define OFFSET(x) offsetof(CryptoContext, x)
static const AVOption options[] = {
{"key", "AES decryption key", OFFSET(key), FF_OPT_TYPE_BINARY },
{"iv", "AES decryption initialization vector", OFFSET(iv), FF_OPT_TYPE_BINARY },
{ NULL }
};
static const AVClass crypto_class = {
"crypto", av_default_item_name, options, LIBAVUTIL_VERSION_INT
};
static int crypto_open(URLContext *h, const char *uri, int flags)
{
const char *nested_url;
int ret;
CryptoContext *c = h->priv_data;
if (!av_strstart(uri, "crypto+", &nested_url) &&
!av_strstart(uri, "crypto:", &nested_url)) {
av_log(h, AV_LOG_ERROR, "Unsupported url %s\n", uri);
ret = AVERROR(EINVAL);
goto err;
}
if (c->keylen < BLOCKSIZE || c->ivlen < BLOCKSIZE) {
av_log(h, AV_LOG_ERROR, "Key or IV not set\n");
ret = AVERROR(EINVAL);
goto err;
}
if (flags == AVIO_WRONLY) {
av_log(h, AV_LOG_ERROR, "Only decryption is supported currently\n");
ret = AVERROR(ENOSYS);
goto err;
}
if ((ret = ffurl_open(&c->hd, nested_url, AVIO_RDONLY)) < 0) {
av_log(h, AV_LOG_ERROR, "Unable to open input\n");
goto err;
}
c->aes = av_mallocz(av_aes_size);
if (!c->aes) {
ret = AVERROR(ENOMEM);
goto err;
}
av_aes_init(c->aes, c->key, 128, 1);
h->is_streamed = 1;
return 0;
err:
av_free(c->key);
av_free(c->iv);
return ret;
}
static int crypto_read(URLContext *h, uint8_t *buf, int size)
{
CryptoContext *c = h->priv_data;
int blocks;
retry:
if (c->outdata > 0) {
size = FFMIN(size, c->outdata);
memcpy(buf, c->outptr, size);
c->outptr += size;
c->outdata -= size;
return size;
}
// We avoid using the last block until we've found EOF,
// since we'll remove PKCS7 padding at the end. So make
// sure we've got at least 2 blocks, so we can decrypt
// at least one.
while (c->indata - c->indata_used < 2*BLOCKSIZE) {
int n = ffurl_read(c->hd, c->inbuffer + c->indata,
sizeof(c->inbuffer) - c->indata);
if (n <= 0) {
c->eof = 1;
break;
}
c->indata += n;
}
blocks = (c->indata - c->indata_used) / BLOCKSIZE;
if (!blocks)
return AVERROR_EOF;
if (!c->eof)
blocks--;
av_aes_crypt(c->aes, c->outbuffer, c->inbuffer + c->indata_used, blocks,
c->iv, 1);
c->outdata = BLOCKSIZE * blocks;
c->outptr = c->outbuffer;
c->indata_used += BLOCKSIZE * blocks;
if (c->indata_used >= sizeof(c->inbuffer)/2) {
memmove(c->inbuffer, c->inbuffer + c->indata_used,
c->indata - c->indata_used);
c->indata -= c->indata_used;
c->indata_used = 0;
}
if (c->eof) {
// Remove PKCS7 padding at the end
int padding = c->outbuffer[c->outdata - 1];
c->outdata -= padding;
}
goto retry;
}
static int crypto_close(URLContext *h)
{
CryptoContext *c = h->priv_data;
if (c->hd)
ffurl_close(c->hd);
av_freep(&c->aes);
av_freep(&c->key);
av_freep(&c->iv);
return 0;
}
URLProtocol ff_crypto_protocol = {
.name = "crypto",
.url_open = crypto_open,
.url_read = crypto_read,
.url_close = crypto_close,
.priv_data_size = sizeof(CryptoContext),
.priv_data_class = &crypto_class,
.flags = URL_PROTOCOL_FLAG_NESTED_SCHEME,
};

View File

@@ -39,7 +39,7 @@ static int cdata_probe(AVProbeData *p)
{ {
const uint8_t *b = p->buf; const uint8_t *b = p->buf;
if (b[0] == 0x04 && (b[1] == 0x00 || b[1] == 0x04 || b[1] == 0x0C)) if (b[0] == 0x04 && (b[1] == 0x00 || b[1] == 0x04 || b[1] == 0x0C || b[1] == 0x14))
return AVPROBE_SCORE_MAX/8; return AVPROBE_SCORE_MAX/8;
return 0; return 0;
} }
@@ -56,13 +56,14 @@ static int cdata_read_header(AVFormatContext *s, AVFormatParameters *ap)
case 0x0400: cdata->channels = 1; break; case 0x0400: cdata->channels = 1; break;
case 0x0404: cdata->channels = 2; break; case 0x0404: cdata->channels = 2; break;
case 0x040C: cdata->channels = 4; break; case 0x040C: cdata->channels = 4; break;
case 0x0414: cdata->channels = 6; break;
default: default:
av_log(s, AV_LOG_INFO, "unknown header 0x%04x\n", header); av_log(s, AV_LOG_INFO, "unknown header 0x%04x\n", header);
return -1; return -1;
}; };
sample_rate = avio_rb16(pb); sample_rate = avio_rb16(pb);
avio_skip(pb, 12); avio_skip(pb, (avio_r8(pb) & 0x20) ? 15 : 11);
st = av_new_stream(s, 0); st = av_new_stream(s, 0);
if (!st) if (!st)
@@ -72,6 +73,7 @@ static int cdata_read_header(AVFormatContext *s, AVFormatParameters *ap)
st->codec->codec_id = CODEC_ID_ADPCM_EA_XAS; st->codec->codec_id = CODEC_ID_ADPCM_EA_XAS;
st->codec->channels = cdata->channels; st->codec->channels = cdata->channels;
st->codec->sample_rate = sample_rate; st->codec->sample_rate = sample_rate;
st->codec->sample_fmt = AV_SAMPLE_FMT_S16;
av_set_pts_info(st, 64, 1, sample_rate); av_set_pts_info(st, 64, 1, sample_rate);
cdata->audio_pts = 0; cdata->audio_pts = 0;

View File

@@ -29,6 +29,7 @@
* http://wiki.multimedia.cx/index.php?title=IFF * http://wiki.multimedia.cx/index.php?title=IFF
*/ */
#include "libavcodec/bytestream.h"
#include "libavutil/intreadwrite.h" #include "libavutil/intreadwrite.h"
#include "avformat.h" #include "avformat.h"
@@ -40,6 +41,7 @@
#define ID_PBM MKTAG('P','B','M',' ') #define ID_PBM MKTAG('P','B','M',' ')
#define ID_ILBM MKTAG('I','L','B','M') #define ID_ILBM MKTAG('I','L','B','M')
#define ID_BMHD MKTAG('B','M','H','D') #define ID_BMHD MKTAG('B','M','H','D')
#define ID_CAMG MKTAG('C','A','M','G')
#define ID_CMAP MKTAG('C','M','A','P') #define ID_CMAP MKTAG('C','M','A','P')
#define ID_FORM MKTAG('F','O','R','M') #define ID_FORM MKTAG('F','O','R','M')
@@ -60,6 +62,16 @@
#define PACKET_SIZE 1024 #define PACKET_SIZE 1024
/**
* This number of bytes if added at the beginning of each AVPacket
* which contain additional information about video properties
* which has to be shared between demuxer and decoder.
* This number may change between frames, e.g. the demuxer might
* set it to smallest possible size of 2 to indicate that there's
* no extradata changing in this frame.
*/
#define IFF_EXTRA_VIDEO_SIZE 9
typedef enum { typedef enum {
COMP_NONE, COMP_NONE,
COMP_FIB, COMP_FIB,
@@ -76,6 +88,12 @@ typedef struct {
uint32_t body_size; uint32_t body_size;
uint32_t sent_bytes; uint32_t sent_bytes;
uint32_t audio_frame_count; uint32_t audio_frame_count;
unsigned compression; ///< delta compression method used
unsigned bpp; ///< bits per plane to decode (differs from bits_per_coded_sample if HAM)
unsigned ham; ///< 0 if non-HAM or number of hold bits (6 for bpp > 6, 4 otherwise)
unsigned flags; ///< 1 for EHB, 0 is no extra half darkening
unsigned transparency; ///< transparency color index in palette
unsigned masking; ///< masking method used
} IffDemuxContext; } IffDemuxContext;
@@ -126,8 +144,12 @@ static int iff_read_header(AVFormatContext *s,
IffDemuxContext *iff = s->priv_data; IffDemuxContext *iff = s->priv_data;
AVIOContext *pb = s->pb; AVIOContext *pb = s->pb;
AVStream *st; AVStream *st;
uint8_t *buf;
uint32_t chunk_id, data_size; uint32_t chunk_id, data_size;
int compression = -1; int compression = -1;
uint32_t screenmode = 0;
unsigned transparency = 0;
unsigned masking = 0; // no mask
st = av_new_stream(s, 0); st = av_new_stream(s, 0);
if (!st) if (!st)
@@ -171,12 +193,18 @@ static int iff_read_header(AVFormatContext *s,
st->codec->channels = (avio_rb32(pb) < 6) ? 1 : 2; st->codec->channels = (avio_rb32(pb) < 6) ? 1 : 2;
break; break;
case ID_CAMG:
if (data_size < 4)
return AVERROR_INVALIDDATA;
screenmode = avio_rb32(pb);
break;
case ID_CMAP: case ID_CMAP:
st->codec->extradata_size = data_size; st->codec->extradata_size = data_size + IFF_EXTRA_VIDEO_SIZE;
st->codec->extradata = av_malloc(data_size); st->codec->extradata = av_malloc(data_size + IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
if (!st->codec->extradata) if (!st->codec->extradata)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
if (avio_read(pb, st->codec->extradata, data_size) < 0) if (avio_read(pb, st->codec->extradata + IFF_EXTRA_VIDEO_SIZE, data_size) < 0)
return AVERROR(EIO); return AVERROR(EIO);
break; break;
@@ -188,12 +216,15 @@ static int iff_read_header(AVFormatContext *s,
st->codec->height = avio_rb16(pb); st->codec->height = avio_rb16(pb);
avio_skip(pb, 4); // x, y offset avio_skip(pb, 4); // x, y offset
st->codec->bits_per_coded_sample = avio_r8(pb); st->codec->bits_per_coded_sample = avio_r8(pb);
if (data_size >= 11) { if (data_size >= 10)
avio_skip(pb, 1); // masking masking = avio_r8(pb);
if (data_size >= 11)
compression = avio_r8(pb); compression = avio_r8(pb);
if (data_size >= 14) {
avio_skip(pb, 1); // padding
transparency = avio_rb16(pb);
} }
if (data_size >= 16) { if (data_size >= 16) {
avio_skip(pb, 3); // paddding, transparent
st->sample_aspect_ratio.num = avio_r8(pb); st->sample_aspect_ratio.num = avio_r8(pb);
st->sample_aspect_ratio.den = avio_r8(pb); st->sample_aspect_ratio.den = avio_r8(pb);
} }
@@ -253,6 +284,31 @@ static int iff_read_header(AVFormatContext *s,
break; break;
case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_VIDEO:
iff->compression = compression;
iff->bpp = st->codec->bits_per_coded_sample;
if ((screenmode & 0x800 /* Hold And Modify */) && iff->bpp <= 8) {
iff->ham = iff->bpp > 6 ? 6 : 4;
st->codec->bits_per_coded_sample = 24;
}
iff->flags = (screenmode & 0x80 /* Extra HalfBrite */) && iff->bpp <= 8;
iff->masking = masking;
iff->transparency = transparency;
if (!st->codec->extradata) {
st->codec->extradata_size = IFF_EXTRA_VIDEO_SIZE;
st->codec->extradata = av_malloc(IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
if (!st->codec->extradata)
return AVERROR(ENOMEM);
}
buf = st->codec->extradata;
bytestream_put_be16(&buf, IFF_EXTRA_VIDEO_SIZE);
bytestream_put_byte(&buf, iff->compression);
bytestream_put_byte(&buf, iff->bpp);
bytestream_put_byte(&buf, iff->ham);
bytestream_put_byte(&buf, iff->flags);
bytestream_put_be16(&buf, iff->transparency);
bytestream_put_byte(&buf, iff->masking);
switch (compression) { switch (compression) {
case BITMAP_RAW: case BITMAP_RAW:
st->codec->codec_id = CODEC_ID_IFF_ILBM; st->codec->codec_id = CODEC_ID_IFF_ILBM;
@@ -293,7 +349,15 @@ static int iff_read_packet(AVFormatContext *s,
} }
interleave_stereo(sample_buffer, pkt->data, PACKET_SIZE); interleave_stereo(sample_buffer, pkt->data, PACKET_SIZE);
} else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
ret = av_get_packet(pb, pkt, iff->body_size); uint8_t *buf;
if (av_new_packet(pkt, iff->body_size + 2) < 0) {
return AVERROR(ENOMEM);
}
buf = pkt->data;
bytestream_put_be16(&buf, 2);
ret = avio_read(pb, buf, iff->body_size);
} else { } else {
ret = av_get_packet(pb, pkt, PACKET_SIZE); ret = av_get_packet(pb, pkt, PACKET_SIZE);
} }

View File

@@ -366,7 +366,7 @@ static int write_packet(AVFormatContext *s, AVPacket *pkt)
av_log(s, AV_LOG_ERROR, av_log(s, AV_LOG_ERROR,
"Could not get frame filename number %d from pattern '%s'\n", "Could not get frame filename number %d from pattern '%s'\n",
img->img_number, img->path); img->img_number, img->path);
return AVERROR(EIO); return AVERROR(EINVAL);
} }
for(i=0; i<3; i++){ for(i=0; i<3; i++){
if (avio_open(&pb[i], filename, AVIO_WRONLY) < 0) { if (avio_open(&pb[i], filename, AVIO_WRONLY) < 0) {

View File

@@ -618,7 +618,7 @@ static int mkv_write_tracks(AVFormatContext *s)
put_ebml_uint(pb, MATROSKA_ID_TRACKTYPE, MATROSKA_TRACK_TYPE_SUBTITLE); put_ebml_uint(pb, MATROSKA_ID_TRACKTYPE, MATROSKA_TRACK_TYPE_SUBTITLE);
if (!native_id) { if (!native_id) {
av_log(s, AV_LOG_ERROR, "Subtitle codec %d is not supported.\n", codec->codec_id); av_log(s, AV_LOG_ERROR, "Subtitle codec %d is not supported.\n", codec->codec_id);
return AVERROR_NOTSUPP; return AVERROR(EINVAL);
} }
break; break;
default: default:

View File

@@ -1,148 +0,0 @@
/*
* Copyright (c) 2009 Aurelien Jacobs <aurel@gnuage.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <strings.h>
#include "avformat.h"
#include "metadata.h"
#include "libavutil/avstring.h"
#if FF_API_OLD_METADATA
#define SIZE_OFFSET(x) sizeof(((AVFormatContext*)0)->x),offsetof(AVFormatContext,x)
static const struct {
const char name[16];
int size;
int offset;
} compat_tab[] = {
{ "title", SIZE_OFFSET(title) },
{ "author", SIZE_OFFSET(author) },
{ "copyright", SIZE_OFFSET(copyright) },
{ "comment", SIZE_OFFSET(comment) },
{ "album", SIZE_OFFSET(album) },
{ "year", SIZE_OFFSET(year) },
{ "track", SIZE_OFFSET(track) },
{ "genre", SIZE_OFFSET(genre) },
{ "artist", SIZE_OFFSET(author) },
{ "creator", SIZE_OFFSET(author) },
{ "written_by", SIZE_OFFSET(author) },
{ "lead_performer", SIZE_OFFSET(author) },
{ "composer", SIZE_OFFSET(author) },
{ "performer", SIZE_OFFSET(author) },
{ "description", SIZE_OFFSET(comment) },
{ "albumtitle", SIZE_OFFSET(album) },
{ "date", SIZE_OFFSET(year) },
{ "date_written", SIZE_OFFSET(year) },
{ "date_released", SIZE_OFFSET(year) },
{ "tracknumber", SIZE_OFFSET(track) },
{ "part_number", SIZE_OFFSET(track) },
};
void ff_metadata_demux_compat(AVFormatContext *ctx)
{
AVMetadata *m;
int i, j;
if ((m = ctx->metadata))
for (j=0; j<m->count; j++)
for (i=0; i<FF_ARRAY_ELEMS(compat_tab); i++)
if (!strcasecmp(m->elems[j].key, compat_tab[i].name)) {
int *ptr = (int *)((char *)ctx+compat_tab[i].offset);
if (*ptr) continue;
if (compat_tab[i].size > sizeof(int))
av_strlcpy((char *)ptr, m->elems[j].value, compat_tab[i].size);
else
*ptr = atoi(m->elems[j].value);
}
for (i=0; i<ctx->nb_chapters; i++)
if ((m = ctx->chapters[i]->metadata))
for (j=0; j<m->count; j++)
if (!strcasecmp(m->elems[j].key, "title")) {
av_free(ctx->chapters[i]->title);
ctx->chapters[i]->title = av_strdup(m->elems[j].value);
}
for (i=0; i<ctx->nb_programs; i++)
if ((m = ctx->programs[i]->metadata))
for (j=0; j<m->count; j++) {
if (!strcasecmp(m->elems[j].key, "name")) {
av_free(ctx->programs[i]->name);
ctx->programs[i]->name = av_strdup(m->elems[j].value);
}
if (!strcasecmp(m->elems[j].key, "provider_name")) {
av_free(ctx->programs[i]->provider_name);
ctx->programs[i]->provider_name = av_strdup(m->elems[j].value);
}
}
for (i=0; i<ctx->nb_streams; i++)
if ((m = ctx->streams[i]->metadata))
for (j=0; j<m->count; j++) {
if (!strcasecmp(m->elems[j].key, "language"))
av_strlcpy(ctx->streams[i]->language, m->elems[j].value, 4);
if (!strcasecmp(m->elems[j].key, "filename")) {
av_free(ctx->streams[i]->filename);
ctx->streams[i]->filename= av_strdup(m->elems[j].value);
}
}
}
#define FILL_METADATA(s, key, value) { \
if (!av_metadata_get(s->metadata, #key, NULL, 0)) \
av_metadata_set2(&s->metadata, #key, value, 0); \
}
#define FILL_METADATA_STR(s, key) { \
if (s->key && *s->key) FILL_METADATA(s, key, s->key); }
#define FILL_METADATA_INT(s, key) { \
char number[10]; \
snprintf(number, sizeof(number), "%d", s->key); \
if(s->key) FILL_METADATA(s, key, number) }
void ff_metadata_mux_compat(AVFormatContext *ctx)
{
int i;
if (ctx->metadata && ctx->metadata->count > 0)
return;
FILL_METADATA_STR(ctx, title);
FILL_METADATA_STR(ctx, author);
FILL_METADATA_STR(ctx, copyright);
FILL_METADATA_STR(ctx, comment);
FILL_METADATA_STR(ctx, album);
FILL_METADATA_INT(ctx, year);
FILL_METADATA_INT(ctx, track);
FILL_METADATA_STR(ctx, genre);
for (i=0; i<ctx->nb_chapters; i++)
FILL_METADATA_STR(ctx->chapters[i], title);
for (i=0; i<ctx->nb_programs; i++) {
FILL_METADATA_STR(ctx->programs[i], name);
FILL_METADATA_STR(ctx->programs[i], provider_name);
}
for (i=0; i<ctx->nb_streams; i++) {
FILL_METADATA_STR(ctx->streams[i], language);
FILL_METADATA_STR(ctx->streams[i], filename);
}
}
#endif /* FF_API_OLD_METADATA */

View File

@@ -588,7 +588,7 @@ static int mov_read_moov(MOVContext *c, AVIOContext *pb, MOVAtom atom)
static int mov_read_moof(MOVContext *c, AVIOContext *pb, MOVAtom atom) static int mov_read_moof(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{ {
c->fragment.moof_offset = avio_tell(pb) - 8; c->fragment.moof_offset = avio_tell(pb) - 8;
av_dlog(c->fc, "moof offset %llx\n", c->fragment.moof_offset); av_dlog(c->fc, "moof offset %"PRIx64"\n", c->fragment.moof_offset);
return mov_read_default(c, pb, atom); return mov_read_default(c, pb, atom);
} }
@@ -2368,7 +2368,7 @@ static int mov_read_header(AVFormatContext *s, AVFormatParameters *ap)
av_log(s, AV_LOG_ERROR, "moov atom not found\n"); av_log(s, AV_LOG_ERROR, "moov atom not found\n");
return -1; return -1;
} }
av_dlog(mov->fc, "on_parse_exit_offset=%lld\n", avio_tell(pb)); av_dlog(mov->fc, "on_parse_exit_offset=%"PRId64"\n", avio_tell(pb));
if (pb->seekable && mov->chapter_track > 0) if (pb->seekable && mov->chapter_track > 0)
mov_read_chapters(s); mov_read_chapters(s);
@@ -2417,7 +2417,7 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
mov_read_default(mov, s->pb, (MOVAtom){ AV_RL32("root"), INT64_MAX }) < 0 || mov_read_default(mov, s->pb, (MOVAtom){ AV_RL32("root"), INT64_MAX }) < 0 ||
url_feof(s->pb)) url_feof(s->pb))
return AVERROR_EOF; return AVERROR_EOF;
av_dlog(s, "read fragments, offset 0x%llx\n", avio_tell(s->pb)); av_dlog(s, "read fragments, offset 0x%"PRIx64"\n", avio_tell(s->pb));
goto retry; goto retry;
} }
sc = st->priv_data; sc = st->priv_data;

View File

@@ -923,7 +923,7 @@ static int flush_packet(AVFormatContext *ctx, int stream_index,
/* output data */ /* output data */
assert(payload_size - stuffing_size <= av_fifo_size(stream->fifo)); assert(payload_size - stuffing_size <= av_fifo_size(stream->fifo));
av_fifo_generic_read(stream->fifo, ctx->pb, payload_size - stuffing_size, &avio_write); av_fifo_generic_read(stream->fifo, ctx->pb, payload_size - stuffing_size, (void*)avio_write);
stream->bytes_to_iframe -= payload_size - stuffing_size; stream->bytes_to_iframe -= payload_size - stuffing_size;
}else{ }else{
payload_size= payload_size=

View File

@@ -45,6 +45,7 @@ static const struct ogg_codec * const ogg_codecs[] = {
&ff_vorbis_codec, &ff_vorbis_codec,
&ff_theora_codec, &ff_theora_codec,
&ff_flac_codec, &ff_flac_codec,
&ff_celt_codec,
&ff_old_dirac_codec, &ff_old_dirac_codec,
&ff_old_flac_codec, &ff_old_flac_codec,
&ff_ogm_video_codec, &ff_ogm_video_codec,
@@ -242,14 +243,14 @@ ogg_read_page (AVFormatContext * s, int *str)
idx = ogg_find_stream (ogg, serial); idx = ogg_find_stream (ogg, serial);
if (idx < 0){ if (idx < 0){
if (ogg->headers) { if (ogg->headers) {
int n; int n;
for (n = 0; n < ogg->nstreams; n++) { for (n = 0; n < ogg->nstreams; n++) {
av_freep(&ogg->streams[n].buf); av_freep(&ogg->streams[n].buf);
av_freep(&ogg->streams[n].private); av_freep(&ogg->streams[n].private);
} }
ogg->curidx = -1; ogg->curidx = -1;
ogg->nstreams = 0; ogg->nstreams = 0;
} }
idx = ogg_new_stream (s, serial); idx = ogg_new_stream (s, serial);
if (idx < 0) if (idx < 0)

View File

@@ -98,6 +98,7 @@ struct ogg {
#define OGG_FLAG_BOS 2 #define OGG_FLAG_BOS 2
#define OGG_FLAG_EOS 4 #define OGG_FLAG_EOS 4
extern const struct ogg_codec ff_celt_codec;
extern const struct ogg_codec ff_dirac_codec; extern const struct ogg_codec ff_dirac_codec;
extern const struct ogg_codec ff_flac_codec; extern const struct ogg_codec ff_flac_codec;
extern const struct ogg_codec ff_ogm_audio_codec; extern const struct ogg_codec ff_ogm_audio_codec;

View File

@@ -0,0 +1,98 @@
/*
* Xiph CELT / Opus parser for Ogg
* Copyright (c) 2011 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <string.h>
#include "avformat.h"
#include "oggdec.h"
#include "libavutil/intreadwrite.h"
struct oggcelt_private {
int extra_headers_left;
};
static int celt_header(AVFormatContext *s, int idx)
{
struct ogg *ogg = s->priv_data;
struct ogg_stream *os = ogg->streams + idx;
AVStream *st = s->streams[idx];
struct oggcelt_private *priv = os->private;
uint8_t *p = os->buf + os->pstart;
if (os->psize == 60 &&
!memcmp(p, ff_celt_codec.magic, ff_celt_codec.magicsize)) {
/* Main header */
uint32_t version, header_size, sample_rate, nb_channels, frame_size;
uint32_t overlap, bytes_per_packet, extra_headers;
uint8_t *extradata;
extradata = av_malloc(2 * sizeof(uint32_t) +
FF_INPUT_BUFFER_PADDING_SIZE);
priv = av_malloc(sizeof(struct oggcelt_private));
if (!extradata || !priv) {
av_free(extradata);
av_free(priv);
return AVERROR(ENOMEM);
}
version = AV_RL32(p + 28);
header_size = AV_RL32(p + 32); /* unused */
sample_rate = AV_RL32(p + 36);
nb_channels = AV_RL32(p + 40);
frame_size = AV_RL32(p + 44);
overlap = AV_RL32(p + 48);
bytes_per_packet = AV_RL32(p + 52); /* unused */
extra_headers = AV_RL32(p + 56);
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = CODEC_ID_CELT;
st->codec->sample_rate = sample_rate;
st->codec->channels = nb_channels;
st->codec->frame_size = frame_size;
st->codec->sample_fmt = AV_SAMPLE_FMT_S16;
av_set_pts_info(st, 64, 1, sample_rate);
priv->extra_headers_left = 1 + extra_headers;
av_free(os->private);
os->private = priv;
AV_WL32(extradata + 0, overlap);
AV_WL32(extradata + 4, version);
av_free(st->codec->extradata);
st->codec->extradata = extradata;
st->codec->extradata_size = 2 * sizeof(uint32_t);
return 1;
} else if(priv && priv->extra_headers_left) {
/* Extra headers (vorbiscomment) */
ff_vorbis_comment(s, &st->metadata, p, os->psize);
priv->extra_headers_left--;
return 1;
} else {
return 0;
}
}
const struct ogg_codec ff_celt_codec = {
.magic = "CELT ",
.magicsize = 8,
.header = celt_header,
};

View File

@@ -107,7 +107,8 @@ static int oma_read_header(AVFormatContext *s,
case OMA_CODECID_ATRAC3: case OMA_CODECID_ATRAC3:
samplerate = srate_tab[(codec_params >> 13) & 7]*100; samplerate = srate_tab[(codec_params >> 13) & 7]*100;
if (samplerate != 44100) if (samplerate != 44100)
av_log(s, AV_LOG_ERROR, "Unsupported sample rate, send sample file to developers: %d\n", samplerate); av_log_ask_for_sample(s, "Unsupported sample rate: %d\n",
samplerate);
framesize = (codec_params & 0x3FF) * 8; framesize = (codec_params & 0x3FF) * 8;
jsflag = (codec_params >> 17) & 1; /* get stereo coding mode, 1 for joint-stereo */ jsflag = (codec_params >> 17) & 1; /* get stereo coding mode, 1 for joint-stereo */

Some files were not shown because too many files have changed in this diff Show More