Compare commits


1 commit

Author:  Michael Niedermayer
SHA1:    35a7b73590
Message: update for 2.1
         Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Date:    2013-10-28 01:35:03 +01:00
1296 changed files with 25211 additions and 79139 deletions

.gitignore vendored (12 changes)

@@ -27,6 +27,7 @@
/ffserver
/config.*
/coverage.info
/version.h
/doc/*.1
/doc/*.3
/doc/*.html
@@ -34,28 +35,23 @@
/doc/config.texi
/doc/avoptions_codec.texi
/doc/avoptions_format.texi
/doc/doxy/html/
/doc/examples/avio_reading
/doc/examples/avcodec
/doc/examples/demuxing_decoding
/doc/examples/filter_audio
/doc/examples/decoding_encoding
/doc/examples/demuxing
/doc/examples/filtering_audio
/doc/examples/filtering_video
/doc/examples/metadata
/doc/examples/muxing
/doc/examples/pc-uninstalled
/doc/examples/remuxing
/doc/examples/resampling_audio
/doc/examples/scaling_video
/doc/examples/transcode_aac
/doc/fate.txt
/doc/doxy/html/
/doc/print_options
/lcov/
/libavcodec/*_tablegen
/libavcodec/*_tables.c
/libavcodec/*_tables.h
/libavutil/avconfig.h
/libavutil/ffversion.h
/tests/audiogen
/tests/base64
/tests/data/

Changelog

@@ -1,33 +1,7 @@
Entries are sorted chronologically from oldest to youngest within each release;
releases are sorted from youngest to oldest.
version 2.2:
- HNM version 4 demuxer and video decoder
- Live HDS muxer
- setsar/setdar filters now support variables in ratio expressions
- elbg filter
- string validation in ffprobe
- support for decoding through VDPAU in ffmpeg (the -hwaccel option)
- complete Voxware MetaSound decoder
- remove mp3_header_compress bitstream filter
- Windows resource files for shared libraries
- aeval filter
- stereoscopic 3d metadata handling
- WebP encoding via libwebp
- ATRAC3+ decoder
- VP8 in Ogg demuxing
- side & metadata support in NUT
- framepack filter
- XYZ12 rawvideo support in NUT
- Exif metadata support in WebP decoder
- OpenGL device
- Use metadata_header_padding to control padding in ID3 tags (currently used in
MP3, AIFF, and OMA files), FLAC header, and the AVI "junk" block.
- Mirillis FIC video decoder
- Support DNx444
- libx265 encoder
- dejudder filter
version <next>
version 2.1:
@@ -72,8 +46,7 @@ version 2.1:
- ReplayGain scanner
- Enhanced Low Delay AAC (ER AAC ELD) decoding (no LD SBR support)
- Linux framebuffer output device
- HEVC decoder
- raw HEVC, HEVC in MOV/MP4, HEVC in Matroska, HEVC in MPEG-TS demuxing
- HEVC decoder, raw HEVC demuxer, HEVC demuxing in TS, Matroska and MP4
- mergeplanes filter
@@ -89,7 +62,7 @@ version 2.0:
- 10% faster aac encoding on x86 and MIPS
- sine audio filter source
- WebP demuxing and decoding support
- ffmpeg options -filter_script and -filter_complex_script, which allow a
- new ffmpeg options -filter_script and -filter_complex_script, which allow a
filtergraph description to be read from a file
- OpenCL support
- audio phaser filter
@@ -97,7 +70,7 @@ version 2.0:
- libquvi demuxer
- uniform options syntax across all filters
- telecine filter
- interlace filter
- new interlace filter
- smptehdbars source
- inverse telecine filters (fieldmatch and decimate)
- colorbalance filter

LICENSE

@@ -47,6 +47,7 @@ Specifically, the GPL parts of FFmpeg are
- vf_stereo3d.c
- vf_super2xsai.c
- vf_tinterlace.c
- vf_yadif.c
- vsrc_mptestsrc.c
Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
@@ -79,7 +80,6 @@ The following libraries are under GPL:
- libutvideo
- libvidstab
- libx264
- libx265
- libxavs
- libxvid
When combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by

MAINTAINERS

@@ -31,7 +31,7 @@ ffprobe:
ffprobe.c Stefano Sabatini
ffserver:
ffserver.c Reynaldo H. Verdejo Pinochet
ffserver.c, ffserver.h Baptiste Coudurier
Commandline utility code:
cmdutils.c, cmdutils.h Michael Niedermayer
@@ -142,7 +142,6 @@ Codecs:
ass* Aurelien Jacobs
asv* Michael Niedermayer
atrac3* Benjamin Larsson
atrac3plus* Maxim Poliakovski
bgmc.c, bgmc.h Thilo Borgmann
bink.c Kostya Shishkov
binkaudio.c Peter Ross
@@ -151,7 +150,6 @@ Codecs:
cdxl.c Paul B Mahol
celp_filters.* Vitor Sessak
cinepak.c Roberto Togni
cinepakenc.c Rl / Aetey G.T. AB
cljr Alex Beregszaszi
cllc.c Derek Buitenhuis
cook.c, cookdata.h Benjamin Larsson
@@ -199,11 +197,8 @@ Codecs:
libtheoraenc.c David Conrad
libutvideo* Derek Buitenhuis
libvorbis.c David Conrad
libvpx* James Zern
libx264.c Mans Rullgard, Jason Garrett-Glaser
libx265.c Derek Buitenhuis
libxavs.c Stefan Gehrer
libzvbi-teletextdec.c Marton Balint
loco.c Kostya Shishkov
lzo.h, lzo.c Reimar Doeffinger
mdec.c Michael Niedermayer
@@ -271,7 +266,6 @@ Codecs:
vp5 Aurelien Jacobs
vp6 Aurelien Jacobs
vp8 David Conrad, Jason Garrett-Glaser, Ronald Bultje
vp9 Ronald Bultje, Clément Bœsch
vqavideo.c Mike Melanson
wavpack.c Kostya Shishkov
wmaprodec.c Sascha Sommer
@@ -307,7 +301,6 @@ libavdevice
iec61883.c Georg Lippitsch
lavfi Stefano Sabatini
libdc1394.c Roman Shaposhnik
opengl_enc.c Lukasz Marek
pulse_audio_enc.c Lukasz Marek
sdl Stefano Sabatini
v4l2.c Luca Abeni
@@ -336,7 +329,6 @@ Filters:
avf_avectorscope.c Paul B Mahol
vf_blend.c Paul B Mahol
vf_colorbalance.c Paul B Mahol
vf_dejudder.c Nicholas Robbins
vf_delogo.c Jean Delvare (CC <khali@linux-fr.org>)
vf_drawbox.c/drawgrid Andrey Utkin
vf_extractplanes.c Paul B Mahol
@@ -393,7 +385,6 @@ Muxers/Demuxers:
flvdec.c, flvenc.c Michael Niedermayer
gxf.c Reimar Doeffinger
gxfenc.c Baptiste Coudurier
hls.c Anssi Hannula
idcin.c Mike Melanson
idroqdec.c Mike Melanson
iff.c Jaikrishnan Menon
@@ -453,7 +444,6 @@ Muxers/Demuxers:
siff.c Kostya Shishkov
smacker.c Kostya Shishkov
smjpeg* Paul B Mahol
spdif* Anssi Hannula
srtdec.c Aurelien Jacobs
swf.c Baptiste Coudurier
takdec.c Paul B Mahol
@@ -511,9 +501,8 @@ x86 Michael Niedermayer
Releases
========
2.2 Michael Niedermayer
2.1 Michael Niedermayer
1.2 Michael Niedermayer
2.0 Michael Niedermayer
If you want to maintain an older release, please contact us

Makefile

@@ -6,34 +6,29 @@ vpath %.cpp $(SRC_PATH)
vpath %.h $(SRC_PATH)
vpath %.S $(SRC_PATH)
vpath %.asm $(SRC_PATH)
vpath %.rc $(SRC_PATH)
vpath %.v $(SRC_PATH)
vpath %.texi $(SRC_PATH)
vpath %/fate_config.sh.template $(SRC_PATH)
AVPROGS-$(CONFIG_FFMPEG) += ffmpeg
AVPROGS-$(CONFIG_FFPLAY) += ffplay
AVPROGS-$(CONFIG_FFPROBE) += ffprobe
AVPROGS-$(CONFIG_FFSERVER) += ffserver
PROGS-$(CONFIG_FFMPEG) += ffmpeg
PROGS-$(CONFIG_FFPLAY) += ffplay
PROGS-$(CONFIG_FFPROBE) += ffprobe
PROGS-$(CONFIG_FFSERVER) += ffserver
AVPROGS := $(AVPROGS-yes:%=%$(PROGSSUF)$(EXESUF))
INSTPROGS = $(AVPROGS-yes:%=%$(PROGSSUF)$(EXESUF))
PROGS += $(AVPROGS)
PROGS := $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))
INSTPROGS = $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))
AVBASENAMES = ffmpeg ffplay ffprobe ffserver
ALLAVPROGS = $(AVBASENAMES:%=%$(PROGSSUF)$(EXESUF))
ALLAVPROGS_G = $(AVBASENAMES:%=%$(PROGSSUF)_g$(EXESUF))
$(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog) += cmdutils.o))
$(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog)-$(CONFIG_OPENCL) += cmdutils_opencl.o))
OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
OBJS = cmdutils.o $(EXEOBJS)
OBJS-ffmpeg = ffmpeg_opt.o ffmpeg_filter.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
TOOLS = qt-faststart trasher uncoded_frame
TOOLS = qt-faststart trasher
TOOLS-$(CONFIG_ZLIB) += cws2fws
BASENAMES = ffmpeg ffplay ffprobe ffserver
ALLPROGS = $(BASENAMES:%=%$(PROGSSUF)$(EXESUF))
ALLPROGS_G = $(BASENAMES:%=%$(PROGSSUF)_g$(EXESUF))
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
FFLIBS-$(CONFIG_AVFILTER) += avfilter
FFLIBS-$(CONFIG_AVFORMAT) += avformat
@@ -55,14 +50,16 @@ include $(SRC_PATH)/common.mak
FF_EXTRALIBS := $(FFEXTRALIBS)
FF_DEP_LIBS := $(DEP_LIBS)
all: $(AVPROGS)
all: $(PROGS)
$(PROGS): %$(EXESUF): %_g$(EXESUF)
$(CP) $< $@
$(STRIP) $@
$(TOOLS): %$(EXESUF): %.o $(EXEOBJS)
$(LD) $(LDFLAGS) $(LD_O) $^ $(ELIBS)
tools/cws2fws$(EXESUF): ELIBS = $(ZLIB)
tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
tools/uncoded_frame$(EXESUF): ELIBS = $(FF_EXTRALIBS)
config.h: .config
.config: $(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c))
@@ -76,7 +73,7 @@ SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
ALTIVEC-OBJS VIS-OBJS \
MMX-OBJS YASM-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
OBJS SLIBOBJS HOSTOBJS TESTOBJS
OBJS HOSTOBJS TESTOBJS
define RESET
$(1) :=
@@ -93,10 +90,8 @@ endef
$(foreach D,$(FFLIBS),$(eval $(call DOSUBDIR,lib$(D))))
include $(SRC_PATH)/doc/Makefile
define DOPROG
OBJS-$(1) += $(1).o $(EXEOBJS) $(OBJS-$(1)-yes)
OBJS-$(1) += $(1).o cmdutils.o $(EXEOBJS)
$(1)$(PROGSSUF)_g$(EXESUF): $$(OBJS-$(1))
$$(OBJS-$(1)): CFLAGS += $(CFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): LDFLAGS += $(LDFLAGS-$(1))
@@ -104,13 +99,7 @@ $(1)$(PROGSSUF)_g$(EXESUF): FF_EXTRALIBS += $(LIBS-$(1))
-include $$(OBJS-$(1):.o=.d)
endef
$(foreach P,$(PROGS),$(eval $(call DOPROG,$(P:$(PROGSSUF)$(EXESUF)=))))
ffprobe.o cmdutils.o : libavutil/ffversion.h
$(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
$(CP) $< $@
$(STRIP) $@
$(foreach P,$(PROGS-yes),$(eval $(call DOPROG,$(P))))
%$(PROGSSUF)_g$(EXESUF): %.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)
@@ -125,14 +114,14 @@ GIT_LOG = $(SRC_PATH)/.git/logs/HEAD
.version: $(wildcard $(GIT_LOG)) $(VERSION_SH) config.mak
.version: M=@
libavutil/ffversion.h .version:
$(M)$(VERSION_SH) $(SRC_PATH) libavutil/ffversion.h $(EXTRA_VERSION)
version.h .version:
$(M)$(VERSION_SH) $(SRC_PATH) version.h $(EXTRA_VERSION)
$(Q)touch .version
# force version.sh to run whenever version might have changed
-include .version
ifdef AVPROGS
ifdef PROGS
install: install-progs install-data
endif
@@ -143,7 +132,7 @@ install-libs: install-libs-yes
install-progs-yes:
install-progs-$(CONFIG_SHARED): install-libs
install-progs: install-progs-yes $(AVPROGS)
install-progs: install-progs-yes $(PROGS)
$(Q)mkdir -p "$(BINDIR)"
$(INSTALL) -c -m 755 $(INSTPROGS) "$(BINDIR)"
@@ -155,13 +144,13 @@ install-data: $(DATA_FILES) $(EXAMPLES_FILES)
uninstall: uninstall-libs uninstall-headers uninstall-progs uninstall-data
uninstall-progs:
$(RM) $(addprefix "$(BINDIR)/", $(ALLAVPROGS))
$(RM) $(addprefix "$(BINDIR)/", $(ALLPROGS))
uninstall-data:
$(RM) -r "$(DATADIR)"
clean::
$(RM) $(ALLAVPROGS) $(ALLAVPROGS_G)
$(RM) $(ALLPROGS) $(ALLPROGS_G)
$(RM) $(CLEANSUFFIXES)
$(RM) $(CLEANSUFFIXES:%=tools/%)
$(RM) -r coverage-html
@@ -169,13 +158,14 @@ clean::
distclean::
$(RM) $(DISTCLEANSUFFIXES)
$(RM) config.* .config libavutil/avconfig.h .version version.h libavutil/ffversion.h libavcodec/codec_names.h
$(RM) config.* .config libavutil/avconfig.h .version version.h libavcodec/codec_names.h
config:
$(SRC_PATH)/configure $(value FFMPEG_CONFIGURATION)
check: all alltools examples testprogs fate
include $(SRC_PATH)/doc/Makefile
include $(SRC_PATH)/tests/Makefile
$(sort $(OBJDIRS)):

RELEASE

@@ -1 +1 @@
2.2-rc2
2.1

VERSION

@@ -1 +1 @@
2.2-rc2
2.1

cmdutils.c

@@ -20,7 +20,6 @@
*/
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <math.h>
@@ -49,8 +48,8 @@
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/cpu.h"
#include "libavutil/ffversion.h"
#include "cmdutils.h"
#include "version.h"
#if CONFIG_NETWORK
#include "libavformat/network.h"
#endif
@@ -58,6 +57,10 @@
#include <sys/time.h>
#include <sys/resource.h>
#endif
#if CONFIG_OPENCL
#include "libavutil/opencl.h"
#endif
static int init_report(const char *env);
@@ -65,8 +68,9 @@ struct SwsContext *sws_opts;
AVDictionary *swr_opts;
AVDictionary *format_opts, *codec_opts, *resample_opts;
const int this_year = 2013;
static FILE *report_file;
int hide_banner = 0;
void init_opts(void)
{
@@ -492,9 +496,6 @@ void parse_loglevel(int argc, char **argv, const OptionDef *options)
fflush(report_file);
}
}
idx = locate_option(argc, argv, options, "hide_banner");
if (idx)
hide_banner = 1;
}
static const AVOption *opt_find(void *obj, const char *name, const char *unit,
@@ -985,6 +986,26 @@ int opt_timelimit(void *optctx, const char *opt, const char *arg)
return 0;
}
#if CONFIG_OPENCL
int opt_opencl(void *optctx, const char *opt, const char *arg)
{
char *key, *value;
const char *opts = arg;
int ret = 0;
while (*opts) {
ret = av_opt_get_key_value(&opts, "=", ":", 0, &key, &value);
if (ret < 0)
return ret;
ret = av_opencl_set_option(key, value);
if (ret < 0)
return ret;
if (*opts)
opts++;
}
return ret;
}
#endif
void print_error(const char *filename, int err)
{
char errbuf[128];
@@ -1050,7 +1071,7 @@ static void print_program_info(int flags, int level)
av_log(NULL, level, "%s version " FFMPEG_VERSION, program_name);
if (flags & SHOW_COPYRIGHT)
av_log(NULL, level, " Copyright (c) %d-%d the FFmpeg developers",
program_birth_year, CONFIG_THIS_YEAR);
program_birth_year, this_year);
av_log(NULL, level, "\n");
av_log(NULL, level, "%sbuilt on %s %s with %s\n",
indent, __DATE__, __TIME__, CC_IDENT);
@@ -1058,36 +1079,10 @@ static void print_program_info(int flags, int level)
av_log(NULL, level, "%sconfiguration: " FFMPEG_CONFIGURATION "\n", indent);
}
static void print_buildconf(int flags, int level)
{
const char *indent = flags & INDENT ? " " : "";
char str[] = { FFMPEG_CONFIGURATION };
char *conflist, *remove_tilde, *splitconf;
// Change all the ' --' strings to '~--' so that
// they can be identified as tokens.
while ((conflist = strstr(str, " --")) != NULL) {
strncpy(conflist, "~--", 3);
}
// Compensate for the weirdness this would cause
// when passing 'pkg-config --static'.
while ((remove_tilde = strstr(str, "pkg-config~")) != NULL) {
strncpy(remove_tilde, "pkg-config ", 11);
}
splitconf = strtok(str, "~");
av_log(NULL, level, "\n%sconfiguration:\n", indent);
while (splitconf != NULL) {
av_log(NULL, level, "%s%s%s\n", indent, indent, splitconf);
splitconf = strtok(NULL, "~");
}
}
void show_banner(int argc, char **argv, const OptionDef *options)
{
int idx = locate_option(argc, argv, options, "version");
if (hide_banner || idx)
if (idx)
return;
print_program_info (INDENT|SHOW_COPYRIGHT, AV_LOG_INFO);
@@ -1104,14 +1099,6 @@ int show_version(void *optctx, const char *opt, const char *arg)
return 0;
}
int show_buildconf(void *optctx, const char *opt, const char *arg)
{
av_log_set_callback(log_callback_help);
print_buildconf (INDENT|0, AV_LOG_INFO);
return 0;
}
int show_license(void *optctx, const char *opt, const char *arg)
{
#if CONFIG_NONFREE
@@ -1531,7 +1518,7 @@ int show_filters(void *optctx, const char *opt, const char *arg)
return 0;
}
int show_colors(void *optctx, const char *opt, const char *arg)
void show_colors(void *optctx, const char *opt, const char *arg)
{
const char *name;
const uint8_t *rgb;
@@ -1541,8 +1528,6 @@ int show_colors(void *optctx, const char *opt, const char *arg)
for (i = 0; name = av_get_known_color_name(i, &rgb); i++)
printf("%-32s #%02x%02x%02x\n", name, rgb[0], rgb[1], rgb[2]);
return 0;
}
int show_pix_fmts(void *optctx, const char *opt, const char *arg)
@@ -1804,7 +1789,7 @@ int read_yesno(void)
int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
{
int ret;
FILE *f = av_fopen_utf8(filename, "rb");
FILE *f = fopen(filename, "rb");
if (!f) {
av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename,

cmdutils.h

@@ -43,12 +43,16 @@ extern const char program_name[];
*/
extern const int program_birth_year;
/**
* this year, defined by the program for show_banner()
*/
extern const int this_year;
extern AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB];
extern AVFormatContext *avformat_opts;
extern struct SwsContext *sws_opts;
extern AVDictionary *swr_opts;
extern AVDictionary *format_opts, *codec_opts, *resample_opts;
extern int hide_banner;
/**
* Register a program-specific cleanup routine.
@@ -99,12 +103,8 @@ int opt_max_alloc(void *optctx, const char *opt, const char *arg);
int opt_codec_debug(void *optctx, const char *opt, const char *arg);
#if CONFIG_OPENCL
int opt_opencl(void *optctx, const char *opt, const char *arg);
int opt_opencl_bench(void *optctx, const char *opt, const char *arg);
#endif
/**
* Limit the execution time.
*/
@@ -414,13 +414,6 @@ void show_banner(int argc, char **argv, const OptionDef *options);
*/
int show_version(void *optctx, const char *opt, const char *arg);
/**
* Print the build configuration of the program to stdout. The contents
* depend on the definition of FFMPEG_CONFIGURATION.
* This option processing function does not utilize the arguments.
*/
int show_buildconf(void *optctx, const char *opt, const char *arg);
/**
* Print the license of the program to stdout. The license depends on
* the license of the libraries compiled into the program.
@@ -499,7 +492,7 @@ int show_sample_fmts(void *optctx, const char *opt, const char *arg);
* Print a listing containing all the color names and values recognized
* by the program.
*/
int show_colors(void *optctx, const char *opt, const char *arg);
void show_colors(void *optctx, const char *opt, const char *arg);
/**
* Return a positive value if a line read from standard input

cmdutils_common_opts.h

@@ -4,7 +4,6 @@
{ "help" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "-help" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "version" , OPT_EXIT, {.func_arg = show_version}, "show version" },
{ "buildconf" , OPT_EXIT, {.func_arg = show_buildconf}, "show build configuration" },
{ "formats" , OPT_EXIT, {.func_arg = show_formats }, "show available formats" },
{ "codecs" , OPT_EXIT, {.func_arg = show_codecs }, "show available codecs" },
{ "decoders" , OPT_EXIT, {.func_arg = show_decoders }, "show available decoders" },
@@ -21,8 +20,6 @@
{ "report" , 0, {(void*)opt_report}, "generate a report" },
{ "max_alloc" , HAS_ARG, {.func_arg = opt_max_alloc}, "set maximum size of a single allocated block", "bytes" },
{ "cpuflags" , HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" },
{ "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" },
#if CONFIG_OPENCL
{ "opencl_bench", OPT_EXIT, {.func_arg = opt_opencl_bench}, "run benchmark on all OpenCL devices and show results" },
{ "opencl_options", HAS_ARG, {.func_arg = opt_opencl}, "set OpenCL environment options" },
#endif

cmdutils_opencl.c

@@ -1,274 +0,0 @@
/*
* Copyright (C) 2013 Lenny Wang
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavutil/log.h"
#include "libavutil/opencl.h"
#include "cmdutils.h"
typedef struct {
int platform_idx;
int device_idx;
char device_name[64];
int64_t runtime;
} OpenCLDeviceBenchmark;
const char *ocl_bench_source = AV_OPENCL_KERNEL(
inline unsigned char clip_uint8(int a)
{
if (a & (~0xFF))
return (-a)>>31;
else
return a;
}
kernel void unsharp_bench(
global unsigned char *src,
global unsigned char *dst,
global int *mask,
int width,
int height)
{
int i, j, local_idx, lc_idx, sum = 0;
int2 thread_idx, block_idx, global_idx, lm_idx;
thread_idx.x = get_local_id(0);
thread_idx.y = get_local_id(1);
block_idx.x = get_group_id(0);
block_idx.y = get_group_id(1);
global_idx.x = get_global_id(0);
global_idx.y = get_global_id(1);
local uchar data[32][32];
local int lc[128];
for (i = 0; i <= 1; i++) {
lm_idx.y = -8 + (block_idx.y + i) * 16 + thread_idx.y;
lm_idx.y = lm_idx.y < 0 ? 0 : lm_idx.y;
lm_idx.y = lm_idx.y >= height ? height - 1: lm_idx.y;
for (j = 0; j <= 1; j++) {
lm_idx.x = -8 + (block_idx.x + j) * 16 + thread_idx.x;
lm_idx.x = lm_idx.x < 0 ? 0 : lm_idx.x;
lm_idx.x = lm_idx.x >= width ? width - 1: lm_idx.x;
data[i*16 + thread_idx.y][j*16 + thread_idx.x] = src[lm_idx.y*width + lm_idx.x];
}
}
local_idx = thread_idx.y*16 + thread_idx.x;
if (local_idx < 128)
lc[local_idx] = mask[local_idx];
barrier(CLK_LOCAL_MEM_FENCE);
\n#pragma unroll\n
for (i = -4; i <= 4; i++) {
lm_idx.y = 8 + i + thread_idx.y;
\n#pragma unroll\n
for (j = -4; j <= 4; j++) {
lm_idx.x = 8 + j + thread_idx.x;
lc_idx = (i + 4)*8 + j + 4;
sum += (int)data[lm_idx.y][lm_idx.x] * lc[lc_idx];
}
}
int temp = (int)data[thread_idx.y + 8][thread_idx.x + 8];
int res = temp + (((temp - (int)((sum + 1<<15) >> 16))) >> 16);
if (global_idx.x < width && global_idx.y < height)
dst[global_idx.x + global_idx.y*width] = clip_uint8(res);
}
);
#define OCLCHECK(method, ... ) \
do { \
status = method(__VA_ARGS__); \
if (status != CL_SUCCESS) { \
av_log(NULL, AV_LOG_ERROR, # method " error '%s'\n", \
av_opencl_errstr(status)); \
ret = AVERROR_EXTERNAL; \
goto end; \
} \
} while (0)
#define CREATEBUF(out, flags, size) \
do { \
out = clCreateBuffer(ext_opencl_env->context, flags, size, NULL, &status); \
if (status != CL_SUCCESS) { \
av_log(NULL, AV_LOG_ERROR, "Could not create OpenCL buffer\n"); \
ret = AVERROR_EXTERNAL; \
goto end; \
} \
} while (0)
static void fill_rand_int(int *data, int n)
{
int i;
srand(av_gettime());
for (i = 0; i < n; i++)
data[i] = rand();
}
#define OPENCL_NB_ITER 5
static int64_t run_opencl_bench(AVOpenCLExternalEnv *ext_opencl_env)
{
int i, arg = 0, width = 1920, height = 1088;
int64_t start, ret = 0;
cl_int status;
size_t kernel_len;
char *inbuf;
int *mask;
int buf_size = width * height * sizeof(char);
int mask_size = sizeof(uint32_t) * 128;
cl_mem cl_mask, cl_inbuf, cl_outbuf;
cl_kernel kernel = NULL;
cl_program program = NULL;
size_t local_work_size_2d[2] = {16, 16};
size_t global_work_size_2d[2] = {(size_t)width, (size_t)height};
if (!(inbuf = av_malloc(buf_size)) || !(mask = av_malloc(mask_size))) {
av_log(NULL, AV_LOG_ERROR, "Out of memory\n");
ret = AVERROR(ENOMEM);
goto end;
}
fill_rand_int((int*)inbuf, buf_size/4);
fill_rand_int(mask, mask_size/4);
CREATEBUF(cl_mask, CL_MEM_READ_ONLY, mask_size);
CREATEBUF(cl_inbuf, CL_MEM_READ_ONLY, buf_size);
CREATEBUF(cl_outbuf, CL_MEM_READ_WRITE, buf_size);
kernel_len = strlen(ocl_bench_source);
program = clCreateProgramWithSource(ext_opencl_env->context, 1, &ocl_bench_source,
&kernel_len, &status);
if (status != CL_SUCCESS || !program) {
av_log(NULL, AV_LOG_ERROR, "OpenCL unable to create benchmark program\n");
ret = AVERROR_EXTERNAL;
goto end;
}
status = clBuildProgram(program, 1, &(ext_opencl_env->device_id), NULL, NULL, NULL);
if (status != CL_SUCCESS) {
av_log(NULL, AV_LOG_ERROR, "OpenCL unable to build benchmark program\n");
ret = AVERROR_EXTERNAL;
goto end;
}
kernel = clCreateKernel(program, "unsharp_bench", &status);
if (status != CL_SUCCESS) {
av_log(NULL, AV_LOG_ERROR, "OpenCL unable to create benchmark kernel\n");
ret = AVERROR_EXTERNAL;
goto end;
}
OCLCHECK(clEnqueueWriteBuffer, ext_opencl_env->command_queue, cl_inbuf, CL_TRUE, 0,
buf_size, inbuf, 0, NULL, NULL);
OCLCHECK(clEnqueueWriteBuffer, ext_opencl_env->command_queue, cl_mask, CL_TRUE, 0,
mask_size, mask, 0, NULL, NULL);
OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_mem), &cl_inbuf);
OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_mem), &cl_outbuf);
OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_mem), &cl_mask);
OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_int), &width);
OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_int), &height);
start = av_gettime();
for (i = 0; i < OPENCL_NB_ITER; i++)
OCLCHECK(clEnqueueNDRangeKernel, ext_opencl_env->command_queue, kernel, 2, NULL,
global_work_size_2d, local_work_size_2d, 0, NULL, NULL);
clFinish(ext_opencl_env->command_queue);
ret = (av_gettime() - start)/OPENCL_NB_ITER;
end:
if (kernel)
clReleaseKernel(kernel);
if (program)
clReleaseProgram(program);
if (cl_inbuf)
clReleaseMemObject(cl_inbuf);
if (cl_outbuf)
clReleaseMemObject(cl_outbuf);
if (cl_mask)
clReleaseMemObject(cl_mask);
av_free(inbuf);
av_free(mask);
return ret;
}
static int compare_ocl_device_desc(const void *a, const void *b)
{
return ((OpenCLDeviceBenchmark*)a)->runtime - ((OpenCLDeviceBenchmark*)b)->runtime;
}
int opt_opencl_bench(void *optctx, const char *opt, const char *arg)
{
int i, j, nb_devices = 0, count = 0;
int64_t score = 0;
AVOpenCLDeviceList *device_list;
AVOpenCLDeviceNode *device_node = NULL;
OpenCLDeviceBenchmark *devices = NULL;
cl_platform_id platform;
av_opencl_get_device_list(&device_list);
for (i = 0; i < device_list->platform_num; i++)
nb_devices += device_list->platform_node[i]->device_num;
if (!nb_devices) {
av_log(NULL, AV_LOG_ERROR, "No OpenCL device detected!\n");
return AVERROR(EINVAL);
}
if (!(devices = av_malloc(sizeof(OpenCLDeviceBenchmark) * nb_devices))) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate buffer\n");
return AVERROR(ENOMEM);
}
for (i = 0; i < device_list->platform_num; i++) {
for (j = 0; j < device_list->platform_node[i]->device_num; j++) {
device_node = device_list->platform_node[i]->device_node[j];
platform = device_list->platform_node[i]->platform_id;
score = av_opencl_benchmark(device_node, platform, run_opencl_bench);
if (score > 0) {
devices[count].platform_idx = i;
devices[count].device_idx = j;
devices[count].runtime = score;
strcpy(devices[count].device_name, device_node->device_name);
count++;
}
}
}
qsort(devices, count, sizeof(OpenCLDeviceBenchmark), compare_ocl_device_desc);
fprintf(stderr, "platform_idx\tdevice_idx\tdevice_name\truntime\n");
for (i = 0; i < count; i++)
fprintf(stdout, "%d\t%d\t%s\t%"PRId64"\n",
devices[i].platform_idx, devices[i].device_idx,
devices[i].device_name, devices[i].runtime);
av_opencl_free_device_list(&device_list);
av_free(devices);
return 0;
}
int opt_opencl(void *optctx, const char *opt, const char *arg)
{
char *key, *value;
const char *opts = arg;
int ret = 0;
while (*opts) {
ret = av_opt_get_key_value(&opts, "=", ":", 0, &key, &value);
if (ret < 0)
return ret;
ret = av_opencl_set_option(key, value);
if (ret < 0)
return ret;
if (*opts)
opts++;
}
return ret;
}
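
One caveat worth noting in the removed file: compare_ocl_device_desc() returns the difference of two int64_t runtimes as an int, which can truncate or wrap for large values. A sketch (not part of the diff) of an overflow-safe comparator for the same struct:

    /* Overflow-safe qsort comparator for OpenCLDeviceBenchmark.runtime. */
    static int compare_ocl_device_desc_safe(const void *a, const void *b)
    {
        int64_t ra = ((const OpenCLDeviceBenchmark *)a)->runtime;
        int64_t rb = ((const OpenCLDeviceBenchmark *)b)->runtime;
        return (ra > rb) - (ra < rb);   /* -1, 0 or 1; no truncation */
    }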

common.mak

@@ -10,7 +10,7 @@ ifndef SUBDIR
ifndef V
Q = @
ECHO = printf "$(1)\t%s\n" $(2)
BRIEF = CC CXX HOSTCC HOSTLD AS YASM AR LD STRIP CP WINDRES
BRIEF = CC CXX HOSTCC HOSTLD AS YASM AR LD STRIP CP
SILENT = DEPCC DEPHOSTCC DEPAS DEPYASM RANLIB RM
MSG = $@
@@ -43,7 +43,6 @@ endef
COMPILE_C = $(call COMPILE,CC)
COMPILE_CXX = $(call COMPILE,CXX)
COMPILE_S = $(call COMPILE,AS)
COMPILE_HOSTC = $(call COMPILE,HOSTCC)
%.o: %.c
$(COMPILE_C)
@@ -57,12 +56,6 @@ COMPILE_HOSTC = $(call COMPILE,HOSTCC)
%.o: %.S
$(COMPILE_S)
%_host.o: %.c
$(COMPILE_HOSTC)
%.o: %.rc
$(WINDRES) $(IFLAGS) --preprocessor "$(DEPWINDRES) -E -xc-header -DRC_INVOKED $(CC_DEPFLAGS)" -o $@ $<
%.i: %.c
$(CC) $(CCFLAGS) $(CC_E) $<
@@ -89,15 +82,14 @@ endif
include $(SRC_PATH)/arch.mak
OBJS += $(OBJS-yes)
SLIBOBJS += $(SLIBOBJS-yes)
FFLIBS := $(FFLIBS-yes) $(FFLIBS)
TESTPROGS += $(TESTPROGS-yes)
LDLIBS = $(FFLIBS:%=%$(BUILDSUF))
FFEXTRALIBS := $(LDLIBS:%=$(LD_LIB)) $(EXTRALIBS)
EXAMPLES := $(EXAMPLES:%=$(SUBDIR)%-example$(EXESUF))
OBJS := $(sort $(OBJS:%=$(SUBDIR)%))
SLIBOBJS := $(sort $(SLIBOBJS:%=$(SUBDIR)%))
TESTOBJS := $(TESTOBJS:%=$(SUBDIR)%) $(TESTPROGS:%=$(SUBDIR)%-test.o)
TESTPROGS := $(TESTPROGS:%=$(SUBDIR)%-test$(EXESUF))
HOSTOBJS := $(HOSTPROGS:%=$(SUBDIR)%.o)
@@ -121,19 +113,18 @@ checkheaders: $(HOBJS)
alltools: $(TOOLS)
$(HOSTOBJS): %.o: %.c
$(COMPILE_HOSTC)
$(call COMPILE,HOSTCC)
$(HOSTPROGS): %$(HOSTEXESUF): %.o
$(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $^ $(HOSTLIBS)
$(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $< $(HOSTLIBS)
$(OBJS): | $(sort $(dir $(OBJS)))
$(HOBJS): | $(sort $(dir $(HOBJS)))
$(HOSTOBJS): | $(sort $(dir $(HOSTOBJS)))
$(SLIBOBJS): | $(sort $(dir $(SLIBOBJS)))
$(TESTOBJS): | $(sort $(dir $(TESTOBJS)))
$(TOOLOBJS): | tools
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(TESTOBJS))
CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda
DISTCLEANSUFFIXES = *.pc
@@ -148,4 +139,4 @@ endef
$(eval $(RULES))
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d))
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d))

compat/getopt.c

@@ -38,6 +38,8 @@ static int optind = 1;
static int optopt;
static char *optarg;
#undef fprintf
static int getopt(int argc, char *argv[], char *opts)
{
static int sp = 1;
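
For context, a minimal caller of this compat getopt(), assuming it lives in the same translation unit (the option string and messages below are invented for the example):

    #include <stdio.h>

    static void parse_args(int argc, char *argv[])
    {
        int c;
        /* "i:" takes an argument, "v" is a plain flag */
        while ((c = getopt(argc, argv, "i:v")) != -1) {
            switch (c) {
            case 'i': printf("input: %s\n", optarg);        break;
            case 'v': printf("verbose on\n");               break;
            default:  fprintf(stderr, "unknown option\n");  break;
            }
        }
    }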

compat/os2threads.h

@@ -32,8 +32,6 @@
#undef __STRICT_ANSI__ /* for _beginthread() */
#include <stdlib.h>
#include "libavutil/mem.h"
typedef TID pthread_t;
typedef void pthread_attr_t;

compat/va_copy.h

@@ -24,6 +24,3 @@
#if !defined(va_copy) && defined(_MSC_VER)
#define va_copy(dst, src) ((dst) = (src))
#endif
#if !defined(va_copy) && defined(__GNUC__) && __GNUC__ < 3
#define va_copy(dst, src) __va_copy(dst, src)
#endif
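
These fallbacks matter because a va_list may only be traversed once; va_copy() is required before handing the same argument list to two consumers. A small illustration (the function name is invented):

    #include <stdarg.h>
    #include <stdio.h>

    /* Print the same formatted message to stdout and stderr. */
    static void log_twice(const char *fmt, ...)
    {
        va_list ap, ap2;
        va_start(ap, fmt);
        va_copy(ap2, ap);           /* ap is consumed by the first vfprintf */
        vfprintf(stdout, fmt, ap);
        vfprintf(stderr, fmt, ap2);
        va_end(ap2);
        va_end(ap);
    }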

compat/windows/makedef

@@ -1,132 +0,0 @@
#!/bin/sh
# Copyright (c) 2013, Derek Buitenhuis
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# mktemp isn't POSIX, so supply an implementation
mktemp() {
echo "${2%%XXX*}.${HOSTNAME}.${UID}.$$"
}
if [ $# -lt 2 ]; then
echo "Usage: makedef <version_script> <objects>" >&2
exit 0
fi
vscript=$1
shift
if [ ! -f "$vscript" ]; then
echo "Version script does not exist" >&2
exit 1
fi
for object in "$@"; do
if [ ! -f "$object" ]; then
echo "Object does not exist: ${object}" >&2
exit 1
fi
done
# Create a lib temporarily to dump symbols from.
# It's just much easier to do it this way
libname=$(mktemp -u "library").lib
trap 'rm -f -- $libname' EXIT
lib -out:${libname} $@ >/dev/null
if [ $? != 0 ]; then
echo "Could not create temporary library." >&2
exit 1
fi
IFS='
'
# Determine if we're building for x86 or x86_64 and
# set the symbol prefix accordingly.
prefix=""
arch=$(dumpbin -headers ${libname} |
tr '\t' ' ' |
grep '^ \+.\+machine \+(.\+)' |
head -1 |
sed -e 's/^ \{1,\}.\{1,\} \{1,\}machine \{1,\}(\(...\)).*/\1/')
if [ "${arch}" = "x86" ]; then
prefix="_"
else
if [ "${arch}" != "ARM" ] && [ "${arch}" != "x64" ]; then
echo "Unknown machine type." >&2
exit 1
fi
fi
started=0
regex="none"
for line in $(cat ${vscript} | tr '\t' ' '); do
# We only care about global symbols
echo "${line}" | grep -q '^ \+global:'
if [ $? = 0 ]; then
started=1
line=$(echo "${line}" | sed -e 's/^ \{1,\}global: *//')
else
echo "${line}" | grep -q '^ \+local:'
if [ $? = 0 ]; then
started=0
fi
fi
if [ ${started} = 0 ]; then
continue
fi
# Handle multiple symbols on one line
IFS=';'
# Work around stupid expansion to filenames
line=$(echo "${line}" | sed -e 's/\*/.\\+/g')
for exp in ${line}; do
# Remove leading and trailing whitespace
exp=$(echo "${exp}" | sed -e 's/^ *//' -e 's/ *$//')
if [ "${regex}" = "none" ]; then
regex="${exp}"
else
regex="${regex};${exp}"
fi
done
IFS='
'
done
dump=$(dumpbin -linkermember:1 ${libname})
rm ${libname}
IFS=';'
list=""
for exp in ${regex}; do
list="${list}"'
'$(echo "${dump}" |
sed -e '/public symbols/,$!d' -e '/^ \{1,\}Summary/,$d' -e "s/ \{1,\}${prefix}/ /" -e 's/ \{1,\}/ /g' |
tail -n +2 |
cut -d' ' -f3 |
grep "^${exp}" |
sed -e 's/^/ /')
done
echo "EXPORTS"
echo "${list}" | sort | uniq | tail -n +2

configure vendored (620 changes)

File diff suppressed because it is too large

doc/APIchanges

@@ -4,7 +4,7 @@ since the last major version increase or the API was added.
The last version increases were:
libavcodec: 2013-03-xx
libavdevice: 2013-03-xx
libavfilter: 2013-12-xx
libavfilter: 2012-06-22
libavformat: 2013-03-xx
libavresample: 2012-10-05
libpostproc: 2011-04-18
@@ -15,133 +15,6 @@ libavutil: 2012-10-22
API changes, most recent first:
2014-xx-xx - xxxxxxx - lavu 53.05.0 - frame.h
Add av_frame_copy() for copying the frame data.
2014-02-xx - xxxxxxx - lswr 0.18.100 - swresample.h
Add swr_is_initialized() for checking whether a resample context is initialized.
2014-02-xx - xxxxxxx - lavr 1.2.0 - avresample.h
Add avresample_is_open() for checking whether a resample context is open.
2014-xx-xx - xxxxxxx - lavu 53.04.0 - opt.h
Add AV_OPT_FLAG_EXPORT and AV_OPT_FLAG_READONLY to mark options meant (only)
for reading.
2014-xx-xx - xxxxxxx - lavu 53.03.01 - opt.h
Deprecate unused AV_OPT_FLAG_METADATA.
2014-02-xx - xxxxxxx - lavd 55.10.100 - avdevice.h
Add avdevice_list_devices() and avdevice_free_list_devices()
2014-02-16 - db3c970 - lavf 55.33.100 - avio.h
Add avio_find_protocol_name() to find out the name of the protocol that would
be selected for a given URL.
2014-02-xx - xxxxxxx - lavu 53.3.0 - frame.h
Add AV_FRAME_DATA_DOWNMIX_INFO value to the AVFrameSideDataType enum and
downmix_info.h API, which identify downmix-related metadata.
2014-02-11 - 1b05ac2 - lavf 55.32.100 - avformat.h
Add av_write_uncoded_frame() and av_interleaved_write_uncoded_frame().
2014-02-04 - 3adb5f8 / d9ae103 - lavf 55.30.100 / 55.11.0 - avformat.h
Add AVFormatContext.max_interleave_delta for controlling amount of buffering
when interleaving.
2014-02-02 - xxxxxxx - lavf 55.29.100 - avformat.h
Add output_ts_offset muxing option to AVFormatContext.
2014-01-27 - 102bd64 - lavd 55.7.100 - avdevice.h
lavf 55.28.100 - avformat.h
Add avdevice_dev_to_app_control_message() function.
2014-01-27 - 7151411 - lavd 55.6.100 - avdevice.h
lavf 55.27.100 - avformat.h
Add avdevice_app_to_dev_control_message() function.
2014-01-24 - 86bee79 - lavf 55.26.100 - avformat.h
Add AVFormatContext option metadata_header_padding to allow control over the
amount of padding added.
2014-01-20 - eef74b2 / 93c553c - lavc 55.48.102 / 55.32.1 - avcodec.h
Edges are not required anymore on video buffers allocated by get_buffer2()
(i.e. as if the CODEC_FLAG_EMU_EDGE flag was always on). Deprecate
CODEC_FLAG_EMU_EDGE and avcodec_get_edge_width().
2014-01-19 - xxxxxxx - lavf 55.25.100 - avformat.h
Add avformat_get_mov_video_tags() and avformat_get_mov_audio_tags().
2014-01-19 - xxxxxxx - lavu 52.63.100 - rational.h
Add av_make_q() function.
2014-01-05 - 4cf4da9 / 5b4797a - lavu 52.62.100 / 53.2.0 - frame.h
Add AV_FRAME_DATA_MATRIXENCODING value to the AVFrameSideDataType enum, which
identifies AVMatrixEncoding data.
2014-01-05 - 751385f / 5c437fb - lavu 52.61.100 / 53.1.0 - channel_layout.h
Add values for various Dolby flags to the AVMatrixEncoding enum.
2014-01-04 - xxxxxxx - lavu 52.60.100 - mathematics.h
Add av_add_stable() function.
2013-12-22 - xxxxxxx - lavu 52.59.100 - avstring.h
Add av_strnlen() function.
2013-12-xx - xxxxxxx - lavu 52.57.100 - opencl.h
Add av_opencl_benchmark() function.
2013-11-xx - xxxxxxx - lavu 52.56.100 - ffversion.h
Moves version.h to libavutil/ffversion.h.
Install ffversion.h and make it public.
2013-12-11 - 29c83d2 / b9fb59d,409a143 / 9431356,44967ab / d7b3ee9 - lavc 55.45.101 / 55.28.1 - avcodec.h
av_frame_alloc(), av_frame_unref() and av_frame_free() now can and should be
used instead of avcodec_alloc_frame(), avcodec_get_frame_defaults() and
avcodec_free_frame() respectively. The latter three functions are deprecated.
2013-12-09 - 7a60348 / 7e244c6 - lavu 52.58.100 / 52.20.0 - frame.h
Add AV_FRAME_DATA_STEREO3D value to the AVFrameSideDataType enum and
stereo3d.h API, that identify codec-independent stereo3d information.
2013-11-26 - 625b290 / 1eaac1d - lavu 52.55.100 / 52.19.0 - frame.h
Add AV_FRAME_DATA_A53_CC value to the AVFrameSideDataType enum, which
identifies ATSC A53 Part 4 Closed Captions data.
2013-11-XX - xxxxxxx - lavu 52.54.100 - avstring.h
Add av_utf8_decode() function.
2013-11-22 - fb7d70c - lavc 55.44.100 - avcodec.h
Add HEVC profiles
2013-11-xx - xxxxxxx - lavc 55.44.100 - avcodec.h
Add av_packet_{un,}pack_dictionary()
Add AV_PKT_METADATA_UPDATE side data type, used to transmit key/value
strings between a stream and the application.
2013-11-14 - 7c888ae / cce3e0a - lavu 52.53.100 / 52.18.0 - mem.h
Move av_fast_malloc() and av_fast_realloc() for libavcodec to libavutil.
2013-11-14 - b71e4d8 / 8941971 - lavc 55.43.100 / 55.27.0 - avcodec.h
Deprecate AVCodecContext.error_rate, it is replaced by the 'error_rate'
private option of the mpegvideo encoder family.
2013-11-14 - 31c09b7 / 728c465 - lavc 55.26.0 - vdpau.h
Add av_vdpau_get_profile().
Add av_vdpau_alloc_context(). This function must from now on be
used for allocating AVVDPAUContext.
2013-11-04 - be41f21 / cd8f772 - lavc 55.41.100 / 55.25.0 - avcodec.h
lavu 52.51.100 - frame.h
Add ITU-R BT.2020 and other not yet included values to color primaries,
transfer characteristics and colorspaces.
2013-11-04 - xxxxxxx - lavu 52.50.100 - avutil.h
Add av_fopen_utf8()
2013-10-31 - 78265fc / 28096e0 - lavu 52.49.100 / 52.17.0 - frame.h
Add AVFrame.flags and AV_FRAME_FLAG_CORRUPT.
2013-10-27 - xxxxxxx - lavc 55.39.100 - avcodec.h
Add CODEC_CAP_DELAY support to avcodec_decode_subtitle2.
@@ -162,11 +35,11 @@ API changes, most recent first:
Add audio/video/subtitle AVCodec fields to AVFormatContext to force specific
decoders
2013-09-28 - 7381d31 / 0767bfd - lavfi 3.88.100 / 3.11.0 - avfilter.h
2013-08-xx - xxxxxxx - lavfi 3.11.0 - avfilter.h
Add AVFilterGraph.execute and AVFilterGraph.opaque for custom slice threading
implementations.
2013-09-21 - 85f8a3c / e208e6d - lavu 52.46.100 / 52.16.0 - pixfmt.h
2013-09-21 - xxxxxxx - lavu 52.16.0 - pixfmt.h
Add interleaved 4:2:2 8/10-bit formats AV_PIX_FMT_NV16 and
AV_PIX_FMT_NV20.
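
As a reader aid for the 2013-12-11 entry above, a minimal sketch of the AVFrame lifecycle that replaces the deprecated avcodec_alloc_frame() / avcodec_get_frame_defaults() / avcodec_free_frame() trio (function name invented):

    #include "libavutil/frame.h"
    #include "libavutil/error.h"

    static int frame_lifecycle(void)
    {
        AVFrame *frame = av_frame_alloc();   /* was avcodec_alloc_frame() */
        if (!frame)
            return AVERROR(ENOMEM);
        /* ... decode into or fill the frame here ... */
        av_frame_unref(frame);               /* was avcodec_get_frame_defaults() */
        av_frame_free(&frame);               /* was avcodec_free_frame() */
        return 0;
    }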

doc/Doxyfile

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.
PROJECT_NUMBER = 2.2-rc2
PROJECT_NUMBER = 2.1
# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -793,13 +793,13 @@ HTML_FILE_EXTENSION = .html
# each generated HTML page. If it is left blank doxygen will generate a
# standard header.
HTML_HEADER =
#HTML_HEADER = doc/doxy/header.html
# The HTML_FOOTER tag can be used to specify a personal HTML footer for
# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.
HTML_FOOTER =
#HTML_FOOTER = doc/doxy/footer.html
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
@@ -808,7 +808,7 @@ HTML_FOOTER =
# the style sheet file to the HTML output directory, so don't put your own
# stylesheet in the HTML output directory as well, or it will be erased!
HTML_STYLESHEET =
#HTML_STYLESHEET = doc/doxy/doxy_stylesheet.css
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
# Doxygen will adjust the colors in the stylesheet and background images

doc/Makefile

@@ -14,11 +14,11 @@ COMPONENTS-$(CONFIG_AVFORMAT) += ffmpeg-formats ffmpeg-protocols
COMPONENTS-$(CONFIG_AVDEVICE) += ffmpeg-devices
COMPONENTS-$(CONFIG_AVFILTER) += ffmpeg-filters
MANPAGES1 = $(AVPROGS-yes:%=doc/%.1) $(AVPROGS-yes:%=doc/%-all.1) $(COMPONENTS-yes:%=doc/%.1)
MANPAGES1 = $(PROGS-yes:%=doc/%.1) $(PROGS-yes:%=doc/%-all.1) $(COMPONENTS-yes:%=doc/%.1)
MANPAGES3 = $(LIBRARIES-yes:%=doc/%.3)
MANPAGES = $(MANPAGES1) $(MANPAGES3)
PODPAGES = $(AVPROGS-yes:%=doc/%.pod) $(AVPROGS-yes:%=doc/%-all.pod) $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod)
HTMLPAGES = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \
PODPAGES = $(PROGS-yes:%=doc/%.pod) $(PROGS-yes:%=doc/%-all.pod) $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod)
HTMLPAGES = $(PROGS-yes:%=doc/%.html) $(PROGS-yes:%=doc/%-all.html) $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \
doc/developer.html \
doc/faq.html \
doc/fate.html \
@@ -36,25 +36,6 @@ DOCS-$(CONFIG_MANPAGES) += $(MANPAGES)
DOCS-$(CONFIG_TXTPAGES) += $(TXTPAGES)
DOCS = $(DOCS-yes)
DOC_EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
DOC_EXAMPLES-$(CONFIG_AVCODEC_EXAMPLE) += avcodec
DOC_EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
DOC_EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
DOC_EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
DOC_EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
DOC_EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
DOC_EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
DOC_EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
DOC_EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE) += transcode_aac
ALL_DOC_EXAMPLES_LIST = $(DOC_EXAMPLES-) $(DOC_EXAMPLES-yes)
DOC_EXAMPLES := $(DOC_EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)$(EXESUF))
ALL_DOC_EXAMPLES := $(ALL_DOC_EXAMPLES_LIST:%=doc/examples/%$(PROGSSUF)$(EXESUF))
ALL_DOC_EXAMPLES_G := $(ALL_DOC_EXAMPLES_LIST:%=doc/examples/%$(PROGSSUF)_g$(EXESUF))
PROGS += $(DOC_EXAMPLES)
all-$(CONFIG_DOC): doc
doc: documentation
@@ -62,9 +43,7 @@ doc: documentation
apidoc: doc/doxy/html
documentation: $(DOCS)
examples: $(DOC_EXAMPLES)
TEXIDEP = perl $(SRC_PATH)/doc/texidep.pl $(SRC_PATH) $< $@ >$(@:%=%.d)
TEXIDEP = awk '/^@(verbatim)?include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d)
doc/%.txt: TAG = TXT
doc/%.txt: doc/%.texi
@@ -105,13 +84,9 @@ doc/%.3: doc/%.pod $(GENTEXI)
$(M)pod2man --section=3 --center=" " --release=" " $< > $@
$(DOCS) doc/doxy/html: | doc/
$(DOC_EXAMPLES:%$(EXESUF)=%.o): | doc/examples
OBJDIRS += doc/examples
DOXY_INPUT = $(addprefix $(SRC_PATH)/, $(INSTHEADERS) $(DOC_EXAMPLES:%$(EXESUF)=%.c) $(LIB_EXAMPLES:%$(EXESUF)=%.c))
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(DOXY_INPUT)
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $< $(DOXY_INPUT)
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(INSTHEADERS)
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $^
install-doc: install-html install-man
@@ -145,7 +120,7 @@ uninstall-html:
$(RM) -r "$(DOCDIR)"
uninstall-man:
$(RM) $(addprefix "$(MANDIR)/man1/",$(AVPROGS-yes:%=%.1) $(AVPROGS-yes:%=%-all.1) $(COMPONENTS-yes:%=%.1))
$(RM) $(addprefix "$(MANDIR)/man1/",$(PROGS-yes:%=%.1) $(PROGS-yes:%=%-all.1) $(COMPONENTS-yes:%=%.1))
$(RM) $(addprefix "$(MANDIR)/man3/",$(LIBRARIES-yes:%=%.3))
clean:: docclean
@@ -153,13 +128,8 @@ clean:: docclean
distclean:: docclean
$(RM) doc/config.texi
examplesclean:
$(RM) $(ALL_DOC_EXAMPLES) $(ALL_DOC_EXAMPLES_G)
$(RM) $(CLEANSUFFIXES:%=doc/examples/%)
docclean: examplesclean
$(RM) $(CLEANSUFFIXES:%=doc/%)
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 doc/*.3 doc/avoptions_*.texi
docclean:
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 doc/*.3 $(CLEANSUFFIXES:%=doc/%) doc/avoptions_*.texi
$(RM) -r doc/doxy/html
-include $(wildcard $(DOCS:%=%.d))

doc/RELEASE_NOTES

@@ -1,7 +1,7 @@
Release Notes
=============
* 2.2 "Muybridge" March, 2014
* 2.1 "Fourier" October, 2013
General notes

doc/avutil.txt Normal file (36 lines)

@@ -0,0 +1,36 @@
AVUtil
======
libavutil is a small lightweight library of generally useful functions.
It is not a library for code needed by both libavcodec and libavformat.
Overview:
=========
adler32.c adler32 checksum
aes.c AES encryption and decryption
fifo.c resizeable first in first out buffer
intfloat_readwrite.c portable reading and writing of floating point values
log.c "printf" with context and level
md5.c MD5 Message-Digest Algorithm
rational.c code to perform exact calculations with rational numbers
tree.c generic AVL tree
crc.c generic CRC checksumming code
integer.c 128bit integer math
lls.c linear least squares solver
mathematics.c greatest common divisor, integer sqrt, integer log2, ...
mem.c memory allocation routines with guaranteed alignment
Headers:
bswap.h big/little/native-endian conversion code
x86_cpu.h a few useful macros for unifying x86-64 and x86-32 code
avutil.h
common.h
intreadwrite.h reading and writing of unaligned big/little/native-endian integers
Goals:
======
* Modular (few interdependencies and the possibility of disabling individual parts during ./configure)
* Small (source and object)
* Efficient (low CPU and memory usage)
* Useful (avoid useless features almost no one needs)
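
To make the rational.c entry concrete, a small sketch using libavutil's exact-rational helpers (the values are invented for the example):

    #include <stdio.h>
    #include "libavutil/rational.h"

    int main(void)
    {
        AVRational fps = { 30000, 1001 };        /* NTSC frame rate */
        AVRational frame_dur = av_inv_q(fps);    /* seconds per frame */
        AVRational ten = av_mul_q(frame_dur, (AVRational){ 10, 1 });
        printf("10 frames last %d/%d s (%.6f s)\n",
               ten.num, ten.den, av_q2d(ten));
        return 0;
    }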

doc/bitstream_filters.texi

@@ -117,6 +117,8 @@ ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
@section movsub
@section mp3_header_compress
@section mp3_header_decompress
@section noise

doc/codecs.texi

@@ -172,13 +172,7 @@ Set max video quantizer scale (VBR). Must be included between -1 and
Set max difference between the quantizer scale (VBR).
@item bf @var{integer} (@emph{encoding,video})
Set max number of B frames between non-B-frames.
Must be an integer between -1 and 16. 0 means that B-frames are
disabled. If a value of -1 is used, it will choose an automatic value
depending on the encoder.
Default value is 0.
Set max number of B frames.
@item b_qfactor @var{float} (@emph{encoding,video})
Set qp factor between P and B frames.
@@ -877,9 +871,6 @@ Set frame skip factor.
@item skip_exp @var{integer} (@emph{encoding,video})
Set frame skip exponent.
Negative values behave identically to the corresponding positive ones, except
that the score is normalized.
Positive values exist primarily for compatibility reasons and are not so useful.
@item skipcmp @var{integer} (@emph{encoding,video})
Set frame skip compare function.
@@ -1092,9 +1083,5 @@ instead of alpha. Default is 0.
@c man end CODEC OPTIONS
@ifclear config-writeonly
@include decoders.texi
@end ifclear
@ifclear config-readonly
@include encoders.texi
@end ifclear

doc/decoders.texi

@@ -14,7 +14,7 @@ You can disable all the decoders with the configure option
with the options @code{--enable-decoder=@var{DECODER}} /
@code{--disable-decoder=@var{DECODER}}.
The option @code{-decoders} of the ff* tools will display the list of
The option @code{-codecs} of the ff* tools will display the list of
enabled decoders.
@c man end DECODERS
@@ -52,37 +52,6 @@ top-field-first is assumed
@chapter Audio Decoders
@c man begin AUDIO DECODERS
A description of some of the currently available audio decoders
follows.
@section ac3
AC-3 audio decoder.
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
the undocumented RealAudio 3 (a.k.a. dnet).
@subsubsection AC-3 Decoder Options
@table @option
@item -drc_scale @var{value}
Dynamic Range Scale Factor. The factor to apply to dynamic range values
from the AC-3 stream. This factor is applied exponentially.
There are 3 notable scale factor ranges:
@table @option
@item drc_scale == 0
DRC disabled. Produces full range audio.
@item 0 < drc_scale <= 1
DRC enabled. Applies a fraction of the stream DRC value.
Audio reproduction is between full range and full compression.
@item drc_scale > 1
DRC enabled. Applies drc_scale asymmetrically.
Loud sounds are fully compressed. Soft sounds are enhanced.
@end table
@end table
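
A hedged sketch of applying the drc_scale option described above through the library API rather than the command line (the helper name is invented); private decoder options are passed via the options dictionary of avcodec_open2():

    #include "libavcodec/avcodec.h"
    #include "libavutil/dict.h"

    static int open_ac3_half_drc(AVCodecContext *ctx, const AVCodec *dec)
    {
        AVDictionary *opts = NULL;
        int ret;
        /* 0 < drc_scale <= 1: apply a fraction of the stream DRC value */
        av_dict_set(&opts, "drc_scale", "0.5", 0);
        ret = avcodec_open2(ctx, dec, &opts);
        av_dict_free(&opts);
        return ret;
    }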
@section ffwavesynth
Internal wave synthesizer.

doc/default.css

@@ -17,8 +17,8 @@ a:visited {
}
#banner img {
margin-bottom: 1px;
margin-top: 5px;
padding-bottom: 1px;
padding-top: 5px;
}
#body {

doc/developer.texi

@@ -92,7 +92,7 @@ for markup commands, i.e. use @code{@@param} and not @code{\param}.
* more text ...
* ...
*/
typedef struct Foobar @{
typedef struct Foobar@{
int var1; /**< var1 description */
int var2; ///< var2 description
/** var3 description */
@@ -248,7 +248,7 @@ Contributions should be licensed under the
@uref{http://www.gnu.org/licenses/lgpl-2.1.html, LGPL 2.1},
including an "or any later version" clause, or, if you prefer
a gift-style license, the
@uref{http://opensource.org/licenses/isc-license.txt, ISC} or
@uref{http://www.isc.org/software/license/, ISC} or
@uref{http://mit-license.org/, MIT} license.
@uref{http://www.gnu.org/licenses/gpl-2.0.html, GPL 2} including
an "or any later version" clause is also acceptable, but LGPL is

doc/devices.texi

@@ -17,9 +17,5 @@ for programmatic use.
@c man end DEVICE OPTIONS
@ifclear config-writeonly
@include indevs.texi
@end ifclear
@ifclear config-readonly
@include outdevs.texi
@end ifclear

doc/doxy-wrapper.sh

@@ -8,5 +8,7 @@ shift 2
doxygen - <<EOF
@INCLUDE = ${DOXYFILE}
INPUT = $@
EXAMPLE_PATH = ${SRC_PATH}/doc/examples
HTML_HEADER = ${SRC_PATH}/doc/doxy/header.html
HTML_FOOTER = ${SRC_PATH}/doc/doxy/footer.html
HTML_STYLESHEET = ${SRC_PATH}/doc/doxy/doxy_stylesheet.css
EOF

doc/doxy/doxy_stylesheet.css Normal file (2019 lines)

File diff suppressed because it is too large

doc/doxy/footer.html Normal file (9 lines)

@@ -0,0 +1,9 @@
<footer class="footer pagination-right">
<span class="label label-info">
Generated on $datetime for $projectname by&#160;<a href="http://www.doxygen.org/index.html">doxygen</a> $doxygenversion
</span>
</footer>
</div>
</body>
</html>

doc/doxy/header.html Normal file (16 lines)

@@ -0,0 +1,16 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath$doxy_stylesheet.css" rel="stylesheet" type="text/css" />
<!--Header replace -->
</head>
<div class="container">
<!--Header replace -->
<div class="menu">

doc/encoders.texi

@@ -14,7 +14,7 @@ You can disable all the encoders with the configure option
with the options @code{--enable-encoder=@var{ENCODER}} /
@code{--disable-encoder=@var{ENCODER}}.
The option @code{-encoders} of the ff* tools will display the list of
The option @code{-codecs} of the ff* tools will display the list of
enabled encoders.
@c man end ENCODERS
@@ -38,8 +38,8 @@ As this encoder is experimental, unexpected behavior may exist from time to
time. For a more stable AAC encoder, see @ref{libvo-aacenc}. However, be warned
that some users report it produces worse quality.
@c todo @ref{libaacplus}
See also @ref{libfdk-aac-enc,,libfdk_aac} and @ref{libfaac}.
@c Comment this out until somebody writes the respective documentation.
@c See also @ref{libfaac}, @ref{libaacplus}, and @ref{libfdk-aac-enc}.
@subsection Options
@@ -494,285 +494,6 @@ Selected by Encoder (default)
@end table
@anchor{libfaac}
@section libfaac
libfaac AAC (Advanced Audio Coding) encoder wrapper.
Requires the presence of the libfaac headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libfaac --enable-nonfree}.
This encoder is considered to provide higher quality than
@ref{aacenc,,the native experimental FFmpeg AAC encoder}.
For more information see the libfaac project at
@url{http://www.audiocoding.com/faac.html/}.
@subsection Options
The following shared FFmpeg codec options are recognized.
The following options are supported by the libfaac wrapper. The
@command{faac}-equivalent of the options are listed in parentheses.
@table @option
@item b (@emph{-b})
Set bit rate in bits/s for ABR (Average Bit Rate) mode. If the bit rate
is not explicitly specified, it is automatically set to a suitable
value depending on the selected profile. @command{faac} bitrate is
expressed in kilobits/s.
Note that libfaac does not support CBR (Constant Bit Rate) but only
ABR (Average Bit Rate).
If VBR mode is enabled this option is ignored.
@item ar (@emph{-R})
Set audio sampling rate (in Hz).
@item ac (@emph{-c})
Set the number of audio channels.
@item cutoff (@emph{-C})
Set cutoff frequency. If not specified (or explicitly set to 0) it
will use a value automatically computed by the library. Default value
is 0.
@item profile
Set audio profile.
The following profiles are recognized:
@table @samp
@item aac_main
Main AAC (Main)
@item aac_low
Low Complexity AAC (LC)
@item aac_ssr
Scalable Sample Rate (SSR)
@item aac_ltp
Long Term Prediction (LTP)
@end table
If not specified it is set to @samp{aac_low}.
@item flags +qscale
Set constant quality VBR (Variable Bit Rate) mode.
@item global_quality
Set quality in VBR mode as an integer number of lambda units.
Only relevant when VBR mode is enabled with @code{flags +qscale}. The
value is converted to QP units by dividing it by @code{FF_QP2LAMBDA},
and used to set the quality value used by libfaac. A reasonable range
for the option value in QP units is [10-500], the higher the value the
higher the quality.
@item q (@emph{-q})
Enable VBR mode when set to a non-negative value, and set constant
quality value as a double floating point value in QP units.
The value sets the quality value used by libfaac. A reasonable range
for the option value is [10-500], the higher the value the higher the
quality.
This option is valid only using the @command{ffmpeg} command-line
tool. For library interface users, use @option{global_quality}.
@end table
@subsection Examples
@itemize
@item
Use @command{ffmpeg} to convert an audio file to ABR 128 kbps AAC in an M4A (MP4)
container:
@example
ffmpeg -i input.wav -codec:a libfaac -b:a 128k output.m4a
@end example
@item
Use @command{ffmpeg} to convert an audio file to VBR AAC, using the
LTP AAC profile:
@example
ffmpeg -i input.wav -c:a libfaac -profile:a aac_ltp -q:a 100 output.m4a
@end example
@end itemize
@anchor{libfdk-aac-enc}
@section libfdk_aac
libfdk-aac AAC (Advanced Audio Coding) encoder wrapper.
The libfdk-aac library is based on the Fraunhofer FDK AAC code from
the Android project.
Requires the presence of the libfdk-aac headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libfdk-aac}. The library is also incompatible with GPL,
so if you allow the use of GPL, you should configure with
@code{--enable-gpl --enable-nonfree --enable-libfdk-aac}.
This encoder is considered to provide higher quality than both
@ref{aacenc,,the native experimental FFmpeg AAC encoder} and
@ref{libfaac}.
VBR encoding, enabled through the @option{vbr} or @option{flags
+qscale} options, is experimental and only works with some
combinations of parameters.
Support for encoding 7.1 audio is only available with libfdk-aac 0.1.3 or
higher.
For more information see the fdk-aac project at
@url{http://sourceforge.net/p/opencore-amr/fdk-aac/}.
@subsection Options
The following options are mapped on the shared FFmpeg codec options.
@table @option
@item b
Set bit rate in bits/s. If the bitrate is not explicitly specified, it
is automatically set to a suitable value depending on the selected
profile.
In case VBR mode is enabled the option is ignored.
@item ar
Set audio sampling rate (in Hz).
@item channels
Set the number of audio channels.
@item flags +qscale
Enable fixed quality, VBR (Variable Bit Rate) mode.
Note that VBR is implicitly enabled when the @option{vbr} value is
positive.
@item cutoff
Set cutoff frequency. If not specified (or explicitly set to 0) it
will use a value automatically computed by the library. Default value
is 0.
@item profile
Set audio profile.
The following profiles are recognized:
@table @samp
@item aac_low
Low Complexity AAC (LC)
@item aac_he
High Efficiency AAC (HE-AAC)
@item aac_he_v2
High Efficiency AAC version 2 (HE-AACv2)
@item aac_ld
Low Delay AAC (LD)
@item aac_eld
Enhanced Low Delay AAC (ELD)
@end table
If not specified it is set to @samp{aac_low}.
@end table
The following are private options of the libfdk_aac encoder.
@table @option
@item afterburner
Enable afterburner feature if set to 1, disabled if set to 0. This
improves the quality but also the required processing power.
Default value is 1.
@item eld_sbr
Enable SBR (Spectral Band Replication) for ELD if set to 1, disabled
if set to 0.
Default value is 0.
@item signaling
Set SBR/PS signaling style.
It can assume one of the following values:
@table @samp
@item default
choose signaling implicitly (explicit hierarchical by default,
implicit if global header is disabled)
@item implicit
implicit backwards compatible signaling
@item explicit_sbr
explicit SBR, implicit PS signaling
@item explicit_hierarchical
explicit hierarchical signaling
@end table
Default value is @samp{default}.
@item latm
Output LATM/LOAS encapsulated data if set to 1, disabled if set to 0.
Default value is 0.
@item header_period
Set StreamMuxConfig and PCE repetition period (in frames) for sending
in-band configuration buffers within LATM/LOAS transport layer.
Must be a 16-bit non-negative integer.
Default value is 0.
@item vbr
Set VBR mode, from 1 to 5. 1 is lowest quality (though still pretty
good) and 5 is highest quality. A value of 0 disables VBR and enables
CBR (Constant Bit Rate) instead.
Currently only the @samp{aac_low} profile supports VBR encoding.
VBR modes 1-5 correspond to roughly the following average bit rates:
@table @samp
@item 1
32 kbps/channel
@item 2
40 kbps/channel
@item 3
48-56 kbps/channel
@item 4
64 kbps/channel
@item 5
about 80-96 kbps/channel
@end table
Default value is 0.
@end table
@subsection Examples
@itemize
@item
Use @command{ffmpeg} to convert an audio file to VBR AAC in an M4A (MP4)
container:
@example
ffmpeg -i input.wav -codec:a libfdk_aac -vbr 3 output.m4a
@end example
@item
Use @command{ffmpeg} to convert an audio file to CBR 64 kbps AAC, using the
High-Efficiency AAC profile:
@example
ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_he -b:a 64k output.m4a
@end example
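@item
As an additional sketch (assuming a libfdk-aac build with ELD support),
use @command{ffmpeg} to encode Enhanced Low Delay AAC with SBR enabled:
@example
ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_eld -eld_sbr 1 output.m4a
@end example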
@end itemize
@anchor{libmp3lame}
@section libmp3lame
@@ -792,7 +513,7 @@ The following options are supported by the libmp3lame wrapper. The
@table @option
@item b (@emph{-b})
Set bitrate expressed in bits/s for CBR or ABR. LAME @code{bitrate} is
Set bitrate expressed in bits/s for CBR. LAME @code{bitrate} is
expressed in kilobits/s.
@item q (@emph{-V})
@@ -814,11 +535,6 @@ has this enabled by default, but can be overriden by use
Enable the encoder to use (on a frame by frame basis) either L/R
stereo or mid/side stereo. Default value is 1.
@item abr (@emph{--abr})
Enable the encoder to use ABR when set to 1. The @command{lame}
@option{--abr} option sets the target bitrate, while this option only
tells FFmpeg to use ABR; it still relies on @option{b} to set the bitrate.
@end table
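For illustration, an ABR encode targeting an average of 192 kbps might
look like the following sketch (file names are placeholders):
@example
ffmpeg -i input.wav -c:a libmp3lame -abr 1 -b:a 192k output.mp3
@end example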
@section libopencore-amrnb
@@ -1070,7 +786,7 @@ Set maximum frame size, or duration of a frame in milliseconds. The
argument must be exactly one of the following: 2.5, 5, 10, 20, 40 or 60. Smaller
frame sizes achieve lower latency but less quality at a given bitrate.
Sizes greater than 20ms are only interesting at fairly low bitrates.
The default is 20ms.
The FFmpeg default is 10ms, whereas @command{opusenc} defaults to 20ms.
@item packet_loss (@emph{expect-loss})
Set expected packet loss percentage. The default is 0.
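As a sketch combining the options above (values chosen arbitrarily),
encode Opus with 20ms frames while telling the encoder to expect 10%
packet loss:
@example
ffmpeg -i input.wav -c:a libopus -b:a 96k -frame_duration 20 -packet_loss 10 output.ogg
@end example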
@@ -1147,111 +863,32 @@ transient response is a higher bitrate.
@end table
@anchor{libwavpack}
@section libwavpack
A wrapper providing WavPack encoding through libwavpack.
Currently, only lossless mode using 32-bit integer samples is supported.
Requires the presence of the libwavpack headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libwavpack}.
Note that a libavcodec-native encoder for the WavPack codec also exists,
so users can encode audio in this codec without using this wrapper. See
@ref{wavpackenc}.
@subsection Options
The corresponding options of the @command{wavpack} command-line utility
are listed in parentheses, if any.
The @option{compression_level} option can be used to control speed vs.
compression tradeoff, with the values mapped to libwavpack as follows:
@table @option
@item frame_size (@emph{--blocksize})
Default is 32768.
@item compression_level
Set speed vs. compression tradeoff. Acceptable arguments are listed below:
@table @samp
@item 0 (@emph{-f})
Fast mode.
@item 0
Fast mode - corresponding to the wavpack @option{-f} option.
@item 1
Normal (default) settings.
@item 2 (@emph{-h})
High quality.
@item 2
High quality - corresponding to the wavpack @option{-h} option.
@item 3 (@emph{-hh})
Very high quality.
@item 3
Very high quality - corresponding to the wavpack @option{-hh} option.
@item 4-8 (@emph{-hh -x}@var{EXTRAPROC})
Same as @samp{3}, but with extra processing enabled.
@samp{4} is the same as @option{-x2} and @samp{8} is the same as @option{-x6}.
@end table
@end table
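For example, a lossless encode at the very high quality setting might be
invoked as in the following sketch (the input file is a placeholder):
@example
ffmpeg -i input.flac -c:a libwavpack -compression_level 3 output.wv
@end example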
@anchor{wavpackenc}
@section wavpack
WavPack lossless audio encoder.
This is a libavcodec-native WavPack encoder. There is also an encoder based on
libwavpack, but there is virtually no reason to use that encoder.
See also @ref{libwavpack}.
@subsection Options
The equivalent options for the @command{wavpack} command-line utility are listed in
parentheses.
@subsubsection Shared options
The following shared options are effective for this encoder. Only special notes
about this particular encoder will be documented here. For the general meaning
of the options, see @ref{codec-options,,the Codec Options chapter}.
@table @option
@item frame_size (@emph{--blocksize})
For this encoder, the range for this option is between 128 and 131072. The
default is decided automatically based on the sample rate and the number
of channels.
For the complete formula used to compute the default, see
@file{libavcodec/wavpackenc.c}.
@item compression_level (@emph{-f}, @emph{-h}, @emph{-hh}, and @emph{-x})
This option's syntax is consistent with @ref{libwavpack}'s.
@end table
@subsubsection Private options
@table @option
@item joint_stereo (@emph{-j})
Set whether to enable joint stereo. Valid values are:
@table @samp
@item on (@emph{1})
Force mid/side audio encoding.
@item off (@emph{0})
Force left/right audio encoding.
@item auto
Let the encoder decide automatically.
@end table
@item optimize_mono
Set whether to enable optimization for mono. This option is only effective for
non-mono streams. Available values:
@table @samp
@item on
enabled
@item off
disabled
@end table
@item 4-8
Same as 3, but with extra processing enabled - corresponding to the wavpack
@option{-x} option. I.e. 4 is the same as @option{-x2} and 8 is the same as
@option{-x6}.
@end table
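As an illustrative sketch (option values chosen arbitrarily), force
mid/side encoding with the native encoder:
@example
ffmpeg -i input.wav -c:a wavpack -joint_stereo on -compression_level 2 output.wv
@end example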
@@ -1265,15 +902,12 @@ follows.
@section libtheora
libtheora Theora encoder wrapper.
Theora format supported through libtheora.
Requires the presence of the libtheora headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libtheora}.
For more information about the libtheora project see
@url{http://www.theora.org/}.
@subsection Options
The following global options are mapped to internal libtheora options
@@ -1281,11 +915,11 @@ which affect the quality and the bitrate of the encoded stream.
@table @option
@item b
Set the video bitrate in bit/s for CBR (Constant Bit Rate) mode. In
case VBR (Variable Bit Rate) mode is enabled this option is ignored.
Set the video bitrate, only works if the @code{qscale} flag in
@option{flags} is not enabled.
@item flags
Used to enable constant quality mode (VBR) encoding through the
Used to enable constant quality mode encoding through the
@option{qscale} flag, and to enable the @code{pass1} and @code{pass2}
modes.
@@ -1293,41 +927,19 @@ modes.
Set the GOP size.
@item global_quality
Set the global quality as an integer in lambda units.
Set the global quality in lambda units, only works if the
@code{qscale} flag in @option{flags} is enabled. The value is clipped
in the [0 - 10*@code{FF_QP2LAMBDA}] range, and then multiplied by 6.3
to get a value in the native libtheora range [0-63]. A higher value
corresponds to a higher quality.
Only relevant when VBR mode is enabled with @code{flags +qscale}. The
value is converted to QP units by dividing it by @code{FF_QP2LAMBDA},
clipped in the [0 - 10] range, and then multiplied by 6.3 to get a
value in the native libtheora range [0-63]. A higher value corresponds
to a higher quality.
@item q
Enable VBR mode when set to a non-negative value, and set the constant
quality value as a double floating point number in QP units.
The value is clipped in the [0-10] range, and then multiplied by 6.3
to get a value in the native libtheora range [0-63].
This option is valid only when using the @command{ffmpeg} command-line
tool. For library interface users, use @option{global_quality}.
For example, to set maximum constant quality encoding with
@command{ffmpeg}:
@example
ffmpeg -i INPUT -flags:v qscale -global_quality:v "10*QP2LAMBDA" -codec:v libtheora OUTPUT.ogg
@end example
@end table
@subsection Examples
@itemize
@item
Set maximum constant quality (VBR) encoding with @command{ffmpeg}:
@example
ffmpeg -i INPUT -codec:v libtheora -q:v 10 OUTPUT.ogg
@end example
@item
Use @command{ffmpeg} to encode a CBR 1000 kbps Theora video stream:
@example
ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg
@end example
@end itemize
@section libvpx
VP8 format supported through libvpx.
@@ -1447,69 +1059,7 @@ g_error_resilient
For more information about libvpx see:
@url{http://www.webmproject.org/}
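As a minimal sketch (the bitrate is chosen arbitrarily), encode VP8 into
a WebM container:
@example
ffmpeg -i INPUT -c:v libvpx -b:v 1M OUTPUT.webm
@end example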
@section libwebp
libwebp WebP image encoder wrapper.
libwebp is Google's official encoder for WebP images. It can encode in either
lossy or lossless mode. Lossy images are essentially a wrapper around a VP8
frame. Lossless images are a separate codec developed by Google.
@subsection Pixel Format
Currently, libwebp only supports YUV420 for lossy and RGB for lossless due
to limitations of the format and libwebp. Alpha is supported for either mode.
Because of API limitations, if RGB is passed in when encoding lossy or YUV is
passed in for encoding lossless, the pixel format will automatically be
converted using functions from libwebp. This is not ideal and is done only for
convenience.
@subsection Options
@table @option
@item -lossless @var{boolean}
Enables/Disables use of lossless mode. Default is 0.
@item -compression_level @var{integer}
For lossy, this is a quality/speed tradeoff. Higher values give better quality
for a given size at the cost of increased encoding time. For lossless, this is
a size/speed tradeoff. Higher values give smaller size at the cost of increased
encoding time. More specifically, it controls the number of extra algorithms
and compression tools used, and varies the combination of these tools. This
maps to the @var{method} option in libwebp. The valid range is 0 to 6.
Default is 4.
@item -qscale @var{float}
For lossy encoding, this controls image quality, 0 to 100. For lossless
encoding, this controls the effort and time spent at compressing more. The
default value is 75. Note that for usage via libavcodec, this option is called
@var{global_quality} and must be multiplied by @var{FF_QP2LAMBDA}.
@item -preset @var{type}
Configuration preset. This does some automatic settings based on the general
type of the image.
@table @option
@item none
Do not use a preset.
@item default
Use the encoder default.
@item picture
Digital picture, like portrait, inner shot
@item photo
Outdoor photograph, with natural lighting
@item drawing
Hand or line drawing, with high-contrast details
@item icon
Small-sized colorful images
@item text
Text-like
@end table
@end table
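To illustrate the two modes described above (a sketch; quality values
are arbitrary), encode an image losslessly at maximum compression
effort, or lossily using a preset:
@example
ffmpeg -i input.png -c:v libwebp -lossless 1 -compression_level 6 output.webp
ffmpeg -i input.png -c:v libwebp -preset photo -q:v 75 output.webp
@end example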
@section libx264, libx264rgb
@section libx264
x264 H.264/MPEG-4 AVC encoder wrapper.
@@ -1531,16 +1081,6 @@ by the libx264 @code{x264_param_parse} function.
The x264 project website is at
@url{http://www.videolan.org/developers/x264.html}.
The libx264rgb encoder is the same as libx264, except it accepts packed RGB
pixel formats as input instead of YUV.
@subsection Supported Pixel Formats
x264 supports 8- to 10-bit color spaces. The exact bit depth is controlled at
x264's configure time. FFmpeg only supports one bit depth in one particular
build. In other words, it is not possible to build one FFmpeg with multiple
versions of x264 with different bit depths.
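As a brief sketch (preset and CRF values are arbitrary), a typical
libx264 encode looks like:
@example
ffmpeg -i INPUT -c:v libx264 -preset slow -crf 22 -c:a copy OUTPUT.mkv
@end example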
@subsection Options
The following options are supported by the libx264 wrapper. The

View File

@@ -11,22 +11,19 @@ CFLAGS += -Wall -g
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
EXAMPLES= avio_reading \
avcodec \
demuxing_decoding \
EXAMPLES= decoding_encoding \
demuxing \
filtering_video \
filtering_audio \
metadata \
muxing \
remuxing \
resampling_audio \
scaling_video \
transcode_aac \
OBJS=$(addsuffix .o,$(EXAMPLES))
# the following examples make explicit use of the math library
avcodec: LDLIBS += -lm
decoding_encoding: LDLIBS += -lm
muxing: LDLIBS += -lm
resampling_audio: LDLIBS += -lm

View File

@@ -5,19 +5,14 @@ Both following use cases rely on pkg-config and make, thus make sure
that you have them installed and working on your system.
Method 1: build the installed examples in a generic read/write user directory
1) Build the installed examples in a generic read/write user directory
Copy to a read/write user directory and just use "make", it will link
to the libraries on your system, assuming the PKG_CONFIG_PATH is
correctly configured.
Method 2: build the examples in-tree
2) Build the examples in-tree
Assuming you are in the source FFmpeg checkout directory, you need to build
FFmpeg (no need to make install in any prefix). Then just run "make examples".
This will build the examples using the FFmpeg build system. You can clean those
examples using "make examplesclean"
If you want to try the dedicated Makefile examples (to emulate the first
method), go into doc/examples and run a command such as
PKG_CONFIG_PATH=pc-uninstalled make.
FFmpeg (no need to make install in any prefix). Then you can go into
doc/examples and run a command such as PKG_CONFIG_PATH=pc-uninstalled make.

View File

@@ -1,132 +0,0 @@
/*
* Copyright (c) 2014 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* libavformat AVIOContext API example.
*
* Make libavformat demuxer access media content through a custom
* AVIOContext read callback.
* @example avio_reading.c
*/
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/file.h>
struct buffer_data {
uint8_t *ptr;
size_t size; ///< size left in the buffer
};
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
{
struct buffer_data *bd = (struct buffer_data *)opaque;
buf_size = FFMIN(buf_size, bd->size);
printf("ptr:%p size:%zu\n", bd->ptr, bd->size);
/* copy internal buffer data to buf */
memcpy(buf, bd->ptr, buf_size);
bd->ptr += buf_size;
bd->size -= buf_size;
return buf_size;
}
int main(int argc, char *argv[])
{
AVFormatContext *fmt_ctx = NULL;
AVIOContext *avio_ctx = NULL;
uint8_t *buffer = NULL, *avio_ctx_buffer = NULL;
size_t buffer_size, avio_ctx_buffer_size = 4096;
char *input_filename = NULL;
int ret = 0;
struct buffer_data bd = { 0 };
if (argc != 2) {
fprintf(stderr, "usage: %s input_file\n"
"API example program to show how to read from a custom buffer "
"accessed through AVIOContext.\n", argv[0]);
return 1;
}
input_filename = argv[1];
/* register codecs and formats and other lavf/lavc components*/
av_register_all();
/* slurp file content into buffer */
ret = av_file_map(input_filename, &buffer, &buffer_size, 0, NULL);
if (ret < 0)
goto end;
/* fill opaque structure used by the AVIOContext read callback */
bd.ptr = buffer;
bd.size = buffer_size;
if (!(fmt_ctx = avformat_alloc_context())) {
ret = AVERROR(ENOMEM);
goto end;
}
avio_ctx_buffer = av_malloc(avio_ctx_buffer_size);
if (!avio_ctx_buffer) {
ret = AVERROR(ENOMEM);
goto end;
}
avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
0, &bd, &read_packet, NULL, NULL);
if (!avio_ctx) {
ret = AVERROR(ENOMEM);
goto end;
}
fmt_ctx->pb = avio_ctx;
ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open input\n");
goto end;
}
ret = avformat_find_stream_info(fmt_ctx, NULL);
if (ret < 0) {
fprintf(stderr, "Could not find stream information\n");
goto end;
}
av_dump_format(fmt_ctx, 0, input_filename, 0);
end:
avformat_close_input(&fmt_ctx);
/* note: the internal buffer could have changed, and be != avio_ctx_buffer */
av_freep(&avio_ctx->buffer);
av_freep(&avio_ctx);
av_file_unmap(buffer, buffer_size);
if (ret < 0) {
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
return 1;
}
return 0;
}

View File

@@ -24,10 +24,10 @@
* @file
* libavcodec API use example.
*
* @example avcodec.c
* Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
* not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
* format handling.
* @example doc/examples/decoding_encoding.c
*/
#include <math.h>
@@ -156,7 +156,7 @@ static void audio_encode_example(const char *filename)
}
/* frame containing input raw audio */
frame = av_frame_alloc();
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
@@ -170,10 +170,6 @@ static void audio_encode_example(const char *filename)
* we calculate the size of the samples buffer in bytes */
buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
c->sample_fmt, 0);
if (buffer_size < 0) {
fprintf(stderr, "Could not get sample buffer size\n");
exit(1);
}
samples = av_malloc(buffer_size);
if (!samples) {
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
@@ -191,7 +187,7 @@ static void audio_encode_example(const char *filename)
/* encode a single tone sound */
t = 0;
tincr = 2 * M_PI * 440.0 / c->sample_rate;
for (i = 0; i < 200; i++) {
for(i=0;i<200;i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
@@ -231,7 +227,7 @@ static void audio_encode_example(const char *filename)
fclose(f);
av_freep(&samples);
av_frame_free(&frame);
avcodec_free_frame(&frame);
avcodec_close(c);
av_free(c);
}
@@ -291,11 +287,12 @@ static void audio_decode_example(const char *outfilename, const char *filename)
int got_frame = 0;
if (!decoded_frame) {
if (!(decoded_frame = av_frame_alloc())) {
if (!(decoded_frame = avcodec_alloc_frame())) {
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
}
} else
avcodec_get_frame_defaults(decoded_frame);
len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
if (len < 0) {
@@ -307,11 +304,6 @@ static void audio_decode_example(const char *outfilename, const char *filename)
int data_size = av_samples_get_buffer_size(NULL, c->channels,
decoded_frame->nb_samples,
c->sample_fmt, 1);
if (data_size < 0) {
/* This should not occur, checking just for paranoia */
fprintf(stderr, "Failed to calculate data size\n");
exit(1);
}
fwrite(decoded_frame->data[0], 1, data_size, outfile);
}
avpkt.size -= len;
@@ -337,7 +329,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
avcodec_close(c);
av_free(c);
av_frame_free(&decoded_frame);
avcodec_free_frame(&decoded_frame);
}
/*
@@ -374,12 +366,12 @@ static void video_encode_example(const char *filename, int codec_id)
c->width = 352;
c->height = 288;
/* frames per second */
c->time_base = (AVRational){1,25};
c->time_base= (AVRational){1,25};
c->gop_size = 10; /* emit one intra frame every ten frames */
c->max_b_frames = 1;
c->max_b_frames=1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if (codec_id == AV_CODEC_ID_H264)
if(codec_id == AV_CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
/* open it */
@@ -394,7 +386,7 @@ static void video_encode_example(const char *filename, int codec_id)
exit(1);
}
frame = av_frame_alloc();
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
@@ -413,7 +405,7 @@ static void video_encode_example(const char *filename, int codec_id)
}
/* encode 1 second of video */
for (i = 0; i < 25; i++) {
for(i=0;i<25;i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
@@ -421,15 +413,15 @@ static void video_encode_example(const char *filename, int codec_id)
fflush(stdout);
/* prepare a dummy image */
/* Y */
for (y = 0; y < c->height; y++) {
for (x = 0; x < c->width; x++) {
for(y=0;y<c->height;y++) {
for(x=0;x<c->width;x++) {
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
}
}
/* Cb and Cr */
for (y = 0; y < c->height/2; y++) {
for (x = 0; x < c->width/2; x++) {
for(y=0;y<c->height/2;y++) {
for(x=0;x<c->width/2;x++) {
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
}
@@ -475,7 +467,7 @@ static void video_encode_example(const char *filename, int codec_id)
avcodec_close(c);
av_free(c);
av_freep(&frame->data[0]);
av_frame_free(&frame);
avcodec_free_frame(&frame);
printf("\n");
}
@@ -489,10 +481,10 @@ static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
FILE *f;
int i;
f = fopen(filename,"w");
fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
for (i = 0; i < ysize; i++)
fwrite(buf + i * wrap, 1, xsize, f);
f=fopen(filename,"w");
fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255);
for(i=0;i<ysize;i++)
fwrite(buf + i * wrap,1,xsize,f);
fclose(f);
}
@@ -573,14 +565,14 @@ static void video_decode_example(const char *outfilename, const char *filename)
exit(1);
}
frame = av_frame_alloc();
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame_count = 0;
for (;;) {
for(;;) {
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
if (avpkt.size == 0)
break;
@@ -617,7 +609,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
avcodec_close(c);
av_free(c);
av_frame_free(&frame);
avcodec_free_frame(&frame);
printf("\n");
}

View File

@@ -22,11 +22,11 @@
/**
* @file
* Demuxing and decoding example.
* libavformat demuxing API use example.
*
* Show how to use the libavformat and libavcodec API to demux and
* decode audio and video data.
* @example demuxing_decoding.c
* @example doc/examples/demuxing.c
*/
#include <libavutil/imgutils.h>
@@ -53,30 +53,16 @@ static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;
/* The different ways of decoding and managing data memory. You are not
* supposed to support all the modes in your application but pick the one most
* appropriate to your needs. Look for the use of api_mode in this example to
* see the differences in API usage between them. */
enum {
API_MODE_OLD = 0, /* old method, deprecated */
API_MODE_NEW_API_REF_COUNT = 1, /* new method, using the frame reference counting */
API_MODE_NEW_API_NO_REF_COUNT = 2, /* new method, without reference counting */
};
static int api_mode = API_MODE_OLD;
static int decode_packet(int *got_frame, int cached)
{
int ret = 0;
int decoded = pkt.size;
*got_frame = 0;
if (pkt.stream_index == video_stream_idx) {
/* decode video frame */
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
if (ret < 0) {
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
fprintf(stderr, "Error decoding video frame\n");
return ret;
}
@@ -99,7 +85,7 @@ static int decode_packet(int *got_frame, int cached)
/* decode audio frame */
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
if (ret < 0) {
fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
fprintf(stderr, "Error decoding audio frame\n");
return ret;
}
/* Some audio decoders decode only part of the packet, and have to be
@@ -127,11 +113,6 @@ static int decode_packet(int *got_frame, int cached)
}
}
/* If we use the new API with reference counting, we own the data and need
* to de-reference it when we don't use it anymore */
if (*got_frame && api_mode == API_MODE_NEW_API_REF_COUNT)
av_frame_unref(frame);
return decoded;
}
@@ -142,7 +123,6 @@ static int open_codec_context(int *stream_idx,
AVStream *st;
AVCodecContext *dec_ctx = NULL;
AVCodec *dec = NULL;
AVDictionary *opts = NULL;
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
if (ret < 0) {
@@ -159,13 +139,10 @@ static int open_codec_context(int *stream_idx,
if (!dec) {
fprintf(stderr, "Failed to find %s codec\n",
av_get_media_type_string(type));
return AVERROR(EINVAL);
return ret;
}
/* Init the decoders, with or without reference counting */
if (api_mode == API_MODE_NEW_API_REF_COUNT)
av_dict_set(&opts, "refcounted_frames", "1", 0);
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
fprintf(stderr, "Failed to open %s codec\n",
av_get_media_type_string(type));
return ret;
@@ -208,31 +185,15 @@ int main (int argc, char **argv)
{
int ret = 0, got_frame;
if (argc != 4 && argc != 5) {
fprintf(stderr, "usage: %s [-refcount=<old|new_norefcount|new_refcount>] "
"input_file video_output_file audio_output_file\n"
if (argc != 4) {
fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
"API example program to show how to read frames from an input file.\n"
"This program reads frames from a file, decodes them, and writes decoded\n"
"video frames to a rawvideo file named video_output_file, and decoded\n"
"audio frames to a rawaudio file named audio_output_file.\n\n"
"If the -refcount option is specified, the program use the\n"
"reference counting frame system which allows keeping a copy of\n"
"the data for longer than one decode call. If unset, it's using\n"
"the classic old method.\n"
"audio frames to a rawaudio file named audio_output_file.\n"
"\n", argv[0]);
exit(1);
}
if (argc == 5) {
const char *mode = argv[1] + strlen("-refcount=");
if (!strcmp(mode, "old")) api_mode = API_MODE_OLD;
else if (!strcmp(mode, "new_norefcount")) api_mode = API_MODE_NEW_API_NO_REF_COUNT;
else if (!strcmp(mode, "new_refcount")) api_mode = API_MODE_NEW_API_REF_COUNT;
else {
fprintf(stderr, "unknow mode '%s'\n", mode);
exit(1);
}
argv++;
}
src_filename = argv[1];
video_dst_filename = argv[2];
audio_dst_filename = argv[3];
@@ -294,12 +255,7 @@ int main (int argc, char **argv)
goto end;
}
/* When using the new API, you need to use the libavutil/frame.h API, while
* the classic frame management is available in libavcodec */
if (api_mode == API_MODE_OLD)
frame = avcodec_alloc_frame();
else
frame = av_frame_alloc();
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate frame\n");
ret = AVERROR(ENOMEM);
@@ -369,17 +325,16 @@ int main (int argc, char **argv)
}
end:
avcodec_close(video_dec_ctx);
avcodec_close(audio_dec_ctx);
if (video_dec_ctx)
avcodec_close(video_dec_ctx);
if (audio_dec_ctx)
avcodec_close(audio_dec_ctx);
avformat_close_input(&fmt_ctx);
if (video_dst_file)
fclose(video_dst_file);
if (audio_dst_file)
fclose(audio_dst_file);
if (api_mode == API_MODE_OLD)
avcodec_free_frame(&frame);
else
av_frame_free(&frame);
av_free(frame);
av_free(video_dst_data[0]);
return ret < 0;

View File

@@ -1,364 +0,0 @@
/*
* copyright (c) 2013 Andrew Kelley
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* libavfilter API usage example.
*
* @example filter_audio.c
* This example will generate sine wave audio,
* pass it through a simple filter chain, and then compute the MD5 checksum of
* the output data.
*
* The filter chain it uses is:
* (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
*
* abuffer: This provides the endpoint where you can feed the decoded samples.
* volume: In this example we hardcode it to 0.90.
* aformat: This converts the samples to the sample rate, channel layout,
* and sample format required by the audio device.
* abuffersink: This provides the endpoint where you can read the samples after
* they have passed through the filter chain.
*/
#include <inttypes.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "libavutil/channel_layout.h"
#include "libavutil/md5.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#define INPUT_SAMPLERATE 48000
#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0
#define VOLUME_VAL 0.90
static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
AVFilterContext **sink)
{
AVFilterGraph *filter_graph;
AVFilterContext *abuffer_ctx;
AVFilter *abuffer;
AVFilterContext *volume_ctx;
AVFilter *volume;
AVFilterContext *aformat_ctx;
AVFilter *aformat;
AVFilterContext *abuffersink_ctx;
AVFilter *abuffersink;
AVDictionary *options_dict = NULL;
uint8_t options_str[1024];
uint8_t ch_layout[64];
int err;
/* Create a new filtergraph, which will contain all the filters. */
filter_graph = avfilter_graph_alloc();
if (!filter_graph) {
fprintf(stderr, "Unable to create filter graph.\n");
return AVERROR(ENOMEM);
}
/* Create the abuffer filter;
* it will be used for feeding the data into the graph. */
abuffer = avfilter_get_by_name("abuffer");
if (!abuffer) {
fprintf(stderr, "Could not find the abuffer filter.\n");
return AVERROR_FILTER_NOT_FOUND;
}
abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
if (!abuffer_ctx) {
fprintf(stderr, "Could not allocate the abuffer instance.\n");
return AVERROR(ENOMEM);
}
/* Set the filter options through the AVOptions API. */
av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
av_opt_set_int(abuffer_ctx, "sample_rate", INPUT_SAMPLERATE, AV_OPT_SEARCH_CHILDREN);
/* Now initialize the filter; we pass NULL options, since we have already
* set all the options above. */
err = avfilter_init_str(abuffer_ctx, NULL);
if (err < 0) {
fprintf(stderr, "Could not initialize the abuffer filter.\n");
return err;
}
/* Create volume filter. */
volume = avfilter_get_by_name("volume");
if (!volume) {
fprintf(stderr, "Could not find the volume filter.\n");
return AVERROR_FILTER_NOT_FOUND;
}
volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume");
if (!volume_ctx) {
fprintf(stderr, "Could not allocate the volume instance.\n");
return AVERROR(ENOMEM);
}
/* A different way of passing the options is as key/value pairs in a
* dictionary. */
av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0);
err = avfilter_init_dict(volume_ctx, &options_dict);
av_dict_free(&options_dict);
if (err < 0) {
fprintf(stderr, "Could not initialize the volume filter.\n");
return err;
}
/* Create the aformat filter;
* it ensures that the output is of the format we want. */
aformat = avfilter_get_by_name("aformat");
if (!aformat) {
fprintf(stderr, "Could not find the aformat filter.\n");
return AVERROR_FILTER_NOT_FOUND;
}
aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
if (!aformat_ctx) {
fprintf(stderr, "Could not allocate the aformat instance.\n");
return AVERROR(ENOMEM);
}
/* A third way of passing the options is in a string of the form
* key1=value1:key2=value2.... */
snprintf(options_str, sizeof(options_str),
"sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
(uint64_t)AV_CH_LAYOUT_STEREO);
err = avfilter_init_str(aformat_ctx, options_str);
if (err < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
return err;
}
/* Finally create the abuffersink filter;
* it will be used to get the filtered data out of the graph. */
abuffersink = avfilter_get_by_name("abuffersink");
if (!abuffersink) {
fprintf(stderr, "Could not find the abuffersink filter.\n");
return AVERROR_FILTER_NOT_FOUND;
}
abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
if (!abuffersink_ctx) {
fprintf(stderr, "Could not allocate the abuffersink instance.\n");
return AVERROR(ENOMEM);
}
/* This filter takes no options. */
err = avfilter_init_str(abuffersink_ctx, NULL);
if (err < 0) {
fprintf(stderr, "Could not initialize the abuffersink instance.\n");
return err;
}
/* Connect the filters;
* in this simple case the filters just form a linear chain. */
err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0);
if (err >= 0)
err = avfilter_link(volume_ctx, 0, aformat_ctx, 0);
if (err >= 0)
err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
if (err < 0) {
fprintf(stderr, "Error connecting filters\n");
return err;
}
/* Configure the graph. */
err = avfilter_graph_config(filter_graph, NULL);
if (err < 0) {
av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
return err;
}
*graph = filter_graph;
*src = abuffer_ctx;
*sink = abuffersink_ctx;
return 0;
}
/* Do something useful with the filtered data: this simple
* example just prints the MD5 checksum of each plane to stdout. */
static int process_output(struct AVMD5 *md5, AVFrame *frame)
{
int planar = av_sample_fmt_is_planar(frame->format);
int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
int planes = planar ? channels : 1;
int bps = av_get_bytes_per_sample(frame->format);
int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
int i, j;
for (i = 0; i < planes; i++) {
uint8_t checksum[16];
av_md5_init(md5);
av_md5_sum(checksum, frame->extended_data[i], plane_size);
fprintf(stdout, "plane %d: 0x", i);
for (j = 0; j < sizeof(checksum); j++)
fprintf(stdout, "%02X", checksum[j]);
fprintf(stdout, "\n");
}
fprintf(stdout, "\n");
return 0;
}
/* Construct a frame of audio data to be filtered;
* this simple example just synthesizes a sine wave. */
static int get_input(AVFrame *frame, int frame_num)
{
int err, i, j;
#define FRAME_SIZE 1024
/* Set up the frame properties and allocate the buffer for the data. */
frame->sample_rate = INPUT_SAMPLERATE;
frame->format = INPUT_FORMAT;
frame->channel_layout = INPUT_CHANNEL_LAYOUT;
frame->nb_samples = FRAME_SIZE;
frame->pts = frame_num * FRAME_SIZE;
err = av_frame_get_buffer(frame, 0);
if (err < 0)
return err;
/* Fill the data for each channel. */
for (i = 0; i < 5; i++) {
float *data = (float*)frame->extended_data[i];
for (j = 0; j < frame->nb_samples; j++)
data[j] = sin(2 * M_PI * (frame_num + j) * (i + 1) / FRAME_SIZE);
}
return 0;
}
int main(int argc, char *argv[])
{
struct AVMD5 *md5;
AVFilterGraph *graph;
AVFilterContext *src, *sink;
AVFrame *frame;
uint8_t errstr[1024];
float duration;
int err, nb_frames, i;
if (argc < 2) {
fprintf(stderr, "Usage: %s <duration>\n", argv[0]);
return 1;
}
duration = atof(argv[1]);
nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE;
if (nb_frames <= 0) {
fprintf(stderr, "Invalid duration: %s\n", argv[1]);
return 1;
}
avfilter_register_all();
/* Allocate the frame we will be using to store the data. */
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Error allocating the frame\n");
return 1;
}
md5 = av_md5_alloc();
if (!md5) {
fprintf(stderr, "Error allocating the MD5 context\n");
return 1;
}
/* Set up the filtergraph. */
err = init_filter_graph(&graph, &src, &sink);
if (err < 0) {
fprintf(stderr, "Unable to init filter graph:");
goto fail;
}
/* the main filtering loop */
for (i = 0; i < nb_frames; i++) {
/* get an input frame to be filtered */
err = get_input(frame, i);
if (err < 0) {
fprintf(stderr, "Error generating input frame:");
goto fail;
}
/* Send the frame to the input of the filtergraph. */
err = av_buffersrc_add_frame(src, frame);
if (err < 0) {
av_frame_unref(frame);
fprintf(stderr, "Error submitting the frame to the filtergraph:");
goto fail;
}
/* Get all the filtered output that is available. */
while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
/* now do something with our filtered frame */
err = process_output(md5, frame);
if (err < 0) {
fprintf(stderr, "Error processing the filtered frame:");
goto fail;
}
av_frame_unref(frame);
}
if (err == AVERROR(EAGAIN)) {
/* Need to feed more frames in. */
continue;
} else if (err == AVERROR_EOF) {
/* Nothing more to do, finish. */
break;
} else if (err < 0) {
/* An error occurred. */
fprintf(stderr, "Error filtering the data:");
goto fail;
}
}
avfilter_graph_free(&graph);
av_frame_free(&frame);
av_freep(&md5);
return 0;
fail:
av_strerror(err, errstr, sizeof(errstr));
fprintf(stderr, "%s\n", errstr);
return 1;
}

View File

@@ -25,7 +25,7 @@
/**
* @file
* API example for audio decoding and filtering
* @example filtering_audio.c
* @example doc/examples/filtering_audio.c
*/
#include <unistd.h>
@@ -85,7 +85,7 @@ static int open_input_file(const char *filename)
static int init_filters(const char *filters_descr)
{
char args[512];
int ret = 0;
int ret;
AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
@@ -97,10 +97,6 @@ static int init_filters(const char *filters_descr)
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
ret = AVERROR(ENOMEM);
goto end;
}
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
if (!dec_ctx->channel_layout)
@@ -113,7 +109,7 @@ static int init_filters(const char *filters_descr)
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
goto end;
return ret;
}
/* buffer audio sink: to terminate the filter chain. */
@@ -121,28 +117,28 @@ static int init_filters(const char *filters_descr)
NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
goto end;
return ret;
}
ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
goto end;
return ret;
}
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
goto end;
return ret;
}
ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
goto end;
return ret;
}
/* Endpoints for the filter graph. */
@@ -157,11 +153,11 @@ static int init_filters(const char *filters_descr)
inputs->next = NULL;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
&inputs, &outputs, NULL)) < 0)
goto end;
&inputs, &outputs, NULL)) < 0)
return ret;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
goto end;
return ret;
/* Print summary of the sink buffer
* Note: args buffer is reused to store channel layout string */
@@ -172,11 +168,7 @@ static int init_filters(const char *filters_descr)
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
args);
end:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
return 0;
}
static void print_frame(const AVFrame *frame)
@@ -196,7 +188,7 @@ static void print_frame(const AVFrame *frame)
int main(int argc, char **argv)
{
int ret;
AVPacket packet0, packet;
AVPacket packet;
AVFrame *frame = av_frame_alloc();
AVFrame *filt_frame = av_frame_alloc();
int got_frame;
@@ -210,6 +202,7 @@ int main(int argc, char **argv)
exit(1);
}
avcodec_register_all();
av_register_all();
avfilter_register_all();
@@ -219,24 +212,18 @@ int main(int argc, char **argv)
goto end;
/* read all packets */
packet0.data = NULL;
packet.data = NULL;
while (1) {
if (!packet0.data) {
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
break;
packet0 = packet;
}
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
break;
if (packet.stream_index == audio_stream_index) {
avcodec_get_frame_defaults(frame);
got_frame = 0;
ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
continue;
}
packet.size -= ret;
packet.data += ret;
if (got_frame) {
/* push the audio data from decoded frame into the filtergraph */
@@ -248,31 +235,29 @@ int main(int argc, char **argv)
/* pull filtered audio from the filtergraph */
while (1) {
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0)
if(ret < 0)
goto end;
print_frame(filt_frame);
av_frame_unref(filt_frame);
}
}
if (packet.size <= 0)
av_free_packet(&packet0);
} else {
/* discard non-wanted packets */
av_free_packet(&packet0);
}
av_free_packet(&packet);
}
end:
avfilter_graph_free(&filter_graph);
avcodec_close(dec_ctx);
if (dec_ctx)
avcodec_close(dec_ctx);
avformat_close_input(&fmt_ctx);
av_frame_free(&frame);
av_frame_free(&filt_frame);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
char buf[1024];
av_strerror(ret, buf, sizeof(buf));
fprintf(stderr, "Error occurred: %s\n", buf);
exit(1);
}

View File

@@ -24,7 +24,7 @@
/**
* @file
* API example for decoding and filtering
* @example filtering_video.c
* @example doc/examples/filtering_video.c
*/
#define _XOPEN_SOURCE 600 /* for usleep */
@@ -36,7 +36,6 @@
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
const char *filter_descr = "scale=78:24";
@@ -71,7 +70,6 @@ static int open_input_file(const char *filename)
}
video_stream_index = ret;
dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
/* init the video decoder */
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
@@ -85,18 +83,15 @@ static int open_input_file(const char *filename)
static int init_filters(const char *filters_descr)
{
char args[512];
int ret = 0;
int ret;
AVFilter *buffersrc = avfilter_get_by_name("buffer");
AVFilter *buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
AVBufferSinkParams *buffersink_params;
filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
ret = AVERROR(ENOMEM);
goto end;
}
/* buffer video source: the decoded frames from the decoder will be inserted here. */
snprintf(args, sizeof(args),
@@ -109,22 +104,18 @@ static int init_filters(const char *filters_descr)
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
goto end;
return ret;
}
/* buffer video sink: to terminate the filter chain. */
buffersink_params = av_buffersink_params_alloc();
buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
NULL, buffersink_params, filter_graph);
av_free(buffersink_params);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
goto end;
}
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
goto end;
return ret;
}
/* Endpoints for the filter graph. */
@@ -140,16 +131,11 @@ static int init_filters(const char *filters_descr)
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
&inputs, &outputs, NULL)) < 0)
goto end;
return ret;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
goto end;
end:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
return ret;
return 0;
}
static void display_frame(const AVFrame *frame, AVRational time_base)
@@ -200,6 +186,7 @@ int main(int argc, char **argv)
exit(1);
}
avcodec_register_all();
av_register_all();
avfilter_register_all();
@@ -214,6 +201,7 @@ int main(int argc, char **argv)
break;
if (packet.stream_index == video_stream_index) {
avcodec_get_frame_defaults(frame);
got_frame = 0;
ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
if (ret < 0) {
@@ -240,20 +228,22 @@ int main(int argc, char **argv)
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
av_frame_unref(filt_frame);
}
av_frame_unref(frame);
}
}
av_free_packet(&packet);
}
end:
avfilter_graph_free(&filter_graph);
avcodec_close(dec_ctx);
if (dec_ctx)
avcodec_close(dec_ctx);
avformat_close_input(&fmt_ctx);
av_frame_free(&frame);
av_frame_free(&filt_frame);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
char buf[1024];
av_strerror(ret, buf, sizeof(buf));
fprintf(stderr, "Error occurred: %s\n", buf);
exit(1);
}

View File

@@ -23,7 +23,7 @@
/**
* @file
* Shows how the metadata API can be used in application programs.
* @example metadata.c
* @example doc/examples/metadata.c
*/
#include <stdio.h>

View File

@@ -24,9 +24,9 @@
* @file
* libavformat API example.
*
* Output a media file in any supported libavformat format. The default
* codecs are used.
* @example muxing.c
* Output a media file in any supported libavformat format.
* The default codecs are used.
* @example doc/examples/muxing.c
*/
#include <stdlib.h>
@@ -36,43 +36,18 @@
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
static int audio_is_eof, video_is_eof;
#define STREAM_DURATION 10.0
/* 5 seconds stream duration */
#define STREAM_DURATION 200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
static int sws_flags = SWS_BICUBIC;
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
pkt->stream_index);
}
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
/* rescale output packet timestamp values from codec to stream timebase */
pkt->pts = av_rescale_q_rnd(pkt->pts, *time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt->dts = av_rescale_q_rnd(pkt->dts, *time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt->duration = av_rescale_q(pkt->duration, *time_base, st->time_base);
pkt->stream_index = st->index;
/* Write the compressed frame to the media file. */
log_packet(fmt_ctx, pkt);
return av_interleaved_write_frame(fmt_ctx, pkt);
}
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id)
@@ -98,8 +73,7 @@ static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
c->sample_fmt = (*codec)->sample_fmts ?
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
c->sample_fmt = AV_SAMPLE_FMT_FLTP;
c->bit_rate = 64000;
c->sample_rate = 44100;
c->channels = 2;
@@ -148,7 +122,6 @@ static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
static float t, tincr, tincr2;
AVFrame *audio_frame;
static uint8_t **src_samples_data;
static int src_samples_linesize;
static int src_nb_samples;
@@ -157,7 +130,6 @@ static int max_dst_nb_samples;
uint8_t **dst_samples_data;
int dst_samples_linesize;
int dst_samples_size;
int samples_count;
struct SwrContext *swr_ctx = NULL;
@@ -168,13 +140,6 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
c = st->codec;
/* allocate and init a re-usable frame */
audio_frame = av_frame_alloc();
if (!audio_frame) {
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
/* open it */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
@@ -192,17 +157,12 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
10000 : c->frame_size;
ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels,
src_nb_samples, AV_SAMPLE_FMT_S16, 0);
src_nb_samples, c->sample_fmt, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate source samples\n");
exit(1);
}
/* compute the number of converted samples: buffering is avoided
* ensuring that the output buffer will contain at least all the
* converted input samples */
max_dst_nb_samples = src_nb_samples;
/* create resampler context */
if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
swr_ctx = swr_alloc();
@@ -224,15 +184,17 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
fprintf(stderr, "Failed to initialize the resampling context\n");
exit(1);
}
}
ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
max_dst_nb_samples, c->sample_fmt, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate destination samples\n");
exit(1);
}
} else {
dst_samples_data = src_samples_data;
/* compute the number of converted samples: buffering is avoided
* ensuring that the output buffer will contain at least all the
* converted input samples */
max_dst_nb_samples = src_nb_samples;
ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
max_dst_nb_samples, c->sample_fmt, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate destination samples\n");
exit(1);
}
dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples,
c->sample_fmt, 0);
@@ -255,83 +217,77 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
}
}
static void write_audio_frame(AVFormatContext *oc, AVStream *st, int flush)
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
AVCodecContext *c;
AVPacket pkt = { 0 }; // data and size must be 0;
AVFrame *frame = avcodec_alloc_frame();
int got_packet, ret, dst_nb_samples;
av_init_packet(&pkt);
c = st->codec;
if (!flush) {
get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);
get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);
/* convert samples from native format to destination codec format, using the resampler */
if (swr_ctx) {
/* compute destination number of samples */
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
c->sample_rate, c->sample_rate, AV_ROUND_UP);
if (dst_nb_samples > max_dst_nb_samples) {
av_free(dst_samples_data[0]);
ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
dst_nb_samples, c->sample_fmt, 0);
if (ret < 0)
exit(1);
max_dst_nb_samples = dst_nb_samples;
dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
c->sample_fmt, 0);
}
/* convert to destination format */
ret = swr_convert(swr_ctx,
dst_samples_data, dst_nb_samples,
(const uint8_t **)src_samples_data, src_nb_samples);
if (ret < 0) {
fprintf(stderr, "Error while converting\n");
/* convert samples from native format to destination codec format, using the resampler */
if (swr_ctx) {
/* compute destination number of samples */
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
c->sample_rate, c->sample_rate, AV_ROUND_UP);
if (dst_nb_samples > max_dst_nb_samples) {
av_free(dst_samples_data[0]);
ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
dst_nb_samples, c->sample_fmt, 0);
if (ret < 0)
exit(1);
}
} else {
dst_nb_samples = src_nb_samples;
max_dst_nb_samples = dst_nb_samples;
dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
c->sample_fmt, 0);
}
audio_frame->nb_samples = dst_nb_samples;
audio_frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base);
avcodec_fill_audio_frame(audio_frame, c->channels, c->sample_fmt,
dst_samples_data[0], dst_samples_size, 0);
samples_count += dst_nb_samples;
/* convert to destination format */
ret = swr_convert(swr_ctx,
dst_samples_data, dst_nb_samples,
(const uint8_t **)src_samples_data, src_nb_samples);
if (ret < 0) {
fprintf(stderr, "Error while converting\n");
exit(1);
}
} else {
dst_samples_data[0] = src_samples_data[0];
dst_nb_samples = src_nb_samples;
}
ret = avcodec_encode_audio2(c, &pkt, flush ? NULL : audio_frame, &got_packet);
frame->nb_samples = dst_nb_samples;
avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
dst_samples_data[0], dst_samples_size, 0);
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
exit(1);
}
if (!got_packet) {
if (flush)
audio_is_eof = 1;
if (!got_packet)
return;
}
ret = write_frame(oc, &c->time_base, st, &pkt);
if (ret < 0) {
pkt.stream_index = st->index;
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(oc, &pkt);
if (ret != 0) {
fprintf(stderr, "Error while writing audio frame: %s\n",
av_err2str(ret));
exit(1);
}
avcodec_free_frame(&frame);
}
static void close_audio(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
if (dst_samples_data != src_samples_data) {
av_free(dst_samples_data[0]);
av_free(dst_samples_data);
}
av_free(src_samples_data[0]);
av_free(src_samples_data);
av_frame_free(&audio_frame);
av_free(dst_samples_data[0]);
}
/**************************************************************/
@@ -354,14 +310,11 @@ static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
}
/* allocate and init a re-usable frame */
frame = av_frame_alloc();
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
/* Allocate the encoded raw picture. */
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
@@ -408,13 +361,17 @@ static void fill_yuv_image(AVPicture *pict, int frame_index,
}
}
static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush)
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
int ret;
static struct SwsContext *sws_ctx;
AVCodecContext *c = st->codec;
if (!flush) {
if (frame_count >= STREAM_NB_FRAMES) {
/* No more frames to compress. The codec has a latency of a few
* frames if using B-frames, so we get the last frames by
* passing the same picture again. */
} else {
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
@@ -437,7 +394,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush)
}
}
if (oc->oformat->flags & AVFMT_RAWPICTURE && !flush) {
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* Raw video case - directly store the picture in the packet */
AVPacket pkt;
av_init_packet(&pkt);
@@ -454,24 +411,23 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush)
av_init_packet(&pkt);
/* encode the image */
frame->pts = frame_count;
ret = avcodec_encode_video2(c, &pkt, flush ? NULL : frame, &got_packet);
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
exit(1);
}
/* If size is zero, it means the image was buffered. */
if (got_packet) {
ret = write_frame(oc, &c->time_base, st, &pkt);
if (!ret && got_packet && pkt.size) {
pkt.stream_index = st->index;
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(oc, &pkt);
} else {
if (flush)
video_is_eof = 1;
ret = 0;
}
}
if (ret < 0) {
if (ret != 0) {
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
exit(1);
}
@@ -483,7 +439,7 @@ static void close_video(AVFormatContext *oc, AVStream *st)
avcodec_close(st->codec);
av_free(src_picture.data[0]);
av_free(dst_picture.data[0]);
av_frame_free(&frame);
av_free(frame);
}
/**************************************************************/
@@ -497,7 +453,7 @@ int main(int argc, char **argv)
AVStream *audio_st, *video_st;
AVCodec *audio_codec, *video_codec;
double audio_time, video_time;
int flush, ret;
int ret;
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
@@ -521,9 +477,9 @@ int main(int argc, char **argv)
printf("Could not deduce output format from file extension: using MPEG.\n");
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
}
if (!oc)
if (!oc) {
return 1;
}
fmt = oc->oformat;
/* Add the audio and video streams using the default format codecs
@@ -531,10 +487,12 @@ int main(int argc, char **argv)
video_st = NULL;
audio_st = NULL;
if (fmt->video_codec != AV_CODEC_ID_NONE)
if (fmt->video_codec != AV_CODEC_ID_NONE) {
video_st = add_stream(oc, &video_codec, fmt->video_codec);
if (fmt->audio_codec != AV_CODEC_ID_NONE)
}
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
}
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
@@ -563,23 +521,23 @@ int main(int argc, char **argv)
return 1;
}
flush = 0;
while ((video_st && !video_is_eof) || (audio_st && !audio_is_eof)) {
if (frame)
frame->pts = 0;
for (;;) {
/* Compute current audio and video time. */
audio_time = (audio_st && !audio_is_eof) ? audio_st->pts.val * av_q2d(audio_st->time_base) : INFINITY;
video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;
audio_time = audio_st ? audio_st->pts.val * av_q2d(audio_st->time_base) : 0.0;
video_time = video_st ? video_st->pts.val * av_q2d(video_st->time_base) : 0.0;
if (!flush &&
(!audio_st || audio_time >= STREAM_DURATION) &&
(!video_st || video_time >= STREAM_DURATION)) {
flush = 1;
}
if ((!audio_st || audio_time >= STREAM_DURATION) &&
(!video_st || video_time >= STREAM_DURATION))
break;
/* write interleaved audio and video frames */
if (audio_st && !audio_is_eof && audio_time <= video_time) {
write_audio_frame(oc, audio_st, flush);
} else if (video_st && !video_is_eof && video_time < audio_time) {
write_video_frame(oc, video_st, flush);
if (!video_st || (video_st && audio_st && audio_time < video_time)) {
write_audio_frame(oc, audio_st);
} else {
write_video_frame(oc, video_st);
frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
}
}

View File

@@ -1,164 +0,0 @@
/*
* Copyright (c) 2013 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* libavformat/libavcodec demuxing and muxing API example.
*
* Remux streams from one container format to another.
* @example remuxing.c
*/
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
tag,
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
pkt->stream_index);
}
int main(int argc, char **argv)
{
AVOutputFormat *ofmt = NULL;
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
AVPacket pkt;
const char *in_filename, *out_filename;
int ret, i;
if (argc < 3) {
printf("usage: %s input output\n"
"API example program to remux a media file with libavformat and libavcodec.\n"
"The output format is guessed according to the file extension.\n"
"\n", argv[0]);
return 1;
}
in_filename = argv[1];
out_filename = argv[2];
av_register_all();
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
fprintf(stderr, "Could not open input file '%s'", in_filename);
goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
fprintf(stderr, "Failed to retrieve input stream information");
goto end;
}
av_dump_format(ifmt_ctx, 0, in_filename, 0);
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
if (!ofmt_ctx) {
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ofmt = ofmt_ctx->oformat;
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *in_stream = ifmt_ctx->streams[i];
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream) {
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0) {
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
goto end;
}
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
av_dump_format(ofmt_ctx, 0, out_filename, 1);
if (!(ofmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open output file '%s'", out_filename);
goto end;
}
}
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file\n");
goto end;
}
while (1) {
AVStream *in_stream, *out_stream;
ret = av_read_frame(ifmt_ctx, &pkt);
if (ret < 0)
break;
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = ofmt_ctx->streams[pkt.stream_index];
log_packet(ifmt_ctx, &pkt, "in");
/* copy packet */
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
log_packet(ofmt_ctx, &pkt, "out");
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
if (ret < 0) {
fprintf(stderr, "Error muxing packet\n");
break;
}
av_free_packet(&pkt);
}
av_write_trailer(ofmt_ctx);
end:
avformat_close_input(&ifmt_ctx);
/* close output */
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
avio_close(ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
return 1;
}
return 0;
}
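A minimal way to build and try this example (a sketch; file names are illustrative, and it assumes FFmpeg's pkg-config files are installed):
gcc remuxing.c -o remuxing $(pkg-config --cflags --libs libavformat libavcodec libavutil)
./remuxing input.mkv output.mp4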

View File

@@ -21,7 +21,7 @@
*/
/**
* @example resampling_audio.c
* @example doc/examples/resampling_audio.c
* libswresample API use example.
*/
@@ -62,7 +62,7 @@ static int get_format_from_sample_fmt(const char **fmt,
/**
* Fill dst buffer with nb_samples, generated starting from t.
*/
static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
{
int i, j;
double tincr = 1.0 / sample_rate, *dstp = dst;
@@ -184,10 +184,6 @@ int main(int argc, char **argv)
}
dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
ret, dst_sample_fmt, 1);
if (dst_bufsize < 0) {
fprintf(stderr, "Could not get sample buffer size\n");
goto end;
}
printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
} while (t < 10);

View File

@@ -23,7 +23,7 @@
/**
* @file
* libswscale API use example.
* @example scaling_video.c
* @example doc/examples/scaling_video.c
*/
#include <libavutil/imgutils.h>

View File

@@ -1,755 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* simple audio converter
*
* @example transcode_aac.c
* Convert an input audio file to AAC in an MP4 container using FFmpeg.
* @author Andreas Unterweger (dustsigns@gmail.com)
*/
#include <stdio.h>
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavcodec/avcodec.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/frame.h"
#include "libavutil/opt.h"
#include "libswresample/swresample.h"
/** The output bit rate in kbit/s */
#define OUTPUT_BIT_RATE 48000
/** The number of output channels */
#define OUTPUT_CHANNELS 2
/** The audio sample output format */
#define OUTPUT_SAMPLE_FORMAT AV_SAMPLE_FMT_S16
/**
* Convert an error code into a text message.
* @param error Error code to be converted
* @return Corresponding error text (not thread-safe)
*/
static char *get_error_text(const int error)
{
static char error_buffer[255];
av_strerror(error, error_buffer, sizeof(error_buffer));
return error_buffer;
}
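As an alternative sketch, libavutil's av_err2str() convenience macro (already used by the muxing example above) formats the message into a stack buffer and so avoids the static-buffer caveat of get_error_text():
/* equivalent, without the shared static buffer */
fprintf(stderr, "Error: %s\n", av_err2str(error));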
/** Open an input file and the required decoder. */
static int open_input_file(const char *filename,
AVFormatContext **input_format_context,
AVCodecContext **input_codec_context)
{
AVCodec *input_codec;
int error;
/** Open the input file to read from it. */
if ((error = avformat_open_input(input_format_context, filename, NULL,
NULL)) < 0) {
fprintf(stderr, "Could not open input file '%s' (error '%s')\n",
filename, get_error_text(error));
*input_format_context = NULL;
return error;
}
/** Get information on the input file (number of streams etc.). */
if ((error = avformat_find_stream_info(*input_format_context, NULL)) < 0) {
fprintf(stderr, "Could not open find stream info (error '%s')\n",
get_error_text(error));
avformat_close_input(input_format_context);
return error;
}
/** Make sure that there is only one stream in the input file. */
if ((*input_format_context)->nb_streams != 1) {
fprintf(stderr, "Expected one audio input stream, but found %d\n",
(*input_format_context)->nb_streams);
avformat_close_input(input_format_context);
return AVERROR_EXIT;
}
/** Find a decoder for the audio stream. */
if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codec->codec_id))) {
fprintf(stderr, "Could not find input codec\n");
avformat_close_input(input_format_context);
return AVERROR_EXIT;
}
/** Open the decoder for the audio stream to use it later. */
if ((error = avcodec_open2((*input_format_context)->streams[0]->codec,
input_codec, NULL)) < 0) {
fprintf(stderr, "Could not open input codec (error '%s')\n",
get_error_text(error));
avformat_close_input(input_format_context);
return error;
}
/** Save the decoder context for easier access later. */
*input_codec_context = (*input_format_context)->streams[0]->codec;
return 0;
}
/**
* Open an output file and the required encoder.
* Also set some basic encoder parameters.
* Some of these parameters are based on the input file's parameters.
*/
static int open_output_file(const char *filename,
AVCodecContext *input_codec_context,
AVFormatContext **output_format_context,
AVCodecContext **output_codec_context)
{
AVIOContext *output_io_context = NULL;
AVStream *stream = NULL;
AVCodec *output_codec = NULL;
int error;
/** Open the output file to write to it. */
if ((error = avio_open(&output_io_context, filename,
AVIO_FLAG_WRITE)) < 0) {
fprintf(stderr, "Could not open output file '%s' (error '%s')\n",
filename, get_error_text(error));
return error;
}
/** Create a new format context for the output container format. */
if (!(*output_format_context = avformat_alloc_context())) {
fprintf(stderr, "Could not allocate output format context\n");
return AVERROR(ENOMEM);
}
/** Associate the output file (pointer) with the container format context. */
(*output_format_context)->pb = output_io_context;
/** Guess the desired container format based on the file extension. */
if (!((*output_format_context)->oformat = av_guess_format(NULL, filename,
NULL))) {
fprintf(stderr, "Could not find output file format\n");
goto cleanup;
}
av_strlcpy((*output_format_context)->filename, filename,
sizeof((*output_format_context)->filename));
/** Find the encoder to be used by its name. */
if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC))) {
fprintf(stderr, "Could not find an AAC encoder.\n");
goto cleanup;
}
/** Create a new audio stream in the output file container. */
if (!(stream = avformat_new_stream(*output_format_context, output_codec))) {
fprintf(stderr, "Could not create new stream\n");
error = AVERROR(ENOMEM);
goto cleanup;
}
/** Save the encoder context for easier access later. */
*output_codec_context = stream->codec;
/**
* Set the basic encoder parameters.
* The input file's sample rate is used to avoid a sample rate conversion.
*/
(*output_codec_context)->channels = OUTPUT_CHANNELS;
(*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
(*output_codec_context)->sample_rate = input_codec_context->sample_rate;
(*output_codec_context)->sample_fmt = AV_SAMPLE_FMT_S16;
(*output_codec_context)->bit_rate = OUTPUT_BIT_RATE;
/**
* Some container formats (like MP4) require global headers to be present.
* Mark the encoder so that it behaves accordingly.
*/
if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
(*output_codec_context)->flags |= CODEC_FLAG_GLOBAL_HEADER;
/** Open the encoder for the audio stream to use it later. */
if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
fprintf(stderr, "Could not open output codec (error '%s')\n",
get_error_text(error));
goto cleanup;
}
return 0;
cleanup:
avio_close((*output_format_context)->pb);
avformat_free_context(*output_format_context);
*output_format_context = NULL;
return error < 0 ? error : AVERROR_EXIT;
}
/** Initialize one data packet for reading or writing. */
static void init_packet(AVPacket *packet)
{
av_init_packet(packet);
/** Set the packet data and size so that it is recognized as being empty. */
packet->data = NULL;
packet->size = 0;
}
/** Initialize one audio frame for reading from the input file */
static int init_input_frame(AVFrame **frame)
{
if (!(*frame = av_frame_alloc())) {
fprintf(stderr, "Could not allocate input frame\n");
return AVERROR(ENOMEM);
}
return 0;
}
/**
* Initialize the audio resampler based on the input and output codec settings.
* If the input and output sample formats differ, a conversion is required;
* libswresample takes care of this, but requires initialization.
*/
static int init_resampler(AVCodecContext *input_codec_context,
AVCodecContext *output_codec_context,
SwrContext **resample_context)
{
int error;
/**
* Create a resampler context for the conversion.
* Set the conversion parameters.
* Default channel layouts based on the number of channels
* are assumed for simplicity (they are sometimes not detected
* properly by the demuxer and/or decoder).
*/
*resample_context = swr_alloc_set_opts(NULL,
av_get_default_channel_layout(output_codec_context->channels),
output_codec_context->sample_fmt,
output_codec_context->sample_rate,
av_get_default_channel_layout(input_codec_context->channels),
input_codec_context->sample_fmt,
input_codec_context->sample_rate,
0, NULL);
if (!*resample_context) {
fprintf(stderr, "Could not allocate resample context\n");
return AVERROR(ENOMEM);
}
/**
* Perform a sanity check so that the number of converted samples is
* not greater than the number of samples to be converted.
* If the sample rates differ, this case has to be handled differently.
*/
av_assert0(output_codec_context->sample_rate == input_codec_context->sample_rate);
/** Open the resampler with the specified parameters. */
if ((error = swr_init(*resample_context)) < 0) {
fprintf(stderr, "Could not open resample context\n");
swr_free(resample_context);
return error;
}
return 0;
}
/** Initialize a FIFO buffer for the audio samples to be encoded. */
static int init_fifo(AVAudioFifo **fifo)
{
/** Create the FIFO buffer based on the specified output sample format. */
if (!(*fifo = av_audio_fifo_alloc(OUTPUT_SAMPLE_FORMAT, OUTPUT_CHANNELS, 1))) {
fprintf(stderr, "Could not allocate FIFO\n");
return AVERROR(ENOMEM);
}
return 0;
}
/** Write the header of the output file container. */
static int write_output_file_header(AVFormatContext *output_format_context)
{
int error;
if ((error = avformat_write_header(output_format_context, NULL)) < 0) {
fprintf(stderr, "Could not write output file header (error '%s')\n",
get_error_text(error));
return error;
}
return 0;
}
/** Decode one audio frame from the input file. */
static int decode_audio_frame(AVFrame *frame,
AVFormatContext *input_format_context,
AVCodecContext *input_codec_context,
int *data_present, int *finished)
{
/** Packet used for temporary storage. */
AVPacket input_packet;
int error;
init_packet(&input_packet);
/** Read one audio frame from the input file into a temporary packet. */
if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
/** If we are at the end of the file, flush the decoder below. */
if (error == AVERROR_EOF)
*finished = 1;
else {
fprintf(stderr, "Could not read frame (error '%s')\n",
get_error_text(error));
return error;
}
}
/**
* Decode the audio frame stored in the temporary packet.
* The input audio stream decoder is used to do this.
* If we are at the end of the file, pass an empty packet to the decoder
* to flush it.
*/
if ((error = avcodec_decode_audio4(input_codec_context, frame,
data_present, &input_packet)) < 0) {
fprintf(stderr, "Could not decode frame (error '%s')\n",
get_error_text(error));
av_free_packet(&input_packet);
return error;
}
/**
* If the decoder has not been flushed completely, we are not finished,
* so this function has to be called again.
*/
if (*finished && *data_present)
*finished = 0;
av_free_packet(&input_packet);
return 0;
}
/**
* Initialize a temporary storage for the specified number of audio samples.
* The conversion requires temporary storage due to the different format.
* The number of audio samples to be allocated is specified in frame_size.
*/
static int init_converted_samples(uint8_t ***converted_input_samples,
AVCodecContext *output_codec_context,
int frame_size)
{
int error;
/**
* Allocate as many pointers as there are audio channels.
* Each pointer will later point to the audio samples of the corresponding
* channels (although it may be NULL for interleaved formats).
*/
if (!(*converted_input_samples = calloc(output_codec_context->channels,
sizeof(**converted_input_samples)))) {
fprintf(stderr, "Could not allocate converted input sample pointers\n");
return AVERROR(ENOMEM);
}
/**
* Allocate memory for the samples of all channels in one consecutive
* block for convenience.
*/
if ((error = av_samples_alloc(*converted_input_samples, NULL,
output_codec_context->channels,
frame_size,
output_codec_context->sample_fmt, 0)) < 0) {
fprintf(stderr,
"Could not allocate converted input samples (error '%s')\n",
get_error_text(error));
av_freep(&(*converted_input_samples)[0]);
free(*converted_input_samples);
return error;
}
return 0;
}
/**
* Convert the input audio samples into the output sample format.
* The conversion happens on a per-frame basis, the size of which is specified
* by frame_size.
*/
static int convert_samples(const uint8_t **input_data,
uint8_t **converted_data, const int frame_size,
SwrContext *resample_context)
{
int error;
/** Convert the samples using the resampler. */
if ((error = swr_convert(resample_context,
converted_data, frame_size,
input_data , frame_size)) < 0) {
fprintf(stderr, "Could not convert input samples (error '%s')\n",
get_error_text(error));
return error;
}
return 0;
}
/** Add converted input audio samples to the FIFO buffer for later processing. */
static int add_samples_to_fifo(AVAudioFifo *fifo,
uint8_t **converted_input_samples,
const int frame_size)
{
int error;
/**
* Make the FIFO as large as it needs to be to hold both
* the old and the new samples.
*/
if ((error = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size)) < 0) {
fprintf(stderr, "Could not reallocate FIFO\n");
return error;
}
/** Store the new samples in the FIFO buffer. */
if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
frame_size) < frame_size) {
fprintf(stderr, "Could not write data to FIFO\n");
return AVERROR_EXIT;
}
return 0;
}
/**
* Read one audio frame from the input file, then decode, convert and store
* it in the FIFO buffer.
*/
static int read_decode_convert_and_store(AVAudioFifo *fifo,
AVFormatContext *input_format_context,
AVCodecContext *input_codec_context,
AVCodecContext *output_codec_context,
SwrContext *resampler_context,
int *finished)
{
/** Temporary storage of the input samples of the frame read from the file. */
AVFrame *input_frame = NULL;
/** Temporary storage for the converted input samples. */
uint8_t **converted_input_samples = NULL;
int data_present;
int ret = AVERROR_EXIT;
/** Initialize temporary storage for one input frame. */
if (init_input_frame(&input_frame))
goto cleanup;
/** Decode one frame worth of audio samples. */
if (decode_audio_frame(input_frame, input_format_context,
input_codec_context, &data_present, finished))
goto cleanup;
/**
* If we are at the end of the file and there are no more samples
* in the decoder which are delayed, we are actually finished.
* This must not be treated as an error.
*/
if (*finished && !data_present) {
ret = 0;
goto cleanup;
}
/** If there is decoded data, convert and store it */
if (data_present) {
/** Initialize the temporary storage for the converted input samples. */
if (init_converted_samples(&converted_input_samples, output_codec_context,
input_frame->nb_samples))
goto cleanup;
/**
* Convert the input samples to the desired output sample format.
* This requires a temporary storage provided by converted_input_samples.
*/
if (convert_samples((const uint8_t**)input_frame->extended_data, converted_input_samples,
input_frame->nb_samples, resampler_context))
goto cleanup;
/** Add the converted input samples to the FIFO buffer for later processing. */
if (add_samples_to_fifo(fifo, converted_input_samples,
input_frame->nb_samples))
goto cleanup;
}
ret = 0;
cleanup:
if (converted_input_samples) {
av_freep(&converted_input_samples[0]);
free(converted_input_samples);
}
av_frame_free(&input_frame);
return ret;
}
/**
* Initialize one input frame for writing to the output file.
* The frame will be exactly frame_size samples large.
*/
static int init_output_frame(AVFrame **frame,
AVCodecContext *output_codec_context,
int frame_size)
{
int error;
/** Create a new frame to store the audio samples. */
if (!(*frame = av_frame_alloc())) {
fprintf(stderr, "Could not allocate output frame\n");
return AVERROR_EXIT;
}
/**
* Set the frame's parameters, especially its size and format.
* av_frame_get_buffer needs this to allocate memory for the
* audio samples of the frame.
* Default channel layouts based on the number of channels
* are assumed for simplicity.
*/
(*frame)->nb_samples = frame_size;
(*frame)->channel_layout = output_codec_context->channel_layout;
(*frame)->format = output_codec_context->sample_fmt;
(*frame)->sample_rate = output_codec_context->sample_rate;
/**
* Allocate the samples of the created frame. This call will make
* sure that the audio frame can hold as many samples as specified.
*/
if ((error = av_frame_get_buffer(*frame, 0)) < 0) {
fprintf(stderr, "Could allocate output frame samples (error '%s')\n",
get_error_text(error));
av_frame_free(frame);
return error;
}
return 0;
}
/** Encode one frame worth of audio to the output file. */
static int encode_audio_frame(AVFrame *frame,
AVFormatContext *output_format_context,
AVCodecContext *output_codec_context,
int *data_present)
{
/** Packet used for temporary storage. */
AVPacket output_packet;
int error;
init_packet(&output_packet);
/**
* Encode the audio frame and store it in the temporary packet.
* The output audio stream encoder is used to do this.
*/
if ((error = avcodec_encode_audio2(output_codec_context, &output_packet,
frame, data_present)) < 0) {
fprintf(stderr, "Could not encode frame (error '%s')\n",
get_error_text(error));
av_free_packet(&output_packet);
return error;
}
/** Write one audio frame from the temporary packet to the output file. */
if (*data_present) {
if ((error = av_write_frame(output_format_context, &output_packet)) < 0) {
fprintf(stderr, "Could not write frame (error '%s')\n",
get_error_text(error));
av_free_packet(&output_packet);
return error;
}
av_free_packet(&output_packet);
}
return 0;
}
/**
* Load one audio frame from the FIFO buffer, encode and write it to the
* output file.
*/
static int load_encode_and_write(AVAudioFifo *fifo,
AVFormatContext *output_format_context,
AVCodecContext *output_codec_context)
{
/** Temporary storage of the output samples of the frame written to the file. */
AVFrame *output_frame;
/**
* Use the maximum number of possible samples per frame.
* If there is less than the maximum possible frame size in the FIFO
* buffer, use this number. Otherwise, use the maximum possible frame size.
*/
const int frame_size = FFMIN(av_audio_fifo_size(fifo),
output_codec_context->frame_size);
int data_written;
/** Initialize temporary storage for one output frame. */
if (init_output_frame(&output_frame, output_codec_context, frame_size))
return AVERROR_EXIT;
/**
* Read as many samples from the FIFO buffer as required to fill the frame.
* The samples are stored in the frame temporarily.
*/
if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
fprintf(stderr, "Could not read data from FIFO\n");
av_frame_free(&output_frame);
return AVERROR_EXIT;
}
/** Encode one frame worth of audio samples. */
if (encode_audio_frame(output_frame, output_format_context,
output_codec_context, &data_written)) {
av_frame_free(&output_frame);
return AVERROR_EXIT;
}
av_frame_free(&output_frame);
return 0;
}
/** Write the trailer of the output file container. */
static int write_output_file_trailer(AVFormatContext *output_format_context)
{
int error;
if ((error = av_write_trailer(output_format_context)) < 0) {
fprintf(stderr, "Could not write output file trailer (error '%s')\n",
get_error_text(error));
return error;
}
return 0;
}
/** Convert an audio file to an AAC file in an MP4 container. */
int main(int argc, char **argv)
{
AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
SwrContext *resample_context = NULL;
AVAudioFifo *fifo = NULL;
int ret = AVERROR_EXIT;
if (argc < 3) {
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
exit(1);
}
/** Register all codecs and formats so that they can be used. */
av_register_all();
/** Open the input file for reading. */
if (open_input_file(argv[1], &input_format_context,
&input_codec_context))
goto cleanup;
/** Open the output file for writing. */
if (open_output_file(argv[2], input_codec_context,
&output_format_context, &output_codec_context))
goto cleanup;
/** Initialize the resampler to be able to convert audio sample formats. */
if (init_resampler(input_codec_context, output_codec_context,
&resample_context))
goto cleanup;
/** Initialize the FIFO buffer to store audio samples to be encoded. */
if (init_fifo(&fifo))
goto cleanup;
/** Write the header of the output file container. */
if (write_output_file_header(output_format_context))
goto cleanup;
/**
* Loop as long as we have input samples to read or output samples
* to write; abort as soon as we have neither.
*/
while (1) {
/** Use the encoder's desired frame size for processing. */
const int output_frame_size = output_codec_context->frame_size;
int finished = 0;
/**
* Make sure that there is one frame worth of samples in the FIFO
* buffer so that the encoder can do its work.
* Since the decoder's and the encoder's frame size may differ, we
* need the FIFO buffer to store as many frames worth of input samples
* as are required to make up at least one frame worth of output samples.
*/
while (av_audio_fifo_size(fifo) < output_frame_size) {
/**
* Decode one frame worth of audio samples, convert it to the
* output sample format and put it into the FIFO buffer.
*/
if (read_decode_convert_and_store(fifo, input_format_context,
input_codec_context,
output_codec_context,
resample_context, &finished))
goto cleanup;
/**
* If we are at the end of the input file, we continue
* encoding the remaining audio samples to the output file.
*/
if (finished)
break;
}
/**
* If we have enough samples for the encoder, we encode them.
* At the end of the file, we pass the remaining samples to
* the encoder.
*/
while (av_audio_fifo_size(fifo) >= output_frame_size ||
(finished && av_audio_fifo_size(fifo) > 0))
/**
* Take one frame worth of audio samples from the FIFO buffer,
* encode it and write it to the output file.
*/
if (load_encode_and_write(fifo, output_format_context,
output_codec_context))
goto cleanup;
/**
* If we are at the end of the input file and have encoded
* all remaining samples, we can exit this loop and finish.
*/
if (finished) {
int data_written;
/** Flush the encoder as it may have delayed frames. */
do {
if (encode_audio_frame(NULL, output_format_context,
output_codec_context, &data_written))
goto cleanup;
} while (data_written);
break;
}
}
/** Write the trailer of the output file container. */
if (write_output_file_trailer(output_format_context))
goto cleanup;
ret = 0;
cleanup:
if (fifo)
av_audio_fifo_free(fifo);
swr_free(&resample_context);
if (output_codec_context)
avcodec_close(output_codec_context);
if (output_format_context) {
avio_close(output_format_context->pb);
avformat_free_context(output_format_context);
}
if (input_codec_context)
avcodec_close(input_codec_context);
if (input_format_context)
avformat_close_input(&input_format_context);
return ret;
}

View File

@@ -368,6 +368,26 @@ ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
rm temp[12].[av] all.[av]
@end example
@section -profile option fails when encoding H.264 video with AAC audio
@command{ffmpeg} prints an error like
@example
Undefined constant or missing '(' in 'baseline'
Unable to parse option value "baseline"
Error setting option profile to value baseline.
@end example
Short answer: write @option{-profile:v} instead of @option{-profile}.
Long answer: this happens because the @option{-profile} option can apply to both
video and audio. Specifically, the AAC encoder also defines some profiles, none
of which are named @var{baseline}.
The solution is to apply the @option{-profile} option to the video stream only
by using @url{http://ffmpeg.org/ffmpeg.html#Stream-specifiers-1, Stream specifiers}.
Appending @code{:v} to it will do exactly that.
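For example, an illustrative command (the native AAC encoder also needs @option{-strict experimental}):
@example
ffmpeg -i input.mov -c:v libx264 -profile:v baseline -c:a aac -strict experimental output.mp4
@end example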
@section Using @option{-f lavfi}, audio becomes mono for no apparent reason.
Use @option{-dumpgraph -} to find out exactly where the channel layout is

View File

@@ -80,22 +80,11 @@ The transcoding process in @command{ffmpeg} for each output can be described by
the following diagram:
@example
_______ ______________
| | | |
| input | demuxer | encoded data | decoder
| file | ---------> | packets | -----+
|_______| |______________| |
v
_________
| |
| decoded |
| frames |
________ ______________ |_________|
| | | | |
| output | <-------- | encoded data | <----+
| file | muxer | packets | encoder
|________| |______________|
_______ ______________ _________ ______________ ________
| | | | | | | | | |
| input | demuxer | encoded data | decoder | decoded | encoder | encoded data | muxer | output |
| file | ---------> | packets | ---------> | frames | ---------> | packets | -------> | file |
|_______| |______________| |_________| |______________| |________|
@end example
@@ -123,11 +112,11 @@ the same type. In the above diagram they can be represented by simply inserting
an additional step between decoding and encoding:
@example
_________ __________ ______________
| | simple | | | |
| decoded | fltrgrph | filtered | encoder | encoded data |
| frames | ----------> | frames | ---------> | packets |
|_________| |__________| |______________|
_________ __________ ______________
| | | | | |
| decoded | simple filtergraph | filtered | encoder | encoded data |
| frames | -------------------> | frames | ---------> | packets |
|_________| |__________| |______________|
@end example
@@ -136,10 +125,10 @@ Simple filtergraphs are configured with the per-stream @option{-filter} option
A simple filtergraph for video can look for example like this:
@example
_______ _____________ _______ ________
| | | | | | | |
| input | ---> | deinterlace | ---> | scale | ---> | output |
|_______| |_____________| |_______| |________|
_______ _____________ _______ _____ ________
| | | | | | | | | |
| input | ---> | deinterlace | ---> | scale | ---> | fps | ---> | output |
|_______| |_____________| |_______| |_____| |________|
@end example
@@ -296,20 +285,23 @@ input until the timestamps reach @var{position}.
@var{position} may be either in seconds or in @code{hh:mm:ss[.xxx]} form.
@item -itsoffset @var{offset} (@emph{input})
Set the input time offset.
Set the input time offset in seconds.
@code{[-]hh:mm:ss[.xxx]} syntax is also supported.
The offset is added to the timestamps of the input files.
Specifying a positive offset means that the corresponding
streams are delayed by @var{offset} seconds.
@var{offset} must be a time duration specification,
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
The offset is added to the timestamps of the input files. Specifying
a positive offset means that the corresponding streams are delayed by
the time duration specified in @var{offset}.
@item -timestamp @var{date} (@emph{output})
@item -timestamp @var{time} (@emph{output})
Set the recording timestamp in the container.
@var{date} must be a time duration specification,
see @ref{date syntax,,the Date section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
The syntax for @var{time} is:
@example
now|([(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...])|(HHMMSS[.m...]))[Z|z])
@end example
If the value is "now" it takes the current time.
Time is local time unless 'Z' or 'z' is appended, in which case it is
interpreted as UTC.
If the year-month-day part is not specified it takes the current
year-month-day.
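For example, with an illustrative value (the trailing @samp{Z} makes it UTC):
@example
ffmpeg -i input.avi -timestamp 2002-07-26T23:05:00Z output.mpg
@end example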
@item -metadata[:metadata_specifier] @var{key}=@var{value} (@emph{output,per-metadata})
Set a metadata key/value pair.
@@ -356,13 +348,8 @@ Stop writing to the stream after @var{framecount} frames.
@item -q[:@var{stream_specifier}] @var{q} (@emph{output,per-stream})
@itemx -qscale[:@var{stream_specifier}] @var{q} (@emph{output,per-stream})
Use fixed quality scale (VBR). The meaning of @var{q}/@var{qscale} is
Use fixed quality scale (VBR). The meaning of @var{q} is
codec-dependent.
If @var{qscale} is used without a @var{stream_specifier} then it applies only
to the video stream. This is to maintain compatibility with previous behavior,
and because applying the same codec-specific value to two different codecs
(audio and video) is generally not what is intended when no stream_specifier
is used.
@anchor{filter_option}
@item -filter[:@var{stream_specifier}] @var{filtergraph} (@emph{output,per-stream})
@@ -516,6 +503,9 @@ prefix is ``ffmpeg2pass''. The complete file name will be
@file{PREFIX-N.log}, where N is a number specific to the output
stream
@item -vlang @var{code}
Set the ISO 639 language code (3 letters) of the current video stream.
@item -vf @var{filtergraph} (@emph{output})
Create the filtergraph specified by @var{filtergraph} and use it to
filter the stream.
@@ -626,42 +616,6 @@ would be more efficient.
@item -copyinkf[:@var{stream_specifier}] (@emph{output,per-stream})
When doing stream copy, copy also non-key frames found at the
beginning.
@item -hwaccel[:@var{stream_specifier}] @var{hwaccel} (@emph{input,per-stream})
Use hardware acceleration to decode the matching stream(s). The allowed values
of @var{hwaccel} are:
@table @option
@item none
Do not use any hardware acceleration (the default).
@item auto
Automatically select the hardware acceleration method.
@item vdpau
Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
@end table
This option has no effect if the selected hwaccel is not available or not
supported by the chosen decoder.
Note that most acceleration methods are intended for playback and will not be
faster than software decoding on modern CPUs. Additionally, @command{ffmpeg}
will usually need to copy the decoded frames from the GPU memory into the system
memory, resulting in further performance loss. This option is thus mainly
useful for testing.
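For instance, an illustrative invocation selecting VDPAU decoding:
@example
ffmpeg -hwaccel vdpau -i input.mkv output.mkv
@end example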
@item -hwaccel_device[:@var{stream_specifier}] @var{hwaccel_device} (@emph{input,per-stream})
Select a device to use for hardware acceleration.
This option only makes sense when the @option{-hwaccel} option is also
specified. Its exact meaning depends on the specific hardware acceleration
method chosen.
@table @option
@item vdpau
For VDPAU, this option specifies the X11 display/screen to use. If this option
is not specified, the value of the @var{DISPLAY} environment variable is used.
@end table
@end table
@section Audio Options
@@ -714,6 +668,8 @@ stereo but not 6 channels as 5.1. The default is to always try to guess. Use
@section Subtitle options:
@table @option
@item -slang @var{code}
Set the ISO 639 language code (3 letters) of the current subtitle stream.
@item -scodec @var{codec} (@emph{input/output})
Set the subtitle codec. This is an alias for @code{-codec:s}.
@item -sn (@emph{output})
@@ -1042,7 +998,7 @@ ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
@end example
@item -tag[:@var{stream_specifier}] @var{codec_tag} (@emph{input/output,per-stream})
@item -tag[:@var{stream_specifier}] @var{codec_tag} (@emph{per-stream})
Force a tag/fourcc for matching streams.
@item -timecode @var{hh}:@var{mm}:@var{ss}SEP@var{ff}
@@ -1116,14 +1072,12 @@ transcoding. Use @option{-noaccurate_seek} to disable it, which may be useful
e.g. when copying some streams and transcoding the others.
@item -override_ffserver (@emph{global})
Overrides the input specifications from @command{ffserver}. Using this
option you can map any input stream to @command{ffserver} and control
many aspects of the encoding from @command{ffmpeg}. Without this
option @command{ffmpeg} will transmit to @command{ffserver} what is
requested by @command{ffserver}.
Overrides the input specifications from ffserver. Using this option you can
map any input stream to ffserver and control many aspects of the encoding from
ffmpeg. Without this option ffmpeg will transmit to ffserver what is requested by
ffserver.
The option is intended for cases where features are needed that cannot be
specified to @command{ffserver} but can be to @command{ffmpeg}.
specified to ffserver but can be to ffmpeg.
@end table

View File

@@ -188,12 +188,6 @@ Cycle program.
@item w
Show audio waves.
@item s
Step to the next frame.
Pause if the stream is not already paused, step to the next video
frame, and pause.
@item left/right
Seek backward/forward 10 seconds.
@@ -201,8 +195,6 @@ Seek backward/forward 10 seconds.
Seek backward/forward 1 minute.
@item page down/page up
Seek to the previous/next chapter.
or if there are no chapters
Seek backward/forward 10 minutes.
@item mouse click
@@ -214,7 +206,6 @@ Seek to percentage in file corresponding to fraction of width.
@include config.texi
@ifset config-all
@set config-readonly
@ifset config-avutil
@include utils.texi
@end ifset

View File

@@ -197,11 +197,11 @@ The information for each single packet is printed within a dedicated
section with name "PACKET".
@item -show_frames
Show information about each frame and subtitle contained in the input
multimedia stream.
Show information about each frame contained in the input multimedia
stream.
The information for each single frame is printed within a dedicated
section with name "FRAME" or "SUBTITLE".
section with name "FRAME".
@item -show_streams
Show information about each media stream contained in the input
@@ -337,39 +337,6 @@ A writer may accept one or more arguments, which specify the options
to adopt. The options are specified as a list of @var{key}=@var{value}
pairs, separated by ":".
All writers support the following options:
@table @option
@item string_validation, sv
Set string validation mode.
The following values are accepted.
@table @samp
@item fail
The writer will fail immediately in case an invalid string (UTF-8)
sequence or code point is found in the input. This is especially
useful to validate input metadata.
@item ignore
Any validation error will be ignored. This will result in possibly
broken output, especially with the json or xml writer.
@item replace
The writer will substitute invalid UTF-8 sequences or code points with
the string specified with the @option{string_validation_replacement}.
@end table
Default value is @samp{replace}.
@item string_validation_replacement, svr
Set replacement string to use in case @option{string_validation} is
set to @samp{replace}.
If the option is not specified, the writer will assume the empty
string, that is, it will remove the invalid sequences from the input
strings.
@end table
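As an illustrative sketch of this syntax, the validation mode could be selected for the JSON writer like this:
@example
ffprobe -show_format -print_format json=string_validation=fail input.mkv
@end example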
A description of the currently available writers follows.
@section default
@@ -599,7 +566,6 @@ DV, GXF and AVI timecodes are available in format metadata
@include config.texi
@ifset config-all
@set config-readonly
@ifset config-avutil
@include utils.texi
@end ifset

View File

@@ -28,10 +28,7 @@
<xsd:complexType name="framesType">
<xsd:sequence>
<xsd:choice minOccurs="0" maxOccurs="unbounded">
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:choice>
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:sequence>
</xsd:complexType>
@@ -61,8 +58,6 @@
<xsd:attribute name="pkt_pts_time" type="xsd:float"/>
<xsd:attribute name="pkt_dts" type="xsd:long" />
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
<xsd:attribute name="best_effort_timestamp" type="xsd:long" />
<xsd:attribute name="best_effort_timestamp_time" type="xsd:float" />
<xsd:attribute name="pkt_duration" type="xsd:long" />
<xsd:attribute name="pkt_duration_time" type="xsd:float"/>
<xsd:attribute name="pkt_pos" type="xsd:long" />
@@ -87,16 +82,6 @@
<xsd:attribute name="repeat_pict" type="xsd:int" />
</xsd:complexType>
<xsd:complexType name="subtitleType">
<xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/>
<xsd:attribute name="pts" type="xsd:long" />
<xsd:attribute name="pts_time" type="xsd:float"/>
<xsd:attribute name="format" type="xsd:int" />
<xsd:attribute name="start_display_time" type="xsd:int" />
<xsd:attribute name="end_display_time" type="xsd:int" />
<xsd:attribute name="num_rects" type="xsd:int" />
</xsd:complexType>
<xsd:complexType name="streamsType">
<xsd:sequence>
<xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>

View File

@@ -235,7 +235,7 @@ StartSendOnKey
#<Stream test.ogg>
#Feed feed1.ffm
#Metadata title "Stream title"
#Title "Stream title"
#AudioBitRate 64
#AudioChannels 2
#AudioSampleRate 44100
@@ -280,10 +280,10 @@ StartSendOnKey
#<Stream file.asf>
#File "/usr/local/httpd/htdocs/test.asf"
#NoAudio
#Metadata author "Me"
#Metadata copyright "Super MegaCorp"
#Metadata title "Test stream from disk"
#Metadata comment "Test comment"
#Author "Me"
#Copyright "Super MegaCorp"
#Title "Test stream from disk"
#Comment "Test comment"
#</Stream>

View File

@@ -16,14 +16,11 @@ ffserver [@var{options}]
@chapter Description
@c man begin DESCRIPTION
@command{ffserver} is a streaming server for both audio and video.
It supports several live feeds, streaming from files and time shifting
on live feeds. You can seek to positions in the past on each live
feed, provided you specify a big enough feed storage.
@command{ffserver} is configured through a configuration file, which
is read at startup. If not explicitly specified, it will read from
@file{/etc/ffserver.conf}.
@command{ffserver} is a streaming server for both audio and video. It
supports several live feeds, streaming from files and time shifting on
live feeds (you can seek to positions in the past on each live feed,
provided you specify a big enough feed storage in
@file{ffserver.conf}).
@command{ffserver} receives prerecorded files or FFM streams from some
@command{ffmpeg} instance as input, then streams them over
@@ -42,126 +39,10 @@ For each feed you can have different output streams in various
formats, each one specified by a @code{<Stream>} section in the
configuration file.
@chapter Detailed description
@command{ffserver} works by forwarding streams encoded by
@command{ffmpeg}, or pre-recorded streams which are read from disk.
More precisely, @command{ffserver} acts as an HTTP server, accepting POST
requests from @command{ffmpeg} to acquire the stream to publish, and
serving the stream media content to HTTP and RTSP clients in response
to their GET requests.
A feed is an @ref{FFM} stream created by @command{ffmpeg}, and sent to
a port where @command{ffserver} is listening.
Each feed is identified by a unique name, corresponding to the name
of the resource published on @command{ffserver}, and is configured by
a dedicated @code{Feed} section in the configuration file.
The feed publish URL is given by:
@example
http://@var{ffserver_ip_address}:@var{http_port}/@var{feed_name}
@end example
where @var{ffserver_ip_address} is the IP address of the machine where
@command{ffserver} is installed, @var{http_port} is the port number of
the HTTP server (configured through the @option{Port} option), and
@var{feed_name} is the name of the corresponding feed defined in the
configuration file.
Each feed is associated with a file which is stored on disk. This stored
file makes it possible to send pre-recorded data to a player as fast as
possible when new content is added to the stream in real time.
A "live-stream" or "stream" is a resource published by
@command{ffserver}, and made accessible through the HTTP protocol to
clients.
A stream can be connected to a feed, or to a file. In the first case,
the published stream is forwarded from the corresponding feed
generated by a running instance of @command{ffmpeg}, in the second
case the stream is read from a pre-recorded file.
Each stream is identified by a unique name, corresponding to the name
of the resource served by @command{ffserver}, and is configured by
a dedicated @code{Stream} section in the configuration file.
The stream access HTTP URL is given by:
@example
http://@var{ffserver_ip_address}:@var{http_port}/@var{stream_name}[@var{options}]
@end example
The stream access RTSP URL is given by:
@example
rtsp://@var{ffserver_ip_address}:@var{rtsp_port}/@var{stream_name}[@var{options}]
@end example
@var{stream_name} is the name of the corresponding stream defined in
the configuration file. @var{options} is a list of options specified
after the URL which affect how the stream is served by
@command{ffserver}. @var{http_port} and @var{rtsp_port} are the HTTP
and RTSP ports configured with the options @var{Port} and
@var{RTSPPort} respectively.
If the stream is associated with a feed, the encoding parameters
must be configured in the stream configuration. They are sent to
@command{ffmpeg} when setting up the encoding. This allows
@command{ffserver} to define the encoding parameters used by
the @command{ffmpeg} encoders.
The @command{ffmpeg} @option{override_ffserver} commandline option
makes it possible to override the encoding parameters set by the server.
Multiple streams can be connected to the same feed.
For example, you can have a situation described by the following
graph:
@example
_________ __________
| | | |
ffmpeg 1 -----| feed 1 |-----| stream 1 |
\ |_________|\ |__________|
\ \
\ \ __________
\ \ | |
\ \| stream 2 |
\ |__________|
\
\ _________ __________
\ | | | |
\| feed 2 |-----| stream 3 |
|_________| |__________|
_________ __________
| | | |
ffmpeg 2 -----| feed 3 |-----| stream 4 |
|_________| |__________|
_________ __________
| | | |
| file 1 |-----| stream 5 |
|_________| |__________|
@end example
@anchor{FFM}
@section FFM, FFM2 formats
FFM and FFM2 are formats used by ffserver. They allow storing a wide variety of
video and audio streams and encoding options, and can store a moving time segment
of an infinite movie or a whole movie.
FFM is version specific, and there is limited compatibility of FFM files
generated by one version of ffmpeg/ffserver and another version of
ffmpeg/ffserver. It may work but it is not guaranteed to work.
FFM2 is extensible while maintaining compatibility and should work between
differing versions of tools. FFM2 is the default.
@section Status stream
@command{ffserver} supports an HTTP interface which exposes the
current status of the server.
ffserver supports an HTTP interface which exposes the current status
of the server.
Simply point your browser to the address of the special status stream
specified in the configuration file.
@@ -180,8 +61,27 @@ ACL allow 192.168.0.0 192.168.255.255
then the server will post a page with the status information when
the special stream @file{status.html} is requested.
@section What can this do?
When properly configured and running, you can capture video and audio in real
time from a suitable capture card, and stream it out over the Internet to
either Windows Media Player or RealAudio player (with some restrictions).
It can also stream from files, though that is currently broken. Very often, a
web server can be used to serve up the files just as well.
It can stream prerecorded video from .ffm files, though it is somewhat tricky
to make it work correctly.
@section How do I make it work?
First, build the kit. It *really* helps to have installed LAME first. Then when
you run @code{./configure}, make sure that you have the
@code{--enable-libmp3lame} flag turned on.
LAME is important as it allows for streaming audio to Windows Media Player.
Don't ask why the other audio types do not work.
As a simple test, just run the following two command lines where INPUTFILE
is some file which you can decode with ffmpeg:
@@ -209,6 +109,35 @@ You should edit the ffserver.conf file to suit your needs (in terms of
frame rates etc). Then install ffserver and ffmpeg, write a script to start
them up, and off you go.
@section Troubleshooting
@subsection I don't hear any audio, but video is fine.
Maybe you didn't install LAME, or got your ./configure statement wrong. Check
the ffmpeg output to see if a line referring to MP3 is present. If not, then
your configuration was incorrect. If it is, then maybe your wiring is not
set up correctly. Maybe the sound card is not getting data from the right
input source. Maybe you have a really awful audio interface (like I do)
that only captures in stereo and also requires that one channel be flipped.
If you are one of these people, then export 'AUDIO_FLIP_LEFT=1' before
starting ffmpeg.
@subsection The audio and video lose sync after a while.
Yes, they do.
@subsection After a long while, the video update rate goes way down in WMP.
Yes, it does. Who knows why?
@subsection WMP 6.4 behaves differently to WMP 7.
Yes, it does. Any thoughts on this would be gratefully received. These
differences extend to embedding WMP into a web page. [There are two
object IDs that you can use: The old one, which does not play well, and
the new one, which does (both tested on the same system). However,
I suspect that the new one is not available unless you have installed WMP 7].
@section What else can it do?
You can replay video from .ffm files that was recorded earlier.
@@ -248,6 +177,9 @@ specify a time. In addition, ffserver will skip frames until a key_frame
is found. This further reduces the startup delay by not transferring data
that will be discarded.
* You may want to adjust the MaxBandwidth in the ffserver.conf to limit
the amount of bandwidth consumed by live streams.
@section Why does the ?buffer / Preroll stop working after a time?
It turns out that (on my machine at least) the number of frames successfully
@@ -281,6 +213,19 @@ You use this by adding the ?date= to the end of the URL for the stream.
For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}.
@c man end
@section What is FFM, FFM2
FFM and FFM2 are formats used by ffserver. They allow storing a wide variety of
video and audio streams and encoding options, and can store a moving time segment
of an infinite movie or a whole movie.
FFM is version specific, and there is limited compatibility of FFM files
generated by one version of ffmpeg/ffserver and another version of
ffmpeg/ffserver. It may work but it is not guaranteed to work.
FFM2 is extensible while maintaining compatibility and should work between
differing versions of tools. FFM2 is the default.
@chapter Options
@c man begin OPTIONS
@@ -290,540 +235,15 @@ For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}.
@table @option
@item -f @var{configfile}
Read configuration file @file{configfile}. If not specified it will
read by default from @file{/etc/ffserver.conf}.
Use @file{configfile} instead of @file{/etc/ffserver.conf}.
@item -n
Enable no-launch mode. This option disables all the @code{Launch}
directives within the various @code{<Feed>} sections. Since
@command{ffserver} will not launch any @command{ffmpeg} instances, you
will have to launch them manually.
Enable no-launch mode. This option disables all the Launch directives
within the various <Stream> sections. Since ffserver will not launch
any ffmpeg instances, you will have to launch them manually.
@item -d
Enable debug mode. This option increases log verbosity, and directs
log messages to stdout. When specified, the @option{CustomLog} option
is ignored.
Enable debug mode. This option increases log verbosity, directs log
messages to stdout.
@end table
@chapter Configuration file syntax
@command{ffserver} reads a configuration file containing global
options and settings for each stream and feed.
The configuration file consists of global options and dedicated
sections, which must be introduced by "<@var{SECTION_NAME}
@var{ARGS}>" on a separate line and must be terminated by a line in
the form "</@var{SECTION_NAME}>". @var{ARGS} is optional.
Currently the following sections are recognized: @samp{Feed},
@samp{Stream}, @samp{Redirect}.
A line starting with @code{#} is ignored and treated as a comment.
Names of options and sections are case-insensitive.
@section ACL syntax
An ACL (Access Control List) specifies the addresses which are allowed
to access a given stream, or to write a given feed.
It accepts the following forms:
@itemize
@item
Allow/deny access to @var{address}.
@example
ACL ALLOW <address>
ACL DENY <address>
@end example
@item
Allow/deny access to ranges of addresses from @var{first_address} to
@var{last_address}.
@example
ACL ALLOW <first_address> <last_address>
ACL DENY <first_address> <last_address>
@end example
@end itemize
You can repeat the ACL allow/deny as often as you like. It is on a per
stream basis. The first match defines the action. If there are no matches,
then the default is the inverse of the last ACL statement.
Thus 'ACL allow localhost' only allows access from localhost.
'ACL deny 1.0.0.0 1.255.255.255' would deny the whole of network 1 and
allow everybody else.
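Combining the two statements above into one illustrative per-stream list:
@example
ACL allow localhost
ACL deny 1.0.0.0 1.255.255.255
@end example
Here the first match wins, so localhost is always allowed; the rest of
network 1 is denied, and all remaining addresses are allowed by the
default (the inverse of the last statement).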
@section Global options
@table @option
@item Port @var{port_number}
@item RTSPPort @var{port_number}
Set TCP port number on which the HTTP/RTSP server is listening. You
must select a different port from your standard HTTP web server if it
is running on the same computer.
If not specified, no corresponding server will be created.
@item BindAddress @var{ip_address}
@item RTSPBindAddress @var{ip_address}
Set address on which the HTTP/RTSP server is bound. Only useful if you
have several network interfaces.
@item MaxHTTPConnections @var{n}
Set number of simultaneous HTTP connections that can be handled. It
has to be defined @emph{before} the @option{MaxClients} parameter,
since it defines the @option{MaxClients} maximum limit.
Default value is 2000.
@item MaxClients @var{n}
Set number of simultaneous requests that can be handled. Since
@command{ffserver} is very fast, it is more likely that you will want
to leave this high and use @option{MaxBandwidth}.
Default value is 5.
@item MaxBandwidth @var{kbps}
Set the maximum amount of kbit/sec that you are prepared to consume
when streaming to clients.
Default value is 1000.
@item CustomLog @var{filename}
Set access log file (uses standard Apache log file format). '-' is the
standard output.
If not specified @command{ffserver} will produce no log.
In case the commandline option @option{-d} is specified this option is
ignored, and the log is written to standard output.
@item NoDaemon
Set no-daemon mode. This option is currently ignored since now
@command{ffserver} will always work in no-daemon mode, and is
deprecated.
@end table
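Putting these options together, a minimal global section of a
hypothetical @file{ffserver.conf} might read:
@example
Port 8090
BindAddress 0.0.0.0
MaxHTTPConnections 2000
MaxClients 100
MaxBandwidth 2000
CustomLog -
@end example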
@section Feed section
A Feed section defines a feed provided to @command{ffserver}.
Each live feed contains one video and/or audio sequence coming from an
@command{ffmpeg} encoder or another @command{ffserver}. This sequence
may be encoded simultaneously with several codecs at several
resolutions.
A feed instance specification is introduced by a line in the form:
@example
<Feed FEED_FILENAME>
@end example
where @var{FEED_FILENAME} specifies the unique name of the FFM stream.
The following options are recognized within a Feed section.
@table @option
@item File @var{filename}
@item ReadOnlyFile @var{filename}
Set the path where the feed file is stored on disk.
If not specified, @file{/tmp/FEED.ffm} is assumed, where
@var{FEED} is the feed name.
If @option{ReadOnlyFile} is used the file is marked as read-only and
it will not be deleted or updated.
@item Truncate
Truncate the feed file, rather than appending to it. By default
@command{ffserver} will append data to the file, until the maximum
file size value is reached (see @option{FileMaxSize} option).
@item FileMaxSize @var{size}
Set maximum size of the feed file in bytes. 0 means unlimited. The
postfixes @code{K} (2^10), @code{M} (2^20), and @code{G} (2^30) are
recognized.
Default value is 5M.
@item Launch @var{args}
Launch an @command{ffmpeg} command when creating @command{ffserver}.
@var{args} must be a sequence of arguments to be provided to an
@command{ffmpeg} instance. The first provided argument is ignored, and
it is replaced by a path with the same dirname of the @command{ffserver}
instance, followed by the remaining arguments and terminated with a
path corresponding to the feed.
When the launched process exits, @command{ffserver} will launch
another program instance.
In case you need a more complex @command{ffmpeg} configuration,
e.g. if you need to generate multiple FFM feeds with a single
@command{ffmpeg} instance, you should launch @command{ffmpeg} by hand.
This option is ignored in case the commandline option @option{-n} is
specified.
@item ACL @var{spec}
Specify the list of IP addresses which are allowed or denied to write
to the feed. Multiple ACL options can be specified.
@end table
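For example, a hypothetical feed limited to 20 megabytes and writable
only from the local host could be declared as:
@example
<Feed feed1.ffm>
File /tmp/feed1.ffm
FileMaxSize 20M
ACL ALLOW 127.0.0.1
</Feed>
@end example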
@section Stream section
A Stream section defines a stream provided by @command{ffserver}, and
identified by a single name.
The stream is sent when answering a request containing the stream
name.
A stream section must be introduced by the line:
@example
<Stream STREAM_NAME>
@end example
where @var{STREAM_NAME} specifies the unique name of the stream.
The following options are recognized within a Stream section.
Encoding options are marked with the @emph{encoding} tag; they are
used to set the encoding parameters, and are mapped to libavcodec
encoding options. Not all encoding options are supported, in
particular it is not possible to set encoder private options. In order
to override the encoding options specified by @command{ffserver}, you
can use the @command{ffmpeg} @option{override_ffserver} commandline
option.
Only one of the @option{Feed} and @option{File} options should be set.
@table @option
@item Feed @var{feed_name}
Set the input feed. @var{feed_name} must correspond to an existing
feed defined in a @code{Feed} section.
When this option is set, encoding options are used to setup the
encoding operated by the remote @command{ffmpeg} process.
@item File @var{filename}
Set the filename of the pre-recorded input file to stream.
When this option is set, encoding options are ignored and the input
file content is re-streamed as is.
@item Format @var{format_name}
Set the format of the output stream.
Must be the name of a format recognized by FFmpeg. If set to
@samp{status}, it is treated as a status stream.
@item InputFormat @var{format_name}
Set input format. If not specified, it is automatically guessed.
@item Preroll @var{n}
Set this to the number of seconds backwards in time to start. Note that
most players will buffer 5-10 seconds of video, and also you need to allow
for a keyframe to appear in the data stream.
Default value is 0.
@item StartSendOnKey
Do not send stream until it gets the first key frame. By default
@command{ffserver} will send data immediately.
@item MaxTime @var{n}
Set the number of seconds to run. This value sets the maximum duration
of the stream a client will be able to receive.
A value of 0 means that no limit is set on the stream duration.
@item ACL @var{spec}
Set ACL for the stream.
@item DynamicACL @var{spec}
@item RTSPOption @var{option}
@item MulticastAddress @var{address}
@item MulticastPort @var{port}
@item MulticastTTL @var{integer}
@item NoLoop
@item FaviconURL @var{url}
Set favicon (favourite icon) for the server status page. It is ignored
for regular streams.
@item Author @var{value}
@item Comment @var{value}
@item Copyright @var{value}
@item Title @var{value}
Set metadata corresponding to the option. All these options are
deprecated in favor of @option{Metadata}.
@item Metadata @var{key} @var{value}
Set metadata value on the output stream.
@item NoAudio
@item NoVideo
Suppress audio/video.
@item AudioCodec @var{codec_name} (@emph{encoding,audio})
Set audio codec.
@item AudioBitRate @var{rate} (@emph{encoding,audio})
Set bitrate for the audio stream in kbits per second.
@item AudioChannels @var{n} (@emph{encoding,audio})
Set number of audio channels.
@item AudioSampleRate @var{n} (@emph{encoding,audio})
Set sampling frequency for audio. When using low bitrates, you should
lower this frequency to 22050 or 11025. The supported frequencies
depend on the selected audio codec.
@item AVOptionAudio @var{option} @var{value} (@emph{encoding,audio})
Set generic option for audio stream.
@item AVPresetAudio @var{preset} (@emph{encoding,audio})
Set preset for audio stream.
@item VideoCodec @var{codec_name} (@emph{encoding,video})
Set video codec.
@item VideoBitRate @var{n} (@emph{encoding,video})
Set bitrate for the video stream in kbits per second.
@item VideoBitRateRange @var{range} (@emph{encoding,video})
Set video bitrate range.
A range must be specified in the form @var{minrate}-@var{maxrate}, and
specifies the @option{minrate} and @option{maxrate} encoding options
expressed in kbits per second.
@item VideoBitRateRangeTolerance @var{n} (@emph{encoding,video})
Set video bitrate tolerance in kbits per second.
@item PixelFormat @var{pixel_format} (@emph{encoding,video})
Set video pixel format.
@item Debug @var{integer} (@emph{encoding,video})
Set video @option{debug} encoding option.
@item Strict @var{integer} (@emph{encoding,video})
Set video @option{strict} encoding option.
@item VideoBufferSize @var{n} (@emph{encoding,video})
Set ratecontrol buffer size, expressed in KB.
@item VideoFrameRate @var{n} (@emph{encoding,video})
Set number of video frames per second.
@item VideoSize (@emph{encoding,video})
Set size of the video frame, must be an abbreviation or in the form
@var{W}x@var{H}. See @ref{video size syntax,,the Video size section
in the ffmpeg-utils(1) manual,ffmpeg-utils}.
Default value is @code{160x128}.
@item VideoIntraOnly (@emph{encoding,video})
Transmit only intra frames (useful for low bitrates, but kills frame rate).
@item VideoGopSize @var{n} (@emph{encoding,video})
If non-intra only, an intra frame is transmitted every VideoGopSize
frames. Video synchronization can only begin at an intra frame.
@item VideoTag @var{tag} (@emph{encoding,video})
Set video tag.
@item VideoHighQuality (@emph{encoding,video})
@item Video4MotionVector (@emph{encoding,video})
@item BitExact (@emph{encoding,video})
Set bitexact encoding flag.
@item IdctSimple (@emph{encoding,video})
Set simple IDCT algorithm.
@item Qscale @var{n} (@emph{encoding,video})
Enable constant quality encoding, and set video qscale (quantization
scale) value, expressed in @var{n} QP units.
@item VideoQMin @var{n} (@emph{encoding,video})
@item VideoQMax @var{n} (@emph{encoding,video})
Set video qmin/qmax.
@item VideoQDiff @var{integer} (@emph{encoding,video})
Set video @option{qdiff} encoding option.
@item LumiMask @var{float} (@emph{encoding,video})
@item DarkMask @var{float} (@emph{encoding,video})
Set @option{lumi_mask}/@option{dark_mask} encoding options.
@item AVOptionVideo @var{option} @var{value} (@emph{encoding,video})
Set generic option for video stream.
@item AVPresetVideo @var{preset} (@emph{encoding,video})
Set preset for video stream.
@var{preset} must be the path of a preset file.
@end table
@subsection Server status stream
A server status stream is a special stream which is used to show
statistics about the @command{ffserver} operations.
It must be specified setting the option @option{Format} to
@samp{status}.
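For example, assuming the page name @samp{stat.html} is chosen:
@example
<Stream stat.html>
Format status
</Stream>
@end example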
@section Redirect section
A redirect section specifies where a requested URL is redirected.
A redirect section must be introduced by the line:
@example
<Redirect NAME>
@end example
where @var{NAME} is the name of the page which should be redirected.
It only accepts the option @option{URL}, which specifies the
redirection URL.
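For example, a redirect section sending requests for @samp{index.html}
to a (hypothetical) target URL could read:
@example
<Redirect index.html>
URL http://www.ffmpeg.org/
</Redirect>
@end example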
@chapter Stream examples
@itemize
@item
Multipart JPEG
@example
<Stream test.mjpg>
Feed feed1.ffm
Format mpjpeg
VideoFrameRate 2
VideoIntraOnly
NoAudio
Strict -1
</Stream>
@end example
@item
Single JPEG
@example
<Stream test.jpg>
Feed feed1.ffm
Format jpeg
VideoFrameRate 2
VideoIntraOnly
VideoSize 352x240
NoAudio
Strict -1
</Stream>
@end example
@item
Flash
@example
<Stream test.swf>
Feed feed1.ffm
Format swf
VideoFrameRate 2
VideoIntraOnly
NoAudio
</Stream>
@end example
@item
ASF compatible
@example
<Stream test.asf>
Feed feed1.ffm
Format asf
VideoFrameRate 15
VideoSize 352x240
VideoBitRate 256
VideoBufferSize 40
VideoGopSize 30
AudioBitRate 64
StartSendOnKey
</Stream>
@end example
@item
MP3 audio
@example
<Stream test.mp3>
Feed feed1.ffm
Format mp2
AudioCodec mp3
AudioBitRate 64
AudioChannels 1
AudioSampleRate 44100
NoVideo
</Stream>
@end example
@item
Ogg Vorbis audio
@example
<Stream test.ogg>
Feed feed1.ffm
Metadata title "Stream title"
AudioBitRate 64
AudioChannels 2
AudioSampleRate 44100
NoVideo
</Stream>
@end example
@item
Real with audio only at 32 kbits
@example
<Stream test.ra>
Feed feed1.ffm
Format rm
AudioBitRate 32
NoVideo
</Stream>
@end example
@item
Real with audio and video at 64 kbits
@example
<Stream test.rm>
Feed feed1.ffm
Format rm
AudioBitRate 32
VideoBitRate 128
VideoFrameRate 25
VideoGopSize 25
</Stream>
@end example
@item
For a stream coming from a file, you only need to set the input
filename and optionally a new format.
@example
<Stream file.rm>
File "/usr/local/httpd/htdocs/tlive.rm"
NoAudio
</Stream>
@end example
@example
<Stream file.asf>
File "/usr/local/httpd/htdocs/test.asf"
NoAudio
Metadata author "Me"
Metadata copyright "Super MegaCorp"
Metadata title "Test stream from disk"
Metadata comment "Test comment"
</Stream>
@end example
@end itemize
@c man end
@include config.texi


@@ -194,13 +194,6 @@ to a plain @code{%}
Errors in parsing the environment variable are not fatal, and will not
appear in the report.
@item -hide_banner
Suppress printing banner.
All FFmpeg tools will normally show a copyright notice, build options
and library versions. This option can be used to suppress printing
this information.
@item -cpuflags flags (@emph{global})
Allows setting and clearing cpu flags. This option is intended
for testing. Do not use it unless you know what you're doing.
@@ -257,10 +250,6 @@ Possible flags for this option are:
@end table
@end table
@item -opencl_bench
Benchmark all available OpenCL devices and show the results. This option
is only available when FFmpeg has been compiled with @code{--enable-opencl}.
@item -opencl_options options (@emph{global})
Set OpenCL environment options. This option is only available when
FFmpeg has been compiled with @code{--enable-opencl}.

File diff suppressed because it is too large


@@ -125,26 +125,18 @@ Consider things that a sane encoder should not do as an error.
Use wallclock as timestamps.
@item avoid_negative_ts @var{integer} (@emph{output})
Possible values:
@table @samp
@item make_non_negative
Shift timestamps to make them non-negative.
Also note that this affects only leading negative timestamps, and not
non-monotonic negative timestamps.
@item make_zero
Shift timestamps so that the first timestamp is 0.
@item auto (default)
Enables shifting when required by the target format.
@item disabled
Disables shifting of timestamp.
@end table
Shift timestamps to make them non-negative. A value of 1 enables shifting,
a value of 0 disables it, the default value of -1 enables shifting
when required by the target format.
When shifting is enabled, all output timestamps are shifted by the
same amount. Audio, video, and subtitles desynching and relative
timestamp differences are preserved compared to how they would have
been without shifting.
Also note that this affects only leading negative timestamps, and not
non-monotonic negative timestamps.
@item skip_initial_bytes @var{integer} (@emph{input})
Set the number of bytes to skip before reading the header and frames.
Default is 0.
@@ -156,18 +148,6 @@ Correct single timestamp overflows if set to 1. Default is 1.
Flush the underlying I/O stream after each packet. Default 1 enables it, and
has the effect of reducing the latency; 0 disables it and may slightly
increase performance in some cases.
@item output_ts_offset @var{offset} (@emph{output})
Set the output time offset.
@var{offset} must be a time duration specification,
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
The offset is added by the muxer to the output timestamps.
Specifying a positive offset means that the corresponding streams are
delayed by the time duration specified in @var{offset}. Default value
is @code{0} (meaning that no offset is applied).
@end table
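As a sketch of how the format options documented above combine on the
command line (filenames are placeholders):
@example
ffmpeg -i INPUT -avoid_negative_ts make_zero -output_ts_offset 5 OUTPUT.ts
@end example
This shifts timestamps so that the first one is 0 and then delays all
output timestamps by 5 seconds.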
@c man end FORMAT OPTIONS
@@ -203,10 +183,6 @@ The exact semantics of stream specifiers is defined by the
@code{avformat_match_stream_specifier()} function declared in the
@file{libavformat/avformat.h} header.
@ifclear config-writeonly
@include demuxers.texi
@end ifclear
@ifclear config-readonly
@include muxers.texi
@end ifclear
@include metadata.texi


@@ -122,20 +122,6 @@ x264 is under the GNU Public License Version 2 or later
details), you must upgrade FFmpeg's license to GPL in order to use it.
@end float
@section x265
FFmpeg can make use of the x265 library for HEVC encoding.
Go to @url{http://x265.org/developers.html} and follow the instructions
for installing the library. Then pass @code{--enable-libx265} to configure
to enable it.
@float note
x265 is under the GNU Public License Version 2 or later
(see @url{http://www.gnu.org/licenses/old-licenses/gpl-2.0.html} for
details), you must upgrade FFmpeg's license to GPL in order to use it.
@end float
@section libilbc
iLBC is a narrowband speech codec that has been made freely available
@@ -162,27 +148,6 @@ libzvbi is licensed under the GNU General Public License Version 2 or later
you must upgrade FFmpeg's license to GPL in order to use it.
@end float
@section AviSynth
FFmpeg can read AviSynth scripts as input. To enable support, pass
@code{--enable-avisynth} to configure. The correct headers are
included in compat/avisynth/, which allows the user to enable support
without needing to search for these headers themselves.
For Windows, supported AviSynth variants are
@url{http://avisynth.nl, AviSynth 2.5 or 2.6} for 32-bit builds and
@url{http://avs-plus.net, AviSynth+ 0.1} for 32-bit and 64-bit builds.
For Linux and OS X, the supported AviSynth variant is
@url{https://github.com/avxsynth/avxsynth, AvxSynth}.
@float NOTE
AviSynth and AvxSynth are loaded dynamically. Distributors can build FFmpeg
with @code{--enable-avisynth}, and the binaries will work regardless of the
end user having AviSynth or AvxSynth installed - they'll only need to be
installed to use AviSynth scripts (obviously).
@end float
@chapter Supported File Formats, Codecs or Features
@@ -284,8 +249,6 @@ library:
@item GXF @tab X @tab X
@tab General eXchange Format SMPTE 360M, used by Thomson Grass Valley
playout servers.
@item HNM @tab @tab X
@tab Only version 4 supported, used in some games from Cryo Interactive
@item iCEDraw File @tab @tab X
@item ICO @tab X @tab X
@tab Microsoft Windows ICO
@@ -336,8 +299,6 @@ library:
@tab also known as DVB Transport Stream
@item MPEG-4 @tab X @tab X
@tab MPEG-4 is a variant of QuickTime.
@item Mirillis FIC video @tab @tab X
@tab No cursor rendering.
@item MIME multipart JPEG @tab X @tab
@item MSN TCP webcam @tab @tab X
@tab Used by MSN Messenger webcam streams.
@@ -377,7 +338,6 @@ library:
@item raw H.261 @tab X @tab X
@item raw H.263 @tab X @tab X
@item raw H.264 @tab X @tab X
@item raw HEVC @tab X @tab X
@item raw Ingenient MJPEG @tab @tab X
@item raw MJPEG @tab X @tab X
@item raw MLP @tab @tab X
@@ -528,8 +488,8 @@ following image formats are supported:
@tab YUV, JPEG and some extension is not supported yet.
@item Truevision Targa @tab X @tab X
@tab Targa (.TGA) image format
@item WebP @tab E @tab X
@tab WebP image format, encoding supported through external library libwebp
@item WebP @tab @tab X
@tab WebP image format
@item XBM @tab X @tab X
@tab X BitMap image format
@item XFace @tab X @tab X
@@ -647,9 +607,6 @@ following image formats are supported:
@item H.263+ / H.263-1998 / H.263 version 2 @tab X @tab X
@item H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 @tab E @tab X
@tab encoding supported through external library libx264
@item HEVC @tab X @tab X
@tab encoding supported through the external library libx265
@item HNM version 4 @tab @tab X
@item HuffYUV @tab X @tab X
@item HuffYUV FFmpeg variant @tab X @tab X
@item IBM Ultimotion @tab @tab X
@@ -701,6 +658,7 @@ following image formats are supported:
@item Mobotix MxPEG video @tab @tab X
@item Motion Pixels video @tab @tab X
@item MPEG-1 video @tab X @tab X
@item MPEG-1/2 video XvMC (X-Video Motion Compensation) @tab @tab X
@item MPEG-2 video @tab X @tab X
@item MPEG-4 part 2 @tab X @tab X
@tab libxvidcore can be used alternatively for encoding.
@@ -871,7 +829,6 @@ following image formats are supported:
@tab QuickTime fourcc 'alac'
@item ATRAC1 @tab @tab X
@item ATRAC3 @tab @tab X
@item ATRAC3+ @tab @tab X
@item Bink Audio @tab @tab X
@tab Used in Bink and Smacker files in many games.
@item CELT @tab @tab E
@@ -982,6 +939,7 @@ following image formats are supported:
@item Vorbis @tab E @tab X
@tab A native but very primitive encoder exists.
@item Voxware MetaSound @tab @tab X
@tab imperfect and incomplete support
@item WavPack @tab X @tab X
@item Westwood Audio (SND1) @tab @tab X
@item Windows Media Audio 1 @tab X @tab X
@@ -1035,7 +993,6 @@ performance on systems without hardware floating point support).
@multitable @columnfractions .4 .1
@item Name @tab Support
@item file @tab X
@item FTP @tab X
@item Gopher @tab X
@item HLS @tab X
@item HTTP @tab X
@@ -1051,7 +1008,6 @@ performance on systems without hardware floating point support).
@item RTMPTS @tab X
@item RTP @tab X
@item SCTP @tab X
@item SFTP @tab E
@item TCP @tab X
@item TLS @tab X
@item UDP @tab X
@@ -1071,14 +1027,13 @@ performance on systems without hardware floating point support).
@item caca @tab @tab X
@item DV1394 @tab X @tab
@item Lavfi virtual device @tab X @tab
@item Linux framebuffer @tab X @tab X
@item Linux framebuffer @tab X @tab
@item JACK @tab X @tab
@item LIBCDIO @tab X
@item LIBDC1394 @tab X @tab
@item OpenAL @tab X
@item OpenGL @tab @tab X
@item OSS @tab X @tab X
@item PulseAudio @tab X @tab X
@item Pulseaudio @tab X @tab
@item SDL @tab @tab X
@item Video4Linux2 @tab X @tab X
@item VfW capture @tab X @tab

doc/git-howto.txt Normal file

@@ -0,0 +1,273 @@
About Git write access:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Before everything else, you should know how to use GIT properly.
Luckily Git comes with excellent documentation.
git --help
man git
shows you the available subcommands,
git <command> --help
man git-<command>
shows information about the subcommand <command>.
The most comprehensive manual is the website Git Reference
http://gitref.org/
For more information about the Git project, visit
http://git-scm.com/
Consult these resources whenever you have problems; they are quite exhaustive.
You do not need a special username or password.
All you need is to provide an SSH public key to the Git server admin.
What follows now is a basic introduction to Git and some FFmpeg-specific
guidelines. Read it at least once, if you are granted commit privileges to the
FFmpeg project you are expected to be familiar with these rules.
I. BASICS:
==========
0. Get GIT:
Most distributions have a git package. If not,
you can get git from http://git-scm.com/
1. Cloning the source tree:
git clone git://source.ffmpeg.org/ffmpeg <target>
This will put the FFmpeg sources into the directory <target>.
git clone git@source.ffmpeg.org:ffmpeg <target>
This will put the FFmpeg sources into the directory <target> and let
you push back your changes to the remote repository.
2. Updating the source tree to the latest revision:
git pull (--ff-only)
pulls in the latest changes from the tracked branch. The tracked branch
can be remote. By default the master branch tracks the branch master in
the remote origin.
Caveat: Since merge commits are forbidden, at least for the initial
months of git, --ff-only or --rebase (see below) are recommended.
--ff-only will fail and not create merge commits if your branch
has diverged (has a different history) from the tracked branch.
2.a Rebasing your local branches:
git pull --rebase
fetches the changes from the main repository and replays your local commits
over it. This is required to keep all your local changes at the top of
FFmpeg's master tree. The master tree will reject pushes with merge commits.
3. Adding/removing files/directories:
git add [-A] <filename/dirname>
git rm [-r] <filename/dirname>
GIT needs to get notified of all changes you make to your working
directory that make files appear or disappear.
Line moves across files are automatically tracked.
4. Showing modifications:
git diff <filename(s)>
will show all local modifications in your working directory as unified diff.
5. Inspecting the changelog:
git log <filename(s)>
You may also use the graphical tools like gitview or gitk or the web
interface available at http://source.ffmpeg.org
6. Checking source tree status:
git status
detects all the changes you made and lists what actions will be taken in case
of a commit (additions, modifications, deletions, etc.).
7. Committing:
git diff --check
to double check your changes before committing them to avoid trouble later
on. All experienced developers do this on each and every commit, no matter
how small.
Every one of them has been saved from looking like a fool by this many times.
It's very easy for stray debug output or cosmetic modifications to slip in;
please avoid problems through this extra level of scrutiny.
For cosmetics-only commits you should get (almost) empty output from
git diff -w -b <filename(s)>
Also check the output of
git status
to make sure you don't have untracked files or deletions.
git add [-i|-p|-A] <filenames/dirnames>
Make sure you have told git your name and email address, e.g. by running
git config --global user.name "My Name"
git config --global user.email my@email.invalid
(--global to set the global configuration for all your git checkouts).
Git will select the changes to the files for commit. Optionally you can use
the interactive or the patch mode to select hunk by hunk what should be
added to the commit.
git commit
Git will commit the selected changes to your current local branch.
You will be prompted for a log message in an editor, which is either
set in your personal configuration file through
git config core.editor
or set by one of the following environment variables:
GIT_EDITOR, VISUAL or EDITOR.
Log messages should be concise but descriptive. Explain why you made a change;
what you did will be obvious from the changes themselves most of the time.
Saying just "bug fix" or "10l" is bad. Remember that people of varying skill
levels read your code and educate themselves while doing so. Don't
include filenames in log messages; Git provides that information.
Possibly make the commit message have a terse, descriptive first line, an
empty line and then a full description. The first line will be used to name
the patch by git format-patch.
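A hypothetical commit message following this convention (the subsystem
prefix and the described bug are made up for illustration):

    avformat/movenc: fix track duration rounding

    The duration was truncated instead of rounded, so the last sample
    could appear shorter than intended.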
8. Renaming/moving/copying files or contents of files:
Git automatically tracks such changes, making those normal commits.
mv/cp path/file otherpath/otherfile
git add [-A] .
git commit
Do not move, rename or copy files of which you are not the maintainer without
discussing it on the mailing list first!
9. Reverting broken commits
git revert <commit>
git revert will generate a revert commit. This will not make the faulty
commit disappear from the history.
git reset <commit>
git reset will uncommit the changes up to <commit>, rewriting the current
branch history.
git commit --amend
allows you to amend the last commit details quickly.
git rebase -i origin/master
will replay local commits over the main repository, allowing you to edit,
merge or remove some of them in the process.
Note that the reset, commit --amend and rebase rewrite history, so you
should use them ONLY on your local or topic branches.
The main repository will reject those changes.
10. Preparing a patchset.
git format-patch <commit> [-o directory]
will generate a set of patches for each commit between <commit> and
current HEAD. E.g.
git format-patch origin/master
will generate patches for all commits on the current branch which are not
present in upstream.
A useful shortcut is also
git format-patch -n
which will generate patches from the last n commits.
By default the patches are created in the current directory.
11. Sending patches for review
git send-email <commit list|directory>
will send the patches created by git format-patch, or generate them
directly. All the email fields can be configured in the global/local
configuration or overridden by command line.
Note that this tool must often be installed separately (e.g. git-email
package on Debian-based distros).
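For instance, a hypothetical round of review submission (the output
directory name is arbitrary):

    git format-patch -o patches origin/master
    git send-email --to ffmpeg-devel@ffmpeg.org patches/*.patch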
12. Pushing changes to remote trees
git push
Will push the changes to the default remote (origin).
Git will prevent you from pushing changes if the local and remote trees are
out of sync. Refer to 2 and 2.a to sync the local tree.
git remote add <name> <url>
Will add an additional remote with a name reference. It is useful if you want
to push your local branch for review on a remote host.
git push <remote> <refspec>
Will push the changes to the remote repository. Omitting refspec makes git
push update all the remote branches matching the local ones.
13. Finding a specific svn revision
Since version 1.7.1 git supports ':/foo' syntax for specifying commits
based on a regular expression. See man gitrevisions.
git show :/'as revision 23456'
will show the svn changeset r23456. With older git versions searching in
the git log output is the easiest option (especially if a pager with
search capabilities is used).
This commit can be checked out with
git checkout -b svn_23456 :/'as revision 23456'
or for git < 1.7.1 with
git checkout -b svn_23456 $SHA1
where $SHA1 is the commit SHA1 from the 'git log' output.
Contact the project admins <root at ffmpeg dot org> if you have technical
problems with the GIT server.


@@ -16,25 +16,7 @@ The libavutil library is a utility library to aid portable
multimedia programming. It contains safe portable string functions,
random number generators, data structures, additional mathematics
functions, cryptography and multimedia related functionality (like
enumerations for pixel and sample formats). It is not a library for
code needed by both libavcodec and libavformat.
The goals for this library are to be:
@table @strong
@item Modular
It should have few interdependencies and the possibility of disabling individual
parts during @command{./configure}.
@item Small
Both sources and objects should be small.
@item Efficient
It should have low CPU and memory usage.
@item Useful
It should avoid useless features that almost no one needs.
@end table
enumerations for pixel and sample formats).
@c man end DESCRIPTION


@@ -23,8 +23,6 @@ A description of some of the currently available muxers follows.
Audio Interchange File Format muxer.
@subsection Options
It accepts the following options:
@table @option
@@ -51,10 +49,6 @@ The output of the muxer consists of a single line of the form:
CRC=0x@var{CRC}, where @var{CRC} is a hexadecimal number 0-padded to
8 digits containing the CRC for all the decoded input frames.
See also the @ref{framecrc} muxer.
@subsection Examples
For example to compute the CRC of the input, and store it in the file
@file{out.crc}:
@example
@@ -74,6 +68,8 @@ and the input video converted to MPEG-2 video, use the command:
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc -
@end example
See also the @ref{framecrc} muxer.
@anchor{framecrc}
@section framecrc
@@ -93,8 +89,6 @@ packet of the form:
@var{CRC} is a hexadecimal number 0-padded to 8 digits containing the
CRC of the packet.
@subsection Examples
For example to compute the CRC of the audio and video frames in
@file{INPUT}, converted to raw audio and video packets, and store it
in the file @file{out.crc}:
@@ -138,8 +132,6 @@ packet of the form:
@var{MD5} is a hexadecimal number representing the computed MD5 hash
for the packet.
@subsection Examples
For example to compute the MD5 of the audio and video frames in
@file{INPUT}, converted to raw audio and video packets, and store it
in the file @file{out.md5}:
@@ -154,89 +146,30 @@ ffmpeg -i INPUT -f framemd5 -
See also the @ref{md5} muxer.
@anchor{gif}
@section gif
Animated GIF muxer.
It accepts the following options:
@table @option
@item loop
Set the number of times to loop the output. Use @code{-1} for no loop, @code{0}
for looping indefinitely (default).
@item final_delay
Force the delay (expressed in centiseconds) after the last frame. Each frame
ends with a delay until the next frame. The default is @code{-1}, which is a
special value to tell the muxer to re-use the previous delay. In case of a
loop, you might want to customize this value to mark a pause for instance.
@end table
For example, to encode a gif looping 10 times, with a 5 seconds delay between
the loops:
@example
ffmpeg -i INPUT -loop 10 -final_delay 500 out.gif
@end example
Note 1: if you wish to extract the frames in separate GIF files, you need to
force the @ref{image2} muxer:
@example
ffmpeg -i INPUT -c:v gif -f image2 "out%d.gif"
@end example
Note 2: the GIF format has a very small time base: the delay between two frames
cannot be smaller than one centisecond.
@anchor{hls}
@section hls
Apple HTTP Live Streaming muxer that segments MPEG-TS according to
the HTTP Live Streaming (HLS) specification.
the HTTP Live Streaming specification.
It creates a playlist file and numbered segment files. The output
filename specifies the playlist filename; the segment filenames
receive the same basename as the playlist, a sequential number and
a .ts extension.
For example, to convert an input file with @command{ffmpeg}:
@example
ffmpeg -i in.nut out.m3u8
@end example
See also the @ref{segment} muxer, which provides a more generic and
flexible implementation of a segmenter, and can be used to perform HLS
segmentation.
@subsection Options
This muxer supports the following options:
@table @option
@item hls_time @var{seconds}
Set the segment length in seconds. Default value is 2.
@item hls_list_size @var{size}
Set the maximum number of playlist entries. If set to 0 the list file
will contain all the segments. Default value is 5.
@item hls_wrap @var{wrap}
Set the number after which the segment filename number (the number
specified in each segment file) wraps. If set to 0 the number will
never be wrapped. Default value is 0.
This option is useful to avoid filling the disk with many segment
files, and limits the maximum number of segment files written to disk
to @var{wrap}.
@item start_number @var{number}
Start the playlist sequence number from @var{number}. Default value is
0.
Note that the playlist sequence number must be unique for each segment
and it is not to be confused with the segment filename sequence number
which can be cyclic, for example if the @option{wrap} option is
specified.
@item -hls_time @var{seconds}
Set the segment length in seconds.
@item -hls_list_size @var{size}
Set the maximum number of playlist entries.
@item -hls_wrap @var{wrap}
Set the number after which index wraps.
@item -start_number @var{number}
Start the sequence from @var{number}.
@end table
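For example, a hypothetical live-oriented invocation keeping six
10-second segments in the playlist and wrapping segment numbers at 12:
@example
ffmpeg -i in.nut -hls_time 10 -hls_list_size 6 -hls_wrap 12 out.m3u8
@end example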
@anchor{ico}
@@ -302,8 +235,6 @@ The pattern "img%%-%d.jpg" will specify a sequence of filenames of the
form @file{img%-1.jpg}, @file{img%-2.jpg}, ..., @file{img%-10.jpg},
etc.
@subsection Examples
The following example shows how to use @command{ffmpeg} for creating a
sequence of files @file{img-001.jpeg}, @file{img-002.jpeg}, ...,
taking one image every second from the input video:
@@ -326,32 +257,16 @@ Note also that the pattern must not necessarily contain "%d" or
ffmpeg -i in.avi -f image2 -frames:v 1 img.jpeg
@end example
The @option{strftime} option allows you to expand the filename with
date and time information. Check the documentation of
the @code{strftime()} function for the syntax.
For example to generate image files from the @code{strftime()}
"%Y-%m-%d_%H-%M-%S" pattern, the following @command{ffmpeg} command
can be used:
@example
ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"
@end example
@subsection Options
@table @option
@item start_number
Start the sequence from the specified number. Default value is 1. Must
be a non-negative number.
@item start_number @var{number}
Start the sequence from @var{number}. Default value is 1. Must be a
non-negative number.
@item update
If set to 1, the filename will always be interpreted as just a
filename, not a pattern, and the corresponding file will be continuously
overwritten with new images. Default value is 0.
@item -update @var{number}
If @var{number} is nonzero, the filename will always be interpreted as just a
filename, not a pattern, and this file will be continuously overwritten with new
images.
@item strftime
If set to 1, expand the filename with date and time information from
@code{strftime()}. Default value is 0.
@end table
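As an illustration of the @option{update} option, the following
(hypothetical) command keeps overwriting a single JPEG with the most
recent frame:
@example
ffmpeg -i INPUT -f image2 -update 1 latest.jpg
@end example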
The image muxer supports the .Y.U.V image file format. This format is
@@ -366,27 +281,25 @@ Matroska container muxer.
This muxer implements the matroska and webm container specs.
@subsection Metadata
The recognized metadata settings in this muxer are:
@table @option
@item title
Set title name provided to a single track.
@item language
Specify the language of the track in the Matroska languages form.
@item title=@var{title name}
Name provided to a single track
@end table
The language can be either the 3-letter bibliographic ISO-639-2 (ISO
639-2/B) form (like "fre" for French), or a language code mixed with a
country code for specialities in languages (like "fre-ca" for Canadian
French).
@table @option
@item stereo_mode
Set stereo 3D video layout of two views in a single video track.
@item language=@var{language name}
Specifies the language of the track in the Matroska languages form
@end table
The following values are recognized:
@table @samp
@table @option
@item stereo_mode=@var{mode}
Stereo 3D video layout of two views in a single video track
@table @option
@item mono
video is not stereo
@item left_right
@@ -425,11 +338,10 @@ For example a 3D WebM clip can be created using the following command line:
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
@end example
@subsection Options
This muxer supports the following options:
@table @option
@item reserve_index_space
By default, this muxer writes the index for seeking (called cues in Matroska
terms) at the end of the file, because it cannot know in advance how much space
@@ -444,6 +356,7 @@ for most use cases should be about 50kB per hour of video.
Note that cues are only written if the output is seekable and this option will
have no effect if it is not.
@end table
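As a sketch, for a recording expected to last about two hours (at the
suggested 50kB per hour, reserving 100kB):
@example
ffmpeg -i INPUT -reserve_index_space 102400 out.mkv
@end example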
@anchor{md5}
@@ -473,9 +386,7 @@ ffmpeg -i INPUT -f md5 -
See also the @ref{framemd5} muxer.
@section mov, mp4, ismv
MOV/MP4/ISMV (Smooth Streaming) muxer.
@section MOV/MP4/ISMV
The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4
file has all the metadata about all packets stored in one location
@@ -491,8 +402,6 @@ very long files (since writing normal MOV/MP4 files stores info about
every single packet in memory until the file is closed). The downside
is that it is less compatible with other applications.
@subsection Options
Fragmentation is enabled by setting one of the AVOptions that define
how to cut the file into fragments:
@@ -549,8 +458,6 @@ as fragmented output, thus it is not enabled by default.
Add RTP hinting tracks to the output file.
@end table
@subsection Example
Smooth Streaming content can be pushed in real time to a publishing
point on IIS with this muxer. Example:
@example
@@ -561,15 +468,12 @@ ffmpeg -re @var{<normal input/transcoding options>} -movflags isml+frag_keyframe
The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and
optionally an ID3v1 tag at the end. ID3v2.3 and ID3v2.4 are supported, the
@code{id3v2_version} option controls which one is used. Setting
@code{id3v2_version} to 0 will disable the ID3v2 header completely. The legacy
ID3v1 tag is not written by default, but may be enabled with the
@code{write_id3v1} option.
@code{id3v2_version} option controls which one is used. The legacy ID3v1 tag is
not written by default, but may be enabled with the @code{write_id3v1} option.
The muxer may also write a Xing frame at the beginning, which contains the
number of frames in the file. It is useful for computing duration of VBR files.
The Xing frame is written if the output stream is seekable and if the
@code{write_xing} option is set to 1 (the default).
For seekable output the muxer also writes a Xing frame at the beginning, which
contains the number of frames in the file. It is useful for computing duration
of VBR files.
The muxer supports writing ID3v2 attached pictures (APIC frames). The pictures
are supplied to the muxer in form of a video stream with a single packet. There
@@ -596,24 +500,12 @@ ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
-metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
@end example
Write a "clean" MP3 without any extra features:
@example
ffmpeg -i input.wav -write_xing 0 -id3v2_version 0 out.mp3
@end example
@section mpegts
MPEG transport stream muxer.
This muxer implements ISO 13818-1 and part of ETSI EN 300 468.
The recognized metadata settings in mpegts muxer are @code{service_provider}
and @code{service_name}. If they are not set the default for
@code{service_provider} is "FFmpeg" and the default for
@code{service_name} is "Service01".
@subsection Options
The muxer options are:
@table @option
@@ -666,7 +558,10 @@ Reemit PAT/PMT before writing the next packet.
Use LATM packetization for AAC.
@end table
@subsection Example
The recognized metadata settings in mpegts muxer are @code{service_provider}
and @code{service_name}. If they are not set the default for
@code{service_provider} is "FFmpeg" and the default for
@code{service_name} is "Service01".
@example
ffmpeg -i file.mpg -c copy \
@@ -717,12 +612,11 @@ situations, giving a small seek granularity at the cost of additional container
overhead.
@end table
@anchor{segment}
@section segment, stream_segment, ssegment
Basic stream segmenter.
This muxer outputs streams to a number of separate files of nearly
The segmenter muxer outputs streams to a number of separate files of nearly
fixed duration. Output filename pattern can be set in a fashion similar to
@ref{image2}.
@@ -744,14 +638,7 @@ The segment muxer works best with a single constant frame rate video.
Optionally it can generate a list of the created segments, by setting
the option @var{segment_list}. The list type is specified by the
@var{segment_list_type} option. The entry filenames in the segment
list are set by default to the basename of the corresponding segment
files.
See also the @ref{hls} muxer, which provides a more specific
implementation for HLS segmentation.
@subsection Options
@var{segment_list_type} option.
The segment muxer supports the following options:
@@ -783,15 +670,13 @@ Allow caching (only affects M3U8 list files).
Allow live-friendly file generation.
@end table
Default value is @code{cache}.
@item segment_list_size @var{size}
Update the list file so that it contains at most the last @var{size}
segments. If 0 the list file will contain all the segments. Default
value is 0.
@item segment_list_entry_prefix @var{prefix}
Set @var{prefix} to prepend to the name of each entry filename. By
default no prefix is applied.
@item segment_list_type @var{type}
Specify the format for the segment list file.
In particular, it may be used in combination with the @command{ffmpeg} option
@var{force_key_frames} may not be set accurately because of rounding
issues, with the consequence that a key frame time may result set just
before the specified time. For constant frame rate videos a value of
1/(2*@var{frame_rate}) should address the worst case mismatch between
1/2*@var{frame_rate} should address the worst case mismatch between
the specified time and the time set by @var{force_key_frames}.
@item segment_times @var{times}
@@ -964,8 +849,8 @@ to feed the same packets to several muxers directly.
The slave outputs are specified in the file name given to the muxer,
separated by '|'. If any of the slave names contains the '|' separator,
leading or trailing spaces or any special character, it must be
escaped (see @ref{quoting_and_escaping,,the "Quoting and escaping"
section in the ffmpeg-utils(1) manual,ffmpeg-utils}).
escaped (see the ``Quoting and escaping'' section in the ffmpeg-utils
manual).
Muxer options can be specified for each slave by prepending them as a list of
@var{key}=@var{value} pairs separated by ':', between square brackets. If
@@ -980,12 +865,9 @@ output name suffix.
@item bsfs[/@var{spec}]
Specify a list of bitstream filters to apply to the specified
output.
It is possible to specify to which streams a given bitstream filter
applies, by appending a stream specifier to the option separated by
@code{/}. @var{spec} must be a stream specifier (see @ref{Format
stream specifiers}). If the stream specifier is not specified, the
output. It is possible to specify to which streams a given bitstream
filter applies, by appending a stream specifier to the option
separated by @code{/}. If the stream specifier is not specified, the
bitstream filters will be applied to all streams in the output.
Several bitstream filters can be specified, separated by ",".
@@ -996,8 +878,7 @@ specified by a stream specifier. If not specified, this defaults to
all the input streams.
@end table
@subsection Examples
Some examples follow.
@itemize
@item
Encode something and both archive it in a WebM file and stream it
@@ -1018,15 +899,6 @@ audio packets.
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
-f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac"
@end example
@item
As below, but select only stream @code{a:1} for the audio output. Note
that a second level escaping must be performed, as ":" is a special
character used to separate options.
@example
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
-f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac"
@end example
@end itemize
Note: some codecs may need different options depending on the output format;


@@ -22,22 +22,6 @@ A description of the currently available output devices follows.
ALSA (Advanced Linux Sound Architecture) output device.
@subsection Examples
@itemize
@item
Play a file on default ALSA device:
@example
ffmpeg -i INPUT -f alsa default
@end example
@item
Play a file on soundcard 1, audio device 7:
@example
ffmpeg -i INPUT -f alsa hw:1,7
@end example
@end itemize
@section caca
CACA output device.
@@ -120,68 +104,6 @@ ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_dither colors -
@end example
@end itemize
@section decklink
The decklink output device provides playback capabilities for Blackmagic
DeckLink devices.
To enable this output device, you need the Blackmagic DeckLink SDK and you
need to configure with the appropriate @code{--extra-cflags}
and @code{--extra-ldflags}.
On Windows, you need to run the IDL files through @command{widl}.
DeckLink is very picky about the formats it supports. Pixel format is always
uyvy422, framerate and video size must be determined for your device with
@command{-list_formats 1}. Audio sample rate is always 48 kHz.
@subsection Options
@table @option
@item list_devices
If set to @option{true}, print a list of devices and exit.
Defaults to @option{false}.
@item list_formats
If set to @option{true}, print a list of supported formats and exit.
Defaults to @option{false}.
@item preroll
Amount of time to preroll video in seconds.
Defaults to @option{0.5}.
@end table
@subsection Examples
@itemize
@item
List output devices:
@example
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
@end example
@item
List supported formats:
@example
ffmpeg -i test.avi -f decklink -list_formats 1 'DeckLink Mini Monitor'
@end example
@item
Play video clip:
@example
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 'DeckLink Mini Monitor'
@end example
@item
Play video clip with non-standard framerate or video size:
@example
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLink Mini Monitor'
@end example
@end itemize
@section fbdev
Linux framebuffer output device.
@@ -211,41 +133,6 @@ ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -f fbdev /dev/fb0
See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).
@section opengl
OpenGL output device.
To enable this output device you need to configure FFmpeg with @code{--enable-opengl}.
The device renders to an OpenGL context.
The context may be provided by the application, or a default SDL window is created.
When the device renders to an external context, the application must implement handlers for the following messages:
@code{AV_CTL_MESSAGE_CREATE_WINDOW_BUFFER} - create OpenGL context on current thread.
@code{AV_CTL_MESSAGE_PREPARE_WINDOW_BUFFER} - make OpenGL context current.
@code{AV_CTL_MESSAGE_DISPLAY_WINDOW_BUFFER} - swap buffers.
@code{AV_CTL_MESSAGE_DESTROY_WINDOW_BUFFER} - destroy OpenGL context.
The application is also required to inform the device about the current resolution by sending the @code{AV_DEVICE_WINDOW_RESIZED} message.
@subsection Options
@table @option
@item background
Set the background color. Black is the default.
@item no_window
Disables the default SDL window when set to a non-zero value.
The application must provide an OpenGL context and both @code{window_size_cb} and @code{window_swap_buffers_cb} callbacks when set.
@item window_title
Set the SDL window title. If not specified, it defaults to the filename specified for the output device.
Ignored when @option{no_window} is set.
@end table
@subsection Examples
Play a file on SDL window using OpenGL rendering:
@example
ffmpeg -i INPUT -f opengl "window title"
@end example
@section oss
OSS (Open Sound System) output device.
@@ -277,19 +164,6 @@ by default it is set to the specified output name.
Specify the device to use. Default device is used when not provided.
List of output devices can be obtained with command @command{pactl list sinks}.
@item buffer_size
@item buffer_duration
Control the size and duration of the PulseAudio buffer. A small buffer
gives more control, but requires more frequent updates.
@option{buffer_size} specifies size in bytes while
@option{buffer_duration} specifies duration in milliseconds.
When both options are provided, the highest value is used
(duration is recalculated to bytes using stream parameters). If they
are set to 0 (which is the default), the device will use the default
PulseAudio duration value. By default PulseAudio sets the buffer duration
to around 2 seconds.
@end table
@subsection Examples
@@ -332,17 +206,7 @@ downscaled according to the aspect ratio.
@item window_fullscreen
Set fullscreen mode when non-zero value is provided.
Default value is zero.
@end table
@subsection Interactive commands
The window created by the device can be controlled through the
following interactive commands.
@table @key
@item q, ESC
Quit the device immediately.
Zero is the default.
@end table
@subsection Examples


@@ -108,16 +108,14 @@ libavformat) as DLLs.
@section Microsoft Visual C++ or Intel C++ Compiler for Windows
FFmpeg can be built with MSVC 2012 or earlier using a C99-to-C89 conversion utility
and wrapper, or with MSVC 2013 and ICL natively.
FFmpeg can be built with MSVC or ICL using a C99-to-C89 conversion utility and
wrapper. For ICL, only the wrapper is used, since ICL supports C99.
You will need the following prerequisites:
@itemize
@item @uref{https://github.com/libav/c99-to-c89/, C99-to-C89 Converter & Wrapper}
(if using MSVC 2012 or earlier)
@item @uref{http://download.videolan.org/pub/contrib/c99-to-c89/, C99-to-C89 Converter & Wrapper}
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
(if using MSVC 2012 or earlier)
@item @uref{http://www.mingw.org/, MSYS}
@item @uref{http://yasm.tortall.net/, YASM}
@item @uref{http://gnuwin32.sourceforge.net/packages/bc.htm, bc for Windows} if
@@ -127,16 +125,14 @@ you want to run @uref{fate.html, FATE}.
To set up a proper environment in MSYS, you need to run @code{msys.bat} from
the Visual Studio or Intel Compiler command prompt.
Place @code{yasm.exe} somewhere in your @code{PATH}. If using MSVC 2012 or
earlier, place @code{c99wrap.exe} and @code{c99conv.exe} somewhere in your
@code{PATH} as well.
Place @code{makedef}, @code{c99wrap.exe}, @code{c99conv.exe}, and @code{yasm.exe}
somewhere in your @code{PATH}.
Next, make sure any other headers and libs you want to use, such as zlib, are
located in a spot that the compiler can see. Do so by modifying the @code{LIB}
and @code{INCLUDE} environment variables to include the @strong{Windows-style}
paths to these directories. Alternatively, you can try and use the
@code{--extra-cflags}/@code{--extra-ldflags} configure options. If using MSVC
2012 or earlier, place @code{inttypes.h} somewhere the compiler can see too.
Next, make sure @code{inttypes.h} and any other headers and libs you want to use
are located in a spot that the compiler can see. Do so by modifying the @code{LIB}
and @code{INCLUDE} environment variables to include the @strong{Windows} paths to
these directories. Alternatively, you can try and use the
@code{--extra-cflags}/@code{--extra-ldflags} configure options.
Finally, run:
@@ -186,9 +182,7 @@ can see.
@itemize
@item Visual Studio 2010 Pro and Express
@item Visual Studio 2012 Pro and Express
@item Visual Studio 2013 Pro and Express
@item Intel Composer XE 2013
@item Intel Composer XE 2013 SP1
@end itemize
Anything else is not officially supported.


@@ -117,19 +117,7 @@ ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP/////////////
File access protocol.
Allow reading from or writing to a file.
A file URL can have the form:
@example
file:@var{filename}
@end example
where @var{filename} is the path of the file to read.
A URL that does not have a protocol prefix will be assumed to be a
file URL. Depending on the build, a URL that looks like a Windows
path with the drive letter at the beginning will also be assumed to be
a file URL (usually not the case in builds for unix-like systems).
Allow reading from or writing to a file.
For example to read from a file @file{input.mpeg} with @command{ffmpeg}
use the command:
@@ -137,6 +125,10 @@ use the command:
ffmpeg -i file:input.mpeg output.mpeg
@end example
The ff* tools default to the file protocol, that is a resource
specified with the name "FILE.mpeg" is interpreted as the URL
"file:FILE.mpeg".
This protocol accepts the following options:
@table @option
@@ -552,10 +544,6 @@ is not specified.
Truncate existing files on write, if set to 1. A value of 0 prevents
truncating. Default value is 1.
@item private_key
Specify the path of the file containing private key to use during authorization.
By default libssh searches for keys in the @file{~/.ssh/} directory.
@end table
Example: Play a file stored on remote server.
set to the local RTP port value plus 1.
@section rtsp
Real-Time Streaming Protocol.
RTSP is not technically a protocol handler in libavformat; it is a demuxer
and muxer. The demuxer supports both normal RTSP (with data transferred
over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
@@ -689,22 +675,14 @@ The required syntax for a RTSP url is:
rtsp://@var{hostname}[:@var{port}]/@var{path}
@end example
Options can be set on the @command{ffmpeg}/@command{ffplay} command
line, or set in code via @code{AVOption}s or in
@code{avformat_open_input}.
The following options (set on the @command{ffmpeg}/@command{ffplay} command
line, or set in code via @code{AVOption}s or in @code{avformat_open_input}),
are supported:
The following options are supported.
Flags for @code{rtsp_transport}:
@table @option
@item initial_pause
Do not start playing the stream immediately if set to 1. Default value
is 0.
@item rtsp_transport
Set RTSP transport protocols.
It accepts the following values:
@table @samp
@item udp
Use UDP as lower transport protocol.
@@ -722,56 +700,17 @@ passing proxies.
Multiple lower transport protocols may be specified, in that case they are
tried one at a time (if the setup of one fails, the next one is tried).
For the muxer, only the @samp{tcp} and @samp{udp} options are supported.
For the muxer, only the @code{tcp} and @code{udp} options are supported.
@item rtsp_flags
Set RTSP flags.
Flags for @code{rtsp_flags}:
The following values are accepted:
@table @samp
@table @option
@item filter_src
Accept packets only from negotiated peer address and port.
@item listen
Act as a server, listening for an incoming connection.
@end table
Default value is @samp{none}.
@item allowed_media_types
Set media types to accept from the server.
The following flags are accepted:
@table @samp
@item video
@item audio
@item data
@end table
By default it accepts all media types.
@item min_port
Set minimum local UDP port. Default value is 5000.
@item max_port
Set maximum local UDP port. Default value is 65000.
@item timeout
Set maximum timeout (in seconds) to wait for incoming connections.
A value of -1 means infinite (default). This option implies
@option{rtsp_flags} set to @samp{listen}.
@item reorder_queue_size
Set number of packets to buffer for handling of reordered packets.
@item stimeout
Set socket TCP I/O timeout in microseconds.
@item user-agent
Override User-Agent header. If not specified, it defaults to the
libavformat identifier string.
@end table
When receiving data over UDP, the demuxer tries to reorder received packets
(since they may arrive out of order, or packets may get lost totally). This
can be disabled by setting the maximum demuxing delay to zero (via
@@ -782,36 +721,36 @@ streams to display can be chosen with @code{-vst} @var{n} and
@code{-ast} @var{n} for video and audio respectively, and can be switched
on the fly by pressing @code{v} and @code{a}.
@subsection Examples
Example command lines:
The following examples all make use of the @command{ffplay} and
@command{ffmpeg} tools.
To watch a stream over UDP, with a max reordering delay of 0.5 seconds:
@itemize
@item
Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
@example
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
@end example
@item
Watch a stream tunneled over HTTP:
To watch a stream tunneled over HTTP:
@example
ffplay -rtsp_transport http rtsp://server/video.mp4
@end example
@item
Send a stream in realtime to a RTSP server, for others to watch:
To send a stream in realtime to a RTSP server, for others to watch:
@example
ffmpeg -re -i @var{input} -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
@end example
@item
Receive a stream in realtime:
To receive a stream in realtime:
@example
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp @var{output}
@end example
@end itemize
@table @option
@item stimeout
Socket I/O timeout in microseconds.
@end table
@section sap
@@ -958,32 +897,23 @@ The required syntax for a TCP url is:
tcp://@var{hostname}:@var{port}[?@var{options}]
@end example
@var{options} contains a list of &-separated options of the form
@var{key}=@var{val}.
The list of supported options follows.
@table @option
@item listen=@var{1|0}
Listen for an incoming connection. Default value is 0.
@item listen
Listen for an incoming connection.
@item timeout=@var{microseconds}
Set raise error timeout, expressed in microseconds.
In read mode: if no data arrives within this time interval, raise an error.
In write mode: if the socket cannot be written to within this time interval, raise an error.
This also sets the timeout on establishing the TCP connection.
This option is only relevant in read mode: if no data arrived in more
than this time interval, raise error.
@item listen_timeout=@var{microseconds}
Set listen timeout, expressed in microseconds.
@end table
The following example shows how to setup a listening TCP connection
with @command{ffmpeg}, which is then accessed with @command{ffplay}:
@example
ffmpeg -i @var{input} -f @var{format} tcp://@var{hostname}:@var{port}?listen
ffplay tcp://@var{hostname}:@var{port}
@end example
@end table
@section tls
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
@@ -1049,7 +979,7 @@ ffplay tls://@var{hostname}:@var{port}
User Datagram Protocol.
The required syntax for an UDP URL is:
The required syntax for a UDP url is:
@example
udp://@var{hostname}:@var{port}[?@var{options}]
@end example
@@ -1064,6 +994,7 @@ UDP socket buffer overruns. The @var{fifo_size} and
The list of supported options follows.
@table @option
@item buffer_size=@var{size}
Set the UDP socket buffer size in bytes. This is used for both the
receive and the send buffer size.
@@ -1113,34 +1044,25 @@ Survive in case of UDP receiving circular buffer overrun. Default
value is 0.
@item timeout=@var{microseconds}
Set raise error timeout, expressed in microseconds.
This option is only relevant in read mode: if no data arrived in more
than this time interval, raise error.
In read mode: if no data arrived in more than this time interval, raise error.
@end table
@subsection Examples
Some usage examples of the UDP protocol with @command{ffmpeg} follow.
@itemize
@item
Use @command{ffmpeg} to stream over UDP to a remote endpoint:
To stream over UDP to a remote endpoint:
@example
ffmpeg -i @var{input} -f @var{format} udp://@var{hostname}:@var{port}
@end example
@item
Use @command{ffmpeg} to stream in mpegts format over UDP using 188-byte
UDP packets and a large input buffer:
To stream in mpegts format over UDP using 188-byte UDP packets and a large input buffer:
@example
ffmpeg -i @var{input} -f mpegts udp://@var{hostname}:@var{port}?pkt_size=188&buffer_size=65535
@end example
@item
Use @command{ffmpeg} to receive over UDP from a remote endpoint:
To receive over UDP from a remote endpoint:
@example
ffmpeg -i udp://[@var{multicast-address}]:@var{port} ...
ffmpeg -i udp://[@var{multicast-address}]:@var{port}
@end example
@end itemize
@section unix

24
doc/soc.txt Normal file
View File

@@ -0,0 +1,24 @@
Google Summer of Code and similar project guidelines
Summer of Code is a project by Google in which students are paid to implement
some nice new features for various participating open source projects ...
This text is a collection of things to take care of for the next SoC, as
it's a little late for this year's SoC (2006).
The Goal:
Our goal with respect to SoC is, and of course must be, exactly one thing,
and that is to improve FFmpeg. To reach this goal, code must
* conform to the development policy and patch submission guidelines
* improve FFmpeg somehow (faster, smaller, "better",
more codecs supported, fewer bugs, cleaner, ...)
For mentors and other developers to help students reach that goal, it is
essential that changes to their codebase are publicly visible, clean and
easily reviewable. That again leads us to:
* use of a revision control system like git
* separation of cosmetic from non-cosmetic changes (this was almost entirely
ignored by mentors and students in SoC 2006, which might lead to a surprise
when the code is reviewed at the end before possible inclusion in
FFmpeg; individual changes were generally not reviewable due to cosmetics).
* frequent commits, so that comments can be provided early

9
doc/texi2pod.pl Normal file → Executable file
View File

@@ -1,4 +1,4 @@
#!/usr/bin/env perl
#! /usr/bin/perl
# Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
@@ -327,9 +327,6 @@ die "No filename or title\n" unless defined $fn && defined $tl;
$chapters{NAME} = "$fn \- $tl\n";
$chapters{FOOTNOTES} .= "=back\n" if exists $chapters{FOOTNOTES};
# always use utf8
print "=encoding utf8\n\n";
unshift @chapters_sequence, "NAME";
for $chapter (@chapters_sequence) {
if (exists $chapters{$chapter}) {
@@ -380,8 +377,8 @@ sub postprocess
s/\(?\@xref\{(?:[^\}]*)\}(?:[^.<]|(?:<[^<>]*>))*\.\)?//g;
s/\s+\(\@pxref\{(?:[^\}]*)\}\)//g;
s/;\s+\@pxref\{(?:[^\}]*)\}//g;
s/\@ref\{(?:[^,\}]*,)(?:[^,\}]*,)([^,\}]*).*\}/B<$1>/g;
s/\@ref\{([^\}]*)\}/B<$1>/g;
s/\@ref\{(?:[^,\}]*,)(?:[^,\}]*,)([^,\}]*).*\}/$1/g;
s/\@ref\{([^\}]*)\}/$1/g;
s/\@noindent\s*//g;
s/\@refill//g;
s/\@gol//g;

View File

@@ -1,32 +0,0 @@
#! /usr/bin/env perl
# This script will print the dependencies of a Texinfo file to stdout.
# texidep.pl <src-path> <input.texi> <output.ext>
use warnings;
use strict;
die unless @ARGV == 3;
my ($src_path, $root, $target) = @ARGV;
sub print_deps {
my ($file, $deps) = @_;
$deps->{$file} = 1;
open(my $fh, "<", "$file") or die "Cannot open file '$file': $!";
while (<$fh>) {
if (my ($i) = /^\@(?:verbatim)?include\s+(\S+)/) {
die "Circular dependency found in file $root\n" if exists $deps->{"doc/$1"};
print "$target: doc/$1\n";
# skip looking for config.texi dependencies, since it has
# none, and is not located in the source tree
if ("$1" ne "config.texi") {
print_deps("$src_path/doc/$1", {%$deps});
}
}
}
}
print_deps($root, {});

View File

@@ -1051,13 +1051,13 @@ See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
Select the index of the platform to run OpenCL code.
The specified index must be one of the indexes in the device list
which can be obtained with @code{ffmpeg -opencl_bench} or @code{av_opencl_get_device_list()}.
which can be obtained with @code{av_opencl_get_device_list()}.
@item device_idx
Select the index of the device used to run OpenCL code.
The specified index must be one of the indexes in the device list which
can be obtained with @code{ffmpeg -opencl_bench} or @code{av_opencl_get_device_list()}.
can be obtained with @code{av_opencl_get_device_list()}.
@end table
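As an illustrative sketch (the indexes below are assumptions for the sake of
example), one could first benchmark the available devices and then select one
via the @option{-opencl_options} key=value list:

@example
# benchmark and list the available OpenCL devices
ffmpeg -opencl_bench

# run with platform 0, device 1 from that list
ffmpeg -opencl_options platform_idx=0:device_idx=1 -i input -vf unsharp=opencl=1 output
@end example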

109
doc/viterbi.txt Normal file
View File

@@ -0,0 +1,109 @@
This is a quick description of the Viterbi aka dynamic programming
algorithm.
Its reason for existence is that Wikipedia has become very poor at
describing algorithms in a way that makes them usable for understanding
them, or anything else actually. It now tends to describe the very same
algorithm under 50 different names and pages, few of which are understandable
even by people who fully understand the algorithm and the theory behind it.
Problem description: (that is, what it can solve)
Assume we have a 2D table, or you could call it a graph or matrix if you
prefer:
O O O O O O O
O O O O O O O
O O O O O O O
O O O O O O O
That table has edges connecting points from each column to the next column,
and each edge has a score like this (only some edges and scores shown to keep
it readable):
O--5--O-----O-----O-----O-----O
2 / 7 / \ / \ / \ /
\ / \ / \ / \ / \ /
O7-/--O--/--O--/--O--/--O--/--O
\/ \/ 1/ \/ \/ \/ \/ \/ \/ \/
/\ /\ 2\ /\ /\ /\ /\ /\ /\ /\
O3-/--O--/--O--/--O--/--O--/--O
/ \ / \ / \ / \ / \
1 \ 9 \ / \ / \ / \
O--2--O--1--O--5--O--3--O--8--O
Our goal is to find a path from left to right through it which
minimizes the sum of the scores of all edges.
(And of course left/right is just a convention here; it could be top-down too.)
Similarly, the minimum could be the maximum by just flipping the sign.
Example of a path with scores:
O O O O O O O
>---O. O O .O-2-O O O
5. .7 .
O O-1-O O O 8 O O
.
O O O O O O-1-O---> (sum here is 24)
The Viterbi algorithm now solves this simply column by column.
For the previous column, each point has a best path and an associated
score:
O-----5 O
\
\
O \ 1 O
\/
/\
O / 2 O
/
/
O-----2 O
To move one column forward we just need to find the best paths and associated
scores for the next column.
Here are some edges we could choose from:
O-----5--3--O
\ \8
\ \
O \ 1--9--O
\/ \3
/\ \
O / 2--1--O
/ \2
/ \
O-----2--4--O
Finding the new best paths and scores for each point of our new column is
trivial given that we know the previous column's best paths and scores:
O-----0-----8
\
\
O \ 0----10
\/
/\
O / 0-----3
/ \
/ \
O 0 4
The Viterbi algorithm continues exactly like this, column by column, until the
end, and then just picks the path with the best score (above, that would be the
one with score 3).
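As a rough illustrative sketch (not part of the file; the table size and the
edge scores below are made up), the column-by-column update can be written in
C like this:

#include <limits.h>
#include <stdio.h>

#define ROWS 3
#define COLS 4

int main(void)
{
    /* score[c][i][j]: edge score from row i in column c to row j in
     * column c+1 (made-up numbers, purely for illustration) */
    static const int score[COLS - 1][ROWS][ROWS] = {
        {{5, 2, 7}, {1, 3, 9}, {4, 8, 6}},
        {{2, 6, 1}, {7, 2, 5}, {3, 9, 4}},
        {{8, 3, 2}, {1, 4, 6}, {5, 7, 2}},
    };
    int best[ROWS] = {0};   /* best path score reaching each row so far  */
    int prev[COLS][ROWS];   /* back-pointers for reconstructing the path */
    int c, i, j;

    for (c = 0; c + 1 < COLS; c++) {
        int next[ROWS];
        for (j = 0; j < ROWS; j++) {
            next[j] = INT_MAX;
            for (i = 0; i < ROWS; i++) {
                int cand = best[i] + score[c][i][j];
                if (cand < next[j]) {
                    next[j] = cand;
                    prev[c + 1][j] = i; /* remember where we came from */
                }
            }
        }
        for (j = 0; j < ROWS; j++)
            best[j] = next[j];
    }

    /* pick the endpoint with the lowest total score ... */
    j = 0;
    for (i = 1; i < ROWS; i++)
        if (best[i] < best[j])
            j = i;
    printf("best total score: %d\n", best[j]);

    /* ... and walk the back-pointers to print the path right to left */
    for (c = COLS - 1; c > 0; c--) {
        printf("column %d: row %d\n", c, j);
        j = prev[c][j];
    }
    printf("column 0: row %d\n", j);
    return 0;
}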
Author: Michael Niedermayer
Copyright LGPL

344
ffmpeg.c
View File

@@ -30,8 +30,6 @@
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <stdint.h>
#if HAVE_ISATTY
#if HAVE_IO_H
#include <io.h>
@@ -40,9 +38,9 @@
#include <unistd.h>
#endif
#endif
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
@@ -125,7 +123,6 @@ static int64_t getmaxrss(void);
static int run_as_daemon = 0;
static int64_t video_size = 0;
static int64_t audio_size = 0;
static int64_t data_size = 0;
static int64_t subtitle_size = 0;
static int64_t extra_size = 0;
static int nb_frames_dup = 0;
@@ -314,7 +311,6 @@ void term_exit(void)
static volatile int received_sigterm = 0;
static volatile int received_nb_signals = 0;
static int main_return_code = 0;
static void
sigterm_handler(int sig)
@@ -470,9 +466,7 @@ static void ffmpeg_cleanup(int ret)
bsfc = next;
}
output_streams[i]->bitstream_filters = NULL;
av_frame_free(&output_streams[i]->filtered_frame);
av_parser_close(output_streams[i]->parser);
avcodec_free_frame(&output_streams[i]->filtered_frame);
av_freep(&output_streams[i]->forced_keyframes);
av_expr_free(output_streams[i]->forced_keyframes_pexpr);
@@ -494,7 +488,6 @@ static void ffmpeg_cleanup(int ret)
avsubtitle_free(&input_streams[i]->prev_sub.subtitle);
av_frame_free(&input_streams[i]->sub2video.frame);
av_freep(&input_streams[i]->filters);
av_freep(&input_streams[i]->hwaccel_device);
av_freep(&input_streams[i]);
}
@@ -549,15 +542,6 @@ static void update_benchmark(const char *fmt, ...)
}
}
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
{
int i;
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost2 = output_streams[i];
ost2->finished |= ost == ost2 ? this_stream : others;
}
}
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
{
AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
@@ -658,8 +642,7 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
ret = av_interleaved_write_frame(s, pkt);
if (ret < 0) {
print_error("av_interleaved_write_frame()", ret);
main_return_code = 1;
close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
exit_program(1);
}
}
@@ -667,7 +650,7 @@ static void close_output_stream(OutputStream *ost)
{
OutputFile *of = output_files[ost->file_index];
ost->finished |= ENCODER_FINISHED;
ost->finished = 1;
if (of->shortest) {
int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, AV_TIME_BASE_Q);
of->recording_time = FFMIN(of->recording_time, end);
@@ -707,12 +690,6 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
av_assert0(pkt.size || !pkt.data);
update_benchmark(NULL);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
"frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
enc->time_base.num, enc->time_base.den);
}
if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
exit_program(1);
@@ -840,29 +817,10 @@ static void do_video_out(AVFormatContext *s,
nb_frames = 1;
format_video_sync = video_sync_method;
if (format_video_sync == VSYNC_AUTO) {
if(!strcmp(s->oformat->name, "avi")) {
format_video_sync = VSYNC_VFR;
} else
format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
if ( ist
&& format_video_sync == VSYNC_CFR
&& input_files[ist->file_index]->ctx->nb_streams == 1
&& input_files[ist->file_index]->input_ts_offset == 0) {
format_video_sync = VSYNC_VSCFR;
}
if (format_video_sync == VSYNC_CFR && copy_ts) {
format_video_sync = VSYNC_VSCFR;
}
}
if (format_video_sync == VSYNC_AUTO)
format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
switch (format_video_sync) {
case VSYNC_VSCFR:
if (ost->frame_number == 0 && delta - duration >= 0.5) {
av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
delta = duration;
ost->sync_opts = lrint(sync_ipts);
}
case VSYNC_CFR:
// FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
if (delta < -1.1)
@@ -987,13 +945,6 @@ static void do_video_out(AVFormatContext *s,
}
update_benchmark(NULL);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
"frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
enc->time_base.num, enc->time_base.den);
}
ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
if (ret < 0) {
@@ -1002,13 +953,6 @@ static void do_video_out(AVFormatContext *s,
}
if (got_packet) {
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
}
if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
pkt.pts = ost->sync_opts;
@@ -1105,21 +1049,19 @@ static int reap_filters(void)
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
OutputFile *of = output_files[ost->file_index];
AVFilterContext *filter;
AVCodecContext *enc = ost->st->codec;
int ret = 0;
if (!ost->filter)
continue;
filter = ost->filter->filter;
if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
return AVERROR(ENOMEM);
}
} else
avcodec_get_frame_defaults(ost->filtered_frame);
filtered_frame = ost->filtered_frame;
while (1) {
ret = av_buffersink_get_frame_flags(filter, filtered_frame,
ret = av_buffersink_get_frame_flags(ost->filter->filter, filtered_frame,
AV_BUFFERSINK_FLAG_NO_REQUEST);
if (ret < 0) {
if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
@@ -1128,38 +1070,32 @@ static int reap_filters(void)
}
break;
}
if (ost->finished) {
av_frame_unref(filtered_frame);
continue;
}
frame_pts = AV_NOPTS_VALUE;
if (filtered_frame->pts != AV_NOPTS_VALUE) {
int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
filtered_frame->pts = frame_pts =
av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
filtered_frame->pts = frame_pts = av_rescale_q(filtered_frame->pts,
ost->filter->filter->inputs[0]->time_base,
ost->st->codec->time_base) -
av_rescale_q(start_time,
AV_TIME_BASE_Q,
ost->st->codec->time_base);
}
//if (ost->source_index >= 0)
// *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
switch (filter->inputs[0]->type) {
switch (ost->filter->filter->inputs[0]->type) {
case AVMEDIA_TYPE_VIDEO:
filtered_frame->pts = frame_pts;
if (!ost->frame_aspect_ratio.num)
enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s time_base:%d/%d\n",
av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
enc->time_base.num, enc->time_base.den);
}
ost->st->codec->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
do_video_out(of->ctx, ost, filtered_frame);
break;
case AVMEDIA_TYPE_AUDIO:
filtered_frame->pts = frame_pts;
if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
enc->channels != av_frame_get_channels(filtered_frame)) {
if (!(ost->st->codec->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
ost->st->codec->channels != av_frame_get_channels(filtered_frame)) {
av_log(NULL, AV_LOG_ERROR,
"Audio filter graph output is not normalized and encoder does not support parameter changes\n");
break;
@@ -1340,21 +1276,16 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
}
if (is_last_report) {
int64_t raw = audio_size + video_size + data_size + subtitle_size + extra_size;
float percent = 0.0;
if (raw)
percent = 100.0 * (total_size - raw) / raw;
int64_t raw= audio_size + video_size + subtitle_size + extra_size;
av_log(NULL, AV_LOG_INFO, "\n");
av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0f data:%1.0f global headers:%1.0fkB muxing overhead %f%%\n",
av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0f global headers:%1.0fkB muxing overhead %f%%\n",
video_size / 1024.0,
audio_size / 1024.0,
subtitle_size / 1024.0,
data_size / 1024.0,
extra_size / 1024.0,
percent);
if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
100.0 * (total_size - raw) / raw
);
if(video_size + audio_size + subtitle_size + extra_size == 0){
av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
}
}
@@ -1400,7 +1331,6 @@ static void flush_encoders(void)
if (encode) {
AVPacket pkt;
int pkt_size;
int got_packet;
av_init_packet(&pkt);
pkt.data = NULL;
@@ -1413,6 +1343,7 @@ static void flush_encoders(void)
av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
exit_program(1);
}
*size += pkt.size;
if (ost->logfile && enc->stats_out) {
fprintf(ost->logfile, "%s", enc->stats_out);
}
@@ -1420,21 +1351,15 @@ static void flush_encoders(void)
stop_encoding = 1;
break;
}
if (ost->finished & MUXER_FINISHED) {
av_free_packet(&pkt);
continue;
}
*size += pkt.size;
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
if (pkt.duration > 0)
pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
pkt_size = pkt.size;
write_frame(os, &pkt, ost);
if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
do_video_stats(ost, pkt_size);
do_video_stats(ost, pkt.size);
}
}
@@ -1455,9 +1380,6 @@ static int check_output_constraints(InputStream *ist, OutputStream *ost)
if (ost->source_index != ist_index)
return 0;
if (ost->finished)
return 0;
if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
return 0;
@@ -1512,8 +1434,6 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
video_size += pkt->size;
ost->sync_opts++;
} else if (ost->st->codec->codec_type == AVMEDIA_TYPE_DATA) {
data_size += pkt->size;
} else if (ost->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
subtitle_size += pkt->size;
}
@@ -1547,10 +1467,7 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
&& ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
&& ost->st->codec->codec_id != AV_CODEC_ID_VC1
) {
if (av_parser_change(ost->parser, ost->st->codec,
&opkt.data, &opkt.size,
pkt->data, pkt->size,
pkt->flags & AV_PKT_FLAG_KEY)) {
if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY)) {
opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
if (!opkt.buf)
exit_program(1);
@@ -1559,7 +1476,6 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
opkt.data = pkt->data;
opkt.size = pkt->size;
}
av_copy_packet_side_data(&opkt, pkt);
if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
/* store AVPicture in AVPacket, as expected by the output format */
@@ -1600,7 +1516,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
int i, ret, err = 0, resample_changed;
AVRational decoded_frame_tb;
if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
return AVERROR(ENOMEM);
if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
return AVERROR(ENOMEM);
@@ -1736,6 +1652,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
{
AVFrame *decoded_frame, *f;
void *buffer_to_free = NULL;
int i, ret = 0, err = 0, resample_changed;
int64_t best_effort_timestamp;
AVRational *frame_sample_aspect;
@@ -1770,26 +1687,18 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
if(ist->top_field_first>=0)
decoded_frame->top_field_first = ist->top_field_first;
if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
err = ist->hwaccel_retrieve_data(ist->st->codec, decoded_frame);
if (err < 0)
goto fail;
}
ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
if(best_effort_timestamp != AV_NOPTS_VALUE)
ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
"frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
ist->st->index, av_ts2str(decoded_frame->pts),
av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
best_effort_timestamp,
av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
decoded_frame->key_frame, decoded_frame->pict_type,
ist->st->time_base.num, ist->st->time_base.den);
"frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d \n",
ist->st->index, av_ts2str(decoded_frame->pts),
av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
best_effort_timestamp,
av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
decoded_frame->key_frame, decoded_frame->pict_type);
}
pkt->size = 0;
@@ -1842,9 +1751,9 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
}
}
fail:
av_frame_unref(ist->filter_frame);
av_frame_unref(decoded_frame);
av_free(buffer_to_free);
return err < 0 ? err : ret;
}
@@ -1864,32 +1773,25 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
}
if (ist->fix_sub_duration) {
int end = 1;
if (ist->prev_sub.got_output) {
end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
1000, AV_TIME_BASE);
int end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
1000, AV_TIME_BASE);
if (end < ist->prev_sub.subtitle.end_display_time) {
av_log(ist->st->codec, AV_LOG_DEBUG,
"Subtitle duration reduced from %d to %d%s\n",
ist->prev_sub.subtitle.end_display_time, end,
end <= 0 ? ", dropping it" : "");
"Subtitle duration reduced from %d to %d\n",
ist->prev_sub.subtitle.end_display_time, end);
ist->prev_sub.subtitle.end_display_time = end;
}
}
FFSWAP(int, *got_output, ist->prev_sub.got_output);
FFSWAP(int, ret, ist->prev_sub.ret);
FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
if (end <= 0)
goto out;
}
if (!*got_output)
return ret;
sub2video_update(ist, &subtitle);
if (!subtitle.num_rects)
goto out;
if (!*got_output || !subtitle.num_rects)
return ret;
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
@@ -1900,7 +1802,6 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
}
out:
avsubtitle_free(&subtitle);
return ret;
}
@@ -2062,63 +1963,6 @@ static void print_sdp(void)
av_freep(&avc);
}
static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
{
int i;
for (i = 0; hwaccels[i].name; i++)
if (hwaccels[i].pix_fmt == pix_fmt)
return &hwaccels[i];
return NULL;
}
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
InputStream *ist = s->opaque;
const enum AVPixelFormat *p;
int ret;
for (p = pix_fmts; *p != -1; p++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
const HWAccel *hwaccel;
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
break;
hwaccel = get_hwaccel(*p);
if (!hwaccel ||
(ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
(ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
continue;
ret = hwaccel->init(s);
if (ret < 0) {
if (ist->hwaccel_id == hwaccel->id) {
av_log(NULL, AV_LOG_FATAL,
"%s hwaccel requested for input stream #%d:%d, "
"but cannot be initialized.\n", hwaccel->name,
ist->file_index, ist->st->index);
exit_program(1);
}
continue;
}
ist->active_hwaccel_id = hwaccel->id;
ist->hwaccel_pix_fmt = *p;
break;
}
return *p;
}
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
InputStream *ist = s->opaque;
if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
return ist->hwaccel_get_buffer(s, frame, flags);
return avcodec_default_get_buffer2(s, frame, flags);
}
static int init_input_stream(int ist_index, char *error, int error_len)
{
int ret;
@@ -2132,23 +1976,21 @@ static int init_input_stream(int ist_index, char *error, int error_len)
return AVERROR(EINVAL);
}
ist->st->codec->opaque = ist;
ist->st->codec->get_format = get_format;
ist->st->codec->get_buffer2 = get_buffer;
ist->st->codec->thread_safe_callbacks = 1;
av_opt_set_int(ist->st->codec, "refcounted_frames", 1, 0);
if (!av_dict_get(ist->opts, "threads", NULL, 0))
av_dict_set(&ist->opts, "threads", "auto", 0);
if ((ret = avcodec_open2(ist->st->codec, codec, &ist->opts)) < 0) {
char errbuf[128];
if (ret == AVERROR_EXPERIMENTAL)
abort_codec_experimental(codec, 0);
av_strerror(ret, errbuf, sizeof(errbuf));
snprintf(error, error_len,
"Error while opening decoder for input stream "
"#%d:%d : %s",
ist->file_index, ist->st->index, av_err2str(ret));
ist->file_index, ist->st->index, errbuf);
return ret;
}
assert_avoptions(ist->opts);
@@ -2156,6 +1998,7 @@ static int init_input_stream(int ist_index, char *error, int error_len)
ist->next_pts = AV_NOPTS_VALUE;
ist->next_dts = AV_NOPTS_VALUE;
ist->is_start = 1;
return 0;
}
@@ -2415,8 +2258,6 @@ static int transcode_init(void)
av_reduce(&codec->time_base.num, &codec->time_base.den,
codec->time_base.num, codec->time_base.den, INT_MAX);
ost->parser = av_parser_init(codec->codec_id);
switch (codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
if (audio_volume != 256) {
@@ -2429,7 +2270,6 @@ static int transcode_init(void)
codec->frame_size = icodec->frame_size;
codec->audio_service_type = icodec->audio_service_type;
codec->block_align = icodec->block_align;
codec->delay = icodec->delay;
if((codec->block_align == 1 || codec->block_align == 1152 || codec->block_align == 576) && codec->codec_id == AV_CODEC_ID_MP3)
codec->block_align= 0;
if(codec->codec_id == AV_CODEC_ID_AC3)
@@ -2496,25 +2336,12 @@ static int transcode_init(void)
if (ist && !ost->frame_rate.num)
ost->frame_rate = ist->framerate;
if (ist && !ost->frame_rate.num)
ost->frame_rate = ist->st->r_frame_rate;
if (ist && !ost->frame_rate.num) {
ost->frame_rate = (AVRational){25, 1};
av_log(NULL, AV_LOG_WARNING,
"No information "
"about the input framerate is available. Falling "
"back to a default value of 25fps for output stream #%d:%d. Use the -r option "
"if you want a different framerate.\n",
ost->file_index, ost->index);
}
ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
// ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
ost->frame_rate = ost->enc->supported_framerates[idx];
}
if (codec->codec_id == AV_CODEC_ID_MPEG4) {
av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
ost->frame_rate.num, ost->frame_rate.den, 65535);
}
}
switch (codec->codec_type) {
@@ -2530,7 +2357,7 @@ static int transcode_init(void)
if (ost->filter && !(codec->time_base.num && codec->time_base.den))
codec->time_base = ost->filter->filter->inputs[0]->time_base;
if ( av_q2d(codec->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
&& (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
&& (video_sync_method == VSYNC_CFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
"Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
}
@@ -2620,7 +2447,7 @@ static int transcode_init(void)
codec->stats_in = logbuffer;
}
if (codec->flags & CODEC_FLAG_PASS1) {
f = av_fopen_utf8(logfilename, "wb");
f = fopen(logfilename, "wb");
if (!f) {
av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
logfilename, strerror(errno));
@@ -2670,6 +2497,9 @@ static int transcode_init(void)
av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
" It takes bits/s as argument, not kbits/s\n");
extra_size += ost->st->codec->extradata_size;
if (ost->st->codec->me_threshold)
input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
} else {
av_opt_set_dict(ost->st->codec, &ost->opts);
}
@@ -2706,10 +2536,12 @@ static int transcode_init(void)
oc = output_files[i]->ctx;
oc->interrupt_callback = int_cb;
if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
char errbuf[128];
av_strerror(ret, errbuf, sizeof(errbuf));
snprintf(error, sizeof(error),
"Could not write header for output file #%d "
"(incorrect codec parameters ?): %s",
i, av_err2str(ret));
i, errbuf);
ret = AVERROR(EINVAL);
goto dump_format;
}
@@ -2963,15 +2795,11 @@ static void *input_thread(void *arg)
av_dup_packet(&pkt);
av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
pthread_cond_signal(&f->fifo_cond);
pthread_mutex_unlock(&f->fifo_lock);
}
pthread_mutex_lock(&f->fifo_lock);
f->finished = 1;
pthread_cond_signal(&f->fifo_cond);
pthread_mutex_unlock(&f->fifo_lock);
return NULL;
}
@@ -3023,10 +2851,6 @@ static int init_input_threads(void)
if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
return AVERROR(ENOMEM);
if (f->ctx->pb ? !f->ctx->pb->seekable :
strcmp(f->ctx->iformat->name, "lavfi"))
f->non_blocking = 1;
pthread_mutex_init(&f->fifo_lock, NULL);
pthread_cond_init (&f->fifo_cond, NULL);
@@ -3042,22 +2866,14 @@ static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
pthread_mutex_lock(&f->fifo_lock);
while (1) {
if (av_fifo_size(f->fifo)) {
av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
pthread_cond_signal(&f->fifo_cond);
break;
} else {
if (f->finished) {
if (f->finished)
ret = AVERROR_EOF;
break;
}
if (f->non_blocking) {
else
ret = AVERROR(EAGAIN);
break;
}
pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
}
}
pthread_mutex_unlock(&f->fifo_lock);
@@ -3248,21 +3064,22 @@ static int process_input(int file_index)
int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
int64_t delta = pkt_dts - ist->next_dts;
if (is->iformat->flags & AVFMT_TS_DISCONT) {
if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
(delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
ifile->ts_offset -= delta;
av_log(NULL, AV_LOG_DEBUG,
"timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
delta, ifile->ts_offset);
pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
}
if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
(delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
pkt_dts + AV_TIME_BASE/10 < ist->pts){
ifile->ts_offset -= delta;
av_log(NULL, AV_LOG_DEBUG,
"timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
delta, ifile->ts_offset);
pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
}
} else {
if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
(delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
(delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)
) {
av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
pkt.dts = AV_NOPTS_VALUE;
}
@@ -3270,7 +3087,8 @@ static int process_input(int file_index)
int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
delta = pkt_pts - ist->next_dts;
if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
(delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
(delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE)
) {
av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
pkt.pts = AV_NOPTS_VALUE;
}
@@ -3294,8 +3112,10 @@ static int process_input(int file_index)
ret = output_packet(ist, &pkt);
if (ret < 0) {
char buf[128];
av_strerror(ret, buf, sizeof(buf));
av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
ist->file_index, ist->st->index, av_err2str(ret));
ist->file_index, ist->st->index, buf);
if (exit_on_error)
exit_program(1);
}
@@ -3488,8 +3308,6 @@ static int transcode(void)
ist = input_streams[i];
if (ist->decoding_needed) {
avcodec_close(ist->st->codec);
if (ist->hwaccel_uninit)
ist->hwaccel_uninit(ist->st->codec);
}
}
@@ -3629,6 +3447,6 @@ int main(int argc, char **argv)
if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
exit_program(69);
exit_program(received_nb_signals ? 255 : main_return_code);
return main_return_code;
exit_program(received_nb_signals ? 255 : 0);
return 0;
}

View File

@@ -51,24 +51,10 @@
#define VSYNC_PASSTHROUGH 0
#define VSYNC_CFR 1
#define VSYNC_VFR 2
#define VSYNC_VSCFR 0xfe
#define VSYNC_DROP 0xff
#define MAX_STREAMS 1024 /* arbitrary sanity check value */
enum HWAccelID {
HWACCEL_NONE = 0,
HWACCEL_AUTO,
HWACCEL_VDPAU,
};
typedef struct HWAccel {
const char *name;
int (*init)(AVCodecContext *s);
enum HWAccelID id;
enum AVPixelFormat pix_fmt;
} HWAccel;
/* select an input stream for an output stream */
typedef struct StreamMap {
int disabled; /* 1 is this mapping is disabled by a negative map */
@@ -113,10 +99,6 @@ typedef struct OptionsContext {
int nb_ts_scale;
SpecifierOpt *dump_attachment;
int nb_dump_attachment;
SpecifierOpt *hwaccels;
int nb_hwaccels;
SpecifierOpt *hwaccel_devices;
int nb_hwaccel_devices;
/* output options */
StreamMap *stream_maps;
@@ -171,8 +153,6 @@ typedef struct OptionsContext {
int nb_intra_matrices;
SpecifierOpt *inter_matrices;
int nb_inter_matrices;
SpecifierOpt *chroma_intra_matrices;
int nb_chroma_intra_matrices;
SpecifierOpt *top_field_first;
int nb_top_field_first;
SpecifierOpt *metadata_map;
@@ -255,6 +235,7 @@ typedef struct InputStream {
int64_t filter_in_rescale_delta_last;
double ts_scale;
int is_start; /* is 1 at the start and after a discontinuity */
int saw_first_ts;
int showed_multi_packet_warning;
AVDictionary *opts;
@@ -293,19 +274,6 @@ typedef struct InputStream {
int nb_filters;
int reinit_filters;
/* hwaccel options */
enum HWAccelID hwaccel_id;
char *hwaccel_device;
/* hwaccel context */
enum HWAccelID active_hwaccel_id;
void *hwaccel_ctx;
void (*hwaccel_uninit)(AVCodecContext *s);
int (*hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags);
int (*hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame);
enum AVPixelFormat hwaccel_pix_fmt;
enum AVPixelFormat hwaccel_retrieved_pix_fmt;
} InputStream;
typedef struct InputFile {
@@ -313,7 +281,6 @@ typedef struct InputFile {
int eof_reached; /* true if eof reached */
int eagain; /* true if last read attempt returned EAGAIN */
int ist_index; /* index of first stream in input_streams */
int64_t input_ts_offset;
int64_t ts_offset;
int64_t last_ts;
int64_t start_time; /* user-specified start time in AV_TIME_BASE or AV_NOPTS_VALUE */
@@ -326,7 +293,6 @@ typedef struct InputFile {
#if HAVE_PTHREADS
pthread_t thread; /* thread reading from this file */
int non_blocking; /* reading packets from the thread should not block */
int finished; /* the thread has exited */
int joined; /* the thread has been joined */
pthread_mutex_t fifo_lock; /* lock for access to fifo */
@@ -346,11 +312,6 @@ enum forced_keyframes_const {
extern const char *const forced_keyframes_const_names[];
typedef enum {
ENCODER_FINISHED = 1,
MUXER_FINISHED = 2,
} OSTFinished ;
typedef struct OutputStream {
int file_index; /* file index */
int index; /* stream index in the output file */
@@ -396,15 +357,13 @@ typedef struct OutputStream {
OutputFilter *filter;
char *avfilter;
char *filters; ///< filtergraph associated to the -filter option
char *filters_script; ///< filtergraph script associated to the -filter_script option
int64_t sws_flags;
AVDictionary *opts;
AVDictionary *swr_opts;
AVDictionary *resample_opts;
char *apad;
OSTFinished finished; /* no more packets should be written for this stream */
int finished; /* no more packets should be written for this stream */
int unavailable; /* true if the stream is unavailable (possibly temporarily) */
int stream_copy;
const char *attachment_filename;
@@ -412,8 +371,6 @@ typedef struct OutputStream {
int copy_prior_start;
int keep_pix_fmt;
AVCodecParserContext *parser;
} OutputStream;
typedef struct OutputFile {
@@ -468,8 +425,6 @@ extern float max_error_rate;
extern const AVIOInterruptCB int_cb;
extern const OptionDef options[];
extern const HWAccel hwaccels[];
void term_init(void);
void term_exit(void);
@@ -493,6 +448,4 @@ FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost);
int ffmpeg_parse_options(int argc, char **argv);
int vdpau_init(AVCodecContext *s);
#endif /* FFMPEG_H */

View File

@@ -18,8 +18,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "ffmpeg.h"
#include "libavfilter/avfilter.h"
@@ -656,8 +654,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
av_bprintf(&args,
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
"pixel_aspect=%d/%d:sws_param=flags=%d", ist->resample_width,
ist->resample_height,
ist->hwaccel_retrieve_data ? ist->hwaccel_retrieved_pix_fmt : ist->resample_pix_fmt,
ist->resample_height, ist->resample_pix_fmt,
tb.num, tb.den, sar.num, sar.den,
SWS_BILINEAR + ((ist->st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
if (fr.num && fr.den)

View File

@@ -62,14 +62,6 @@
outvar = o->name[i].u.type;\
}\
}
const HWAccel hwaccels[] = {
#if HAVE_VDPAU_X11
{ "vdpau", vdpau_init, HWACCEL_VDPAU, AV_PIX_FMT_VDPAU },
#endif
{ 0 },
};
char *vstats_filename;
float audio_drift_threshold = 0.1;
@@ -559,14 +551,13 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
{
int i;
char *next, *codec_tag = NULL;
for (i = 0; i < ic->nb_streams; i++) {
AVStream *st = ic->streams[i];
AVCodecContext *dec = st->codec;
InputStream *ist = av_mallocz(sizeof(*ist));
char *framerate = NULL, *hwaccel = NULL, *hwaccel_device = NULL;
char *codec_tag = NULL;
char *next;
char *framerate = NULL;
if (!ist)
exit_program(1);
@@ -621,41 +612,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
ist->top_field_first = -1;
MATCH_PER_STREAM_OPT(top_field_first, i, ist->top_field_first, ic, st);
MATCH_PER_STREAM_OPT(hwaccels, str, hwaccel, ic, st);
if (hwaccel) {
if (!strcmp(hwaccel, "none"))
ist->hwaccel_id = HWACCEL_NONE;
else if (!strcmp(hwaccel, "auto"))
ist->hwaccel_id = HWACCEL_AUTO;
else {
int i;
for (i = 0; hwaccels[i].name; i++) {
if (!strcmp(hwaccels[i].name, hwaccel)) {
ist->hwaccel_id = hwaccels[i].id;
break;
}
}
if (!ist->hwaccel_id) {
av_log(NULL, AV_LOG_FATAL, "Unrecognized hwaccel: %s.\n",
hwaccel);
av_log(NULL, AV_LOG_FATAL, "Supported hwaccels: ");
for (i = 0; hwaccels[i].name; i++)
av_log(NULL, AV_LOG_FATAL, "%s ", hwaccels[i].name);
av_log(NULL, AV_LOG_FATAL, "\n");
exit_program(1);
}
}
}
MATCH_PER_STREAM_OPT(hwaccel_devices, str, hwaccel_device, ic, st);
if (hwaccel_device) {
ist->hwaccel_device = av_strdup(hwaccel_device);
if (!ist->hwaccel_device)
exit_program(1);
}
ist->hwaccel_pix_fmt = AV_PIX_FMT_NONE;
break;
case AVMEDIA_TYPE_AUDIO:
ist->guess_layout_max = INT_MAX;
@@ -698,9 +654,10 @@ static void assert_file_overwrite(const char *filename)
exit_program(1);
}
if (!file_overwrite) {
const char *proto_name = avio_find_protocol_name(filename);
if (proto_name && !strcmp(proto_name, "file") && avio_check(filename, 0) == 0) {
if (!file_overwrite &&
(strchr(filename, ':') == NULL || filename[1] == ':' ||
av_strstart(filename, "file:", NULL))) {
if (avio_check(filename, 0) == 0) {
if (stdin_interaction && !no_file_overwrite) {
fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
fflush(stderr);
@@ -895,7 +852,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
f->ist_index = nb_input_streams - ic->nb_streams;
f->start_time = o->start_time;
f->recording_time = o->recording_time;
f->input_ts_offset = o->input_ts_offset;
f->ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
f->nb_streams = ic->nb_streams;
f->rate_emu = o->rate_emu;
@@ -915,13 +871,8 @@ static int open_input_file(OptionsContext *o, const char *filename)
const AVClass *class = avcodec_get_class();
const AVOption *option = av_opt_find(&class, e->key, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
const AVClass *fclass = avformat_get_class();
const AVOption *foption = av_opt_find(&fclass, e->key, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
if (!option || foption)
if (!option)
continue;
if (!(option->flags & AV_OPT_FLAG_DECODING_PARAM)) {
av_log(NULL, AV_LOG_ERROR, "Codec AVOption %s (%s) specified for "
"input file #%d (%s) is not a decoding option.\n", e->key,
@@ -978,14 +929,14 @@ static uint8_t *get_line(AVIOContext *s)
static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
{
int i, ret = -1;
int i, ret = 1;
char filename[1000];
const char *base[3] = { getenv("AVCONV_DATADIR"),
getenv("HOME"),
AVCONV_DATADIR,
};
for (i = 0; i < FF_ARRAY_ELEMS(base) && ret < 0; i++) {
for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
if (!base[i])
continue;
if (codec_name) {
@@ -993,7 +944,7 @@ static int get_preset_file_2(const char *preset_name, const char *codec_name, AV
i != 1 ? "" : "/.avconv", codec_name, preset_name);
ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
}
if (ret < 0) {
if (ret) {
snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
i != 1 ? "" : "/.avconv", preset_name);
ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
@@ -1200,36 +1151,26 @@ static char *get_ost_filters(OptionsContext *o, AVFormatContext *oc,
OutputStream *ost)
{
AVStream *st = ost->st;
char *filter = NULL, *filter_script = NULL;
if (ost->filters_script && ost->filters) {
MATCH_PER_STREAM_OPT(filter_scripts, str, filter_script, oc, st);
MATCH_PER_STREAM_OPT(filters, str, filter, oc, st);
if (filter_script && filter) {
av_log(NULL, AV_LOG_ERROR, "Both -filter and -filter_script set for "
"output stream #%d:%d.\n", nb_output_files, st->index);
exit_program(1);
}
if (ost->filters_script)
return read_file(ost->filters_script);
else if (ost->filters)
return av_strdup(ost->filters);
if (filter_script)
return read_file(filter_script);
else if (filter)
return av_strdup(filter);
return av_strdup(st->codec->codec_type == AVMEDIA_TYPE_VIDEO ?
"null" : "anull");
}
static void check_streamcopy_filters(OptionsContext *o, AVFormatContext *oc,
const OutputStream *ost, enum AVMediaType type)
{
if (ost->filters_script || ost->filters) {
av_log(NULL, AV_LOG_ERROR,
"%s '%s' was defined for %s output stream %d:%d but codec copy was selected.\n"
"Filtering and streamcopy cannot be used together.\n",
ost->filters ? "Filtergraph" : "Filtergraph script",
ost->filters ? ost->filters : ost->filters_script,
av_get_media_type_string(type), ost->file_index, ost->index);
exit_program(1);
}
}
static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, int source_index)
{
AVStream *st;
@@ -1258,15 +1199,11 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
ost->frame_aspect_ratio = q;
}
MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st);
MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st);
if (!ost->stream_copy) {
const char *p = NULL;
char *frame_size = NULL;
char *frame_pix_fmt = NULL;
char *intra_matrix = NULL, *inter_matrix = NULL;
char *chroma_intra_matrix = NULL;
int do_pass = 0;
int i;
@@ -1299,16 +1236,6 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
}
parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
}
MATCH_PER_STREAM_OPT(chroma_intra_matrices, str, chroma_intra_matrix, oc, st);
if (chroma_intra_matrix) {
uint16_t *p = av_mallocz(sizeof(*video_enc->chroma_intra_matrix) * 64);
if (!p) {
av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
exit_program(1);
}
av_codec_set_chroma_intra_matrix(video_enc, p);
parse_matrix_coeffs(p, chroma_intra_matrix);
}
MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
if (inter_matrix) {
if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
@@ -1384,9 +1311,6 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
}
if (ost->stream_copy)
check_streamcopy_filters(o, oc, ost, AVMEDIA_TYPE_VIDEO);
return ost;
}
@@ -1403,9 +1327,6 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, in
audio_enc = st->codec;
audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st);
MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st);
if (!ost->stream_copy) {
char *sample_fmt = NULL;
@@ -1443,9 +1364,6 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, in
}
}
if (ost->stream_copy)
check_streamcopy_filters(o, oc, ost, AVMEDIA_TYPE_AUDIO);
return ost;
}
@@ -1630,18 +1548,6 @@ static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
exit_program(1);
}
if (ost->avfilter && (ost->filters || ost->filters_script)) {
const char *opt = ost->filters ? "-vf/-af/-filter" : "-filter_script";
av_log(NULL, AV_LOG_ERROR,
"%s '%s' was specified through the %s option "
"for output stream %d:%d, which is fed from a complex filtergraph.\n"
"%s and -filter_complex cannot be used together for the same stream.\n",
ost->filters ? "Filtergraph" : "Filtergraph script",
ost->filters ? ost->filters : ost->filters_script,
opt, ost->file_index, ost->index, opt);
exit_program(1);
}
if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
exit_program(1);
@@ -2149,22 +2055,23 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
opt_video_codec(o, "c:v", "mpeg1video");
opt_audio_codec(o, "c:a", "mp2");
parse_option(o, "f", "vcd", options);
av_dict_set(&o->g->codec_opts, "b:v", arg, 0);
parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
parse_option(o, "r", frame_rates[norm], options);
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", 0);
av_dict_set(&o->g->codec_opts, "b:v", "1150000", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "maxrate", "1150000", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "minrate", "1150000", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "bufsize", "327680", AV_DICT_DONT_OVERWRITE); // 40*1024*8;
av_dict_set(&o->g->codec_opts, "b:v", "1150000", 0);
av_dict_set(&o->g->codec_opts, "maxrate", "1150000", 0);
av_dict_set(&o->g->codec_opts, "minrate", "1150000", 0);
av_dict_set(&o->g->codec_opts, "bufsize", "327680", 0); // 40*1024*8;
av_dict_set(&o->g->codec_opts, "b:a", "224000", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "b:a", "224000", 0);
parse_option(o, "ar", "44100", options);
parse_option(o, "ac", "2", options);
av_dict_set(&o->g->format_opts, "packetsize", "2324", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->format_opts, "muxrate", "1411200", AV_DICT_DONT_OVERWRITE); // 2352 * 75 * 8;
av_dict_set(&o->g->format_opts, "packetsize", "2324", 0);
av_dict_set(&o->g->format_opts, "muxrate", "1411200", 0); // 2352 * 75 * 8;
/* We have to offset the PTS, so that it is consistent with the SCR.
SCR starts at 36000, but the first two packs contain only padding
@@ -2181,18 +2088,18 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
parse_option(o, "r", frame_rates[norm], options);
parse_option(o, "pix_fmt", "yuv420p", options);
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", 0);
av_dict_set(&o->g->codec_opts, "b:v", "2040000", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "maxrate", "2516000", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "minrate", "0", AV_DICT_DONT_OVERWRITE); // 1145000;
av_dict_set(&o->g->codec_opts, "bufsize", "1835008", AV_DICT_DONT_OVERWRITE); // 224*1024*8;
av_dict_set(&o->g->codec_opts, "scan_offset", "1", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "b:v", "2040000", 0);
av_dict_set(&o->g->codec_opts, "maxrate", "2516000", 0);
av_dict_set(&o->g->codec_opts, "minrate", "0", 0); // 1145000;
av_dict_set(&o->g->codec_opts, "bufsize", "1835008", 0); // 224*1024*8;
av_dict_set(&o->g->codec_opts, "scan_offset", "1", 0);
av_dict_set(&o->g->codec_opts, "b:a", "224000", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "b:a", "224000", 0);
parse_option(o, "ar", "44100", options);
av_dict_set(&o->g->format_opts, "packetsize", "2324", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->format_opts, "packetsize", "2324", 0);
} else if (!strcmp(arg, "dvd")) {
@@ -2203,17 +2110,17 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
parse_option(o, "r", frame_rates[norm], options);
parse_option(o, "pix_fmt", "yuv420p", options);
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", 0);
av_dict_set(&o->g->codec_opts, "b:v", "6000000", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "maxrate", "9000000", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "minrate", "0", AV_DICT_DONT_OVERWRITE); // 1500000;
av_dict_set(&o->g->codec_opts, "bufsize", "1835008", AV_DICT_DONT_OVERWRITE); // 224*1024*8;
av_dict_set(&o->g->codec_opts, "b:v", "6000000", 0);
av_dict_set(&o->g->codec_opts, "maxrate", "9000000", 0);
av_dict_set(&o->g->codec_opts, "minrate", "0", 0); // 1500000;
av_dict_set(&o->g->codec_opts, "bufsize", "1835008", 0); // 224*1024*8;
av_dict_set(&o->g->format_opts, "packetsize", "2048", AV_DICT_DONT_OVERWRITE); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
av_dict_set(&o->g->format_opts, "muxrate", "10080000", AV_DICT_DONT_OVERWRITE); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
av_dict_set(&o->g->format_opts, "packetsize", "2048", 0); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
av_dict_set(&o->g->format_opts, "muxrate", "10080000", 0); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
av_dict_set(&o->g->codec_opts, "b:a", "448000", AV_DICT_DONT_OVERWRITE);
av_dict_set(&o->g->codec_opts, "b:a", "448000", 0);
parse_option(o, "ar", "48000", options);
} else if (!strncmp(arg, "dv", 2)) {
@@ -2781,7 +2688,7 @@ const OptionDef options[] = {
{ "frames", OPT_INT64 | HAS_ARG | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(max_frames) },
"set the number of frames to record", "number" },
{ "tag", OPT_STRING | HAS_ARG | OPT_SPEC |
OPT_EXPERT | OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(codec_tags) },
OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(codec_tags) },
"force codec tag/fourcc", "fourcc/tag" },
{ "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE |
OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(qscale) },
@@ -2872,9 +2779,6 @@ const OptionDef options[] = {
{ "inter_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
OPT_OUTPUT, { .off = OFFSET(inter_matrices) },
"specify inter matrix coeffs", "matrix" },
{ "chroma_intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
OPT_OUTPUT, { .off = OFFSET(chroma_intra_matrices) },
"specify intra matrix coeffs", "matrix" },
{ "top", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_INT| OPT_SPEC |
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(top_field_first) },
"top=1/bottom=0/auto=-1 field first", "" },
@@ -2896,12 +2800,6 @@ const OptionDef options[] = {
"force key frames at specified timestamps", "timestamps" },
{ "b", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
"video bitrate (please use -b:v)", "bitrate" },
{ "hwaccel", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccels) },
"use HW accelerated decoding", "hwaccel name" },
{ "hwaccel_device", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_devices) },
"select a device for HW acceleration" "devicename" },
/* audio options */
{ "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },

View File

@@ -1,335 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <vdpau/vdpau.h>
#include <vdpau/vdpau_x11.h>
#include <X11/Xlib.h>
#include "ffmpeg.h"
#include "libavcodec/vdpau.h"
#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/frame.h"
#include "libavutil/pixfmt.h"
typedef struct VDPAUContext {
Display *dpy;
VdpDevice device;
VdpDecoder decoder;
VdpGetProcAddress *get_proc_address;
VdpGetErrorString *get_error_string;
VdpGetInformationString *get_information_string;
VdpDeviceDestroy *device_destroy;
VdpDecoderCreate *decoder_create;
VdpDecoderDestroy *decoder_destroy;
VdpDecoderRender *decoder_render;
VdpVideoSurfaceCreate *video_surface_create;
VdpVideoSurfaceDestroy *video_surface_destroy;
VdpVideoSurfaceGetBitsYCbCr *video_surface_get_bits;
VdpVideoSurfaceGetParameters *video_surface_get_parameters;
VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities *video_surface_query;
AVFrame *tmp_frame;
enum AVPixelFormat pix_fmt;
VdpYCbCrFormat vdpau_format;
} VDPAUContext;
static void vdpau_uninit(AVCodecContext *s)
{
InputStream *ist = s->opaque;
VDPAUContext *ctx = ist->hwaccel_ctx;
ist->hwaccel_uninit = NULL;
ist->hwaccel_get_buffer = NULL;
ist->hwaccel_retrieve_data = NULL;
if (ctx->decoder_destroy)
ctx->decoder_destroy(ctx->decoder);
if (ctx->device_destroy)
ctx->device_destroy(ctx->device);
if (ctx->dpy)
XCloseDisplay(ctx->dpy);
av_frame_free(&ctx->tmp_frame);
av_freep(&ist->hwaccel_ctx);
av_freep(&s->hwaccel_context);
}
static void vdpau_release_buffer(void *opaque, uint8_t *data)
{
VdpVideoSurface surface = *(VdpVideoSurface*)data;
VDPAUContext *ctx = opaque;
ctx->video_surface_destroy(surface);
av_freep(&data);
}
static int vdpau_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
InputStream *ist = s->opaque;
VDPAUContext *ctx = ist->hwaccel_ctx;
VdpVideoSurface *surface;
VdpStatus err;
av_assert0(frame->format == AV_PIX_FMT_VDPAU);
surface = av_malloc(sizeof(*surface));
if (!surface)
return AVERROR(ENOMEM);
frame->buf[0] = av_buffer_create((uint8_t*)surface, sizeof(*surface),
vdpau_release_buffer, ctx,
AV_BUFFER_FLAG_READONLY);
if (!frame->buf[0]) {
av_freep(&surface);
return AVERROR(ENOMEM);
}
// properly we should keep a pool of surfaces instead of creating
// them anew for each frame, but since we don't care about speed
// much in this code, we don't bother
err = ctx->video_surface_create(ctx->device, VDP_CHROMA_TYPE_420,
frame->width, frame->height, surface);
if (err != VDP_STATUS_OK) {
av_log(NULL, AV_LOG_ERROR, "Error allocating a VDPAU video surface: %s\n",
ctx->get_error_string(err));
av_buffer_unref(&frame->buf[0]);
return AVERROR_UNKNOWN;
}
frame->data[3] = (uint8_t*)(uintptr_t)*surface;
return 0;
}
static int vdpau_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
VdpVideoSurface surface = (VdpVideoSurface)(uintptr_t)frame->data[3];
InputStream *ist = s->opaque;
VDPAUContext *ctx = ist->hwaccel_ctx;
VdpStatus err;
int ret, chroma_type;
err = ctx->video_surface_get_parameters(surface, &chroma_type,
&ctx->tmp_frame->width,
&ctx->tmp_frame->height);
if (err != VDP_STATUS_OK) {
av_log(NULL, AV_LOG_ERROR, "Error getting surface parameters: %s\n",
ctx->get_error_string(err));
return AVERROR_UNKNOWN;
}
ctx->tmp_frame->format = ctx->pix_fmt;
ret = av_frame_get_buffer(ctx->tmp_frame, 32);
if (ret < 0)
return ret;
ctx->tmp_frame->width = frame->width;
ctx->tmp_frame->height = frame->height;
err = ctx->video_surface_get_bits(surface, ctx->vdpau_format,
(void * const *)ctx->tmp_frame->data,
ctx->tmp_frame->linesize);
if (err != VDP_STATUS_OK) {
av_log(NULL, AV_LOG_ERROR, "Error retrieving frame data from VDPAU: %s\n",
ctx->get_error_string(err));
ret = AVERROR_UNKNOWN;
goto fail;
}
if (ctx->vdpau_format == VDP_YCBCR_FORMAT_YV12)
FFSWAP(uint8_t*, ctx->tmp_frame->data[1], ctx->tmp_frame->data[2]);
ret = av_frame_copy_props(ctx->tmp_frame, frame);
if (ret < 0)
goto fail;
av_frame_unref(frame);
av_frame_move_ref(frame, ctx->tmp_frame);
return 0;
fail:
av_frame_unref(ctx->tmp_frame);
return ret;
}
static const int vdpau_formats[][2] = {
{ VDP_YCBCR_FORMAT_YV12, AV_PIX_FMT_YUV420P },
{ VDP_YCBCR_FORMAT_NV12, AV_PIX_FMT_NV12 },
{ VDP_YCBCR_FORMAT_YUYV, AV_PIX_FMT_YUYV422 },
{ VDP_YCBCR_FORMAT_UYVY, AV_PIX_FMT_UYVY422 },
};
static int vdpau_alloc(AVCodecContext *s)
{
InputStream *ist = s->opaque;
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
AVVDPAUContext *vdpau_ctx;
VDPAUContext *ctx;
const char *display, *vendor;
VdpStatus err;
int i;
ctx = av_mallocz(sizeof(*ctx));
if (!ctx)
return AVERROR(ENOMEM);
ist->hwaccel_ctx = ctx;
ist->hwaccel_uninit = vdpau_uninit;
ist->hwaccel_get_buffer = vdpau_get_buffer;
ist->hwaccel_retrieve_data = vdpau_retrieve_data;
ctx->tmp_frame = av_frame_alloc();
if (!ctx->tmp_frame)
goto fail;
ctx->dpy = XOpenDisplay(ist->hwaccel_device);
if (!ctx->dpy) {
av_log(NULL, loglevel, "Cannot open the X11 display %s.\n",
XDisplayName(ist->hwaccel_device));
goto fail;
}
display = XDisplayString(ctx->dpy);
err = vdp_device_create_x11(ctx->dpy, XDefaultScreen(ctx->dpy), &ctx->device,
&ctx->get_proc_address);
if (err != VDP_STATUS_OK) {
av_log(NULL, loglevel, "VDPAU device creation on X11 display %s failed.\n",
display);
goto fail;
}
#define GET_CALLBACK(id, result) \
do { \
void *tmp; \
err = ctx->get_proc_address(ctx->device, id, &tmp); \
if (err != VDP_STATUS_OK) { \
av_log(NULL, loglevel, "Error getting the " #id " callback.\n"); \
goto fail; \
} \
ctx->result = tmp; \
} while (0)
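/* Note: the do { ... } while (0) wrapper makes each GET_CALLBACK() expand to
 * exactly one statement, so the goto-on-error body stays safe inside
 * unbraced if/else branches. */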
GET_CALLBACK(VDP_FUNC_ID_GET_ERROR_STRING, get_error_string);
GET_CALLBACK(VDP_FUNC_ID_GET_INFORMATION_STRING, get_information_string);
GET_CALLBACK(VDP_FUNC_ID_DEVICE_DESTROY, device_destroy);
GET_CALLBACK(VDP_FUNC_ID_DECODER_CREATE, decoder_create);
GET_CALLBACK(VDP_FUNC_ID_DECODER_DESTROY, decoder_destroy);
GET_CALLBACK(VDP_FUNC_ID_DECODER_RENDER, decoder_render);
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_CREATE, video_surface_create);
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_DESTROY, video_surface_destroy);
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR, video_surface_get_bits);
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS, video_surface_get_parameters);
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES,
video_surface_query);
for (i = 0; i < FF_ARRAY_ELEMS(vdpau_formats); i++) {
VdpBool supported;
err = ctx->video_surface_query(ctx->device, VDP_CHROMA_TYPE_420,
vdpau_formats[i][0], &supported);
if (err != VDP_STATUS_OK) {
av_log(NULL, loglevel,
"Error querying VDPAU surface capabilities: %s\n",
ctx->get_error_string(err));
goto fail;
}
if (supported)
break;
}
if (i == FF_ARRAY_ELEMS(vdpau_formats)) {
av_log(NULL, loglevel,
"No supported VDPAU format for retrieving the data.\n");
return AVERROR(EINVAL);
}
ctx->vdpau_format = vdpau_formats[i][0];
ctx->pix_fmt = vdpau_formats[i][1];
vdpau_ctx = av_vdpau_alloc_context();
if (!vdpau_ctx)
goto fail;
vdpau_ctx->render = ctx->decoder_render;
s->hwaccel_context = vdpau_ctx;
ctx->get_information_string(&vendor);
av_log(NULL, AV_LOG_VERBOSE, "Using VDPAU -- %s -- on X11 display %s, "
"to decode input stream #%d:%d.\n", vendor,
display, ist->file_index, ist->st->index);
return 0;
fail:
av_log(NULL, loglevel, "VDPAU init failed for stream #%d:%d.\n",
ist->file_index, ist->st->index);
vdpau_uninit(s);
return AVERROR(EINVAL);
}
int vdpau_init(AVCodecContext *s)
{
InputStream *ist = s->opaque;
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
AVVDPAUContext *vdpau_ctx;
VDPAUContext *ctx;
VdpStatus err;
int profile, ret;
if (!ist->hwaccel_ctx) {
ret = vdpau_alloc(s);
if (ret < 0)
return ret;
}
ctx = ist->hwaccel_ctx;
vdpau_ctx = s->hwaccel_context;
ret = av_vdpau_get_profile(s, &profile);
if (ret < 0) {
av_log(NULL, loglevel, "No known VDPAU decoder profile for this stream.\n");
return AVERROR(EINVAL);
}
if (ctx->decoder)
ctx->decoder_destroy(ctx->decoder);
err = ctx->decoder_create(ctx->device, profile,
s->coded_width, s->coded_height,
16, &ctx->decoder);
if (err != VDP_STATUS_OK) {
av_log(NULL, loglevel, "Error creating the VDPAU decoder: %s\n",
ctx->get_error_string(err));
return AVERROR_UNKNOWN;
}
vdpau_ctx->decoder = ctx->decoder;
ist->hwaccel_get_buffer = vdpau_get_buffer;
ist->hwaccel_retrieve_data = vdpau_retrieve_data;
return 0;
}
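The comment in vdpau_get_buffer above concedes that creating a fresh VdpVideoSurface for every frame is wasteful. A minimal sketch of the pooling it alludes to, reusing the VDPAUContext callbacks from this file (SurfacePool and its helpers are hypothetical, not part of this tree), might look like:
/* a sketch only: pool type and helpers are hypothetical */
typedef struct SurfacePool {
    VdpVideoSurface surfaces[8];
    int             nb_free;
} SurfacePool;
static VdpStatus pool_get(VDPAUContext *ctx, SurfacePool *pool,
                          uint32_t width, uint32_t height, VdpVideoSurface *out)
{
    if (pool->nb_free > 0) {
        *out = pool->surfaces[--pool->nb_free];   /* reuse a returned handle */
        return VDP_STATUS_OK;
    }
    return ctx->video_surface_create(ctx->device, VDP_CHROMA_TYPE_420,
                                     width, height, out);
}
static void pool_put(VDPAUContext *ctx, SurfacePool *pool, VdpVideoSurface s)
{
    if (pool->nb_free < FF_ARRAY_ELEMS(pool->surfaces))
        pool->surfaces[pool->nb_free++] = s;      /* keep it for the next frame */
    else
        ctx->video_surface_destroy(s);            /* pool is full, release */
}
A production pool would additionally have to key surfaces on width, height and chroma type, since vdpau_retrieve_data expects the surface to match the frame being decoded.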

ffplay.c

@@ -28,8 +28,6 @@
#include <math.h>
#include <limits.h>
#include <signal.h>
#include <stdint.h>
#include "libavutil/avstring.h"
#include "libavutil/colorspace.h"
#include "libavutil/mathematics.h"
@@ -123,7 +121,6 @@ typedef struct PacketQueue {
typedef struct VideoPicture {
double pts; // presentation timestamp for this picture
double duration; // estimated duration based on frame rate
int64_t pos; // byte position in file
SDL_Overlay *bmp;
int width, height; /* source height & width */
@@ -145,8 +142,6 @@ typedef struct AudioParams {
int channels;
int64_t channel_layout;
enum AVSampleFormat fmt;
int frame_size;
int bytes_per_sec;
} AudioParams;
typedef struct Clock {
@@ -247,8 +242,13 @@ typedef struct VideoState {
SDL_cond *subpq_cond;
double frame_timer;
double frame_last_pts;
double frame_last_duration;
double frame_last_dropped_pts;
double frame_last_returned_time;
double frame_last_filter_delay;
int64_t frame_last_dropped_pos;
int frame_last_dropped_serial;
int video_stream;
AVStream *video_st;
PacketQueue videoq;
@@ -359,6 +359,8 @@ int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
return 0;
}
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
{
MyAVPacketList *pkt1;
@@ -1066,24 +1068,20 @@ static void sigterm_handler(int sig)
exit(123);
}
static void set_default_window_size(VideoPicture *vp)
{
SDL_Rect rect;
calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
default_width = rect.w;
default_height = rect.h;
}
static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
{
int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
int w,h;
SDL_Rect rect;
if (is_full_screen) flags |= SDL_FULLSCREEN;
else flags |= SDL_RESIZABLE;
if (vp && vp->width)
set_default_window_size(vp);
if (vp && vp->width) {
calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
default_width = rect.w;
default_height = rect.h;
}
if (is_full_screen && fs_screen_width) {
w = fs_screen_width;
@@ -1294,18 +1292,6 @@ static double compute_target_delay(double delay, VideoState *is)
return delay;
}
static double vp_duration(VideoState *is, VideoPicture *vp, VideoPicture *nextvp) {
if (vp->serial == nextvp->serial) {
double duration = nextvp->pts - vp->pts;
if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
return vp->duration;
else
return duration;
} else {
return 0.0;
}
}
static void pictq_next_picture(VideoState *is) {
/* update queue size and signal for next picture */
if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
@@ -1341,12 +1327,14 @@ static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial
set_clock(&is->vidclk, pts, serial);
sync_clock_to_slave(&is->extclk, &is->vidclk);
is->video_current_pos = pos;
is->frame_last_pts = pts;
}
/* called to display each frame */
static void video_refresh(void *opaque, double *remaining_time)
{
VideoState *is = opaque;
VideoPicture *vp;
double time;
SubPicture *sp, *sp2;
@@ -1369,34 +1357,37 @@ static void video_refresh(void *opaque, double *remaining_time)
redisplay = pictq_prev_picture(is);
retry:
if (is->pictq_size == 0) {
SDL_LockMutex(is->pictq_mutex);
if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, is->frame_last_dropped_serial);
is->frame_last_dropped_pts = AV_NOPTS_VALUE;
}
SDL_UnlockMutex(is->pictq_mutex);
// nothing to do, no picture to display in the queue
} else {
double last_duration, duration, delay;
VideoPicture *vp, *lastvp;
/* dequeue the picture */
vp = &is->pictq[is->pictq_rindex];
lastvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
if (vp->serial != is->videoq.serial) {
pictq_next_picture(is);
is->video_current_pos = -1;
redisplay = 0;
goto retry;
}
if (lastvp->serial != vp->serial && !redisplay)
is->frame_timer = av_gettime() / 1000000.0;
if (is->paused)
goto display;
/* compute nominal last_duration */
last_duration = vp_duration(is, lastvp, vp);
last_duration = vp->pts - is->frame_last_pts;
if (!isnan(last_duration) && last_duration > 0 && last_duration < is->max_frame_duration) {
/* if duration of the last frame was sane, update last_duration in video state */
is->frame_last_duration = last_duration;
}
if (redisplay)
delay = 0.0;
else
delay = compute_target_delay(last_duration, is);
delay = compute_target_delay(is->frame_last_duration, is);
time= av_gettime()/1000000.0;
if (time < is->frame_timer + delay && !redisplay) {
@@ -1415,7 +1406,7 @@ retry:
if (is->pictq_size > 1) {
VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
duration = vp_duration(is, vp, nextvp);
duration = nextvp->pts - vp->pts;
if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
if (!redisplay)
is->frame_drops_late++;
@@ -1558,7 +1549,7 @@ static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
}
}
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
{
VideoPicture *vp;
@@ -1608,7 +1599,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double
}
/* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
while (!vp->allocated && !is->abort_request) {
while (!vp->allocated) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
}
@@ -1655,7 +1646,6 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double
SDL_UnlockYUVOverlay(vp->bmp);
vp->pts = pts;
vp->duration = duration;
vp->pos = pos;
vp->serial = serial;
@@ -1678,6 +1668,18 @@ static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *s
if (pkt->data == flush_pkt.data) {
avcodec_flush_buffers(is->video_st->codec);
SDL_LockMutex(is->pictq_mutex);
// Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
while (is->pictq_size && !is->videoq.abort_request) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
is->video_current_pos = -1;
is->frame_last_pts = AV_NOPTS_VALUE;
is->frame_last_duration = 0;
is->frame_timer = (double)av_gettime() / 1000000.0;
is->frame_last_dropped_pts = AV_NOPTS_VALUE;
SDL_UnlockMutex(is->pictq_mutex);
return 0;
}
@@ -1705,17 +1707,23 @@ static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *s
frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
if (frame->pts != AV_NOPTS_VALUE) {
double diff = dpts - get_master_clock(is);
if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
diff - is->frame_last_filter_delay < 0 &&
*serial == is->vidclk.serial &&
SDL_LockMutex(is->pictq_mutex);
if (is->frame_last_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE) {
double clockdiff = get_clock(&is->vidclk) - get_master_clock(is);
double ptsdiff = dpts - is->frame_last_pts;
if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
!isnan(ptsdiff) && ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
clockdiff + ptsdiff - is->frame_last_filter_delay < 0 &&
is->videoq.nb_packets) {
is->frame_last_dropped_pos = av_frame_get_pkt_pos(frame);
is->frame_last_dropped_pts = dpts;
is->frame_last_dropped_serial = *serial;
is->frame_drops_early++;
av_frame_unref(frame);
ret = 0;
}
}
SDL_UnlockMutex(is->pictq_mutex);
}
return ret;
@@ -1727,8 +1735,7 @@ static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *s
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
{
int ret, i;
int nb_filters = graph->nb_filters;
int ret;
AVFilterInOut *outputs = NULL, *inputs = NULL;
if (filtergraph) {
@@ -1756,10 +1763,6 @@ static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
goto fail;
}
/* Reorder the filters to ensure that inputs of the custom filters are merged first */
for (i = 0; i < graph->nb_filters - nb_filters; i++)
FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
ret = avfilter_graph_config(graph, NULL);
fail:
avfilter_inout_free(&outputs);
@@ -1906,11 +1909,8 @@ static int video_thread(void *arg)
VideoState *is = arg;
AVFrame *frame = av_frame_alloc();
double pts;
double duration;
int ret;
int serial = 0;
AVRational tb = is->video_st->time_base;
AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
#if CONFIG_AVFILTER
AVFilterGraph *graph = avfilter_graph_alloc();
@@ -1925,6 +1925,7 @@ static int video_thread(void *arg)
while (is->paused && !is->videoq.abort_request)
SDL_Delay(10);
avcodec_get_frame_defaults(frame);
av_free_packet(&pkt);
ret = get_video_frame(is, frame, &pkt, &serial);
@@ -1951,6 +1952,7 @@ static int video_thread(void *arg)
event.type = FF_QUIT_EVENT;
event.user.data1 = is;
SDL_PushEvent(&event);
av_free_packet(&pkt);
goto the_end;
}
filt_in = is->in_video_filter;
@@ -1959,12 +1961,14 @@ static int video_thread(void *arg)
last_h = frame->height;
last_format = frame->format;
last_serial = serial;
frame_rate = filt_out->inputs[0]->frame_rate;
}
ret = av_buffersrc_add_frame(filt_in, frame);
if (ret < 0)
goto the_end;
av_frame_unref(frame);
avcodec_get_frame_defaults(frame);
av_free_packet(&pkt);
while (ret >= 0) {
is->frame_last_returned_time = av_gettime() / 1000000.0;
@@ -1980,20 +1984,22 @@ static int video_thread(void *arg)
is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
is->frame_last_filter_delay = 0;
tb = filt_out->inputs[0]->time_base;
#endif
duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), serial);
pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(filt_out->inputs[0]->time_base);
ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial);
av_frame_unref(frame);
#if CONFIG_AVFILTER
}
#else
pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(is->video_st->time_base);
ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial);
av_frame_unref(frame);
#endif
if (ret < 0)
goto the_end;
}
the_end:
avcodec_flush_buffers(is->video_st->codec);
#if CONFIG_AVFILTER
avfilter_graph_free(&graph);
#endif
@@ -2163,10 +2169,11 @@ static int audio_decode_frame(VideoState *is)
/* NOTE: the audio packet can contain several frames */
while (pkt_temp->stream_index != -1 || is->audio_buf_frames_pending) {
if (!is->frame) {
if (!(is->frame = av_frame_alloc()))
if (!(is->frame = avcodec_alloc_frame()))
return AVERROR(ENOMEM);
} else {
av_frame_unref(is->frame);
avcodec_get_frame_defaults(is->frame);
}
if (is->audioq.serial != is->audio_pkt_temp_serial)
@@ -2241,6 +2248,7 @@ static int audio_decode_frame(VideoState *is)
if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
return ret;
av_frame_unref(is->frame);
#endif
}
#if CONFIG_AVFILTER
@@ -2377,6 +2385,8 @@ static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
VideoState *is = opaque;
int audio_size, len1;
int bytes_per_sec;
int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
audio_callback_time = av_gettime();
@@ -2386,7 +2396,7 @@ static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
if (audio_size < 0) {
/* if error, just output silence */
is->audio_buf = is->silence_buf;
is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
} else {
if (is->show_mode != SHOW_MODE_VIDEO)
update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
@@ -2402,10 +2412,11 @@ static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
stream += len1;
is->audio_buf_index += len1;
}
bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
/* Let's assume the audio driver that is used by SDL has two periods. */
if (!isnan(is->audio_clock)) {
set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
sync_clock_to_slave(&is->extclk, &is->audclk);
}
}
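The set_clock_at() call above pulls the audio clock back by the amount of audio still queued downstream of the decoder: two hardware periods (the assumption stated in the comment) plus the unplayed remainder of the current buffer, divided by the byte rate. For example, with illustrative numbers only: at 48 kHz stereo s16 the byte rate is 48000 * 2 * 2 = 192000 B/s, so an 8192-byte hardware buffer with 4096 bytes still unwritten delays the clock by (2 * 8192 + 4096) / 192000 ≈ 0.107 seconds.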
@@ -2464,12 +2475,6 @@ static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb
audio_hw_params->freq = spec.freq;
audio_hw_params->channel_layout = wanted_channel_layout;
audio_hw_params->channels = spec.channels;
audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
return -1;
}
return spec.size;
}
@@ -2574,7 +2579,7 @@ static int stream_component_open(VideoState *is, int stream_index)
is->audio_diff_avg_count = 0;
/* since we do not have a precise anough audio fifo fullness,
we correct audio sync only if larger than this threshold */
is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec;
is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
@@ -2819,13 +2824,6 @@ static int read_thread(void *arg)
}
is->show_mode = show_mode;
if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
AVCodecContext *avctx = st->codec;
VideoPicture vp = {.width = avctx->width, .height = avctx->height, .sar = av_guess_sample_aspect_ratio(ic, st, NULL)};
if (vp.width)
set_default_window_size(&vp);
}
/* open the streams */
if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
@@ -2948,8 +2946,6 @@ static int read_thread(void *arg)
packet_queue_put_nullpacket(&is->videoq, is->video_stream);
if (is->audio_stream >= 0)
packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
if (is->subtitle_stream >= 0)
packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
SDL_Delay(10);
eof=0;
continue;
@@ -3170,33 +3166,6 @@ static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
}
}
static void seek_chapter(VideoState *is, int incr)
{
int64_t pos = get_master_clock(is) * AV_TIME_BASE;
int i;
if (!is->ic->nb_chapters)
return;
/* find the current chapter */
for (i = 0; i < is->ic->nb_chapters; i++) {
AVChapter *ch = is->ic->chapters[i];
if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
i--;
break;
}
}
i += incr;
i = FFMAX(i, 0);
if (i >= is->ic->nb_chapters)
return;
av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
AV_TIME_BASE_Q), 0, 0);
}
/* handle an event sent by the GUI */
static void event_loop(VideoState *cur_stream)
{
@@ -3246,19 +3215,11 @@ static void event_loop(VideoState *cur_stream)
toggle_audio_display(cur_stream);
break;
case SDLK_PAGEUP:
if (cur_stream->ic->nb_chapters <= 1) {
incr = 600.0;
goto do_seek;
}
seek_chapter(cur_stream, 1);
break;
incr = 600.0;
goto do_seek;
case SDLK_PAGEDOWN:
if (cur_stream->ic->nb_chapters <= 1) {
incr = -600.0;
goto do_seek;
}
seek_chapter(cur_stream, -1);
break;
incr = -600.0;
goto do_seek;
case SDLK_LEFT:
incr = -10.0;
goto do_seek;
@@ -3591,6 +3552,7 @@ int main(int argc, char **argv)
parse_loglevel(argc, argv, options);
/* register all codecs, demux and protocols */
avcodec_register_all();
#if CONFIG_AVDEVICE
avdevice_register_all();
#endif

ffprobe.c

@@ -24,7 +24,7 @@
*/
#include "config.h"
#include "libavutil/ffversion.h"
#include "version.h"
#include <string.h>
@@ -66,12 +66,6 @@ static int do_show_data = 0;
static int do_show_program_version = 0;
static int do_show_library_versions = 0;
static int do_show_chapter_tags = 0;
static int do_show_format_tags = 0;
static int do_show_frame_tags = 0;
static int do_show_program_tags = 0;
static int do_show_stream_tags = 0;
static int show_value_unit = 0;
static int use_value_prefix = 0;
static int use_byte_value_binary_prefix = 0;
@@ -141,7 +135,6 @@ typedef enum {
SECTION_ID_STREAM_DISPOSITION,
SECTION_ID_STREAMS,
SECTION_ID_STREAM_TAGS,
SECTION_ID_SUBTITLE,
} SectionID;
static struct section sections[] = {
@@ -151,7 +144,7 @@ static struct section sections[] = {
[SECTION_ID_ERROR] = { SECTION_ID_ERROR, "error", 0, { -1 } },
[SECTION_ID_FORMAT] = { SECTION_ID_FORMAT, "format", 0, { SECTION_ID_FORMAT_TAGS, -1 } },
[SECTION_ID_FORMAT_TAGS] = { SECTION_ID_FORMAT_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "format_tags" },
[SECTION_ID_FRAMES] = { SECTION_ID_FRAMES, "frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME, SECTION_ID_SUBTITLE, -1 } },
[SECTION_ID_FRAMES] = { SECTION_ID_FRAMES, "frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME, -1 } },
[SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, -1 } },
[SECTION_ID_FRAME_TAGS] = { SECTION_ID_FRAME_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "frame_tags" },
[SECTION_ID_LIBRARY_VERSIONS] = { SECTION_ID_LIBRARY_VERSIONS, "library_versions", SECTION_FLAG_IS_ARRAY, { SECTION_ID_LIBRARY_VERSION, -1 } },
@@ -174,7 +167,6 @@ static struct section sections[] = {
[SECTION_ID_STREAM] = { SECTION_ID_STREAM, "stream", 0, { SECTION_ID_STREAM_DISPOSITION, SECTION_ID_STREAM_TAGS, -1 } },
[SECTION_ID_STREAM_DISPOSITION] = { SECTION_ID_STREAM_DISPOSITION, "disposition", 0, { -1 }, .unique_name = "stream_disposition" },
[SECTION_ID_STREAM_TAGS] = { SECTION_ID_STREAM_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "stream_tags" },
[SECTION_ID_SUBTITLE] = { SECTION_ID_SUBTITLE, "subtitle", 0, { -1 } },
};
static const OptionDef *options;
@@ -266,13 +258,6 @@ typedef struct WriterContext WriterContext;
#define WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS 1
#define WRITER_FLAG_PUT_PACKETS_AND_FRAMES_IN_SAME_CHAPTER 2
typedef enum {
WRITER_STRING_VALIDATION_FAIL,
WRITER_STRING_VALIDATION_REPLACE,
WRITER_STRING_VALIDATION_IGNORE,
WRITER_STRING_VALIDATION_NB
} StringValidation;
typedef struct Writer {
const AVClass *priv_class; ///< private class of the writer, if any
int priv_size; ///< private size for the writer context
@@ -313,10 +298,6 @@ struct WriterContext {
unsigned int nb_section_packet; ///< number of the packet section in case we are in "packets_and_frames" section
unsigned int nb_section_frame; ///< number of the frame section in case we are in "packets_and_frames" section
unsigned int nb_section_packet_frame; ///< nb_section_packet or nb_section_frame according if is_packets_and_frames
StringValidation string_validation;
char *string_validation_replacement;
unsigned int string_validation_utf8_flags;
};
static const char *writer_get_name(void *p)
@@ -325,35 +306,11 @@ static const char *writer_get_name(void *p)
return wctx->writer->name;
}
#define OFFSET(x) offsetof(WriterContext, x)
static const AVOption writer_options[] = {
{ "string_validation", "set string validation mode",
OFFSET(string_validation), AV_OPT_TYPE_INT, {.i64=WRITER_STRING_VALIDATION_REPLACE}, 0, WRITER_STRING_VALIDATION_NB-1, .unit = "sv" },
{ "sv", "set string validation mode",
OFFSET(string_validation), AV_OPT_TYPE_INT, {.i64=WRITER_STRING_VALIDATION_REPLACE}, 0, WRITER_STRING_VALIDATION_NB-1, .unit = "sv" },
{ "ignore", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = WRITER_STRING_VALIDATION_IGNORE}, .unit = "sv" },
{ "replace", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = WRITER_STRING_VALIDATION_REPLACE}, .unit = "sv" },
{ "fail", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = WRITER_STRING_VALIDATION_FAIL}, .unit = "sv" },
{ "string_validation_replacement", "set string validation replacement string", OFFSET(string_validation_replacement), AV_OPT_TYPE_STRING, {.str=""}},
{ "svr", "set string validation replacement string", OFFSET(string_validation_replacement), AV_OPT_TYPE_STRING, {.str=""}},
{ NULL }
};
static void *writer_child_next(void *obj, void *prev)
{
WriterContext *ctx = obj;
if (!prev && ctx->writer && ctx->writer->priv_class && ctx->priv)
return ctx->priv;
return NULL;
}
static const AVClass writer_class = {
.class_name = "Writer",
.item_name = writer_get_name,
.option = writer_options,
.version = LIBAVUTIL_VERSION_INT,
.child_next = writer_child_next,
"Writer",
writer_get_name,
NULL,
LIBAVUTIL_VERSION_INT,
};
static void writer_close(WriterContext **wctx)
@@ -370,19 +327,9 @@ static void writer_close(WriterContext **wctx)
if ((*wctx)->writer->priv_class)
av_opt_free((*wctx)->priv);
av_freep(&((*wctx)->priv));
av_opt_free(*wctx);
av_freep(wctx);
}
static void bprint_bytes(AVBPrint *bp, const uint8_t *ubuf, size_t ubuf_size)
{
int i;
av_bprintf(bp, "0X");
for (i = 0; i < ubuf_size; i++)
av_bprintf(bp, "%02X", ubuf[i]);
}
static int writer_open(WriterContext **wctx, const Writer *writer, const char *args,
const struct section *sections, int nb_sections)
{
@@ -404,55 +351,14 @@ static int writer_open(WriterContext **wctx, const Writer *writer, const char *a
(*wctx)->sections = sections;
(*wctx)->nb_sections = nb_sections;
av_opt_set_defaults(*wctx);
if (writer->priv_class) {
void *priv_ctx = (*wctx)->priv;
*((const AVClass **)priv_ctx) = writer->priv_class;
av_opt_set_defaults(priv_ctx);
}
/* convert options to dictionary */
if (args) {
AVDictionary *opts = NULL;
AVDictionaryEntry *opt = NULL;
if ((ret = av_dict_parse_string(&opts, args, "=", ":", 0)) < 0) {
av_log(*wctx, AV_LOG_ERROR, "Failed to parse option string '%s' provided to writer context\n", args);
av_dict_free(&opts);
if (args &&
(ret = av_set_options_string(priv_ctx, args, "=", ":")) < 0)
goto fail;
}
while ((opt = av_dict_get(opts, "", opt, AV_DICT_IGNORE_SUFFIX))) {
if ((ret = av_opt_set(*wctx, opt->key, opt->value, AV_OPT_SEARCH_CHILDREN)) < 0) {
av_log(*wctx, AV_LOG_ERROR, "Failed to set option '%s' with value '%s' provided to writer context\n",
opt->key, opt->value);
av_dict_free(&opts);
goto fail;
}
}
av_dict_free(&opts);
}
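The args string here is whatever followed the writer name in -print_format/-of. A standalone illustration of the dictionary step (the option string is made up; the libavutil calls are real):
#include <stdio.h>
#include "libavutil/dict.h"
int main(void)
{
    AVDictionary *opts = NULL;
    AVDictionaryEntry *e = NULL;
    /* "key=value" pairs separated by ':', exactly as writer_open() parses them */
    av_dict_parse_string(&opts,
                         "string_validation=fail:string_validation_replacement=?",
                         "=", ":", 0);
    while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("%s -> %s\n", e->key, e->value);   /* prints the two pairs */
    av_dict_free(&opts);
    return 0;
}
Each pair is then applied with av_opt_set(..., AV_OPT_SEARCH_CHILDREN), which is how an option can land on the writer's private class rather than on WriterContext itself.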
/* validate replace string */
{
const uint8_t *p = (*wctx)->string_validation_replacement;
const uint8_t *endp = p + strlen(p);
while (*p) {
const uint8_t *p0 = p;
int32_t code;
ret = av_utf8_decode(&code, &p, endp, (*wctx)->string_validation_utf8_flags);
if (ret < 0) {
AVBPrint bp;
av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
bprint_bytes(&bp, p0, p-p0),
av_log(wctx, AV_LOG_ERROR,
"Invalid UTF8 sequence %s found in string validation replace '%s'\n",
bp.str, (*wctx)->string_validation_replacement);
return ret;
}
}
}
for (i = 0; i < SECTION_MAX_NB_LEVELS; i++)
@@ -522,98 +428,18 @@ static inline void writer_print_integer(WriterContext *wctx,
}
}
static inline int validate_string(WriterContext *wctx, char **dstp, const char *src)
{
const uint8_t *p, *endp;
AVBPrint dstbuf;
int invalid_chars_nb = 0, ret = 0;
av_bprint_init(&dstbuf, 0, AV_BPRINT_SIZE_UNLIMITED);
endp = src + strlen(src);
for (p = (uint8_t *)src; *p;) {
uint32_t code;
int invalid = 0;
const uint8_t *p0 = p;
if (av_utf8_decode(&code, &p, endp, wctx->string_validation_utf8_flags) < 0) {
AVBPrint bp;
av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
bprint_bytes(&bp, p0, p-p0);
av_log(wctx, AV_LOG_DEBUG,
"Invalid UTF-8 sequence %s found in string '%s'\n", bp.str, src);
invalid = 1;
}
if (invalid) {
invalid_chars_nb++;
switch (wctx->string_validation) {
case WRITER_STRING_VALIDATION_FAIL:
av_log(wctx, AV_LOG_ERROR,
"Invalid UTF-8 sequence found in string '%s'\n", src);
ret = AVERROR_INVALIDDATA;
goto end;
break;
case WRITER_STRING_VALIDATION_REPLACE:
av_bprintf(&dstbuf, "%s", wctx->string_validation_replacement);
break;
}
}
if (!invalid || wctx->string_validation == WRITER_STRING_VALIDATION_IGNORE)
av_bprint_append_data(&dstbuf, p0, p-p0);
}
if (invalid_chars_nb && wctx->string_validation == WRITER_STRING_VALIDATION_REPLACE) {
av_log(wctx, AV_LOG_WARNING,
"%d invalid UTF-8 sequence(s) found in string '%s', replaced with '%s'\n",
invalid_chars_nb, src, wctx->string_validation_replacement);
}
end:
av_bprint_finalize(&dstbuf, dstp);
return ret;
}
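validate_string above is essentially a code-point walk with a policy bolted on. Stripped down to the walk, the av_utf8_decode loop looks like this (a sketch; it counts bad sequences and nothing else):
#include <string.h>
#include <stdint.h>
#include "libavutil/avstring.h"
static int count_invalid_utf8(const char *src)
{
    const uint8_t *p    = (const uint8_t *)src;
    const uint8_t *endp = p + strlen(src);
    int invalid = 0;
    while (*p) {
        int32_t code;
        /* on failure p is still advanced past the offending bytes, which is
         * what lets validate_string() report and replace the bad span */
        if (av_utf8_decode(&code, &p, endp, 0) < 0)
            invalid++;
    }
    return invalid;
}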
#define PRINT_STRING_OPT 1
#define PRINT_STRING_VALIDATE 2
static inline int writer_print_string(WriterContext *wctx,
const char *key, const char *val, int flags)
static inline void writer_print_string(WriterContext *wctx,
const char *key, const char *val, int opt)
{
const struct section *section = wctx->section[wctx->level];
int ret = 0;
if ((flags & PRINT_STRING_OPT)
&& !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS))
return 0;
if (opt && !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS))
return;
if (section->show_all_entries || av_dict_get(section->entries_to_show, key, NULL, 0)) {
if (flags & PRINT_STRING_VALIDATE) {
char *key1 = NULL, *val1 = NULL;
ret = validate_string(wctx, &key1, key);
if (ret < 0) goto end;
ret = validate_string(wctx, &val1, val);
if (ret < 0) goto end;
wctx->writer->print_string(wctx, key1, val1);
end:
if (ret < 0) {
av_log(wctx, AV_LOG_ERROR,
"Invalid key=value string combination %s=%s in section %s\n",
key, val, section->unique_name);
}
av_free(key1);
av_free(val1);
} else {
wctx->writer->print_string(wctx, key, val);
}
wctx->writer->print_string(wctx, key, val);
wctx->nb_item[wctx->level]++;
}
return ret;
}
static inline void writer_print_rational(WriterContext *wctx,
@@ -631,7 +457,7 @@ static void writer_print_time(WriterContext *wctx, const char *key,
char buf[128];
if ((!is_duration && ts == AV_NOPTS_VALUE) || (is_duration && ts == 0)) {
writer_print_string(wctx, key, "N/A", PRINT_STRING_OPT);
writer_print_string(wctx, key, "N/A", 1);
} else {
double d = ts * av_q2d(*time_base);
struct unit_value uv;
@@ -645,7 +471,7 @@ static void writer_print_time(WriterContext *wctx, const char *key,
static void writer_print_ts(WriterContext *wctx, const char *key, int64_t ts, int is_duration)
{
if ((!is_duration && ts == AV_NOPTS_VALUE) || (is_duration && ts == 0)) {
writer_print_string(wctx, key, "N/A", PRINT_STRING_OPT);
writer_print_string(wctx, key, "N/A", 1);
} else {
writer_print_integer(wctx, key, ts);
}
@@ -714,9 +540,9 @@ static const char *name##_get_name(void *ctx) \
return #name ; \
} \
static const AVClass name##_class = { \
.class_name = #name, \
.item_name = name##_get_name, \
.option = name##_options \
#name, \
name##_get_name, \
name##_options \
}
/* Default output */
@@ -728,7 +554,6 @@ typedef struct DefaultContext {
int nested_section[SECTION_MAX_NB_LEVELS];
} DefaultContext;
#undef OFFSET
#define OFFSET(x) offsetof(DefaultContext, x)
static const AVOption default_options[] = {
@@ -1615,8 +1440,7 @@ static void writer_register_all(void)
#define print_int(k, v) writer_print_integer(w, k, v)
#define print_q(k, v, s) writer_print_rational(w, k, v, s)
#define print_str(k, v) writer_print_string(w, k, v, 0)
#define print_str_opt(k, v) writer_print_string(w, k, v, PRINT_STRING_OPT)
#define print_str_validate(k, v) writer_print_string(w, k, v, PRINT_STRING_VALIDATE)
#define print_str_opt(k, v) writer_print_string(w, k, v, 1)
#define print_time(k, v, tb) writer_print_time(w, k, v, tb, 0)
#define print_ts(k, v) writer_print_ts(w, k, v, 0)
#define print_duration_time(k, v, tb) writer_print_time(w, k, v, tb, 1)
@@ -1631,22 +1455,16 @@ static void writer_register_all(void)
#define print_section_header(s) writer_print_section_header(w, s)
#define print_section_footer(s) writer_print_section_footer(w, s)
static inline int show_tags(WriterContext *w, AVDictionary *tags, int section_id)
static inline void show_tags(WriterContext *wctx, AVDictionary *tags, int section_id)
{
AVDictionaryEntry *tag = NULL;
int ret = 0;
if (!tags)
return 0;
writer_print_section_header(w, section_id);
while ((tag = av_dict_get(tags, "", tag, AV_DICT_IGNORE_SUFFIX))) {
if ((ret = print_str_validate(tag->key, tag->value)) < 0)
break;
}
writer_print_section_footer(w);
return ret;
return;
writer_print_section_header(wctx, section_id);
while ((tag = av_dict_get(tags, "", tag, AV_DICT_IGNORE_SUFFIX)))
writer_print_string(wctx, tag->key, tag->value, 0);
writer_print_section_footer(wctx);
}
static void show_packet(WriterContext *w, AVFormatContext *fmt_ctx, AVPacket *pkt, int packet_idx)
@@ -1684,29 +1502,6 @@ static void show_packet(WriterContext *w, AVFormatContext *fmt_ctx, AVPacket *pk
fflush(stdout);
}
static void show_subtitle(WriterContext *w, AVSubtitle *sub, AVStream *stream,
AVFormatContext *fmt_ctx)
{
AVBPrint pbuf;
av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
writer_print_section_header(w, SECTION_ID_SUBTITLE);
print_str ("media_type", "subtitle");
print_ts ("pts", sub->pts);
print_time("pts_time", sub->pts, &AV_TIME_BASE_Q);
print_int ("format", sub->format);
print_int ("start_display_time", sub->start_display_time);
print_int ("end_display_time", sub->end_display_time);
print_int ("num_rects", sub->num_rects);
writer_print_section_footer(w);
av_bprint_finalize(&pbuf, NULL);
fflush(stdout);
}
static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
AVFormatContext *fmt_ctx)
{
@@ -1725,8 +1520,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
print_time("pkt_pts_time", frame->pkt_pts, &stream->time_base);
print_ts ("pkt_dts", frame->pkt_dts);
print_time("pkt_dts_time", frame->pkt_dts, &stream->time_base);
print_ts ("best_effort_timestamp", av_frame_get_best_effort_timestamp(frame));
print_time("best_effort_timestamp_time", av_frame_get_best_effort_timestamp(frame), &stream->time_base);
print_duration_ts ("pkt_duration", av_frame_get_pkt_duration(frame));
print_duration_time("pkt_duration_time", av_frame_get_pkt_duration(frame), &stream->time_base);
if (av_frame_get_pkt_pos (frame) != -1) print_fmt ("pkt_pos", "%"PRId64, av_frame_get_pkt_pos(frame));
@@ -1772,8 +1565,7 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
print_str_opt("channel_layout", "unknown");
break;
}
if (do_show_frame_tags)
show_tags(w, av_frame_get_metadata(frame), SECTION_ID_FRAME_TAGS);
show_tags(w, av_frame_get_metadata(frame), SECTION_ID_FRAME_TAGS);
writer_print_section_footer(w);
@@ -1786,9 +1578,9 @@ static av_always_inline int process_frame(WriterContext *w,
AVFrame *frame, AVPacket *pkt)
{
AVCodecContext *dec_ctx = fmt_ctx->streams[pkt->stream_index]->codec;
AVSubtitle sub;
int ret = 0, got_frame = 0;
avcodec_get_frame_defaults(frame);
if (dec_ctx->codec) {
switch (dec_ctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
@@ -1798,10 +1590,6 @@ static av_always_inline int process_frame(WriterContext *w,
case AVMEDIA_TYPE_AUDIO:
ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, pkt);
break;
case AVMEDIA_TYPE_SUBTITLE:
ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_frame, pkt);
break;
}
}
@@ -1811,15 +1599,9 @@ static av_always_inline int process_frame(WriterContext *w,
pkt->data += ret;
pkt->size -= ret;
if (got_frame) {
int is_sub = (dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE);
nb_streams_frames[pkt->stream_index]++;
if (do_show_frames)
if (is_sub)
show_subtitle(w, &sub, fmt_ctx->streams[pkt->stream_index], fmt_ctx);
else
show_frame(w, frame, fmt_ctx->streams[pkt->stream_index], fmt_ctx);
if (is_sub)
avsubtitle_free(&sub);
show_frame(w, frame, fmt_ctx->streams[pkt->stream_index], fmt_ctx);
}
return got_frame;
}
@@ -1852,7 +1634,7 @@ static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx,
const ReadInterval *interval, int64_t *cur_ts)
{
AVPacket pkt, pkt1;
AVFrame *frame = NULL;
AVFrame frame;
int ret = 0, i = 0, frame_count = 0;
int64_t start = -INT64_MAX, end = interval->end;
int has_start = 0, has_end = interval->has_end && !interval->end_is_offset;
@@ -1886,11 +1668,6 @@ static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx,
}
}
frame = av_frame_alloc();
if (!frame) {
ret = AVERROR(ENOMEM);
goto end;
}
while (!av_read_frame(fmt_ctx, &pkt)) {
if (selected_streams[pkt.stream_index]) {
AVRational tb = fmt_ctx->streams[pkt.stream_index]->time_base;
@@ -1923,7 +1700,7 @@ static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx,
}
if (do_read_frames) {
pkt1 = pkt;
while (pkt1.size && process_frame(w, fmt_ctx, frame, &pkt1) > 0);
while (pkt1.size && process_frame(w, fmt_ctx, &frame, &pkt1) > 0);
}
}
av_free_packet(&pkt);
@@ -1935,11 +1712,10 @@ static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx,
for (i = 0; i < fmt_ctx->nb_streams; i++) {
pkt.stream_index = i;
if (do_read_frames)
while (process_frame(w, fmt_ctx, frame, &pkt) > 0);
while (process_frame(w, fmt_ctx, &frame, &pkt) > 0);
}
end:
av_frame_free(&frame);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not read packets in interval ");
log_read_interval(interval, NULL, AV_LOG_ERROR);
@@ -1947,7 +1723,7 @@ end:
return ret;
}
static int read_packets(WriterContext *w, AVFormatContext *fmt_ctx)
static void read_packets(WriterContext *w, AVFormatContext *fmt_ctx)
{
int i, ret = 0;
int64_t cur_ts = fmt_ctx->start_time;
@@ -1962,11 +1738,9 @@ static int read_packets(WriterContext *w, AVFormatContext *fmt_ctx)
break;
}
}
return ret;
}
static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_idx, int in_program)
static void show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_idx, int in_program)
{
AVStream *stream = fmt_ctx->streams[stream_idx];
AVCodecContext *dec_ctx;
@@ -1975,7 +1749,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
const char *s;
AVRational sar, dar;
AVBPrint pbuf;
int ret = 0;
av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
@@ -2130,35 +1903,26 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
writer_print_section_footer(w);
}
if (do_show_stream_tags)
ret = show_tags(w, stream->metadata, in_program ? SECTION_ID_PROGRAM_STREAM_TAGS : SECTION_ID_STREAM_TAGS);
show_tags(w, stream->metadata, in_program ? SECTION_ID_PROGRAM_STREAM_TAGS : SECTION_ID_STREAM_TAGS);
writer_print_section_footer(w);
av_bprint_finalize(&pbuf, NULL);
fflush(stdout);
return ret;
}
static int show_streams(WriterContext *w, AVFormatContext *fmt_ctx)
static void show_streams(WriterContext *w, AVFormatContext *fmt_ctx)
{
int i, ret = 0;
int i;
writer_print_section_header(w, SECTION_ID_STREAMS);
for (i = 0; i < fmt_ctx->nb_streams; i++)
if (selected_streams[i]) {
ret = show_stream(w, fmt_ctx, i, 0);
if (ret < 0)
break;
}
if (selected_streams[i])
show_stream(w, fmt_ctx, i, 0);
writer_print_section_footer(w);
return ret;
}
static int show_program(WriterContext *w, AVFormatContext *fmt_ctx, AVProgram *program)
static void show_program(WriterContext *w, AVFormatContext *fmt_ctx, AVProgram *program)
{
int i, ret = 0;
int i;
writer_print_section_header(w, SECTION_ID_PROGRAM);
print_int("program_id", program->id);
@@ -2170,46 +1934,35 @@ static int show_program(WriterContext *w, AVFormatContext *fmt_ctx, AVProgram *p
print_time("start_time", program->start_time, &AV_TIME_BASE_Q);
print_ts("end_pts", program->end_time);
print_time("end_time", program->end_time, &AV_TIME_BASE_Q);
if (do_show_program_tags)
ret = show_tags(w, program->metadata, SECTION_ID_PROGRAM_TAGS);
if (ret < 0)
goto end;
show_tags(w, program->metadata, SECTION_ID_PROGRAM_TAGS);
writer_print_section_header(w, SECTION_ID_PROGRAM_STREAMS);
for (i = 0; i < program->nb_stream_indexes; i++) {
if (selected_streams[program->stream_index[i]]) {
ret = show_stream(w, fmt_ctx, program->stream_index[i], 1);
if (ret < 0)
break;
}
if (selected_streams[program->stream_index[i]])
show_stream(w, fmt_ctx, program->stream_index[i], 1);
}
writer_print_section_footer(w);
end:
writer_print_section_footer(w);
return ret;
}
static int show_programs(WriterContext *w, AVFormatContext *fmt_ctx)
static void show_programs(WriterContext *w, AVFormatContext *fmt_ctx)
{
int i, ret = 0;
int i;
writer_print_section_header(w, SECTION_ID_PROGRAMS);
for (i = 0; i < fmt_ctx->nb_programs; i++) {
AVProgram *program = fmt_ctx->programs[i];
if (!program)
continue;
ret = show_program(w, fmt_ctx, program);
if (ret < 0)
break;
show_program(w, fmt_ctx, program);
}
writer_print_section_footer(w);
return ret;
}
static int show_chapters(WriterContext *w, AVFormatContext *fmt_ctx)
static void show_chapters(WriterContext *w, AVFormatContext *fmt_ctx)
{
int i, ret = 0;
int i;
writer_print_section_header(w, SECTION_ID_CHAPTERS);
for (i = 0; i < fmt_ctx->nb_chapters; i++) {
@@ -2222,23 +1975,19 @@ static int show_chapters(WriterContext *w, AVFormatContext *fmt_ctx)
print_time("start_time", chapter->start, &chapter->time_base);
print_int("end", chapter->end);
print_time("end_time", chapter->end, &chapter->time_base);
if (do_show_chapter_tags)
ret = show_tags(w, chapter->metadata, SECTION_ID_CHAPTER_TAGS);
show_tags(w, chapter->metadata, SECTION_ID_CHAPTER_TAGS);
writer_print_section_footer(w);
}
writer_print_section_footer(w);
return ret;
}
static int show_format(WriterContext *w, AVFormatContext *fmt_ctx)
static void show_format(WriterContext *w, AVFormatContext *fmt_ctx)
{
char val_str[128];
int64_t size = fmt_ctx->pb ? avio_size(fmt_ctx->pb) : -1;
int ret = 0;
writer_print_section_header(w, SECTION_ID_FORMAT);
print_str_validate("filename", fmt_ctx->filename);
print_str("filename", fmt_ctx->filename);
print_int("nb_streams", fmt_ctx->nb_streams);
print_int("nb_programs", fmt_ctx->nb_programs);
print_str("format_name", fmt_ctx->iformat->name);
@@ -2253,12 +2002,10 @@ static int show_format(WriterContext *w, AVFormatContext *fmt_ctx)
if (fmt_ctx->bit_rate > 0) print_val ("bit_rate", fmt_ctx->bit_rate, unit_bit_per_second_str);
else print_str_opt("bit_rate", "N/A");
print_int("probe_score", av_format_get_probe_score(fmt_ctx));
if (do_show_format_tags)
ret = show_tags(w, fmt_ctx->metadata, SECTION_ID_FORMAT_TAGS);
show_tags(w, fmt_ctx->metadata, SECTION_ID_FORMAT_TAGS);
writer_print_section_footer(w);
fflush(stdout);
return ret;
}
static void show_error(WriterContext *w, int err)
@@ -2364,8 +2111,6 @@ static int probe_file(WriterContext *wctx, const char *filename)
if (ret < 0)
return ret;
#define CHECK_END if (ret < 0) goto end
nb_streams_frames = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_frames));
nb_streams_packets = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_packets));
selected_streams = av_calloc(fmt_ctx->nb_streams, sizeof(*selected_streams));
@@ -2375,7 +2120,8 @@ static int probe_file(WriterContext *wctx, const char *filename)
ret = avformat_match_stream_specifier(fmt_ctx,
fmt_ctx->streams[i],
stream_specifier);
CHECK_END;
if (ret < 0)
goto end;
else
selected_streams[i] = ret;
ret = 0;
@@ -2394,29 +2140,18 @@ static int probe_file(WriterContext *wctx, const char *filename)
section_id = SECTION_ID_FRAMES;
if (do_show_frames || do_show_packets)
writer_print_section_header(wctx, section_id);
ret = read_packets(wctx, fmt_ctx);
read_packets(wctx, fmt_ctx);
if (do_show_frames || do_show_packets)
writer_print_section_footer(wctx);
CHECK_END;
}
if (do_show_programs) {
ret = show_programs(wctx, fmt_ctx);
CHECK_END;
}
if (do_show_streams) {
ret = show_streams(wctx, fmt_ctx);
CHECK_END;
}
if (do_show_chapters) {
ret = show_chapters(wctx, fmt_ctx);
CHECK_END;
}
if (do_show_format) {
ret = show_format(wctx, fmt_ctx);
CHECK_END;
}
if (do_show_programs)
show_programs(wctx, fmt_ctx);
if (do_show_streams)
show_streams(wctx, fmt_ctx);
if (do_show_chapters)
show_chapters(wctx, fmt_ctx);
if (do_show_format)
show_format(wctx, fmt_ctx);
end:
close_input_file(&fmt_ctx);
@@ -2442,7 +2177,7 @@ static void ffprobe_show_program_version(WriterContext *w)
writer_print_section_header(w, SECTION_ID_PROGRAM_VERSION);
print_str("version", FFMPEG_VERSION);
print_fmt("copyright", "Copyright (c) %d-%d the FFmpeg developers",
program_birth_year, CONFIG_THIS_YEAR);
program_birth_year, this_year);
print_str("build_date", __DATE__);
print_str("build_time", __TIME__);
print_str("compiler_ident", CC_IDENT);
@@ -2913,12 +2648,6 @@ int main(int argc, char **argv)
SET_DO_SHOW(STREAM_DISPOSITION, stream_disposition);
SET_DO_SHOW(PROGRAM_STREAM_DISPOSITION, stream_disposition);
SET_DO_SHOW(CHAPTER_TAGS, chapter_tags);
SET_DO_SHOW(FORMAT_TAGS, format_tags);
SET_DO_SHOW(FRAME_TAGS, frame_tags);
SET_DO_SHOW(PROGRAM_TAGS, program_tags);
SET_DO_SHOW(STREAM_TAGS, stream_tags);
if (do_bitexact && (do_show_program_version || do_show_library_versions)) {
av_log(NULL, AV_LOG_ERROR,
"-bitexact and -show_program_version or -show_library_versions "
@@ -2947,9 +2676,6 @@ int main(int argc, char **argv)
if ((ret = writer_open(&wctx, w, w_args,
sections, FF_ARRAY_ELEMS(sections))) >= 0) {
if (w == &xml_writer)
wctx->string_validation_utf8_flags |= AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES;
writer_print_section_header(wctx, SECTION_ID_ROOT);
if (do_show_program_version)

ffserver.c

@@ -216,7 +216,6 @@ typedef struct FFStream {
struct FFStream *feed; /* feed we are using (can be null if
coming from file) */
AVDictionary *in_opts; /* input parameters */
AVDictionary *metadata; /* metadata to set on the stream */
AVInputFormat *ifmt; /* if non NULL, force input format */
AVOutputFormat *fmt;
IPAddressACL *acl;
@@ -229,6 +228,10 @@ typedef struct FFStream {
int feed_streams[MAX_STREAMS]; /* index of streams in the feed */
char feed_filename[1024]; /* file name of the feed storage, or
input file name for a stream */
char author[512];
char title[512];
char copyright[512];
char comment[512];
pid_t pid; /* Of ffmpeg process */
time_t pid_start; /* Of ffmpeg process */
char **child_argv;
@@ -287,7 +290,8 @@ static void rtsp_cmd_describe(HTTPContext *c, const char *url);
static void rtsp_cmd_options(HTTPContext *c, const char *url);
static void rtsp_cmd_setup(HTTPContext *c, const char *url, RTSPMessageHeader *h);
static void rtsp_cmd_play(HTTPContext *c, const char *url, RTSPMessageHeader *h);
static void rtsp_cmd_interrupt(HTTPContext *c, const char *url, RTSPMessageHeader *h, int pause_only);
static void rtsp_cmd_pause(HTTPContext *c, const char *url, RTSPMessageHeader *h);
static void rtsp_cmd_teardown(HTTPContext *c, const char *url, RTSPMessageHeader *h);
/* SDP handling */
static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
@@ -505,9 +509,8 @@ static void start_children(FFStream *feed)
char *slash;
int i;
/* replace "ffserver" with "ffmpeg" in the path of current
* program. Ignore user provided path */
av_strlcpy(pathname, my_program_name, sizeof(pathname));
slash = strrchr(pathname, '/');
if (!slash)
slash = pathname;
@@ -627,16 +630,17 @@ static void start_multicast(void)
}
}
/* change state to send data */
rtp_c->state = HTTPSTATE_SEND_DATA;
}
}
}
/* main loop of the HTTP server */
/* main loop of the http server */
static int http_server(void)
{
int server_fd = 0, rtsp_server_fd = 0;
int ret, delay;
int ret, delay, delay1;
struct pollfd *poll_table, *poll_entry;
HTTPContext *c, *c_next;
@@ -700,19 +704,18 @@ static int http_server(void)
case HTTPSTATE_SEND_DATA:
case HTTPSTATE_SEND_DATA_TRAILER:
if (!c->is_packetized) {
/* for TCP, we output as much as we can
* (may need to put a limit) */
/* for TCP, we output as much as we can (may need to put a limit) */
c->poll_entry = poll_entry;
poll_entry->fd = fd;
poll_entry->events = POLLOUT;
poll_entry++;
} else {
/* when ffserver is doing the timing, we work by
looking at which packet needs to be sent every
looking at which packet need to be sent every
10 ms */
/* one tick wait XXX: 10 ms assumed */
if (delay > 10)
delay = 10;
delay1 = 10; /* one tick wait XXX: 10 ms assumed */
if (delay1 < delay)
delay = delay1;
}
break;
case HTTPSTATE_WAIT_REQUEST:
@@ -752,8 +755,8 @@ static int http_server(void)
for(c = first_http_ctx; c != NULL; c = c_next) {
c_next = c->next;
if (handle_connection(c) < 0) {
log_connection(c);
/* close and free the connection */
log_connection(c);
close_connection(c);
}
}
@@ -1002,7 +1005,9 @@ static int handle_connection(HTTPContext *c)
if (len < 0) {
if (ff_neterrno() != AVERROR(EAGAIN) &&
ff_neterrno() != AVERROR(EINTR)) {
goto close_connection;
/* error : close connection */
av_freep(&c->pb_buffer);
return -1;
}
} else {
c->buffer_ptr += len;
@@ -1014,8 +1019,7 @@ static int handle_connection(HTTPContext *c)
/* if error, exit */
if (c->http_error)
return -1;
/* all the buffer was sent : synchronize to the incoming
* stream */
/* all the buffer was sent : synchronize to the incoming stream */
c->state = HTTPSTATE_SEND_DATA_HEADER;
c->buffer_ptr = c->buffer_end = c->buffer;
}
@@ -1026,7 +1030,7 @@ static int handle_connection(HTTPContext *c)
case HTTPSTATE_SEND_DATA_HEADER:
case HTTPSTATE_SEND_DATA_TRAILER:
/* for packetized output, we consider we can always write (the
input streams set the speed). It may be better to verify
input streams sets the speed). It may be better to verify
that we do not rely too much on the kernel queues */
if (!c->is_packetized) {
if (c->poll_entry->revents & (POLLERR | POLLHUP))
@@ -1060,8 +1064,10 @@ static int handle_connection(HTTPContext *c)
break;
case RTSPSTATE_SEND_REPLY:
if (c->poll_entry->revents & (POLLERR | POLLHUP))
goto close_connection;
if (c->poll_entry->revents & (POLLERR | POLLHUP)) {
av_freep(&c->pb_buffer);
return -1;
}
/* no need to write if no events */
if (!(c->poll_entry->revents & POLLOUT))
return 0;
@@ -1069,7 +1075,9 @@ static int handle_connection(HTTPContext *c)
if (len < 0) {
if (ff_neterrno() != AVERROR(EAGAIN) &&
ff_neterrno() != AVERROR(EINTR)) {
goto close_connection;
/* error : close connection */
av_freep(&c->pb_buffer);
return -1;
}
} else {
c->buffer_ptr += len;
@@ -1114,10 +1122,6 @@ static int handle_connection(HTTPContext *c)
return -1;
}
return 0;
close_connection:
av_freep(&c->pb_buffer);
return -1;
}
static int extract_rates(char *rates, int ratelen, const char *request)
@@ -1456,7 +1460,7 @@ static int validate_acl(FFStream *stream, HTTPContext *c)
}
/* compute the real filename of a file by matching it without its
extensions to all the stream's filenames */
extensions to all the stream filenames */
static void compute_real_filename(char *filename, int max_size)
{
char file1[1024];
@@ -1490,7 +1494,7 @@ enum RedirType {
REDIR_SDP,
};
/* parse HTTP request and prepare header */
/* parse http request and prepare header */
static int http_parse_request(HTTPContext *c)
{
const char *p;
@@ -1861,7 +1865,7 @@ static int http_parse_request(HTTPContext *c)
goto send_error;
}
/* prepare HTTP header */
/* prepare http header */
c->buffer[0] = 0;
av_strlcatf(c->buffer, c->buffer_size, "HTTP/1.0 200 OK\r\n");
mime_type = c->stream->fmt->mime_type;
@@ -1940,7 +1944,7 @@ static void compute_status(HTTPContext *c)
}
avio_printf(pb, "HTTP/1.0 200 OK\r\n");
avio_printf(pb, "Content-type: text/html\r\n");
avio_printf(pb, "Content-type: %s\r\n", "text/html");
avio_printf(pb, "Pragma: no-cache\r\n");
avio_printf(pb, "\r\n");
@@ -2179,10 +2183,8 @@ static int open_input_stream(HTTPContext *c, const char *info)
buf_size = FFM_PACKET_SIZE;
/* compute position (absolute time) */
if (av_find_info_tag(buf, sizeof(buf), "date", info)) {
if ((ret = av_parse_time(&stream_pos, buf, 0)) < 0) {
http_log("Invalid date specification '%s' for stream\n", buf);
if ((ret = av_parse_time(&stream_pos, buf, 0)) < 0)
return ret;
}
} else if (av_find_info_tag(buf, sizeof(buf), "buffer", info)) {
int prebuffer = strtol(buf, 0, 10);
stream_pos = av_gettime() - prebuffer * (int64_t)1000000;
@@ -2193,22 +2195,18 @@ static int open_input_stream(HTTPContext *c, const char *info)
buf_size = 0;
/* compute position (relative time) */
if (av_find_info_tag(buf, sizeof(buf), "date", info)) {
if ((ret = av_parse_time(&stream_pos, buf, 1)) < 0) {
http_log("Invalid date specification '%s' for stream\n", buf);
if ((ret = av_parse_time(&stream_pos, buf, 1)) < 0)
return ret;
}
} else
stream_pos = 0;
}
if (!input_filename[0]) {
http_log("No filename was specified for stream\n");
return AVERROR(EINVAL);
}
if (input_filename[0] == '\0')
return -1;
/* open stream */
if ((ret = avformat_open_input(&s, input_filename, c->stream->ifmt, &c->stream->in_opts)) < 0) {
http_log("Could not open input '%s': %s\n", input_filename, av_err2str(ret));
return ret;
http_log("could not open %s: %d\n", input_filename, ret);
return -1;
}
/* set buffer size */
@@ -2216,15 +2214,14 @@ static int open_input_stream(HTTPContext *c, const char *info)
s->flags |= AVFMT_FLAG_GENPTS;
c->fmt_in = s;
if (strcmp(s->iformat->name, "ffm") &&
(ret = avformat_find_stream_info(c->fmt_in, NULL)) < 0) {
http_log("Could not find stream info for input '%s'\n", input_filename);
if (strcmp(s->iformat->name, "ffm") && avformat_find_stream_info(c->fmt_in, NULL) < 0) {
http_log("Could not find stream info '%s'\n", input_filename);
avformat_close_input(&s);
return ret;
return -1;
}
/* choose stream as clock source (we favor the video stream if
* present) for packet sending */
/* choose stream as clock source (we favorize video stream if
present) for packet sending */
c->pts_stream_index = 0;
for(i=0;i<c->stream->nb_streams;i++) {
if (c->pts_stream_index == 0 &&
@@ -2273,10 +2270,12 @@ static int http_prepare_data(HTTPContext *c)
av_freep(&c->pb_buffer);
switch(c->state) {
case HTTPSTATE_SEND_DATA_HEADER:
ctx = avformat_alloc_context();
c->fmt_ctx = *ctx;
av_freep(&ctx);
av_dict_copy(&(c->fmt_ctx.metadata), c->stream->metadata, 0);
memset(&c->fmt_ctx, 0, sizeof(c->fmt_ctx));
av_dict_set(&c->fmt_ctx.metadata, "author" , c->stream->author , 0);
av_dict_set(&c->fmt_ctx.metadata, "comment" , c->stream->comment , 0);
av_dict_set(&c->fmt_ctx.metadata, "copyright", c->stream->copyright, 0);
av_dict_set(&c->fmt_ctx.metadata, "title" , c->stream->title , 0);
c->fmt_ctx.streams = av_mallocz(sizeof(AVStream *) * c->stream->nb_streams);
for(i=0;i<c->stream->nb_streams;i++) {
@@ -2291,8 +2290,8 @@ static int http_prepare_data(HTTPContext *c)
*(c->fmt_ctx.streams[i]) = *src;
c->fmt_ctx.streams[i]->priv_data = 0;
/* XXX: should be done in AVStream, not in codec */
c->fmt_ctx.streams[i]->codec->frame_number = 0;
c->fmt_ctx.streams[i]->codec->frame_number = 0; /* XXX: should be done in
AVStream, not in codec */
}
/* set output format parameters */
c->fmt_ctx.oformat = c->stream->fmt;
@@ -2310,14 +2309,13 @@ static int http_prepare_data(HTTPContext *c)
/*
* HACK to avoid mpeg ps muxer to spit many underflow errors
* Default value from FFmpeg
* Try to set it using configuration option
* Try to set it use configuration option
*/
c->fmt_ctx.max_delay = (int)(0.7*AV_TIME_BASE);
if ((ret = avformat_write_header(&c->fmt_ctx, NULL)) < 0) {
http_log("Error writing output header for stream '%s': %s\n",
c->stream->filename, av_err2str(ret));
return ret;
if (avformat_write_header(&c->fmt_ctx, NULL) < 0) {
http_log("Error writing output header\n");
return -1;
}
av_dict_free(&c->fmt_ctx.metadata);
@@ -2361,7 +2359,7 @@ static int http_prepare_data(HTTPContext *c)
goto redo;
} else {
no_loop:
/* must send trailer now because EOF or error */
/* must send trailer now because eof or error */
c->state = HTTPSTATE_SEND_DATA_TRAILER;
}
}
@@ -2403,8 +2401,8 @@ static int http_prepare_data(HTTPContext *c)
send_it:
ist = c->fmt_in->streams[source_index];
/* specific handling for RTP: we use several
* output streams (one for each RTP connection).
* XXX: need more abstract handling */
output stream (one for each RTP
connection). XXX: need more abstract handling */
if (c->is_packetized) {
/* compute send time and duration */
c->cur_pts = av_rescale_q(pkt.dts, ist->time_base, AV_TIME_BASE_Q);
@@ -2448,9 +2446,8 @@ static int http_prepare_data(HTTPContext *c)
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pkt.pts, ist->time_base, ost->time_base);
pkt.duration = av_rescale_q(pkt.duration, ist->time_base, ost->time_base);
if ((ret = av_write_frame(ctx, &pkt)) < 0) {
http_log("Error writing frame to output for stream '%s': %s\n",
c->stream->filename, av_err2str(ret));
if (av_write_frame(ctx, &pkt) < 0) {
http_log("Error writing frame to output\n");
c->state = HTTPSTATE_SEND_DATA_TRAILER;
}
@@ -2494,7 +2491,7 @@ static int http_prepare_data(HTTPContext *c)
/* should convert the format at the same time */
/* send data starting at c->buffer_ptr to the output connection
* (either UDP or TCP) */
(either UDP or TCP connection) */
static int http_send_data(HTTPContext *c)
{
int len, ret;
@@ -2617,26 +2614,19 @@ static int http_send_data(HTTPContext *c)
static int http_start_receive_data(HTTPContext *c)
{
int fd;
int ret;
if (c->stream->feed_opened) {
http_log("Stream feed '%s' was not opened\n", c->stream->feed_filename);
return AVERROR(EINVAL);
}
if (c->stream->feed_opened)
return -1;
/* Don't permit writing to this one */
if (c->stream->readonly) {
http_log("Cannot write to read-only file '%s'\n", c->stream->feed_filename);
return AVERROR(EINVAL);
}
if (c->stream->readonly)
return -1;
/* open feed */
fd = open(c->stream->feed_filename, O_RDWR);
if (fd < 0) {
ret = AVERROR(errno);
http_log("Could not open feed file '%s': %s\n",
c->stream->feed_filename, strerror(errno));
return ret;
http_log("Error opening feeder file: %s\n", strerror(errno));
return -1;
}
c->feed_fd = fd;
@@ -2645,19 +2635,13 @@ static int http_start_receive_data(HTTPContext *c)
ffm_write_write_index(c->feed_fd, FFM_PACKET_SIZE);
http_log("Truncating feed file '%s'\n", c->stream->feed_filename);
if (ftruncate(c->feed_fd, FFM_PACKET_SIZE) < 0) {
ret = AVERROR(errno);
http_log("Error truncating feed file '%s': %s\n",
c->stream->feed_filename, strerror(errno));
return ret;
http_log("Error truncating feed file: %s\n", strerror(errno));
return -1;
}
} else {
ret = ffm_read_write_index(fd);
if (ret < 0) {
http_log("Error reading write index from feed file '%s': %s\n",
c->stream->feed_filename, strerror(errno));
return ret;
} else {
c->stream->feed_write_index = ret;
if ((c->stream->feed_write_index = ffm_read_write_index(fd)) < 0) {
http_log("Error reading write index from feed file: %s\n", strerror(errno));
return -1;
}
}
@@ -2961,9 +2945,9 @@ static int rtsp_parse_request(HTTPContext *c)
else if (!strcmp(cmd, "PLAY"))
rtsp_cmd_play(c, url, header);
else if (!strcmp(cmd, "PAUSE"))
rtsp_cmd_interrupt(c, url, header, 1);
rtsp_cmd_pause(c, url, header);
else if (!strcmp(cmd, "TEARDOWN"))
rtsp_cmd_interrupt(c, url, header, 0);
rtsp_cmd_teardown(c, url, header);
else
rtsp_reply_error(c, RTSP_STATUS_METHOD);
@@ -2986,7 +2970,6 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
AVFormatContext *avc;
AVStream *avs = NULL;
AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);
AVDictionaryEntry *entry = av_dict_get(stream->metadata, "title", NULL, 0);
int i;
avc = avformat_alloc_context();
@@ -2995,7 +2978,7 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
}
avc->oformat = rtp_format;
av_dict_set(&avc->metadata, "title",
entry ? entry->value : "No Title", 0);
stream->title[0] ? stream->title : "No Title", 0);
avc->nb_streams = stream->nb_streams;
if (stream->is_multicast) {
snprintf(avc->filename, 1024, "rtp://%s:%d?multicast=1?ttl=%d",
@@ -3317,7 +3300,7 @@ static void rtsp_cmd_play(HTTPContext *c, const char *url, RTSPMessageHeader *h)
avio_printf(c->pb, "\r\n");
}
static void rtsp_cmd_interrupt(HTTPContext *c, const char *url, RTSPMessageHeader *h, int pause_only)
static void rtsp_cmd_pause(HTTPContext *c, const char *url, RTSPMessageHeader *h)
{
HTTPContext *rtp_c;
@@ -3327,14 +3310,29 @@ static void rtsp_cmd_interrupt(HTTPContext *c, const char *url, RTSPMessageHeade
return;
}
if (pause_only) {
if (rtp_c->state != HTTPSTATE_SEND_DATA &&
rtp_c->state != HTTPSTATE_WAIT_FEED) {
rtsp_reply_error(c, RTSP_STATUS_STATE);
return;
}
rtp_c->state = HTTPSTATE_READY;
rtp_c->first_pts = AV_NOPTS_VALUE;
if (rtp_c->state != HTTPSTATE_SEND_DATA &&
rtp_c->state != HTTPSTATE_WAIT_FEED) {
rtsp_reply_error(c, RTSP_STATUS_STATE);
return;
}
rtp_c->state = HTTPSTATE_READY;
rtp_c->first_pts = AV_NOPTS_VALUE;
/* now everything is OK, so we can send the connection parameters */
rtsp_reply_header(c, RTSP_STATUS_OK);
/* session ID */
avio_printf(c->pb, "Session: %s\r\n", rtp_c->session_id);
avio_printf(c->pb, "\r\n");
}
static void rtsp_cmd_teardown(HTTPContext *c, const char *url, RTSPMessageHeader *h)
{
HTTPContext *rtp_c;
rtp_c = find_rtp_session_with_url(url, h->session_id);
if (!rtp_c) {
rtsp_reply_error(c, RTSP_STATUS_SESSION);
return;
}
/* now everything is OK, so we can send the connection parameters */
@@ -3343,10 +3341,11 @@ static void rtsp_cmd_interrupt(HTTPContext *c, const char *url, RTSPMessageHeade
avio_printf(c->pb, "Session: %s\r\n", rtp_c->session_id);
avio_printf(c->pb, "\r\n");
if (!pause_only)
close_connection(rtp_c);
/* abort the session */
close_connection(rtp_c);
}
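
The hunks above split rtsp_cmd_interrupt() back into separate PAUSE and TEARDOWN handlers; the unified version folds both into one function with a pause_only flag. A toy stand-in for that merge (struct session and its states are invented for illustration):

#include <stdio.h>

enum state { STATE_PLAYING, STATE_READY, STATE_CLOSED };
struct session { enum state state; };

static void cmd_interrupt(struct session *s, int pause_only)
{
    s->state = STATE_READY;      /* PAUSE leaves the session resumable */
    if (!pause_only)
        s->state = STATE_CLOSED; /* TEARDOWN additionally closes it */
}

int main(void)
{
    struct session s = { STATE_PLAYING };
    cmd_interrupt(&s, 1);
    printf("after PAUSE: %d\n", s.state);
    cmd_interrupt(&s, 0);
    printf("after TEARDOWN: %d\n", s.state);
    return 0;
}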
/********************************************************************/
/* RTP handling */
@@ -3491,8 +3490,7 @@ static int rtp_new_av_stream(HTTPContext *c,
ipaddr, ntohs(dest_addr->sin_port),
c->stream->filename, stream_index, c->protocol);
/* normally, no packets should be output here, but the packet size may
* be checked */
/* normally, no packets should be output here, but the packet size may be checked */
if (ffio_open_dyn_packet_buf(&ctx->pb, max_packet_size) < 0) {
/* XXX: close stream */
goto fail;
@@ -3534,7 +3532,7 @@ static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec, int cop
}
} else {
/* live streams must use the actual feed's codec since it may be
* updated later to carry extradata needed by them.
* updated later to carry extradata needed by the streams.
*/
fst->codec = codec;
}
@@ -3671,14 +3669,9 @@ static void build_file_streams(void)
av_dict_set(&stream->in_opts, "mpeg2ts_compute_pcr", "1", 0);
}
if (!stream->feed_filename[0]) {
http_log("Unspecified feed file for stream '%s'\n", stream->filename);
goto fail;
}
http_log("Opening feed file '%s' for stream '%s'\n", stream->feed_filename, stream->filename);
http_log("Opening file '%s'\n", stream->feed_filename);
if ((ret = avformat_open_input(&infile, stream->feed_filename, stream->ifmt, &stream->in_opts)) < 0) {
http_log("Could not open '%s': %s\n", stream->feed_filename, av_err2str(ret));
http_log("Could not open '%s': %d\n", stream->feed_filename, ret);
/* remove stream (no need to spend more time on it) */
fail:
remove_stream(stream);
@@ -3803,7 +3796,7 @@ static void build_feed_streams(void)
}
}
if (avio_check(feed->feed_filename, AVIO_FLAG_WRITE) <= 0) {
AVFormatContext *s = avformat_alloc_context();
AVFormatContext s1 = {0}, *s = &s1;
if (feed->readonly) {
http_log("Unable to create feed file '%s' as it is marked readonly\n",
@@ -3827,9 +3820,6 @@ static void build_feed_streams(void)
/* XXX: need better api */
av_freep(&s->priv_data);
avio_close(s->pb);
s->streams = NULL;
s->nb_streams = 0;
avformat_free_context(s);
}
/* get feed size and write index */
fd = open(feed->feed_filename, O_RDONLY);
@@ -3952,13 +3942,24 @@ static void add_codec(FFStream *stream, AVCodecContext *av)
memcpy(st->codec, av, sizeof(AVCodecContext));
}
static enum AVCodecID opt_codec(const char *name, enum AVMediaType type)
static enum AVCodecID opt_audio_codec(const char *arg)
{
AVCodec *codec = avcodec_find_encoder_by_name(name);
AVCodec *p= avcodec_find_encoder_by_name(arg);
if (!codec || codec->type != type)
if (p == NULL || p->type != AVMEDIA_TYPE_AUDIO)
return AV_CODEC_ID_NONE;
return codec->id;
return p->id;
}
static enum AVCodecID opt_video_codec(const char *arg)
{
AVCodec *p= avcodec_find_encoder_by_name(arg);
if (p == NULL || p->type != AVMEDIA_TYPE_VIDEO)
return AV_CODEC_ID_NONE;
return p->id;
}
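
The unified helper on one side of this hunk replaces the two per-type lookups with a single function parameterized on AVMediaType. A hedged sketch of that shape, assuming libavcodec (find_encoder_id() is an invented name):

#include <libavcodec/avcodec.h>

static enum AVCodecID find_encoder_id(const char *name, enum AVMediaType type)
{
    AVCodec *codec = avcodec_find_encoder_by_name(name);
    /* reject unknown names and encoders of the wrong media type alike */
    if (!codec || codec->type != type)
        return AV_CODEC_ID_NONE;
    return codec->id;
}

A call such as find_encoder_id("mp2", AVMEDIA_TYPE_AUDIO) would then serve the AudioCodec directive, and the same function handles VideoCodec with AVMEDIA_TYPE_VIDEO.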
static int ffserver_opt_default(const char *opt, const char *arg,
@@ -3997,9 +3998,9 @@ static int ffserver_opt_preset(const char *arg,
break;
}
if(!strcmp(tmp, "acodec")){
*audio_id = opt_codec(tmp2, AVMEDIA_TYPE_AUDIO);
*audio_id = opt_audio_codec(tmp2);
}else if(!strcmp(tmp, "vcodec")){
*video_id = opt_codec(tmp2, AVMEDIA_TYPE_VIDEO);
*video_id = opt_video_codec(tmp2);
}else if(!strcmp(tmp, "scodec")){
/* opt_subtitle_codec(tmp2); */
}else if(ffserver_opt_default(tmp, tmp2, avctx, type) < 0){
@@ -4014,7 +4015,8 @@ static int ffserver_opt_preset(const char *arg,
return ret;
}
static AVOutputFormat *ffserver_guess_format(const char *short_name, const char *filename, const char *mime_type)
static AVOutputFormat *ffserver_guess_format(const char *short_name, const char *filename,
const char *mime_type)
{
AVOutputFormat *fmt = av_guess_format(short_name, filename, mime_type);
@@ -4032,12 +4034,12 @@ static AVOutputFormat *ffserver_guess_format(const char *short_name, const char
return fmt;
}
static void report_config_error(const char *filename, int line_num, int log_level, int *errors, const char *fmt, ...)
static void report_config_error(const char *filename, int line_num, int *errors, const char *fmt, ...)
{
va_list vl;
va_start(vl, fmt);
av_log(NULL, log_level, "%s:%d: ", filename, line_num);
av_vlog(NULL, log_level, fmt, vl);
fprintf(stderr, "%s:%d: ", filename, line_num);
vfprintf(stderr, fmt, vl);
va_end(vl);
(*errors)++;
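
Both variants of report_config_error() rely on the standard variadic-forwarding idiom: capture the trailing arguments in a va_list and hand it to a v-variant (av_vlog on one side, vfprintf on the other). A self-contained illustration:

#include <stdarg.h>
#include <stdio.h>

static void report_error(const char *filename, int line_num, int *errors,
                         const char *fmt, ...)
{
    va_list vl;
    va_start(vl, fmt);
    fprintf(stderr, "%s:%d: ", filename, line_num);
    vfprintf(stderr, fmt, vl);   /* forward the captured arguments */
    va_end(vl);
    (*errors)++;
}

int main(void)
{
    int errors = 0;
    report_error("ffserver.conf", 42, &errors, "Unknown %s: %s\n",
                 "AudioCodec", "bogus");
    return errors != 1;
}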
@@ -4048,23 +4050,21 @@ static int parse_ffconfig(const char *filename)
FILE *f;
char line[1024];
char cmd[64];
char arg[1024], arg2[1024];
char arg[1024];
const char *p;
int val, errors, warnings, line_num;
int val, errors, line_num;
FFStream **last_stream, *stream, *redirect;
FFStream **last_feed, *feed, *s;
AVCodecContext audio_enc, video_enc;
enum AVCodecID audio_id, video_id;
int ret = 0;
f = fopen(filename, "r");
if (!f) {
ret = AVERROR(errno);
av_log(NULL, AV_LOG_ERROR, "Could not open the configuration file '%s'\n", filename);
return ret;
perror(filename);
return -1;
}
errors = warnings = 0;
errors = 0;
line_num = 0;
first_stream = NULL;
last_stream = &first_stream;
@@ -4075,9 +4075,8 @@ static int parse_ffconfig(const char *filename)
redirect = NULL;
audio_id = AV_CODEC_ID_NONE;
video_id = AV_CODEC_ID_NONE;
#define ERROR(...) report_config_error(filename, line_num, AV_LOG_ERROR, &errors, __VA_ARGS__)
#define WARNING(...) report_config_error(filename, line_num, AV_LOG_WARNING, &warnings, __VA_ARGS__)
#define ERROR(...) report_config_error(filename, line_num, &errors, __VA_ARGS__)
for(;;) {
if (fgets(line, sizeof(line), f) == NULL)
break;
@@ -4103,7 +4102,7 @@ static int parse_ffconfig(const char *filename)
ERROR("%s:%d: Invalid host/IP address: %s\n", arg);
}
} else if (!av_strcasecmp(cmd, "NoDaemon")) {
WARNING("NoDaemon option has no effect, you should remove it\n");
// do nothing here, its the default now
} else if (!av_strcasecmp(cmd, "RTSPPort")) {
get_arg(arg, sizeof(arg), &p);
val = atoi(arg);
@@ -4150,10 +4149,6 @@ static int parse_ffconfig(const char *filename)
ERROR("Already in a tag\n");
} else {
feed = av_mallocz(sizeof(FFStream));
if (!feed) {
ret = AVERROR(ENOMEM);
goto end;
}
get_arg(feed->filename, sizeof(feed->filename), &p);
q = strrchr(feed->filename, '>');
if (*q)
@@ -4166,7 +4161,7 @@ static int parse_ffconfig(const char *filename)
}
feed->fmt = av_guess_format("ffm", NULL, NULL);
/* default feed file */
/* defaut feed file */
snprintf(feed->feed_filename, sizeof(feed->feed_filename),
"/tmp/%s.ffm", feed->filename);
feed->feed_max_size = 5 * 1024 * 1024;
@@ -4185,50 +4180,36 @@ static int parse_ffconfig(const char *filename)
int i;
feed->child_argv = av_mallocz(64 * sizeof(char *));
if (!feed->child_argv) {
ret = AVERROR(ENOMEM);
goto end;
}
for (i = 0; i < 62; i++) {
get_arg(arg, sizeof(arg), &p);
if (!arg[0])
break;
feed->child_argv[i] = av_strdup(arg);
if (!feed->child_argv[i]) {
ret = AVERROR(ENOMEM);
goto end;
}
}
feed->child_argv[i] =
av_asprintf("http://%s:%d/%s",
(my_http_addr.sin_addr.s_addr == INADDR_ANY) ? "127.0.0.1" :
inet_ntoa(my_http_addr.sin_addr), ntohs(my_http_addr.sin_port),
feed->filename);
if (!feed->child_argv[i]) {
ret = AVERROR(ENOMEM);
goto end;
}
feed->child_argv[i] = av_asprintf("http://%s:%d/%s",
(my_http_addr.sin_addr.s_addr == INADDR_ANY) ? "127.0.0.1" :
inet_ntoa(my_http_addr.sin_addr),
ntohs(my_http_addr.sin_port), feed->filename);
}
} else if (!av_strcasecmp(cmd, "File") || !av_strcasecmp(cmd, "ReadOnlyFile")) {
} else if (!av_strcasecmp(cmd, "ReadOnlyFile")) {
if (feed) {
get_arg(feed->feed_filename, sizeof(feed->feed_filename), &p);
feed->readonly = !av_strcasecmp(cmd, "ReadOnlyFile");
feed->readonly = 1;
} else if (stream) {
get_arg(stream->feed_filename, sizeof(stream->feed_filename), &p);
}
} else if (!av_strcasecmp(cmd, "File")) {
if (feed) {
get_arg(feed->feed_filename, sizeof(feed->feed_filename), &p);
} else if (stream)
get_arg(stream->feed_filename, sizeof(stream->feed_filename), &p);
} else if (!av_strcasecmp(cmd, "Truncate")) {
if (feed) {
get_arg(arg, sizeof(arg), &p);
/* assume Truncate is true in case no argument is specified */
if (!arg[0]) {
feed->truncate = 1;
} else {
WARNING("Truncate N syntax in configuration file is deprecated, "
"use Truncate alone with no arguments\n");
feed->truncate = strtod(arg, NULL);
}
feed->truncate = strtod(arg, NULL);
}
} else if (!av_strcasecmp(cmd, "FileMaxSize")) {
if (feed) {
@@ -4268,10 +4249,6 @@ static int parse_ffconfig(const char *filename)
} else {
FFStream *s;
stream = av_mallocz(sizeof(FFStream));
if (!stream) {
ret = AVERROR(ENOMEM);
goto end;
}
get_arg(stream->filename, sizeof(stream->filename), &p);
q = strrchr(stream->filename, '>');
if (q)
@@ -4309,7 +4286,7 @@ static int parse_ffconfig(const char *filename)
sfeed = sfeed->next_feed;
}
if (!sfeed)
ERROR("Feed with name '%s' for stream '%s' is not defined\n", arg, stream->filename);
ERROR("feed '%s' not defined\n", arg);
else
stream->feed = sfeed;
}
@@ -4348,36 +4325,18 @@ static int parse_ffconfig(const char *filename)
} else {
ERROR("FaviconURL only permitted for status streams\n");
}
} else if (!av_strcasecmp(cmd, "Author") ||
!av_strcasecmp(cmd, "Comment") ||
!av_strcasecmp(cmd, "Copyright") ||
!av_strcasecmp(cmd, "Title")) {
get_arg(arg, sizeof(arg), &p);
if (stream) {
char key[32];
int i, ret;
for (i = 0; i < strlen(cmd); i++)
key[i] = av_tolower(cmd[i]);
key[i] = 0;
WARNING("'%s' option in configuration file is deprecated, "
"use 'Metadata %s VALUE' instead\n", cmd, key);
if ((ret = av_dict_set(&stream->metadata, key, arg, 0)) < 0) {
ERROR("Could not set metadata '%s' to value '%s': %s\n",
key, arg, av_err2str(ret));
}
}
} else if (!av_strcasecmp(cmd, "Metadata")) {
get_arg(arg, sizeof(arg), &p);
get_arg(arg2, sizeof(arg2), &p);
if (stream) {
int ret;
if ((ret = av_dict_set(&stream->metadata, arg, arg2, 0)) < 0) {
ERROR("Could not set metadata '%s' to value '%s': %s\n",
arg, arg2, av_err2str(ret));
}
}
} else if (!av_strcasecmp(cmd, "Author")) {
if (stream)
get_arg(stream->author, sizeof(stream->author), &p);
} else if (!av_strcasecmp(cmd, "Comment")) {
if (stream)
get_arg(stream->comment, sizeof(stream->comment), &p);
} else if (!av_strcasecmp(cmd, "Copyright")) {
if (stream)
get_arg(stream->copyright, sizeof(stream->copyright), &p);
} else if (!av_strcasecmp(cmd, "Title")) {
if (stream)
get_arg(stream->title, sizeof(stream->title), &p);
} else if (!av_strcasecmp(cmd, "Preroll")) {
get_arg(arg, sizeof(arg), &p);
if (stream)
@@ -4387,13 +4346,13 @@ static int parse_ffconfig(const char *filename)
stream->send_on_key = 1;
} else if (!av_strcasecmp(cmd, "AudioCodec")) {
get_arg(arg, sizeof(arg), &p);
audio_id = opt_codec(arg, AVMEDIA_TYPE_AUDIO);
audio_id = opt_audio_codec(arg);
if (audio_id == AV_CODEC_ID_NONE) {
ERROR("Unknown AudioCodec: %s\n", arg);
}
} else if (!av_strcasecmp(cmd, "VideoCodec")) {
get_arg(arg, sizeof(arg), &p);
video_id = opt_codec(arg, AVMEDIA_TYPE_VIDEO);
video_id = opt_video_codec(arg);
if (video_id == AV_CODEC_ID_NONE) {
ERROR("Unknown VideoCodec: %s\n", arg);
}
@@ -4413,6 +4372,11 @@ static int parse_ffconfig(const char *filename)
get_arg(arg, sizeof(arg), &p);
if (stream)
audio_enc.sample_rate = atoi(arg);
} else if (!av_strcasecmp(cmd, "AudioQuality")) {
get_arg(arg, sizeof(arg), &p);
if (stream) {
// audio_enc.quality = atof(arg) * 1000;
}
} else if (!av_strcasecmp(cmd, "VideoBitRateRange")) {
if (stream) {
int minrate, maxrate;
@@ -4454,14 +4418,10 @@ static int parse_ffconfig(const char *filename)
} else if (!av_strcasecmp(cmd, "VideoSize")) {
get_arg(arg, sizeof(arg), &p);
if (stream) {
ret = av_parse_video_size(&video_enc.width, &video_enc.height, arg);
if (ret < 0) {
ERROR("Invalid video size '%s'\n", arg);
} else {
if ((video_enc.width % 16) != 0 ||
(video_enc.height % 16) != 0) {
ERROR("Image size must be a multiple of 16\n");
}
av_parse_video_size(&video_enc.width, &video_enc.height, arg);
if ((video_enc.width % 16) != 0 ||
(video_enc.height % 16) != 0) {
ERROR("Image size must be a multiple of 16\n");
}
}
} else if (!av_strcasecmp(cmd, "VideoFrameRate")) {
@@ -4500,6 +4460,7 @@ static int parse_ffconfig(const char *filename)
}
} else if (!av_strcasecmp(cmd, "AVOptionVideo") ||
!av_strcasecmp(cmd, "AVOptionAudio")) {
char arg2[1024];
AVCodecContext *avctx;
int type;
get_arg(arg, sizeof(arg), &p);
@@ -4512,7 +4473,7 @@ static int parse_ffconfig(const char *filename)
type = AV_OPT_FLAG_AUDIO_PARAM;
}
if (ffserver_opt_default(arg, arg2, avctx, type|AV_OPT_FLAG_ENCODING_PARAM)) {
ERROR("Error setting %s option to %s %s\n", cmd, arg, arg2);
ERROR("AVOption error: %s %s\n", arg, arg2);
}
} else if (!av_strcasecmp(cmd, "AVPresetVideo") ||
!av_strcasecmp(cmd, "AVPresetAudio")) {
@@ -4643,10 +4604,6 @@ static int parse_ffconfig(const char *filename)
ERROR("Already in a tag\n");
} else {
redirect = av_mallocz(sizeof(FFStream));
if (!redirect) {
ret = AVERROR(ENOMEM);
goto end;
}
*last_stream = redirect;
last_stream = &redirect->next;
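
Several hunks in parse_ffconfig() add or drop NULL checks after av_mallocz(). A hedged sketch of the check-and-propagate pattern, assuming libavutil (alloc_entry() is an invented wrapper, not ffserver code):

#include <libavutil/error.h>
#include <libavutil/mem.h>

static void *alloc_entry(size_t size, int *err)
{
    void *p = av_mallocz(size);    /* zero-initialized allocation */
    if (!p)
        *err = AVERROR(ENOMEM);    /* propagate instead of continuing with NULL */
    return p;
}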
@@ -4676,12 +4633,9 @@ static int parse_ffconfig(const char *filename)
}
#undef ERROR
end:
fclose(f);
if (ret < 0)
return ret;
if (errors)
return AVERROR(EINVAL);
return -1;
else
return 0;
}
@@ -4736,7 +4690,6 @@ static const OptionDef options[] = {
int main(int argc, char **argv)
{
struct sigaction sigact = { { 0 } };
int ret = 0;
config_filename = av_strdup("/etc/ffserver.conf");
@@ -4758,9 +4711,8 @@ int main(int argc, char **argv)
sigact.sa_flags = SA_NOCLDSTOP | SA_RESTART;
sigaction(SIGCHLD, &sigact, 0);
if ((ret = parse_ffconfig(config_filename)) < 0) {
fprintf(stderr, "Error reading configuration file '%s': %s\n",
config_filename, av_err2str(ret));
if (parse_ffconfig(config_filename) < 0) {
fprintf(stderr, "Incorrect config file - exiting.\n");
exit(1);
}
av_freep(&config_filename);

@@ -26,7 +26,6 @@
#include "libavutil/avassert.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
@@ -132,8 +131,7 @@ typedef struct CFrameBuffer {
typedef struct FourXContext {
AVCodecContext *avctx;
DSPContext dsp;
uint16_t *frame_buffer;
uint16_t *last_frame_buffer;
AVFrame *current_picture, *last_picture;
GetBitContext pre_gb; ///< ac/dc prefix
GetBitContext gb;
GetByteContext g;
@@ -343,7 +341,7 @@ static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
int code = get_vlc2(&f->gb,
block_type_vlc[1 - (f->version > 1)][index].table,
BLOCK_TYPE_VLC_BITS, 1);
uint16_t *start = f->last_frame_buffer;
uint16_t *start = (uint16_t *)f->last_picture->data[0];
uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w);
int ret;
int scale = 1;
@@ -416,18 +414,29 @@ static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
return 0;
}
static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
static int decode_p_frame(FourXContext *f, AVFrame *frame,
const uint8_t *buf, int length)
{
int x, y;
const int width = f->avctx->width;
const int height = f->avctx->height;
uint16_t *dst = f->frame_buffer;
uint16_t *dst = (uint16_t *)frame->data[0];
const int stride = frame->linesize[0] >> 1;
uint16_t *src;
unsigned int bitstream_size, bytestream_size, wordstream_size, extra,
bytestream_offset, wordstream_offset;
int ret;
src = f->last_frame_buffer;
if (!f->last_picture->data[0]) {
if ((ret = ff_get_buffer(f->avctx, f->last_picture,
AV_GET_BUFFER_FLAG_REF)) < 0) {
return ret;
}
for (y=0; y<f->avctx->height; y++)
memset(f->last_picture->data[0] + y*f->last_picture->linesize[0], 0, 2*f->avctx->width);
}
src = (uint16_t *)f->last_picture->data[0];
if (f->version > 1) {
extra = 20;
@@ -452,12 +461,14 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
return AVERROR_INVALIDDATA;
}
av_fast_padded_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size,
bitstream_size);
av_fast_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size,
bitstream_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!f->bitstream_buffer)
return AVERROR(ENOMEM);
f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)(buf + extra),
bitstream_size / 4);
memset((uint8_t*)f->bitstream_buffer + bitstream_size,
0, FF_INPUT_BUFFER_PADDING_SIZE);
init_get_bits(&f->gb, f->bitstream_buffer, 8 * bitstream_size);
wordstream_offset = extra + bitstream_size;
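
The two sides of this hunk allocate the bitstream buffer differently: av_fast_padded_malloc() grows the buffer and zeroes FF_INPUT_BUFFER_PADDING_SIZE trailing bytes in one call, while the older pair uses av_fast_malloc() plus an explicit memset() of the padding. A hedged sketch of the padded variant, assuming libavcodec (grow_bitstream_buffer() is an invented helper):

#include <libavcodec/avcodec.h>

static int grow_bitstream_buffer(uint8_t **buf, unsigned int *size,
                                 unsigned int needed)
{
    /* reallocates only when needed and zeroes the trailing padding,
     * so the bit reader can safely over-read a few bytes */
    av_fast_padded_malloc(buf, size, needed);
    return *buf ? 0 : AVERROR(ENOMEM);
}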
@@ -467,14 +478,14 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
bytestream2_init(&f->g, buf + bytestream_offset,
length - bytestream_offset);
init_mv(f, width * 2);
init_mv(f, frame->linesize[0]);
for (y = 0; y < height; y += 8) {
for (x = 0; x < width; x += 8)
if ((ret = decode_p_block(f, dst + x, src + x, 3, 3, width)) < 0)
if ((ret = decode_p_block(f, dst + x, src + x, 3, 3, stride)) < 0)
return ret;
src += 8 * width;
dst += 8 * width;
src += 8 * stride;
dst += 8 * stride;
}
return 0;
@@ -539,12 +550,12 @@ static int decode_i_block(FourXContext *f, int16_t *block)
return 0;
}
static inline void idct_put(FourXContext *f, int x, int y)
static inline void idct_put(FourXContext *f, AVFrame *frame, int x, int y)
{
int16_t (*block)[64] = f->block;
int stride = f->avctx->width;
int stride = frame->linesize[0] >> 1;
int i;
uint16_t *dst = f->frame_buffer + y * stride + x;
uint16_t *dst = ((uint16_t*)frame->data[0]) + y * stride + x;
for (i = 0; i < 4; i++) {
block[i][0] += 0x80 * 8 * 8;
@@ -704,13 +715,14 @@ static int mix(int c0, int c1)
return red / 3 * 1024 + green / 3 * 32 + blue / 3;
}
static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
static int decode_i2_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, int length)
{
int x, y, x2, y2;
const int width = f->avctx->width;
const int height = f->avctx->height;
const int mbs = (FFALIGN(width, 16) >> 4) * (FFALIGN(height, 16) >> 4);
uint16_t *dst = f->frame_buffer;
uint16_t *dst = (uint16_t*)frame->data[0];
const int stride = frame->linesize[0]>>1;
const uint8_t *buf_end = buf + length;
GetByteContext g3;
@@ -741,18 +753,18 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
for (y2 = 0; y2 < 16; y2++) {
for (x2 = 0; x2 < 16; x2++) {
int index = 2 * (x2 >> 2) + 8 * (y2 >> 2);
dst[y2 * width + x2] = color[(bits >> index) & 3];
dst[y2 * stride + x2] = color[(bits >> index) & 3];
}
}
dst += 16;
}
dst += 16 * width - x;
dst += 16 * stride - x;
}
return 0;
}
static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
static int decode_i_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, int length)
{
int x, y, ret;
const int width = f->avctx->width;
@@ -791,12 +803,14 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
prestream_size = length + buf - prestream;
av_fast_padded_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size,
prestream_size);
av_fast_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size,
prestream_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!f->bitstream_buffer)
return AVERROR(ENOMEM);
f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)prestream,
prestream_size / 4);
memset((uint8_t*)f->bitstream_buffer + prestream_size,
0, FF_INPUT_BUFFER_PADDING_SIZE);
init_get_bits(&f->pre_gb, f->bitstream_buffer, 8 * prestream_size);
f->last_dc = 0 * 128 * 8 * 8;
@@ -806,7 +820,7 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
if ((ret = decode_i_mb(f)) < 0)
return ret;
idct_put(f, x, y);
idct_put(f, frame, x, y);
}
}
@@ -909,24 +923,29 @@ static int decode_frame(AVCodecContext *avctx, void *data,
frame_size = buf_size - 12;
}
if ((ret = ff_get_buffer(avctx, picture, 0)) < 0)
FFSWAP(AVFrame*, f->current_picture, f->last_picture);
// alternatively we would have to use our own buffer management
avctx->flags |= CODEC_FLAG_EMU_EDGE;
if ((ret = ff_reget_buffer(avctx, f->current_picture)) < 0)
return ret;
if (frame_4cc == AV_RL32("ifr2")) {
picture->pict_type = AV_PICTURE_TYPE_I;
if ((ret = decode_i2_frame(f, buf - 4, frame_size + 4)) < 0) {
f->current_picture->pict_type = AV_PICTURE_TYPE_I;
if ((ret = decode_i2_frame(f, f->current_picture, buf - 4, frame_size + 4)) < 0) {
av_log(f->avctx, AV_LOG_ERROR, "decode i2 frame failed\n");
return ret;
}
} else if (frame_4cc == AV_RL32("ifrm")) {
picture->pict_type = AV_PICTURE_TYPE_I;
if ((ret = decode_i_frame(f, buf, frame_size)) < 0) {
f->current_picture->pict_type = AV_PICTURE_TYPE_I;
if ((ret = decode_i_frame(f, f->current_picture, buf, frame_size)) < 0) {
av_log(f->avctx, AV_LOG_ERROR, "decode i frame failed\n");
return ret;
}
} else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) {
picture->pict_type = AV_PICTURE_TYPE_P;
if ((ret = decode_p_frame(f, buf, frame_size)) < 0) {
f->current_picture->pict_type = AV_PICTURE_TYPE_P;
if ((ret = decode_p_frame(f, f->current_picture, buf, frame_size)) < 0) {
av_log(f->avctx, AV_LOG_ERROR, "decode p frame failed\n");
return ret;
}
@@ -938,13 +957,10 @@ static int decode_frame(AVCodecContext *avctx, void *data,
buf_size);
}
picture->key_frame = picture->pict_type == AV_PICTURE_TYPE_I;
av_image_copy_plane(picture->data[0], picture->linesize[0],
(const uint8_t*)f->frame_buffer, avctx->width * 2,
avctx->width * 2, avctx->height);
FFSWAP(uint16_t *, f->frame_buffer, f->last_frame_buffer);
f->current_picture->key_frame = f->current_picture->pict_type == AV_PICTURE_TYPE_I;
if ((ret = av_frame_ref(picture, f->current_picture)) < 0)
return ret;
*got_frame = 1;
emms_c();
@@ -952,28 +968,9 @@ static int decode_frame(AVCodecContext *avctx, void *data,
return buf_size;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
FourXContext * const f = avctx->priv_data;
int i;
av_freep(&f->frame_buffer);
av_freep(&f->last_frame_buffer);
av_freep(&f->bitstream_buffer);
f->bitstream_buffer_size = 0;
for (i = 0; i < CFRAME_BUFFER_COUNT; i++) {
av_freep(&f->cfrm[i].data);
f->cfrm[i].allocated_size = 0;
}
ff_free_vlc(&f->pre_vlc);
return 0;
}
static av_cold int decode_init(AVCodecContext *avctx)
{
FourXContext * const f = avctx->priv_data;
int ret;
if (avctx->extradata_size != 4 || !avctx->extradata) {
av_log(avctx, AV_LOG_ERROR, "extradata wrong or missing\n");
@@ -984,17 +981,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
return AVERROR_INVALIDDATA;
}
ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
if (ret < 0)
return ret;
f->frame_buffer = av_mallocz(avctx->width * avctx->height * 2);
f->last_frame_buffer = av_mallocz(avctx->width * avctx->height * 2);
if (!f->frame_buffer || !f->last_frame_buffer) {
decode_end(avctx);
return AVERROR(ENOMEM);
}
f->version = AV_RL32(avctx->extradata) >> 16;
ff_dsputil_init(&f->dsp, avctx);
f->avctx = avctx;
@@ -1005,6 +991,30 @@ static av_cold int decode_init(AVCodecContext *avctx)
else
avctx->pix_fmt = AV_PIX_FMT_BGR555;
f->current_picture = av_frame_alloc();
f->last_picture = av_frame_alloc();
if (!f->current_picture || !f->last_picture)
return AVERROR(ENOMEM);
return 0;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
FourXContext * const f = avctx->priv_data;
int i;
av_freep(&f->bitstream_buffer);
f->bitstream_buffer_size = 0;
for (i = 0; i < CFRAME_BUFFER_COUNT; i++) {
av_freep(&f->cfrm[i].data);
f->cfrm[i].allocated_size = 0;
}
ff_free_vlc(&f->pre_vlc);
av_frame_free(&f->current_picture);
av_frame_free(&f->last_picture);
return 0;
}
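
The decode_frame hunk above restores a two-frame reference scheme: the decoder keeps current/last pictures and swaps the pointers each frame (FFSWAP in the real code) instead of copying pixel data between flat buffers. A self-contained toy version of that pointer swap (SWAP and the int buffers are invented stand-ins):

#include <stdio.h>

#define SWAP(type, a, b) do { type tmp = (a); (a) = (b); (b) = tmp; } while (0)

int main(void)
{
    int buf_a = 0, buf_b = 0;
    int *current = &buf_a, *last = &buf_b;

    for (int frame = 1; frame <= 3; frame++) {
        SWAP(int *, current, last);  /* previous output becomes the reference */
        *current = frame;            /* "decode" into the recycled buffer */
        printf("frame %d: current=%d last=%d\n", frame, *current, *last);
    }
    return 0;
}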

@@ -31,6 +31,8 @@ OBJS = allcodecs.o \
resample2.o \
utils.o \
OBJS-$(HAVE_MSVCRT) += file_open.o
# parts needed for many different codecs
OBJS-$(CONFIG_AANDCTTABLES) += aandcttab.o
OBJS-$(CONFIG_AC3DSP) += ac3dsp.o
@@ -48,16 +50,13 @@ OBJS-$(CONFIG_FFT) += avfft.o fft_fixed.o fft_float.o \
fft_fixed_32.o fft_init_table.o \
$(FFT-OBJS-yes)
OBJS-$(CONFIG_GOLOMB) += golomb.o
OBJS-$(CONFIG_H263DSP) += h263dsp.o
OBJS-$(CONFIG_H264CHROMA) += h264chroma.o
OBJS-$(CONFIG_H264DSP) += h264dsp.o h264idct.o
OBJS-$(CONFIG_H264PRED) += h264pred.o
OBJS-$(CONFIG_H264QPEL) += h264qpel.o
OBJS-$(CONFIG_HPELDSP) += hpeldsp.o
OBJS-$(CONFIG_HUFFMAN) += huffman.o
OBJS-$(CONFIG_INTRAX8) += intrax8.o intrax8dsp.o
OBJS-$(CONFIG_LIBXVID) += libxvid_rc.o
OBJS-$(CONFIG_LLVIDDSP) += lossless_videodsp.o
OBJS-$(CONFIG_LPC) += lpc.o
OBJS-$(CONFIG_LSP) += lsp.o
OBJS-$(CONFIG_MDCT) += mdct_fixed.o mdct_float.o mdct_fixed_32.o
@@ -126,8 +125,6 @@ OBJS-$(CONFIG_ASV2_DECODER) += asvdec.o asv.o mpeg12data.o
OBJS-$(CONFIG_ASV2_ENCODER) += asvenc.o asv.o mpeg12data.o
OBJS-$(CONFIG_ATRAC1_DECODER) += atrac1.o atrac.o
OBJS-$(CONFIG_ATRAC3_DECODER) += atrac3.o atrac.o
OBJS-$(CONFIG_ATRAC3P_DECODER) += atrac3plusdec.o atrac3plus.o \
atrac3plusdsp.o atrac.o
OBJS-$(CONFIG_AURA_DECODER) += cyuv.o
OBJS-$(CONFIG_AURA2_DECODER) += aura.o
OBJS-$(CONFIG_AVRN_DECODER) += avrndec.o mjpegdec.o mjpeg.o
@@ -155,7 +152,6 @@ OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdec.o cavsdsp.o \
OBJS-$(CONFIG_CDGRAPHICS_DECODER) += cdgraphics.o
OBJS-$(CONFIG_CDXL_DECODER) += cdxl.o
OBJS-$(CONFIG_CINEPAK_DECODER) += cinepak.o
OBJS-$(CONFIG_CINEPAK_ENCODER) += cinepakenc.o elbg.o
OBJS-$(CONFIG_CLJR_DECODER) += cljr.o
OBJS-$(CONFIG_CLJR_ENCODER) += cljr.o
OBJS-$(CONFIG_CLLC_DECODER) += cllc.o
@@ -182,7 +178,7 @@ OBJS-$(CONFIG_DVBSUB_ENCODER) += dvbsub.o
OBJS-$(CONFIG_DVDSUB_DECODER) += dvdsubdec.o
OBJS-$(CONFIG_DVDSUB_ENCODER) += dvdsubenc.o
OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o dv_profile.o
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dvenc.o dv.o dvdata.o dv_profile.o
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dv.o dvdata.o dv_profile.o
OBJS-$(CONFIG_DXA_DECODER) += dxa.o
OBJS-$(CONFIG_DXTORY_DECODER) += dxtory.o
OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o
@@ -206,7 +202,6 @@ OBJS-$(CONFIG_FFV1_ENCODER) += ffv1enc.o ffv1.o
OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o huffyuvdec.o
OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o huffyuvenc.o
OBJS-$(CONFIG_FFWAVESYNTH_DECODER) += ffwavesynth.o
OBJS-$(CONFIG_FIC_DECODER) += fic.o
OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o flacdsp.o
OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o flacdsp.o vorbis_data.o
OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
@@ -241,7 +236,7 @@ OBJS-$(CONFIG_H264_VDA_DECODER) += vda_h264_dec.o
OBJS-$(CONFIG_HEVC_DECODER) += hevc.o hevc_mvs.o hevc_ps.o hevc_sei.o \
hevc_cabac.o hevc_refs.o hevcpred.o \
hevcdsp.o hevc_filter.o cabac.o
OBJS-$(CONFIG_HNM4_VIDEO_DECODER) += hnm4video.o
OBJS-$(CONFIG_HUFFYUV_DECODER) += huffyuv.o huffyuvdec.o
OBJS-$(CONFIG_HUFFYUV_ENCODER) += huffyuv.o huffyuvenc.o
OBJS-$(CONFIG_IAC_DECODER) += imc.o
@@ -285,34 +280,40 @@ OBJS-$(CONFIG_MMVIDEO_DECODER) += mmvideo.o
OBJS-$(CONFIG_MOTIONPIXELS_DECODER) += motionpixels.o
OBJS-$(CONFIG_MOVTEXT_DECODER) += movtextdec.o ass.o
OBJS-$(CONFIG_MOVTEXT_ENCODER) += movtextenc.o ass_split.o
OBJS-$(CONFIG_MP1_DECODER) += mpegaudiodec_fixed.o
OBJS-$(CONFIG_MP1_DECODER) += mpegaudiodec.o
OBJS-$(CONFIG_MP1FLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MP2_DECODER) += mpegaudiodec_fixed.o
OBJS-$(CONFIG_MP2_ENCODER) += mpegaudioenc_float.o mpegaudio.o \
mpegaudiodata.o mpegaudiodsp_data.o
OBJS-$(CONFIG_MP2FIXED_ENCODER) += mpegaudioenc_fixed.o mpegaudio.o \
OBJS-$(CONFIG_MP2_DECODER) += mpegaudiodec.o
OBJS-$(CONFIG_MP2_ENCODER) += mpegaudioenc.o mpegaudio.o \
mpegaudiodata.o mpegaudiodsp_data.o
OBJS-$(CONFIG_MP2FLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MP3_DECODER) += mpegaudiodec_fixed.o
OBJS-$(CONFIG_MP3ADU_DECODER) += mpegaudiodec_fixed.o
OBJS-$(CONFIG_MP3_DECODER) += mpegaudiodec.o
OBJS-$(CONFIG_MP3ADU_DECODER) += mpegaudiodec.o
OBJS-$(CONFIG_MP3ADUFLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MP3FLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MP3ON4_DECODER) += mpegaudiodec_fixed.o mpeg4audio.o
OBJS-$(CONFIG_MP3ON4_DECODER) += mpegaudiodec.o mpeg4audio.o
OBJS-$(CONFIG_MP3ON4FLOAT_DECODER) += mpegaudiodec_float.o mpeg4audio.o
OBJS-$(CONFIG_MPC7_DECODER) += mpc7.o mpc.o
OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o
OBJS-$(CONFIG_MPEGVIDEO_DECODER) += mpeg12.o mpeg12data.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_MPEG_XVMC_DECODER) += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG1VIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEG1VIDEO_ENCODER) += mpeg12enc.o mpeg12.o
OBJS-$(CONFIG_MPEG2VIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEG2VIDEO_ENCODER) += mpeg12enc.o mpeg12.o
OBJS-$(CONFIG_MPEG2VIDEO_ENCODER) += mpeg12enc.o mpeg12.o \
timecode.o
OBJS-$(CONFIG_MPL2_DECODER) += mpl2dec.o ass.o
OBJS-$(CONFIG_MSMPEG4V1_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_MSMPEG4V2_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_MSMPEG4V2_ENCODER) += msmpeg4enc.o msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_MSMPEG4V3_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4enc.o msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_MSMPEG4V2_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o \
h263dec.o h263.o ituh263dec.o \
mpeg4videodec.o
OBJS-$(CONFIG_MSMPEG4V2_ENCODER) += msmpeg4.o msmpeg4enc.o msmpeg4data.o \
h263.o
OBJS-$(CONFIG_MSMPEG4V3_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o \
h263dec.o h263.o ituh263dec.o \
mpeg4videodec.o
OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4.o msmpeg4enc.o msmpeg4data.o \
h263.o
OBJS-$(CONFIG_MSRLE_DECODER) += msrle.o msrledec.o
OBJS-$(CONFIG_MSA1_DECODER) += mss3.o mss34dsp.o
OBJS-$(CONFIG_MSS1_DECODER) += mss1.o mss12.o
@@ -330,27 +331,27 @@ OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
OBJS-$(CONFIG_PAF_VIDEO_DECODER) += paf.o
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += paf.o
OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PAM_ENCODER) += pamenc.o
OBJS-$(CONFIG_PAM_ENCODER) += pamenc.o pnm.o
OBJS-$(CONFIG_PBM_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PBM_ENCODER) += pnmenc.o
OBJS-$(CONFIG_PBM_ENCODER) += pnmenc.o pnm.o
OBJS-$(CONFIG_PCX_DECODER) += pcx.o
OBJS-$(CONFIG_PCX_ENCODER) += pcxenc.o
OBJS-$(CONFIG_PGM_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PGM_ENCODER) += pnmenc.o
OBJS-$(CONFIG_PGM_ENCODER) += pnmenc.o pnm.o
OBJS-$(CONFIG_PGMYUV_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PGMYUV_ENCODER) += pnmenc.o
OBJS-$(CONFIG_PGMYUV_ENCODER) += pnmenc.o pnm.o
OBJS-$(CONFIG_PGSSUB_DECODER) += pgssubdec.o
OBJS-$(CONFIG_PICTOR_DECODER) += pictordec.o cga_data.o
OBJS-$(CONFIG_PJS_DECODER) += textdec.o ass.o
OBJS-$(CONFIG_PNG_DECODER) += png.o pngdec.o pngdsp.o
OBJS-$(CONFIG_PNG_ENCODER) += png.o pngenc.o
OBJS-$(CONFIG_PPM_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PPM_ENCODER) += pnmenc.o
OBJS-$(CONFIG_PPM_ENCODER) += pnmenc.o pnm.o
OBJS-$(CONFIG_PRORES_DECODER) += proresdec2.o proresdsp.o proresdata.o
OBJS-$(CONFIG_PRORES_LGPL_DECODER) += proresdec_lgpl.o proresdsp.o proresdata.o
OBJS-$(CONFIG_PRORES_ENCODER) += proresenc_anatoliy.o
OBJS-$(CONFIG_PRORES_AW_ENCODER) += proresenc_anatoliy.o
OBJS-$(CONFIG_PRORES_KS_ENCODER) += proresenc_kostya.o proresdata.o
OBJS-$(CONFIG_PRORES_KS_ENCODER) += proresenc_kostya.o proresdata.o proresdsp.o
OBJS-$(CONFIG_PTX_DECODER) += ptx.o
OBJS-$(CONFIG_QCELP_DECODER) += qcelpdec.o \
celp_filters.o acelp_vectors.o \
@@ -439,7 +440,7 @@ OBJS-$(CONFIG_TRUEMOTION2_DECODER) += truemotion2.o
OBJS-$(CONFIG_TRUESPEECH_DECODER) += truespeech.o
OBJS-$(CONFIG_TSCC_DECODER) += tscc.o msrledec.o
OBJS-$(CONFIG_TSCC2_DECODER) += tscc2.o
OBJS-$(CONFIG_TTA_DECODER) += tta.o ttadata.o ttadsp.o
OBJS-$(CONFIG_TTA_DECODER) += tta.o ttadata.o
OBJS-$(CONFIG_TTA_ENCODER) += ttaenc.o ttadata.o
OBJS-$(CONFIG_TWINVQ_DECODER) += twinvqdec.o twinvq.o
OBJS-$(CONFIG_TXD_DECODER) += txd.o s3tc.o
@@ -459,7 +460,7 @@ OBJS-$(CONFIG_VB_DECODER) += vb.o
OBJS-$(CONFIG_VBLE_DECODER) += vble.o
OBJS-$(CONFIG_VC1_DECODER) += vc1dec.o vc1.o vc1data.o vc1dsp.o \
msmpeg4dec.o msmpeg4.o msmpeg4data.o \
wmv2dsp.o
intrax8.o intrax8dsp.o wmv2dsp.o
OBJS-$(CONFIG_VCR1_DECODER) += vcr1.o
OBJS-$(CONFIG_VMDAUDIO_DECODER) += vmdav.o
OBJS-$(CONFIG_VMDVIDEO_DECODER) += vmdav.o
@@ -480,7 +481,7 @@ OBJS-$(CONFIG_VQA_DECODER) += vqavideo.o
OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
OBJS-$(CONFIG_WAVPACK_ENCODER) += wavpackenc.o
OBJS-$(CONFIG_WEBP_DECODER) += vp8.o vp8dsp.o vp56rac.o
OBJS-$(CONFIG_WEBP_DECODER) += webp.o exif.o tiff_common.o
OBJS-$(CONFIG_WEBP_DECODER) += webp.o
OBJS-$(CONFIG_WEBVTT_DECODER) += webvttdec.o
OBJS-$(CONFIG_WMALOSSLESS_DECODER) += wmalosslessdec.o wma_common.o
OBJS-$(CONFIG_WMAPRO_DECODER) += wmaprodec.o wma.o wma_common.o
@@ -493,7 +494,8 @@ OBJS-$(CONFIG_WMAVOICE_DECODER) += wmavoice.o \
acelp_vectors.o acelp_filters.o
OBJS-$(CONFIG_WMV1_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_WMV2_DECODER) += wmv2dec.o wmv2.o wmv2dsp.o \
msmpeg4dec.o msmpeg4.o msmpeg4data.o
msmpeg4dec.o msmpeg4.o msmpeg4data.o \
intrax8.o intrax8dsp.o
OBJS-$(CONFIG_WMV2_ENCODER) += wmv2enc.o wmv2.o wmv2dsp.o \
msmpeg4.o msmpeg4enc.o msmpeg4data.o
OBJS-$(CONFIG_WNV1_DECODER) += wnv1.o
@@ -596,7 +598,6 @@ OBJS-$(CONFIG_ADPCM_G722_DECODER) += g722.o g722dec.o
OBJS-$(CONFIG_ADPCM_G722_ENCODER) += g722.o g722enc.o
OBJS-$(CONFIG_ADPCM_G726_DECODER) += g726.o
OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o
OBJS-$(CONFIG_ADPCM_G726LE_DECODER) += g726.o
OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_APC_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_DK3_DECODER) += adpcm.o adpcm_data.o
@@ -633,11 +634,9 @@ OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o
OBJS-$(CONFIG_H264_VDA_HWACCEL) += vda_h264.o
OBJS-$(CONFIG_H264_VDPAU_HWACCEL) += vdpau_h264.o
OBJS-$(CONFIG_MPEG1_VDPAU_HWACCEL) += vdpau_mpeg12.o
OBJS-$(CONFIG_MPEG1_XVMC_HWACCEL) += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG2_DXVA2_HWACCEL) += dxva2_mpeg2.o
OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o vaapi_mpeg.o
OBJS-$(CONFIG_MPEG2_VDPAU_HWACCEL) += vdpau_mpeg12.o
OBJS-$(CONFIG_MPEG2_XVMC_HWACCEL) += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o vaapi_mpeg.o
OBJS-$(CONFIG_MPEG4_VDPAU_HWACCEL) += vdpau_mpeg4.o
OBJS-$(CONFIG_VC1_DXVA2_HWACCEL) += dxva2_vc1.o
@@ -650,7 +649,7 @@ OBJS-$(CONFIG_ADX_DEMUXER) += adx.o
OBJS-$(CONFIG_CAF_DEMUXER) += mpeg4audio.o mpegaudiodata.o \
ac3tab.o
OBJS-$(CONFIG_DV_DEMUXER) += dv_profile.o
OBJS-$(CONFIG_DV_MUXER) += dv_profile.o
OBJS-$(CONFIG_DV_MUXER) += dv_profile.o timecode.o
OBJS-$(CONFIG_FLAC_DEMUXER) += flac.o flacdata.o vorbis_data.o \
vorbis_parser.o xiph.o
OBJS-$(CONFIG_FLAC_MUXER) += flac.o flacdata.o vorbis_data.o
@@ -666,11 +665,11 @@ OBJS-$(CONFIG_MATROSKA_MUXER) += mpeg4audio.o mpegaudiodata.o \
flac.o flacdata.o vorbis_data.o xiph.o
OBJS-$(CONFIG_MP2_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
OBJS-$(CONFIG_MP3_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ac3tab.o
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ac3tab.o timecode.o
OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MPEGTS_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MXF_MUXER) += dnxhddata.o
OBJS-$(CONFIG_MXF_MUXER) += timecode.o dnxhddata.o
OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
OBJS-$(CONFIG_OGG_DEMUXER) += xiph.o flac.o flacdata.o \
mpeg12data.o vorbis_parser.o \
@@ -687,9 +686,6 @@ OBJS-$(CONFIG_WEBM_MUXER) += mpeg4audio.o mpegaudiodata.o \
vorbis_data.o
OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
# libavfilter dependencies
OBJS-$(CONFIG_ELBG_FILTER) += elbg.o
# external codec libraries
OBJS-$(CONFIG_LIBAACPLUS_ENCODER) += libaacplus.o
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
@@ -731,12 +727,10 @@ OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbisenc.o \
vorbis_data.o vorbis_parser.o xiph.o
OBJS-$(CONFIG_LIBVPX_VP8_DECODER) += libvpxdec.o
OBJS-$(CONFIG_LIBVPX_VP8_ENCODER) += libvpxenc.o
OBJS-$(CONFIG_LIBVPX_VP9_DECODER) += libvpxdec.o libvpx.o
OBJS-$(CONFIG_LIBVPX_VP9_ENCODER) += libvpxenc.o libvpx.o
OBJS-$(CONFIG_LIBVPX_VP9_DECODER) += libvpxdec.o
OBJS-$(CONFIG_LIBVPX_VP9_ENCODER) += libvpxenc.o
OBJS-$(CONFIG_LIBWAVPACK_ENCODER) += libwavpackenc.o
OBJS-$(CONFIG_LIBWEBP_ENCODER) += libwebpenc.o
OBJS-$(CONFIG_LIBX264_ENCODER) += libx264.o
OBJS-$(CONFIG_LIBX265_ENCODER) += libx265.o
OBJS-$(CONFIG_LIBXAVS_ENCODER) += libxavs.o
OBJS-$(CONFIG_LIBXVID_ENCODER) += libxvid.o
OBJS-$(CONFIG_LIBZVBI_TELETEXT_DECODER) += libzvbi-teletextdec.o
@@ -779,7 +773,6 @@ OBJS-$(CONFIG_MPEGAUDIO_PARSER) += mpegaudio_parser.o \
mpegaudiodecheader.o mpegaudiodata.o
OBJS-$(CONFIG_MPEGVIDEO_PARSER) += mpegvideo_parser.o \
mpeg12.o mpeg12data.o
OBJS-$(CONFIG_PNG_PARSER) += png_parser.o
OBJS-$(CONFIG_PNM_PARSER) += pnm_parser.o pnm.o
OBJS-$(CONFIG_RV30_PARSER) += rv34_parser.o
OBJS-$(CONFIG_RV40_PARSER) += rv34_parser.o
@@ -790,7 +783,6 @@ OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o vc1.o vc1data.o \
OBJS-$(CONFIG_VORBIS_PARSER) += vorbis_parser.o xiph.o
OBJS-$(CONFIG_VP3_PARSER) += vp3_parser.o
OBJS-$(CONFIG_VP8_PARSER) += vp8_parser.o
OBJS-$(CONFIG_VP9_PARSER) += vp9_parser.o
# bitstream filters
OBJS-$(CONFIG_AAC_ADTSTOASC_BSF) += aac_adtstoasc_bsf.o aacadtsdec.o \
@@ -802,6 +794,7 @@ OBJS-$(CONFIG_IMX_DUMP_HEADER_BSF) += imx_dump_header_bsf.o
OBJS-$(CONFIG_MJPEG2JPEG_BSF) += mjpeg2jpeg_bsf.o mjpeg.o
OBJS-$(CONFIG_MJPEGA_DUMP_HEADER_BSF) += mjpega_dump_header_bsf.o
OBJS-$(CONFIG_MOV2TEXTSUB_BSF) += movsub_bsf.o
OBJS-$(CONFIG_MP3_HEADER_COMPRESS_BSF) += mp3_header_compress_bsf.o
OBJS-$(CONFIG_MP3_HEADER_DECOMPRESS_BSF) += mp3_header_decompress_bsf.o \
mpegaudiodata.o
OBJS-$(CONFIG_NOISE_BSF) += noise_bsf.o
@@ -809,14 +802,12 @@ OBJS-$(CONFIG_REMOVE_EXTRADATA_BSF) += remove_extradata_bsf.o
OBJS-$(CONFIG_TEXT2MOVSUB_BSF) += movsub_bsf.o
# thread libraries
OBJS-$(HAVE_LIBC_MSVCRT) += file_open.o
OBJS-$(HAVE_THREADS) += pthread.o pthread_slice.o pthread_frame.o
OBJS-$(HAVE_PTHREADS) += pthread.o
OBJS-$(HAVE_W32THREADS) += pthread.o
OBJS-$(HAVE_OS2THREADS) += pthread.o
OBJS-$(CONFIG_FRAME_THREAD_ENCODER) += frame_thread_encoder.o
# Windows resource file
SLIBOBJS-$(HAVE_GNU_WINDRES) += avcodecres.o
SKIPHEADERS += %_tablegen.h \
%_tables.h \
aac_tablegen_decl.h \
@@ -828,7 +819,7 @@ SKIPHEADERS += %_tablegen.h \
SKIPHEADERS-$(CONFIG_DXVA2) += dxva2.h dxva2_internal.h
SKIPHEADERS-$(CONFIG_LIBSCHROEDINGER) += libschroedinger.h
SKIPHEADERS-$(CONFIG_LIBUTVIDEO) += libutvideo.h
SKIPHEADERS-$(CONFIG_XVMC) += xvmc.h
SKIPHEADERS-$(CONFIG_MPEG_XVMC_DECODER) += xvmc.h
SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_internal.h
SKIPHEADERS-$(CONFIG_VDA) += vda.h
SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h vdpau_internal.h
@@ -863,7 +854,6 @@ HOSTPROGS = aac_tablegen \
CLEANFILES = *_tables.c *_tables.h *_tablegen$(HOSTEXESUF)
$(SUBDIR)dct-test$(EXESUF): $(SUBDIR)dctref.o $(SUBDIR)aandcttab.o
$(SUBDIR)dv_tablegen$(HOSTEXESUF): $(SUBDIR)dvdata_host.o
TRIG_TABLES = cos cos_fixed sin
TRIG_TABLES := $(TRIG_TABLES:%=$(SUBDIR)%_tables.c)
@@ -889,9 +879,9 @@ ifdef CONFIG_HARDCODED_TABLES
$(SUBDIR)aacdec.o: $(SUBDIR)cbrt_tables.h
$(SUBDIR)aacps.o: $(SUBDIR)aacps_tables.h
$(SUBDIR)aactab.o: $(SUBDIR)aac_tables.h
$(SUBDIR)dvenc.o: $(SUBDIR)dv_tables.h
$(SUBDIR)dv.o: $(SUBDIR)dv_tables.h
$(SUBDIR)sinewin.o: $(SUBDIR)sinewin_tables.h
$(SUBDIR)mpegaudiodec_fixed.o: $(SUBDIR)mpegaudio_tables.h
$(SUBDIR)mpegaudiodec.o: $(SUBDIR)mpegaudio_tables.h
$(SUBDIR)mpegaudiodec_float.o: $(SUBDIR)mpegaudio_tables.h
$(SUBDIR)motionpixels.o: $(SUBDIR)motionpixels_tables.h
$(SUBDIR)pcm.o: $(SUBDIR)pcm_tables.h

@@ -40,6 +40,9 @@
#define C64YRES 200
typedef struct A64Context {
/* general variables */
AVFrame picture;
/* variables for multicolor modes */
AVLFG randctx;
int mc_lifetime;
@@ -186,7 +189,6 @@ static void render_charset(AVCodecContext *avctx, uint8_t *charset,
static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
{
A64Context *c = avctx->priv_data;
av_frame_free(&avctx->coded_frame);
av_free(c->mc_meta_charset);
av_free(c->mc_best_cb);
av_free(c->mc_charset);
@@ -195,7 +197,7 @@ static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
return 0;
}
static av_cold int a64multi_encode_init(AVCodecContext *avctx)
static av_cold int a64multi_init_encoder(AVCodecContext *avctx)
{
A64Context *c = avctx->priv_data;
int a;
@@ -238,12 +240,8 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
AV_WB32(avctx->extradata, c->mc_lifetime);
AV_WB32(avctx->extradata + 16, INTERLACED);
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame) {
a64multi_close_encoder(avctx);
return AVERROR(ENOMEM);
}
avcodec_get_frame_defaults(&c->picture);
avctx->coded_frame = &c->picture;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
if (!avctx->codec_tag)
@@ -273,7 +271,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
A64Context *c = avctx->priv_data;
AVFrame *const p = avctx->coded_frame;
AVFrame *const p = &c->picture;
int frame;
int x, y;
@@ -340,8 +338,8 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
buf = pkt->data;
/* calc optimal new charset + charmaps */
avpriv_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
avpriv_do_elbg (meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
ff_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
ff_do_elbg (meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
/* create colorram map and a c64 readable charset */
render_charset(avctx, charset, colram);
@@ -402,7 +400,7 @@ AVCodec ff_a64multi_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_A64_MULTI,
.priv_data_size = sizeof(A64Context),
.init = a64multi_encode_init,
.init = a64multi_init_encoder,
.encode2 = a64multi_encode_frame,
.close = a64multi_close_encoder,
.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
@@ -416,7 +414,7 @@ AVCodec ff_a64multi5_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_A64_MULTI5,
.priv_data_size = sizeof(A64Context),
.init = a64multi_encode_init,
.init = a64multi_init_encoder,
.encode2 = a64multi_encode_frame,
.close = a64multi_close_encoder,
.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
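
The init hunk above moves avctx->coded_frame between an AVFrame embedded in the context and one allocated with av_frame_alloc() and released on close. A hedged sketch of the alloc-and-check pairing, assuming libavutil (alloc_coded_frame() is an invented helper):

#include <libavutil/error.h>
#include <libavutil/frame.h>

static int alloc_coded_frame(AVFrame **frame)
{
    *frame = av_frame_alloc();
    if (!*frame)
        return AVERROR(ENOMEM);   /* caller runs its close path on failure */
    (*frame)->pict_type = AV_PICTURE_TYPE_I;
    (*frame)->key_frame = 1;      /* every a64 frame is intra-coded */
    return 0;
}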

@@ -20,7 +20,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "parser.h"
#include "aac_ac3_parser.h"
@@ -83,23 +82,14 @@ get_next:
if (avctx->codec_id != AV_CODEC_ID_AAC) {
avctx->sample_rate = s->sample_rate;
/* (E-)AC-3: allow downmixing to stereo or mono */
#if FF_API_REQUEST_CHANNELS
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->request_channels == 1)
avctx->request_channel_layout = AV_CH_LAYOUT_MONO;
else if (avctx->request_channels == 2)
avctx->request_channel_layout = AV_CH_LAYOUT_STEREO;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (s->channels > 1 &&
avctx->request_channel_layout == AV_CH_LAYOUT_MONO) {
avctx->channels = 1;
avctx->channel_layout = AV_CH_LAYOUT_MONO;
} else if (s->channels > 2 &&
avctx->request_channel_layout == AV_CH_LAYOUT_STEREO) {
avctx->channels = 2;
avctx->channel_layout = AV_CH_LAYOUT_STEREO;
/* allow downmixing to stereo (or mono for AC-3) */
if(avctx->request_channels > 0 &&
avctx->request_channels < s->channels &&
(avctx->request_channels <= 2 ||
(avctx->request_channels == 1 &&
(avctx->codec_id == AV_CODEC_ID_AC3 ||
avctx->codec_id == AV_CODEC_ID_EAC3)))) {
avctx->channels = avctx->request_channels;
} else {
avctx->channels = s->channels;
avctx->channel_layout = s->channel_layout;
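
A simplified, self-contained sketch of the downmix decision shown in this hunk: clamp the advertised channel count to a requested stereo/mono target when downmixing is possible, otherwise report the stream layout. (pick_channels() is invented and deliberately ignores the codec-specific mono rule for (E-)AC-3.)

#include <stdio.h>

static int pick_channels(int stream_channels, int requested)
{
    if (requested > 0 && requested < stream_channels && requested <= 2)
        return requested;          /* decoder will downmix */
    return stream_channels;        /* pass the stream layout through */
}

int main(void)
{
    printf("%d\n", pick_channels(6, 2));  /* 5.1 -> stereo: prints 2 */
    printf("%d\n", pick_channels(2, 6));  /* cannot upmix: prints 2 */
    return 0;
}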

@@ -87,7 +87,6 @@ static int aac_adtstoasc_filter(AVBitStreamFilterContext *bsfc,
buf_size -= get_bits_count(&gb)/8;
buf += get_bits_count(&gb)/8;
}
av_free(avctx->extradata);
avctx->extradata_size = 2 + pce_size;
avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);

@@ -74,7 +74,6 @@
* N SinuSoidal Coding (Transient, Sinusoid, Noise)
* Y Parametric Stereo
* N Direct Stream Transfer
* Y Enhanced AAC Low Delay (ER AAC ELD)
*
* Note: - HE AAC v1 comprises LC AAC with Spectral Band Replication.
* - HE AAC v2 comprises LC AAC with Spectral Band Replication and
@@ -105,7 +104,6 @@
#include <assert.h>
#include <errno.h>
#include <math.h>
#include <stdint.h>
#include <string.h>
#if ARCH_ARM
@@ -196,9 +194,6 @@ static int frame_configure_elements(AVCodecContext *avctx)
/* get output buffer */
av_frame_unref(ac->frame);
if (!avctx->channels)
return 1;
ac->frame->nb_samples = 2048;
if ((ret = ff_get_buffer(avctx, ac->frame, 0)) < 0)
return ret;
@@ -537,25 +532,6 @@ static int set_default_channel_config(AVCodecContext *avctx,
*tags = tags_per_config[channel_config];
memcpy(layout_map, aac_channel_layout_map[channel_config - 1],
*tags * sizeof(*layout_map));
/*
* AAC specification has 7.1(wide) as a default layout for 8-channel streams.
* However, at least Nero AAC encoder encodes 7.1 streams using the default
* channel config 7, mapping the side channels of the original audio stream
* to the second AAC_CHANNEL_FRONT pair in the AAC stream. Similarly, e.g. FAAD
* decodes the second AAC_CHANNEL_FRONT pair as side channels, therefore decoding
* the incorrect streams as if they were correct (and as the encoder intended).
*
* As actual intended 7.1(wide) streams are very rare, default to assuming a
* 7.1 layout was intended.
*/
if (channel_config == 7 && avctx->strict_std_compliance < FF_COMPLIANCE_STRICT) {
av_log(avctx, AV_LOG_INFO, "Assuming an incorrectly encoded 7.1 channel layout"
" instead of a spec-compliant 7.1(wide) layout, use -strict %d to decode"
" according to the specification instead.\n", FF_COMPLIANCE_STRICT);
layout_map[2][2] = AAC_CHANNEL_SIDE;
}
return 0;
}
@@ -1136,6 +1112,7 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
ff_mdct_init(&ac->mdct_ltp, 11, 0, -2.0 * 32768.0);
// window initialization
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
ff_kbd_window_init(ff_aac_kbd_long_512, 4.0, 512);
ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
ff_init_ff_sine_windows(10);
ff_init_ff_sine_windows( 9);
@@ -1248,14 +1225,13 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
if (aot == AOT_ER_AAC_LD || aot == AOT_ER_AAC_ELD) {
ics->swb_offset = ff_swb_offset_512[ac->oc[1].m4ac.sampling_index];
ics->num_swb = ff_aac_num_swb_512[ac->oc[1].m4ac.sampling_index];
ics->tns_max_bands = ff_tns_max_bands_512[ac->oc[1].m4ac.sampling_index];
if (!ics->num_swb || !ics->swb_offset)
return AVERROR_BUG;
} else {
ics->swb_offset = ff_swb_offset_1024[ac->oc[1].m4ac.sampling_index];
ics->num_swb = ff_aac_num_swb_1024[ac->oc[1].m4ac.sampling_index];
ics->tns_max_bands = ff_tns_max_bands_1024[ac->oc[1].m4ac.sampling_index];
}
ics->tns_max_bands = ff_tns_max_bands_1024[ac->oc[1].m4ac.sampling_index];
if (aot != AOT_ER_AAC_ELD) {
ics->predictor_present = get_bits1(gb);
ics->predictor_reset_group = 0;
@@ -1426,12 +1402,12 @@ static int decode_pulses(Pulse *pulse, GetBitContext *gb,
return -1;
pulse->pos[0] = swb_offset[pulse_swb];
pulse->pos[0] += get_bits(gb, 5);
if (pulse->pos[0] >= swb_offset[num_swb])
if (pulse->pos[0] > 1023)
return -1;
pulse->amp[0] = get_bits(gb, 4);
for (i = 1; i < pulse->num_pulse; i++) {
pulse->pos[i] = get_bits(gb, 5) + pulse->pos[i - 1];
if (pulse->pos[i] >= swb_offset[num_swb])
if (pulse->pos[i] > 1023)
return -1;
pulse->amp[i] = get_bits(gb, 4);
}
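
The hunk above changes how decoded pulse positions are validated: against the actual band end (swb_offset[num_swb]) on one side versus a fixed 1023 on the other. A minimal stand-in for that bounds check (check_pulse_positions() is invented for illustration):

#include <stdio.h>

static int check_pulse_positions(const int *pos, int num_pulse, int limit)
{
    for (int i = 0; i < num_pulse; i++)
        if (pos[i] >= limit)
            return -1;   /* out-of-range position: corrupt bitstream */
    return 0;
}

int main(void)
{
    int pos[2] = { 100, 1020 };
    printf("%d\n", check_pulse_positions(pos, 2, 1024));  /* 0: in range */
    printf("%d\n", check_pulse_positions(pos, 2, 512));   /* -1: rejected */
    return 0;
}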
@@ -2286,12 +2262,10 @@ static int decode_extension_payload(AACContext *ac, GetBitContext *gb, int cnt,
} else if (ac->oc[1].m4ac.ps == -1 && ac->oc[1].status < OC_LOCKED && ac->avctx->channels == 1) {
ac->oc[1].m4ac.sbr = 1;
ac->oc[1].m4ac.ps = 1;
ac->avctx->profile = FF_PROFILE_AAC_HE_V2;
output_configure(ac, ac->oc[1].layout_map, ac->oc[1].layout_map_tags,
ac->oc[1].status, 1);
} else {
ac->oc[1].m4ac.sbr = 1;
ac->avctx->profile = FF_PROFILE_AAC_HE;
}
res = ff_decode_sbr_extension(ac, &che->sbr, gb, crc_flag, cnt, elem_type);
break;
@@ -2531,20 +2505,14 @@ static void imdct_and_windowing_ld(AACContext *ac, SingleChannelElement *sce)
float *in = sce->coeffs;
float *out = sce->ret;
float *saved = sce->saved;
const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_512 : ff_sine_512;
float *buf = ac->buf_mdct;
// imdct
ac->mdct.imdct_half(&ac->mdct_ld, buf, in);
// window overlapping
if (ics->use_kb_window[1]) {
// AAC LD uses a low overlap sine window instead of a KBD window
memcpy(out, saved, 192 * sizeof(float));
ac->fdsp.vector_fmul_window(out + 192, saved + 192, buf, ff_sine_128, 64);
memcpy( out + 320, buf + 64, 192 * sizeof(float));
} else {
ac->fdsp.vector_fmul_window(out, saved, buf, ff_sine_512, 256);
}
ac->fdsp.vector_fmul_window(out, saved, buf, lwindow_prev, 256);
// buffer update
memcpy(saved, buf + 256, 256 * sizeof(float));
@@ -2565,7 +2533,7 @@ static void imdct_and_windowing_eld(AACContext *ac, SingleChannelElement *sce)
// Inverse transform, mapped to the conventional IMDCT by
// Chivukula, R.K.; Reznik, Y.A.; Devarajan, V.,
// "Efficient algorithms for MPEG-4 AAC-ELD, AAC-LD and AAC-LC filterbanks,"
// International Conference on Audio, Language and Image Processing, ICALIP 2008.
// Audio, Language and Image Processing, 2008. ICALIP 2008. International Conference on
// URL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4590245&isnumber=4589950
for (i = 0; i < n2; i+=2) {
float temp;
@@ -2832,10 +2800,6 @@ static int aac_decode_er_frame(AVCodecContext *avctx, void *data,
if ((err = frame_configure_elements(avctx)) < 0)
return err;
// The FF_PROFILE_AAC_* defines are all object_type - 1
// This may lead to an undefined profile being signaled
ac->avctx->profile = ac->oc[1].m4ac.object_type - 1;
ac->tags_mapped = 0;
if (chan_config < 0 || chan_config >= 8) {
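For reference, the mapping the profile comment alludes to (constants from avcodec.h and mpeg4audio.h):
/*   AOT_AAC_MAIN = 1  ->  FF_PROFILE_AAC_MAIN = 0
 *   AOT_AAC_LC   = 2  ->  FF_PROFILE_AAC_LOW  = 1
 *   AOT_AAC_SSR  = 3  ->  FF_PROFILE_AAC_SSR  = 2
 *   AOT_AAC_LTP  = 4  ->  FF_PROFILE_AAC_LTP  = 3
 * Object types without a matching FF_PROFILE_AAC_* constant yield a
 * profile value with no name, hence the "undefined profile" caveat. */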
@@ -2872,7 +2836,6 @@ static int aac_decode_er_frame(AVCodecContext *avctx, void *data,
spectral_to_sample(ac);
ac->frame->nb_samples = samples;
ac->frame->sample_rate = avctx->sample_rate;
*got_frame_ptr = 1;
skip_bits_long(gb, get_bits_left(gb));
@@ -2906,10 +2869,6 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
if ((err = frame_configure_elements(avctx)) < 0)
goto fail;
// The FF_PROFILE_AAC_* defines are all object_type - 1
// This may lead to an undefined profile being signaled
ac->avctx->profile = ac->oc[1].m4ac.object_type - 1;
ac->tags_mapped = 0;
// parse
while ((elem_type = get_bits(gb, 3)) != TYPE_END) {
@@ -3007,6 +2966,22 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
multiplier = (ac->oc[1].m4ac.sbr == 1) ? ac->oc[1].m4ac.ext_sample_rate > ac->oc[1].m4ac.sample_rate : 0;
samples <<= multiplier;
/* for dual-mono audio (SCE + SCE) */
is_dmono = ac->dmono_mode && sce_count == 2 &&
ac->oc[1].channel_layout == (AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT);
if (samples)
ac->frame->nb_samples = samples;
else
av_frame_unref(ac->frame);
*got_frame_ptr = !!samples;
if (is_dmono) {
if (ac->dmono_mode == 1)
((AVFrame *)data)->data[1] = ((AVFrame *)data)->data[0];
else if (ac->dmono_mode == 2)
((AVFrame *)data)->data[0] = ((AVFrame *)data)->data[1];
}
if (ac->oc[1].status && audio_found) {
avctx->sample_rate = ac->oc[1].m4ac.sample_rate << multiplier;
@@ -3020,25 +2995,6 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
if (side && side_size>=4)
AV_WL32(side, 2*AV_RL32(side));
}
*got_frame_ptr = !!samples;
if (samples) {
ac->frame->nb_samples = samples;
ac->frame->sample_rate = avctx->sample_rate;
} else
av_frame_unref(ac->frame);
*got_frame_ptr = !!samples;
/* for dual-mono audio (SCE + SCE) */
is_dmono = ac->dmono_mode && sce_count == 2 &&
ac->oc[1].channel_layout == (AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT);
if (is_dmono) {
if (ac->dmono_mode == 1)
((AVFrame *)data)->data[1] = ((AVFrame *)data)->data[0];
else if (ac->dmono_mode == 2)
((AVFrame *)data)->data[0] = ((AVFrame *)data)->data[1];
}
return 0;
fail:
pop_output_configuration(ac);
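Both versions of the frame-output code in the hunks above carry the same dual-mono special case: a stream of two independent SCEs (used, e.g., for bilingual broadcast audio) is decoded as stereo, and dmono_mode picks the audible service by aliasing the two output planes. Schematically (frame stands in for the (AVFrame *)data cast):
if (ac->dmono_mode == 1)          /* both planes play the first (main) SCE */
    frame->data[1] = frame->data[0];
else if (ac->dmono_mode == 2)     /* both planes play the second (sub) SCE */
    frame->data[0] = frame->data[1];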

View File

@@ -322,15 +322,14 @@ static void hybrid2_re(float (*in)[2], float (*out)[32][2], const float filter[8
}
/** Split one subband into 6 subsubbands with a complex filter */
static void hybrid6_cx(PSDSPContext *dsp, float (*in)[2], float (*out)[32][2],
TABLE_CONST float (*filter)[8][2], int len)
static void hybrid6_cx(PSDSPContext *dsp, float (*in)[2], float (*out)[32][2], const float (*filter)[8][2], int len)
{
int i;
int N = 8;
LOCAL_ALIGNED_16(float, temp, [8], [2]);
for (i = 0; i < len; i++, in++) {
dsp->hybrid_analysis(temp, in, (const float (*)[8][2]) filter, 1, N);
dsp->hybrid_analysis(temp, in, filter, 1, N);
out[0][i][0] = temp[6][0];
out[0][i][1] = temp[6][1];
out[1][i][0] = temp[7][0];
@@ -346,14 +345,12 @@ static void hybrid6_cx(PSDSPContext *dsp, float (*in)[2], float (*out)[32][2],
}
}
static void hybrid4_8_12_cx(PSDSPContext *dsp,
float (*in)[2], float (*out)[32][2],
TABLE_CONST float (*filter)[8][2], int N, int len)
static void hybrid4_8_12_cx(PSDSPContext *dsp, float (*in)[2], float (*out)[32][2], const float (*filter)[8][2], int N, int len)
{
int i;
for (i = 0; i < len; i++, in++) {
dsp->hybrid_analysis(out[0] + i, in, (const float (*)[8][2]) filter, 32, N);
dsp->hybrid_analysis(out[0] + i, in, filter, 32, N);
}
}
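Both wrappers delegate to dsp->hybrid_analysis(), which splits each QMF subband through an 8-tap complex prototype filter. Schematically, setting aside the symmetry optimisations of the real kernels (filter and in hold {re, im} pairs; illustrative only):
for (n = 0; n < N; n++) {                /* N sub-subbands */
    float re = 0.0f, im = 0.0f;
    for (t = 0; t < 8; t++) {            /* 8 filter taps */
        re += filter[n][t][0] * in[t][0] - filter[n][t][1] * in[t][1];
        im += filter[n][t][0] * in[t][1] + filter[n][t][1] * in[t][0];
    }
    out[n][0] = re;
    out[n][1] = im;
}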
@@ -432,7 +429,6 @@ static void hybrid_synthesis(PSDSPContext *dsp, float out[2][38][64],
#define DECAY_SLOPE 0.05f
/// Number of frequency bands that can be addressed by the parameter index, b(k)
static const int NR_PAR_BANDS[] = { 20, 34 };
static const int NR_IPDOPD_BANDS[] = { 11, 17 };
/// Number of frequency bands that can be addressed by the sub subband index, k
static const int NR_BANDS[] = { 71, 91 };
/// Start frequency band for the all-pass filter decay slope
@@ -689,8 +685,7 @@ static void decorrelation(PSContext *ps, float (*out)[32][2], const float (*s)[3
memcpy(ap_delay[k][m], ap_delay[k][m]+numQMFSlots, 5*sizeof(ap_delay[k][m][0]));
}
ps->dsp.decorrelate(out[k], delay[k] + PS_MAX_DELAY - 2, ap_delay[k],
phi_fract[is34][k],
(const float (*)[2]) Q_fract_allpass[is34][k],
phi_fract[is34][k], Q_fract_allpass[is34][k],
transient_gain[b], g_decay_slope, nL - n0);
}
for (; k < SHORT_DELAY_BAND[is34]; k++) {
@@ -768,7 +763,7 @@ static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2
int8_t (*ipd_mapped)[PS_MAX_NR_IIDICC] = ipd_mapped_buf;
int8_t (*opd_mapped)[PS_MAX_NR_IIDICC] = opd_mapped_buf;
const int8_t *k_to_i = is34 ? k_to_i_34 : k_to_i_20;
TABLE_CONST float (*H_LUT)[8][4] = (PS_BASELINE || ps->icc_mode < 3) ? HA : HB;
const float (*H_LUT)[8][4] = (PS_BASELINE || ps->icc_mode < 3) ? HA : HB;
//Remapping
if (ps->num_env_old) {
@@ -829,7 +824,7 @@ static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2
h21 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][2];
h22 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][3];
if (!PS_BASELINE && ps->enable_ipdopd && b < NR_IPDOPD_BANDS[is34]) {
if (!PS_BASELINE && ps->enable_ipdopd && 2*b <= NR_PAR_BANDS[is34]) {
// The spec says to only run this smoother when enable_ipdopd
// is set but the reference decoder appears to run it constantly
float h11i, h12i, h21i, h22i;
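The two conditions agree almost everywhere: with NR_PAR_BANDS = {20, 34}, 2*b <= NR_PAR_BANDS[is34] permits b <= 10 and b <= 17, while b < NR_IPDOPD_BANDS[is34] with the {11, 17} table permits b <= 10 and b <= 16. They diverge only at b == 17 in 34-band mode, where the explicit IPD/OPD band count stops the smoother one band earlier.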
@@ -919,7 +914,7 @@ int ff_ps_apply(AVCodecContext *avctx, PSContext *ps, float L[2][38][64], float
memset(ps->ap_delay + top, 0, (NR_ALLPASS_BANDS[is34] - top)*sizeof(ps->ap_delay[0]));
hybrid_analysis(&ps->dsp, Lbuf, ps->in_buf, L, is34, len);
decorrelation(ps, Rbuf, (const float (*)[32][2]) Lbuf, is34);
decorrelation(ps, Rbuf, Lbuf, is34);
stereo_processing(ps, Lbuf, Rbuf, is34);
hybrid_synthesis(&ps->dsp, L, Lbuf, is34, len);
hybrid_synthesis(&ps->dsp, R, Rbuf, is34, len);

View File

@@ -82,7 +82,7 @@ int main(void)
write_float_3d_array(f34_2_4, 4, 8, 2);
printf("};\n");
printf("static TABLE_CONST DECLARE_ALIGNED(16, float, Q_fract_allpass)[2][50][3][2] = {\n");
printf("static const DECLARE_ALIGNED(16, float, Q_fract_allpass)[2][50][3][2] = {\n");
write_float_4d_array(Q_fract_allpass, 2, 50, 3, 2);
printf("};\n");
printf("static const DECLARE_ALIGNED(16, float, phi_fract)[2][50][2] = {\n");

View File

@@ -28,7 +28,6 @@
#if CONFIG_HARDCODED_TABLES
#define ps_tableinit()
#define TABLE_CONST const
#include "libavcodec/aacps_tables.h"
#else
#include "libavutil/common.h"
@@ -38,7 +37,6 @@
#define NR_ALLPASS_BANDS20 30
#define NR_ALLPASS_BANDS34 50
#define PS_AP_LINKS 3
#define TABLE_CONST
static float pd_re_smooth[8*8*8];
static float pd_im_smooth[8*8*8];
static float HA[46][8][4];
@@ -47,7 +45,7 @@ static DECLARE_ALIGNED(16, float, f20_0_8) [ 8][8][2];
static DECLARE_ALIGNED(16, float, f34_0_12)[12][8][2];
static DECLARE_ALIGNED(16, float, f34_1_8) [ 8][8][2];
static DECLARE_ALIGNED(16, float, f34_2_4) [ 4][8][2];
static TABLE_CONST DECLARE_ALIGNED(16, float, Q_fract_allpass)[2][50][3][2];
static DECLARE_ALIGNED(16, float, Q_fract_allpass)[2][50][3][2];
static DECLARE_ALIGNED(16, float, phi_fract)[2][50][2];
static const float g0_Q8[] = {
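The TABLE_CONST define touched by these hunks lets one declaration serve both build modes: read-only when CONFIG_HARDCODED_TABLES bakes the tables in, writable when ps_tableinit() computes them at startup. A self-contained sketch of the idiom (some_table is illustrative; with hardcoded tables the generated header supplies the initializer):
#if CONFIG_HARDCODED_TABLES
#define TABLE_CONST const        /* tables come with initializers  */
#define ps_tableinit()           /* nothing to compute at runtime  */
#else
#define TABLE_CONST              /* tables must remain writable    */
static void ps_tableinit(void);  /* fills the tables during init   */
#endif
static TABLE_CONST DECLARE_ALIGNED(16, float, some_table)[50][2];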

View File

@@ -93,7 +93,7 @@ static void ps_hybrid_synthesis_deint_c(float out[2][38][64],
static void ps_decorrelate_c(float (*out)[2], float (*delay)[2],
float (*ap_delay)[PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2],
const float phi_fract[2], const float (*Q_fract)[2],
const float phi_fract[2], float (*Q_fract)[2],
const float *transient_gain,
float g_decay_slope,
int len)

View File

@@ -38,7 +38,7 @@ typedef struct PSDSPContext {
int i, int len);
void (*decorrelate)(float (*out)[2], float (*delay)[2],
float (*ap_delay)[PS_QMF_TIME_SLOTS+PS_MAX_AP_DELAY][2],
const float phi_fract[2], const float (*Q_fract)[2],
const float phi_fract[2], float (*Q_fract)[2],
const float *transient_gain,
float g_decay_slope,
int len);

View File

@@ -932,7 +932,6 @@ static void read_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
} else {
#if 1
*num_bits_left -= ff_ps_read_data(ac->avctx, gb, &sbr->ps, *num_bits_left);
ac->avctx->profile = FF_PROFILE_AAC_HE_V2;
#else
avpriv_report_missing_feature(ac->avctx, "Parametric Stereo");
skip_bits_long(gb, *num_bits_left); // bs_fill_bits
@@ -1702,18 +1701,12 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
sbr_qmf_analysis(&ac->fdsp, &sbr->mdct_ana, &sbr->dsp, ch ? R : L, sbr->data[ch].analysis_filterbank_samples,
(float*)sbr->qmf_filter_scratch,
sbr->data[ch].W, sbr->data[ch].Ypos);
sbr->c.sbr_lf_gen(ac, sbr, sbr->X_low,
(const float (*)[32][32][2]) sbr->data[ch].W,
sbr->data[ch].Ypos);
sbr->c.sbr_lf_gen(ac, sbr, sbr->X_low, sbr->data[ch].W, sbr->data[ch].Ypos);
sbr->data[ch].Ypos ^= 1;
if (sbr->start) {
sbr->c.sbr_hf_inverse_filter(&sbr->dsp, sbr->alpha0, sbr->alpha1,
(const float (*)[40][2]) sbr->X_low, sbr->k[0]);
sbr->c.sbr_hf_inverse_filter(&sbr->dsp, sbr->alpha0, sbr->alpha1, sbr->X_low, sbr->k[0]);
sbr_chirp(sbr, &sbr->data[ch]);
sbr_hf_gen(ac, sbr, sbr->X_high,
(const float (*)[40][2]) sbr->X_low,
(const float (*)[2]) sbr->alpha0,
(const float (*)[2]) sbr->alpha1,
sbr_hf_gen(ac, sbr, sbr->X_high, sbr->X_low, sbr->alpha0, sbr->alpha1,
sbr->data[ch].bw_array, sbr->data[ch].t_env,
sbr->data[ch].bs_num_env);
@@ -1723,17 +1716,16 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
sbr_env_estimate(sbr->e_curr, sbr->X_high, sbr, &sbr->data[ch]);
sbr_gain_calc(ac, sbr, &sbr->data[ch], sbr->data[ch].e_a);
sbr->c.sbr_hf_assemble(sbr->data[ch].Y[sbr->data[ch].Ypos],
(const float (*)[40][2]) sbr->X_high,
sbr, &sbr->data[ch],
sbr->X_high, sbr, &sbr->data[ch],
sbr->data[ch].e_a);
}
}
/* synthesis */
sbr->c.sbr_x_gen(sbr, sbr->X[ch],
(const float (*)[64][2]) sbr->data[ch].Y[1-sbr->data[ch].Ypos],
(const float (*)[64][2]) sbr->data[ch].Y[ sbr->data[ch].Ypos],
(const float (*)[40][2]) sbr->X_low, ch);
sbr->data[ch].Y[1-sbr->data[ch].Ypos],
sbr->data[ch].Y[ sbr->data[ch].Ypos],
sbr->X_low, ch);
}
if (ac->oc[1].m4ac.ps == 1) {

View File

@@ -34,6 +34,7 @@
#include <stdint.h>
DECLARE_ALIGNED(32, float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(32, float, ff_aac_kbd_long_512 )[512];
DECLARE_ALIGNED(32, float, ff_aac_kbd_short_128)[128];
const uint8_t ff_aac_num_swb_1024[] = {
@@ -1236,10 +1237,6 @@ const uint8_t ff_tns_max_bands_1024[] = {
31, 31, 34, 40, 42, 51, 46, 46, 42, 42, 42, 39, 39
};
const uint8_t ff_tns_max_bands_512[] = {
0, 0, 0, 31, 32, 37, 31, 31, 0, 0, 0, 0, 0
};
const uint8_t ff_tns_max_bands_128[] = {
9, 9, 10, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14
};

View File

@@ -45,6 +45,7 @@
* @{
*/
DECLARE_ALIGNED(32, extern float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(32, extern float, ff_aac_kbd_long_512 )[512];
DECLARE_ALIGNED(32, extern float, ff_aac_kbd_short_128)[128];
const DECLARE_ALIGNED(32, extern float, ff_aac_eld_window)[1920];
// @}
@@ -75,7 +76,6 @@ extern const uint16_t * const ff_swb_offset_512 [13];
extern const uint16_t * const ff_swb_offset_128 [13];
extern const uint8_t ff_tns_max_bands_1024[13];
extern const uint8_t ff_tns_max_bands_512 [13];
extern const uint8_t ff_tns_max_bands_128 [13];
#endif /* AVCODEC_AACTAB_H */

View File

@@ -1,14 +0,0 @@
OBJS-$(CONFIG_H264CHROMA) += aarch64/h264chroma_init_aarch64.o
OBJS-$(CONFIG_H264DSP) += aarch64/h264dsp_init_aarch64.o
OBJS-$(CONFIG_H264QPEL) += aarch64/h264qpel_init_aarch64.o
OBJS-$(CONFIG_HPELDSP) += aarch64/hpeldsp_init_aarch64.o
OBJS-$(CONFIG_NEON_CLOBBER_TEST) += aarch64/neontest.o
OBJS-$(CONFIG_RV40_DECODER) += aarch64/rv40dsp_init_aarch64.o
OBJS-$(CONFIG_VC1_DECODER) += aarch64/vc1dsp_init_aarch64.o
NEON-OBJS-$(CONFIG_H264CHROMA) += aarch64/h264cmc_neon.o
NEON-OBJS-$(CONFIG_H264DSP) += aarch64/h264dsp_neon.o \
aarch64/h264idct_neon.o
NEON-OBJS-$(CONFIG_H264QPEL) += aarch64/h264qpel_neon.o \
aarch64/hpeldsp_neon.o
NEON-OBJS-$(CONFIG_HPELDSP) += aarch64/hpeldsp_neon.o

View File

@@ -1,59 +0,0 @@
/*
* ARM NEON optimised H.264 chroma functions
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/aarch64/cpu.h"
#include "libavcodec/h264chroma.h"
#include "config.h"
void ff_put_h264_chroma_mc8_neon(uint8_t *dst, uint8_t *src, int stride,
int h, int x, int y);
void ff_put_h264_chroma_mc4_neon(uint8_t *dst, uint8_t *src, int stride,
int h, int x, int y);
void ff_put_h264_chroma_mc2_neon(uint8_t *dst, uint8_t *src, int stride,
int h, int x, int y);
void ff_avg_h264_chroma_mc8_neon(uint8_t *dst, uint8_t *src, int stride,
int h, int x, int y);
void ff_avg_h264_chroma_mc4_neon(uint8_t *dst, uint8_t *src, int stride,
int h, int x, int y);
void ff_avg_h264_chroma_mc2_neon(uint8_t *dst, uint8_t *src, int stride,
int h, int x, int y);
av_cold void ff_h264chroma_init_aarch64(H264ChromaContext *c, int bit_depth)
{
const int high_bit_depth = bit_depth > 8;
int cpu_flags = av_get_cpu_flags();
if (have_neon(cpu_flags) && !high_bit_depth) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_neon;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_neon;
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_neon;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_neon;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_neon;
c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_neon;
}
}

View File

@@ -1,402 +0,0 @@
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
* Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/aarch64/asm.S"
/* chroma_mc8(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
.macro h264_chroma_mc8 type, codec=h264
function ff_\type\()_\codec\()_chroma_mc8_neon, export=1
sxtw x2, w2
.ifc \type,avg
mov x8, x0
.endif
prfm pldl1strm, [x1]
prfm pldl1strm, [x1, x2]
.ifc \codec,rv40
movrel x6, rv40bias
lsr w9, w5, #1
lsr w10, w4, #1
lsl w9, w9, #3
lsl w10, w10, #1
add w9, w9, w10
add x6, x6, w9, UXTW
ld1r {v22.8H}, [x6]
.endif
.ifc \codec,vc1
movi v22.8H, #28
.endif
mul w7, w4, w5
lsl w14, w5, #3
lsl w13, w4, #3
cmp w7, #0
sub w6, w14, w7
sub w12, w13, w7
sub w4, w7, w13
sub w4, w4, w14
add w4, w4, #64
b.eq 2f
dup v0.8B, w4
dup v1.8B, w12
ld1 {v4.8B, v5.8B}, [x1], x2
dup v2.8B, w6
dup v3.8B, w7
ext v5.8B, v4.8B, v5.8B, #1
1: ld1 {v6.8B, v7.8B}, [x1], x2
umull v16.8H, v4.8B, v0.8B
umlal v16.8H, v5.8B, v1.8B
ext v7.8B, v6.8B, v7.8B, #1
ld1 {v4.8B, v5.8B}, [x1], x2
umlal v16.8H, v6.8B, v2.8B
prfm pldl1strm, [x1]
ext v5.8B, v4.8B, v5.8B, #1
umlal v16.8H, v7.8B, v3.8B
umull v17.8H, v6.8B, v0.8B
subs w3, w3, #2
umlal v17.8H, v7.8B, v1.8B
umlal v17.8H, v4.8B, v2.8B
umlal v17.8H, v5.8B, v3.8B
prfm pldl1strm, [x1, x2]
.ifc \codec,h264
rshrn v16.8B, v16.8H, #6
rshrn v17.8B, v17.8H, #6
.else
add v16.8H, v16.8H, v22.8H
add v17.8H, v17.8H, v22.8H
shrn v16.8B, v16.8H, #6
shrn v17.8B, v17.8H, #6
.endif
.ifc \type,avg
ld1 {v20.8B}, [x8], x2
ld1 {v21.8B}, [x8], x2
urhadd v16.8B, v16.8B, v20.8B
urhadd v17.8B, v17.8B, v21.8B
.endif
st1 {v16.8B}, [x0], x2
st1 {v17.8B}, [x0], x2
b.gt 1b
ret
2: tst w6, w6
add w12, w12, w6
dup v0.8B, w4
dup v1.8B, w12
b.eq 4f
ld1 {v4.8B}, [x1], x2
3: ld1 {v6.8B}, [x1], x2
umull v16.8H, v4.8B, v0.8B
umlal v16.8H, v6.8B, v1.8B
ld1 {v4.8B}, [x1], x2
umull v17.8H, v6.8B, v0.8B
umlal v17.8H, v4.8B, v1.8B
prfm pldl1strm, [x1]
.ifc \codec,h264
rshrn v16.8B, v16.8H, #6
rshrn v17.8B, v17.8H, #6
.else
add v16.8H, v16.8H, v22.8H
add v17.8H, v17.8H, v22.8H
shrn v16.8B, v16.8H, #6
shrn v17.8B, v17.8H, #6
.endif
prfm pldl1strm, [x1, x2]
.ifc \type,avg
ld1 {v20.8B}, [x8], x2
ld1 {v21.8B}, [x8], x2
urhadd v16.8B, v16.8B, v20.8B
urhadd v17.8B, v17.8B, v21.8B
.endif
subs w3, w3, #2
st1 {v16.8B}, [x0], x2
st1 {v17.8B}, [x0], x2
b.gt 3b
ret
4: ld1 {v4.8B, v5.8B}, [x1], x2
ld1 {v6.8B, v7.8B}, [x1], x2
ext v5.8B, v4.8B, v5.8B, #1
ext v7.8B, v6.8B, v7.8B, #1
prfm pldl1strm, [x1]
subs w3, w3, #2
umull v16.8H, v4.8B, v0.8B
umlal v16.8H, v5.8B, v1.8B
umull v17.8H, v6.8B, v0.8B
umlal v17.8H, v7.8B, v1.8B
prfm pldl1strm, [x1, x2]
.ifc \codec,h264
rshrn v16.8B, v16.8H, #6
rshrn v17.8B, v17.8H, #6
.else
add v16.8H, v16.8H, v22.8H
add v17.8H, v17.8H, v22.8H
shrn v16.8B, v16.8H, #6
shrn v17.8B, v17.8H, #6
.endif
.ifc \type,avg
ld1 {v20.8B}, [x8], x2
ld1 {v21.8B}, [x8], x2
urhadd v16.8B, v16.8B, v20.8B
urhadd v17.8B, v17.8B, v21.8B
.endif
st1 {v16.8B}, [x0], x2
st1 {v17.8B}, [x0], x2
b.gt 4b
ret
endfunc
.endm
/* chroma_mc4(uint8_t *dst, uint8_t *src, int stride, int h, int x, int y) */
.macro h264_chroma_mc4 type, codec=h264
function ff_\type\()_\codec\()_chroma_mc4_neon, export=1
sxtw x2, w2
.ifc \type,avg
mov x8, x0
.endif
prfm pldl1strm, [x1]
prfm pldl1strm, [x1, x2]
.ifc \codec,rv40
movrel x6, rv40bias
lsr w9, w5, #1
lsr w10, w4, #1
lsl w9, w9, #3
lsl w10, w10, #1
add w9, w9, w10
add x6, x6, w9, UXTW
ld1r {v22.8H}, [x6]
.endif
.ifc \codec,vc1
movi v22.8H, #28
.endif
mul w7, w4, w5
lsl w14, w5, #3
lsl w13, w4, #3
cmp w7, #0
sub w6, w14, w7
sub w12, w13, w7
sub w4, w7, w13
sub w4, w4, w14
add w4, w4, #64
b.eq 2f
dup v24.8B, w4
dup v25.8B, w12
ld1 {v4.8B}, [x1], x2
dup v26.8B, w6
dup v27.8B, w7
ext v5.8B, v4.8B, v5.8B, #1
trn1 v0.2S, v24.2S, v25.2S
trn1 v2.2S, v26.2S, v27.2S
trn1 v4.2S, v4.2S, v5.2S
1: ld1 {v6.8B}, [x1], x2
ext v7.8B, v6.8B, v7.8B, #1
trn1 v6.2S, v6.2S, v7.2S
umull v18.8H, v4.8B, v0.8B
umlal v18.8H, v6.8B, v2.8B
ld1 {v4.8B}, [x1], x2
ext v5.8B, v4.8B, v5.8B, #1
trn1 v4.2S, v4.2S, v5.2S
prfm pldl1strm, [x1]
umull v19.8H, v6.8B, v0.8B
umlal v19.8H, v4.8B, v2.8B
trn1 v30.2D, v18.2D, v19.2D
trn2 v31.2D, v18.2D, v19.2D
add v18.8H, v30.8H, v31.8H
.ifc \codec,h264
rshrn v16.8B, v18.8H, #6
.else
add v18.8H, v18.8H, v22.8H
shrn v16.8B, v18.8H, #6
.endif
subs w3, w3, #2
prfm pldl1strm, [x1, x2]
.ifc \type,avg
ld1 {v20.S}[0], [x8], x2
ld1 {v20.S}[1], [x8], x2
urhadd v16.8B, v16.8B, v20.8B
.endif
st1 {v16.S}[0], [x0], x2
st1 {v16.S}[1], [x0], x2
b.gt 1b
ret
2: tst w6, w6
add w12, w12, w6
dup v30.8B, w4
dup v31.8B, w12
trn1 v0.2S, v30.2S, v31.2S
trn2 v1.2S, v30.2S, v31.2S
b.eq 4f
ext v1.8B, v0.8B, v1.8B, #4
ld1 {v4.S}[0], [x1], x2
3: ld1 {v4.S}[1], [x1], x2
umull v18.8H, v4.8B, v0.8B
ld1 {v4.S}[0], [x1], x2
umull v19.8H, v4.8B, v1.8B
trn1 v30.2D, v18.2D, v19.2D
trn2 v31.2D, v18.2D, v19.2D
add v18.8H, v30.8H, v31.8H
prfm pldl1strm, [x1]
.ifc \codec,h264
rshrn v16.8B, v18.8H, #6
.else
add v18.8H, v18.8H, v22.8H
shrn v16.8B, v18.8H, #6
.endif
.ifc \type,avg
ld1 {v20.S}[0], [x8], x2
ld1 {v20.S}[1], [x8], x2
urhadd v16.8B, v16.8B, v20.8B
.endif
subs w3, w3, #2
prfm pldl1strm, [x1, x2]
st1 {v16.S}[0], [x0], x2
st1 {v16.S}[1], [x0], x2
b.gt 3b
ret
4: ld1 {v4.8B}, [x1], x2
ld1 {v6.8B}, [x1], x2
ext v5.8B, v4.8B, v5.8B, #1
ext v7.8B, v6.8B, v7.8B, #1
trn1 v4.2S, v4.2S, v5.2S
trn1 v6.2S, v6.2S, v7.2S
umull v18.8H, v4.8B, v0.8B
umull v19.8H, v6.8B, v0.8B
subs w3, w3, #2
trn1 v30.2D, v18.2D, v19.2D
trn2 v31.2D, v18.2D, v19.2D
add v18.8H, v30.8H, v31.8H
prfm pldl1strm, [x1]
.ifc \codec,h264
rshrn v16.8B, v18.8H, #6
.else
add v18.8H, v18.8H, v22.8H
shrn v16.8B, v18.8H, #6
.endif
.ifc \type,avg
ld1 {v20.S}[0], [x8], x2
ld1 {v20.S}[1], [x8], x2
urhadd v16.8B, v16.8B, v20.8B
.endif
prfm pldl1strm, [x1]
st1 {v16.S}[0], [x0], x2
st1 {v16.S}[1], [x0], x2
b.gt 4b
ret
endfunc
.endm
.macro h264_chroma_mc2 type
function ff_\type\()_h264_chroma_mc2_neon, export=1
sxtw x2, w2
prfm pldl1strm, [x1]
prfm pldl1strm, [x1, x2]
orr w7, w4, w5
cbz w7, 2f
mul w7, w4, w5
lsl w14, w5, #3
lsl w13, w4, #3
sub w6, w14, w7
sub w12, w13, w7
sub w4, w7, w13
sub w4, w4, w14
add w4, w4, #64
dup v0.8B, w4
dup v2.8B, w12
dup v1.8B, w6
dup v3.8B, w7
trn1 v0.4H, v0.4H, v2.4H
trn1 v1.4H, v1.4H, v3.4H
1:
ld1 {v4.S}[0], [x1], x2
ld1 {v4.S}[1], [x1], x2
rev64 v5.2S, v4.2S
ld1 {v5.S}[1], [x1]
ext v6.8B, v4.8B, v5.8B, #1
ext v7.8B, v5.8B, v4.8B, #1
trn1 v4.4H, v4.4H, v6.4H
trn1 v5.4H, v5.4H, v7.4H
umull v16.8H, v4.8B, v0.8B
umlal v16.8H, v5.8B, v1.8B
.ifc \type,avg
ld1 {v18.H}[0], [x0], x2
ld1 {v18.H}[2], [x0]
sub x0, x0, x2
.endif
rev64 v17.4S, v16.4S
add v16.8H, v16.8H, v17.8H
rshrn v16.8B, v16.8H, #6
.ifc \type,avg
urhadd v16.8B, v16.8B, v18.8B
.endif
st1 {v16.H}[0], [x0], x2
st1 {v16.H}[2], [x0], x2
subs w3, w3, #2
b.gt 1b
ret
2:
ld1 {v16.H}[0], [x1], x2
ld1 {v16.H}[1], [x1], x2
.ifc \type,avg
ld1 {v18.H}[0], [x0], x2
ld1 {v18.H}[1], [x0]
sub x0, x0, x2
urhadd v16.8B, v16.8B, v18.8B
.endif
st1 {v16.H}[0], [x0], x2
st1 {v16.H}[1], [x0], x2
subs w3, w3, #2
b.gt 2b
ret
endfunc
.endm
h264_chroma_mc8 put
h264_chroma_mc8 avg
h264_chroma_mc4 put
h264_chroma_mc4 avg
h264_chroma_mc2 put
h264_chroma_mc2 avg
#if CONFIG_RV40_DECODER
const rv40bias
.short 0, 16, 32, 16
.short 32, 28, 32, 28
.short 0, 32, 16, 32
.short 32, 28, 32, 28
endconst
h264_chroma_mc8 put, rv40
h264_chroma_mc8 avg, rv40
h264_chroma_mc4 put, rv40
h264_chroma_mc4 avg, rv40
#endif
#if CONFIG_VC1_DECODER
h264_chroma_mc8 put, vc1
h264_chroma_mc8 avg, vc1
h264_chroma_mc4 put, vc1
h264_chroma_mc4 avg, vc1
#endif

View File

@@ -1,101 +0,0 @@
/*
* Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/aarch64/cpu.h"
#include "libavcodec/h264dsp.h"
void ff_h264_v_loop_filter_luma_neon(uint8_t *pix, int stride, int alpha,
int beta, int8_t *tc0);
void ff_h264_h_loop_filter_luma_neon(uint8_t *pix, int stride, int alpha,
int beta, int8_t *tc0);
void ff_h264_v_loop_filter_chroma_neon(uint8_t *pix, int stride, int alpha,
int beta, int8_t *tc0);
void ff_h264_h_loop_filter_chroma_neon(uint8_t *pix, int stride, int alpha,
int beta, int8_t *tc0);
void ff_weight_h264_pixels_16_neon(uint8_t *dst, int stride, int height,
int log2_den, int weight, int offset);
void ff_weight_h264_pixels_8_neon(uint8_t *dst, int stride, int height,
int log2_den, int weight, int offset);
void ff_weight_h264_pixels_4_neon(uint8_t *dst, int stride, int height,
int log2_den, int weight, int offset);
void ff_biweight_h264_pixels_16_neon(uint8_t *dst, uint8_t *src, int stride,
int height, int log2_den, int weightd,
int weights, int offset);
void ff_biweight_h264_pixels_8_neon(uint8_t *dst, uint8_t *src, int stride,
int height, int log2_den, int weightd,
int weights, int offset);
void ff_biweight_h264_pixels_4_neon(uint8_t *dst, uint8_t *src, int stride,
int height, int log2_den, int weightd,
int weights, int offset);
void ff_h264_idct_add_neon(uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct_dc_add_neon(uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct_add16_neon(uint8_t *dst, const int *block_offset,
int16_t *block, int stride,
const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_neon(uint8_t *dst, const int *block_offset,
int16_t *block, int stride,
const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_neon(uint8_t **dest, const int *block_offset,
int16_t *block, int stride,
const uint8_t nnzc[6*8]);
void ff_h264_idct8_add_neon(uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_dc_add_neon(uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_add4_neon(uint8_t *dst, const int *block_offset,
int16_t *block, int stride,
const uint8_t nnzc[6*8]);
av_cold void ff_h264dsp_init_aarch64(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc)
{
int cpu_flags = av_get_cpu_flags();
if (have_neon(cpu_flags) && bit_depth == 8) {
c->h264_v_loop_filter_luma = ff_h264_v_loop_filter_luma_neon;
c->h264_h_loop_filter_luma = ff_h264_h_loop_filter_luma_neon;
c->h264_v_loop_filter_chroma = ff_h264_v_loop_filter_chroma_neon;
c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma_neon;
c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels_16_neon;
c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels_8_neon;
c->weight_h264_pixels_tab[2] = ff_weight_h264_pixels_4_neon;
c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels_16_neon;
c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels_8_neon;
c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels_4_neon;
c->h264_idct_add = ff_h264_idct_add_neon;
c->h264_idct_dc_add = ff_h264_idct_dc_add_neon;
c->h264_idct_add16 = ff_h264_idct_add16_neon;
c->h264_idct_add16intra = ff_h264_idct_add16intra_neon;
if (chroma_format_idc <= 1)
c->h264_idct_add8 = ff_h264_idct_add8_neon;
c->h264_idct8_add = ff_h264_idct8_add_neon;
c->h264_idct8_dc_add = ff_h264_idct8_dc_add_neon;
c->h264_idct8_add4 = ff_h264_idct8_add4_neon;
}
}

View File

@@ -1,498 +0,0 @@
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
* Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/aarch64/asm.S"
#include "neon.S"
.macro h264_loop_filter_start
cmp w2, #0
ldr w6, [x4]
ccmp w3, #0, #0, ne
mov v24.S[0], w6
and w6, w6, w6, lsl #16
b.eq 1f
ands w6, w6, w6, lsl #8
b.ge 2f
1:
ret
2:
.endm
.macro h264_loop_filter_luma
dup v22.16B, w2 // alpha
uxtl v24.8H, v24.8B
uabd v21.16B, v16.16B, v0.16B // abs(p0 - q0)
uxtl v24.4S, v24.4H
uabd v28.16B, v18.16B, v16.16B // abs(p1 - p0)
sli v24.8H, v24.8H, #8
uabd v30.16B, v2.16B, v0.16B // abs(q1 - q0)
sli v24.4S, v24.4S, #16
cmhi v21.16B, v22.16B, v21.16B // < alpha
dup v22.16B, w3 // beta
cmlt v23.16B, v24.16B, #0
cmhi v28.16B, v22.16B, v28.16B // < beta
cmhi v30.16B, v22.16B, v30.16B // < beta
bic v21.16B, v21.16B, v23.16B
uabd v17.16B, v20.16B, v16.16B // abs(p2 - p0)
and v21.16B, v21.16B, v28.16B
uabd v19.16B, v4.16B, v0.16B // abs(q2 - q0)
cmhi v17.16B, v22.16B, v17.16B // < beta
and v21.16B, v21.16B, v30.16B
cmhi v19.16B, v22.16B, v19.16B // < beta
and v17.16B, v17.16B, v21.16B
and v19.16B, v19.16B, v21.16B
and v24.16B, v24.16B, v21.16B
urhadd v28.16B, v16.16B, v0.16B
sub v21.16B, v24.16B, v17.16B
uqadd v23.16B, v18.16B, v24.16B
uhadd v20.16B, v20.16B, v28.16B
sub v21.16B, v21.16B, v19.16B
uhadd v28.16B, v4.16B, v28.16B
umin v23.16B, v23.16B, v20.16B
uqsub v22.16B, v18.16B, v24.16B
uqadd v4.16B, v2.16B, v24.16B
umax v23.16B, v23.16B, v22.16B
uqsub v22.16B, v2.16B, v24.16B
umin v28.16B, v4.16B, v28.16B
uxtl v4.8H, v0.8B
umax v28.16B, v28.16B, v22.16B
uxtl2 v20.8H, v0.16B
usubw v4.8H, v4.8H, v16.8B
usubw2 v20.8H, v20.8H, v16.16B
shl v4.8H, v4.8H, #2
shl v20.8H, v20.8H, #2
uaddw v4.8H, v4.8H, v18.8B
uaddw2 v20.8H, v20.8H, v18.16B
usubw v4.8H, v4.8H, v2.8B
usubw2 v20.8H, v20.8H, v2.16B
rshrn v4.8B, v4.8H, #3
rshrn2 v4.16B, v20.8H, #3
bsl v17.16B, v23.16B, v18.16B
bsl v19.16B, v28.16B, v2.16B
neg v23.16B, v21.16B
uxtl v28.8H, v16.8B
smin v4.16B, v4.16B, v21.16B
uxtl2 v21.8H, v16.16B
smax v4.16B, v4.16B, v23.16B
uxtl v22.8H, v0.8B
uxtl2 v24.8H, v0.16B
saddw v28.8H, v28.8H, v4.8B
saddw2 v21.8H, v21.8H, v4.16B
ssubw v22.8H, v22.8H, v4.8B
ssubw2 v24.8H, v24.8H, v4.16B
sqxtun v16.8B, v28.8H
sqxtun2 v16.16B, v21.8H
sqxtun v0.8B, v22.8H
sqxtun2 v0.16B, v24.8H
.endm
function ff_h264_v_loop_filter_luma_neon, export=1
h264_loop_filter_start
sxtw x1, w1
ld1 {v0.16B}, [x0], x1
ld1 {v2.16B}, [x0], x1
ld1 {v4.16B}, [x0], x1
sub x0, x0, x1, lsl #2
sub x0, x0, x1, lsl #1
ld1 {v20.16B}, [x0], x1
ld1 {v18.16B}, [x0], x1
ld1 {v16.16B}, [x0], x1
h264_loop_filter_luma
sub x0, x0, x1, lsl #1
st1 {v17.16B}, [x0], x1
st1 {v16.16B}, [x0], x1
st1 {v0.16B}, [x0], x1
st1 {v19.16B}, [x0]
ret
endfunc
function ff_h264_h_loop_filter_luma_neon, export=1
h264_loop_filter_start
sub x0, x0, #4
ld1 {v6.8B}, [x0], x1
ld1 {v20.8B}, [x0], x1
ld1 {v18.8B}, [x0], x1
ld1 {v16.8B}, [x0], x1
ld1 {v0.8B}, [x0], x1
ld1 {v2.8B}, [x0], x1
ld1 {v4.8B}, [x0], x1
ld1 {v26.8B}, [x0], x1
ld1 {v6.D}[1], [x0], x1
ld1 {v20.D}[1], [x0], x1
ld1 {v18.D}[1], [x0], x1
ld1 {v16.D}[1], [x0], x1
ld1 {v0.D}[1], [x0], x1
ld1 {v2.D}[1], [x0], x1
ld1 {v4.D}[1], [x0], x1
ld1 {v26.D}[1], [x0], x1
transpose_8x16B v6, v20, v18, v16, v0, v2, v4, v26, v21, v23
h264_loop_filter_luma
transpose_4x16B v17, v16, v0, v19, v21, v23, v25, v27
sub x0, x0, x1, lsl #4
add x0, x0, #2
st1 {v17.S}[0], [x0], x1
st1 {v16.S}[0], [x0], x1
st1 {v0.S}[0], [x0], x1
st1 {v19.S}[0], [x0], x1
st1 {v17.S}[1], [x0], x1
st1 {v16.S}[1], [x0], x1
st1 {v0.S}[1], [x0], x1
st1 {v19.S}[1], [x0], x1
st1 {v17.S}[2], [x0], x1
st1 {v16.S}[2], [x0], x1
st1 {v0.S}[2], [x0], x1
st1 {v19.S}[2], [x0], x1
st1 {v17.S}[3], [x0], x1
st1 {v16.S}[3], [x0], x1
st1 {v0.S}[3], [x0], x1
st1 {v19.S}[3], [x0], x1
ret
endfunc
.macro h264_loop_filter_chroma
dup v22.8B, w2 // alpha
uxtl v24.8H, v24.8B
uabd v26.8B, v16.8B, v0.8B // abs(p0 - q0)
uxtl v4.8H, v0.8B
uabd v28.8B, v18.8B, v16.8B // abs(p1 - p0)
usubw v4.8H, v4.8H, v16.8B
sli v24.8H, v24.8H, #8
shl v4.8H, v4.8H, #2
uabd v30.8B, v2.8B, v0.8B // abs(q1 - q0)
uaddw v4.8H, v4.8H, v18.8B
cmhi v26.8B, v22.8B, v26.8B // < alpha
usubw v4.8H, v4.8H, v2.8B
dup v22.8B, w3 // beta
rshrn v4.8B, v4.8H, #3
cmhi v28.8B, v22.8B, v28.8B // < beta
cmhi v30.8B, v22.8B, v30.8B // < beta
smin v4.8B, v4.8B, v24.8B
neg v25.8B, v24.8B
and v26.8B, v26.8B, v28.8B
smax v4.8B, v4.8B, v25.8B
and v26.8B, v26.8B, v30.8B
uxtl v22.8H, v0.8B
and v4.8B, v4.8B, v26.8B
uxtl v28.8H, v16.8B
saddw v28.8H, v28.8H, v4.8B
ssubw v22.8H, v22.8H, v4.8B
sqxtun v16.8B, v28.8H
sqxtun v0.8B, v22.8H
.endm
function ff_h264_v_loop_filter_chroma_neon, export=1
h264_loop_filter_start
sub x0, x0, x1, lsl #1
ld1 {v18.8B}, [x0], x1
ld1 {v16.8B}, [x0], x1
ld1 {v0.8B}, [x0], x1
ld1 {v2.8B}, [x0]
h264_loop_filter_chroma
sub x0, x0, x1, lsl #1
st1 {v16.8B}, [x0], x1
st1 {v0.8B}, [x0], x1
ret
endfunc
function ff_h264_h_loop_filter_chroma_neon, export=1
h264_loop_filter_start
sub x0, x0, #2
ld1 {v18.S}[0], [x0], x1
ld1 {v16.S}[0], [x0], x1
ld1 {v0.S}[0], [x0], x1
ld1 {v2.S}[0], [x0], x1
ld1 {v18.S}[1], [x0], x1
ld1 {v16.S}[1], [x0], x1
ld1 {v0.S}[1], [x0], x1
ld1 {v2.S}[1], [x0], x1
transpose_4x8B v18, v16, v0, v2, v28, v29, v30, v31
h264_loop_filter_chroma
transpose_4x8B v18, v16, v0, v2, v28, v29, v30, v31
sub x0, x0, x1, lsl #3
st1 {v18.S}[0], [x0], x1
st1 {v16.S}[0], [x0], x1
st1 {v0.S}[0], [x0], x1
st1 {v2.S}[0], [x0], x1
st1 {v18.S}[1], [x0], x1
st1 {v16.S}[1], [x0], x1
st1 {v0.S}[1], [x0], x1
st1 {v2.S}[1], [x0], x1
ret
endfunc
.macro biweight_16 macs, macd
dup v0.16B, w5
dup v1.16B, w6
mov v4.16B, v16.16B
mov v6.16B, v16.16B
1: subs w3, w3, #2
ld1 {v20.16B}, [x0], x2
\macd v4.8H, v0.8B, v20.8B
\macd\()2 v6.8H, v0.16B, v20.16B
ld1 {v22.16B}, [x1], x2
\macs v4.8H, v1.8B, v22.8B
\macs\()2 v6.8H, v1.16B, v22.16B
mov v24.16B, v16.16B
ld1 {v28.16B}, [x0], x2
mov v26.16B, v16.16B
\macd v24.8H, v0.8B, v28.8B
\macd\()2 v26.8H, v0.16B, v28.16B
ld1 {v30.16B}, [x1], x2
\macs v24.8H, v1.8B, v30.8B
\macs\()2 v26.8H, v1.16B, v30.16B
sshl v4.8H, v4.8H, v18.8H
sshl v6.8H, v6.8H, v18.8H
sqxtun v4.8B, v4.8H
sqxtun2 v4.16B, v6.8H
sshl v24.8H, v24.8H, v18.8H
sshl v26.8H, v26.8H, v18.8H
sqxtun v24.8B, v24.8H
sqxtun2 v24.16B, v26.8H
mov v6.16B, v16.16B
st1 {v4.16B}, [x7], x2
mov v4.16B, v16.16B
st1 {v24.16B}, [x7], x2
b.ne 1b
ret
.endm
.macro biweight_8 macs, macd
dup v0.8B, w5
dup v1.8B, w6
mov v2.16B, v16.16B
mov v20.16B, v16.16B
1: subs w3, w3, #2
ld1 {v4.8B}, [x0], x2
\macd v2.8H, v0.8B, v4.8B
ld1 {v5.8B}, [x1], x2
\macs v2.8H, v1.8B, v5.8B
ld1 {v6.8B}, [x0], x2
\macd v20.8H, v0.8B, v6.8B
ld1 {v7.8B}, [x1], x2
\macs v20.8H, v1.8B, v7.8B
sshl v2.8H, v2.8H, v18.8H
sqxtun v2.8B, v2.8H
sshl v20.8H, v20.8H, v18.8H
sqxtun v4.8B, v20.8H
mov v20.16B, v16.16B
st1 {v2.8B}, [x7], x2
mov v2.16B, v16.16B
st1 {v4.8B}, [x7], x2
b.ne 1b
ret
.endm
.macro biweight_4 macs, macd
dup v0.8B, w5
dup v1.8B, w6
mov v2.16B, v16.16B
mov v20.16B,v16.16B
1: subs w3, w3, #4
ld1 {v4.S}[0], [x0], x2
ld1 {v4.S}[1], [x0], x2
\macd v2.8H, v0.8B, v4.8B
ld1 {v5.S}[0], [x1], x2
ld1 {v5.S}[1], [x1], x2
\macs v2.8H, v1.8B, v5.8B
b.lt 2f
ld1 {v6.S}[0], [x0], x2
ld1 {v6.S}[1], [x0], x2
\macd v20.8H, v0.8B, v6.8B
ld1 {v7.S}[0], [x1], x2
ld1 {v7.S}[1], [x1], x2
\macs v20.8H, v1.8B, v7.8B
sshl v2.8H, v2.8H, v18.8H
sqxtun v2.8B, v2.8H
sshl v20.8H, v20.8H, v18.8H
sqxtun v4.8B, v20.8H
mov v20.16B, v16.16B
st1 {v2.S}[0], [x7], x2
st1 {v2.S}[1], [x7], x2
mov v2.16B, v16.16B
st1 {v4.S}[0], [x7], x2
st1 {v4.S}[1], [x7], x2
b.ne 1b
ret
2: sshl v2.8H, v2.8H, v18.8H
sqxtun v2.8B, v2.8H
st1 {v2.S}[0], [x7], x2
st1 {v2.S}[1], [x7], x2
ret
.endm
.macro biweight_func w
function ff_biweight_h264_pixels_\w\()_neon, export=1
sxtw x2, w2
lsr w8, w5, #31
add w7, w7, #1
eor w8, w8, w6, lsr #30
orr w7, w7, #1
dup v18.8H, w4
lsl w7, w7, w4
not v18.16B, v18.16B
dup v16.8H, w7
mov x7, x0
cbz w8, 10f
subs w8, w8, #1
b.eq 20f
subs w8, w8, #1
b.eq 30f
b 40f
10: biweight_\w umlal, umlal
20: neg w5, w5
biweight_\w umlal, umlsl
30: neg w5, w5
neg w6, w6
biweight_\w umlsl, umlsl
40: neg w6, w6
biweight_\w umlsl, umlal
endfunc
.endm
biweight_func 16
biweight_func 8
biweight_func 4
.macro weight_16 add
dup v0.16B, w4
1: subs w2, w2, #2
ld1 {v20.16B}, [x0], x1
umull v4.8H, v0.8B, v20.8B
umull2 v6.8H, v0.16B, v20.16B
ld1 {v28.16B}, [x0], x1
umull v24.8H, v0.8B, v28.8B
umull2 v26.8H, v0.16B, v28.16B
\add v4.8H, v16.8H, v4.8H
srshl v4.8H, v4.8H, v18.8H
\add v6.8H, v16.8H, v6.8H
srshl v6.8H, v6.8H, v18.8H
sqxtun v4.8B, v4.8H
sqxtun2 v4.16B, v6.8H
\add v24.8H, v16.8H, v24.8H
srshl v24.8H, v24.8H, v18.8H
\add v26.8H, v16.8H, v26.8H
srshl v26.8H, v26.8H, v18.8H
sqxtun v24.8B, v24.8H
sqxtun2 v24.16B, v26.8H
st1 {v4.16B}, [x5], x1
st1 {v24.16B}, [x5], x1
b.ne 1b
ret
.endm
.macro weight_8 add
dup v0.8B, w4
1: subs w2, w2, #2
ld1 {v4.8B}, [x0], x1
umull v2.8H, v0.8B, v4.8B
ld1 {v6.8B}, [x0], x1
umull v20.8H, v0.8B, v6.8B
\add v2.8H, v16.8H, v2.8H
srshl v2.8H, v2.8H, v18.8H
sqxtun v2.8B, v2.8H
\add v20.8H, v16.8H, v20.8H
srshl v20.8H, v20.8H, v18.8H
sqxtun v4.8B, v20.8H
st1 {v2.8B}, [x5], x1
st1 {v4.8B}, [x5], x1
b.ne 1b
ret
.endm
.macro weight_4 add
dup v0.8B, w4
1: subs w2, w2, #4
ld1 {v4.S}[0], [x0], x1
ld1 {v4.S}[1], [x0], x1
umull v2.8H, v0.8B, v4.8B
b.lt 2f
ld1 {v6.S}[0], [x0], x1
ld1 {v6.S}[1], [x0], x1
umull v20.8H, v0.8B, v6.8B
\add v2.8H, v16.8H, v2.8H
srshl v2.8H, v2.8H, v18.8H
sqxtun v2.8B, v2.8H
\add v20.8H, v16.8H, v20.8H
srshl v20.8H, v20.8H, v18.8H
sqxtun v4.8B, v20.8H
st1 {v2.S}[0], [x5], x1
st1 {v2.S}[1], [x5], x1
st1 {v4.S}[0], [x5], x1
st1 {v4.S}[1], [x5], x1
b.ne 1b
ret
2: \add v2.8H, v16.8H, v2.8H
srshl v2.8H, v2.8H, v18.8H
sqxtun v2.8B, v2.8H
st1 {v2.S}[0], [x5], x1
st1 {v2.S}[1], [x5], x1
ret
.endm
.macro weight_func w
function ff_weight_h264_pixels_\w\()_neon, export=1
sxtw x1, w1
cmp w3, #1
mov w6, #1
lsl w5, w5, w3
dup v16.8H, w5
mov x5, x0
b.le 20f
sub w6, w6, w3
dup v18.8H, w6
cmp w4, #0
b.lt 10f
weight_\w shadd
10: neg w4, w4
weight_\w shsub
20: neg w6, w3
dup v18.8H, w6
cmp w4, #0
b.lt 10f
weight_\w add
10: neg w4, w4
weight_\w sub
endfunc
.endm
weight_func 16
weight_func 8
weight_func 4
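For orientation, a scalar sketch of the explicit weighted prediction the weight_\w macros implement (H.264 single-reference form; the bi-predictive biweight_\w variants combine two sources analogously). This assumes log2_den > 0 and leaves out the small-log2_den handling at label 20 above:
static void weight_c(uint8_t *block, int stride, int width, int height,
                     int log2_den, int weight, int offset)
{
    int x, y;
    for (y = 0; y < height; y++, block += stride)
        for (x = 0; x < width; x++) {
            int v = ((block[x] * weight + (1 << (log2_den - 1)))
                     >> log2_den) + offset;
            block[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}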

View File

@@ -1,408 +0,0 @@
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
* Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/aarch64/asm.S"
#include "neon.S"
function ff_h264_idct_add_neon, export=1
ld1 {v0.4H, v1.4H, v2.4H, v3.4H}, [x1]
sxtw x2, w2
movi v30.8H, #0
add v4.4H, v0.4H, v2.4H
sshr v16.4H, v1.4H, #1
st1 {v30.8H}, [x1], #16
sshr v17.4H, v3.4H, #1
st1 {v30.8H}, [x1], #16
sub v5.4H, v0.4H, v2.4H
add v6.4H, v1.4H, v17.4H
sub v7.4H, v16.4H, v3.4H
add v0.4H, v4.4H, v6.4H
add v1.4H, v5.4H, v7.4H
sub v2.4H, v4.4H, v6.4H
sub v3.4H, v5.4H, v7.4H
transpose_4x4H v0, v1, v2, v3, v4, v5, v6, v7
add v4.4H, v0.4H, v3.4H
ld1 {v18.S}[0], [x0], x2
sshr v16.4H, v2.4H, #1
sshr v17.4H, v1.4H, #1
ld1 {v19.S}[1], [x0], x2
sub v5.4H, v0.4H, v3.4H
ld1 {v18.S}[1], [x0], x2
add v6.4H, v16.4H, v1.4H
ins v4.D[1], v5.D[0]
sub v7.4H, v2.4H, v17.4H
ld1 {v19.S}[0], [x0], x2
ins v6.D[1], v7.D[0]
sub x0, x0, x2, lsl #2
add v0.8H, v4.8H, v6.8H
sub v1.8H, v4.8H, v6.8H
srshr v0.8H, v0.8H, #6
srshr v1.8H, v1.8H, #6
uaddw v0.8H, v0.8H, v18.8B
uaddw v1.8H, v1.8H, v19.8B
sqxtun v0.8B, v0.8H
sqxtun v1.8B, v1.8H
st1 {v0.S}[0], [x0], x2
st1 {v1.S}[1], [x0], x2
st1 {v0.S}[1], [x0], x2
st1 {v1.S}[0], [x0], x2
sub x1, x1, #32
ret
endfunc
function ff_h264_idct_dc_add_neon, export=1
sxtw x2, w2
mov w3, #0
ld1r {v2.8H}, [x1]
strh w3, [x1]
srshr v2.8H, v2.8H, #6
ld1 {v0.S}[0], [x0], x2
ld1 {v0.S}[1], [x0], x2
uaddw v3.8H, v2.8H, v0.8B
ld1 {v1.S}[0], [x0], x2
ld1 {v1.S}[1], [x0], x2
uaddw v4.8H, v2.8H, v1.8B
sqxtun v0.8B, v3.8H
sqxtun v1.8B, v4.8H
sub x0, x0, x2, lsl #2
st1 {v0.S}[0], [x0], x2
st1 {v0.S}[1], [x0], x2
st1 {v1.S}[0], [x0], x2
st1 {v1.S}[1], [x0], x2
ret
endfunc
function ff_h264_idct_add16_neon, export=1
mov x12, x30
mov x6, x0 // dest
mov x5, x1 // block_offset
mov x1, x2 // block
mov w9, w3 // stride
movrel x7, scan8
mov x10, #16
movrel x13, X(ff_h264_idct_dc_add_neon)
movrel x14, X(ff_h264_idct_add_neon)
1: mov w2, w9
ldrb w3, [x7], #1
ldrsw x0, [x5], #4
ldrb w3, [x4, w3, uxtw]
subs w3, w3, #1
b.lt 2f
ldrsh w3, [x1]
add x0, x0, x6
ccmp w3, #0, #4, eq
csel x15, x13, x14, ne
blr x15
2: subs x10, x10, #1
add x1, x1, #32
b.ne 1b
ret x12
endfunc
function ff_h264_idct_add16intra_neon, export=1
mov x12, x30
mov x6, x0 // dest
mov x5, x1 // block_offset
mov x1, x2 // block
mov w9, w3 // stride
movrel x7, scan8
mov x10, #16
movrel x13, X(ff_h264_idct_dc_add_neon)
movrel x14, X(ff_h264_idct_add_neon)
1: mov w2, w9
ldrb w3, [x7], #1
ldrsw x0, [x5], #4
ldrb w3, [x4, w3, uxtw]
add x0, x0, x6
cmp w3, #0
ldrsh w3, [x1]
csel x15, x13, x14, eq
ccmp w3, #0, #0, eq
b.eq 2f
blr x15
2: subs x10, x10, #1
add x1, x1, #32
b.ne 1b
ret x12
endfunc
function ff_h264_idct_add8_neon, export=1
sub sp, sp, #0x40
stp x19, x20, [sp]
mov x12, x30
ldp x6, x15, [x0] // dest[0], dest[1]
add x5, x1, #16*4 // block_offset
add x9, x2, #16*32 // block
mov w19, w3 // stride
movrel x13, X(ff_h264_idct_dc_add_neon)
movrel x14, X(ff_h264_idct_add_neon)
movrel x7, scan8+16
mov x10, #0
mov x11, #16
1: mov w2, w19
ldrb w3, [x7, x10] // scan8[i]
ldrsw x0, [x5, x10, lsl #2] // block_offset[i]
ldrb w3, [x4, w3, uxtw] // nnzc[ scan8[i] ]
add x0, x0, x6 // block_offset[i] + dst[j-1]
add x1, x9, x10, lsl #5 // block + i * 16
cmp w3, #0
ldrsh w3, [x1] // block[i*16]
csel x20, x13, x14, eq
ccmp w3, #0, #0, eq
b.eq 2f
blr x20
2: add x10, x10, #1
cmp x10, #4
csel x10, x11, x10, eq // mov x10, #16
csel x6, x15, x6, eq
cmp x10, #20
b.lt 1b
ldp x19, x20, [sp]
add sp, sp, #0x40
ret x12
endfunc
.macro idct8x8_cols pass
.if \pass == 0
va .req v18
vb .req v30
sshr v18.8H, v26.8H, #1
add v16.8H, v24.8H, v28.8H
ld1 {v30.8H, v31.8H}, [x1]
st1 {v19.8H}, [x1], #16
st1 {v19.8H}, [x1], #16
sub v17.8H, v24.8H, v28.8H
sshr v19.8H, v30.8H, #1
sub v18.8H, v18.8H, v30.8H
add v19.8H, v19.8H, v26.8H
.else
va .req v30
vb .req v18
sshr v30.8H, v26.8H, #1
sshr v19.8H, v18.8H, #1
add v16.8H, v24.8H, v28.8H
sub v17.8H, v24.8H, v28.8H
sub v30.8H, v30.8H, v18.8H
add v19.8H, v19.8H, v26.8H
.endif
add v26.8H, v17.8H, va.8H
sub v28.8H, v17.8H, va.8H
add v24.8H, v16.8H, v19.8H
sub vb.8H, v16.8H, v19.8H
sub v16.8H, v29.8H, v27.8H
add v17.8H, v31.8H, v25.8H
sub va.8H, v31.8H, v25.8H
add v19.8H, v29.8H, v27.8H
sub v16.8H, v16.8H, v31.8H
sub v17.8H, v17.8H, v27.8H
add va.8H, va.8H, v29.8H
add v19.8H, v19.8H, v25.8H
sshr v25.8H, v25.8H, #1
sshr v27.8H, v27.8H, #1
sshr v29.8H, v29.8H, #1
sshr v31.8H, v31.8H, #1
sub v16.8H, v16.8H, v31.8H
sub v17.8H, v17.8H, v27.8H
add va.8H, va.8H, v29.8H
add v19.8H, v19.8H, v25.8H
sshr v25.8H, v16.8H, #2
sshr v27.8H, v17.8H, #2
sshr v29.8H, va.8H, #2
sshr v31.8H, v19.8H, #2
sub v19.8H, v19.8H, v25.8H
sub va.8H, v27.8H, va.8H
add v17.8H, v17.8H, v29.8H
add v16.8H, v16.8H, v31.8H
.if \pass == 0
sub v31.8H, v24.8H, v19.8H
add v24.8H, v24.8H, v19.8H
add v25.8H, v26.8H, v18.8H
sub v18.8H, v26.8H, v18.8H
add v26.8H, v28.8H, v17.8H
add v27.8H, v30.8H, v16.8H
sub v29.8H, v28.8H, v17.8H
sub v28.8H, v30.8H, v16.8H
.else
sub v31.8H, v24.8H, v19.8H
add v24.8H, v24.8H, v19.8H
add v25.8H, v26.8H, v30.8H
sub v30.8H, v26.8H, v30.8H
add v26.8H, v28.8H, v17.8H
sub v29.8H, v28.8H, v17.8H
add v27.8H, v18.8H, v16.8H
sub v28.8H, v18.8H, v16.8H
.endif
.unreq va
.unreq vb
.endm
function ff_h264_idct8_add_neon, export=1
movi v19.8H, #0
ld1 {v24.8H, v25.8H}, [x1]
st1 {v19.8H}, [x1], #16
st1 {v19.8H}, [x1], #16
ld1 {v26.8H, v27.8H}, [x1]
st1 {v19.8H}, [x1], #16
st1 {v19.8H}, [x1], #16
ld1 {v28.8H, v29.8H}, [x1]
st1 {v19.8H}, [x1], #16
st1 {v19.8H}, [x1], #16
idct8x8_cols 0
transpose_8x8H v24, v25, v26, v27, v28, v29, v18, v31, v6, v7
idct8x8_cols 1
mov x3, x0
srshr v24.8H, v24.8H, #6
ld1 {v0.8B}, [x0], x2
srshr v25.8H, v25.8H, #6
ld1 {v1.8B}, [x0], x2
srshr v26.8H, v26.8H, #6
ld1 {v2.8B}, [x0], x2
srshr v27.8H, v27.8H, #6
ld1 {v3.8B}, [x0], x2
srshr v28.8H, v28.8H, #6
ld1 {v4.8B}, [x0], x2
srshr v29.8H, v29.8H, #6
ld1 {v5.8B}, [x0], x2
srshr v30.8H, v30.8H, #6
ld1 {v6.8B}, [x0], x2
srshr v31.8H, v31.8H, #6
ld1 {v7.8B}, [x0], x2
uaddw v24.8H, v24.8H, v0.8B
uaddw v25.8H, v25.8H, v1.8B
uaddw v26.8H, v26.8H, v2.8B
sqxtun v0.8B, v24.8H
uaddw v27.8H, v27.8H, v3.8B
sqxtun v1.8B, v25.8H
uaddw v28.8H, v28.8H, v4.8B
sqxtun v2.8B, v26.8H
st1 {v0.8B}, [x3], x2
uaddw v29.8H, v29.8H, v5.8B
sqxtun v3.8B, v27.8H
st1 {v1.8B}, [x3], x2
uaddw v30.8H, v30.8H, v6.8B
sqxtun v4.8B, v28.8H
st1 {v2.8B}, [x3], x2
uaddw v31.8H, v31.8H, v7.8B
sqxtun v5.8B, v29.8H
st1 {v3.8B}, [x3], x2
sqxtun v6.8B, v30.8H
sqxtun v7.8B, v31.8H
st1 {v4.8B}, [x3], x2
st1 {v5.8B}, [x3], x2
st1 {v6.8B}, [x3], x2
st1 {v7.8B}, [x3], x2
sub x1, x1, #128
ret
endfunc
function ff_h264_idct8_dc_add_neon, export=1
mov w3, #0
sxtw x2, w2
ld1r {v31.8H}, [x1]
strh w3, [x1]
ld1 {v0.8B}, [x0], x2
srshr v31.8H, v31.8H, #6
ld1 {v1.8B}, [x0], x2
ld1 {v2.8B}, [x0], x2
uaddw v24.8H, v31.8H, v0.8B
ld1 {v3.8B}, [x0], x2
uaddw v25.8H, v31.8H, v1.8B
ld1 {v4.8B}, [x0], x2
uaddw v26.8H, v31.8H, v2.8B
ld1 {v5.8B}, [x0], x2
uaddw v27.8H, v31.8H, v3.8B
ld1 {v6.8B}, [x0], x2
uaddw v28.8H, v31.8H, v4.8B
ld1 {v7.8B}, [x0], x2
uaddw v29.8H, v31.8H, v5.8B
uaddw v30.8H, v31.8H, v6.8B
uaddw v31.8H, v31.8H, v7.8B
sqxtun v0.8B, v24.8H
sqxtun v1.8B, v25.8H
sqxtun v2.8B, v26.8H
sqxtun v3.8B, v27.8H
sub x0, x0, x2, lsl #3
st1 {v0.8B}, [x0], x2
sqxtun v4.8B, v28.8H
st1 {v1.8B}, [x0], x2
sqxtun v5.8B, v29.8H
st1 {v2.8B}, [x0], x2
sqxtun v6.8B, v30.8H
st1 {v3.8B}, [x0], x2
sqxtun v7.8B, v31.8H
st1 {v4.8B}, [x0], x2
st1 {v5.8B}, [x0], x2
st1 {v6.8B}, [x0], x2
st1 {v7.8B}, [x0], x2
ret
endfunc
function ff_h264_idct8_add4_neon, export=1
mov x12, x30
mov x6, x0
mov x5, x1
mov x1, x2
mov w2, w3
movrel x7, scan8
mov w10, #16
movrel x13, X(ff_h264_idct8_dc_add_neon)
movrel x14, X(ff_h264_idct8_add_neon)
1: ldrb w9, [x7], #4
ldrsw x0, [x5], #16
ldrb w9, [x4, w9, UXTW]
subs w9, w9, #1
b.lt 2f
ldrsh w11, [x1]
add x0, x6, x0
ccmp w11, #0, #4, eq
csel x15, x13, x14, ne
blr x15
2: subs w10, w10, #4
add x1, x1, #128
b.ne 1b
ret x12
endfunc
const scan8
.byte 4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8
.byte 6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8
.byte 4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8
.byte 6+ 3*8, 7+ 3*8, 6+ 4*8, 7+ 4*8
.byte 4+ 6*8, 5+ 6*8, 4+ 7*8, 5+ 7*8
.byte 6+ 6*8, 7+ 6*8, 6+ 7*8, 7+ 7*8
.byte 4+ 8*8, 5+ 8*8, 4+ 9*8, 5+ 9*8
.byte 6+ 8*8, 7+ 8*8, 6+ 9*8, 7+ 9*8
.byte 4+11*8, 5+11*8, 4+12*8, 5+12*8
.byte 6+11*8, 7+11*8, 6+12*8, 7+12*8
.byte 4+13*8, 5+13*8, 4+14*8, 5+14*8
.byte 6+13*8, 7+13*8, 6+14*8, 7+14*8
endconst

View File

@@ -1,172 +0,0 @@
/*
* ARM NEON optimised DSP functions
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/aarch64/cpu.h"
#include "libavcodec/h264qpel.h"
void ff_put_h264_qpel16_mc00_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc10_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc20_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc30_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc01_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc11_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc21_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc31_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc02_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc12_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc22_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc32_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc03_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc13_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc23_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc33_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc00_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc10_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc20_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc30_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc01_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc11_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc21_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc31_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc02_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc12_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc22_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc32_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc03_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc13_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc23_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc33_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc00_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc10_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc20_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc30_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc01_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc11_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc21_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc31_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc02_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc12_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc22_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc32_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc03_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc13_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc23_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc33_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc00_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc10_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc20_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc30_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc01_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc11_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc21_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc31_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc02_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc12_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc22_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc32_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc03_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc13_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc23_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc33_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
av_cold void ff_h264qpel_init_aarch64(H264QpelContext *c, int bit_depth)
{
const int high_bit_depth = bit_depth > 8;
int cpu_flags = av_get_cpu_flags();
if (have_neon(cpu_flags) && !high_bit_depth) {
c->put_h264_qpel_pixels_tab[0][ 0] = ff_put_h264_qpel16_mc00_neon;
c->put_h264_qpel_pixels_tab[0][ 1] = ff_put_h264_qpel16_mc10_neon;
c->put_h264_qpel_pixels_tab[0][ 2] = ff_put_h264_qpel16_mc20_neon;
c->put_h264_qpel_pixels_tab[0][ 3] = ff_put_h264_qpel16_mc30_neon;
c->put_h264_qpel_pixels_tab[0][ 4] = ff_put_h264_qpel16_mc01_neon;
c->put_h264_qpel_pixels_tab[0][ 5] = ff_put_h264_qpel16_mc11_neon;
c->put_h264_qpel_pixels_tab[0][ 6] = ff_put_h264_qpel16_mc21_neon;
c->put_h264_qpel_pixels_tab[0][ 7] = ff_put_h264_qpel16_mc31_neon;
c->put_h264_qpel_pixels_tab[0][ 8] = ff_put_h264_qpel16_mc02_neon;
c->put_h264_qpel_pixels_tab[0][ 9] = ff_put_h264_qpel16_mc12_neon;
c->put_h264_qpel_pixels_tab[0][10] = ff_put_h264_qpel16_mc22_neon;
c->put_h264_qpel_pixels_tab[0][11] = ff_put_h264_qpel16_mc32_neon;
c->put_h264_qpel_pixels_tab[0][12] = ff_put_h264_qpel16_mc03_neon;
c->put_h264_qpel_pixels_tab[0][13] = ff_put_h264_qpel16_mc13_neon;
c->put_h264_qpel_pixels_tab[0][14] = ff_put_h264_qpel16_mc23_neon;
c->put_h264_qpel_pixels_tab[0][15] = ff_put_h264_qpel16_mc33_neon;
c->put_h264_qpel_pixels_tab[1][ 0] = ff_put_h264_qpel8_mc00_neon;
c->put_h264_qpel_pixels_tab[1][ 1] = ff_put_h264_qpel8_mc10_neon;
c->put_h264_qpel_pixels_tab[1][ 2] = ff_put_h264_qpel8_mc20_neon;
c->put_h264_qpel_pixels_tab[1][ 3] = ff_put_h264_qpel8_mc30_neon;
c->put_h264_qpel_pixels_tab[1][ 4] = ff_put_h264_qpel8_mc01_neon;
c->put_h264_qpel_pixels_tab[1][ 5] = ff_put_h264_qpel8_mc11_neon;
c->put_h264_qpel_pixels_tab[1][ 6] = ff_put_h264_qpel8_mc21_neon;
c->put_h264_qpel_pixels_tab[1][ 7] = ff_put_h264_qpel8_mc31_neon;
c->put_h264_qpel_pixels_tab[1][ 8] = ff_put_h264_qpel8_mc02_neon;
c->put_h264_qpel_pixels_tab[1][ 9] = ff_put_h264_qpel8_mc12_neon;
c->put_h264_qpel_pixels_tab[1][10] = ff_put_h264_qpel8_mc22_neon;
c->put_h264_qpel_pixels_tab[1][11] = ff_put_h264_qpel8_mc32_neon;
c->put_h264_qpel_pixels_tab[1][12] = ff_put_h264_qpel8_mc03_neon;
c->put_h264_qpel_pixels_tab[1][13] = ff_put_h264_qpel8_mc13_neon;
c->put_h264_qpel_pixels_tab[1][14] = ff_put_h264_qpel8_mc23_neon;
c->put_h264_qpel_pixels_tab[1][15] = ff_put_h264_qpel8_mc33_neon;
c->avg_h264_qpel_pixels_tab[0][ 0] = ff_avg_h264_qpel16_mc00_neon;
c->avg_h264_qpel_pixels_tab[0][ 1] = ff_avg_h264_qpel16_mc10_neon;
c->avg_h264_qpel_pixels_tab[0][ 2] = ff_avg_h264_qpel16_mc20_neon;
c->avg_h264_qpel_pixels_tab[0][ 3] = ff_avg_h264_qpel16_mc30_neon;
c->avg_h264_qpel_pixels_tab[0][ 4] = ff_avg_h264_qpel16_mc01_neon;
c->avg_h264_qpel_pixels_tab[0][ 5] = ff_avg_h264_qpel16_mc11_neon;
c->avg_h264_qpel_pixels_tab[0][ 6] = ff_avg_h264_qpel16_mc21_neon;
c->avg_h264_qpel_pixels_tab[0][ 7] = ff_avg_h264_qpel16_mc31_neon;
c->avg_h264_qpel_pixels_tab[0][ 8] = ff_avg_h264_qpel16_mc02_neon;
c->avg_h264_qpel_pixels_tab[0][ 9] = ff_avg_h264_qpel16_mc12_neon;
c->avg_h264_qpel_pixels_tab[0][10] = ff_avg_h264_qpel16_mc22_neon;
c->avg_h264_qpel_pixels_tab[0][11] = ff_avg_h264_qpel16_mc32_neon;
c->avg_h264_qpel_pixels_tab[0][12] = ff_avg_h264_qpel16_mc03_neon;
c->avg_h264_qpel_pixels_tab[0][13] = ff_avg_h264_qpel16_mc13_neon;
c->avg_h264_qpel_pixels_tab[0][14] = ff_avg_h264_qpel16_mc23_neon;
c->avg_h264_qpel_pixels_tab[0][15] = ff_avg_h264_qpel16_mc33_neon;
c->avg_h264_qpel_pixels_tab[1][ 0] = ff_avg_h264_qpel8_mc00_neon;
c->avg_h264_qpel_pixels_tab[1][ 1] = ff_avg_h264_qpel8_mc10_neon;
c->avg_h264_qpel_pixels_tab[1][ 2] = ff_avg_h264_qpel8_mc20_neon;
c->avg_h264_qpel_pixels_tab[1][ 3] = ff_avg_h264_qpel8_mc30_neon;
c->avg_h264_qpel_pixels_tab[1][ 4] = ff_avg_h264_qpel8_mc01_neon;
c->avg_h264_qpel_pixels_tab[1][ 5] = ff_avg_h264_qpel8_mc11_neon;
c->avg_h264_qpel_pixels_tab[1][ 6] = ff_avg_h264_qpel8_mc21_neon;
c->avg_h264_qpel_pixels_tab[1][ 7] = ff_avg_h264_qpel8_mc31_neon;
c->avg_h264_qpel_pixels_tab[1][ 8] = ff_avg_h264_qpel8_mc02_neon;
c->avg_h264_qpel_pixels_tab[1][ 9] = ff_avg_h264_qpel8_mc12_neon;
c->avg_h264_qpel_pixels_tab[1][10] = ff_avg_h264_qpel8_mc22_neon;
c->avg_h264_qpel_pixels_tab[1][11] = ff_avg_h264_qpel8_mc32_neon;
c->avg_h264_qpel_pixels_tab[1][12] = ff_avg_h264_qpel8_mc03_neon;
c->avg_h264_qpel_pixels_tab[1][13] = ff_avg_h264_qpel8_mc13_neon;
c->avg_h264_qpel_pixels_tab[1][14] = ff_avg_h264_qpel8_mc23_neon;
c->avg_h264_qpel_pixels_tab[1][15] = ff_avg_h264_qpel8_mc33_neon;
}
}
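The qpel function tables above are indexed first by block size (0 for 16x16, 1 for 8x8) and then by the quarter-pel phase, 4*my + mx, which matches the mcXY suffix of each NEON routine (mc21 is mx=2, my=1). A hypothetical dispatch sketch follows, not the actual FFmpeg call site; the typedef and helper names are illustrative only:

#include <stdint.h>
#include <stddef.h>

/* Illustrative signature; the real table type lives in libavcodec. */
typedef void (*qpel_mc_func)(uint8_t *dst, const uint8_t *src,
                             ptrdiff_t stride);

/* mx, my are motion vector components in quarter-pel units. */
static void mc_luma_block(qpel_mc_func tab[2][16],
                          uint8_t *dst, const uint8_t *src,
                          ptrdiff_t stride, int blocksize, int mx, int my)
{
    int size_idx = blocksize == 16 ? 0 : 1;   /* [0]: 16x16, [1]: 8x8 */
    int phase    = (my & 3) * 4 + (mx & 3);   /* mc00 .. mc33 */
    src += (my >> 2) * stride + (mx >> 2);    /* full-pel part of the MV */
    tab[size_idx][phase](dst, src, stride);
}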

@@ -1,934 +0,0 @@
/*
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
* Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/aarch64/asm.S"
#include "neon.S"
/* H.264 quarter-pel (qpel) motion compensation */
.macro lowpass_const r
movz \r, #20, lsl #16
movk \r, #5
mov v6.S[0], \r
.endm
// trashes v0-v5
.macro lowpass_8 r0, r1, r2, r3, d0, d1, narrow=1
ext v2.8B, \r0\().8B, \r1\().8B, #2
ext v3.8B, \r0\().8B, \r1\().8B, #3
uaddl v2.8H, v2.8B, v3.8B
ext v4.8B, \r0\().8B, \r1\().8B, #1
ext v5.8B, \r0\().8B, \r1\().8B, #4
uaddl v4.8H, v4.8B, v5.8B
ext v1.8B, \r0\().8B, \r1\().8B, #5
uaddl \d0\().8H, \r0\().8B, v1.8B
ext v0.8B, \r2\().8B, \r3\().8B, #2
mla \d0\().8H, v2.8H, v6.H[1]
ext v1.8B, \r2\().8B, \r3\().8B, #3
uaddl v0.8H, v0.8B, v1.8B
ext v1.8B, \r2\().8B, \r3\().8B, #1
mls \d0\().8H, v4.8H, v6.H[0]
ext v3.8B, \r2\().8B, \r3\().8B, #4
uaddl v1.8H, v1.8B, v3.8B
ext v2.8B, \r2\().8B, \r3\().8B, #5
uaddl \d1\().8H, \r2\().8B, v2.8B
mla \d1\().8H, v0.8H, v6.H[1]
mls \d1\().8H, v1.8H, v6.H[0]
.if \narrow
sqrshrun \d0\().8B, \d0\().8H, #5
sqrshrun \d1\().8B, \d1\().8H, #5
.endif
.endm
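The lowpass_8 macro above implements the H.264 six-tap half-sample filter (1, -5, 20, 20, -5, 1): the ext instructions build the shifted tap vectors, mla/mls apply the 20 and 5 weights kept in v6 by lowpass_const, and sqrshrun performs the rounding shift by 5 with unsigned saturation. Below is a minimal scalar sketch of one output pixel, assuming src points at the first tap (two samples left of the interpolated position); the NEON version produces eight pixels at a time and interleaves two rows. clip_u8 and lowpass_1d are illustrative helper names:

#include <stdint.h>

static uint8_t clip_u8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

static uint8_t lowpass_1d(const uint8_t *src)
{
    int sum = (src[0] + src[5])
            + 20 * (src[2] + src[3])
            -  5 * (src[1] + src[4]);
    return clip_u8((sum + 16) >> 5);   /* sqrshrun #5: round and saturate */
}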
// trashes v0-v5, v7, v30-v31
.macro lowpass_8H r0, r1
ext v0.16B, \r0\().16B, \r0\().16B, #2
ext v1.16B, \r0\().16B, \r0\().16B, #3
uaddl v0.8H, v0.8B, v1.8B
ext v2.16B, \r0\().16B, \r0\().16B, #1
ext v3.16B, \r0\().16B, \r0\().16B, #4
uaddl v2.8H, v2.8B, v3.8B
ext v30.16B, \r0\().16B, \r0\().16B, #5
uaddl \r0\().8H, \r0\().8B, v30.8B
ext v4.16B, \r1\().16B, \r1\().16B, #2
mla \r0\().8H, v0.8H, v6.H[1]
ext v5.16B, \r1\().16B, \r1\().16B, #3
uaddl v4.8H, v4.8B, v5.8B
ext v7.16B, \r1\().16B, \r1\().16B, #1
mls \r0\().8H, v2.8H, v6.H[0]
ext v0.16B, \r1\().16B, \r1\().16B, #4
uaddl v7.8H, v7.8B, v0.8B
ext v31.16B, \r1\().16B, \r1\().16B, #5
uaddl \r1\().8H, \r1\().8B, v31.8B
mla \r1\().8H, v4.8H, v6.H[1]
mls \r1\().8H, v7.8H, v6.H[0]
.endm
// trashes v2-v5, v30
.macro lowpass_8_1 r0, r1, d0, narrow=1
ext v2.8B, \r0\().8B, \r1\().8B, #2
ext v3.8B, \r0\().8B, \r1\().8B, #3
uaddl v2.8H, v2.8B, v3.8B
ext v4.8B, \r0\().8B, \r1\().8B, #1
ext v5.8B, \r0\().8B, \r1\().8B, #4
uaddl v4.8H, v4.8B, v5.8B
ext v30.8B, \r0\().8B, \r1\().8B, #5
uaddl \d0\().8H, \r0\().8B, v30.8B
mla \d0\().8H, v2.8H, v6.H[1]
mls \d0\().8H, v4.8H, v6.H[0]
.if \narrow
sqrshrun \d0\().8B, \d0\().8H, #5
.endif
.endm
// trashes v0-v7
.macro lowpass_8.16 r0, r1, r2
ext v1.16B, \r0\().16B, \r1\().16B, #4
ext v0.16B, \r0\().16B, \r1\().16B, #6
saddl v5.4S, v1.4H, v0.4H
ext v2.16B, \r0\().16B, \r1\().16B, #2
saddl2 v1.4S, v1.8H, v0.8H
ext v3.16B, \r0\().16B, \r1\().16B, #8
saddl v6.4S, v2.4H, v3.4H
ext \r1\().16B, \r0\().16B, \r1\().16B, #10
saddl2 v2.4S, v2.8H, v3.8H
saddl v0.4S, \r0\().4H, \r1\().4H
saddl2 v4.4S, \r0\().8H, \r1\().8H
shl v3.4S, v5.4S, #4
shl v5.4S, v5.4S, #2
shl v7.4S, v6.4S, #2
add v5.4S, v5.4S, v3.4S
add v6.4S, v6.4S, v7.4S
shl v3.4S, v1.4S, #4
shl v1.4S, v1.4S, #2
shl v7.4S, v2.4S, #2
add v1.4S, v1.4S, v3.4S
add v2.4S, v2.4S, v7.4S
add v5.4S, v5.4S, v0.4S
sub v5.4S, v5.4S, v6.4S
add v1.4S, v1.4S, v4.4S
sub v1.4S, v1.4S, v2.4S
rshrn v5.4H, v5.4S, #10
rshrn2 v5.8H, v1.4S, #10
sqxtun \r2\().8B, v5.8H
.endm
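lowpass_8.16 handles the second pass of the centre (hv) position: the first pass (lowpass_8H) keeps full 16-bit intermediates instead of narrowing, and this macro then filters those intermediates with 32-bit accumulators before a single rounding shift by 10, matching the H.264 derivation of the centre half-sample. A scalar sketch under the same assumptions as above, with istride as the intermediate row stride (helper name illustrative):

#include <stdint.h>
#include <stddef.h>

static uint8_t lowpass_2d(const int16_t *mid, ptrdiff_t istride)
{
    int sum = (mid[0 * istride] + mid[5 * istride])
            + 20 * (mid[2 * istride] + mid[3 * istride])
            -  5 * (mid[1 * istride] + mid[4 * istride]);
    sum = (sum + 512) >> 10;                              /* rshrn #10 */
    return sum < 0 ? 0 : sum > 255 ? 255 : (uint8_t)sum;  /* sqxtun */
}

Deferring the rounding to one shift by 10 after both passes avoids the precision loss of rounding each pass separately.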
function put_h264_qpel16_h_lowpass_neon_packed
mov x4, x30
mov x12, #16
mov x3, #8
bl put_h264_qpel8_h_lowpass_neon
sub x1, x1, x2, lsl #4
add x1, x1, #8
mov x12, #16
mov x30, x4
b put_h264_qpel8_h_lowpass_neon
endfunc
.macro h264_qpel_h_lowpass type
function \type\()_h264_qpel16_h_lowpass_neon
mov x13, x30
mov x12, #16
bl \type\()_h264_qpel8_h_lowpass_neon
sub x0, x0, x3, lsl #4
sub x1, x1, x2, lsl #4
add x0, x0, #8
add x1, x1, #8
mov x12, #16
mov x30, x13
endfunc
function \type\()_h264_qpel8_h_lowpass_neon
1: ld1 {v28.8B, v29.8B}, [x1], x2
ld1 {v16.8B, v17.8B}, [x1], x2
subs x12, x12, #2
lowpass_8 v28, v29, v16, v17, v28, v16
.ifc \type,avg
ld1 {v2.8B}, [x0], x3
urhadd v28.8B, v28.8B, v2.8B
ld1 {v3.8B}, [x0]
urhadd v16.8B, v16.8B, v3.8B
sub x0, x0, x3
.endif
st1 {v28.8B}, [x0], x3
st1 {v16.8B}, [x0], x3
b.ne 1b
ret
endfunc
.endm
h264_qpel_h_lowpass put
h264_qpel_h_lowpass avg
.macro h264_qpel_h_lowpass_l2 type
function \type\()_h264_qpel16_h_lowpass_l2_neon
mov x13, x30
mov x12, #16
bl \type\()_h264_qpel8_h_lowpass_l2_neon
sub x0, x0, x2, lsl #4
sub x1, x1, x2, lsl #4
sub x3, x3, x2, lsl #4
add x0, x0, #8
add x1, x1, #8
add x3, x3, #8
mov x12, #16
mov x30, x13
endfunc
function \type\()_h264_qpel8_h_lowpass_l2_neon
1: ld1 {v26.8B, v27.8B}, [x1], x2
ld1 {v16.8B, v17.8B}, [x1], x2
ld1 {v28.8B}, [x3], x2
ld1 {v29.8B}, [x3], x2
subs x12, x12, #2
lowpass_8 v26, v27, v16, v17, v26, v27
urhadd v26.8B, v26.8B, v28.8B
urhadd v27.8B, v27.8B, v29.8B
.ifc \type,avg
ld1 {v2.8B}, [x0], x2
urhadd v26.8B, v26.8B, v2.8B
ld1 {v3.8B}, [x0]
urhadd v27.8B, v27.8B, v3.8B
sub x0, x0, x2
.endif
st1 {v26.8B}, [x0], x2
st1 {v27.8B}, [x0], x2
b.ne 1b
ret
endfunc
.endm
h264_qpel_h_lowpass_l2 put
h264_qpel_h_lowpass_l2 avg
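The _l2 variants blend the lowpass result with a second reference line via urhadd, a rounding halving add; this is how the quarter-sample positions are formed as the average of the nearest half-sample and full-sample predictions. Scalar equivalent of urhadd on one pixel (helper name illustrative):

#include <stdint.h>

static uint8_t rnd_avg(uint8_t a, uint8_t b)
{
    return (uint8_t)((a + b + 1) >> 1);   /* urhadd: (a + b + 1) >> 1 */
}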
function put_h264_qpel16_v_lowpass_neon_packed
mov x4, x30
mov x2, #8
bl put_h264_qpel8_v_lowpass_neon
sub x1, x1, x3, lsl #2
bl put_h264_qpel8_v_lowpass_neon
sub x1, x1, x3, lsl #4
sub x1, x1, x3, lsl #2
add x1, x1, #8
bl put_h264_qpel8_v_lowpass_neon
sub x1, x1, x3, lsl #2
mov x30, x4
b put_h264_qpel8_v_lowpass_neon
endfunc
.macro h264_qpel_v_lowpass type
function \type\()_h264_qpel16_v_lowpass_neon
mov x4, x30
bl \type\()_h264_qpel8_v_lowpass_neon
sub x1, x1, x3, lsl #2
bl \type\()_h264_qpel8_v_lowpass_neon
sub x0, x0, x2, lsl #4
add x0, x0, #8
sub x1, x1, x3, lsl #4
sub x1, x1, x3, lsl #2
add x1, x1, #8
bl \type\()_h264_qpel8_v_lowpass_neon
sub x1, x1, x3, lsl #2
mov x30, x4
endfunc
function \type\()_h264_qpel8_v_lowpass_neon
ld1 {v16.8B}, [x1], x3
ld1 {v18.8B}, [x1], x3
ld1 {v20.8B}, [x1], x3
ld1 {v22.8B}, [x1], x3
ld1 {v24.8B}, [x1], x3
ld1 {v26.8B}, [x1], x3
ld1 {v28.8B}, [x1], x3
ld1 {v30.8B}, [x1], x3
ld1 {v17.8B}, [x1], x3
ld1 {v19.8B}, [x1], x3
ld1 {v21.8B}, [x1], x3
ld1 {v23.8B}, [x1], x3
ld1 {v25.8B}, [x1]
transpose_8x8B v16, v18, v20, v22, v24, v26, v28, v30, v0, v1
transpose_8x8B v17, v19, v21, v23, v25, v27, v29, v31, v0, v1
lowpass_8 v16, v17, v18, v19, v16, v17
lowpass_8 v20, v21, v22, v23, v18, v19
lowpass_8 v24, v25, v26, v27, v20, v21
lowpass_8 v28, v29, v30, v31, v22, v23
transpose_8x8B v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
.ifc \type,avg
ld1 {v24.8B}, [x0], x2
urhadd v16.8B, v16.8B, v24.8B
ld1 {v25.8B}, [x0], x2
urhadd v17.8B, v17.8B, v25.8B
ld1 {v26.8B}, [x0], x2
urhadd v18.8B, v18.8B, v26.8B
ld1 {v27.8B}, [x0], x2
urhadd v19.8B, v19.8B, v27.8B
ld1 {v28.8B}, [x0], x2
urhadd v20.8B, v20.8B, v28.8B
ld1 {v29.8B}, [x0], x2
urhadd v21.8B, v21.8B, v29.8B
ld1 {v30.8B}, [x0], x2
urhadd v22.8B, v22.8B, v30.8B
ld1 {v31.8B}, [x0], x2
urhadd v23.8B, v23.8B, v31.8B
sub x0, x0, x2, lsl #3
.endif
st1 {v16.8B}, [x0], x2
st1 {v17.8B}, [x0], x2
st1 {v18.8B}, [x0], x2
st1 {v19.8B}, [x0], x2
st1 {v20.8B}, [x0], x2
st1 {v21.8B}, [x0], x2
st1 {v22.8B}, [x0], x2
st1 {v23.8B}, [x0], x2
ret
endfunc
.endm
h264_qpel_v_lowpass put
h264_qpel_v_lowpass avg
.macro h264_qpel_v_lowpass_l2 type
function \type\()_h264_qpel16_v_lowpass_l2_neon
mov x4, x30
bl \type\()_h264_qpel8_v_lowpass_l2_neon
sub x1, x1, x3, lsl #2
bl \type\()_h264_qpel8_v_lowpass_l2_neon
sub x0, x0, x3, lsl #4
sub x12, x12, x2, lsl #4
add x0, x0, #8
add x12, x12, #8
sub x1, x1, x3, lsl #4
sub x1, x1, x3, lsl #2
add x1, x1, #8
bl \type\()_h264_qpel8_v_lowpass_l2_neon
sub x1, x1, x3, lsl #2
mov x30, x4
endfunc
function \type\()_h264_qpel8_v_lowpass_l2_neon
ld1 {v16.8B}, [x1], x3
ld1 {v18.8B}, [x1], x3
ld1 {v20.8B}, [x1], x3
ld1 {v22.8B}, [x1], x3
ld1 {v24.8B}, [x1], x3
ld1 {v26.8B}, [x1], x3
ld1 {v28.8B}, [x1], x3
ld1 {v30.8B}, [x1], x3
ld1 {v17.8B}, [x1], x3
ld1 {v19.8B}, [x1], x3
ld1 {v21.8B}, [x1], x3
ld1 {v23.8B}, [x1], x3
ld1 {v25.8B}, [x1]
transpose_8x8B v16, v18, v20, v22, v24, v26, v28, v30, v0, v1
transpose_8x8B v17, v19, v21, v23, v25, v27, v29, v31, v0, v1
lowpass_8 v16, v17, v18, v19, v16, v17
lowpass_8 v20, v21, v22, v23, v18, v19
lowpass_8 v24, v25, v26, v27, v20, v21
lowpass_8 v28, v29, v30, v31, v22, v23
transpose_8x8B v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
ld1 {v24.8B}, [x12], x2
ld1 {v25.8B}, [x12], x2
ld1 {v26.8B}, [x12], x2
ld1 {v27.8B}, [x12], x2
ld1 {v28.8B}, [x12], x2
urhadd v16.8B, v24.8B, v16.8B
urhadd v17.8B, v25.8B, v17.8B
ld1 {v29.8B}, [x12], x2
urhadd v18.8B, v26.8B, v18.8B
urhadd v19.8B, v27.8B, v19.8B
ld1 {v30.8B}, [x12], x2
urhadd v20.8B, v28.8B, v20.8B
urhadd v21.8B, v29.8B, v21.8B
ld1 {v31.8B}, [x12], x2
urhadd v22.8B, v30.8B, v22.8B
urhadd v23.8B, v31.8B, v23.8B
.ifc \type,avg
ld1 {v24.8B}, [x0], x3
urhadd v16.8B, v16.8B, v24.8B
ld1 {v25.8B}, [x0], x3
urhadd v17.8B, v17.8B, v25.8B
ld1 {v26.8B}, [x0], x3
urhadd v18.8B, v18.8B, v26.8B
ld1 {v27.8B}, [x0], x3
urhadd v19.8B, v19.8B, v27.8B
ld1 {v28.8B}, [x0], x3
urhadd v20.8B, v20.8B, v28.8B
ld1 {v29.8B}, [x0], x3
urhadd v21.8B, v21.8B, v29.8B
ld1 {v30.8B}, [x0], x3
urhadd v22.8B, v22.8B, v30.8B
ld1 {v31.8B}, [x0], x3
urhadd v23.8B, v23.8B, v31.8B
sub x0, x0, x3, lsl #3
.endif
st1 {v16.8B}, [x0], x3
st1 {v17.8B}, [x0], x3
st1 {v18.8B}, [x0], x3
st1 {v19.8B}, [x0], x3
st1 {v20.8B}, [x0], x3
st1 {v21.8B}, [x0], x3
st1 {v22.8B}, [x0], x3
st1 {v23.8B}, [x0], x3
ret
endfunc
.endm
h264_qpel_v_lowpass_l2 put
h264_qpel_v_lowpass_l2 avg
function put_h264_qpel8_hv_lowpass_neon_top
lowpass_const w12
ld1 {v16.8H}, [x1], x3
ld1 {v17.8H}, [x1], x3
ld1 {v18.8H}, [x1], x3
ld1 {v19.8H}, [x1], x3
ld1 {v20.8H}, [x1], x3
ld1 {v21.8H}, [x1], x3
ld1 {v22.8H}, [x1], x3
ld1 {v23.8H}, [x1], x3
ld1 {v24.8H}, [x1], x3
ld1 {v25.8H}, [x1], x3
ld1 {v26.8H}, [x1], x3
ld1 {v27.8H}, [x1], x3
ld1 {v28.8H}, [x1]
lowpass_8H v16, v17
lowpass_8H v18, v19
lowpass_8H v20, v21
lowpass_8H v22, v23
lowpass_8H v24, v25
lowpass_8H v26, v27
lowpass_8H v28, v29
transpose_8x8H v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
transpose_8x8H v24, v25, v26, v27, v28, v29, v30, v31, v0, v1
lowpass_8.16 v16, v24, v16
lowpass_8.16 v17, v25, v17
lowpass_8.16 v18, v26, v18
lowpass_8.16 v19, v27, v19
lowpass_8.16 v20, v28, v20
lowpass_8.16 v21, v29, v21
lowpass_8.16 v22, v30, v22
lowpass_8.16 v23, v31, v23
transpose_8x8B v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
ret
endfunc
.macro h264_qpel8_hv_lowpass type
function \type\()_h264_qpel8_hv_lowpass_neon
mov x10, x30
bl put_h264_qpel8_hv_lowpass_neon_top
.ifc \type,avg
ld1 {v0.8B}, [x0], x2
urhadd v16.8B, v16.8B, v0.8B
ld1 {v1.8B}, [x0], x2
urhadd v17.8B, v17.8B, v1.8B
ld1 {v2.8B}, [x0], x2
urhadd v18.8B, v18.8B, v2.8B
ld1 {v3.8B}, [x0], x2
urhadd v19.8B, v19.8B, v3.8B
ld1 {v4.8B}, [x0], x2
urhadd v20.8B, v20.8B, v4.8B
ld1 {v5.8B}, [x0], x2
urhadd v21.8B, v21.8B, v5.8B
ld1 {v6.8B}, [x0], x2
urhadd v22.8B, v22.8B, v6.8B
ld1 {v7.8B}, [x0], x2
urhadd v23.8B, v23.8B, v7.8B
sub x0, x0, x2, lsl #3
.endif
st1 {v16.8B}, [x0], x2
st1 {v17.8B}, [x0], x2
st1 {v18.8B}, [x0], x2
st1 {v19.8B}, [x0], x2
st1 {v20.8B}, [x0], x2
st1 {v21.8B}, [x0], x2
st1 {v22.8B}, [x0], x2
st1 {v23.8B}, [x0], x2
ret x10
endfunc
.endm
h264_qpel8_hv_lowpass put
h264_qpel8_hv_lowpass avg
.macro h264_qpel8_hv_lowpass_l2 type
function \type\()_h264_qpel8_hv_lowpass_l2_neon
mov x10, x30
bl put_h264_qpel8_hv_lowpass_neon_top
ld1 {v0.8B, v1.8B}, [x2], #16
ld1 {v2.8B, v3.8B}, [x2], #16
urhadd v0.8B, v0.8B, v16.8B
urhadd v1.8B, v1.8B, v17.8B
ld1 {v4.8B, v5.8B}, [x2], #16
urhadd v2.8B, v2.8B, v18.8B
urhadd v3.8B, v3.8B, v19.8B
ld1 {v6.8B, v7.8B}, [x2], #16
urhadd v4.8B, v4.8B, v20.8B
urhadd v5.8B, v5.8B, v21.8B
urhadd v6.8B, v6.8B, v22.8B
urhadd v7.8B, v7.8B, v23.8B
.ifc \type,avg
ld1 {v16.8B}, [x0], x3
urhadd v0.8B, v0.8B, v16.8B
ld1 {v17.8B}, [x0], x3
urhadd v1.8B, v1.8B, v17.8B
ld1 {v18.8B}, [x0], x3
urhadd v2.8B, v2.8B, v18.8B
ld1 {v19.8B}, [x0], x3
urhadd v3.8B, v3.8B, v19.8B
ld1 {v20.8B}, [x0], x3
urhadd v4.8B, v4.8B, v20.8B
ld1 {v21.8B}, [x0], x3
urhadd v5.8B, v5.8B, v21.8B
ld1 {v22.8B}, [x0], x3
urhadd v6.8B, v6.8B, v22.8B
ld1 {v23.8B}, [x0], x3
urhadd v7.8B, v7.8B, v23.8B
sub x0, x0, x3, lsl #3
.endif
st1 {v0.8B}, [x0], x3
st1 {v1.8B}, [x0], x3
st1 {v2.8B}, [x0], x3
st1 {v3.8B}, [x0], x3
st1 {v4.8B}, [x0], x3
st1 {v5.8B}, [x0], x3
st1 {v6.8B}, [x0], x3
st1 {v7.8B}, [x0], x3
ret x10
endfunc
.endm
h264_qpel8_hv_lowpass_l2 put
h264_qpel8_hv_lowpass_l2 avg
.macro h264_qpel16_hv type
function \type\()_h264_qpel16_hv_lowpass_neon
mov x13, x30
bl \type\()_h264_qpel8_hv_lowpass_neon
sub x1, x1, x3, lsl #2
bl \type\()_h264_qpel8_hv_lowpass_neon
sub x1, x1, x3, lsl #4
sub x1, x1, x3, lsl #2
add x1, x1, #8
sub x0, x0, x2, lsl #4
add x0, x0, #8
bl \type\()_h264_qpel8_hv_lowpass_neon
sub x1, x1, x3, lsl #2
mov x30, x13
b \type\()_h264_qpel8_hv_lowpass_neon
endfunc
function \type\()_h264_qpel16_hv_lowpass_l2_neon
mov x13, x30
sub x2, x4, #256
bl \type\()_h264_qpel8_hv_lowpass_l2_neon
sub x1, x1, x3, lsl #2
bl \type\()_h264_qpel8_hv_lowpass_l2_neon
sub x1, x1, x3, lsl #4
sub x1, x1, x3, lsl #2
add x1, x1, #8
sub x0, x0, x3, lsl #4
add x0, x0, #8
bl \type\()_h264_qpel8_hv_lowpass_l2_neon
sub x1, x1, x3, lsl #2
mov x30, x13
b \type\()_h264_qpel8_hv_lowpass_l2_neon
endfunc
.endm
h264_qpel16_hv put
h264_qpel16_hv avg
.macro h264_qpel8 type
function ff_\type\()_h264_qpel8_mc10_neon, export=1
lowpass_const w3
mov x3, x1
sub x1, x1, #2
mov x12, #8
b \type\()_h264_qpel8_h_lowpass_l2_neon
endfunc
function ff_\type\()_h264_qpel8_mc20_neon, export=1
lowpass_const w3
sub x1, x1, #2
mov x3, x2
mov x12, #8
b \type\()_h264_qpel8_h_lowpass_neon
endfunc
function ff_\type\()_h264_qpel8_mc30_neon, export=1
lowpass_const w3
add x3, x1, #1
sub x1, x1, #2
mov x12, #8
b \type\()_h264_qpel8_h_lowpass_l2_neon
endfunc
function ff_\type\()_h264_qpel8_mc01_neon, export=1
mov x14, x30
mov x12, x1
\type\()_h264_qpel8_mc01:
lowpass_const w3
mov x3, x2
sub x1, x1, x2, lsl #1
bl \type\()_h264_qpel8_v_lowpass_l2_neon
ret x14
endfunc
function ff_\type\()_h264_qpel8_mc11_neon, export=1
mov x14, x30
mov x8, x0
mov x9, x1
\type\()_h264_qpel8_mc11:
lowpass_const w3
mov x11, sp
sub sp, sp, #64
mov x0, sp
sub x1, x1, #2
mov x3, #8
mov x12, #8
bl put_h264_qpel8_h_lowpass_neon
mov x0, x8
mov x3, x2
mov x12, sp
sub x1, x9, x2, lsl #1
mov x2, #8
bl \type\()_h264_qpel8_v_lowpass_l2_neon
mov sp, x11
ret x14
endfunc
function ff_\type\()_h264_qpel8_mc21_neon, export=1
mov x14, x30
mov x8, x0
mov x9, x1
\type\()_h264_qpel8_mc21:
lowpass_const w3
mov x11, sp
sub sp, sp, #(8*8+16*12)
sub x1, x1, #2
mov x3, #8
mov x0, sp
mov x12, #8
bl put_h264_qpel8_h_lowpass_neon
mov x4, x0
mov x0, x8
sub x1, x9, x2, lsl #1
sub x1, x1, #2
mov x3, x2
sub x2, x4, #64
bl \type\()_h264_qpel8_hv_lowpass_l2_neon
mov sp, x11
ret x14
endfunc
function ff_\type\()_h264_qpel8_mc31_neon, export=1
add x1, x1, #1
mov x14, x30
mov x8, x0
mov x9, x1
sub x1, x1, #1
b \type\()_h264_qpel8_mc11
endfunc
function ff_\type\()_h264_qpel8_mc02_neon, export=1
mov x14, x30
lowpass_const w3
sub x1, x1, x2, lsl #1
mov x3, x2
bl \type\()_h264_qpel8_v_lowpass_neon
ret x14
endfunc
function ff_\type\()_h264_qpel8_mc12_neon, export=1
mov x14, x30
mov x8, x0
mov x9, x1
\type\()_h264_qpel8_mc12:
lowpass_const w3
mov x11, sp
sub sp, sp, #(8*8+16*12)
sub x1, x1, x2, lsl #1
mov x3, x2
mov x2, #8
mov x0, sp
bl put_h264_qpel8_v_lowpass_neon
mov x4, x0
mov x0, x8
sub x1, x9, x3, lsl #1
sub x1, x1, #2
sub x2, x4, #64
bl \type\()_h264_qpel8_hv_lowpass_l2_neon
mov sp, x11
ret x14
endfunc
function ff_\type\()_h264_qpel8_mc22_neon, export=1
mov x14, x30
mov x11, sp
sub x1, x1, x2, lsl #1
sub x1, x1, #2
mov x3, x2
bl \type\()_h264_qpel8_hv_lowpass_neon
mov sp, x11
ret x14
endfunc
function ff_\type\()_h264_qpel8_mc32_neon, export=1
mov x14, x30
mov x8, x0
mov x9, x1
add x1, x1, #1
b \type\()_h264_qpel8_mc12
endfunc
function ff_\type\()_h264_qpel8_mc03_neon, export=1
mov x14, x30
add x12, x1, x2
b \type\()_h264_qpel8_mc01
endfunc
function ff_\type\()_h264_qpel8_mc13_neon, export=1
mov x14, x30
mov x8, x0
mov x9, x1
add x1, x1, x2
b \type\()_h264_qpel8_mc11
endfunc
function ff_\type\()_h264_qpel8_mc23_neon, export=1
mov x14, x30
mov x8, x0
mov x9, x1
add x1, x1, x2
b \type\()_h264_qpel8_mc21
endfunc
function ff_\type\()_h264_qpel8_mc33_neon, export=1
add x1, x1, #1
mov x14, x30
mov x8, x0
mov x9, x1
add x1, x1, x2
sub x1, x1, #1
b \type\()_h264_qpel8_mc11
endfunc
.endm
h264_qpel8 put
h264_qpel8 avg
.macro h264_qpel16 type
function ff_\type\()_h264_qpel16_mc10_neon, export=1
lowpass_const w3
mov x3, x1
sub x1, x1, #2
b \type\()_h264_qpel16_h_lowpass_l2_neon
endfunc
function ff_\type\()_h264_qpel16_mc20_neon, export=1
lowpass_const w3
sub x1, x1, #2
mov x3, x2
b \type\()_h264_qpel16_h_lowpass_neon
endfunc
function ff_\type\()_h264_qpel16_mc30_neon, export=1
lowpass_const w3
add x3, x1, #1
sub x1, x1, #2
b \type\()_h264_qpel16_h_lowpass_l2_neon
endfunc
function ff_\type\()_h264_qpel16_mc01_neon, export=1
mov x14, x30
mov x12, x1
\type\()_h264_qpel16_mc01:
lowpass_const w3
mov x3, x2
sub x1, x1, x2, lsl #1
bl \type\()_h264_qpel16_v_lowpass_l2_neon
ret x14
endfunc
function ff_\type\()_h264_qpel16_mc11_neon, export=1
mov x14, x30
mov x8, x0
mov x9, x1
\type\()_h264_qpel16_mc11:
lowpass_const w3
mov x11, sp
sub sp, sp, #256
mov x0, sp
sub x1, x1, #2
mov x3, #16
bl put_h264_qpel16_h_lowpass_neon
mov x0, x8
mov x3, x2
mov x12, sp
sub x1, x9, x2, lsl #1
mov x2, #16
bl \type\()_h264_qpel16_v_lowpass_l2_neon
mov sp, x11
ret x14
endfunc
function ff_\type\()_h264_qpel16_mc21_neon, export=1
mov x14, x30
mov x8, x0
mov x9, x1
\type\()_h264_qpel16_mc21:
lowpass_const w3
mov x11, sp
sub sp, sp, #(16*16+16*12)
sub x1, x1, #2
mov x0, sp
bl put_h264_qpel16_h_lowpass_neon_packed
mov x4, x0
mov x0, x8
sub x1, x9, x2, lsl #1
sub x1, x1, #2
mov x3, x2
bl \type\()_h264_qpel16_hv_lowpass_l2_neon
mov sp, x11
ret x14
endfunc
function ff_\type\()_h264_qpel16_mc31_neon, export=1
add x1, x1, #1
mov x14, x30
mov x8, x0
mov x9, x1
sub x1, x1, #1
b \type\()_h264_qpel16_mc11
endfunc
function ff_\type\()_h264_qpel16_mc02_neon, export=1
mov x14, x30
lowpass_const w3
sub x1, x1, x2, lsl #1
mov x3, x2
bl \type\()_h264_qpel16_v_lowpass_neon
ret x14
endfunc
function ff_\type\()_h264_qpel16_mc12_neon, export=1
mov x14, x30
mov x8, x0
mov x9, x1
\type\()_h264_qpel16_mc12:
lowpass_const w3
mov x11, sp
sub sp, sp, #(16*16+16*12)
sub x1, x1, x2, lsl #1
mov x0, sp
mov x3, x2
bl put_h264_qpel16_v_lowpass_neon_packed
mov x4, x0
mov x0, x8
sub x1, x9, x3, lsl #1
sub x1, x1, #2
mov x2, x3
bl \type\()_h264_qpel16_hv_lowpass_l2_neon
mov sp, x11
ret x14
endfunc
function ff_\type\()_h264_qpel16_mc22_neon, export=1
mov x14, x30
lowpass_const w3
mov x11, sp
sub x1, x1, x2, lsl #1
sub x1, x1, #2
mov x3, x2
bl \type\()_h264_qpel16_hv_lowpass_neon
mov sp, x11 // restore stack
ret x14
endfunc
function ff_\type\()_h264_qpel16_mc32_neon, export=1
mov x14, x30
mov x8, x0
mov x9, x1
add x1, x1, #1
b \type\()_h264_qpel16_mc12
endfunc
function ff_\type\()_h264_qpel16_mc03_neon, export=1
mov x14, x30
add x12, x1, x2
b \type\()_h264_qpel16_mc01
endfunc
function ff_\type\()_h264_qpel16_mc13_neon, export=1
mov x14, x30
mov x8, x0
mov x9, x1
add x1, x1, x2
b \type\()_h264_qpel16_mc11
endfunc
function ff_\type\()_h264_qpel16_mc23_neon, export=1
mov x14, x30
mov x8, x0
mov x9, x1
add x1, x1, x2
b \type\()_h264_qpel16_mc21
endfunc
function ff_\type\()_h264_qpel16_mc33_neon, export=1
add x1, x1, #1
mov x14, x30
mov x8, x0
mov x9, x1
add x1, x1, x2
sub x1, x1, #1
b \type\()_h264_qpel16_mc11
endfunc
.endm
h264_qpel16 put
h264_qpel16 avg

@@ -1,123 +0,0 @@
/*
* ARM NEON optimised DSP functions
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stddef.h>
#include <stdint.h>
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/aarch64/cpu.h"
#include "libavcodec/hpeldsp.h"
void ff_put_pixels16_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_y2_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_xy2_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_x2_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_xy2_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_y2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_xy2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_x2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_xy2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_x2_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_y2_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_xy2_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_x2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_y2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_xy2_no_rnd_neon(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
av_cold void ff_hpeldsp_init_aarch64(HpelDSPContext *c, int flags)
{
int cpu_flags = av_get_cpu_flags();
if (have_neon(cpu_flags)) {
c->put_pixels_tab[0][0] = ff_put_pixels16_neon;
c->put_pixels_tab[0][1] = ff_put_pixels16_x2_neon;
c->put_pixels_tab[0][2] = ff_put_pixels16_y2_neon;
c->put_pixels_tab[0][3] = ff_put_pixels16_xy2_neon;
c->put_pixels_tab[1][0] = ff_put_pixels8_neon;
c->put_pixels_tab[1][1] = ff_put_pixels8_x2_neon;
c->put_pixels_tab[1][2] = ff_put_pixels8_y2_neon;
c->put_pixels_tab[1][3] = ff_put_pixels8_xy2_neon;
c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_neon;
c->put_no_rnd_pixels_tab[0][1] = ff_put_pixels16_x2_no_rnd_neon;
c->put_no_rnd_pixels_tab[0][2] = ff_put_pixels16_y2_no_rnd_neon;
c->put_no_rnd_pixels_tab[0][3] = ff_put_pixels16_xy2_no_rnd_neon;
c->put_no_rnd_pixels_tab[1][0] = ff_put_pixels8_neon;
c->put_no_rnd_pixels_tab[1][1] = ff_put_pixels8_x2_no_rnd_neon;
c->put_no_rnd_pixels_tab[1][2] = ff_put_pixels8_y2_no_rnd_neon;
c->put_no_rnd_pixels_tab[1][3] = ff_put_pixels8_xy2_no_rnd_neon;
c->avg_pixels_tab[0][0] = ff_avg_pixels16_neon;
c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_neon;
c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_neon;
c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_neon;
c->avg_pixels_tab[1][0] = ff_avg_pixels8_neon;
c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_neon;
c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_neon;
c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_neon;
c->avg_no_rnd_pixels_tab[0] = ff_avg_pixels16_neon;
c->avg_no_rnd_pixels_tab[1] = ff_avg_pixels16_x2_no_rnd_neon;
c->avg_no_rnd_pixels_tab[2] = ff_avg_pixels16_y2_no_rnd_neon;
c->avg_no_rnd_pixels_tab[3] = ff_avg_pixels16_xy2_no_rnd_neon;
}
}
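As with the qpel tables, the hpeldsp tables are indexed by block size and by the half-pel phase (dy << 1 | dx), so phase 0 is a plain copy; that is why put_pixels_tab and put_no_rnd_pixels_tab share ff_put_pixels16_neon at index 0, since rounding behaviour only differs once interpolation happens. A hypothetical caller sketch, not the FFmpeg call site; the table shape and helper name are illustrative:

#include <stdint.h>
#include <stddef.h>

typedef void (*op_pixels_func)(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h);

/* dx, dy are the half-pel offsets (0 or 1). */
static void copy_halfpel(op_pixels_func tab[2][4],
                         uint8_t *dst, const uint8_t *src,
                         ptrdiff_t stride, int blocksize, int dx, int dy)
{
    int size_idx = blocksize == 16 ? 0 : 1;
    tab[size_idx][(dy << 1) | dx](dst, src, stride, blocksize);
}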

Some files were not shown because too many files have changed in this diff.