lavfi: add geq filter.

Clément Bœsch 2012-05-14 19:03:19 +02:00
parent 3704804c8f
commit 8eecbaf5e4
8 changed files with 321 additions and 4 deletions


@@ -22,6 +22,7 @@ version <next>:
- metadata (info chunk) support in CAF muxer
- field filter ported from libmpcodecs
- AVR demuxer
- geq filter ported from libmpcodecs
version 1.0:


@@ -30,6 +30,7 @@ Specifically, the GPL parts of FFmpeg are
- vf_cropdetect.c
- vf_decimate.c
- vf_delogo.c
- vf_geq.c
- vf_hqdn3d.c
- vf_hue.c
- vf_mp.c

configure

@@ -1939,6 +1939,7 @@ frei0r_filter_deps="frei0r dlopen"
frei0r_filter_extralibs='$ldl'
frei0r_src_filter_deps="frei0r dlopen"
frei0r_src_filter_extralibs='$ldl'
geq_filter_deps="gpl"
hqdn3d_filter_deps="gpl"
hue_filter_deps="gpl"
movie_filter_deps="avcodec avformat"


@@ -2364,6 +2364,81 @@ frei0r=perspective:0.2/0.2:0.8/0.2
For more information see:
@url{http://frei0r.dyne.org}
@section geq
The filter takes one, two or three equations as parameters, separated by ':'.
The first equation is mandatory and applies to the luma plane. The following
two apply to the blue and red chroma planes, respectively.
The filter syntax allows named parameters:
@table @option
@item lum_expr
the luminance expression
@item cb_expr
the chrominance blue expression
@item cr_expr
the chrominance red expression
@end table
If one of the chrominance expressions is not defined, it falls back on the
other one. If neither is specified, both will evaluate the luminance
expression.
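For instance, a chroma-neutral (grayscale) output can be sketched with the
named form, keeping the luma plane untouched and forcing both chroma planes to
the 8-bit neutral value 128 (the expressions are purely illustrative):
@example
geq=lum_expr=p(X\,Y):cb_expr=128:cr_expr=128
@end example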
The expressions can use the following variables and functions:
@table @option
@item N
The sequential number of the filtered frame, starting from @code{0}.
@item X, Y
The coordinates of the current sample.
@item W, H
The width and height of the image.
@item SW, SH
Width and height scale factors for the currently filtered plane: the ratio
between the current plane's width/height and the luma plane's. E.g. for
YUV4:2:0 the values are @code{1,1} for the luma plane, and @code{0.5,0.5} for
the chroma planes.
@item p(x, y)
Return the value of the pixel at location (@var{x},@var{y}) of the current
plane.
@item lum(x, y)
Return the value of the pixel at location (@var{x},@var{y}) of the luminance
plane.
@item cb(x, y)
Return the value of the pixel at location (@var{x},@var{y}) of the
blue-difference chroma plane.
@item cr(x, y)
Return the value of the pixel at location (@var{x},@var{y}) of the
red-difference chroma plane.
@end table
For functions, if @var{x} and @var{y} are outside the area, the value will be
automatically clipped to the nearest edge.
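As an illustration of @code{p()} and the edge clipping, a rough 3-pixel
horizontal average (which also affects the chroma planes, since unspecified
chroma expressions fall back on the luma one) could be sketched as:
@example
geq=(p(X-1\,Y)+p(X\,Y)+p(X+1\,Y))/3
@end example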
Some further examples follow:
@itemize
@item
Flip the image horizontally:
@example
geq=p(W-X\,Y)
@end example
@item
Generate a fancy enigmatic moving light:
@example
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
@end example
@end itemize
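On an @command{ffmpeg} command line, such expressions are typically passed
through the @option{-vf} option, for example (file names are placeholders):
@example
ffmpeg -i INPUT -vf "geq=p(W-X\,Y)" OUTPUT
@end example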
@section gradfun
Fix the banding artifacts that are sometimes introduced into nearly flat
@@ -4469,9 +4544,9 @@ color=c=red@@0.2:s=qcif:r=10
If the input content is to be ignored, @code{nullsrc} can be used. The
following command generates noise in the luminance plane by employing
the @code{mp=geq} filter:
the @code{geq} filter:
@example
nullsrc=s=256x256, mp=geq=random(1)*255:128:128
nullsrc=s=256x256, geq=random(1)*255:128:128
@end example
@c man end VIDEO SOURCES


@@ -105,6 +105,7 @@ OBJS-$(CONFIG_FORMAT_FILTER) += vf_format.o
OBJS-$(CONFIG_FRAMESTEP_FILTER) += vf_framestep.o
OBJS-$(CONFIG_FPS_FILTER) += vf_fps.o
OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o
OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o
OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o
OBJS-$(CONFIG_HQDN3D_FILTER) += vf_hqdn3d.o


@@ -97,6 +97,7 @@ void avfilter_register_all(void)
REGISTER_FILTER (FPS, fps, vf);
REGISTER_FILTER (FRAMESTEP, framestep, vf);
REGISTER_FILTER (FREI0R, frei0r, vf);
REGISTER_FILTER (GEQ, geq, vf);
REGISTER_FILTER (GRADFUN, gradfun, vf);
REGISTER_FILTER (HFLIP, hflip, vf);
REGISTER_FILTER (HQDN3D, hqdn3d, vf);


@@ -29,8 +29,8 @@
#include "libavutil/avutil.h"
#define LIBAVFILTER_VERSION_MAJOR 3
#define LIBAVFILTER_VERSION_MINOR 21
#define LIBAVFILTER_VERSION_MICRO 108
#define LIBAVFILTER_VERSION_MINOR 22
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \

libavfilter/vf_geq.c

@@ -0,0 +1,237 @@
/*
* Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
* Copyright (C) 2012 Clément Bœsch <ubitux@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Generic equation change filter
* Originally written by Michael Niedermayer for the MPlayer project, and
* ported by Clément Bœsch for FFmpeg.
*/
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "internal.h"
typedef struct {
const AVClass *class;
AVExpr *e[3]; ///< expressions for each plane
char *expr_str[3]; ///< expression strings for each plane
int framenum; ///< frame counter
AVFilterBufferRef *picref; ///< current input buffer
int hsub, vsub; ///< chroma subsampling
} GEQContext;
#define OFFSET(x) offsetof(GEQContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption geq_options[] = {
{ "lum_expr", "set luminance expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "cb_expr", "set chroma blue expression", OFFSET(expr_str) + sizeof(char*), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "cr_expr", "set chroma red expression", OFFSET(expr_str) + 2*sizeof(char*), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
{NULL},
};
AVFILTER_DEFINE_CLASS(geq);
static inline double getpix(void *priv, double x, double y, int plane)
{
int xi, yi;
GEQContext *geq = priv;
AVFilterBufferRef *picref = geq->picref;
const uint8_t *src = picref->data[plane];
const int linesize = picref->linesize[plane];
const int w = picref->video->w >> (plane ? geq->hsub : 0);
const int h = picref->video->h >> (plane ? geq->vsub : 0);
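/* clip the coordinates so that the (xi+1, yi+1) neighbours used in the interpolation stay inside the plane */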
xi = x = av_clipf(x, 0, w - 2);
yi = y = av_clipf(y, 0, h - 2);
x -= xi;
y -= yi;
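/* bilinear interpolation between the four surrounding samples */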
return (1-y)*((1-x)*src[xi + yi * linesize] + x*src[xi + 1 + yi * linesize])
+ y *((1-x)*src[xi + (yi+1) * linesize] + x*src[xi + 1 + (yi+1) * linesize]);
}
//TODO: cubic interpolate
//TODO: keep the last few frames
static double lum(void *priv, double x, double y) { return getpix(priv, x, y, 0); }
static double cb(void *priv, double x, double y) { return getpix(priv, x, y, 1); }
static double cr(void *priv, double x, double y) { return getpix(priv, x, y, 2); }
static const char *const var_names[] = { "X", "Y", "W", "H", "N", "SW", "SH", NULL };
enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_N, VAR_SW, VAR_SH, VAR_VARS_NB };
static av_cold int geq_init(AVFilterContext *ctx, const char *args)
{
GEQContext *geq = ctx->priv;
int plane, ret = 0;
static const char *shorthand[] = { "lum_expr", "cb_expr", "cr_expr", NULL };
geq->class = &geq_class;
av_opt_set_defaults(geq);
if ((ret = av_opt_set_from_string(geq, args, shorthand, "=", ":")) < 0)
return ret;
if (!geq->expr_str[0]) {
av_log(ctx, AV_LOG_ERROR, "Luminance expression is mandatory\n");
ret = AVERROR(EINVAL);
goto end;
}
if (!geq->expr_str[1] && !geq->expr_str[2]) {
/* No chroma at all: fallback on luma */
geq->expr_str[1] = av_strdup(geq->expr_str[0]);
geq->expr_str[2] = av_strdup(geq->expr_str[0]);
} else {
/* One chroma unspecified, fallback on the other */
if (!geq->expr_str[1]) geq->expr_str[1] = av_strdup(geq->expr_str[2]);
if (!geq->expr_str[2]) geq->expr_str[2] = av_strdup(geq->expr_str[1]);
}
if (!geq->expr_str[1] || !geq->expr_str[2]) {
ret = AVERROR(ENOMEM);
goto end;
}
for (plane = 0; plane < 3; plane++) {
static double (*p[])(void *, double, double) = { lum, cb, cr };
static const char *const func2_names[] = { "lum", "cb", "cr", "p", NULL };
double (*func2[])(void *, double, double) = { lum, cb, cr, p[plane], NULL };
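/* "p" is bound to the accessor of the plane whose expression is being parsed */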
ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane], var_names,
NULL, NULL, func2_names, func2, 0, ctx);
if (ret < 0)
break;
}
end:
return ret;
}
static int geq_query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_NONE
};
ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0;
}
static int geq_config_props(AVFilterLink *inlink)
{
GEQContext *geq = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
geq->hsub = desc->log2_chroma_w;
geq->vsub = desc->log2_chroma_h;
return 0;
}
static int geq_end_frame(AVFilterLink *inlink)
{
int ret, plane;
GEQContext *geq = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *outpicref = outlink->out_buf;
double values[VAR_VARS_NB] = {
[VAR_N] = geq->framenum++,
};
geq->picref = inlink->cur_buf;
for (plane = 0; plane < 3; plane++) {
int x, y;
uint8_t *dst = outpicref->data[plane];
const int linesize = outpicref->linesize[plane];
const int w = inlink->w >> (plane ? geq->hsub : 0);
const int h = inlink->h >> (plane ? geq->vsub : 0);
values[VAR_W] = w;
values[VAR_H] = h;
values[VAR_SW] = w / (double)inlink->w;
values[VAR_SH] = h / (double)inlink->h;
for (y = 0; y < h; y++) {
values[VAR_Y] = y;
for (x = 0; x < w; x++) {
values[VAR_X] = x;
dst[x] = av_expr_eval(geq->e[plane], values, geq);
}
dst += linesize;
}
}
if ((ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 ||
(ret = ff_end_frame(outlink)) < 0)
return ret;
return 0;
}
static av_cold void geq_uninit(AVFilterContext *ctx)
{
int i;
GEQContext *geq = ctx->priv;
for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++)
av_expr_free(geq->e[i]);
av_opt_free(geq);
}
static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { return 0; }
static const AVFilterPad geq_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.draw_slice = null_draw_slice,
.config_props = geq_config_props,
.end_frame = geq_end_frame,
.min_perms = AV_PERM_READ,
},
{ NULL }
};
static const AVFilterPad geq_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
AVFilter avfilter_vf_geq = {
.name = "geq",
.description = NULL_IF_CONFIG_SMALL("Apply generic equation to each pixel."),
.priv_size = sizeof(GEQContext),
.init = geq_init,
.uninit = geq_uninit,
.query_formats = geq_query_formats,
.inputs = geq_inputs,
.outputs = geq_outputs,
.priv_class = &geq_class,
};