Merge remote-tracking branch 'qatar/master'

* qatar/master:
  vorbis: Validate that the floor 1 X values contain no duplicates.
  avprobe: Identify codec probe failures rather than calling them unsupported codecs.
  avformat: Probe codecs at score 0 on buffer exhaustion conditions.
  avformat: Factorize codec probing.
  Indeo Audio decoder
  imc: make IMDCT support stereo output
  imc: move channel-specific data into separate context
  lavfi: remove request/poll and drawing functions from public API on next bump
  lavfi: make avfilter_insert_pad and pals private on next bump.
  lavfi: make formats API private on next bump.
  avplay: use buffersrc instead of custom input filter.
  avtools: move buffer management code from avconv to cmdutils.
  avconv: don't use InputStream in the buffer management code.
  avconv: fix exiting when max frames is reached.
  mpc8: fix maximum bands handling
  aacdec: Turn PS off when switching to stereo and turn it to implicit when switching to mono.

Conflicts:
	Changelog
	cmdutils.h
	ffmpeg.c
	ffplay.c
	ffprobe.c
	libavcodec/avcodec.h
	libavcodec/mpc8.c
	libavcodec/v210dec.h
	libavcodec/version.h
	libavcodec/vorbisdec.c
	libavfilter/avfilter.c
	libavfilter/avfilter.h
	libavfilter/buffersrc.c
	libavfilter/formats.c
	libavfilter/src_movie.c
	libavfilter/vf_aspect.c
	libavfilter/vf_blackframe.c
	libavfilter/vf_boxblur.c
	libavfilter/vf_crop.c
	libavfilter/vf_cropdetect.c
	libavfilter/vf_delogo.c
	libavfilter/vf_drawbox.c
	libavfilter/vf_drawtext.c
	libavfilter/vf_fade.c
	libavfilter/vf_fifo.c
	libavfilter/vf_format.c
	libavfilter/vf_frei0r.c
	libavfilter/vf_gradfun.c
	libavfilter/vf_hflip.c
	libavfilter/vf_hqdn3d.c
	libavfilter/vf_libopencv.c
	libavfilter/vf_lut.c
	libavfilter/vf_overlay.c
	libavfilter/vf_pad.c
	libavfilter/vf_scale.c
	libavfilter/vf_select.c
	libavfilter/vf_showinfo.c
	libavfilter/vf_transpose.c
	libavfilter/vf_unsharp.c
	libavfilter/vf_yadif.c
	libavfilter/vsrc_color.c
	libavfilter/vsrc_testsrc.c
	libavformat/utils.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2012-06-05 22:43:44 +02:00
commit ad60b3b181
71 changed files with 1341 additions and 1051 deletions


@@ -4,6 +4,7 @@ releases are sorted from youngest to oldest.
version next:
- INI and flat output in ffprobe
- Scene detection in libavfilter
- Indeo Audio decoder
version 0.11:


@@ -41,6 +41,7 @@
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/imgutils.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/eval.h"
@@ -1222,3 +1223,144 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
}
return array;
}
static int alloc_buffer(FrameBuffer **pool, AVCodecContext *s, FrameBuffer **pbuf)
{
FrameBuffer *buf = av_mallocz(sizeof(*buf));
int i, ret;
const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
int h_chroma_shift, v_chroma_shift;
int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
int w = s->width, h = s->height;
if (!buf)
return AVERROR(ENOMEM);
avcodec_align_dimensions(s, &w, &h);
if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
w += 2*edge;
h += 2*edge;
}
if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
s->pix_fmt, 32)) < 0) {
av_freep(&buf);
return ret;
}
/* XXX this shouldn't be needed, but some tests break without this line
* those decoders are buggy and need to be fixed.
* the following tests fail:
* cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
*/
memset(buf->base[0], 128, ret);
avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
const int h_shift = i==0 ? 0 : h_chroma_shift;
const int v_shift = i==0 ? 0 : v_chroma_shift;
if ((s->flags & CODEC_FLAG_EMU_EDGE) || !buf->linesize[1] || !buf->base[i])
buf->data[i] = buf->base[i];
else
buf->data[i] = buf->base[i] +
FFALIGN((buf->linesize[i]*edge >> v_shift) +
(pixel_size*edge >> h_shift), 32);
}
buf->w = s->width;
buf->h = s->height;
buf->pix_fmt = s->pix_fmt;
buf->pool = pool;
*pbuf = buf;
return 0;
}
int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
{
FrameBuffer **pool = s->opaque;
FrameBuffer *buf;
int ret, i;
if(av_image_check_size(s->width, s->height, 0, s) || s->pix_fmt<0)
return -1;
if (!*pool && (ret = alloc_buffer(pool, s, pool)) < 0)
return ret;
buf = *pool;
*pool = buf->next;
buf->next = NULL;
if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
av_freep(&buf->base[0]);
av_free(buf);
if ((ret = alloc_buffer(pool, s, &buf)) < 0)
return ret;
}
av_assert0(!buf->refcount);
buf->refcount++;
frame->opaque = buf;
frame->type = FF_BUFFER_TYPE_USER;
frame->extended_data = frame->data;
frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
frame->width = buf->w;
frame->height = buf->h;
frame->format = buf->pix_fmt;
frame->sample_aspect_ratio = s->sample_aspect_ratio;
for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
frame->data[i] = buf->data[i];
frame->linesize[i] = buf->linesize[i];
}
return 0;
}
static void unref_buffer(FrameBuffer *buf)
{
FrameBuffer **pool = buf->pool;
av_assert0(buf->refcount > 0);
buf->refcount--;
if (!buf->refcount) {
FrameBuffer *tmp;
for(tmp= *pool; tmp; tmp= tmp->next)
av_assert1(tmp != buf);
buf->next = *pool;
*pool = buf;
}
}
void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
{
FrameBuffer *buf = frame->opaque;
int i;
if(frame->type!=FF_BUFFER_TYPE_USER)
return avcodec_default_release_buffer(s, frame);
for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
frame->data[i] = NULL;
unref_buffer(buf);
}
void filter_release_buffer(AVFilterBuffer *fb)
{
FrameBuffer *buf = fb->priv;
av_free(fb);
unref_buffer(buf);
}
void free_buffer_pool(FrameBuffer **pool)
{
FrameBuffer *buf = *pool;
while (buf) {
*pool = buf->next;
av_freep(&buf->base[0]);
av_free(buf);
buf = *pool;
}
}


@@ -386,4 +386,46 @@ void exit_program(int ret);
*/
void *grow_array(void *array, int elem_size, int *size, int new_size);
typedef struct FrameBuffer {
uint8_t *base[4];
uint8_t *data[4];
int linesize[4];
int h, w;
enum PixelFormat pix_fmt;
int refcount;
struct FrameBuffer **pool; ///< head of the buffer pool
struct FrameBuffer *next;
} FrameBuffer;
/**
* Get a frame from the pool. This is intended to be used as a callback for
* AVCodecContext.get_buffer.
*
* @param s codec context. s->opaque must be a pointer to the head of the
* buffer pool.
* @param frame frame->opaque will be set to point to the FrameBuffer
* containing the frame data.
*/
int codec_get_buffer(AVCodecContext *s, AVFrame *frame);
/**
* A callback to be used for AVCodecContext.release_buffer along with
* codec_get_buffer().
*/
void codec_release_buffer(AVCodecContext *s, AVFrame *frame);
/**
* A callback to be used for AVFilterBuffer.free.
* @param fb buffer to free. fb->priv must be a pointer to the FrameBuffer
* containing the buffer data.
*/
void filter_release_buffer(AVFilterBuffer *fb);
/**
* Free all the buffers in the pool. This must be called after all the
* buffers have been released.
*/
void free_buffer_pool(FrameBuffer **pool);
#endif /* CMDUTILS_H */
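
A minimal usage sketch (not part of this commit) of how the pool callbacks declared above might be wired to a decoder; the Decoder struct and helper functions are hypothetical, only codec_get_buffer(), codec_release_buffer() and free_buffer_pool() come from cmdutils.h:

#include "libavcodec/avcodec.h"
#include "cmdutils.h"

/* Hypothetical caller-side struct; ffmpeg.c keeps the pool in InputStream. */
typedef struct Decoder {
    AVCodecContext *avctx;
    FrameBuffer    *buffer_pool;   /* head of the free list, starts out NULL */
} Decoder;

static void attach_buffer_pool(Decoder *d)
{
    /* codec_get_buffer()/codec_release_buffer() expect avctx->opaque to
     * point at the head of the buffer pool */
    d->avctx->opaque         = &d->buffer_pool;
    d->avctx->get_buffer     = codec_get_buffer;
    d->avctx->release_buffer = codec_release_buffer;
}

static void close_decoder(Decoder *d)
{
    avcodec_close(d->avctx);
    /* all frames must have been released before the pool is freed */
    free_buffer_pool(&d->buffer_pool);
}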


@@ -748,6 +748,7 @@ following image formats are supported:
@tab encoding supported through external library libgsm
@item GSM Microsoft variant @tab E @tab X
@tab encoding supported through external library libgsm
@item IAC (Indeo Audio Coder) @tab @tab X
@item IMC (Intel Music Coder) @tab @tab X
@item MACE (Macintosh Audio Compression/Expansion) 3:1 @tab @tab X
@item MACE (Macintosh Audio Compression/Expansion) 6:1 @tab @tab X

ffmpeg.c

@@ -201,19 +201,6 @@ typedef struct FilterGraph {
int nb_outputs;
} FilterGraph;
typedef struct FrameBuffer {
uint8_t *base[4];
uint8_t *data[4];
int linesize[4];
int h, w;
enum PixelFormat pix_fmt;
int refcount;
struct InputStream *ist;
struct FrameBuffer *next;
} FrameBuffer;
typedef struct InputStream {
int file_index;
AVStream *st;
@@ -534,145 +521,6 @@ static void reset_options(OptionsContext *o, int is_input)
init_opts();
}
static int alloc_buffer(InputStream *ist, AVCodecContext *s, FrameBuffer **pbuf)
{
FrameBuffer *buf = av_mallocz(sizeof(*buf));
int i, ret;
const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
int h_chroma_shift, v_chroma_shift;
int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
int w = s->width, h = s->height;
if (!buf)
return AVERROR(ENOMEM);
avcodec_align_dimensions(s, &w, &h);
if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
w += 2*edge;
h += 2*edge;
}
if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
s->pix_fmt, 32)) < 0) {
av_freep(&buf);
return ret;
}
/* XXX this shouldn't be needed, but some tests break without this line
* those decoders are buggy and need to be fixed.
* the following tests fail:
* cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
*/
memset(buf->base[0], 128, ret);
avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
const int h_shift = i==0 ? 0 : h_chroma_shift;
const int v_shift = i==0 ? 0 : v_chroma_shift;
if ((s->flags & CODEC_FLAG_EMU_EDGE) || !buf->linesize[1] || !buf->base[i])
buf->data[i] = buf->base[i];
else
buf->data[i] = buf->base[i] +
FFALIGN((buf->linesize[i]*edge >> v_shift) +
(pixel_size*edge >> h_shift), 32);
}
buf->w = s->width;
buf->h = s->height;
buf->pix_fmt = s->pix_fmt;
buf->ist = ist;
*pbuf = buf;
return 0;
}
static void free_buffer_pool(InputStream *ist)
{
FrameBuffer *buf = ist->buffer_pool;
while (buf) {
ist->buffer_pool = buf->next;
av_freep(&buf->base[0]);
av_free(buf);
buf = ist->buffer_pool;
}
}
static void unref_buffer(InputStream *ist, FrameBuffer *buf)
{
av_assert0(buf->refcount > 0);
buf->refcount--;
if (!buf->refcount) {
FrameBuffer *tmp;
for(tmp= ist->buffer_pool; tmp; tmp= tmp->next)
av_assert1(tmp != buf);
buf->next = ist->buffer_pool;
ist->buffer_pool = buf;
}
}
static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
{
InputStream *ist = s->opaque;
FrameBuffer *buf;
int ret, i;
if(av_image_check_size(s->width, s->height, 0, s) || s->pix_fmt<0)
return -1;
if (!ist->buffer_pool && (ret = alloc_buffer(ist, s, &ist->buffer_pool)) < 0)
return ret;
buf = ist->buffer_pool;
ist->buffer_pool = buf->next;
buf->next = NULL;
if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
av_freep(&buf->base[0]);
av_free(buf);
if ((ret = alloc_buffer(ist, s, &buf)) < 0)
return ret;
}
av_assert0(!buf->refcount);
buf->refcount++;
frame->opaque = buf;
frame->type = FF_BUFFER_TYPE_USER;
frame->extended_data = frame->data;
frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
frame->width = buf->w;
frame->height = buf->h;
frame->format = buf->pix_fmt;
frame->sample_aspect_ratio = s->sample_aspect_ratio;
for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
frame->data[i] = buf->data[i];
frame->linesize[i] = buf->linesize[i];
}
return 0;
}
static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
{
InputStream *ist = s->opaque;
FrameBuffer *buf = frame->opaque;
int i;
if(frame->type!=FF_BUFFER_TYPE_USER)
return avcodec_default_release_buffer(s, frame);
for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
frame->data[i] = NULL;
unref_buffer(ist, buf);
}
static void filter_release_buffer(AVFilterBuffer *fb)
{
FrameBuffer *buf = fb->priv;
av_free(fb);
unref_buffer(buf->ist, buf);
}
static enum PixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum PixelFormat target)
{
if (codec && codec->pix_fmts) {
@@ -1508,7 +1356,7 @@ void av_noreturn exit_program(int ret)
for (i = 0; i < nb_input_streams; i++) {
av_freep(&input_streams[i]->decoded_frame);
av_dict_free(&input_streams[i]->opts);
- free_buffer_pool(input_streams[i]);
+ free_buffer_pool(&input_streams[i]->buffer_pool);
av_freep(&input_streams[i]->filters);
av_freep(&input_streams[i]);
}
@@ -2845,7 +2693,7 @@ static int init_input_stream(int ist_index, char *error, int error_len)
if (codec->type == AVMEDIA_TYPE_VIDEO && ist->dr1) {
ist->st->codec->get_buffer = codec_get_buffer;
ist->st->codec->release_buffer = codec_release_buffer;
- ist->st->codec->opaque = ist;
+ ist->st->codec->opaque = &ist->buffer_pool;
}
if (!av_dict_get(ist->opts, "threads", NULL, 0))

ffplay.c

@@ -49,6 +49,7 @@
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
# include "libavfilter/buffersink.h"
# include "libavfilter/buffersrc.h"
#endif
#include <SDL.h>
@@ -227,7 +228,10 @@ typedef struct VideoState {
int step;
#if CONFIG_AVFILTER
AVFilterContext *in_video_filter; ///< the first filter in the video chain
AVFilterContext *out_video_filter; ///< the last filter in the video chain
int use_dr1;
FrameBuffer *buffer_pool;
#endif
int refresh;
@@ -1545,222 +1549,29 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacke
}
#if CONFIG_AVFILTER
typedef struct {
VideoState *is;
AVFrame *frame;
int use_dr1;
} FilterPriv;
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
AVFilterContext *ctx = codec->opaque;
AVFilterBufferRef *ref;
int perms = AV_PERM_WRITE;
int i, w, h, stride[AV_NUM_DATA_POINTERS];
unsigned edge;
int pixel_size;
av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
perms |= AV_PERM_NEG_LINESIZES;
if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
}
if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
w = codec->width;
h = codec->height;
if(av_image_check_size(w, h, 0, codec) || codec->pix_fmt<0)
return -1;
avcodec_align_dimensions2(codec, &w, &h, stride);
edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
w += edge << 1;
h += edge << 1;
if (codec->pix_fmt != ctx->outputs[0]->format) {
av_log(codec, AV_LOG_ERROR, "Pixel format mismatches %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
return -1;
}
if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
return -1;
pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
ref->video->w = codec->width;
ref->video->h = codec->height;
for (i = 0; i < 4; i ++) {
unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
pic->base[i] = ref->data[i];
if (ref->data[i]) {
ref->data[i] += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
}
pic->data[i] = ref->data[i];
pic->linesize[i] = ref->linesize[i];
}
pic->opaque = ref;
pic->type = FF_BUFFER_TYPE_USER;
pic->reordered_opaque = codec->reordered_opaque;
pic->width = codec->width;
pic->height = codec->height;
pic->format = codec->pix_fmt;
pic->sample_aspect_ratio = codec->sample_aspect_ratio;
if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
else pic->pkt_pts = AV_NOPTS_VALUE;
return 0;
}
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
memset(pic->data, 0, sizeof(pic->data));
avfilter_unref_buffer(pic->opaque);
}
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
AVFilterBufferRef *ref = pic->opaque;
if (pic->data[0] == NULL) {
pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
return codec->get_buffer(codec, pic);
}
if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
(codec->pix_fmt != ref->format)) {
av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
return -1;
}
pic->reordered_opaque = codec->reordered_opaque;
if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
else pic->pkt_pts = AV_NOPTS_VALUE;
return 0;
}
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
FilterPriv *priv = ctx->priv;
AVCodecContext *codec;
if (!opaque) return -1;
priv->is = opaque;
codec = priv->is->video_st->codec;
codec->opaque = ctx;
if (codec->codec->capabilities & CODEC_CAP_DR1) {
av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
priv->use_dr1 = 1;
codec->get_buffer = input_get_buffer;
codec->release_buffer = input_release_buffer;
codec->reget_buffer = input_reget_buffer;
codec->thread_safe_callbacks = 1;
}
priv->frame = avcodec_alloc_frame();
return 0;
}
static void input_uninit(AVFilterContext *ctx)
{
FilterPriv *priv = ctx->priv;
av_free(priv->frame);
}
static int input_request_frame(AVFilterLink *link)
{
FilterPriv *priv = link->src->priv;
AVFilterBufferRef *picref;
int64_t pts = 0;
AVPacket pkt;
int ret;
while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
av_free_packet(&pkt);
if (ret < 0)
return -1;
if (priv->use_dr1 && priv->frame->opaque) {
picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
} else {
picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, priv->frame->width, priv->frame->height);
av_image_copy(picref->data, picref->linesize,
(const uint8_t **)(void **)priv->frame->data, priv->frame->linesize,
picref->format, priv->frame->width, priv->frame->height);
}
av_free_packet(&pkt);
avfilter_copy_frame_props(picref, priv->frame);
picref->video->sample_aspect_ratio = av_guess_sample_aspect_ratio(priv->is->ic, priv->is->video_st, priv->frame);
picref->pts = pts;
avfilter_start_frame(link, picref);
avfilter_draw_slice(link, 0, picref->video->h, 1);
avfilter_end_frame(link);
return 0;
}
static int input_query_formats(AVFilterContext *ctx)
{
FilterPriv *priv = ctx->priv;
enum PixelFormat pix_fmts[] = {
priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
};
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
return 0;
}
static int input_config_props(AVFilterLink *link)
{
FilterPriv *priv = link->src->priv;
AVStream *s = priv->is->video_st;
link->w = s->codec->width;
link->h = s->codec->height;
link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
link->time_base = s->time_base;
return 0;
}
static AVFilter input_filter =
{
.name = "ffplay_input",
.priv_size = sizeof(FilterPriv),
.init = input_init,
.uninit = input_uninit,
.query_formats = input_query_formats,
.inputs = (AVFilterPad[]) {{ .name = NULL }},
.outputs = (AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = input_request_frame,
.config_props = input_config_props, },
{ .name = NULL }},
};
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
char sws_flags_str[128];
char buffersrc_args[256];
int ret;
AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
- AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;;
+ AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
AVCodecContext *codec = is->video_st->codec;
snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
graph->scale_sws_opts = av_strdup(sws_flags_str);
- if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
-                                         NULL, is, graph)) < 0)
+ snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
+          codec->width, codec->height, codec->pix_fmt,
+          is->video_st->time_base.num, is->video_st->time_base.den,
+          codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
+ if ((ret = avfilter_graph_create_filter(&filt_src,
+                                         avfilter_get_by_name("buffer"),
+                                         "src", buffersrc_args, NULL,
+                                         graph)) < 0)
return ret;
#if FF_API_OLD_VSINK_API
@ -1809,8 +1620,16 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
if ((ret = avfilter_graph_config(graph, NULL)) < 0) if ((ret = avfilter_graph_config(graph, NULL)) < 0)
return ret; return ret;
is->in_video_filter = filt_src;
is->out_video_filter = filt_out;
if (codec->codec->capabilities & CODEC_CAP_DR1) {
is->use_dr1 = 1;
codec->get_buffer = codec_get_buffer;
codec->release_buffer = codec_release_buffer;
codec->opaque = &is->buffer_pool;
}
return ret;
}
@ -1826,7 +1645,7 @@ static int video_thread(void *arg)
#if CONFIG_AVFILTER
AVFilterGraph *graph = avfilter_graph_alloc();
- AVFilterContext *filt_out = NULL;
+ AVFilterContext *filt_out = NULL, *filt_in = NULL;
int last_w = is->video_st->codec->width;
int last_h = is->video_st->codec->height;
@ -1837,18 +1656,31 @@ static int video_thread(void *arg)
SDL_PushEvent(&event);
goto the_end;
}
filt_in = is->in_video_filter;
filt_out = is->out_video_filter;
#endif
for (;;) {
- #if !CONFIG_AVFILTER
AVPacket pkt;
- #else
+ #if CONFIG_AVFILTER
AVFilterBufferRef *picref;
AVRational tb = filt_out->inputs[0]->time_base;
#endif
while (is->paused && !is->videoq.abort_request)
SDL_Delay(10);
ret = get_video_frame(is, frame, &pts_int, &pkt);
if (ret < 0)
goto the_end;
av_free_packet(&pkt);
if (!ret)
continue;
is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
is->frame_last_filter_delay = 0;
#if CONFIG_AVFILTER
if ( last_w != is->video_st->codec->width
|| last_h != is->video_st->codec->height) {
@ -1862,48 +1694,55 @@ static int video_thread(void *arg)
last_w = is->video_st->codec->width;
last_h = is->video_st->codec->height;
}
ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
if (picref) { frame->pts = pts_int;
if (is->use_dr1) {
FrameBuffer *buf = frame->opaque;
AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
frame->data, frame->linesize,
AV_PERM_READ | AV_PERM_PRESERVE,
frame->width, frame->height,
frame->format);
avfilter_copy_frame_props(fb, frame);
fb->buf->priv = buf;
fb->buf->free = filter_release_buffer;
buf->refcount++;
av_buffersrc_buffer(filt_in, fb);
} else
av_buffersrc_write_frame(filt_in, frame);
while (ret >= 0) {
ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
if (ret < 0) {
ret = 0;
break;
}
avfilter_fill_frame_from_video_buffer_ref(frame, picref); avfilter_fill_frame_from_video_buffer_ref(frame, picref);
pts_int = picref->pts; pts_int = picref->pts;
tb = filt_out->inputs[0]->time_base; tb = filt_out->inputs[0]->time_base;
pos = picref->pos; pos = picref->pos;
frame->opaque = picref; frame->opaque = picref;
ret = 1; if (av_cmp_q(tb, is->video_st->time_base)) {
} av_unused int64_t pts1 = pts_int;
pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) { av_dlog(NULL, "video_thread(): "
av_unused int64_t pts1 = pts_int; "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base); tb.num, tb.den, pts1,
av_dlog(NULL, "video_thread(): " is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
"tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n", }
tb.num, tb.den, pts1, pts = pts_int * av_q2d(is->video_st->time_base);
is->video_st->time_base.num, is->video_st->time_base.den, pts_int); ret = queue_picture(is, frame, pts, pos);
} }
#else #else
ret = get_video_frame(is, frame, &pts_int, &pkt);
pos = pkt.pos;
av_free_packet(&pkt);
if (ret == 0)
continue;
#endif
if (ret < 0)
goto the_end;
is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
is->frame_last_filter_delay = 0;
#if CONFIG_AVFILTER
if (!picref)
continue;
#endif
pts = pts_int * av_q2d(is->video_st->time_base); pts = pts_int * av_q2d(is->video_st->time_base);
ret = queue_picture(is, frame, pts, pkt.pos);
ret = queue_picture(is, frame, pts, pos); #endif
if (ret < 0) if (ret < 0)
goto the_end; goto the_end;
@ -2461,6 +2300,7 @@ static void stream_component_close(VideoState *is, int stream_index)
ic->streams[stream_index]->discard = AVDISCARD_ALL;
avcodec_close(avctx);
free_buffer_pool(&is->buffer_pool);
switch (avctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
is->audio_st = NULL;


@@ -1866,7 +1866,11 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
AVStream *stream = fmt_ctx->streams[i];
AVCodec *codec;
- if (!(codec = avcodec_find_decoder(stream->codec->codec_id))) {
+ if (stream->codec->codec_id == CODEC_ID_PROBE) {
av_log(NULL, AV_LOG_ERROR,
"Failed to probe codec for input stream %d\n",
stream->index);
} else if (!(codec = avcodec_find_decoder(stream->codec->codec_id))) {
av_log(NULL, AV_LOG_ERROR,
"Unsupported codec with id %d for input stream %d\n",
stream->codec->codec_id, stream->index);


@@ -220,6 +220,7 @@ OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o
OBJS-$(CONFIG_H264_VDA_HWACCEL) += vda_h264.o
OBJS-$(CONFIG_HUFFYUV_DECODER) += huffyuv.o
OBJS-$(CONFIG_HUFFYUV_ENCODER) += huffyuv.o
OBJS-$(CONFIG_IAC_DECODER) += imc.o
OBJS-$(CONFIG_IDCIN_DECODER) += idcinvideo.o
OBJS-$(CONFIG_IDF_DECODER) += bintext.o cga_data.o
OBJS-$(CONFIG_IFF_BYTERUN1_DECODER) += iff.o


@@ -487,6 +487,7 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
return NULL;
ac->oc[1].m4ac.chan_config = 2;
ac->oc[1].m4ac.ps = 0;
}
// And vice-versa
if (!ac->tags_mapped && type == TYPE_SCE && ac->oc[1].m4ac.chan_config == 2) {
@ -504,6 +505,8 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
return NULL;
ac->oc[1].m4ac.chan_config = 1;
if (ac->oc[1].m4ac.sbr)
ac->oc[1].m4ac.ps = -1;
}
// For indexed channel configurations map the channels solely based on position.
switch (ac->oc[1].m4ac.chan_config) {


@@ -286,6 +286,7 @@ void avcodec_register_all(void)
REGISTER_DECODER (G729, g729);
REGISTER_DECODER (GSM, gsm);
REGISTER_DECODER (GSM_MS, gsm_ms);
REGISTER_DECODER (IAC, iac);
REGISTER_DECODER (IMC, imc);
REGISTER_DECODER (MACE3, mace3);
REGISTER_DECODER (MACE6, mace6);


@@ -406,6 +406,7 @@ enum CodecID {
CODEC_ID_8SVX_FIB,
CODEC_ID_BMV_AUDIO,
CODEC_ID_RALF,
CODEC_ID_IAC,
CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
CODEC_ID_8SVX_RAW = MKBETAG('8','S','V','X'),
CODEC_ID_SONIC = MKBETAG('S','O','N','C'),


@@ -49,9 +49,7 @@
#define BANDS 32
#define COEFFS 256
- typedef struct {
+ typedef struct IMCChannel {
- AVFrame frame;
float old_floor[BANDS];
float flcoeffs1[BANDS];
float flcoeffs2[BANDS];
@ -61,16 +59,6 @@ typedef struct {
float flcoeffs6[BANDS];
float CWdecoded[COEFFS];
/** MDCT tables */
//@{
float mdct_sine_window[COEFFS];
float post_cos[COEFFS];
float post_sin[COEFFS];
float pre_coef1[COEFFS];
float pre_coef2[COEFFS];
float last_fft_im[COEFFS];
//@}
int bandWidthT[BANDS]; ///< codewords per band
int bitsBandT[BANDS]; ///< how many bits per codeword in band
int CWlengthT[COEFFS]; ///< how many bits in each codeword
@ -82,15 +70,37 @@ typedef struct {
int skipFlagCount[BANDS]; ///< skipped coeffients per band
int skipFlags[COEFFS]; ///< skip coefficient decoding or not
int codewords[COEFFS]; ///< raw codewords read from bitstream
float last_fft_im[COEFFS];
int decoder_reset;
} IMCChannel;
typedef struct {
AVFrame frame;
IMCChannel chctx[2];
/** MDCT tables */
//@{
float mdct_sine_window[COEFFS];
float post_cos[COEFFS];
float post_sin[COEFFS];
float pre_coef1[COEFFS];
float pre_coef2[COEFFS];
//@}
float sqrt_tab[30];
GetBitContext gb;
int decoder_reset;
float one_div_log2;
DSPContext dsp;
FFTContext fft;
DECLARE_ALIGNED(32, FFTComplex, samples)[COEFFS / 2];
float *out_samples;
int8_t cyclTab[32], cyclTab2[32];
float weights1[31], weights2[31];
} IMCContext;
static VLC huffman_vlc[4][4];
@ -110,15 +120,21 @@ static av_cold int imc_decode_init(AVCodecContext *avctx)
IMCContext *q = avctx->priv_data;
double r1, r2;
- if (avctx->channels != 1) {
+ if ((avctx->codec_id == CODEC_ID_IMC && avctx->channels != 1)
+     || (avctx->codec_id == CODEC_ID_IAC && avctx->channels > 2)) {
av_log_ask_for_sample(avctx, "Number of channels is not supported\n");
return AVERROR_PATCHWELCOME;
}
- q->decoder_reset = 1;
+ for (j = 0; j < avctx->channels; j++) {
+ q->chctx[j].decoder_reset = 1;
for (i = 0; i < BANDS; i++)
- q->old_floor[i] = 1.0;
+ q->chctx[j].old_floor[i] = 1.0;
+ for (i = 0; i < COEFFS / 2; i++)
+ q->chctx[j].last_fft_im[i] = 0;
+ }
/* Build mdct window, a simple sine window normalized with sqrt(2) */
ff_sine_window_init(q->mdct_sine_window, COEFFS);
@ -138,8 +154,6 @@ static av_cold int imc_decode_init(AVCodecContext *avctx)
q->pre_coef1[i] = -(r1 + r2) * sqrt(2.0);
q->pre_coef2[i] = (r1 - r2) * sqrt(2.0);
}
q->last_fft_im[i] = 0;
}
/* Generate a square root table */
@ -159,13 +173,26 @@ static av_cold int imc_decode_init(AVCodecContext *avctx)
}
q->one_div_log2 = 1 / log(2);
memcpy(q->cyclTab, cyclTab, sizeof(cyclTab));
memcpy(q->cyclTab2, cyclTab2, sizeof(cyclTab2));
if (avctx->codec_id == CODEC_ID_IAC) {
q->cyclTab[29] = 31;
q->cyclTab2[31] = 28;
memcpy(q->weights1, iac_weights1, sizeof(iac_weights1));
memcpy(q->weights2, iac_weights2, sizeof(iac_weights2));
} else {
memcpy(q->weights1, imc_weights1, sizeof(imc_weights1));
memcpy(q->weights2, imc_weights2, sizeof(imc_weights2));
}
if ((ret = ff_fft_init(&q->fft, 7, 1))) {
av_log(avctx, AV_LOG_INFO, "FFT init failed\n");
return ret;
}
ff_dsputil_init(&q->dsp, avctx);
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
- avctx->channel_layout = AV_CH_LAYOUT_MONO;
+ avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO
+                                              : AV_CH_LAYOUT_STEREO;
avcodec_get_frame_defaults(&q->frame);
avctx->coded_frame = &q->frame;
@ -199,13 +226,13 @@ static void imc_calculate_coeffs(IMCContext *q, float *flcoeffs1,
} }
for (i = 0; i < BANDS; i++) { for (i = 0; i < BANDS; i++) {
for (cnt2 = i; cnt2 < cyclTab[i]; cnt2++) for (cnt2 = i; cnt2 < q->cyclTab[i]; cnt2++)
flcoeffs5[cnt2] = flcoeffs5[cnt2] + workT3[i]; flcoeffs5[cnt2] = flcoeffs5[cnt2] + workT3[i];
workT2[cnt2 - 1] = workT2[cnt2 - 1] + workT3[i]; workT2[cnt2 - 1] = workT2[cnt2 - 1] + workT3[i];
} }
for (i = 1; i < BANDS; i++) { for (i = 1; i < BANDS; i++) {
accum = (workT2[i - 1] + accum) * imc_weights1[i - 1]; accum = (workT2[i - 1] + accum) * q->weights1[i - 1];
flcoeffs5[i] += accum; flcoeffs5[i] += accum;
} }
@ -213,7 +240,7 @@ static void imc_calculate_coeffs(IMCContext *q, float *flcoeffs1,
workT2[i] = 0.0; workT2[i] = 0.0;
for (i = 0; i < BANDS; i++) { for (i = 0; i < BANDS; i++) {
for (cnt2 = i - 1; cnt2 > cyclTab2[i]; cnt2--) for (cnt2 = i - 1; cnt2 > q->cyclTab2[i]; cnt2--)
flcoeffs5[cnt2] += workT3[i]; flcoeffs5[cnt2] += workT3[i];
workT2[cnt2+1] += workT3[i]; workT2[cnt2+1] += workT3[i];
} }
@ -221,7 +248,7 @@ static void imc_calculate_coeffs(IMCContext *q, float *flcoeffs1,
accum = 0.0; accum = 0.0;
for (i = BANDS-2; i >= 0; i--) { for (i = BANDS-2; i >= 0; i--) {
accum = (workT2[i+1] + accum) * imc_weights2[i]; accum = (workT2[i+1] + accum) * q->weights2[i];
flcoeffs5[i] += accum; flcoeffs5[i] += accum;
// there is missing code here, but it seems to never be triggered // there is missing code here, but it seems to never be triggered
} }
@ -313,8 +340,8 @@ static void imc_decode_level_coefficients2(IMCContext *q, int *levlCoeffBuf,
/** /**
* Perform bit allocation depending on bits available * Perform bit allocation depending on bits available
*/ */
static int bit_allocation(IMCContext *q, int stream_format_code, int freebits, static int bit_allocation(IMCContext *q, IMCChannel *chctx,
int flag) int stream_format_code, int freebits, int flag)
{ {
int i, j; int i, j;
const float limit = -1.e20; const float limit = -1.e20;
@ -333,43 +360,43 @@ static int bit_allocation(IMCContext *q, int stream_format_code, int freebits,
int found_indx = 0; int found_indx = 0;
for (i = 0; i < BANDS; i++) for (i = 0; i < BANDS; i++)
highest = FFMAX(highest, q->flcoeffs1[i]); highest = FFMAX(highest, chctx->flcoeffs1[i]);
for (i = 0; i < BANDS - 1; i++) for (i = 0; i < BANDS - 1; i++)
q->flcoeffs4[i] = q->flcoeffs3[i] - log(q->flcoeffs5[i]) / log(2); chctx->flcoeffs4[i] = chctx->flcoeffs3[i] - log(chctx->flcoeffs5[i]) / log(2);
q->flcoeffs4[BANDS - 1] = limit; chctx->flcoeffs4[BANDS - 1] = limit;
highest = highest * 0.25; highest = highest * 0.25;
for (i = 0; i < BANDS; i++) { for (i = 0; i < BANDS; i++) {
indx = -1; indx = -1;
if ((band_tab[i + 1] - band_tab[i]) == q->bandWidthT[i]) if ((band_tab[i + 1] - band_tab[i]) == chctx->bandWidthT[i])
indx = 0; indx = 0;
if ((band_tab[i + 1] - band_tab[i]) > q->bandWidthT[i]) if ((band_tab[i + 1] - band_tab[i]) > chctx->bandWidthT[i])
indx = 1; indx = 1;
if (((band_tab[i + 1] - band_tab[i]) / 2) >= q->bandWidthT[i]) if (((band_tab[i + 1] - band_tab[i]) / 2) >= chctx->bandWidthT[i])
indx = 2; indx = 2;
if (indx == -1) if (indx == -1)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
q->flcoeffs4[i] += xTab[(indx * 2 + (q->flcoeffs1[i] < highest)) * 2 + flag]; chctx->flcoeffs4[i] += xTab[(indx * 2 + (chctx->flcoeffs1[i] < highest)) * 2 + flag];
} }
if (stream_format_code & 0x2) { if (stream_format_code & 0x2) {
q->flcoeffs4[0] = limit; chctx->flcoeffs4[0] = limit;
q->flcoeffs4[1] = limit; chctx->flcoeffs4[1] = limit;
q->flcoeffs4[2] = limit; chctx->flcoeffs4[2] = limit;
q->flcoeffs4[3] = limit; chctx->flcoeffs4[3] = limit;
} }
for (i = (stream_format_code & 0x2) ? 4 : 0; i < BANDS - 1; i++) { for (i = (stream_format_code & 0x2) ? 4 : 0; i < BANDS - 1; i++) {
iacc += q->bandWidthT[i]; iacc += chctx->bandWidthT[i];
summa += q->bandWidthT[i] * q->flcoeffs4[i]; summa += chctx->bandWidthT[i] * chctx->flcoeffs4[i];
} }
q->bandWidthT[BANDS - 1] = 0; chctx->bandWidthT[BANDS - 1] = 0;
summa = (summa * 0.5 - freebits) / iacc; summa = (summa * 0.5 - freebits) / iacc;
@ -382,13 +409,13 @@ static int bit_allocation(IMCContext *q, int stream_format_code, int freebits,
iacc = 0; iacc = 0;
for (j = (stream_format_code & 0x2) ? 4 : 0; j < BANDS; j++) { for (j = (stream_format_code & 0x2) ? 4 : 0; j < BANDS; j++) {
cwlen = av_clipf(((q->flcoeffs4[j] * 0.5) - summa + 0.5), 0, 6); cwlen = av_clipf(((chctx->flcoeffs4[j] * 0.5) - summa + 0.5), 0, 6);
q->bitsBandT[j] = cwlen; chctx->bitsBandT[j] = cwlen;
summer += q->bandWidthT[j] * cwlen; summer += chctx->bandWidthT[j] * cwlen;
if (cwlen > 0) if (cwlen > 0)
iacc += q->bandWidthT[j]; iacc += chctx->bandWidthT[j];
} }
flg = t2; flg = t2;
@ -405,13 +432,13 @@ static int bit_allocation(IMCContext *q, int stream_format_code, int freebits,
for (i = (stream_format_code & 0x2) ? 4 : 0; i < BANDS; i++) { for (i = (stream_format_code & 0x2) ? 4 : 0; i < BANDS; i++) {
for (j = band_tab[i]; j < band_tab[i + 1]; j++) for (j = band_tab[i]; j < band_tab[i + 1]; j++)
q->CWlengthT[j] = q->bitsBandT[i]; chctx->CWlengthT[j] = chctx->bitsBandT[i];
} }
if (freebits > summer) { if (freebits > summer) {
for (i = 0; i < BANDS; i++) { for (i = 0; i < BANDS; i++) {
workT[i] = (q->bitsBandT[i] == 6) ? -1.e20 workT[i] = (chctx->bitsBandT[i] == 6) ? -1.e20
: (q->bitsBandT[i] * -2 + q->flcoeffs4[i] - 0.415); : (chctx->bitsBandT[i] * -2 + chctx->flcoeffs4[i] - 0.415);
} }
highest = 0.0; highest = 0.0;
@ -432,11 +459,11 @@ static int bit_allocation(IMCContext *q, int stream_format_code, int freebits,
if (highest > -1.e20) { if (highest > -1.e20) {
workT[found_indx] -= 2.0; workT[found_indx] -= 2.0;
if (++q->bitsBandT[found_indx] == 6) if (++chctx->bitsBandT[found_indx] == 6)
workT[found_indx] = -1.e20; workT[found_indx] = -1.e20;
for (j = band_tab[found_indx]; j < band_tab[found_indx + 1] && (freebits > summer); j++) { for (j = band_tab[found_indx]; j < band_tab[found_indx + 1] && (freebits > summer); j++) {
q->CWlengthT[j]++; chctx->CWlengthT[j]++;
summer++; summer++;
} }
} }
@ -444,7 +471,7 @@ static int bit_allocation(IMCContext *q, int stream_format_code, int freebits,
} }
if (freebits < summer) { if (freebits < summer) {
for (i = 0; i < BANDS; i++) { for (i = 0; i < BANDS; i++) {
workT[i] = q->bitsBandT[i] ? (q->bitsBandT[i] * -2 + q->flcoeffs4[i] + 1.585) workT[i] = chctx->bitsBandT[i] ? (chctx->bitsBandT[i] * -2 + chctx->flcoeffs4[i] + 1.585)
: 1.e20; : 1.e20;
} }
if (stream_format_code & 0x2) { if (stream_format_code & 0x2) {
@ -466,12 +493,12 @@ static int bit_allocation(IMCContext *q, int stream_format_code, int freebits,
// break; // break;
workT[low_indx] = lowest + 2.0; workT[low_indx] = lowest + 2.0;
if (!--q->bitsBandT[low_indx]) if (!--chctx->bitsBandT[low_indx])
workT[low_indx] = 1.e20; workT[low_indx] = 1.e20;
for (j = band_tab[low_indx]; j < band_tab[low_indx+1] && (freebits < summer); j++) { for (j = band_tab[low_indx]; j < band_tab[low_indx+1] && (freebits < summer); j++) {
if (q->CWlengthT[j] > 0) { if (chctx->CWlengthT[j] > 0) {
q->CWlengthT[j]--; chctx->CWlengthT[j]--;
summer--; summer--;
} }
} }
@ -480,54 +507,54 @@ static int bit_allocation(IMCContext *q, int stream_format_code, int freebits,
return 0; return 0;
} }
static void imc_get_skip_coeff(IMCContext *q) static void imc_get_skip_coeff(IMCContext *q, IMCChannel *chctx)
{ {
int i, j; int i, j;
memset(q->skipFlagBits, 0, sizeof(q->skipFlagBits)); memset(chctx->skipFlagBits, 0, sizeof(chctx->skipFlagBits));
memset(q->skipFlagCount, 0, sizeof(q->skipFlagCount)); memset(chctx->skipFlagCount, 0, sizeof(chctx->skipFlagCount));
for (i = 0; i < BANDS; i++) { for (i = 0; i < BANDS; i++) {
if (!q->bandFlagsBuf[i] || !q->bandWidthT[i]) if (!chctx->bandFlagsBuf[i] || !chctx->bandWidthT[i])
continue; continue;
if (!q->skipFlagRaw[i]) { if (!chctx->skipFlagRaw[i]) {
q->skipFlagBits[i] = band_tab[i + 1] - band_tab[i]; chctx->skipFlagBits[i] = band_tab[i + 1] - band_tab[i];
for (j = band_tab[i]; j < band_tab[i + 1]; j++) { for (j = band_tab[i]; j < band_tab[i + 1]; j++) {
q->skipFlags[j] = get_bits1(&q->gb); chctx->skipFlags[j] = get_bits1(&q->gb);
if (q->skipFlags[j]) if (chctx->skipFlags[j])
q->skipFlagCount[i]++; chctx->skipFlagCount[i]++;
} }
} else { } else {
for (j = band_tab[i]; j < band_tab[i + 1] - 1; j += 2) { for (j = band_tab[i]; j < band_tab[i + 1] - 1; j += 2) {
if (!get_bits1(&q->gb)) { // 0 if (!get_bits1(&q->gb)) { // 0
q->skipFlagBits[i]++; chctx->skipFlagBits[i]++;
q->skipFlags[j] = 1; chctx->skipFlags[j] = 1;
q->skipFlags[j + 1] = 1; chctx->skipFlags[j + 1] = 1;
q->skipFlagCount[i] += 2; chctx->skipFlagCount[i] += 2;
} else { } else {
if (get_bits1(&q->gb)) { // 11 if (get_bits1(&q->gb)) { // 11
q->skipFlagBits[i] += 2; chctx->skipFlagBits[i] += 2;
q->skipFlags[j] = 0; chctx->skipFlags[j] = 0;
q->skipFlags[j + 1] = 1; chctx->skipFlags[j + 1] = 1;
q->skipFlagCount[i]++; chctx->skipFlagCount[i]++;
} else { } else {
q->skipFlagBits[i] += 3; chctx->skipFlagBits[i] += 3;
q->skipFlags[j + 1] = 0; chctx->skipFlags[j + 1] = 0;
if (!get_bits1(&q->gb)) { // 100 if (!get_bits1(&q->gb)) { // 100
q->skipFlags[j] = 1; chctx->skipFlags[j] = 1;
q->skipFlagCount[i]++; chctx->skipFlagCount[i]++;
} else { // 101 } else { // 101
q->skipFlags[j] = 0; chctx->skipFlags[j] = 0;
} }
} }
} }
} }
if (j < band_tab[i + 1]) { if (j < band_tab[i + 1]) {
q->skipFlagBits[i]++; chctx->skipFlagBits[i]++;
if ((q->skipFlags[j] = get_bits1(&q->gb))) if ((chctx->skipFlags[j] = get_bits1(&q->gb)))
q->skipFlagCount[i]++; chctx->skipFlagCount[i]++;
} }
} }
} }
@ -536,7 +563,8 @@ static void imc_get_skip_coeff(IMCContext *q)
/** /**
* Increase highest' band coefficient sizes as some bits won't be used * Increase highest' band coefficient sizes as some bits won't be used
*/ */
static void imc_adjust_bit_allocation(IMCContext *q, int summer) static void imc_adjust_bit_allocation(IMCContext *q, IMCChannel *chctx,
int summer)
{ {
float workT[32]; float workT[32];
int corrected = 0; int corrected = 0;
@ -545,8 +573,8 @@ static void imc_adjust_bit_allocation(IMCContext *q, int summer)
int found_indx = 0; int found_indx = 0;
for (i = 0; i < BANDS; i++) { for (i = 0; i < BANDS; i++) {
workT[i] = (q->bitsBandT[i] == 6) ? -1.e20 workT[i] = (chctx->bitsBandT[i] == 6) ? -1.e20
: (q->bitsBandT[i] * -2 + q->flcoeffs4[i] - 0.415); : (chctx->bitsBandT[i] * -2 + chctx->flcoeffs4[i] - 0.415);
} }
while (corrected < summer) { while (corrected < summer) {
@ -564,12 +592,12 @@ static void imc_adjust_bit_allocation(IMCContext *q, int summer)
if (highest > -1.e20) { if (highest > -1.e20) {
workT[found_indx] -= 2.0; workT[found_indx] -= 2.0;
if (++(q->bitsBandT[found_indx]) == 6) if (++(chctx->bitsBandT[found_indx]) == 6)
workT[found_indx] = -1.e20; workT[found_indx] = -1.e20;
for (j = band_tab[found_indx]; j < band_tab[found_indx+1] && (corrected < summer); j++) { for (j = band_tab[found_indx]; j < band_tab[found_indx+1] && (corrected < summer); j++) {
if (!q->skipFlags[j] && (q->CWlengthT[j] < 6)) { if (!chctx->skipFlags[j] && (chctx->CWlengthT[j] < 6)) {
q->CWlengthT[j]++; chctx->CWlengthT[j]++;
corrected++; corrected++;
} }
} }
@ -577,17 +605,19 @@ static void imc_adjust_bit_allocation(IMCContext *q, int summer)
} }
} }
static void imc_imdct256(IMCContext *q) static void imc_imdct256(IMCContext *q, IMCChannel *chctx, int channels)
{ {
int i; int i;
float re, im; float re, im;
float *dst1 = q->out_samples;
float *dst2 = q->out_samples + (COEFFS - 1) * channels;
/* prerotation */ /* prerotation */
for (i = 0; i < COEFFS / 2; i++) { for (i = 0; i < COEFFS / 2; i++) {
q->samples[i].re = -(q->pre_coef1[i] * q->CWdecoded[COEFFS - 1 - i * 2]) - q->samples[i].re = -(q->pre_coef1[i] * chctx->CWdecoded[COEFFS - 1 - i * 2]) -
(q->pre_coef2[i] * q->CWdecoded[i * 2]); (q->pre_coef2[i] * chctx->CWdecoded[i * 2]);
q->samples[i].im = (q->pre_coef2[i] * q->CWdecoded[COEFFS - 1 - i * 2]) - q->samples[i].im = (q->pre_coef2[i] * chctx->CWdecoded[COEFFS - 1 - i * 2]) -
(q->pre_coef1[i] * q->CWdecoded[i * 2]); (q->pre_coef1[i] * chctx->CWdecoded[i * 2]);
} }
/* FFT */ /* FFT */
@ -598,15 +628,18 @@ static void imc_imdct256(IMCContext *q)
for (i = 0; i < COEFFS / 2; i++) { for (i = 0; i < COEFFS / 2; i++) {
re = ( q->samples[i].re * q->post_cos[i]) + (-q->samples[i].im * q->post_sin[i]); re = ( q->samples[i].re * q->post_cos[i]) + (-q->samples[i].im * q->post_sin[i]);
im = (-q->samples[i].im * q->post_cos[i]) - ( q->samples[i].re * q->post_sin[i]); im = (-q->samples[i].im * q->post_cos[i]) - ( q->samples[i].re * q->post_sin[i]);
q->out_samples[i * 2] = (q->mdct_sine_window[COEFFS - 1 - i * 2] * q->last_fft_im[i]) *dst1 = (q->mdct_sine_window[COEFFS - 1 - i * 2] * chctx->last_fft_im[i])
+ (q->mdct_sine_window[i * 2] * re); + (q->mdct_sine_window[i * 2] * re);
q->out_samples[COEFFS - 1 - i * 2] = (q->mdct_sine_window[i * 2] * q->last_fft_im[i]) *dst2 = (q->mdct_sine_window[i * 2] * chctx->last_fft_im[i])
- (q->mdct_sine_window[COEFFS - 1 - i * 2] * re); - (q->mdct_sine_window[COEFFS - 1 - i * 2] * re);
q->last_fft_im[i] = im; dst1 += channels * 2;
dst2 -= channels * 2;
chctx->last_fft_im[i] = im;
} }
} }
static int inverse_quant_coeff(IMCContext *q, int stream_format_code) static int inverse_quant_coeff(IMCContext *q, IMCChannel *chctx,
int stream_format_code)
{ {
int i, j; int i, j;
int middle_value, cw_len, max_size; int middle_value, cw_len, max_size;
@ -614,30 +647,30 @@ static int inverse_quant_coeff(IMCContext *q, int stream_format_code)
for (i = 0; i < BANDS; i++) { for (i = 0; i < BANDS; i++) {
for (j = band_tab[i]; j < band_tab[i + 1]; j++) { for (j = band_tab[i]; j < band_tab[i + 1]; j++) {
q->CWdecoded[j] = 0; chctx->CWdecoded[j] = 0;
cw_len = q->CWlengthT[j]; cw_len = chctx->CWlengthT[j];
if (cw_len <= 0 || q->skipFlags[j]) if (cw_len <= 0 || chctx->skipFlags[j])
continue; continue;
max_size = 1 << cw_len; max_size = 1 << cw_len;
middle_value = max_size >> 1; middle_value = max_size >> 1;
if (q->codewords[j] >= max_size || q->codewords[j] < 0) if (chctx->codewords[j] >= max_size || chctx->codewords[j] < 0)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
if (cw_len >= 4) { if (cw_len >= 4) {
quantizer = imc_quantizer2[(stream_format_code & 2) >> 1]; quantizer = imc_quantizer2[(stream_format_code & 2) >> 1];
if (q->codewords[j] >= middle_value) if (chctx->codewords[j] >= middle_value)
q->CWdecoded[j] = quantizer[q->codewords[j] - 8] * q->flcoeffs6[i]; chctx->CWdecoded[j] = quantizer[chctx->codewords[j] - 8] * chctx->flcoeffs6[i];
else else
q->CWdecoded[j] = -quantizer[max_size - q->codewords[j] - 8 - 1] * q->flcoeffs6[i]; chctx->CWdecoded[j] = -quantizer[max_size - chctx->codewords[j] - 8 - 1] * chctx->flcoeffs6[i];
}else{ }else{
quantizer = imc_quantizer1[((stream_format_code & 2) >> 1) | (q->bandFlagsBuf[i] << 1)]; quantizer = imc_quantizer1[((stream_format_code & 2) >> 1) | (chctx->bandFlagsBuf[i] << 1)];
if (q->codewords[j] >= middle_value) if (chctx->codewords[j] >= middle_value)
q->CWdecoded[j] = quantizer[q->codewords[j] - 1] * q->flcoeffs6[i]; chctx->CWdecoded[j] = quantizer[chctx->codewords[j] - 1] * chctx->flcoeffs6[i];
else else
q->CWdecoded[j] = -quantizer[max_size - 2 - q->codewords[j]] * q->flcoeffs6[i]; chctx->CWdecoded[j] = -quantizer[max_size - 2 - chctx->codewords[j]] * chctx->flcoeffs6[i];
} }
} }
} }
@ -645,16 +678,16 @@ static int inverse_quant_coeff(IMCContext *q, int stream_format_code)
} }
static int imc_get_coeffs(IMCContext *q) static int imc_get_coeffs(IMCContext *q, IMCChannel *chctx)
{ {
int i, j, cw_len, cw; int i, j, cw_len, cw;
for (i = 0; i < BANDS; i++) { for (i = 0; i < BANDS; i++) {
if (!q->sumLenArr[i]) if (!chctx->sumLenArr[i])
continue; continue;
if (q->bandFlagsBuf[i] || q->bandWidthT[i]) { if (chctx->bandFlagsBuf[i] || chctx->bandWidthT[i]) {
for (j = band_tab[i]; j < band_tab[i + 1]; j++) { for (j = band_tab[i]; j < band_tab[i + 1]; j++) {
cw_len = q->CWlengthT[j]; cw_len = chctx->CWlengthT[j];
cw = 0; cw = 0;
if (get_bits_count(&q->gb) + cw_len > 512) { if (get_bits_count(&q->gb) + cw_len > 512) {
@ -662,33 +695,195 @@ static int imc_get_coeffs(IMCContext *q)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
if (cw_len && (!q->bandFlagsBuf[i] || !q->skipFlags[j])) if (cw_len && (!chctx->bandFlagsBuf[i] || !chctx->skipFlags[j]))
cw = get_bits(&q->gb, cw_len); cw = get_bits(&q->gb, cw_len);
q->codewords[j] = cw; chctx->codewords[j] = cw;
} }
} }
} }
return 0; return 0;
} }
static int imc_decode_block(AVCodecContext *avctx, IMCContext *q, int ch)
{
int stream_format_code;
int imc_hdr, i, j, ret;
int flag;
int bits, summer;
int counter, bitscount;
IMCChannel *chctx = q->chctx + ch;
/* Check the frame header */
imc_hdr = get_bits(&q->gb, 9);
if (imc_hdr & 0x18) {
av_log(avctx, AV_LOG_ERROR, "frame header check failed!\n");
av_log(avctx, AV_LOG_ERROR, "got %X.\n", imc_hdr);
return AVERROR_INVALIDDATA;
}
stream_format_code = get_bits(&q->gb, 3);
if (stream_format_code & 1) {
av_log_ask_for_sample(avctx, "Stream format %X is not supported\n",
stream_format_code);
return AVERROR_PATCHWELCOME;
}
// av_log(avctx, AV_LOG_DEBUG, "stream_format_code = %d\n", stream_format_code);
if (stream_format_code & 0x04)
chctx->decoder_reset = 1;
if (chctx->decoder_reset) {
memset(q->out_samples, 0, sizeof(q->out_samples));
for (i = 0; i < BANDS; i++)
chctx->old_floor[i] = 1.0;
for (i = 0; i < COEFFS; i++)
chctx->CWdecoded[i] = 0;
chctx->decoder_reset = 0;
}
flag = get_bits1(&q->gb);
imc_read_level_coeffs(q, stream_format_code, chctx->levlCoeffBuf);
if (stream_format_code & 0x4)
imc_decode_level_coefficients(q, chctx->levlCoeffBuf,
chctx->flcoeffs1, chctx->flcoeffs2);
else
imc_decode_level_coefficients2(q, chctx->levlCoeffBuf, chctx->old_floor,
chctx->flcoeffs1, chctx->flcoeffs2);
memcpy(chctx->old_floor, chctx->flcoeffs1, 32 * sizeof(float));
counter = 0;
for (i = 0; i < BANDS; i++) {
if (chctx->levlCoeffBuf[i] == 16) {
chctx->bandWidthT[i] = 0;
counter++;
} else
chctx->bandWidthT[i] = band_tab[i + 1] - band_tab[i];
}
memset(chctx->bandFlagsBuf, 0, BANDS * sizeof(int));
for (i = 0; i < BANDS - 1; i++) {
if (chctx->bandWidthT[i])
chctx->bandFlagsBuf[i] = get_bits1(&q->gb);
}
imc_calculate_coeffs(q, chctx->flcoeffs1, chctx->flcoeffs2, chctx->bandWidthT, chctx->flcoeffs3, chctx->flcoeffs5);
bitscount = 0;
/* first 4 bands will be assigned 5 bits per coefficient */
if (stream_format_code & 0x2) {
bitscount += 15;
chctx->bitsBandT[0] = 5;
chctx->CWlengthT[0] = 5;
chctx->CWlengthT[1] = 5;
chctx->CWlengthT[2] = 5;
for (i = 1; i < 4; i++) {
bits = (chctx->levlCoeffBuf[i] == 16) ? 0 : 5;
chctx->bitsBandT[i] = bits;
for (j = band_tab[i]; j < band_tab[i + 1]; j++) {
chctx->CWlengthT[j] = bits;
bitscount += bits;
}
}
}
if (avctx->codec_id == CODEC_ID_IAC) {
bitscount += !!chctx->bandWidthT[BANDS - 1];
if (!(stream_format_code & 0x2))
bitscount += 16;
}
if ((ret = bit_allocation(q, chctx, stream_format_code,
512 - bitscount - get_bits_count(&q->gb),
flag)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Bit allocations failed\n");
chctx->decoder_reset = 1;
return ret;
}
for (i = 0; i < BANDS; i++) {
chctx->sumLenArr[i] = 0;
chctx->skipFlagRaw[i] = 0;
for (j = band_tab[i]; j < band_tab[i + 1]; j++)
chctx->sumLenArr[i] += chctx->CWlengthT[j];
if (chctx->bandFlagsBuf[i])
if ((((band_tab[i + 1] - band_tab[i]) * 1.5) > chctx->sumLenArr[i]) && (chctx->sumLenArr[i] > 0))
chctx->skipFlagRaw[i] = 1;
}
imc_get_skip_coeff(q, chctx);
for (i = 0; i < BANDS; i++) {
chctx->flcoeffs6[i] = chctx->flcoeffs1[i];
/* band has flag set and at least one coded coefficient */
if (chctx->bandFlagsBuf[i] && (band_tab[i + 1] - band_tab[i]) != chctx->skipFlagCount[i]) {
chctx->flcoeffs6[i] *= q->sqrt_tab[ band_tab[i + 1] - band_tab[i]] /
q->sqrt_tab[(band_tab[i + 1] - band_tab[i] - chctx->skipFlagCount[i])];
}
}
/* calculate bits left, bits needed and adjust bit allocation */
bits = summer = 0;
for (i = 0; i < BANDS; i++) {
if (chctx->bandFlagsBuf[i]) {
for (j = band_tab[i]; j < band_tab[i + 1]; j++) {
if (chctx->skipFlags[j]) {
summer += chctx->CWlengthT[j];
chctx->CWlengthT[j] = 0;
}
}
bits += chctx->skipFlagBits[i];
summer -= chctx->skipFlagBits[i];
}
}
imc_adjust_bit_allocation(q, chctx, summer);
for (i = 0; i < BANDS; i++) {
chctx->sumLenArr[i] = 0;
for (j = band_tab[i]; j < band_tab[i + 1]; j++)
if (!chctx->skipFlags[j])
chctx->sumLenArr[i] += chctx->CWlengthT[j];
}
memset(chctx->codewords, 0, sizeof(chctx->codewords));
if (imc_get_coeffs(q, chctx) < 0) {
av_log(avctx, AV_LOG_ERROR, "Read coefficients failed\n");
chctx->decoder_reset = 1;
return AVERROR_INVALIDDATA;
}
if (inverse_quant_coeff(q, chctx, stream_format_code) < 0) {
av_log(avctx, AV_LOG_ERROR, "Inverse quantization of coefficients failed\n");
chctx->decoder_reset = 1;
return AVERROR_INVALIDDATA;
}
memset(chctx->skipFlags, 0, sizeof(chctx->skipFlags));
imc_imdct256(q, chctx, avctx->channels);
return 0;
}
static int imc_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
+   int ret, i;
    IMCContext *q = avctx->priv_data;
-   int stream_format_code;
-   int imc_hdr, i, j, ret;
-   int flag;
-   int bits, summer;
-   int counter, bitscount;
    LOCAL_ALIGNED_16(uint16_t, buf16, [IMC_BLOCK_SIZE / 2]);

-   if (buf_size < IMC_BLOCK_SIZE) {
-       av_log(avctx, AV_LOG_ERROR, "imc frame too small!\n");
+   if (buf_size < IMC_BLOCK_SIZE * avctx->channels) {
+       av_log(avctx, AV_LOG_ERROR, "frame too small!\n");
        return AVERROR_INVALIDDATA;
    }
@@ -698,163 +893,36 @@ static int imc_decode_frame(AVCodecContext *avctx, void *data,
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
-   q->out_samples = (float*)q->frame.data[0];
-   q->dsp.bswap16_buf(buf16, (const uint16_t*)buf, IMC_BLOCK_SIZE / 2);
-   init_get_bits(&q->gb, (const uint8_t*)buf16, IMC_BLOCK_SIZE * 8);
-   /* Check the frame header */
-   imc_hdr = get_bits(&q->gb, 9);
-   if (imc_hdr != IMC_FRAME_ID) {
-       av_log(avctx, AV_LOG_ERROR, "imc frame header check failed!\n");
-       av_log(avctx, AV_LOG_ERROR, "got %x instead of 0x21.\n", imc_hdr);
-       return AVERROR_INVALIDDATA;
-   }
-   stream_format_code = get_bits(&q->gb, 3);
-   if (stream_format_code & 1) {
-       av_log(avctx, AV_LOG_ERROR, "Stream code format %X is not supported\n", stream_format_code);
-       return AVERROR_INVALIDDATA;
-   }
-   // av_log(avctx, AV_LOG_DEBUG, "stream_format_code = %d\n", stream_format_code);
-   if (stream_format_code & 0x04)
-       q->decoder_reset = 1;
-   if (q->decoder_reset) {
-       memset(q->out_samples, 0, sizeof(q->out_samples));
-       for (i = 0; i < BANDS; i++)
-           q->old_floor[i] = 1.0;
-       for (i = 0; i < COEFFS; i++)
-           q->CWdecoded[i] = 0;
-       q->decoder_reset = 0;
-   }
-   flag = get_bits1(&q->gb);
-   imc_read_level_coeffs(q, stream_format_code, q->levlCoeffBuf);
-   if (stream_format_code & 0x4)
-       imc_decode_level_coefficients(q, q->levlCoeffBuf,
-                                     q->flcoeffs1, q->flcoeffs2);
-   else
-       imc_decode_level_coefficients2(q, q->levlCoeffBuf, q->old_floor,
-                                      q->flcoeffs1, q->flcoeffs2);
-   memcpy(q->old_floor, q->flcoeffs1, 32 * sizeof(float));
-   counter = 0;
-   for (i = 0; i < BANDS; i++) {
-       if (q->levlCoeffBuf[i] == 16) {
-           q->bandWidthT[i] = 0;
-           counter++;
-       } else
-           q->bandWidthT[i] = band_tab[i + 1] - band_tab[i];
-   }
-   memset(q->bandFlagsBuf, 0, BANDS * sizeof(int));
-   for (i = 0; i < BANDS - 1; i++) {
-       if (q->bandWidthT[i])
-           q->bandFlagsBuf[i] = get_bits1(&q->gb);
-   }
-   imc_calculate_coeffs(q, q->flcoeffs1, q->flcoeffs2, q->bandWidthT, q->flcoeffs3, q->flcoeffs5);
-   bitscount = 0;
-   /* first 4 bands will be assigned 5 bits per coefficient */
-   if (stream_format_code & 0x2) {
-       bitscount += 15;
-       q->bitsBandT[0] = 5;
-       q->CWlengthT[0] = 5;
-       q->CWlengthT[1] = 5;
-       q->CWlengthT[2] = 5;
-       for (i = 1; i < 4; i++) {
-           bits = (q->levlCoeffBuf[i] == 16) ? 0 : 5;
-           q->bitsBandT[i] = bits;
-           for (j = band_tab[i]; j < band_tab[i + 1]; j++) {
-               q->CWlengthT[j] = bits;
-               bitscount += bits;
-           }
-       }
-   }
-   if ((ret = bit_allocation(q, stream_format_code,
-                             512 - bitscount - get_bits_count(&q->gb),
-                             flag)) < 0) {
-       av_log(avctx, AV_LOG_ERROR, "Bit allocations failed\n");
-       q->decoder_reset = 1;
-       return ret;
-   }
-   for (i = 0; i < BANDS; i++) {
-       q->sumLenArr[i] = 0;
-       q->skipFlagRaw[i] = 0;
-       for (j = band_tab[i]; j < band_tab[i + 1]; j++)
-           q->sumLenArr[i] += q->CWlengthT[j];
-       if (q->bandFlagsBuf[i])
-           if ((((band_tab[i + 1] - band_tab[i]) * 1.5) > q->sumLenArr[i]) && (q->sumLenArr[i] > 0))
-               q->skipFlagRaw[i] = 1;
-   }
-   imc_get_skip_coeff(q);
-   for (i = 0; i < BANDS; i++) {
-       q->flcoeffs6[i] = q->flcoeffs1[i];
-       /* band has flag set and at least one coded coefficient */
-       if (q->bandFlagsBuf[i] && (band_tab[i + 1] - band_tab[i]) != q->skipFlagCount[i]) {
-           q->flcoeffs6[i] *= q->sqrt_tab[ band_tab[i + 1] - band_tab[i]] /
-                              q->sqrt_tab[(band_tab[i + 1] - band_tab[i] - q->skipFlagCount[i])];
-       }
-   }
-   /* calculate bits left, bits needed and adjust bit allocation */
-   bits = summer = 0;
-   for (i = 0; i < BANDS; i++) {
-       if (q->bandFlagsBuf[i]) {
-           for (j = band_tab[i]; j < band_tab[i + 1]; j++) {
-               if (q->skipFlags[j]) {
-                   summer += q->CWlengthT[j];
-                   q->CWlengthT[j] = 0;
-               }
-           }
-           bits += q->skipFlagBits[i];
-           summer -= q->skipFlagBits[i];
-       }
-   }
-   imc_adjust_bit_allocation(q, summer);
-   for (i = 0; i < BANDS; i++) {
-       q->sumLenArr[i] = 0;
-       for (j = band_tab[i]; j < band_tab[i + 1]; j++)
-           if (!q->skipFlags[j])
-               q->sumLenArr[i] += q->CWlengthT[j];
-   }
-   memset(q->codewords, 0, sizeof(q->codewords));
-   if (imc_get_coeffs(q) < 0) {
-       av_log(avctx, AV_LOG_ERROR, "Read coefficients failed\n");
-       q->decoder_reset = 1;
-       return AVERROR_INVALIDDATA;
-   }
-   if (inverse_quant_coeff(q, stream_format_code) < 0) {
-       av_log(avctx, AV_LOG_ERROR, "Inverse quantization of coefficients failed\n");
-       q->decoder_reset = 1;
-       return AVERROR_INVALIDDATA;
-   }
-   memset(q->skipFlags, 0, sizeof(q->skipFlags));
-   imc_imdct256(q);
+   for (i = 0; i < avctx->channels; i++) {
+       q->out_samples = (float*)q->frame.data[0] + i;
+       q->dsp.bswap16_buf(buf16, (const uint16_t*)buf, IMC_BLOCK_SIZE / 2);
+       init_get_bits(&q->gb, (const uint8_t*)buf16, IMC_BLOCK_SIZE * 8);
+       buf += IMC_BLOCK_SIZE;
+       if ((ret = imc_decode_block(avctx, q, i)) < 0)
+           return ret;
+   }
+   if (avctx->channels == 2) {
+       float *src = (float*)q->frame.data[0], t1, t2;
+       for (i = 0; i < COEFFS; i++) {
+           t1 = src[0];
+           t2 = src[1];
+           src[0] = t1 + t2;
+           src[1] = t1 - t2;
+           src += 2;
+       }
+   }

    *got_frame_ptr = 1;
    *(AVFrame *)data = q->frame;

-   return IMC_BLOCK_SIZE;
+   return IMC_BLOCK_SIZE * avctx->channels;
}
@@ -879,3 +947,15 @@ AVCodec ff_imc_decoder = {
    .capabilities = CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("IMC (Intel Music Coder)"),
};
AVCodec ff_iac_decoder = {
.name = "iac",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_IAC,
.priv_data_size = sizeof(IMCContext),
.init = imc_decode_init,
.close = imc_decode_close,
.decode = imc_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("IAC (Indeo Audio Coder)"),
};
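With the per-channel split above, each channel is decoded into every other sample of the interleaved output and, for two-channel streams, the pair is then recombined with the sum/difference butterfly shown in the new imc_decode_frame(). A hedged, stand-alone sketch of that last step follows; the helper name and signature are invented for illustration and are not part of imc.c:

#include <stddef.h>

/* Hypothetical helper: undo the sum/difference coupling on an interleaved
 * stereo buffer after both channels have been decoded, as the in-place loop
 * in imc_decode_frame() above does. */
static void butterfly_interleaved(float *samples, size_t nb_frames)
{
    size_t i;
    for (i = 0; i < nb_frames; i++) {
        float t1 = samples[2 * i];      /* decoded channel 0 */
        float t2 = samples[2 * i + 1];  /* decoded channel 1 */
        samples[2 * i]     = t1 + t2;   /* reconstructed left  */
        samples[2 * i + 1] = t1 - t2;   /* reconstructed right */
    }
}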

View File

@@ -44,6 +44,25 @@ static const int8_t cyclTab2[32] = {
    12, 13, 14, 15, 16, 17, 17, 18, 19, 20, 21, 22,
    23, 24, 25, 26, 27, 28, 29};
static const float iac_weights1[31] = {
0.0538585, 0.0576251, 0.0645592, 0.0494032, 0.0428915, 0.0592188,
0.0604145, 0.0673549, 0.0797351, 0.0972911, 0.119376, 0.144777,
0.17181, 0.198625, 0.242918, 0.262113, 0.278434, 0.310752,
0.319978, 0.328482, 0.354631, 0.380212, 0.388783, 0.400428,
0.43096, 0.462397, 0.479469, 0.499329, 0.534526, 0.568631,
0.589218
};
static const float iac_weights2[31] = {
0.000375307, 0.000450455, 0.000612191, 0.000297262, 0.000202956,
0.000484887, 0.000511777, 0.000686431, 0.00108256, 0.00185267,
0.00321869, 0.00541861, 0.00860266, 0.012726, 0.0219151,
0.0269104, 0.0316774, 0.0426107, 0.046113, 0.0494974,
0.0608692, 0.0734633, 0.0780208, 0.0844921, 0.103034,
0.124606, 0.137421, 0.153336, 0.184296, 0.217792,
0.239742
};
static const float imc_weights1[31] = {
    0.119595, 0.123124, 0.129192, 9.97377e-2, 8.1923e-2, 9.61153e-2, 8.77885e-2, 8.61174e-2,
    9.00882e-2, 9.91658e-2, 0.112991, 0.131126, 0.152886, 0.177292, 0.221782, 0.244917, 0.267386,

View File

@@ -275,7 +275,8 @@ static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
        maxband = c->last_max_band + get_vlc2(gb, band_vlc.table, MPC8_BANDS_BITS, 2);
        if(maxband > 32) maxband -= 33;
    }
-   if(maxband >= BANDS) {
+   if(maxband > c->maxbands + 1 || maxband >= BANDS) {
        av_log(avctx, AV_LOG_ERROR, "maxband %d too large\n",maxband);
        return AVERROR_INVALIDDATA;
    }
@@ -412,7 +413,8 @@ static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
        }
    }
-   ff_mpc_dequantize_and_synth(c, maxband, c->frame.data[0], avctx->channels);
+   ff_mpc_dequantize_and_synth(c, maxband - 1, c->frame.data[0],
+                               avctx->channels);
    c->cur_frame++;
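The tightened check above bounds the delta-coded band count both by the limit signalled in the stream header (c->maxbands) and by the hard table size before it is used for synthesis. A small sketch of the same decode-and-validate step, with invented names and the same wrap-around rule:

/* Sketch with invented names: decode the per-frame band count as a delta
 * from the previous frame, wrap it back into range as the existing decoder
 * does, then reject values that exceed either the header limit or the
 * hard table size. */
static int decode_maxband(int last_max_band, int delta,
                          int header_maxbands, int hard_limit)
{
    int maxband = last_max_band + delta;

    if (maxband > 32)
        maxband -= 33;                 /* wrap-around used by the delta code */
    if (maxband > header_maxbands + 1 || maxband >= hard_limit)
        return -1;                     /* caller reports AVERROR_INVALIDDATA */
    return maxband;
}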

View File

@@ -27,7 +27,7 @@
 */
#define LIBAVCODEC_VERSION_MAJOR 54
-#define LIBAVCODEC_VERSION_MINOR 24
+#define LIBAVCODEC_VERSION_MINOR 25
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \

View File

@@ -123,7 +123,8 @@ int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num)
    return 0;
}

-void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values)
+int ff_vorbis_ready_floor1_list(AVCodecContext *avccontext,
+                                vorbis_floor1_entry *list, int values)
{
    int i;
    list[0].sort = 0;
@@ -147,6 +148,11 @@ void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values)
    for (i = 0; i < values - 1; i++) {
        int j;
        for (j = i + 1; j < values; j++) {
+           if (list[i].x == list[j].x) {
+               av_log(avccontext, AV_LOG_ERROR,
+                      "Duplicate value found in floor 1 X coordinates\n");
+               return AVERROR_INVALIDDATA;
+           }
            if (list[list[i].sort].x > list[list[j].sort].x) {
                int tmp = list[i].sort;
                list[i].sort = list[j].sort;
@@ -154,6 +160,7 @@ void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values)
            }
        }
    }
+   return 0;
}

static inline void render_line_unrolled(intptr_t x, int y, int x1,
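The duplicate test now runs while the floor 1 list is being prepared, and both callers check the new return value. A stand-alone sketch of the same check, using an invented helper name and a plain uint16_t array instead of vorbis_floor1_entry:

#include <stdint.h>

/* Return 0 if all floor 1 X coordinates are distinct, a negative value if
 * any pair collides -- mirroring the validation added above. */
static int check_floor1_x_unique(const uint16_t *x, int values)
{
    int i, j;
    for (i = 0; i < values - 1; i++)
        for (j = i + 1; j < values; j++)
            if (x[i] == x[j])
                return -1;  /* duplicate X value: invalid setup header */
    return 0;
}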

View File

@@ -36,7 +36,8 @@ typedef struct {
    uint16_t high;
} vorbis_floor1_entry;

-void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values);
+int ff_vorbis_ready_floor1_list(AVCodecContext *avccontext,
+                                vorbis_floor1_entry *list, int values);
unsigned int ff_vorbis_nth_root(unsigned int x, unsigned int n); // x^(1/n)
int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num);
void ff_vorbis_floor1_render_list(vorbis_floor1_entry * list, int values,

View File

@@ -578,14 +578,10 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
        }

        // Precalculate order of x coordinates - needed for decode
-       ff_vorbis_ready_floor1_list(floor_setup->data.t1.list, floor_setup->data.t1.x_list_dim);
-
-       for (j=1; j<floor_setup->data.t1.x_list_dim; j++) {
-           if ( floor_setup->data.t1.list[ floor_setup->data.t1.list[j-1].sort ].x
-               == floor_setup->data.t1.list[ floor_setup->data.t1.list[j ].sort ].x) {
-               av_log(vc->avccontext, AV_LOG_ERROR, "Non unique x values in floor type 1\n");
-               return AVERROR_INVALIDDATA;
-           }
+       if (ff_vorbis_ready_floor1_list(vc->avccontext,
+                                       floor_setup->data.t1.list,
+                                       floor_setup->data.t1.x_list_dim)) {
+           return AVERROR_INVALIDDATA;
        }
    } else if (floor_setup->floor_type == 0) {

View File

@@ -340,7 +340,8 @@ static int create_vorbis_context(vorbis_enc_context *venc,
        };
        fc->list[i].x = a[i - 2];
    }
-   ff_vorbis_ready_floor1_list(fc->list, fc->values);
+   if (ff_vorbis_ready_floor1_list(avccontext, fc->list, fc->values))
+       return AVERROR_BUG;

    venc->nresidues = 1;
    venc->residues = av_malloc(sizeof(vorbis_enc_residue) * venc->nresidues);

View File

@ -106,8 +106,8 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
} }
PARSE_FORMATS(s->formats_str, enum AVSampleFormat, s->formats, PARSE_FORMATS(s->formats_str, enum AVSampleFormat, s->formats,
avfilter_add_format, av_get_sample_fmt, AV_SAMPLE_FMT_NONE, "sample format"); ff_add_format, av_get_sample_fmt, AV_SAMPLE_FMT_NONE, "sample format");
PARSE_FORMATS(s->sample_rates_str, int, s->sample_rates, avfilter_add_format, PARSE_FORMATS(s->sample_rates_str, int, s->sample_rates, ff_add_format,
get_sample_rate, 0, "sample rate"); get_sample_rate, 0, "sample rate");
PARSE_FORMATS(s->channel_layouts_str, uint64_t, s->channel_layouts, PARSE_FORMATS(s->channel_layouts_str, uint64_t, s->channel_layouts,
ff_add_channel_layout, av_get_channel_layout, 0, ff_add_channel_layout, av_get_channel_layout, 0,
@ -122,8 +122,8 @@ static int query_formats(AVFilterContext *ctx)
{ {
AFormatContext *s = ctx->priv; AFormatContext *s = ctx->priv;
avfilter_set_common_formats(ctx, s->formats ? s->formats : ff_set_common_formats(ctx, s->formats ? s->formats :
avfilter_all_formats(AVMEDIA_TYPE_AUDIO)); ff_all_formats(AVMEDIA_TYPE_AUDIO));
ff_set_common_samplerates(ctx, s->sample_rates ? s->sample_rates : ff_set_common_samplerates(ctx, s->sample_rates ? s->sample_rates :
ff_all_samplerates()); ff_all_samplerates());
ff_set_common_channel_layouts(ctx, s->channel_layouts ? s->channel_layouts : ff_set_common_channel_layouts(ctx, s->channel_layouts ? s->channel_layouts :

View File

@ -349,7 +349,7 @@ static int request_samples(AVFilterContext *ctx, int min_samples)
if (s->input_state[i] == INPUT_OFF) if (s->input_state[i] == INPUT_OFF)
continue; continue;
while (!ret && av_audio_fifo_size(s->fifos[i]) < min_samples) while (!ret && av_audio_fifo_size(s->fifos[i]) < min_samples)
ret = avfilter_request_frame(ctx->inputs[i]); ret = ff_request_frame(ctx->inputs[i]);
if (ret == AVERROR_EOF) { if (ret == AVERROR_EOF) {
if (av_audio_fifo_size(s->fifos[i]) == 0) { if (av_audio_fifo_size(s->fifos[i]) == 0) {
s->input_state[i] = INPUT_OFF; s->input_state[i] = INPUT_OFF;
@ -410,7 +410,7 @@ static int request_frame(AVFilterLink *outlink)
} }
if (s->frame_list->nb_frames == 0) { if (s->frame_list->nb_frames == 0) {
ret = avfilter_request_frame(ctx->inputs[0]); ret = ff_request_frame(ctx->inputs[0]);
if (ret == AVERROR_EOF) { if (ret == AVERROR_EOF) {
s->input_state[0] = INPUT_OFF; s->input_state[0] = INPUT_OFF;
if (s->nb_inputs == 1) if (s->nb_inputs == 1)
@ -497,7 +497,7 @@ static int init(AVFilterContext *ctx, const char *args, void *opaque)
pad.name = av_strdup(name); pad.name = av_strdup(name);
pad.filter_samples = filter_samples; pad.filter_samples = filter_samples;
avfilter_insert_inpad(ctx, i, &pad); ff_insert_inpad(ctx, i, &pad);
} }
return 0; return 0;
@ -525,8 +525,8 @@ static void uninit(AVFilterContext *ctx)
static int query_formats(AVFilterContext *ctx) static int query_formats(AVFilterContext *ctx)
{ {
AVFilterFormats *formats = NULL; AVFilterFormats *formats = NULL;
avfilter_add_format(&formats, AV_SAMPLE_FMT_FLT); ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
avfilter_set_common_formats(ctx, formats); ff_set_common_formats(ctx, formats);
ff_set_common_channel_layouts(ctx, ff_all_channel_layouts()); ff_set_common_channel_layouts(ctx, ff_all_channel_layouts());
ff_set_common_samplerates(ctx, ff_all_samplerates()); ff_set_common_samplerates(ctx, ff_all_samplerates());
return 0; return 0;
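In the mixer above, request_samples() now pulls input through the private ff_request_frame() until each input FIFO holds enough audio or the source reports EOF. A hedged sketch of that pull loop as a stand-alone helper; the name and the EOF handling are simplified for illustration:

#include "libavutil/audio_fifo.h"
#include "avfilter.h"
#include "internal.h"

/* Hypothetical helper: keep requesting frames from the upstream filter until
 * this input has buffered at least min_samples, or the source fails / ends. */
static int fill_input(AVFilterLink *inlink, AVAudioFifo *fifo, int min_samples)
{
    int ret = 0;

    while (!ret && av_audio_fifo_size(fifo) < min_samples)
        ret = ff_request_frame(inlink);
    if (ret == AVERROR_EOF && av_audio_fifo_size(fifo) > 0)
        return 0;   /* drain what is already buffered before signalling EOF */
    return ret;
}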

View File

@ -24,6 +24,7 @@
#include "audio.h" #include "audio.h"
#include "avfilter.h" #include "avfilter.h"
#include "internal.h"
typedef struct ASyncContext { typedef struct ASyncContext {
const AVClass *class; const AVClass *class;
@ -116,7 +117,7 @@ static int request_frame(AVFilterLink *link)
{ {
AVFilterContext *ctx = link->src; AVFilterContext *ctx = link->src;
ASyncContext *s = ctx->priv; ASyncContext *s = ctx->priv;
int ret = avfilter_request_frame(ctx->inputs[0]); int ret = ff_request_frame(ctx->inputs[0]);
int nb_samples; int nb_samples;
/* flush the fifo */ /* flush the fifo */

View File

@ -55,18 +55,18 @@ static int query_formats(AVFilterContext *ctx)
AVFilterLink *inlink = ctx->inputs[0]; AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0]; AVFilterLink *outlink = ctx->outputs[0];
AVFilterFormats *in_formats = avfilter_all_formats(AVMEDIA_TYPE_AUDIO); AVFilterFormats *in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
AVFilterFormats *out_formats = avfilter_all_formats(AVMEDIA_TYPE_AUDIO); AVFilterFormats *out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
AVFilterFormats *in_samplerates = ff_all_samplerates(); AVFilterFormats *in_samplerates = ff_all_samplerates();
AVFilterFormats *out_samplerates = ff_all_samplerates(); AVFilterFormats *out_samplerates = ff_all_samplerates();
AVFilterChannelLayouts *in_layouts = ff_all_channel_layouts(); AVFilterChannelLayouts *in_layouts = ff_all_channel_layouts();
AVFilterChannelLayouts *out_layouts = ff_all_channel_layouts(); AVFilterChannelLayouts *out_layouts = ff_all_channel_layouts();
avfilter_formats_ref(in_formats, &inlink->out_formats); ff_formats_ref(in_formats, &inlink->out_formats);
avfilter_formats_ref(out_formats, &outlink->in_formats); ff_formats_ref(out_formats, &outlink->in_formats);
avfilter_formats_ref(in_samplerates, &inlink->out_samplerates); ff_formats_ref(in_samplerates, &inlink->out_samplerates);
avfilter_formats_ref(out_samplerates, &outlink->in_samplerates); ff_formats_ref(out_samplerates, &outlink->in_samplerates);
ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts); ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts);
ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts); ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts);
@ -130,7 +130,7 @@ static int request_frame(AVFilterLink *outlink)
{ {
AVFilterContext *ctx = outlink->src; AVFilterContext *ctx = outlink->src;
ResampleContext *s = ctx->priv; ResampleContext *s = ctx->priv;
int ret = avfilter_request_frame(ctx->inputs[0]); int ret = ff_request_frame(ctx->inputs[0]);
/* flush the lavr delay buffer */ /* flush the lavr delay buffer */
if (ret == AVERROR_EOF && s->avr) { if (ret == AVERROR_EOF && s->avr) {

View File

@ -96,9 +96,9 @@ void ff_command_queue_pop(AVFilterContext *filter)
av_free(c); av_free(c);
} }
void avfilter_insert_pad(unsigned idx, unsigned *count, size_t padidx_off, void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
AVFilterPad **pads, AVFilterLink ***links, AVFilterPad **pads, AVFilterLink ***links,
AVFilterPad *newpad) AVFilterPad *newpad)
{ {
unsigned i; unsigned i;
@ -183,14 +183,15 @@ int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
/* if any information on supported media formats already exists on the /* if any information on supported media formats already exists on the
* link, we need to preserve that */ * link, we need to preserve that */
if (link->out_formats) if (link->out_formats)
avfilter_formats_changeref(&link->out_formats, ff_formats_changeref(&link->out_formats,
&filt->outputs[filt_dstpad_idx]->out_formats); &filt->outputs[filt_dstpad_idx]->out_formats);
if (link->out_samplerates)
ff_formats_changeref(&link->out_samplerates,
&filt->outputs[filt_dstpad_idx]->out_samplerates);
if (link->out_channel_layouts) if (link->out_channel_layouts)
ff_channel_layouts_changeref(&link->out_channel_layouts, ff_channel_layouts_changeref(&link->out_channel_layouts,
&filt->outputs[filt_dstpad_idx]->out_channel_layouts); &filt->outputs[filt_dstpad_idx]->out_channel_layouts);
if (link->out_samplerates)
avfilter_formats_changeref(&link->out_samplerates,
&filt->outputs[filt_dstpad_idx]->out_samplerates);
return 0; return 0;
} }
@ -307,18 +308,18 @@ void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
} }
} }
int avfilter_request_frame(AVFilterLink *link) int ff_request_frame(AVFilterLink *link)
{ {
FF_DPRINTF_START(NULL, request_frame); ff_dlog_link(NULL, link, 1); FF_DPRINTF_START(NULL, request_frame); ff_dlog_link(NULL, link, 1);
if (link->srcpad->request_frame) if (link->srcpad->request_frame)
return link->srcpad->request_frame(link); return link->srcpad->request_frame(link);
else if (link->src->inputs[0]) else if (link->src->inputs[0])
return avfilter_request_frame(link->src->inputs[0]); return ff_request_frame(link->src->inputs[0]);
else return -1; else return -1;
} }
int avfilter_poll_frame(AVFilterLink *link) int ff_poll_frame(AVFilterLink *link)
{ {
int i, min = INT_MAX; int i, min = INT_MAX;
@ -329,7 +330,7 @@ int avfilter_poll_frame(AVFilterLink *link)
int val; int val;
if (!link->src->inputs[i]) if (!link->src->inputs[i])
return -1; return -1;
val = avfilter_poll_frame(link->src->inputs[i]); val = ff_poll_frame(link->src->inputs[i]);
min = FFMIN(min, val); min = FFMIN(min, val);
} }
@ -492,10 +493,10 @@ void avfilter_free(AVFilterContext *filter)
if ((link = filter->inputs[i])) { if ((link = filter->inputs[i])) {
if (link->src) if (link->src)
link->src->outputs[link->srcpad - link->src->output_pads] = NULL; link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
avfilter_formats_unref(&link->in_formats); ff_formats_unref(&link->in_formats);
avfilter_formats_unref(&link->out_formats); ff_formats_unref(&link->out_formats);
avfilter_formats_unref(&link->in_samplerates); ff_formats_unref(&link->in_samplerates);
avfilter_formats_unref(&link->out_samplerates); ff_formats_unref(&link->out_samplerates);
ff_channel_layouts_unref(&link->in_channel_layouts); ff_channel_layouts_unref(&link->in_channel_layouts);
ff_channel_layouts_unref(&link->out_channel_layouts); ff_channel_layouts_unref(&link->out_channel_layouts);
} }
@ -505,10 +506,10 @@ void avfilter_free(AVFilterContext *filter)
if ((link = filter->outputs[i])) { if ((link = filter->outputs[i])) {
if (link->dst) if (link->dst)
link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL; link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
avfilter_formats_unref(&link->in_formats); ff_formats_unref(&link->in_formats);
avfilter_formats_unref(&link->out_formats); ff_formats_unref(&link->out_formats);
avfilter_formats_unref(&link->in_samplerates); ff_formats_unref(&link->in_samplerates);
avfilter_formats_unref(&link->out_samplerates); ff_formats_unref(&link->out_samplerates);
ff_channel_layouts_unref(&link->in_channel_layouts); ff_channel_layouts_unref(&link->in_channel_layouts);
ff_channel_layouts_unref(&link->out_channel_layouts); ff_channel_layouts_unref(&link->out_channel_layouts);
} }
@ -535,3 +536,32 @@ int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque
ret = filter->filter->init(filter, args, opaque); ret = filter->filter->init(filter, args, opaque);
return ret; return ret;
} }
#if FF_API_DEFAULT_CONFIG_OUTPUT_LINK
void avfilter_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
AVFilterPad **pads, AVFilterLink ***links,
AVFilterPad *newpad)
{
ff_insert_pad(idx, count, padidx_off, pads, links, newpad);
}
void avfilter_insert_inpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
ff_insert_pad(index, &f->input_count, offsetof(AVFilterLink, dstpad),
&f->input_pads, &f->inputs, p);
}
void avfilter_insert_outpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
ff_insert_pad(index, &f->output_count, offsetof(AVFilterLink, srcpad),
&f->output_pads, &f->outputs, p);
}
int avfilter_poll_frame(AVFilterLink *link)
{
return ff_poll_frame(link);
}
int avfilter_request_frame(AVFilterLink *link)
{
return ff_request_frame(link);
}
#endif
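The block above is the compatibility layer: the old public avfilter_* entry points survive only as thin wrappers around the private ff_* functions, guarded so they can be dropped at the next major bump. Reduced to a toy, self-contained sketch with all names invented:

/* Toy illustration only -- names invented. The library keeps the new ff_
 * function for internal use; the old public name is reduced to a wrapper
 * that is compiled only while the deprecation guard is still set. */
#ifndef FF_API_SOMETHING_PUBLIC
#define FF_API_SOMETHING_PUBLIC 1
#endif

int ff_do_something(int x);          /* internal name, e.g. in internal.h   */
int avfilter_do_something(int x);    /* old public name, e.g. in avfilter.h */

int ff_do_something(int x)
{
    return x + 1;
}

#if FF_API_SOMETHING_PUBLIC
int avfilter_do_something(int x)
{
    return ff_do_something(x);
}
#endif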

View File

@ -60,6 +60,7 @@ const char *avfilter_license(void);
typedef struct AVFilterContext AVFilterContext; typedef struct AVFilterContext AVFilterContext;
typedef struct AVFilterLink AVFilterLink; typedef struct AVFilterLink AVFilterLink;
typedef struct AVFilterPad AVFilterPad; typedef struct AVFilterPad AVFilterPad;
typedef struct AVFilterFormats AVFilterFormats;
/** /**
* A reference-counted buffer data type used by the filter system. Filters * A reference-counted buffer data type used by the filter system. Filters
@ -210,6 +211,7 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask);
*/ */
void avfilter_unref_buffer(AVFilterBufferRef *ref); void avfilter_unref_buffer(AVFilterBufferRef *ref);
#if FF_API_FILTERS_PUBLIC
/** /**
* Remove a reference to a buffer and set the pointer to NULL. * Remove a reference to a buffer and set the pointer to NULL.
* If this is the last reference to the buffer, the buffer itself * If this is the last reference to the buffer, the buffer itself
@ -258,14 +260,18 @@ void avfilter_unref_bufferp(AVFilterBufferRef **ref);
* we must ensure that all links which reference either pre-merge format list * we must ensure that all links which reference either pre-merge format list
* get updated as well. Therefore, we have the format list structure store a * get updated as well. Therefore, we have the format list structure store a
* pointer to each of the pointers to itself. * pointer to each of the pointers to itself.
* @addtogroup lavfi_deprecated
* @deprecated Those functions are only useful inside filters and
* user filters are not supported at this point.
* @{
*/ */
typedef struct AVFilterFormats { struct AVFilterFormats {
unsigned format_count; ///< number of formats unsigned format_count; ///< number of formats
int *formats; ///< list of media formats int *formats; ///< list of media formats
unsigned refcount; ///< number of references to this list unsigned refcount; ///< number of references to this list
struct AVFilterFormats ***refs; ///< references to this list struct AVFilterFormats ***refs; ///< references to this list
} AVFilterFormats; };
/** /**
* Create a list of supported formats. This is intended for use in * Create a list of supported formats. This is intended for use in
@ -275,6 +281,7 @@ typedef struct AVFilterFormats {
* empty list is created. * empty list is created.
* @return the format list, with no existing references * @return the format list, with no existing references
*/ */
attribute_deprecated
AVFilterFormats *avfilter_make_format_list(const int *fmts); AVFilterFormats *avfilter_make_format_list(const int *fmts);
/** /**
@ -284,16 +291,12 @@ AVFilterFormats *avfilter_make_format_list(const int *fmts);
* *
* @return a non negative value in case of success, or a negative * @return a non negative value in case of success, or a negative
* value corresponding to an AVERROR code in case of error * value corresponding to an AVERROR code in case of error
*/
int avfilter_add_format(AVFilterFormats **avff, int64_t fmt);
#if FF_API_OLD_ALL_FORMATS_API
/**
* @deprecated Use avfilter_make_all_formats() instead. * @deprecated Use avfilter_make_all_formats() instead.
*/ */
attribute_deprecated attribute_deprecated
int avfilter_add_format(AVFilterFormats **avff, int64_t fmt);
attribute_deprecated
AVFilterFormats *avfilter_all_formats(enum AVMediaType type); AVFilterFormats *avfilter_all_formats(enum AVMediaType type);
#endif
/** /**
* Return a list of all formats supported by FFmpeg for the given media type. * Return a list of all formats supported by FFmpeg for the given media type.
@ -320,6 +323,7 @@ AVFilterFormats *avfilter_make_all_packing_formats(void);
* If a and b do not share any common formats, neither is modified, and NULL * If a and b do not share any common formats, neither is modified, and NULL
* is returned. * is returned.
*/ */
attribute_deprecated
AVFilterFormats *avfilter_merge_formats(AVFilterFormats *a, AVFilterFormats *b); AVFilterFormats *avfilter_merge_formats(AVFilterFormats *a, AVFilterFormats *b);
/** /**
@ -334,40 +338,36 @@ AVFilterFormats *avfilter_merge_formats(AVFilterFormats *a, AVFilterFormats *b);
* | |____| | | |____| * | |____| | | |____|
* |________| |________________________ * |________| |________________________
*/ */
attribute_deprecated
void avfilter_formats_ref(AVFilterFormats *formats, AVFilterFormats **ref); void avfilter_formats_ref(AVFilterFormats *formats, AVFilterFormats **ref);
attribute_deprecated
/**
* If *ref is non-NULL, remove *ref as a reference to the format list
* it currently points to, deallocates that list if this was the last
* reference, and sets *ref to NULL.
*
* Before After
* ________ ________ NULL
* |formats |<--------. |formats | ^
* | ____ | ____|________________ | ____ | ____|________________
* | |refs| | | __|_ | |refs| | | __|_
* | |* * | | | | | | AVFilterLink | |* * | | | | | | AVFilterLink
* | |* *--------->|*ref| | |* | | | |*ref|
* | |____| | | |____| | |____| | | |____|
* |________| |_____________________ |________| |_____________________
*/
void avfilter_formats_unref(AVFilterFormats **ref); void avfilter_formats_unref(AVFilterFormats **ref);
attribute_deprecated
/**
*
* Before After
* ________ ________
* |formats |<---------. |formats |<---------.
* | ____ | ___|___ | ____ | ___|___
* | |refs| | | | | | |refs| | | | | NULL
* | |* *--------->|*oldref| | |* *--------->|*newref| ^
* | |* * | | |_______| | |* * | | |_______| ___|___
* | |____| | | |____| | | | |
* |________| |________| |*oldref|
* |_______|
*/
void avfilter_formats_changeref(AVFilterFormats **oldref, void avfilter_formats_changeref(AVFilterFormats **oldref,
AVFilterFormats **newref); AVFilterFormats **newref);
/**
* Helpers for query_formats() which set all links to the same list of
* formats/layouts. If there are no links hooked to this filter, the list
* of formats is freed.
*/
attribute_deprecated
void avfilter_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats);
attribute_deprecated
void avfilter_set_common_pixel_formats(AVFilterContext *ctx, AVFilterFormats *formats);
attribute_deprecated
void avfilter_set_common_sample_formats(AVFilterContext *ctx, AVFilterFormats *formats);
attribute_deprecated
void avfilter_set_common_channel_layouts(AVFilterContext *ctx, AVFilterFormats *formats);
#if FF_API_PACKING
attribute_deprecated
void avfilter_set_common_packing_formats(AVFilterContext *ctx, AVFilterFormats *formats);
#endif
/**
* @}
*/
#endif
/** /**
* A filter pad used for either input or output. * A filter pad used for either input or output.
@ -523,19 +523,6 @@ attribute_deprecated
int avfilter_default_query_formats(AVFilterContext *ctx); int avfilter_default_query_formats(AVFilterContext *ctx);
#endif #endif
/**
* Helpers for query_formats() which set all links to the same list of
* formats/layouts. If there are no links hooked to this filter, the list
* of formats is freed.
*/
void avfilter_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats);
void avfilter_set_common_pixel_formats(AVFilterContext *ctx, AVFilterFormats *formats);
void avfilter_set_common_sample_formats(AVFilterContext *ctx, AVFilterFormats *formats);
void avfilter_set_common_channel_layouts(AVFilterContext *ctx, AVFilterFormats *formats);
#if FF_API_PACKING
void avfilter_set_common_packing_formats(AVFilterContext *ctx, AVFilterFormats *formats);
#endif
#if FF_API_FILTERS_PUBLIC #if FF_API_FILTERS_PUBLIC
/** start_frame() handler for filters which simply pass video along */ /** start_frame() handler for filters which simply pass video along */
attribute_deprecated attribute_deprecated
@ -831,6 +818,7 @@ AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
enum AVSampleFormat sample_fmt, enum AVSampleFormat sample_fmt,
uint64_t channel_layout); uint64_t channel_layout);
#if FF_API_FILTERS_PUBLIC
/** /**
* Request an input frame from the filter at the other end of the link. * Request an input frame from the filter at the other end of the link.
* *
@ -842,24 +830,10 @@ AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
*/ */
int avfilter_request_frame(AVFilterLink *link); int avfilter_request_frame(AVFilterLink *link);
/** attribute_deprecated
* Poll a frame from the filter chain.
*
* @param link the input link
* @return the number of immediately available frames, a negative
* number in case of error
*/
int avfilter_poll_frame(AVFilterLink *link); int avfilter_poll_frame(AVFilterLink *link);
/** attribute_deprecated
* Notify the next filter of the start of a frame.
*
* @param link the output link the frame will be sent over
* @param picref A reference to the frame about to be sent. The data for this
* frame need only be valid once draw_slice() is called for that
* portion. The receiving filter will free this reference when
* it no longer needs it.
*/
void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref); void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
/** /**
@ -867,24 +841,11 @@ void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
* *
* @param link the output link the frame was sent over * @param link the output link the frame was sent over
*/ */
attribute_deprecated
void avfilter_end_frame(AVFilterLink *link); void avfilter_end_frame(AVFilterLink *link);
attribute_deprecated
/**
* Send a slice to the next filter.
*
* Slices have to be provided in sequential order, either in
* top-bottom or bottom-top order. If slices are provided in
* non-sequential order the behavior of the function is undefined.
*
* @param link the output link over which the frame is being sent
* @param y offset in pixels from the top of the image for this slice
* @param h height of this slice in pixels
* @param slice_dir the assumed direction for sending slices,
* from the top slice to the bottom slice if the value is 1,
* from the bottom slice to the top slice if the value is -1,
* for other values the behavior of the function is undefined.
*/
void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir); void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
#endif
#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically #define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically
#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when its fast (like a video out that supports contrast adjustment in hw) #define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when its fast (like a video out that supports contrast adjustment in hw)
@ -972,37 +933,18 @@ void avfilter_free(AVFilterContext *filter);
int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
unsigned filt_srcpad_idx, unsigned filt_dstpad_idx); unsigned filt_srcpad_idx, unsigned filt_dstpad_idx);
/** #if FF_API_FILTERS_PUBLIC
* Insert a new pad. attribute_deprecated
*
* @param idx Insertion point. Pad is inserted at the end if this point
* is beyond the end of the list of pads.
* @param count Pointer to the number of pads in the list
* @param padidx_off Offset within an AVFilterLink structure to the element
* to increment when inserting a new pad causes link
* numbering to change
* @param pads Pointer to the pointer to the beginning of the list of pads
* @param links Pointer to the pointer to the beginning of the list of links
* @param newpad The new pad to add. A copy is made when adding.
*/
void avfilter_insert_pad(unsigned idx, unsigned *count, size_t padidx_off, void avfilter_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
AVFilterPad **pads, AVFilterLink ***links, AVFilterPad **pads, AVFilterLink ***links,
AVFilterPad *newpad); AVFilterPad *newpad);
/** Insert a new input pad for the filter. */ attribute_deprecated
static inline void avfilter_insert_inpad(AVFilterContext *f, unsigned index, void avfilter_insert_inpad(AVFilterContext *f, unsigned index,
AVFilterPad *p) AVFilterPad *p);
{ attribute_deprecated
avfilter_insert_pad(index, &f->input_count, offsetof(AVFilterLink, dstpad), void avfilter_insert_outpad(AVFilterContext *f, unsigned index,
&f->input_pads, &f->inputs, p); AVFilterPad *p);
} #endif
/** Insert a new output pad for the filter. */
static inline void avfilter_insert_outpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
avfilter_insert_pad(index, &f->output_count, offsetof(AVFilterLink, srcpad),
&f->output_pads, &f->outputs, p);
}
#endif /* AVFILTER_AVFILTER_H */ #endif /* AVFILTER_AVFILTER_H */

View File

@ -324,7 +324,7 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
continue; continue;
if (link->in_formats != link->out_formats && if (link->in_formats != link->out_formats &&
!avfilter_merge_formats(link->in_formats, !ff_merge_formats(link->in_formats,
link->out_formats)) link->out_formats))
convert_needed = 1; convert_needed = 1;
if (link->type == AVMEDIA_TYPE_AUDIO) { if (link->type == AVMEDIA_TYPE_AUDIO) {
@ -381,8 +381,8 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
filter_query_formats(convert); filter_query_formats(convert);
inlink = convert->inputs[0]; inlink = convert->inputs[0];
outlink = convert->outputs[0]; outlink = convert->outputs[0];
if (!avfilter_merge_formats( inlink->in_formats, inlink->out_formats) || if (!ff_merge_formats( inlink->in_formats, inlink->out_formats) ||
!avfilter_merge_formats(outlink->in_formats, outlink->out_formats)) !ff_merge_formats(outlink->in_formats, outlink->out_formats))
ret |= AVERROR(ENOSYS); ret |= AVERROR(ENOSYS);
if (inlink->type == AVMEDIA_TYPE_AUDIO && if (inlink->type == AVMEDIA_TYPE_AUDIO &&
(!ff_merge_samplerates(inlink->in_samplerates, (!ff_merge_samplerates(inlink->in_samplerates,
@ -452,10 +452,10 @@ static int pick_format(AVFilterLink *link, AVFilterLink *ref)
link->channel_layout = link->in_channel_layouts->channel_layouts[0]; link->channel_layout = link->in_channel_layouts->channel_layouts[0];
} }
avfilter_formats_unref(&link->in_formats); ff_formats_unref(&link->in_formats);
avfilter_formats_unref(&link->out_formats); ff_formats_unref(&link->out_formats);
avfilter_formats_unref(&link->in_samplerates); ff_formats_unref(&link->in_samplerates);
avfilter_formats_unref(&link->out_samplerates); ff_formats_unref(&link->out_samplerates);
ff_channel_layouts_unref(&link->in_channel_layouts); ff_channel_layouts_unref(&link->in_channel_layouts);
ff_channel_layouts_unref(&link->out_channel_layouts); ff_channel_layouts_unref(&link->out_channel_layouts);
@ -502,9 +502,9 @@ static int reduce_formats_on_filter(AVFilterContext *filter)
int i, j, k, ret = 0; int i, j, k, ret = 0;
REDUCE_FORMATS(int, AVFilterFormats, formats, formats, REDUCE_FORMATS(int, AVFilterFormats, formats, formats,
format_count, avfilter_add_format); format_count, ff_add_format);
REDUCE_FORMATS(int, AVFilterFormats, samplerates, formats, REDUCE_FORMATS(int, AVFilterFormats, samplerates, formats,
format_count, avfilter_add_format); format_count, ff_add_format);
REDUCE_FORMATS(uint64_t, AVFilterChannelLayouts, channel_layouts, REDUCE_FORMATS(uint64_t, AVFilterChannelLayouts, channel_layouts,
channel_layouts, nb_channel_layouts, ff_add_channel_layout); channel_layouts, nb_channel_layouts, ff_add_channel_layout);

View File

@ -31,6 +31,7 @@
#include "audio.h" #include "audio.h"
#include "avfilter.h" #include "avfilter.h"
#include "buffersink.h" #include "buffersink.h"
#include "internal.h"
typedef struct { typedef struct {
AVFifoBuffer *fifo; ///< FIFO buffer of frame references AVFifoBuffer *fifo; ///< FIFO buffer of frame references
@ -102,11 +103,11 @@ int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
if (av_fifo_size(sink->fifo)) if (av_fifo_size(sink->fifo))
return av_fifo_size(sink->fifo)/sizeof(*buf); return av_fifo_size(sink->fifo)/sizeof(*buf);
else else
return avfilter_poll_frame(ctx->inputs[0]); return ff_poll_frame(ctx->inputs[0]);
} }
if (!av_fifo_size(sink->fifo) && if (!av_fifo_size(sink->fifo) &&
(ret = avfilter_request_frame(link)) < 0) (ret = ff_request_frame(link)) < 0)
return ret; return ret;
if (!av_fifo_size(sink->fifo)) if (!av_fifo_size(sink->fifo))

View File

@ -28,6 +28,7 @@
#include "buffersrc.h" #include "buffersrc.h"
#include "formats.h" #include "formats.h"
#include "internal.h" #include "internal.h"
#include "video.h"
#include "vsrc_buffer.h" #include "vsrc_buffer.h"
#include "avcodec.h" #include "avcodec.h"
@ -328,14 +329,14 @@ static int query_formats(AVFilterContext *ctx)
switch (ctx->outputs[0]->type) { switch (ctx->outputs[0]->type) {
case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_VIDEO:
avfilter_add_format(&formats, c->pix_fmt); ff_add_format(&formats, c->pix_fmt);
avfilter_set_common_formats(ctx, formats); ff_set_common_formats(ctx, formats);
break; break;
case AVMEDIA_TYPE_AUDIO: case AVMEDIA_TYPE_AUDIO:
avfilter_add_format(&formats, c->sample_fmt); ff_add_format(&formats, c->sample_fmt);
avfilter_set_common_formats(ctx, formats); ff_set_common_formats(ctx, formats);
avfilter_add_format(&samplerates, c->sample_rate); ff_add_format(&samplerates, c->sample_rate);
ff_set_common_samplerates(ctx, samplerates); ff_set_common_samplerates(ctx, samplerates);
ff_add_channel_layout(&channel_layouts, c->channel_layout); ff_add_channel_layout(&channel_layouts, c->channel_layout);
@ -385,9 +386,9 @@ static int request_frame(AVFilterLink *link)
switch (link->type) { switch (link->type) {
case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_VIDEO:
avfilter_start_frame(link, avfilter_ref_buffer(buf, ~0)); ff_start_frame(link, avfilter_ref_buffer(buf, ~0));
avfilter_draw_slice(link, 0, link->h, 1); ff_draw_slice(link, 0, link->h, 1);
avfilter_end_frame(link); ff_end_frame(link);
break; break;
case AVMEDIA_TYPE_AUDIO: case AVMEDIA_TYPE_AUDIO:
ff_filter_samples(link, avfilter_ref_buffer(buf, ~0)); ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));

View File

@ -85,7 +85,7 @@ do {
MERGE_REF(ret, b, fmts, type, fail); \ MERGE_REF(ret, b, fmts, type, fail); \
} while (0) } while (0)
AVFilterFormats *avfilter_merge_formats(AVFilterFormats *a, AVFilterFormats *b) AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b)
{ {
AVFilterFormats *ret = NULL; AVFilterFormats *ret = NULL;
@ -213,7 +213,7 @@ int64_t *ff_copy_int64_list(const int64_t * const list)
} \ } \
} }
AVFilterFormats *avfilter_make_format_list(const int *fmts) AVFilterFormats *ff_make_format_list(const int *fmts)
{ {
MAKE_FORMAT_LIST(AVFilterFormats, formats, format_count); MAKE_FORMAT_LIST(AVFilterFormats, formats, format_count);
while (count--) while (count--)
@ -250,7 +250,7 @@ do { \
return 0; \ return 0; \
} while (0) } while (0)
int avfilter_add_format(AVFilterFormats **avff, int64_t fmt) int ff_add_format(AVFilterFormats **avff, int64_t fmt)
{ {
ADD_FORMAT(avff, fmt, int, formats, format_count); ADD_FORMAT(avff, fmt, int, formats, format_count);
} }
@ -260,12 +260,10 @@ int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
ADD_FORMAT(l, channel_layout, uint64_t, channel_layouts, nb_channel_layouts); ADD_FORMAT(l, channel_layout, uint64_t, channel_layouts, nb_channel_layouts);
} }
#if FF_API_OLD_ALL_FORMATS_API AVFilterFormats *ff_all_formats(enum AVMediaType type)
AVFilterFormats *avfilter_all_formats(enum AVMediaType type)
{ {
return avfilter_make_all_formats(type); return avfilter_make_all_formats(type);
} }
#endif
AVFilterFormats *avfilter_make_all_formats(enum AVMediaType type) AVFilterFormats *avfilter_make_all_formats(enum AVMediaType type)
{ {
@ -277,7 +275,7 @@ AVFilterFormats *avfilter_make_all_formats(enum AVMediaType type)
for (fmt = 0; fmt < num_formats; fmt++) for (fmt = 0; fmt < num_formats; fmt++)
if ((type != AVMEDIA_TYPE_VIDEO) || if ((type != AVMEDIA_TYPE_VIDEO) ||
(type == AVMEDIA_TYPE_VIDEO && !(av_pix_fmt_descriptors[fmt].flags & PIX_FMT_HWACCEL))) (type == AVMEDIA_TYPE_VIDEO && !(av_pix_fmt_descriptors[fmt].flags & PIX_FMT_HWACCEL)))
avfilter_add_format(&ret, fmt); ff_add_format(&ret, fmt);
return ret; return ret;
} }
@ -329,7 +327,7 @@ void ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **
FORMATS_REF(f, ref); FORMATS_REF(f, ref);
} }
void avfilter_formats_ref(AVFilterFormats *f, AVFilterFormats **ref) void ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
{ {
FORMATS_REF(f, ref); FORMATS_REF(f, ref);
} }
@ -365,7 +363,7 @@ do { \
*ref = NULL; \ *ref = NULL; \
} while (0) } while (0)
void avfilter_formats_unref(AVFilterFormats **ref) void ff_formats_unref(AVFilterFormats **ref)
{ {
FORMATS_UNREF(ref, formats); FORMATS_UNREF(ref, formats);
} }
@ -394,8 +392,7 @@ void ff_channel_layouts_changeref(AVFilterChannelLayouts **oldref,
FORMATS_CHANGEREF(oldref, newref); FORMATS_CHANGEREF(oldref, newref);
} }
void avfilter_formats_changeref(AVFilterFormats **oldref, void ff_formats_changeref(AVFilterFormats **oldref, AVFilterFormats **newref)
AVFilterFormats **newref)
{ {
FORMATS_CHANGEREF(oldref, newref); FORMATS_CHANGEREF(oldref, newref);
} }
@ -435,7 +432,7 @@ void ff_set_common_samplerates(AVFilterContext *ctx,
AVFilterFormats *samplerates) AVFilterFormats *samplerates)
{ {
SET_COMMON_FORMATS(ctx, samplerates, in_samplerates, out_samplerates, SET_COMMON_FORMATS(ctx, samplerates, in_samplerates, out_samplerates,
avfilter_formats_ref, formats); ff_formats_ref, formats);
} }
/** /**
@ -443,10 +440,10 @@ void ff_set_common_samplerates(AVFilterContext *ctx,
* formats. If there are no links hooked to this filter, the list of formats is * formats. If there are no links hooked to this filter, the list of formats is
* freed. * freed.
*/ */
void avfilter_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats) void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
{ {
SET_COMMON_FORMATS(ctx, formats, in_formats, out_formats, SET_COMMON_FORMATS(ctx, formats, in_formats, out_formats,
avfilter_formats_ref, formats); ff_formats_ref, formats);
} }
int ff_default_query_formats(AVFilterContext *ctx) int ff_default_query_formats(AVFilterContext *ctx)
@ -455,7 +452,7 @@ int ff_default_query_formats(AVFilterContext *ctx)
ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type : ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type :
AVMEDIA_TYPE_VIDEO; AVMEDIA_TYPE_VIDEO;
avfilter_set_common_formats(ctx, avfilter_all_formats(type)); ff_set_common_formats(ctx, ff_all_formats(type));
if (type == AVMEDIA_TYPE_AUDIO) { if (type == AVMEDIA_TYPE_AUDIO) {
ff_set_common_channel_layouts(ctx, ff_all_channel_layouts()); ff_set_common_channel_layouts(ctx, ff_all_channel_layouts());
ff_set_common_samplerates(ctx, ff_all_samplerates()); ff_set_common_samplerates(ctx, ff_all_samplerates());
@ -539,6 +536,39 @@ int avfilter_default_query_formats(AVFilterContext *ctx)
{ {
return ff_default_query_formats(ctx); return ff_default_query_formats(ctx);
} }
void avfilter_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
{
ff_set_common_formats(ctx, formats);
}
AVFilterFormats *avfilter_make_format_list(const int *fmts)
{
return ff_make_format_list(fmts);
}
int avfilter_add_format(AVFilterFormats **avff, int64_t fmt)
{
return ff_add_format(avff, fmt);
}
AVFilterFormats *avfilter_all_formats(enum AVMediaType type)
{
return ff_all_formats(type);
}
AVFilterFormats *avfilter_merge_formats(AVFilterFormats *a, AVFilterFormats *b)
{
return ff_merge_formats(a, b);
}
void avfilter_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
{
ff_formats_ref(f, ref);
}
void avfilter_formats_unref(AVFilterFormats **ref)
{
ff_formats_unref(ref);
}
void avfilter_formats_changeref(AVFilterFormats **oldref,
AVFilterFormats **newref)
{
ff_formats_changeref(oldref, newref);
}
#endif #endif
#ifdef TEST #ifdef TEST

View File

@ -21,6 +21,56 @@
#include "avfilter.h" #include "avfilter.h"
/**
* A list of supported formats for one end of a filter link. This is used
* during the format negotiation process to try to pick the best format to
* use to minimize the number of necessary conversions. Each filter gives a
* list of the formats supported by each input and output pad. The list
* given for each pad need not be distinct - they may be references to the
* same list of formats, as is often the case when a filter supports multiple
* formats, but will always output the same format as it is given in input.
*
* In this way, a list of possible input formats and a list of possible
* output formats are associated with each link. When a set of formats is
* negotiated over a link, the input and output lists are merged to form a
* new list containing only the common elements of each list. In the case
* that there were no common elements, a format conversion is necessary.
* Otherwise, the lists are merged, and all other links which reference
* either of the format lists involved in the merge are also affected.
*
* For example, consider the filter chain:
* filter (a) --> (b) filter (b) --> (c) filter
*
* where the letters in parenthesis indicate a list of formats supported on
* the input or output of the link. Suppose the lists are as follows:
* (a) = {A, B}
* (b) = {A, B, C}
* (c) = {B, C}
*
* First, the first link's lists are merged, yielding:
* filter (a) --> (a) filter (a) --> (c) filter
*
* Notice that format list (b) now refers to the same list as filter list (a).
* Next, the lists for the second link are merged, yielding:
* filter (a) --> (a) filter (a) --> (a) filter
*
* where (a) = {B}.
*
* Unfortunately, when the format lists at the two ends of a link are merged,
* we must ensure that all links which reference either pre-merge format list
* get updated as well. Therefore, we have the format list structure store a
* pointer to each of the pointers to itself.
*/
#if !FF_API_FILTERS_PUBLIC
struct AVFilterFormats {
unsigned format_count; ///< number of formats
int *formats; ///< list of media formats
unsigned refcount; ///< number of references to this list
struct AVFilterFormats ***refs; ///< references to this list
};
#endif
typedef struct AVFilterChannelLayouts { typedef struct AVFilterChannelLayouts {
uint64_t *channel_layouts; ///< list of channel layouts uint64_t *channel_layouts; ///< list of channel layouts
int nb_channel_layouts; ///< number of channel layouts int nb_channel_layouts; ///< number of channel layouts
@ -62,6 +112,13 @@ void ff_set_common_channel_layouts(AVFilterContext *ctx,
void ff_set_common_samplerates(AVFilterContext *ctx, void ff_set_common_samplerates(AVFilterContext *ctx,
AVFilterFormats *samplerates); AVFilterFormats *samplerates);
/**
* A helper for query_formats() which sets all links to the same list of
* formats. If there are no links hooked to this filter, the list of formats is
* freed.
*/
void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats);
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout); int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout);
/** /**
@ -80,4 +137,85 @@ void ff_channel_layouts_changeref(AVFilterChannelLayouts **oldref,
int ff_default_query_formats(AVFilterContext *ctx); int ff_default_query_formats(AVFilterContext *ctx);
/**
* Create a list of supported formats. This is intended for use in
* AVFilter->query_formats().
*
* @param fmts list of media formats, terminated by -1
* @return the format list, with no existing references
*/
AVFilterFormats *ff_make_format_list(const int *fmts);
/**
* Add fmt to the list of media formats contained in *avff.
* If *avff is NULL the function allocates the filter formats struct
* and puts its pointer in *avff.
*
* @return a non negative value in case of success, or a negative
* value corresponding to an AVERROR code in case of error
*/
int ff_add_format(AVFilterFormats **avff, int64_t fmt);
/**
* Return a list of all formats supported by Libav for the given media type.
*/
AVFilterFormats *ff_all_formats(enum AVMediaType type);
/**
* Return a format list which contains the intersection of the formats of
* a and b. Also, all the references of a, all the references of b, and
* a and b themselves will be deallocated.
*
* If a and b do not share any common formats, neither is modified, and NULL
* is returned.
*/
AVFilterFormats *ff_merge_formats(AVFilterFormats *a, AVFilterFormats *b);
/**
* Add *ref as a new reference to formats.
* That is the pointers will point like in the ascii art below:
* ________
* |formats |<--------.
* | ____ | ____|___________________
* | |refs| | | __|_
* | |* * | | | | | | AVFilterLink
* | |* *--------->|*ref|
* | |____| | | |____|
* |________| |________________________
*/
void ff_formats_ref(AVFilterFormats *formats, AVFilterFormats **ref);
/**
* If *ref is non-NULL, remove *ref as a reference to the format list
* it currently points to, deallocates that list if this was the last
* reference, and sets *ref to NULL.
*
* Before After
* ________ ________ NULL
* |formats |<--------. |formats | ^
* | ____ | ____|________________ | ____ | ____|________________
* | |refs| | | __|_ | |refs| | | __|_
* | |* * | | | | | | AVFilterLink | |* * | | | | | | AVFilterLink
* | |* *--------->|*ref| | |* | | | |*ref|
* | |____| | | |____| | |____| | | |____|
* |________| |_____________________ |________| |_____________________
*/
void ff_formats_unref(AVFilterFormats **ref);
/**
*
* Before After
* ________ ________
* |formats |<---------. |formats |<---------.
* | ____ | ___|___ | ____ | ___|___
* | |refs| | | | | | |refs| | | | | NULL
* | |* *--------->|*oldref| | |* *--------->|*newref| ^
* | |* * | | |_______| | |* * | | |_______| ___|___
* | |____| | | |____| | | | |
* |________| |________| |*oldref|
* |_______|
*/
void ff_formats_changeref(AVFilterFormats **oldref, AVFilterFormats **newref);
#endif // AVFILTER_FORMATS_H #endif // AVFILTER_FORMATS_H

View File

@ -144,4 +144,54 @@ void ff_dlog_ref(void *ctx, AVFilterBufferRef *ref, int end);
void ff_dlog_link(void *ctx, AVFilterLink *link, int end); void ff_dlog_link(void *ctx, AVFilterLink *link, int end);
/**
* Insert a new pad.
*
* @param idx Insertion point. Pad is inserted at the end if this point
* is beyond the end of the list of pads.
* @param count Pointer to the number of pads in the list
* @param padidx_off Offset within an AVFilterLink structure to the element
* to increment when inserting a new pad causes link
* numbering to change
* @param pads Pointer to the pointer to the beginning of the list of pads
* @param links Pointer to the pointer to the beginning of the list of links
* @param newpad The new pad to add. A copy is made when adding.
*/
void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
AVFilterPad **pads, AVFilterLink ***links,
AVFilterPad *newpad);
/** Insert a new input pad for the filter. */
static inline void ff_insert_inpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
ff_insert_pad(index, &f->input_count, offsetof(AVFilterLink, dstpad),
&f->input_pads, &f->inputs, p);
}
/** Insert a new output pad for the filter. */
static inline void ff_insert_outpad(AVFilterContext *f, unsigned index,
AVFilterPad *p)
{
ff_insert_pad(index, &f->output_count, offsetof(AVFilterLink, srcpad),
&f->output_pads, &f->outputs, p);
}
/**
* Poll a frame from the filter chain.
*
* @param link the input link
* @return the number of immediately available frames, a negative
* number in case of error
*/
int ff_poll_frame(AVFilterLink *link);
/**
* Request an input frame from the filter at the other end of the link.
*
* @param link the input link
* @return zero on success
*/
int ff_request_frame(AVFilterLink *link);
#endif /* AVFILTER_INTERNAL_H */ #endif /* AVFILTER_INTERNAL_H */
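A typical consumer of the pad helpers above is a filter init() callback that creates one pad per configured stream, as the split filter below now does with ff_insert_outpad(). A hedged sketch with simplified name handling (helper name invented):

#include "libavutil/mem.h"
#include "avfilter.h"
#include "internal.h"

/* Hypothetical helper: add nb_outputs output pads of the same media type as
 * the filter's first input pad. */
static int add_outputs(AVFilterContext *ctx, int nb_outputs)
{
    int i;

    for (i = 0; i < nb_outputs; i++) {
        AVFilterPad pad = { 0 };

        pad.type = ctx->filter->inputs[0].type;  /* mirror the input media type */
        pad.name = av_strdup("output");          /* the real filter numbers each name */
        if (!pad.name)
            return AVERROR(ENOMEM);

        ff_insert_outpad(ctx, i, &pad);
    }
    return 0;
}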

View File

@ -25,6 +25,7 @@
#include "avfilter.h" #include "avfilter.h"
#include "audio.h" #include "audio.h"
#include "internal.h"
#include "video.h" #include "video.h"
static int split_init(AVFilterContext *ctx, const char *args, void *opaque) static int split_init(AVFilterContext *ctx, const char *args, void *opaque)
@ -48,7 +49,7 @@ static int split_init(AVFilterContext *ctx, const char *args, void *opaque)
pad.type = ctx->filter->inputs[0].type; pad.type = ctx->filter->inputs[0].type;
pad.name = av_strdup(name); pad.name = av_strdup(name);
avfilter_insert_outpad(ctx, i, &pad); ff_insert_outpad(ctx, i, &pad);
} }
return 0; return 0;
@ -68,8 +69,8 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
int i; int i;
for (i = 0; i < ctx->output_count; i++) for (i = 0; i < ctx->output_count; i++)
avfilter_start_frame(ctx->outputs[i], ff_start_frame(ctx->outputs[i],
avfilter_ref_buffer(picref, ~AV_PERM_WRITE)); avfilter_ref_buffer(picref, ~AV_PERM_WRITE));
} }
static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
@ -78,7 +79,7 @@ static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
int i; int i;
for (i = 0; i < ctx->output_count; i++) for (i = 0; i < ctx->output_count; i++)
avfilter_draw_slice(ctx->outputs[i], y, h, slice_dir); ff_draw_slice(ctx->outputs[i], y, h, slice_dir);
} }
static void end_frame(AVFilterLink *inlink) static void end_frame(AVFilterLink *inlink)
@ -87,7 +88,7 @@ static void end_frame(AVFilterLink *inlink)
int i; int i;
for (i = 0; i < ctx->output_count; i++) for (i = 0; i < ctx->output_count; i++)
avfilter_end_frame(ctx->outputs[i]); ff_end_frame(ctx->outputs[i]);
avfilter_unref_buffer(inlink->cur_buf); avfilter_unref_buffer(inlink->cur_buf);
} }

View File

@ -39,6 +39,7 @@
#include "avcodec.h" #include "avcodec.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h" #include "formats.h"
#include "video.h"
typedef struct { typedef struct {
/* common A/V fields */ /* common A/V fields */
@ -219,7 +220,7 @@ static int movie_query_formats(AVFilterContext *ctx)
MovieContext *movie = ctx->priv; MovieContext *movie = ctx->priv;
enum PixelFormat pix_fmts[] = { movie->codec_ctx->pix_fmt, PIX_FMT_NONE }; enum PixelFormat pix_fmts[] = { movie->codec_ctx->pix_fmt, PIX_FMT_NONE };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -318,9 +319,9 @@ static int movie_request_frame(AVFilterLink *outlink)
return ret; return ret;
outpicref = avfilter_ref_buffer(movie->picref, ~0); outpicref = avfilter_ref_buffer(movie->picref, ~0);
avfilter_start_frame(outlink, outpicref); ff_start_frame(outlink, outpicref);
avfilter_draw_slice(outlink, 0, outlink->h, 1); ff_draw_slice(outlink, 0, outlink->h, 1);
avfilter_end_frame(outlink); ff_end_frame(outlink);
avfilter_unref_buffer(movie->picref); avfilter_unref_buffer(movie->picref);
movie->picref = NULL; movie->picref = NULL;


@ -55,7 +55,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
AspectContext *aspect = link->dst->priv; AspectContext *aspect = link->dst->priv;
picref->video->sample_aspect_ratio = aspect->ratio; picref->video->sample_aspect_ratio = aspect->ratio;
avfilter_start_frame(link->dst->outputs[0], picref); ff_start_frame(link->dst->outputs[0], picref);
} }
#if CONFIG_SETDAR_FILTER #if CONFIG_SETDAR_FILTER


@ -29,6 +29,7 @@
#include "avfilter.h" #include "avfilter.h"
#include "internal.h" #include "internal.h"
#include "formats.h"
#include "video.h" #include "video.h"
typedef struct { typedef struct {
@ -47,7 +48,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_NONE PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -89,7 +90,7 @@ static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
p += picref->linesize[0]; p += picref->linesize[0];
} }
avfilter_draw_slice(ctx->outputs[0], y, h, slice_dir); ff_draw_slice(ctx->outputs[0], y, h, slice_dir);
} }
static void end_frame(AVFilterLink *inlink) static void end_frame(AVFilterLink *inlink)
@ -113,7 +114,7 @@ static void end_frame(AVFilterLink *inlink)
blackframe->frame++; blackframe->frame++;
blackframe->nblack = 0; blackframe->nblack = 0;
avfilter_unref_buffer(picref); avfilter_unref_buffer(picref);
avfilter_end_frame(inlink->dst->outputs[0]); ff_end_frame(inlink->dst->outputs[0]);
} }
AVFilter avfilter_vf_blackframe = { AVFilter avfilter_vf_blackframe = {


@ -29,6 +29,8 @@
#include "libavutil/eval.h" #include "libavutil/eval.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h"
static const char *const var_names[] = { static const char *const var_names[] = {
"w", "w",
@ -129,7 +131,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_NONE PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -324,7 +326,7 @@ static void end_frame(AVFilterLink *inlink)
w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane], w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
boxblur->temp); boxblur->temp);
avfilter_draw_slice(outlink, 0, inlink->h, 1); ff_draw_slice(outlink, 0, inlink->h, 1);
avfilter_default_end_frame(inlink); avfilter_default_end_frame(inlink);
} }


@ -26,6 +26,7 @@
/* #define DEBUG */ /* #define DEBUG */
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h" #include "video.h"
#include "libavutil/eval.h" #include "libavutil/eval.h"
#include "libavutil/avstring.h" #include "libavutil/avstring.h"
@ -113,7 +114,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_NONE PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -308,7 +309,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
ref2->data[3] += crop->x * crop->max_step[3]; ref2->data[3] += crop->x * crop->max_step[3];
} }
avfilter_start_frame(link->dst->outputs[0], ref2); ff_start_frame(link->dst->outputs[0], ref2);
} }
static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir) static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
@ -326,7 +327,7 @@ static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
if (y + h > crop->y + crop->h) if (y + h > crop->y + crop->h)
h = crop->y + crop->h - y; h = crop->y + crop->h - y;
avfilter_draw_slice(ctx->outputs[0], y - crop->y, h, slice_dir); ff_draw_slice(ctx->outputs[0], y - crop->y, h, slice_dir);
} }
static void end_frame(AVFilterLink *link) static void end_frame(AVFilterLink *link)
@ -335,7 +336,7 @@ static void end_frame(AVFilterLink *link)
crop->var_values[VAR_N] += 1.0; crop->var_values[VAR_N] += 1.0;
avfilter_unref_buffer(link->cur_buf); avfilter_unref_buffer(link->cur_buf);
avfilter_end_frame(link->dst->outputs[0]); ff_end_frame(link->dst->outputs[0]);
} }
AVFilter avfilter_vf_crop = { AVFilter avfilter_vf_crop = {


@ -25,6 +25,7 @@
#include "libavutil/imgutils.h" #include "libavutil/imgutils.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h" #include "video.h"
typedef struct { typedef struct {
@ -47,7 +48,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_NONE PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -189,7 +190,7 @@ static void end_frame(AVFilterLink *inlink)
w, h, x, y); w, h, x, y);
} }
avfilter_end_frame(inlink->dst->outputs[0]); ff_end_frame(inlink->dst->outputs[0]);
} }
AVFilter avfilter_vf_cropdetect = { AVFilter avfilter_vf_cropdetect = {


@ -29,6 +29,7 @@
#include "libavutil/opt.h" #include "libavutil/opt.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h" #include "video.h"
/** /**
@ -164,7 +165,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_NONE PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -226,7 +227,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
outpicref = inpicref; outpicref = inpicref;
outlink->out_buf = outpicref; outlink->out_buf = outpicref;
avfilter_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0)); ff_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0));
} }
static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { } static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { }
@ -255,8 +256,8 @@ static void end_frame(AVFilterLink *inlink)
delogo->show, direct); delogo->show, direct);
} }
avfilter_draw_slice(outlink, 0, inlink->h, 1); ff_draw_slice(outlink, 0, inlink->h, 1);
avfilter_end_frame(outlink); ff_end_frame(outlink);
avfilter_unref_buffer(inpicref); avfilter_unref_buffer(inpicref);
if (!direct) if (!direct)
avfilter_unref_buffer(outpicref); avfilter_unref_buffer(outpicref);


@ -28,6 +28,7 @@
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "libavutil/parseutils.h" #include "libavutil/parseutils.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h" #include "video.h"
enum { Y, U, V, A }; enum { Y, U, V, A };
@ -71,7 +72,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_NONE PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -118,7 +119,7 @@ static void draw_slice(AVFilterLink *inlink, int y0, int h, int slice_dir)
} }
} }
avfilter_draw_slice(inlink->dst->outputs[0], y0, h, 1); ff_draw_slice(inlink->dst->outputs[0], y0, h, 1);
} }
AVFilter avfilter_vf_drawbox = { AVFilter avfilter_vf_drawbox = {


@ -41,6 +41,7 @@
#include "libavutil/lfg.h" #include "libavutil/lfg.h"
#include "avfilter.h" #include "avfilter.h"
#include "drawutils.h" #include "drawutils.h"
#include "formats.h"
#include "video.h" #include "video.h"
#undef time #undef time
@ -492,7 +493,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
static int query_formats(AVFilterContext *ctx) static int query_formats(AVFilterContext *ctx)
{ {
avfilter_set_common_pixel_formats(ctx, ff_draw_supported_pixel_formats(0)); ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
return 0; return 0;
} }
@ -812,8 +813,8 @@ static void end_frame(AVFilterLink *inlink)
dtext->var_values[VAR_N] += 1.0; dtext->var_values[VAR_N] += 1.0;
avfilter_draw_slice(outlink, 0, picref->video->h, 1); ff_draw_slice(outlink, 0, picref->video->h, 1);
avfilter_end_frame(outlink); ff_end_frame(outlink);
} }
AVFilter avfilter_vf_drawtext = { AVFilter avfilter_vf_drawtext = {


@ -32,6 +32,7 @@
#include "avfilter.h" #include "avfilter.h"
#include "drawutils.h" #include "drawutils.h"
#include "internal.h" #include "internal.h"
#include "formats.h"
#include "video.h" #include "video.h"
#define R 0 #define R 0
@ -158,7 +159,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_NONE PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -257,14 +258,14 @@ static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
} }
} }
avfilter_draw_slice(inlink->dst->outputs[0], y, h, slice_dir); ff_draw_slice(inlink->dst->outputs[0], y, h, slice_dir);
} }
static void end_frame(AVFilterLink *inlink) static void end_frame(AVFilterLink *inlink)
{ {
FadeContext *fade = inlink->dst->priv; FadeContext *fade = inlink->dst->priv;
avfilter_end_frame(inlink->dst->outputs[0]); ff_end_frame(inlink->dst->outputs[0]);
if (fade->frame_index >= fade->start_frame && if (fade->frame_index >= fade->start_frame &&
fade->frame_index <= fade->stop_frame) fade->frame_index <= fade->stop_frame)


@ -28,6 +28,8 @@
#include "libavutil/imgutils.h" #include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h"
typedef struct typedef struct
{ {
@ -76,12 +78,12 @@ static int query_formats(AVFilterContext *ctx)
|| av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_BITSTREAM) || av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_BITSTREAM)
&& av_pix_fmt_descriptors[pix_fmt].nb_components && av_pix_fmt_descriptors[pix_fmt].nb_components
&& !av_pix_fmt_descriptors[pix_fmt].log2_chroma_h && !av_pix_fmt_descriptors[pix_fmt].log2_chroma_h
&& (ret = avfilter_add_format(&formats, pix_fmt)) < 0) { && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
avfilter_formats_unref(&formats); ff_formats_unref(&formats);
return ret; return ret;
} }
avfilter_formats_ref(formats, &ctx->inputs[0]->out_formats); ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
avfilter_formats_ref(formats, &ctx->outputs[0]->in_formats); ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
} }
return 0; return 0;
@ -123,7 +125,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
outpicref = avfilter_ref_buffer(inpicref, ~0); outpicref = avfilter_ref_buffer(inpicref, ~0);
outlink->out_buf = outpicref; outlink->out_buf = outpicref;
avfilter_start_frame(outlink, outpicref); ff_start_frame(outlink, outpicref);
} }
static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
@ -140,7 +142,7 @@ static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
* and that complexity will be added later */ * and that complexity will be added later */
if ( !inpicref->video->interlaced if ( !inpicref->video->interlaced
|| inpicref->video->top_field_first == fieldorder->dst_tff) { || inpicref->video->top_field_first == fieldorder->dst_tff) {
avfilter_draw_slice(outlink, y, h, slice_dir); ff_draw_slice(outlink, y, h, slice_dir);
} }
} }
@ -202,13 +204,13 @@ static void end_frame(AVFilterLink *inlink)
} }
} }
outpicref->video->top_field_first = fieldorder->dst_tff; outpicref->video->top_field_first = fieldorder->dst_tff;
avfilter_draw_slice(outlink, 0, h, 1); ff_draw_slice(outlink, 0, h, 1);
} else { } else {
av_dlog(ctx, av_dlog(ctx,
"not interlaced or field order already correct\n"); "not interlaced or field order already correct\n");
} }
avfilter_end_frame(outlink); ff_end_frame(outlink);
avfilter_unref_buffer(inpicref); avfilter_unref_buffer(inpicref);
} }


@ -24,6 +24,7 @@
*/ */
#include "avfilter.h" #include "avfilter.h"
#include "internal.h"
#include "video.h" #include "video.h"
typedef struct BufPic { typedef struct BufPic {
@ -83,9 +84,9 @@ static int request_frame(AVFilterLink *outlink)
/* by doing this, we give ownership of the reference to the next filter, /* by doing this, we give ownership of the reference to the next filter,
* so we don't have to worry about dereferencing it ourselves. */ * so we don't have to worry about dereferencing it ourselves. */
avfilter_start_frame(outlink, fifo->root.next->picref); ff_start_frame(outlink, fifo->root.next->picref);
avfilter_draw_slice (outlink, 0, outlink->h, 1); ff_draw_slice (outlink, 0, outlink->h, 1);
avfilter_end_frame (outlink); ff_end_frame (outlink);
if (fifo->last == fifo->root.next) if (fifo->last == fifo->root.next)
fifo->last = &fifo->root; fifo->last = &fifo->root;


@ -26,6 +26,7 @@
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "avfilter.h" #include "avfilter.h"
#include "internal.h" #include "internal.h"
#include "formats.h"
#include "video.h" #include "video.h"
typedef struct { typedef struct {
@ -87,7 +88,7 @@ static AVFilterFormats *make_format_list(FormatContext *format, int flag)
#if CONFIG_FORMAT_FILTER #if CONFIG_FORMAT_FILTER
static int query_formats_format(AVFilterContext *ctx) static int query_formats_format(AVFilterContext *ctx)
{ {
avfilter_set_common_pixel_formats(ctx, make_format_list(ctx->priv, 1)); ff_set_common_formats(ctx, make_format_list(ctx->priv, 1));
return 0; return 0;
} }
@ -117,7 +118,7 @@ AVFilter avfilter_vf_format = {
#if CONFIG_NOFORMAT_FILTER #if CONFIG_NOFORMAT_FILTER
static int query_formats_noformat(AVFilterContext *ctx) static int query_formats_noformat(AVFilterContext *ctx)
{ {
avfilter_set_common_pixel_formats(ctx, make_format_list(ctx->priv, 0)); ff_set_common_formats(ctx, make_format_list(ctx->priv, 0));
return 0; return 0;
} }


@ -31,6 +31,8 @@
#include "libavutil/parseutils.h" #include "libavutil/parseutils.h"
#include "avfilter.h" #include "avfilter.h"
#include "internal.h"
#include "video.h"
typedef struct FPSContext { typedef struct FPSContext {
const AVClass *class; const AVClass *class;
@ -133,7 +135,7 @@ static int request_frame(AVFilterLink *outlink)
int ret = 0; int ret = 0;
while (ret >= 0 && s->frames_out == frames_out) while (ret >= 0 && s->frames_out == frames_out)
ret = avfilter_request_frame(ctx->inputs[0]); ret = ff_request_frame(ctx->inputs[0]);
/* flush the fifo */ /* flush the fifo */
if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) { if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) {
@ -145,9 +147,9 @@ static int request_frame(AVFilterLink *outlink)
buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base, buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base,
outlink->time_base) + s->frames_out; outlink->time_base) + s->frames_out;
avfilter_start_frame(outlink, buf); ff_start_frame(outlink, buf);
avfilter_draw_slice(outlink, 0, outlink->h, 1); ff_draw_slice(outlink, 0, outlink->h, 1);
avfilter_end_frame(outlink); ff_end_frame(outlink);
s->frames_out++; s->frames_out++;
} }
return 0; return 0;
@ -233,9 +235,9 @@ static void end_frame(AVFilterLink *inlink)
buf_out->pts = av_rescale_q(s->first_pts, inlink->time_base, buf_out->pts = av_rescale_q(s->first_pts, inlink->time_base,
outlink->time_base) + s->frames_out; outlink->time_base) + s->frames_out;
avfilter_start_frame(outlink, buf_out); ff_start_frame(outlink, buf_out);
avfilter_draw_slice(outlink, 0, outlink->h, 1); ff_draw_slice(outlink, 0, outlink->h, 1);
avfilter_end_frame(outlink); ff_end_frame(outlink);
s->frames_out++; s->frames_out++;
} }
flush_fifo(s->fifo); flush_fifo(s->fifo);


@ -31,6 +31,8 @@
#include "libavutil/mathematics.h" #include "libavutil/mathematics.h"
#include "libavutil/parseutils.h" #include "libavutil/parseutils.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h"
typedef f0r_instance_t (*f0r_construct_f)(unsigned int width, unsigned int height); typedef f0r_instance_t (*f0r_construct_f)(unsigned int width, unsigned int height);
typedef void (*f0r_destruct_f)(f0r_instance_t instance); typedef void (*f0r_destruct_f)(f0r_instance_t instance);
@ -320,20 +322,20 @@ static int query_formats(AVFilterContext *ctx)
AVFilterFormats *formats = NULL; AVFilterFormats *formats = NULL;
if (frei0r->plugin_info.color_model == F0R_COLOR_MODEL_BGRA8888) { if (frei0r->plugin_info.color_model == F0R_COLOR_MODEL_BGRA8888) {
avfilter_add_format(&formats, PIX_FMT_BGRA); ff_add_format(&formats, PIX_FMT_BGRA);
} else if (frei0r->plugin_info.color_model == F0R_COLOR_MODEL_RGBA8888) { } else if (frei0r->plugin_info.color_model == F0R_COLOR_MODEL_RGBA8888) {
avfilter_add_format(&formats, PIX_FMT_RGBA); ff_add_format(&formats, PIX_FMT_RGBA);
} else { /* F0R_COLOR_MODEL_PACKED32 */ } else { /* F0R_COLOR_MODEL_PACKED32 */
static const enum PixelFormat pix_fmts[] = { static const enum PixelFormat pix_fmts[] = {
PIX_FMT_BGRA, PIX_FMT_ARGB, PIX_FMT_ABGR, PIX_FMT_ARGB, PIX_FMT_NONE PIX_FMT_BGRA, PIX_FMT_ARGB, PIX_FMT_ABGR, PIX_FMT_ARGB, PIX_FMT_NONE
}; };
formats = avfilter_make_format_list(pix_fmts); formats = ff_make_format_list(pix_fmts);
} }
if (!formats) if (!formats)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
avfilter_set_common_pixel_formats(ctx, formats); ff_set_common_formats(ctx, formats);
return 0; return 0;
} }
@ -350,8 +352,8 @@ static void end_frame(AVFilterLink *inlink)
(const uint32_t *)inpicref->data[0], (const uint32_t *)inpicref->data[0],
(uint32_t *)outpicref->data[0]); (uint32_t *)outpicref->data[0]);
avfilter_unref_buffer(inpicref); avfilter_unref_buffer(inpicref);
avfilter_draw_slice(outlink, 0, outlink->h, 1); ff_draw_slice(outlink, 0, outlink->h, 1);
avfilter_end_frame(outlink); ff_end_frame(outlink);
avfilter_unref_buffer(outpicref); avfilter_unref_buffer(outpicref);
} }
@ -436,11 +438,11 @@ static int source_request_frame(AVFilterLink *outlink)
picref->pts = frei0r->pts++; picref->pts = frei0r->pts++;
picref->pos = -1; picref->pos = -1;
avfilter_start_frame(outlink, avfilter_ref_buffer(picref, ~0)); ff_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
frei0r->update(frei0r->instance, av_rescale_q(picref->pts, frei0r->time_base, (AVRational){1,1000}), frei0r->update(frei0r->instance, av_rescale_q(picref->pts, frei0r->time_base, (AVRational){1,1000}),
NULL, (uint32_t *)picref->data[0]); NULL, (uint32_t *)picref->data[0]);
avfilter_draw_slice(outlink, 0, outlink->h, 1); ff_draw_slice(outlink, 0, outlink->h, 1);
avfilter_end_frame(outlink); ff_end_frame(outlink);
avfilter_unref_buffer(picref); avfilter_unref_buffer(picref);
return 0; return 0;


@ -36,7 +36,9 @@
#include "libavutil/cpu.h" #include "libavutil/cpu.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "gradfun.h" #include "gradfun.h"
#include "video.h"
DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = { DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = {
{0x00,0x60,0x18,0x78,0x06,0x66,0x1E,0x7E}, {0x00,0x60,0x18,0x78,0x06,0x66,0x1E,0x7E},
@ -160,7 +162,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_NONE PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -196,7 +198,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
outpicref = inpicref; outpicref = inpicref;
outlink->out_buf = outpicref; outlink->out_buf = outpicref;
avfilter_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0)); ff_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0));
} }
static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { } static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { }
@ -225,8 +227,8 @@ static void end_frame(AVFilterLink *inlink)
av_image_copy_plane(outpic->data[p], outpic->linesize[p], inpic->data[p], inpic->linesize[p], w, h); av_image_copy_plane(outpic->data[p], outpic->linesize[p], inpic->data[p], inpic->linesize[p], w, h);
} }
avfilter_draw_slice(outlink, 0, inlink->h, 1); ff_draw_slice(outlink, 0, inlink->h, 1);
avfilter_end_frame(outlink); ff_end_frame(outlink);
avfilter_unref_buffer(inpic); avfilter_unref_buffer(inpic);
if (outpic != inpic) if (outpic != inpic)
avfilter_unref_buffer(outpic); avfilter_unref_buffer(outpic);


@ -25,6 +25,8 @@
*/ */
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h" #include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h" #include "libavutil/imgutils.h"
@ -64,7 +66,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_NONE PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -156,7 +158,7 @@ static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
} }
} }
avfilter_draw_slice(inlink->dst->outputs[0], y, h, slice_dir); ff_draw_slice(inlink->dst->outputs[0], y, h, slice_dir);
} }
AVFilter avfilter_vf_hflip = { AVFilter avfilter_vf_hflip = {


@ -27,6 +27,8 @@
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h"
typedef struct { typedef struct {
int Coefs[4][512*16]; int Coefs[4][512*16];
@ -268,7 +270,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_NONE PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -317,8 +319,8 @@ static void end_frame(AVFilterLink *inlink)
hqdn3d->Coefs[2], hqdn3d->Coefs[2],
hqdn3d->Coefs[3]); hqdn3d->Coefs[3]);
avfilter_draw_slice(outlink, 0, inpic->video->h, 1); ff_draw_slice(outlink, 0, inpic->video->h, 1);
avfilter_end_frame(outlink); ff_end_frame(outlink);
avfilter_unref_buffer(inpic); avfilter_unref_buffer(inpic);
avfilter_unref_buffer(outpic); avfilter_unref_buffer(outpic);
} }


@ -61,7 +61,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_BGR24, PIX_FMT_BGRA, PIX_FMT_GRAY8, PIX_FMT_NONE PIX_FMT_BGR24, PIX_FMT_BGRA, PIX_FMT_GRAY8, PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -364,8 +364,8 @@ static void end_frame(AVFilterLink *inlink)
fill_picref_from_iplimage(outpicref, &outimg, inlink->format); fill_picref_from_iplimage(outpicref, &outimg, inlink->format);
avfilter_unref_buffer(inpicref); avfilter_unref_buffer(inpicref);
avfilter_draw_slice(outlink, 0, outlink->h, 1); ff_draw_slice(outlink, 0, outlink->h, 1);
avfilter_end_frame(outlink); ff_end_frame(outlink);
avfilter_unref_buffer(outpicref); avfilter_unref_buffer(outpicref);
} }


@ -28,7 +28,9 @@
#include "libavutil/opt.h" #include "libavutil/opt.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "internal.h" #include "internal.h"
#include "video.h"
static const char *const var_names[] = { static const char *const var_names[] = {
"w", ///< width of the input video "w", ///< width of the input video
@ -146,7 +148,7 @@ static int query_formats(AVFilterContext *ctx)
const enum PixelFormat *pix_fmts = lut->is_rgb ? rgb_pix_fmts : const enum PixelFormat *pix_fmts = lut->is_rgb ? rgb_pix_fmts :
lut->is_yuv ? yuv_pix_fmts : all_pix_fmts; lut->is_yuv ? yuv_pix_fmts : all_pix_fmts;
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -334,7 +336,7 @@ static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
} }
} }
avfilter_draw_slice(outlink, y, h, slice_dir); ff_draw_slice(outlink, y, h, slice_dir);
} }
#define DEFINE_LUT_FILTER(name_, description_, init_) \ #define DEFINE_LUT_FILTER(name_, description_, init_) \


@ -28,6 +28,7 @@
/* #define DEBUG */ /* #define DEBUG */
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "libavutil/eval.h" #include "libavutil/eval.h"
#include "libavutil/avstring.h" #include "libavutil/avstring.h"
#include "libavutil/opt.h" #include "libavutil/opt.h"
@ -38,6 +39,7 @@
#include "internal.h" #include "internal.h"
#include "bufferqueue.h" #include "bufferqueue.h"
#include "drawutils.h" #include "drawutils.h"
#include "video.h"
static const char *const var_names[] = { static const char *const var_names[] = {
"main_w", "W", ///< width of the main video "main_w", "W", ///< width of the main video
@ -176,16 +178,16 @@ static int query_formats(AVFilterContext *ctx)
AVFilterFormats *overlay_formats; AVFilterFormats *overlay_formats;
if (over->allow_packed_rgb) { if (over->allow_packed_rgb) {
main_formats = avfilter_make_format_list(main_pix_fmts_rgb); main_formats = ff_make_format_list(main_pix_fmts_rgb);
overlay_formats = avfilter_make_format_list(overlay_pix_fmts_rgb); overlay_formats = ff_make_format_list(overlay_pix_fmts_rgb);
} else { } else {
main_formats = avfilter_make_format_list(main_pix_fmts_yuv); main_formats = ff_make_format_list(main_pix_fmts_yuv);
overlay_formats = avfilter_make_format_list(overlay_pix_fmts_yuv); overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv);
} }
avfilter_formats_ref(main_formats, &ctx->inputs [MAIN ]->out_formats); ff_formats_ref(main_formats, &ctx->inputs [MAIN ]->out_formats);
avfilter_formats_ref(overlay_formats, &ctx->inputs [OVERLAY]->out_formats); ff_formats_ref(overlay_formats, &ctx->inputs [OVERLAY]->out_formats);
avfilter_formats_ref(main_formats, &ctx->outputs[MAIN ]->in_formats ); ff_formats_ref(main_formats, &ctx->outputs[MAIN ]->in_formats );
return 0; return 0;
} }
@ -470,7 +472,7 @@ static int try_start_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic)
av_ts2str(over->overpicref->pts), av_ts2timestr(over->overpicref->pts, &outlink->time_base)); av_ts2str(over->overpicref->pts), av_ts2timestr(over->overpicref->pts, &outlink->time_base));
av_dlog(ctx, "\n"); av_dlog(ctx, "\n");
avfilter_start_frame(ctx->outputs[0], avfilter_ref_buffer(outpicref, ~0)); ff_start_frame(ctx->outputs[0], avfilter_ref_buffer(outpicref, ~0));
over->frame_requested = 0; over->frame_requested = 0;
return 0; return 0;
} }
@ -498,9 +500,9 @@ static int try_push_frame(AVFilterContext *ctx)
blend_slice(ctx, outpicref, over->overpicref, over->x, over->y, blend_slice(ctx, outpicref, over->overpicref, over->x, over->y,
over->overpicref->video->w, over->overpicref->video->h, over->overpicref->video->w, over->overpicref->video->h,
0, outpicref->video->w, outpicref->video->h); 0, outpicref->video->w, outpicref->video->h);
avfilter_draw_slice(outlink, 0, outpicref->video->h, +1); ff_draw_slice(outlink, 0, outpicref->video->h, +1);
avfilter_unref_bufferp(&outlink->out_buf); avfilter_unref_bufferp(&outlink->out_buf);
avfilter_end_frame(outlink); ff_end_frame(outlink);
return 0; return 0;
} }
@ -536,7 +538,7 @@ static void draw_slice_main(AVFilterLink *inlink, int y, int h, int slice_dir)
over->overpicref->video->w, over->overpicref->video->h, over->overpicref->video->w, over->overpicref->video->h,
y, outpicref->video->w, h); y, outpicref->video->w, h);
} }
avfilter_draw_slice(outlink, y, h, slice_dir); ff_draw_slice(outlink, y, h, slice_dir);
} }
static void end_frame_main(AVFilterLink *inlink) static void end_frame_main(AVFilterLink *inlink)
@ -550,7 +552,7 @@ static void end_frame_main(AVFilterLink *inlink)
return; return;
avfilter_unref_bufferp(&inlink->cur_buf); avfilter_unref_bufferp(&inlink->cur_buf);
avfilter_unref_bufferp(&outlink->out_buf); avfilter_unref_bufferp(&outlink->out_buf);
avfilter_end_frame(ctx->outputs[0]); ff_end_frame(ctx->outputs[0]);
} }
static void start_frame_over(AVFilterLink *inlink, AVFilterBufferRef *inpicref) static void start_frame_over(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
@ -584,7 +586,7 @@ static int request_frame(AVFilterLink *outlink)
input = !over->overlay_eof && (over->queue_main.available || input = !over->overlay_eof && (over->queue_main.available ||
over->queue_over.available < 2) ? over->queue_over.available < 2) ?
OVERLAY : MAIN; OVERLAY : MAIN;
ret = avfilter_request_frame(ctx->inputs[input]); ret = ff_request_frame(ctx->inputs[input]);
/* EOF on main is reported immediately */ /* EOF on main is reported immediately */
if (ret == AVERROR_EOF && input == OVERLAY) { if (ret == AVERROR_EOF && input == OVERLAY) {
over->overlay_eof = 1; over->overlay_eof = 1;


@ -25,6 +25,8 @@
*/ */
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h"
#include "libavutil/avstring.h" #include "libavutil/avstring.h"
#include "libavutil/eval.h" #include "libavutil/eval.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
@ -67,7 +69,7 @@ enum var_name {
static int query_formats(AVFilterContext *ctx) static int query_formats(AVFilterContext *ctx)
{ {
avfilter_set_common_pixel_formats(ctx, ff_draw_supported_pixel_formats(0)); ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
return 0; return 0;
} }
@ -296,7 +298,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
outpicref->video->w = pad->w; outpicref->video->w = pad->w;
outpicref->video->h = pad->h; outpicref->video->h = pad->h;
avfilter_start_frame(inlink->dst->outputs[0], avfilter_ref_buffer(outpicref, ~0)); ff_start_frame(inlink->dst->outputs[0], avfilter_ref_buffer(outpicref, ~0));
} }
static void draw_send_bar_slice(AVFilterLink *link, int y, int h, int slice_dir, int before_slice) static void draw_send_bar_slice(AVFilterLink *link, int y, int h, int slice_dir, int before_slice)
@ -319,7 +321,7 @@ static void draw_send_bar_slice(AVFilterLink *link, int y, int h, int slice_dir,
link->dst->outputs[0]->out_buf->data, link->dst->outputs[0]->out_buf->data,
link->dst->outputs[0]->out_buf->linesize, link->dst->outputs[0]->out_buf->linesize,
0, bar_y, pad->w, bar_h); 0, bar_y, pad->w, bar_h);
avfilter_draw_slice(link->dst->outputs[0], bar_y, bar_h, slice_dir); ff_draw_slice(link->dst->outputs[0], bar_y, bar_h, slice_dir);
} }
} }
@ -352,7 +354,7 @@ static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
/* right border */ /* right border */
ff_fill_rectangle(&pad->draw, &pad->color, outpic->data, outpic->linesize, ff_fill_rectangle(&pad->draw, &pad->color, outpic->data, outpic->linesize,
pad->x + pad->in_w, y, pad->w - pad->x - pad->in_w, h); pad->x + pad->in_w, y, pad->w - pad->x - pad->in_w, h);
avfilter_draw_slice(link->dst->outputs[0], y, h, slice_dir); ff_draw_slice(link->dst->outputs[0], y, h, slice_dir);
draw_send_bar_slice(link, y, h, slice_dir, -1); draw_send_bar_slice(link, y, h, slice_dir, -1);
} }


@ -25,6 +25,7 @@
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "avfilter.h" #include "avfilter.h"
#include "video.h"
typedef struct { typedef struct {
const AVPixFmtDescriptor *pix_desc; const AVPixFmtDescriptor *pix_desc;
@ -76,7 +77,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
priv->pix_desc->flags & PIX_FMT_PSEUDOPAL) priv->pix_desc->flags & PIX_FMT_PSEUDOPAL)
memcpy(outpicref->data[1], picref->data[1], AVPALETTE_SIZE); memcpy(outpicref->data[1], picref->data[1], AVPALETTE_SIZE);
avfilter_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0)); ff_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0));
} }
static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
@ -106,7 +107,7 @@ static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
} }
} }
avfilter_draw_slice(inlink->dst->outputs[0], y, h, slice_dir); ff_draw_slice(inlink->dst->outputs[0], y, h, slice_dir);
} }
AVFilter avfilter_vf_pixdesctest = { AVFilter avfilter_vf_pixdesctest = {


@ -24,6 +24,8 @@
*/ */
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h"
#include "libavutil/avstring.h" #include "libavutil/avstring.h"
#include "libavutil/eval.h" #include "libavutil/eval.h"
#include "libavutil/mathematics.h" #include "libavutil/mathematics.h"
@ -130,21 +132,21 @@ static int query_formats(AVFilterContext *ctx)
formats = NULL; formats = NULL;
for (pix_fmt = 0; pix_fmt < PIX_FMT_NB; pix_fmt++) for (pix_fmt = 0; pix_fmt < PIX_FMT_NB; pix_fmt++)
if ( sws_isSupportedInput(pix_fmt) if ( sws_isSupportedInput(pix_fmt)
&& (ret = avfilter_add_format(&formats, pix_fmt)) < 0) { && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
avfilter_formats_unref(&formats); ff_formats_unref(&formats);
return ret; return ret;
} }
avfilter_formats_ref(formats, &ctx->inputs[0]->out_formats); ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
} }
if (ctx->outputs[0]) { if (ctx->outputs[0]) {
formats = NULL; formats = NULL;
for (pix_fmt = 0; pix_fmt < PIX_FMT_NB; pix_fmt++) for (pix_fmt = 0; pix_fmt < PIX_FMT_NB; pix_fmt++)
if ( (sws_isSupportedOutput(pix_fmt) || pix_fmt == PIX_FMT_PAL8) if ( (sws_isSupportedOutput(pix_fmt) || pix_fmt == PIX_FMT_PAL8)
&& (ret = avfilter_add_format(&formats, pix_fmt)) < 0) { && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
avfilter_formats_unref(&formats); ff_formats_unref(&formats);
return ret; return ret;
} }
avfilter_formats_ref(formats, &ctx->outputs[0]->in_formats); ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
} }
return 0; return 0;
@ -293,7 +295,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
if (!scale->sws) { if (!scale->sws) {
avfilter_start_frame(outlink, avfilter_ref_buffer(picref, ~0)); ff_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
return; return;
} }
@ -315,7 +317,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
INT_MAX); INT_MAX);
scale->slice_y = 0; scale->slice_y = 0;
avfilter_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0)); ff_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0));
} }
static int scale_slice(AVFilterLink *link, struct SwsContext *sws, int y, int h, int mul, int field) static int scale_slice(AVFilterLink *link, struct SwsContext *sws, int y, int h, int mul, int field)
@ -350,7 +352,7 @@ static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
int out_h; int out_h;
if (!scale->sws) { if (!scale->sws) {
avfilter_draw_slice(link->dst->outputs[0], y, h, slice_dir); ff_draw_slice(link->dst->outputs[0], y, h, slice_dir);
return; return;
} }
@ -367,7 +369,7 @@ static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
if (slice_dir == -1) if (slice_dir == -1)
scale->slice_y -= out_h; scale->slice_y -= out_h;
avfilter_draw_slice(link->dst->outputs[0], scale->slice_y, out_h, slice_dir); ff_draw_slice(link->dst->outputs[0], scale->slice_y, out_h, slice_dir);
if (slice_dir == 1) if (slice_dir == 1)
scale->slice_y += out_h; scale->slice_y += out_h;
} }


@ -28,6 +28,7 @@
#include "libavcodec/dsputil.h" #include "libavcodec/dsputil.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h" #include "formats.h"
#include "internal.h"
#include "video.h" #include "video.h"
static const char *const var_names[] = { static const char *const var_names[] = {
@ -284,7 +285,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
sizeof(picref), NULL); sizeof(picref), NULL);
return; return;
} }
avfilter_start_frame(inlink->dst->outputs[0], avfilter_ref_buffer(picref, ~0)); ff_start_frame(inlink->dst->outputs[0], avfilter_ref_buffer(picref, ~0));
} }
} }
@ -293,7 +294,7 @@ static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
SelectContext *select = inlink->dst->priv; SelectContext *select = inlink->dst->priv;
if (select->select && !select->cache_frames) if (select->select && !select->cache_frames)
avfilter_draw_slice(inlink->dst->outputs[0], y, h, slice_dir); ff_draw_slice(inlink->dst->outputs[0], y, h, slice_dir);
} }
static void end_frame(AVFilterLink *inlink) static void end_frame(AVFilterLink *inlink)
@ -304,7 +305,7 @@ static void end_frame(AVFilterLink *inlink)
if (select->select) { if (select->select) {
if (select->cache_frames) if (select->cache_frames)
return; return;
avfilter_end_frame(inlink->dst->outputs[0]); ff_end_frame(inlink->dst->outputs[0]);
} }
avfilter_unref_buffer(picref); avfilter_unref_buffer(picref);
} }
@ -319,15 +320,15 @@ static int request_frame(AVFilterLink *outlink)
if (av_fifo_size(select->pending_frames)) { if (av_fifo_size(select->pending_frames)) {
AVFilterBufferRef *picref; AVFilterBufferRef *picref;
av_fifo_generic_read(select->pending_frames, &picref, sizeof(picref), NULL); av_fifo_generic_read(select->pending_frames, &picref, sizeof(picref), NULL);
avfilter_start_frame(outlink, avfilter_ref_buffer(picref, ~0)); ff_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
avfilter_draw_slice(outlink, 0, outlink->h, 1); ff_draw_slice(outlink, 0, outlink->h, 1);
avfilter_end_frame(outlink); ff_end_frame(outlink);
avfilter_unref_buffer(picref); avfilter_unref_buffer(picref);
return 0; return 0;
} }
while (!select->select) { while (!select->select) {
int ret = avfilter_request_frame(inlink); int ret = ff_request_frame(inlink);
if (ret < 0) if (ret < 0)
return ret; return ret;
} }
@ -342,12 +343,12 @@ static int poll_frame(AVFilterLink *outlink)
int count, ret; int count, ret;
if (!av_fifo_size(select->pending_frames)) { if (!av_fifo_size(select->pending_frames)) {
if ((count = avfilter_poll_frame(inlink)) <= 0) if ((count = ff_poll_frame(inlink)) <= 0)
return count; return count;
/* request frame from input, and apply select condition to it */ /* request frame from input, and apply select condition to it */
select->cache_frames = 1; select->cache_frames = 1;
while (count-- && av_fifo_space(select->pending_frames)) { while (count-- && av_fifo_space(select->pending_frames)) {
ret = avfilter_request_frame(inlink); ret = ff_request_frame(inlink);
if (ret < 0) if (ret < 0)
break; break;
} }


@ -120,7 +120,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
setpts->var_values[VAR_N] += 1.0; setpts->var_values[VAR_N] += 1.0;
setpts->var_values[VAR_PREV_INPTS ] = TS2D(inpicref ->pts); setpts->var_values[VAR_PREV_INPTS ] = TS2D(inpicref ->pts);
setpts->var_values[VAR_PREV_OUTPTS] = TS2D(outpicref->pts); setpts->var_values[VAR_PREV_OUTPTS] = TS2D(outpicref->pts);
avfilter_start_frame(inlink->dst->outputs[0], outpicref); ff_start_frame(inlink->dst->outputs[0], outpicref);
} }
static av_cold void uninit(AVFilterContext *ctx) static av_cold void uninit(AVFilterContext *ctx)


@ -110,7 +110,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
avfilter_unref_buffer(picref); avfilter_unref_buffer(picref);
} }
avfilter_start_frame(outlink, picref2); ff_start_frame(outlink, picref2);
} }
AVFilter avfilter_vf_settb = { AVFilter avfilter_vf_settb = {

View File

@ -82,7 +82,7 @@ static void end_frame(AVFilterLink *inlink)
showinfo->frame++; showinfo->frame++;
avfilter_unref_buffer(picref); avfilter_unref_buffer(picref);
avfilter_end_frame(inlink->dst->outputs[0]); ff_end_frame(inlink->dst->outputs[0]);
} }
AVFilter avfilter_vf_showinfo = { AVFilter avfilter_vf_showinfo = {


@ -73,7 +73,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
av_log(link->dst, AV_LOG_DEBUG, "h:%d\n", slice->h); av_log(link->dst, AV_LOG_DEBUG, "h:%d\n", slice->h);
avfilter_start_frame(link->dst->outputs[0], picref); ff_start_frame(link->dst->outputs[0], picref);
} }
static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir) static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
@ -83,16 +83,16 @@ static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
if (slice_dir == 1) { if (slice_dir == 1) {
for (y2 = y; y2 + slice->h <= y + h; y2 += slice->h) for (y2 = y; y2 + slice->h <= y + h; y2 += slice->h)
avfilter_draw_slice(link->dst->outputs[0], y2, slice->h, slice_dir); ff_draw_slice(link->dst->outputs[0], y2, slice->h, slice_dir);
if (y2 < y + h) if (y2 < y + h)
avfilter_draw_slice(link->dst->outputs[0], y2, y + h - y2, slice_dir); ff_draw_slice(link->dst->outputs[0], y2, y + h - y2, slice_dir);
} else if (slice_dir == -1) { } else if (slice_dir == -1) {
for (y2 = y + h; y2 - slice->h >= y; y2 -= slice->h) for (y2 = y + h; y2 - slice->h >= y; y2 -= slice->h)
avfilter_draw_slice(link->dst->outputs[0], y2 - slice->h, slice->h, slice_dir); ff_draw_slice(link->dst->outputs[0], y2 - slice->h, slice->h, slice_dir);
if (y2 > y) if (y2 > y)
avfilter_draw_slice(link->dst->outputs[0], y, y2 - y, slice_dir); ff_draw_slice(link->dst->outputs[0], y, y2 - y, slice_dir);
} }
} }


@ -29,6 +29,8 @@
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h" #include "libavutil/imgutils.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h"
typedef struct { typedef struct {
int hsub, vsub; int hsub, vsub;
@ -80,7 +82,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_NONE PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -126,7 +128,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
outlink->out_buf->video->sample_aspect_ratio.den = picref->video->sample_aspect_ratio.num; outlink->out_buf->video->sample_aspect_ratio.den = picref->video->sample_aspect_ratio.num;
} }
avfilter_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0)); ff_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
} }
static void end_frame(AVFilterLink *inlink) static void end_frame(AVFilterLink *inlink)
@ -187,8 +189,8 @@ static void end_frame(AVFilterLink *inlink)
} }
avfilter_unref_buffer(inpic); avfilter_unref_buffer(inpic);
avfilter_draw_slice(outlink, 0, outpic->video->h, 1); ff_draw_slice(outlink, 0, outpic->video->h, 1);
avfilter_end_frame(outlink); ff_end_frame(outlink);
avfilter_unref_buffer(outpic); avfilter_unref_buffer(outpic);
} }


@ -37,6 +37,8 @@
*/ */
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h"
#include "libavutil/common.h" #include "libavutil/common.h"
#include "libavutil/mem.h" #include "libavutil/mem.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
@ -162,7 +164,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_YUVJ444P, PIX_FMT_YUVJ440P, PIX_FMT_NONE PIX_FMT_YUVJ444P, PIX_FMT_YUVJ440P, PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }
@ -223,8 +225,8 @@ static void end_frame(AVFilterLink *link)
apply_unsharp(out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, &unsharp->chroma); apply_unsharp(out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, &unsharp->chroma);
avfilter_unref_buffer(in); avfilter_unref_buffer(in);
avfilter_draw_slice(link->dst->outputs[0], 0, link->h, 1); ff_draw_slice(link->dst->outputs[0], 0, link->h, 1);
avfilter_end_frame(link->dst->outputs[0]); ff_end_frame(link->dst->outputs[0]);
avfilter_unref_buffer(out); avfilter_unref_buffer(out);
} }


@ -78,14 +78,14 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *inpicref)
} }
} }
avfilter_start_frame(link->dst->outputs[0], outpicref); ff_start_frame(link->dst->outputs[0], outpicref);
} }
static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir) static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{ {
AVFilterContext *ctx = link->dst; AVFilterContext *ctx = link->dst;
avfilter_draw_slice(ctx->outputs[0], link->h - (y+h), h, -1 * slice_dir); ff_draw_slice(ctx->outputs[0], link->h - (y+h), h, -1 * slice_dir);
} }
AVFilter avfilter_vf_vflip = { AVFilter avfilter_vf_vflip = {


@ -22,6 +22,8 @@
#include "libavutil/common.h" #include "libavutil/common.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h" #include "video.h"
#include "yadif.h" #include "yadif.h"
@ -227,10 +229,10 @@ static void return_frame(AVFilterContext *ctx, int is_second)
} else { } else {
yadif->out->pts = AV_NOPTS_VALUE; yadif->out->pts = AV_NOPTS_VALUE;
} }
avfilter_start_frame(ctx->outputs[0], yadif->out); ff_start_frame(ctx->outputs[0], yadif->out);
} }
avfilter_draw_slice(ctx->outputs[0], 0, link->h, 1); ff_draw_slice(ctx->outputs[0], 0, link->h, 1);
avfilter_end_frame(ctx->outputs[0]); ff_end_frame(ctx->outputs[0]);
yadif->frame_pending = (yadif->mode&1) && !is_second; yadif->frame_pending = (yadif->mode&1) && !is_second;
} }
@ -260,7 +262,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
yadif->prev = NULL; yadif->prev = NULL;
if (yadif->out->pts != AV_NOPTS_VALUE) if (yadif->out->pts != AV_NOPTS_VALUE)
yadif->out->pts *= 2; yadif->out->pts *= 2;
avfilter_start_frame(ctx->outputs[0], yadif->out); ff_start_frame(ctx->outputs[0], yadif->out);
return; return;
} }
@ -274,7 +276,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
yadif->out->video->interlaced = 0; yadif->out->video->interlaced = 0;
if (yadif->out->pts != AV_NOPTS_VALUE) if (yadif->out->pts != AV_NOPTS_VALUE)
yadif->out->pts *= 2; yadif->out->pts *= 2;
avfilter_start_frame(ctx->outputs[0], yadif->out); ff_start_frame(ctx->outputs[0], yadif->out);
} }
static void end_frame(AVFilterLink *link) static void end_frame(AVFilterLink *link)
@ -286,8 +288,8 @@ static void end_frame(AVFilterLink *link)
return; return;
if (yadif->auto_enable && !yadif->cur->video->interlaced) { if (yadif->auto_enable && !yadif->cur->video->interlaced) {
avfilter_draw_slice(ctx->outputs[0], 0, link->h, 1); ff_draw_slice(ctx->outputs[0], 0, link->h, 1);
avfilter_end_frame(ctx->outputs[0]); ff_end_frame(ctx->outputs[0]);
return; return;
} }
@ -310,7 +312,7 @@ static int request_frame(AVFilterLink *link)
if (yadif->eof) if (yadif->eof)
return AVERROR_EOF; return AVERROR_EOF;
ret = avfilter_request_frame(link->src->inputs[0]); ret = ff_request_frame(link->src->inputs[0]);
if (ret == AVERROR_EOF && yadif->cur) { if (ret == AVERROR_EOF && yadif->cur) {
AVFilterBufferRef *next = avfilter_ref_buffer(yadif->next, AV_PERM_READ); AVFilterBufferRef *next = avfilter_ref_buffer(yadif->next, AV_PERM_READ);
@ -335,14 +337,14 @@ static int poll_frame(AVFilterLink *link)
if (yadif->frame_pending) if (yadif->frame_pending)
return 1; return 1;
val = avfilter_poll_frame(link->src->inputs[0]); val = ff_poll_frame(link->src->inputs[0]);
if (val <= 0) if (val <= 0)
return val; return val;
if (val >= 1 && !yadif->next) { //FIXME change API to not requre this red tape if (val >= 1 && !yadif->next) { //FIXME change API to not requre this red tape
if ((ret = avfilter_request_frame(link->src->inputs[0])) < 0) if ((ret = ff_request_frame(link->src->inputs[0])) < 0)
return ret; return ret;
val = avfilter_poll_frame(link->src->inputs[0]); val = ff_poll_frame(link->src->inputs[0]);
if (val <= 0) if (val <= 0)
return val; return val;
} }
@ -390,7 +392,7 @@ static int query_formats(AVFilterContext *ctx)
PIX_FMT_NONE PIX_FMT_NONE
}; };
avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts)); ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
return 0; return 0;
} }


@ -151,7 +151,7 @@ AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int
void ff_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref) void ff_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{ {
avfilter_start_frame(link->dst->outputs[0], picref); ff_start_frame(link->dst->outputs[0], picref);
} }
static void default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) static void default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
@ -164,13 +164,13 @@ static void default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
if (outlink) { if (outlink) {
outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
avfilter_copy_buffer_ref_props(outlink->out_buf, picref); avfilter_copy_buffer_ref_props(outlink->out_buf, picref);
avfilter_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0)); ff_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
} }
} }
/* XXX: should we do the duplicating of the picture ref here, instead of /* XXX: should we do the duplicating of the picture ref here, instead of
* forcing the source filter to do it? */ * forcing the source filter to do it? */
void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref) void ff_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{ {
void (*start_frame)(AVFilterLink *, AVFilterBufferRef *); void (*start_frame)(AVFilterLink *, AVFilterBufferRef *);
AVFilterPad *dst = link->dstpad; AVFilterPad *dst = link->dstpad;
@ -217,7 +217,7 @@ void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
void ff_null_end_frame(AVFilterLink *link) void ff_null_end_frame(AVFilterLink *link)
{ {
avfilter_end_frame(link->dst->outputs[0]); ff_end_frame(link->dst->outputs[0]);
} }
static void default_end_frame(AVFilterLink *inlink) static void default_end_frame(AVFilterLink *inlink)
@ -235,11 +235,11 @@ static void default_end_frame(AVFilterLink *inlink)
avfilter_unref_buffer(outlink->out_buf); avfilter_unref_buffer(outlink->out_buf);
outlink->out_buf = NULL; outlink->out_buf = NULL;
} }
avfilter_end_frame(outlink); ff_end_frame(outlink);
} }
} }
void avfilter_end_frame(AVFilterLink *link) void ff_end_frame(AVFilterLink *link)
{ {
void (*end_frame)(AVFilterLink *); void (*end_frame)(AVFilterLink *);
@ -258,7 +258,7 @@ void avfilter_end_frame(AVFilterLink *link)
void ff_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) void ff_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{ {
avfilter_draw_slice(link->dst->outputs[0], y, h, slice_dir); ff_draw_slice(link->dst->outputs[0], y, h, slice_dir);
} }
static void default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) static void default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
@ -269,10 +269,10 @@ static void default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir
outlink = inlink->dst->outputs[0]; outlink = inlink->dst->outputs[0];
if (outlink) if (outlink)
avfilter_draw_slice(outlink, y, h, slice_dir); ff_draw_slice(outlink, y, h, slice_dir);
} }
void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) void ff_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{ {
uint8_t *src[4], *dst[4]; uint8_t *src[4], *dst[4];
int i, j, vsub; int i, j, vsub;
@ -346,4 +346,16 @@ void avfilter_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{ {
ff_null_draw_slice(link, y, h, slice_dir); ff_null_draw_slice(link, y, h, slice_dir);
} }
void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
ff_start_frame(link, picref);
}
void avfilter_end_frame(AVFilterLink *link)
{
ff_end_frame(link);
}
void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{
ff_draw_slice(link, y, h, slice_dir);
}
#endif #endif


@ -33,4 +33,39 @@ void ff_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
void ff_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir); void ff_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
void ff_null_end_frame(AVFilterLink *link); void ff_null_end_frame(AVFilterLink *link);
/**
* Notify the next filter of the start of a frame.
*
* @param link the output link the frame will be sent over
* @param picref A reference to the frame about to be sent. The data for this
* frame need only be valid once draw_slice() is called for that
* portion. The receiving filter will free this reference when
* it no longer needs it.
*/
void ff_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
/**
* Notify the next filter that the current frame has finished.
*
* @param link the output link the frame was sent over
*/
void ff_end_frame(AVFilterLink *link);
/**
* Send a slice to the next filter.
*
* Slices have to be provided in sequential order, either in
* top-bottom or bottom-top order. If slices are provided in
* non-sequential order the behavior of the function is undefined.
*
* @param link the output link over which the frame is being sent
* @param y offset in pixels from the top of the image for this slice
* @param h height of this slice in pixels
* @param slice_dir the assumed direction for sending slices,
* from the top slice to the bottom slice if the value is 1,
* from the bottom slice to the top slice if the value is -1,
* for other values the behavior of the function is undefined.
*/
void ff_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
#endif /* AVFILTER_VIDEO_H */
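Again for orientation rather than as part of the patch: the push sequence these three helpers implement, sketched as a hypothetical source filter's request_frame callback. It mirrors what the vsrc_color and vsrc_testsrc hunks further down do; buffer permissions and timestamp handling are simplified here.

static int mysrc_request_frame(AVFilterLink *outlink)
{
    /* hypothetical source filter: allocate a frame, announce it, push the
     * pixels as one full-height slice, then close the frame */
    AVFilterBufferRef *picref =
        avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
    if (!picref)
        return AVERROR(ENOMEM);
    picref->pts = 0;   /* a real filter tracks and rescales timestamps */

    ff_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
    /* ... write into picref->data[] / picref->linesize[] here ... */
    ff_draw_slice(outlink, 0, outlink->h, 1);  /* slice_dir 1: top to bottom */
    ff_end_frame(outlink);

    avfilter_unref_buffer(picref);
    return 0;
}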


@ -24,6 +24,8 @@
*/ */
#include "avfilter.h" #include "avfilter.h"
#include "formats.h"
#include "video.h"
#include "libavutil/pixdesc.h" #include "libavutil/pixdesc.h"
#include "libavutil/colorspace.h" #include "libavutil/colorspace.h"
#include "libavutil/imgutils.h" #include "libavutil/imgutils.h"
@ -73,7 +75,7 @@ static av_cold int color_init(AVFilterContext *ctx, const char *args, void *opaq
static int query_formats(AVFilterContext *ctx) static int query_formats(AVFilterContext *ctx)
{ {
avfilter_set_common_pixel_formats(ctx, ff_draw_supported_pixel_formats(0)); ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
return 0; return 0;
} }
@ -108,11 +110,11 @@ static int color_request_frame(AVFilterLink *link)
picref->pts = color->pts++; picref->pts = color->pts++;
picref->pos = -1; picref->pos = -1;
avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0)); ff_start_frame(link, avfilter_ref_buffer(picref, ~0));
ff_fill_rectangle(&color->draw, &color->color, picref->data, picref->linesize, ff_fill_rectangle(&color->draw, &color->color, picref->data, picref->linesize,
0, 0, color->w, color->h); 0, 0, color->w, color->h);
avfilter_draw_slice(link, 0, color->h, 1); ff_draw_slice(link, 0, color->h, 1);
avfilter_end_frame(link); ff_end_frame(link);
avfilter_unref_buffer(picref); avfilter_unref_buffer(picref);
return 0; return 0;


@@ -36,6 +36,8 @@
 #include "libavutil/intreadwrite.h"
 #include "libavutil/parseutils.h"
 #include "avfilter.h"
+#include "formats.h"
+#include "video.h"
 typedef struct {
     const AVClass *class;
@@ -146,9 +148,9 @@ static int request_frame(AVFilterLink *outlink)
     test->fill_picture_fn(outlink->src, picref);
     test->nb_frame++;
-    avfilter_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
-    avfilter_draw_slice(outlink, 0, picref->video->h, 1);
-    avfilter_end_frame(outlink);
+    ff_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
+    ff_draw_slice(outlink, 0, picref->video->h, 1);
+    ff_end_frame(outlink);
     avfilter_unref_buffer(picref);
     return 0;
@@ -388,7 +390,7 @@ static int test_query_formats(AVFilterContext *ctx)
     static const enum PixelFormat pix_fmts[] = {
         PIX_FMT_RGB24, PIX_FMT_NONE
     };
-    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
+    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
     return 0;
 }
@@ -494,7 +496,7 @@ static int rgbtest_query_formats(AVFilterContext *ctx)
         PIX_FMT_RGB555, PIX_FMT_BGR555,
         PIX_FMT_NONE
     };
-    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
+    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
     return 0;
 }


@@ -345,6 +345,7 @@ const AVCodecTag ff_codec_wav_tags[] = {
     { CODEC_ID_ATRAC3,     0x0270 },
     { CODEC_ID_ADPCM_G722, 0x028F },
     { CODEC_ID_IMC,        0x0401 },
+    { CODEC_ID_IAC,        0x0402 },
     { CODEC_ID_GSM_MS,     0x1500 },
     { CODEC_ID_TRUESPEECH, 0x1501 },
     { CODEC_ID_AAC,        0x1600 }, /* ADTS AAC */


@@ -673,6 +673,42 @@ fail:
 /*******************************************************/
+static void probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
+{
+    if(st->request_probe>0){
+        AVProbeData *pd = &st->probe_data;
+        int end;
+        av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
+        --st->probe_packets;
+
+        if (pkt) {
+            pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
+            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
+            pd->buf_size += pkt->size;
+            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
+        } else {
+            st->probe_packets = 0;
+        }
+
+        end= s->raw_packet_buffer_remaining_size <= 0
+             || st->probe_packets<=0;
+
+        if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
+            int score= set_codec_from_probe_data(s, st, pd);
+            if( (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
+                || end){
+                pd->buf_size=0;
+                av_freep(&pd->buf);
+                st->request_probe= -1;
+                if(st->codec->codec_id != CODEC_ID_NONE){
+                    av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
+                }else
+                    av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
+            }
+        }
+    }
+}
+
 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
 {
     int ret, i;
@@ -683,7 +719,8 @@ int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
         if (pktl) {
             *pkt = pktl->pkt;
-            if(s->streams[pkt->stream_index]->request_probe <= 0){
+            st = s->streams[pkt->stream_index];
+            if(st->request_probe <= 0){
                 s->raw_packet_buffer = pktl->next;
                 s->raw_packet_buffer_remaining_size += pkt->size;
                 av_free(pktl);
@@ -696,9 +733,13 @@ int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
         if (ret < 0) {
             if (!pktl || ret == AVERROR(EAGAIN))
                 return ret;
-            for (i = 0; i < s->nb_streams; i++)
-                if(s->streams[i]->request_probe > 0)
-                    s->streams[i]->request_probe = -1;
+            for (i = 0; i < s->nb_streams; i++) {
+                st = s->streams[i];
+                if (st->probe_packets) {
+                    probe_codec(s, st, NULL);
+                }
+                av_assert0(st->request_probe <= 0);
+            }
             continue;
         }
@@ -739,34 +780,7 @@ int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
         add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
         s->raw_packet_buffer_remaining_size -= pkt->size;
-        if(st->request_probe>0){
-            AVProbeData *pd = &st->probe_data;
-            int end;
-            av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
-            --st->probe_packets;
-            pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
-            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
-            pd->buf_size += pkt->size;
-            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
-            end= s->raw_packet_buffer_remaining_size <= 0
-                 || st->probe_packets<=0;
-            if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
-                int score= set_codec_from_probe_data(s, st, pd);
-                if( (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
-                    || end){
-                    pd->buf_size=0;
-                    av_freep(&pd->buf);
-                    st->request_probe= -1;
-                    if(st->codec->codec_id != CODEC_ID_NONE){
-                        av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
-                    }else
-                        av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
-                }
-            }
-        }
+        probe_codec(s, st, pkt);
     }
 }