Merge remote-tracking branch 'qatar/master'

* qatar/master:
  mss3: use standard zigzag table
  mss3: split DSP functions that are used in MTS2(MSS4) into separate file
  motion-test: do not use getopt()
  tcp: add initial timeout limit for incoming connections
  configure: Change the rdtsc check to a linker check
  avconv: propagate fatal errors from lavfi.
  lavfi: add error handling to filter_samples().
  fate-run: make avconv() properly deal with multiple inputs.
  asplit: don't leak the input buffer.
  af_resample: fix request_frame() behavior.
  af_asyncts: fix request_frame() behavior.
  libx264: support aspect ratio switching
  matroskadec: honor error_recognition when encountering unknown elements.
  lavr: resampling: add support for s32p, fltp, and dblp internal sample formats
  lavr: resampling: add filter type and Kaiser window beta to AVOptions
  lavr: Use AV_SAMPLE_FMT_NONE to auto-select the internal sample format
  lavr: mix: validate internal sample format in ff_audio_mix_init()

Conflicts:
    ffmpeg.c
    ffplay.c
    libavcodec/libx264.c
    libavfilter/audio.c
    libavfilter/split.c
    libavformat/tcp.c
    tests/fate-run.sh

Merged-by: Michael Niedermayer <michaelni@gmx.at>

commit f8911b987d
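The headline libavfilter change in this merge is that the filter_samples() callback and ff_filter_samples() now return an int instead of void, so that allocation and conversion failures can propagate up the filter chain (and, per the avconv entry above, on to the calling application). A minimal sketch of a pass-through audio filter under the new contract follows; it is illustrative only and not a filter added by this merge:

#include "avfilter.h"
#include "audio.h"

/* Forward the input buffer unchanged and propagate whatever error code
 * the next filter in the chain returns. */
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];

    return ff_filter_samples(outlink, buf);
}

On failure the callback is expected to unreference the buffer itself if it has not been handed on, as spelled out in the updated avfilter.h documentation further down in this diff.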
@@ -327,7 +327,7 @@ OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4.o msmpeg4enc.o msmpeg4data.o \
                                     h263dec.o h263.o ituh263dec.o \
                                     mpeg4videodec.o
 OBJS-$(CONFIG_MSRLE_DECODER)    += msrle.o msrledec.o
-OBJS-$(CONFIG_MSA1_DECODER)     += mss3.o
+OBJS-$(CONFIG_MSA1_DECODER)     += mss3.o mss34dsp.o
 OBJS-$(CONFIG_MSS1_DECODER)     += mss1.o
 OBJS-$(CONFIG_MSVIDEO1_DECODER) += msvideo1.o
 OBJS-$(CONFIG_MSVIDEO1_ENCODER) += msvideo1enc.o elbg.o
@@ -175,10 +175,10 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
         x4->params.b_tff = frame->top_field_first;
         x264_encoder_reconfig(x4->enc, &x4->params);
     }
-    if (x4->params.vui.i_sar_height != ctx->sample_aspect_ratio.den
-        || x4->params.vui.i_sar_width != ctx->sample_aspect_ratio.num) {
+    if (x4->params.vui.i_sar_height != ctx->sample_aspect_ratio.den ||
+        x4->params.vui.i_sar_width != ctx->sample_aspect_ratio.num) {
         x4->params.vui.i_sar_height = ctx->sample_aspect_ratio.den;
         x4->params.vui.i_sar_width = ctx->sample_aspect_ratio.num;
         x264_encoder_reconfig(x4->enc, &x4->params);
     }
 }
@@ -119,15 +119,9 @@ int main(int argc, char **argv)
     int flags[2] = { AV_CPU_FLAG_MMX, AV_CPU_FLAG_MMX2 };
     int flags_size = HAVE_MMX2 ? 2 : 1;
 
-    for(;;) {
-        c = getopt(argc, argv, "h");
-        if (c == -1)
-            break;
-        switch(c) {
-        case 'h':
-            help();
-            return 1;
-        }
+    if (argc > 1) {
+        help();
+        return 1;
     }
 
     printf("ffmpeg motion test\n");
@@ -26,6 +26,8 @@
 
 #include "avcodec.h"
 #include "bytestream.h"
+#include "dsputil.h"
+#include "mss34dsp.h"
 
 #define HEADER_SIZE 27
 
@@ -119,39 +121,6 @@ typedef struct MSS3Context {
     int hblock[16 * 16];
 } MSS3Context;
 
-static const uint8_t mss3_luma_quant[64] = {
-    16, 11, 10, 16, 24, 40, 51, 61,
-    12, 12, 14, 19, 26, 58, 60, 55,
-    14, 13, 16, 24, 40, 57, 69, 56,
-    14, 17, 22, 29, 51, 87, 80, 62,
-    18, 22, 37, 56, 68, 109, 103, 77,
-    24, 35, 55, 64, 81, 104, 113, 92,
-    49, 64, 78, 87, 103, 121, 120, 101,
-    72, 92, 95, 98, 112, 100, 103, 99
-};
-
-static const uint8_t mss3_chroma_quant[64] = {
-    17, 18, 24, 47, 99, 99, 99, 99,
-    18, 21, 26, 66, 99, 99, 99, 99,
-    24, 26, 56, 99, 99, 99, 99, 99,
-    47, 66, 99, 99, 99, 99, 99, 99,
-    99, 99, 99, 99, 99, 99, 99, 99,
-    99, 99, 99, 99, 99, 99, 99, 99,
-    99, 99, 99, 99, 99, 99, 99, 99,
-    99, 99, 99, 99, 99, 99, 99, 99
-};
-
-static const uint8_t zigzag_scan[64] = {
-    0,  1,  8, 16,  9,  2,  3, 10,
-    17, 24, 32, 25, 18, 11,  4,  5,
-    12, 19, 26, 33, 40, 48, 41, 34,
-    27, 20, 13,  6,  7, 14, 21, 28,
-    35, 42, 49, 56, 57, 50, 43, 36,
-    29, 22, 15, 23, 30, 37, 44, 51,
-    58, 59, 52, 45, 38, 31, 39, 46,
-    53, 60, 61, 54, 47, 55, 62, 63
-};
-
 
 static void model2_reset(Model2 *m)
 {
@@ -578,7 +547,7 @@ static int decode_dct(RangeCoder *c, DCTBlockCoder *bc, int *block,
         if (!sign)
             val = -val;
 
-        zz_pos = zigzag_scan[pos];
+        zz_pos = ff_zigzag_direct[pos];
         block[zz_pos] = val * bc->qmat[zz_pos];
         pos++;
     }
@@ -586,58 +555,6 @@
     return pos == 64 ? 0 : -1;
 }
 
-#define DCT_TEMPLATE(blk, step, SOP, shift) \
-    const int t0 = -39409 * blk[7 * step] - 58980 * blk[1 * step]; \
-    const int t1 = 39410 * blk[1 * step] - 58980 * blk[7 * step]; \
-    const int t2 = -33410 * blk[5 * step] - 167963 * blk[3 * step]; \
-    const int t3 = 33410 * blk[3 * step] - 167963 * blk[5 * step]; \
-    const int t4 = blk[3 * step] + blk[7 * step]; \
-    const int t5 = blk[1 * step] + blk[5 * step]; \
-    const int t6 = 77062 * t4 + 51491 * t5; \
-    const int t7 = 77062 * t5 - 51491 * t4; \
-    const int t8 = 35470 * blk[2 * step] - 85623 * blk[6 * step]; \
-    const int t9 = 35470 * blk[6 * step] + 85623 * blk[2 * step]; \
-    const int tA = SOP(blk[0 * step] - blk[4 * step]); \
-    const int tB = SOP(blk[0 * step] + blk[4 * step]); \
-    \
-    blk[0 * step] = (  t1 + t6  + t9 + tB) >> shift; \
-    blk[1 * step] = (  t3 + t7  + t8 + tA) >> shift; \
-    blk[2 * step] = (  t2 + t6  - t8 + tA) >> shift; \
-    blk[3 * step] = (  t0 + t7  - t9 + tB) >> shift; \
-    blk[4 * step] = (-(t0 + t7) - t9 + tB) >> shift; \
-    blk[5 * step] = (-(t2 + t6) - t8 + tA) >> shift; \
-    blk[6 * step] = (-(t3 + t7) + t8 + tA) >> shift; \
-    blk[7 * step] = (-(t1 + t6) + t9 + tB) >> shift; \
-
-#define SOP_ROW(a) ((a) << 16) + 0x2000
-#define SOP_COL(a) ((a + 32) << 16)
-
-static void dct_put(uint8_t *dst, int stride, int *block)
-{
-    int i, j;
-    int *ptr;
-
-    ptr = block;
-    for (i = 0; i < 8; i++) {
-        DCT_TEMPLATE(ptr, 1, SOP_ROW, 13);
-        ptr += 8;
-    }
-
-    ptr = block;
-    for (i = 0; i < 8; i++) {
-        DCT_TEMPLATE(ptr, 8, SOP_COL, 22);
-        ptr++;
-    }
-
-    ptr = block;
-    for (j = 0; j < 8; j++) {
-        for (i = 0; i < 8; i++)
-            dst[i] = av_clip_uint8(ptr[i] + 128);
-        dst += stride;
-        ptr += 8;
-    }
-}
-
 static void decode_dct_block(RangeCoder *c, DCTBlockCoder *bc,
                              uint8_t *dst, int stride, int block_size,
                              int *block, int mb_x, int mb_y)
@@ -655,7 +572,7 @@ static void decode_dct_block(RangeCoder *c, DCTBlockCoder *bc,
                 c->got_error = 1;
                 return;
             }
-            dct_put(dst + i * 8, stride, block);
+            ff_mss34_dct_put(dst + i * 8, stride, block);
         }
         dst += 8 * stride;
     }
@@ -702,14 +619,6 @@ static void decode_haar_block(RangeCoder *c, HaarBlockCoder *hc,
         }
     }
 }
-
-static void gen_quant_mat(uint16_t *qmat, const uint8_t *ref, float scale)
-{
-    int i;
-
-    for (i = 0; i < 64; i++)
-        qmat[i] = (uint16_t)(ref[i] * scale + 50.0) / 100;
-}
 
 static void reset_coders(MSS3Context *ctx, int quality)
 {
     int i, j;
@@ -726,15 +635,8 @@ static void reset_coders(MSS3Context *ctx, int quality)
         for (j = 0; j < 125; j++)
             model_reset(&ctx->image_coder[i].vq_model[j]);
         if (ctx->dct_coder[i].quality != quality) {
-            float scale;
             ctx->dct_coder[i].quality = quality;
-            if (quality > 50)
-                scale = 200.0f - 2 * quality;
-            else
-                scale = 5000.0f / quality;
-            gen_quant_mat(ctx->dct_coder[i].qmat,
-                          i ? mss3_chroma_quant : mss3_luma_quant,
-                          scale);
+            ff_mss34_gen_quant_mat(ctx->dct_coder[i].qmat, quality, !i);
         }
         memset(ctx->dct_coder[i].prev_dc, 0,
                sizeof(*ctx->dct_coder[i].prev_dc) *
libavcodec/mss34dsp.c (new file, 114 lines)
@@ -0,0 +1,114 @@
+/*
+ * Common stuff for some Microsoft Screen codecs
+ * Copyright (C) 2012 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include "libavutil/common.h"
+#include "mss34dsp.h"
+
+static const uint8_t luma_quant[64] = {
+    16, 11, 10, 16, 24, 40, 51, 61,
+    12, 12, 14, 19, 26, 58, 60, 55,
+    14, 13, 16, 24, 40, 57, 69, 56,
+    14, 17, 22, 29, 51, 87, 80, 62,
+    18, 22, 37, 56, 68, 109, 103, 77,
+    24, 35, 55, 64, 81, 104, 113, 92,
+    49, 64, 78, 87, 103, 121, 120, 101,
+    72, 92, 95, 98, 112, 100, 103, 99
+};
+
+static const uint8_t chroma_quant[64] = {
+    17, 18, 24, 47, 99, 99, 99, 99,
+    18, 21, 26, 66, 99, 99, 99, 99,
+    24, 26, 56, 99, 99, 99, 99, 99,
+    47, 66, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99
+};
+
+void ff_mss34_gen_quant_mat(uint16_t *qmat, int quality, int luma)
+{
+    int i;
+    const uint8_t *qsrc = luma ? luma_quant : chroma_quant;
+
+    if (quality >= 50) {
+        int scale = 200 - 2 * quality;
+
+        for (i = 0; i < 64; i++)
+            qmat[i] = (qsrc[i] * scale + 50) / 100;
+    } else {
+        for (i = 0; i < 64; i++)
+            qmat[i] = (5000 * qsrc[i] / quality + 50) / 100;
+    }
+}
+
+#define DCT_TEMPLATE(blk, step, SOP, shift) \
+    const int t0 = -39409 * blk[7 * step] - 58980 * blk[1 * step]; \
+    const int t1 = 39410 * blk[1 * step] - 58980 * blk[7 * step]; \
+    const int t2 = -33410 * blk[5 * step] - 167963 * blk[3 * step]; \
+    const int t3 = 33410 * blk[3 * step] - 167963 * blk[5 * step]; \
+    const int t4 = blk[3 * step] + blk[7 * step]; \
+    const int t5 = blk[1 * step] + blk[5 * step]; \
+    const int t6 = 77062 * t4 + 51491 * t5; \
+    const int t7 = 77062 * t5 - 51491 * t4; \
+    const int t8 = 35470 * blk[2 * step] - 85623 * blk[6 * step]; \
+    const int t9 = 35470 * blk[6 * step] + 85623 * blk[2 * step]; \
+    const int tA = SOP(blk[0 * step] - blk[4 * step]); \
+    const int tB = SOP(blk[0 * step] + blk[4 * step]); \
+    \
+    blk[0 * step] = (  t1 + t6  + t9 + tB) >> shift; \
+    blk[1 * step] = (  t3 + t7  + t8 + tA) >> shift; \
+    blk[2 * step] = (  t2 + t6  - t8 + tA) >> shift; \
+    blk[3 * step] = (  t0 + t7  - t9 + tB) >> shift; \
+    blk[4 * step] = (-(t0 + t7) - t9 + tB) >> shift; \
+    blk[5 * step] = (-(t2 + t6) - t8 + tA) >> shift; \
+    blk[6 * step] = (-(t3 + t7) + t8 + tA) >> shift; \
+    blk[7 * step] = (-(t1 + t6) + t9 + tB) >> shift; \
+
+#define SOP_ROW(a) ((a) << 16) + 0x2000
+#define SOP_COL(a) ((a + 32) << 16)
+
+void ff_mss34_dct_put(uint8_t *dst, int stride, int *block)
+{
+    int i, j;
+    int *ptr;
+
+    ptr = block;
+    for (i = 0; i < 8; i++) {
+        DCT_TEMPLATE(ptr, 1, SOP_ROW, 13);
+        ptr += 8;
+    }
+
+    ptr = block;
+    for (i = 0; i < 8; i++) {
+        DCT_TEMPLATE(ptr, 8, SOP_COL, 22);
+        ptr++;
+    }
+
+    ptr = block;
+    for (j = 0; j < 8; j++) {
+        for (i = 0; i < 8; i++)
+            dst[i] = av_clip_uint8(ptr[i] + 128);
+        dst += stride;
+        ptr += 8;
+    }
+}
libavcodec/mss34dsp.h (new file, 45 lines)
@@ -0,0 +1,45 @@
+/*
+ * Common stuff for some Microsoft Screen codecs
+ * Copyright (C) 2012 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_MSS34DSP_H
+#define AVCODEC_MSS34DSP_H
+
+#include <stdint.h>
+
+/**
+ * Generate quantisation matrix for given quality.
+ *
+ * @param qmat    destination matrix
+ * @param quality quality setting (1-100)
+ * @param luma    generate quantisation matrix for luma or chroma
+ */
+void ff_mss34_gen_quant_mat(uint16_t *qmat, int quality, int luma);
+
+/**
+ * Transform and output DCT block.
+ *
+ * @param dst    output plane
+ * @param stride output plane stride
+ * @param block  block to transform and output
+ */
+void ff_mss34_dct_put(uint8_t *dst, int stride, int *block);
+
+#endif /* AVCODEC_MSS34DSP_H */
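With the DSP helpers split out, mss3.c (and later the MTS2/MSS4 decoder mentioned in the commit list) only needs the two ff_mss34_* entry points declared above. A rough sketch of how a decoder is expected to drive them, with the surrounding context (quantisation-matrix storage, coefficient decoding) simplified and the function name invented for illustration:

#include <stdint.h>
#include "mss34dsp.h"

static void decode_one_block_example(uint8_t *dst, int stride,
                                     int *block, uint16_t *qmat, int quality)
{
    /* Build the luma quantisation matrix for this quality level
     * (done once per quality change in the real decoder). */
    ff_mss34_gen_quant_mat(qmat, quality, 1);

    /* ... decode and dequantise coefficients into block[] using qmat[] ... */

    /* Inverse-transform the block and write clipped 8-bit pixels to dst. */
    ff_mss34_dct_put(dst, stride, block);
}

This mirrors what reset_coders() and decode_dct_block() in mss3.c do after the split shown above.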
@ -135,12 +135,13 @@ static int config_output(AVFilterLink *outlink)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
|
||||||
{
|
{
|
||||||
AConvertContext *aconvert = inlink->dst->priv;
|
AConvertContext *aconvert = inlink->dst->priv;
|
||||||
const int n = insamplesref->audio->nb_samples;
|
const int n = insamplesref->audio->nb_samples;
|
||||||
AVFilterLink *const outlink = inlink->dst->outputs[0];
|
AVFilterLink *const outlink = inlink->dst->outputs[0];
|
||||||
AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);
|
AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);
|
||||||
|
int ret;
|
||||||
|
|
||||||
swr_convert(aconvert->swr, outsamplesref->data, n,
|
swr_convert(aconvert->swr, outsamplesref->data, n,
|
||||||
(void *)insamplesref->data, n);
|
(void *)insamplesref->data, n);
|
||||||
@ -148,8 +149,9 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref
|
|||||||
avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
|
avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
|
||||||
outsamplesref->audio->channel_layout = outlink->channel_layout;
|
outsamplesref->audio->channel_layout = outlink->channel_layout;
|
||||||
|
|
||||||
ff_filter_samples(outlink, outsamplesref);
|
ret = ff_filter_samples(outlink, outsamplesref);
|
||||||
avfilter_unref_buffer(insamplesref);
|
avfilter_unref_buffer(insamplesref);
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
AVFilter avfilter_af_aconvert = {
|
AVFilter avfilter_af_aconvert = {
|
||||||
|
@ -212,7 +212,7 @@ static inline void copy_samples(int nb_inputs, struct amerge_input in[],
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
||||||
{
|
{
|
||||||
AVFilterContext *ctx = inlink->dst;
|
AVFilterContext *ctx = inlink->dst;
|
||||||
AMergeContext *am = ctx->priv;
|
AMergeContext *am = ctx->priv;
|
||||||
@ -232,7 +232,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
|||||||
for (i = 1; i < am->nb_inputs; i++)
|
for (i = 1; i < am->nb_inputs; i++)
|
||||||
nb_samples = FFMIN(nb_samples, am->in[i].nb_samples);
|
nb_samples = FFMIN(nb_samples, am->in[i].nb_samples);
|
||||||
if (!nb_samples)
|
if (!nb_samples)
|
||||||
return;
|
return 0;
|
||||||
|
|
||||||
outbuf = ff_get_audio_buffer(ctx->outputs[0], AV_PERM_WRITE, nb_samples);
|
outbuf = ff_get_audio_buffer(ctx->outputs[0], AV_PERM_WRITE, nb_samples);
|
||||||
outs = outbuf->data[0];
|
outs = outbuf->data[0];
|
||||||
@ -285,7 +285,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ff_filter_samples(ctx->outputs[0], outbuf);
|
return ff_filter_samples(ctx->outputs[0], outbuf);
|
||||||
}
|
}
|
||||||
|
|
||||||
static av_cold int init(AVFilterContext *ctx, const char *args)
|
static av_cold int init(AVFilterContext *ctx, const char *args)
|
||||||
|
@ -305,9 +305,7 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
|
|||||||
if (s->next_pts != AV_NOPTS_VALUE)
|
if (s->next_pts != AV_NOPTS_VALUE)
|
||||||
s->next_pts += nb_samples;
|
s->next_pts += nb_samples;
|
||||||
|
|
||||||
ff_filter_samples(outlink, out_buf);
|
return ff_filter_samples(outlink, out_buf);
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -448,31 +446,37 @@ static int request_frame(AVFilterLink *outlink)
|
|||||||
return output_frame(outlink, available_samples);
|
return output_frame(outlink, available_samples);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
||||||
{
|
{
|
||||||
AVFilterContext *ctx = inlink->dst;
|
AVFilterContext *ctx = inlink->dst;
|
||||||
MixContext *s = ctx->priv;
|
MixContext *s = ctx->priv;
|
||||||
AVFilterLink *outlink = ctx->outputs[0];
|
AVFilterLink *outlink = ctx->outputs[0];
|
||||||
int i;
|
int i, ret = 0;
|
||||||
|
|
||||||
for (i = 0; i < ctx->nb_inputs; i++)
|
for (i = 0; i < ctx->nb_inputs; i++)
|
||||||
if (ctx->inputs[i] == inlink)
|
if (ctx->inputs[i] == inlink)
|
||||||
break;
|
break;
|
||||||
if (i >= ctx->nb_inputs) {
|
if (i >= ctx->nb_inputs) {
|
||||||
av_log(ctx, AV_LOG_ERROR, "unknown input link\n");
|
av_log(ctx, AV_LOG_ERROR, "unknown input link\n");
|
||||||
return;
|
ret = AVERROR(EINVAL);
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (i == 0) {
|
if (i == 0) {
|
||||||
int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
|
int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
|
||||||
outlink->time_base);
|
outlink->time_base);
|
||||||
frame_list_add_frame(s->frame_list, buf->audio->nb_samples, pts);
|
ret = frame_list_add_frame(s->frame_list, buf->audio->nb_samples, pts);
|
||||||
|
if (ret < 0)
|
||||||
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
|
ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
|
||||||
buf->audio->nb_samples);
|
buf->audio->nb_samples);
|
||||||
|
|
||||||
|
fail:
|
||||||
avfilter_unref_buffer(buf);
|
avfilter_unref_buffer(buf);
|
||||||
|
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int init(AVFilterContext *ctx, const char *args)
|
static int init(AVFilterContext *ctx, const char *args)
|
||||||
|
@ -168,13 +168,14 @@ static int config_output(AVFilterLink *outlink)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
|
||||||
{
|
{
|
||||||
AResampleContext *aresample = inlink->dst->priv;
|
AResampleContext *aresample = inlink->dst->priv;
|
||||||
const int n_in = insamplesref->audio->nb_samples;
|
const int n_in = insamplesref->audio->nb_samples;
|
||||||
int n_out = n_in * aresample->ratio * 2 ;
|
int n_out = n_in * aresample->ratio * 2 ;
|
||||||
AVFilterLink *const outlink = inlink->dst->outputs[0];
|
AVFilterLink *const outlink = inlink->dst->outputs[0];
|
||||||
AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out);
|
AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out);
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
|
||||||
avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
|
avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
|
||||||
@ -193,15 +194,16 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref
|
|||||||
if (n_out <= 0) {
|
if (n_out <= 0) {
|
||||||
avfilter_unref_buffer(outsamplesref);
|
avfilter_unref_buffer(outsamplesref);
|
||||||
avfilter_unref_buffer(insamplesref);
|
avfilter_unref_buffer(insamplesref);
|
||||||
return;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
outsamplesref->audio->sample_rate = outlink->sample_rate;
|
outsamplesref->audio->sample_rate = outlink->sample_rate;
|
||||||
outsamplesref->audio->nb_samples = n_out;
|
outsamplesref->audio->nb_samples = n_out;
|
||||||
|
|
||||||
ff_filter_samples(outlink, outsamplesref);
|
ret = ff_filter_samples(outlink, outsamplesref);
|
||||||
aresample->req_fullfilled= 1;
|
aresample->req_fullfilled= 1;
|
||||||
avfilter_unref_buffer(insamplesref);
|
avfilter_unref_buffer(insamplesref);
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int request_frame(AVFilterLink *outlink)
|
static int request_frame(AVFilterLink *outlink)
|
||||||
|
@ -131,7 +131,7 @@ static int push_samples(AVFilterLink *outlink)
|
|||||||
return nb_out_samples;
|
return nb_out_samples;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
||||||
{
|
{
|
||||||
AVFilterContext *ctx = inlink->dst;
|
AVFilterContext *ctx = inlink->dst;
|
||||||
ASNSContext *asns = ctx->priv;
|
ASNSContext *asns = ctx->priv;
|
||||||
@ -145,7 +145,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
|||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
av_log(ctx, AV_LOG_ERROR,
|
av_log(ctx, AV_LOG_ERROR,
|
||||||
"Stretching audio fifo failed, discarded %d samples\n", nb_samples);
|
"Stretching audio fifo failed, discarded %d samples\n", nb_samples);
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
av_audio_fifo_write(asns->fifo, (void **)insamples->extended_data, nb_samples);
|
av_audio_fifo_write(asns->fifo, (void **)insamples->extended_data, nb_samples);
|
||||||
@ -155,6 +155,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
|||||||
|
|
||||||
if (av_audio_fifo_size(asns->fifo) >= asns->nb_out_samples)
|
if (av_audio_fifo_size(asns->fifo) >= asns->nb_out_samples)
|
||||||
push_samples(outlink);
|
push_samples(outlink);
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int request_frame(AVFilterLink *outlink)
|
static int request_frame(AVFilterLink *outlink)
|
||||||
|
@ -40,7 +40,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
|
||||||
{
|
{
|
||||||
AVFilterContext *ctx = inlink->dst;
|
AVFilterContext *ctx = inlink->dst;
|
||||||
ShowInfoContext *showinfo = ctx->priv;
|
ShowInfoContext *showinfo = ctx->priv;
|
||||||
@ -83,7 +83,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
|
|||||||
av_log(ctx, AV_LOG_INFO, "]\n");
|
av_log(ctx, AV_LOG_INFO, "]\n");
|
||||||
|
|
||||||
showinfo->frame++;
|
showinfo->frame++;
|
||||||
ff_filter_samples(inlink->dst->outputs[0], samplesref);
|
return ff_filter_samples(inlink->dst->outputs[0], samplesref);
|
||||||
}
|
}
|
||||||
|
|
||||||
AVFilter avfilter_af_ashowinfo = {
|
AVFilter avfilter_af_ashowinfo = {
|
||||||
|
@ -107,11 +107,12 @@ static int config_output(AVFilterLink *outlink)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void send_out(AVFilterContext *ctx, int out_id)
|
static int send_out(AVFilterContext *ctx, int out_id)
|
||||||
{
|
{
|
||||||
AStreamSyncContext *as = ctx->priv;
|
AStreamSyncContext *as = ctx->priv;
|
||||||
struct buf_queue *queue = &as->queue[out_id];
|
struct buf_queue *queue = &as->queue[out_id];
|
||||||
AVFilterBufferRef *buf = queue->buf[queue->tail];
|
AVFilterBufferRef *buf = queue->buf[queue->tail];
|
||||||
|
int ret;
|
||||||
|
|
||||||
queue->buf[queue->tail] = NULL;
|
queue->buf[queue->tail] = NULL;
|
||||||
as->var_values[VAR_B1 + out_id]++;
|
as->var_values[VAR_B1 + out_id]++;
|
||||||
@ -121,11 +122,12 @@ static void send_out(AVFilterContext *ctx, int out_id)
|
|||||||
av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
|
av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
|
||||||
as->var_values[VAR_T1 + out_id] += buf->audio->nb_samples /
|
as->var_values[VAR_T1 + out_id] += buf->audio->nb_samples /
|
||||||
(double)ctx->inputs[out_id]->sample_rate;
|
(double)ctx->inputs[out_id]->sample_rate;
|
||||||
ff_filter_samples(ctx->outputs[out_id], buf);
|
ret = ff_filter_samples(ctx->outputs[out_id], buf);
|
||||||
queue->nb--;
|
queue->nb--;
|
||||||
queue->tail = (queue->tail + 1) % QUEUE_SIZE;
|
queue->tail = (queue->tail + 1) % QUEUE_SIZE;
|
||||||
if (as->req[out_id])
|
if (as->req[out_id])
|
||||||
as->req[out_id]--;
|
as->req[out_id]--;
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void send_next(AVFilterContext *ctx)
|
static void send_next(AVFilterContext *ctx)
|
||||||
@ -165,7 +167,7 @@ static int request_frame(AVFilterLink *outlink)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
||||||
{
|
{
|
||||||
AVFilterContext *ctx = inlink->dst;
|
AVFilterContext *ctx = inlink->dst;
|
||||||
AStreamSyncContext *as = ctx->priv;
|
AStreamSyncContext *as = ctx->priv;
|
||||||
@ -175,6 +177,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
|||||||
insamples;
|
insamples;
|
||||||
as->eof &= ~(1 << id);
|
as->eof &= ~(1 << id);
|
||||||
send_next(ctx);
|
send_next(ctx);
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
AVFilter avfilter_af_astreamsync = {
|
AVFilter avfilter_af_astreamsync = {
|
||||||
|
@ -37,6 +37,9 @@ typedef struct ASyncContext {
|
|||||||
int resample;
|
int resample;
|
||||||
float min_delta_sec;
|
float min_delta_sec;
|
||||||
int max_comp;
|
int max_comp;
|
||||||
|
|
||||||
|
/* set by filter_samples() to signal an output frame to request_frame() */
|
||||||
|
int got_output;
|
||||||
} ASyncContext;
|
} ASyncContext;
|
||||||
|
|
||||||
#define OFFSET(x) offsetof(ASyncContext, x)
|
#define OFFSET(x) offsetof(ASyncContext, x)
|
||||||
@ -112,9 +115,13 @@ static int request_frame(AVFilterLink *link)
|
|||||||
{
|
{
|
||||||
AVFilterContext *ctx = link->src;
|
AVFilterContext *ctx = link->src;
|
||||||
ASyncContext *s = ctx->priv;
|
ASyncContext *s = ctx->priv;
|
||||||
int ret = ff_request_frame(ctx->inputs[0]);
|
int ret = 0;
|
||||||
int nb_samples;
|
int nb_samples;
|
||||||
|
|
||||||
|
s->got_output = 0;
|
||||||
|
while (ret >= 0 && !s->got_output)
|
||||||
|
ret = ff_request_frame(ctx->inputs[0]);
|
||||||
|
|
||||||
/* flush the fifo */
|
/* flush the fifo */
|
||||||
if (ret == AVERROR_EOF && (nb_samples = avresample_get_delay(s->avr))) {
|
if (ret == AVERROR_EOF && (nb_samples = avresample_get_delay(s->avr))) {
|
||||||
AVFilterBufferRef *buf = ff_get_audio_buffer(link, AV_PERM_WRITE,
|
AVFilterBufferRef *buf = ff_get_audio_buffer(link, AV_PERM_WRITE,
|
||||||
@ -124,18 +131,18 @@ static int request_frame(AVFilterLink *link)
|
|||||||
avresample_convert(s->avr, (void**)buf->extended_data, buf->linesize[0],
|
avresample_convert(s->avr, (void**)buf->extended_data, buf->linesize[0],
|
||||||
nb_samples, NULL, 0, 0);
|
nb_samples, NULL, 0, 0);
|
||||||
buf->pts = s->pts;
|
buf->pts = s->pts;
|
||||||
ff_filter_samples(link, buf);
|
return ff_filter_samples(link, buf);
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void write_to_fifo(ASyncContext *s, AVFilterBufferRef *buf)
|
static int write_to_fifo(ASyncContext *s, AVFilterBufferRef *buf)
|
||||||
{
|
{
|
||||||
avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data,
|
int ret = avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data,
|
||||||
buf->linesize[0], buf->audio->nb_samples);
|
buf->linesize[0], buf->audio->nb_samples);
|
||||||
avfilter_unref_buffer(buf);
|
avfilter_unref_buffer(buf);
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* get amount of data currently buffered, in samples */
|
/* get amount of data currently buffered, in samples */
|
||||||
@ -144,7 +151,7 @@ static int64_t get_delay(ASyncContext *s)
|
|||||||
return avresample_available(s->avr) + avresample_get_delay(s->avr);
|
return avresample_available(s->avr) + avresample_get_delay(s->avr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
||||||
{
|
{
|
||||||
AVFilterContext *ctx = inlink->dst;
|
AVFilterContext *ctx = inlink->dst;
|
||||||
ASyncContext *s = ctx->priv;
|
ASyncContext *s = ctx->priv;
|
||||||
@ -152,7 +159,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
|||||||
int nb_channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
|
int nb_channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
|
||||||
int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts :
|
int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts :
|
||||||
av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
|
av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
|
||||||
int out_size;
|
int out_size, ret;
|
||||||
int64_t delta;
|
int64_t delta;
|
||||||
|
|
||||||
/* buffer data until we get the first timestamp */
|
/* buffer data until we get the first timestamp */
|
||||||
@ -160,14 +167,12 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
|||||||
if (pts != AV_NOPTS_VALUE) {
|
if (pts != AV_NOPTS_VALUE) {
|
||||||
s->pts = pts - get_delay(s);
|
s->pts = pts - get_delay(s);
|
||||||
}
|
}
|
||||||
write_to_fifo(s, buf);
|
return write_to_fifo(s, buf);
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* now wait for the next timestamp */
|
/* now wait for the next timestamp */
|
||||||
if (pts == AV_NOPTS_VALUE) {
|
if (pts == AV_NOPTS_VALUE) {
|
||||||
write_to_fifo(s, buf);
|
return write_to_fifo(s, buf);
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* when we have two timestamps, compute how many samples would we have
|
/* when we have two timestamps, compute how many samples would we have
|
||||||
@ -190,8 +195,10 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
|||||||
if (out_size > 0) {
|
if (out_size > 0) {
|
||||||
AVFilterBufferRef *buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
|
AVFilterBufferRef *buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
|
||||||
out_size);
|
out_size);
|
||||||
if (!buf_out)
|
if (!buf_out) {
|
||||||
return;
|
ret = AVERROR(ENOMEM);
|
||||||
|
goto fail;
|
||||||
|
}
|
||||||
|
|
||||||
avresample_read(s->avr, (void**)buf_out->extended_data, out_size);
|
avresample_read(s->avr, (void**)buf_out->extended_data, out_size);
|
||||||
buf_out->pts = s->pts;
|
buf_out->pts = s->pts;
|
||||||
@ -200,7 +207,10 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
|||||||
av_samples_set_silence(buf_out->extended_data, out_size - delta,
|
av_samples_set_silence(buf_out->extended_data, out_size - delta,
|
||||||
delta, nb_channels, buf->format);
|
delta, nb_channels, buf->format);
|
||||||
}
|
}
|
||||||
ff_filter_samples(outlink, buf_out);
|
ret = ff_filter_samples(outlink, buf_out);
|
||||||
|
if (ret < 0)
|
||||||
|
goto fail;
|
||||||
|
s->got_output = 1;
|
||||||
} else {
|
} else {
|
||||||
av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping "
|
av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping "
|
||||||
"whole buffer.\n");
|
"whole buffer.\n");
|
||||||
@ -210,9 +220,13 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
|||||||
avresample_read(s->avr, NULL, avresample_available(s->avr));
|
avresample_read(s->avr, NULL, avresample_available(s->avr));
|
||||||
|
|
||||||
s->pts = pts - avresample_get_delay(s->avr);
|
s->pts = pts - avresample_get_delay(s->avr);
|
||||||
avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data,
|
ret = avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data,
|
||||||
buf->linesize[0], buf->audio->nb_samples);
|
buf->linesize[0], buf->audio->nb_samples);
|
||||||
|
|
||||||
|
fail:
|
||||||
avfilter_unref_buffer(buf);
|
avfilter_unref_buffer(buf);
|
||||||
|
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
AVFilter avfilter_af_asyncts = {
|
AVFilter avfilter_af_asyncts = {
|
||||||
|
@ -1040,7 +1040,7 @@ static void push_samples(ATempoContext *atempo,
|
|||||||
atempo->nsamples_out += n_out;
|
atempo->nsamples_out += n_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink,
|
static int filter_samples(AVFilterLink *inlink,
|
||||||
AVFilterBufferRef *src_buffer)
|
AVFilterBufferRef *src_buffer)
|
||||||
{
|
{
|
||||||
AVFilterContext *ctx = inlink->dst;
|
AVFilterContext *ctx = inlink->dst;
|
||||||
@ -1074,6 +1074,7 @@ static void filter_samples(AVFilterLink *inlink,
|
|||||||
|
|
||||||
atempo->nsamples_in += n_in;
|
atempo->nsamples_in += n_in;
|
||||||
avfilter_unref_bufferp(&src_buffer);
|
avfilter_unref_bufferp(&src_buffer);
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int request_frame(AVFilterLink *outlink)
|
static int request_frame(AVFilterLink *outlink)
|
||||||
|
@ -313,7 +313,7 @@ static int channelmap_query_formats(AVFilterContext *ctx)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
static int channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
||||||
{
|
{
|
||||||
AVFilterContext *ctx = inlink->dst;
|
AVFilterContext *ctx = inlink->dst;
|
||||||
AVFilterLink *outlink = ctx->outputs[0];
|
AVFilterLink *outlink = ctx->outputs[0];
|
||||||
@ -330,8 +330,10 @@ static void channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *b
|
|||||||
if (nch_out > FF_ARRAY_ELEMS(buf->data)) {
|
if (nch_out > FF_ARRAY_ELEMS(buf->data)) {
|
||||||
uint8_t **new_extended_data =
|
uint8_t **new_extended_data =
|
||||||
av_mallocz(nch_out * sizeof(*buf->extended_data));
|
av_mallocz(nch_out * sizeof(*buf->extended_data));
|
||||||
if (!new_extended_data)
|
if (!new_extended_data) {
|
||||||
return;
|
avfilter_unref_buffer(buf);
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
|
}
|
||||||
if (buf->extended_data == buf->data) {
|
if (buf->extended_data == buf->data) {
|
||||||
buf->extended_data = new_extended_data;
|
buf->extended_data = new_extended_data;
|
||||||
} else {
|
} else {
|
||||||
@ -353,7 +355,7 @@ static void channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *b
|
|||||||
memcpy(buf->data, buf->extended_data,
|
memcpy(buf->data, buf->extended_data,
|
||||||
FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0]));
|
FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0]));
|
||||||
|
|
||||||
ff_filter_samples(outlink, buf);
|
return ff_filter_samples(outlink, buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int channelmap_config_input(AVFilterLink *inlink)
|
static int channelmap_config_input(AVFilterLink *inlink)
|
||||||
|
@ -105,24 +105,29 @@ static int query_formats(AVFilterContext *ctx)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
||||||
{
|
{
|
||||||
AVFilterContext *ctx = inlink->dst;
|
AVFilterContext *ctx = inlink->dst;
|
||||||
int i;
|
int i, ret = 0;
|
||||||
|
|
||||||
for (i = 0; i < ctx->nb_outputs; i++) {
|
for (i = 0; i < ctx->nb_outputs; i++) {
|
||||||
AVFilterBufferRef *buf_out = avfilter_ref_buffer(buf, ~AV_PERM_WRITE);
|
AVFilterBufferRef *buf_out = avfilter_ref_buffer(buf, ~AV_PERM_WRITE);
|
||||||
|
|
||||||
if (!buf_out)
|
if (!buf_out) {
|
||||||
return;
|
ret = AVERROR(ENOMEM);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
|
buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
|
||||||
buf_out->audio->channel_layout =
|
buf_out->audio->channel_layout =
|
||||||
av_channel_layout_extract_channel(buf->audio->channel_layout, i);
|
av_channel_layout_extract_channel(buf->audio->channel_layout, i);
|
||||||
|
|
||||||
ff_filter_samples(ctx->outputs[i], buf_out);
|
ret = ff_filter_samples(ctx->outputs[i], buf_out);
|
||||||
|
if (ret < 0)
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
avfilter_unref_buffer(buf);
|
avfilter_unref_buffer(buf);
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
AVFilter avfilter_af_channelsplit = {
|
AVFilter avfilter_af_channelsplit = {
|
||||||
|
@ -120,13 +120,15 @@ static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, in
|
|||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
||||||
{
|
{
|
||||||
AVFilterLink *outlink = inlink->dst->outputs[0];
|
AVFilterLink *outlink = inlink->dst->outputs[0];
|
||||||
int16_t *taps, *endin, *in, *out;
|
int16_t *taps, *endin, *in, *out;
|
||||||
AVFilterBufferRef *outsamples =
|
AVFilterBufferRef *outsamples =
|
||||||
ff_get_audio_buffer(inlink, AV_PERM_WRITE,
|
ff_get_audio_buffer(inlink, AV_PERM_WRITE,
|
||||||
insamples->audio->nb_samples);
|
insamples->audio->nb_samples);
|
||||||
|
int ret;
|
||||||
|
|
||||||
avfilter_copy_buffer_ref_props(outsamples, insamples);
|
avfilter_copy_buffer_ref_props(outsamples, insamples);
|
||||||
|
|
||||||
taps = ((EarwaxContext *)inlink->dst->priv)->taps;
|
taps = ((EarwaxContext *)inlink->dst->priv)->taps;
|
||||||
@ -144,8 +146,9 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
|||||||
// save part of input for next round
|
// save part of input for next round
|
||||||
memcpy(taps, endin, NUMTAPS * sizeof(*taps));
|
memcpy(taps, endin, NUMTAPS * sizeof(*taps));
|
||||||
|
|
||||||
ff_filter_samples(outlink, outsamples);
|
ret = ff_filter_samples(outlink, outsamples);
|
||||||
avfilter_unref_buffer(insamples);
|
avfilter_unref_buffer(insamples);
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
AVFilter avfilter_af_earwax = {
|
AVFilter avfilter_af_earwax = {
|
||||||
|
@ -92,7 +92,7 @@ static const AVClass join_class = {
|
|||||||
.version = LIBAVUTIL_VERSION_INT,
|
.version = LIBAVUTIL_VERSION_INT,
|
||||||
};
|
};
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
|
static int filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
|
||||||
{
|
{
|
||||||
AVFilterContext *ctx = link->dst;
|
AVFilterContext *ctx = link->dst;
|
||||||
JoinContext *s = ctx->priv;
|
JoinContext *s = ctx->priv;
|
||||||
@ -104,6 +104,8 @@ static void filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
|
|||||||
av_assert0(i < ctx->nb_inputs);
|
av_assert0(i < ctx->nb_inputs);
|
||||||
av_assert0(!s->input_frames[i]);
|
av_assert0(!s->input_frames[i]);
|
||||||
s->input_frames[i] = buf;
|
s->input_frames[i] = buf;
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int parse_maps(AVFilterContext *ctx)
|
static int parse_maps(AVFilterContext *ctx)
|
||||||
@ -468,11 +470,11 @@ static int join_request_frame(AVFilterLink *outlink)
|
|||||||
priv->nb_in_buffers = ctx->nb_inputs;
|
priv->nb_in_buffers = ctx->nb_inputs;
|
||||||
buf->buf->priv = priv;
|
buf->buf->priv = priv;
|
||||||
|
|
||||||
ff_filter_samples(outlink, buf);
|
ret = ff_filter_samples(outlink, buf);
|
||||||
|
|
||||||
memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs);
|
memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs);
|
||||||
|
|
||||||
return 0;
|
return ret;
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
avfilter_unref_buffer(buf);
|
avfilter_unref_buffer(buf);
|
||||||
|
@ -343,8 +343,9 @@ static int config_props(AVFilterLink *link)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
||||||
{
|
{
|
||||||
|
int ret;
|
||||||
int n = insamples->audio->nb_samples;
|
int n = insamples->audio->nb_samples;
|
||||||
AVFilterLink *const outlink = inlink->dst->outputs[0];
|
AVFilterLink *const outlink = inlink->dst->outputs[0];
|
||||||
AVFilterBufferRef *outsamples = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);
|
AVFilterBufferRef *outsamples = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);
|
||||||
@ -354,8 +355,9 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
|||||||
avfilter_copy_buffer_ref_props(outsamples, insamples);
|
avfilter_copy_buffer_ref_props(outsamples, insamples);
|
||||||
outsamples->audio->channel_layout = outlink->channel_layout;
|
outsamples->audio->channel_layout = outlink->channel_layout;
|
||||||
|
|
||||||
ff_filter_samples(outlink, outsamples);
|
ret = ff_filter_samples(outlink, outsamples);
|
||||||
avfilter_unref_buffer(insamples);
|
avfilter_unref_buffer(insamples);
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static av_cold void uninit(AVFilterContext *ctx)
|
static av_cold void uninit(AVFilterContext *ctx)
|
||||||
|
@ -38,6 +38,9 @@ typedef struct ResampleContext {
|
|||||||
AVAudioResampleContext *avr;
|
AVAudioResampleContext *avr;
|
||||||
|
|
||||||
int64_t next_pts;
|
int64_t next_pts;
|
||||||
|
|
||||||
|
/* set by filter_samples() to signal an output frame to request_frame() */
|
||||||
|
int got_output;
|
||||||
} ResampleContext;
|
} ResampleContext;
|
||||||
|
|
||||||
static av_cold void uninit(AVFilterContext *ctx)
|
static av_cold void uninit(AVFilterContext *ctx)
|
||||||
@ -102,12 +105,6 @@ static int config_output(AVFilterLink *outlink)
|
|||||||
av_opt_set_int(s->avr, "in_sample_rate", inlink ->sample_rate, 0);
|
av_opt_set_int(s->avr, "in_sample_rate", inlink ->sample_rate, 0);
|
||||||
av_opt_set_int(s->avr, "out_sample_rate", outlink->sample_rate, 0);
|
av_opt_set_int(s->avr, "out_sample_rate", outlink->sample_rate, 0);
|
||||||
|
|
||||||
/* if both the input and output formats are s16 or u8, use s16 as
|
|
||||||
the internal sample format */
|
|
||||||
if (av_get_bytes_per_sample(inlink->format) <= 2 &&
|
|
||||||
av_get_bytes_per_sample(outlink->format) <= 2)
|
|
||||||
av_opt_set_int(s->avr, "internal_sample_fmt", AV_SAMPLE_FMT_S16P, 0);
|
|
||||||
|
|
||||||
if ((ret = avresample_open(s->avr)) < 0)
|
if ((ret = avresample_open(s->avr)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
@ -130,7 +127,11 @@ static int request_frame(AVFilterLink *outlink)
|
|||||||
{
|
{
|
||||||
AVFilterContext *ctx = outlink->src;
|
AVFilterContext *ctx = outlink->src;
|
||||||
ResampleContext *s = ctx->priv;
|
ResampleContext *s = ctx->priv;
|
||||||
int ret = ff_request_frame(ctx->inputs[0]);
|
int ret = 0;
|
||||||
|
|
||||||
|
s->got_output = 0;
|
||||||
|
while (ret >= 0 && !s->got_output)
|
||||||
|
ret = ff_request_frame(ctx->inputs[0]);
|
||||||
|
|
||||||
/* flush the lavr delay buffer */
|
/* flush the lavr delay buffer */
|
||||||
if (ret == AVERROR_EOF && s->avr) {
|
if (ret == AVERROR_EOF && s->avr) {
|
||||||
@ -156,21 +157,21 @@ static int request_frame(AVFilterLink *outlink)
|
|||||||
}
|
}
|
||||||
|
|
||||||
buf->pts = s->next_pts;
|
buf->pts = s->next_pts;
|
||||||
ff_filter_samples(outlink, buf);
|
return ff_filter_samples(outlink, buf);
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
||||||
{
|
{
|
||||||
AVFilterContext *ctx = inlink->dst;
|
AVFilterContext *ctx = inlink->dst;
|
||||||
ResampleContext *s = ctx->priv;
|
ResampleContext *s = ctx->priv;
|
||||||
AVFilterLink *outlink = ctx->outputs[0];
|
AVFilterLink *outlink = ctx->outputs[0];
|
||||||
|
int ret;
|
||||||
|
|
||||||
if (s->avr) {
|
if (s->avr) {
|
||||||
AVFilterBufferRef *buf_out;
|
AVFilterBufferRef *buf_out;
|
||||||
int delay, nb_samples, ret;
|
int delay, nb_samples;
|
||||||
|
|
||||||
/* maximum possible samples lavr can output */
|
/* maximum possible samples lavr can output */
|
||||||
delay = avresample_get_delay(s->avr);
|
delay = avresample_get_delay(s->avr);
|
||||||
@ -179,10 +180,19 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
|||||||
AV_ROUND_UP);
|
AV_ROUND_UP);
|
||||||
|
|
||||||
buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
|
buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
|
||||||
|
if (!buf_out) {
|
||||||
|
ret = AVERROR(ENOMEM);
|
||||||
|
goto fail;
|
||||||
|
}
|
||||||
|
|
||||||
ret = avresample_convert(s->avr, (void**)buf_out->extended_data,
|
ret = avresample_convert(s->avr, (void**)buf_out->extended_data,
|
||||||
buf_out->linesize[0], nb_samples,
|
buf_out->linesize[0], nb_samples,
|
||||||
(void**)buf->extended_data, buf->linesize[0],
|
(void**)buf->extended_data, buf->linesize[0],
|
||||||
buf->audio->nb_samples);
|
buf->audio->nb_samples);
|
||||||
|
if (ret < 0) {
|
||||||
|
avfilter_unref_buffer(buf_out);
|
||||||
|
goto fail;
|
||||||
|
}
|
||||||
|
|
||||||
av_assert0(!avresample_available(s->avr));
|
av_assert0(!avresample_available(s->avr));
|
||||||
|
|
||||||
@ -208,11 +218,18 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
|
|||||||
|
|
||||||
s->next_pts = buf_out->pts + buf_out->audio->nb_samples;
|
s->next_pts = buf_out->pts + buf_out->audio->nb_samples;
|
||||||
|
|
||||||
ff_filter_samples(outlink, buf_out);
|
ret = ff_filter_samples(outlink, buf_out);
|
||||||
|
s->got_output = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fail:
|
||||||
avfilter_unref_buffer(buf);
|
avfilter_unref_buffer(buf);
|
||||||
} else
|
} else {
|
||||||
ff_filter_samples(outlink, buf);
|
ret = ff_filter_samples(outlink, buf);
|
||||||
|
s->got_output = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
AVFilter avfilter_af_resample = {
|
AVFilter avfilter_af_resample = {
|
||||||
|
@ -78,7 +78,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
SilenceDetectContext *silence = inlink->dst->priv;
|
SilenceDetectContext *silence = inlink->dst->priv;
|
||||||
@ -118,7 +118,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ff_filter_samples(inlink->dst->outputs[0], insamples);
|
return ff_filter_samples(inlink->dst->outputs[0], insamples);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int query_formats(AVFilterContext *ctx)
|
static int query_formats(AVFilterContext *ctx)
|
||||||
|
@ -110,7 +110,7 @@ static int query_formats(AVFilterContext *ctx)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
||||||
{
|
{
|
||||||
VolumeContext *vol = inlink->dst->priv;
|
VolumeContext *vol = inlink->dst->priv;
|
||||||
AVFilterLink *outlink = inlink->dst->outputs[0];
|
AVFilterLink *outlink = inlink->dst->outputs[0];
|
||||||
@ -169,7 +169,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ff_filter_samples(outlink, insamples);
|
return ff_filter_samples(outlink, insamples);
|
||||||
}
|
}
|
||||||
|
|
||||||
AVFilter avfilter_af_volume = {
|
AVFilter avfilter_af_volume = {
|
||||||
|
@ -21,7 +21,10 @@
|
|||||||
#include "avfilter.h"
|
#include "avfilter.h"
|
||||||
#include "internal.h"
|
#include "internal.h"
|
||||||
|
|
||||||
static void null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref) { }
|
static int null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
AVFilter avfilter_asink_anullsink = {
|
AVFilter avfilter_asink_anullsink = {
|
||||||
.name = "anullsink",
|
.name = "anullsink",
|
||||||
|
@ -150,19 +150,19 @@ fail:
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void default_filter_samples(AVFilterLink *link,
|
static int default_filter_samples(AVFilterLink *link,
|
||||||
AVFilterBufferRef *samplesref)
|
AVFilterBufferRef *samplesref)
|
||||||
{
|
{
|
||||||
-    ff_filter_samples(link->dst->outputs[0], samplesref);
+    return ff_filter_samples(link->dst->outputs[0], samplesref);
 }
 
-void ff_filter_samples_framed(AVFilterLink *link,
-                              AVFilterBufferRef *samplesref)
+int ff_filter_samples_framed(AVFilterLink *link, AVFilterBufferRef *samplesref)
 {
-    void (*filter_samples)(AVFilterLink *, AVFilterBufferRef *);
+    int (*filter_samples)(AVFilterLink *, AVFilterBufferRef *);
     AVFilterPad *dst = link->dstpad;
     int64_t pts;
     AVFilterBufferRef *buf_out;
+    int ret;
 
     FF_TPRINTF_START(NULL, filter_samples); ff_tlog_link(NULL, link, 1);
 
@@ -193,21 +193,22 @@ void ff_filter_samples_framed(AVFilterLink *link,
 
     link->cur_buf = buf_out;
     pts = buf_out->pts;
-    filter_samples(link, buf_out);
+    ret = filter_samples(link, buf_out);
     ff_update_link_current_pts(link, pts);
+    return ret;
 }
 
-void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
+int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
 {
     int insamples = samplesref->audio->nb_samples, inpos = 0, nb_samples;
     AVFilterBufferRef *pbuf = link->partial_buf;
     int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
+    int ret = 0;
 
     if (!link->min_samples ||
         (!pbuf &&
          insamples >= link->min_samples && insamples <= link->max_samples)) {
-        ff_filter_samples_framed(link, samplesref);
-        return;
+        return ff_filter_samples_framed(link, samplesref);
     }
     /* Handle framing (min_samples, max_samples) */
     while (insamples) {
@@ -218,7 +219,7 @@ void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
         if (!pbuf) {
             av_log(link->dst, AV_LOG_WARNING,
                    "Samples dropped due to memory allocation failure.\n");
-            return;
+            return 0;
         }
         avfilter_copy_buffer_ref_props(pbuf, samplesref);
         pbuf->pts = samplesref->pts +
@@ -234,10 +235,11 @@ void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
         insamples -= nb_samples;
         pbuf->audio->nb_samples += nb_samples;
         if (pbuf->audio->nb_samples >= link->min_samples) {
-            ff_filter_samples_framed(link, pbuf);
+            ret = ff_filter_samples_framed(link, pbuf);
             pbuf = NULL;
         }
     }
     avfilter_unref_buffer(samplesref);
     link->partial_buf = pbuf;
+    return ret;
 }
@@ -70,14 +70,17 @@ AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms,
  * @param samplesref a reference to the buffer of audio samples being sent. The
  *                   receiving filter will free this reference when it no longer
  *                   needs it or pass it on to the next filter.
+ *
+ * @return >= 0 on success, a negative AVERROR on error. The receiving filter
+ * is responsible for unreferencing samplesref in case of error.
  */
-void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
+int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
 
 /**
  * Send a buffer of audio samples to the next link, without checking
  * min_samples.
  */
-void ff_filter_samples_framed(AVFilterLink *link,
+int ff_filter_samples_framed(AVFilterLink *link,
                               AVFilterBufferRef *samplesref);
 
 #endif /* AVFILTER_AUDIO_H */
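
The hunks above turn ff_filter_samples(), ff_filter_samples_framed() and the filter_samples() pad callback from void into int so that allocation and filtering failures can travel back up the chain. As a rough sketch of the new contract (illustrative only, not code from this commit; the pass-through filter shown is hypothetical), a filter now simply returns whatever forwarding the buffer returns:

/* Hypothetical pass-through audio filter illustrating the int-returning
 * filter_samples() contract introduced above (sketch, not from the patch). */
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];

    /* ... process insamples in place ... */

    /* Forward the buffer; a negative AVERROR from downstream is propagated
     * unchanged. Ownership of insamples passes to the next filter. */
    return ff_filter_samples(outlink, insamples);
}

A filter that fails before handing the buffer on is expected to unreference it itself, per the new doxygen above.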
@@ -180,7 +180,7 @@ static int request_frame(AVFilterLink *outlink)
 
 #define MAX_INT16 ((1<<15) -1)
 
-static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
 {
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
@@ -225,6 +225,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
     }
 
     avfilter_unref_buffer(insamples);
+    return 0;
 }
 
 AVFilter avfilter_avf_showwaves = {
@@ -301,8 +301,12 @@ struct AVFilterPad {
      * and should do its processing.
      *
      * Input audio pads only.
+     *
+     * @return >= 0 on success, a negative AVERROR on error. This function
+     * must ensure that samplesref is properly unreferenced on error if it
+     * hasn't been passed on to another filter.
      */
-    void (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
+    int (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
 
     /**
      * Frame poll callback. This returns the number of immediately available
@@ -56,6 +56,12 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *buf)
     link->cur_buf = NULL;
 };
 
+static int filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
+{
+    start_frame(link, buf);
+    return 0;
+}
+
 int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
 {
     BufferSinkContext *s = ctx->priv;
@@ -160,7 +166,7 @@ AVFilter avfilter_asink_abuffer = {
 
     .inputs = (AVFilterPad[]) {{ .name           = "default",
                                  .type           = AVMEDIA_TYPE_AUDIO,
-                                 .filter_samples = start_frame,
+                                 .filter_samples = filter_samples,
                                  .min_perms      = AV_PERM_READ,
                                  .needs_fifo     = 1 },
                                { .name = NULL }},
@@ -408,6 +408,7 @@ static int request_frame(AVFilterLink *link)
 {
     BufferSourceContext *c = link->src->priv;
     AVFilterBufferRef *buf;
+    int ret = 0;
 
     if (!av_fifo_size(c->fifo)) {
         if (c->eof)
@@ -424,7 +425,7 @@ static int request_frame(AVFilterLink *link)
         ff_end_frame(link);
         break;
     case AVMEDIA_TYPE_AUDIO:
-        ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
+        ret = ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
         break;
     default:
         return AVERROR(EINVAL);
@@ -432,7 +433,7 @@ static int request_frame(AVFilterLink *link)
 
     avfilter_unref_buffer(buf);
 
-    return 0;
+    return ret;
 }
 
 static int poll_frame(AVFilterLink *link)
@@ -117,7 +117,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
     ff_start_frame(outlink, picref2);
 }
 
-static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
+static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
 {
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
@@ -132,7 +132,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
         avfilter_unref_buffer(insamples);
     }
 
-    ff_filter_samples(outlink, outsamples);
+    return ff_filter_samples(outlink, outsamples);
 }
 
 #if CONFIG_SETTB_FILTER
@@ -72,13 +72,25 @@ static av_cold void uninit(AVFilterContext *ctx)
     avfilter_unref_buffer(fifo->buf_out);
 }
 
-static void add_to_queue(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int add_to_queue(AVFilterLink *inlink, AVFilterBufferRef *buf)
 {
     FifoContext *fifo = inlink->dst->priv;
 
     fifo->last->next = av_mallocz(sizeof(Buf));
+    if (!fifo->last->next) {
+        avfilter_unref_buffer(buf);
+        return AVERROR(ENOMEM);
+    }
+
     fifo->last = fifo->last->next;
     fifo->last->buf = buf;
+
+    return 0;
+}
+
+static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+{
+    add_to_queue(inlink, buf);
 }
 
 static void queue_pop(FifoContext *s)
@@ -210,15 +222,13 @@ static int return_audio_frame(AVFilterContext *ctx)
         buf_out = s->buf_out;
         s->buf_out = NULL;
     }
-    ff_filter_samples(link, buf_out);
+    return ff_filter_samples(link, buf_out);
 
-    return 0;
 }
 
 static int request_frame(AVFilterLink *outlink)
 {
     FifoContext *fifo = outlink->src->priv;
-    int ret;
+    int ret = 0;
 
     if (!fifo->root.next) {
         if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
@@ -238,7 +248,7 @@ static int request_frame(AVFilterLink *outlink)
         if (outlink->request_samples) {
             return return_audio_frame(outlink->src);
         } else {
-            ff_filter_samples(outlink, fifo->root.next->buf);
+            ret = ff_filter_samples(outlink, fifo->root.next->buf);
             queue_pop(fifo);
         }
         break;
@@ -246,7 +256,7 @@ static int request_frame(AVFilterLink *outlink)
         return AVERROR(EINVAL);
     }
 
-    return 0;
+    return ret;
 }
 
 AVFilter avfilter_vf_fifo = {
@@ -261,7 +271,7 @@ AVFilter avfilter_vf_fifo = {
     .inputs = (const AVFilterPad[]) {{ .name             = "default",
                                        .type             = AVMEDIA_TYPE_VIDEO,
                                        .get_video_buffer = ff_null_get_video_buffer,
-                                       .start_frame      = add_to_queue,
+                                       .start_frame      = start_frame,
                                        .draw_slice       = draw_slice,
                                        .end_frame        = end_frame,
                                        .rej_perms        = AV_PERM_REUSE2, },
@@ -135,8 +135,12 @@ struct AVFilterPad {
      * and should do its processing.
      *
      * Input audio pads only.
+     *
+     * @return >= 0 on success, a negative AVERROR on error. This function
+     * must ensure that samplesref is properly unreferenced on error if it
+     * hasn't been passed on to another filter.
      */
-    void (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
+    int (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
 
     /**
      * Frame poll callback. This returns the number of immediately available
@@ -244,9 +244,10 @@ AVFilter avfilter_vsink_buffersink = {
 
 #if CONFIG_ABUFFERSINK_FILTER
 
-static void filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
+static int filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
 {
     end_frame(link);
+    return 0;
 }
 
 static av_cold int asink_init(AVFilterContext *ctx, const char *args)
@@ -110,15 +110,19 @@ AVFilter avfilter_vf_split = {
     .outputs = (AVFilterPad[]) {{ .name = NULL}},
 };
 
-static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
+static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
 {
     AVFilterContext *ctx = inlink->dst;
-    int i;
+    int i, ret = 0;
 
-    for (i = 0; i < ctx->nb_outputs; i++)
-        ff_filter_samples(inlink->dst->outputs[i],
+    for (i = 0; i < ctx->nb_outputs; i++) {
+        ret = ff_filter_samples(inlink->dst->outputs[i],
                           avfilter_ref_buffer(samplesref, ~AV_PERM_WRITE));
+        if (ret < 0)
+            break;
+    }
     avfilter_unref_buffer(samplesref);
+    return ret;
 }
 
 AVFilter avfilter_af_asplit = {
@@ -842,8 +842,11 @@ static int ebml_parse_id(MatroskaDemuxContext *matroska, EbmlSyntax *syntax,
         matroska->num_levels > 0 &&
         matroska->levels[matroska->num_levels-1].length == 0xffffffffffffff)
         return 0;  // we reached the end of an unknown size cluster
-    if (!syntax[i].id && id != EBML_ID_VOID && id != EBML_ID_CRC32)
+    if (!syntax[i].id && id != EBML_ID_VOID && id != EBML_ID_CRC32) {
         av_log(matroska->ctx, AV_LOG_INFO, "Unknown entry 0x%X\n", id);
+        if (matroska->ctx->error_recognition & AV_EF_EXPLODE)
+            return AVERROR_INVALIDDATA;
+    }
     return ebml_parse_elem(matroska, &syntax[i], data);
 }
 
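
With this hunk the Matroska demuxer honors error_recognition for unknown EBML IDs: they are still logged, but with AV_EF_EXPLODE set they abort parsing with AVERROR_INVALIDDATA. A caller can request that behavior roughly as follows (sketch against the lavf API of this period; the helper name is made up):

#include <libavformat/avformat.h>

/* Sketch: open an input with strict error detection so that unknown EBML
 * entries fail hard instead of only being logged. */
static int open_input_strict(AVFormatContext **fmt_ctx, const char *filename)
{
    int ret;

    *fmt_ctx = avformat_alloc_context();
    if (!*fmt_ctx)
        return AVERROR(ENOMEM);
    (*fmt_ctx)->error_recognition |= AV_EF_EXPLODE;

    if ((ret = avformat_open_input(fmt_ctx, filename, NULL, NULL)) < 0)
        return ret;   /* may now be AVERROR_INVALIDDATA for unknown entries */
    return 0;
}

On the command line the same switch is reachable through -err_detect explode.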
@@ -43,7 +43,7 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
     char buf[256];
     int ret;
     socklen_t optlen;
-    int timeout = 50;
+    int timeout = 50, listen_timeout = -1;
     char hostname[1024],proto[1024],path[1024];
     char portstr[10];
 
@@ -59,6 +59,9 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
         if (av_find_info_tag(buf, sizeof(buf), "timeout", p)) {
             timeout = strtol(buf, NULL, 10);
         }
+        if (av_find_info_tag(buf, sizeof(buf), "listen_timeout", p)) {
+            listen_timeout = strtol(buf, NULL, 10);
+        }
     }
     hints.ai_family = AF_UNSPEC;
     hints.ai_socktype = SOCK_STREAM;
@@ -87,6 +90,7 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
     if (listen_socket) {
         int fd1;
         int reuse = 1;
+        struct pollfd lp = { fd, POLLIN, 0 };
         setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));
         ret = bind(fd, cur_ai->ai_addr, cur_ai->ai_addrlen);
         if (ret) {
@@ -98,6 +102,11 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
             ret = ff_neterrno();
             goto fail1;
         }
+        ret = poll(&lp, 1, listen_timeout >= 0 ? listen_timeout : -1);
+        if (ret <= 0) {
+            ret = AVERROR(ETIMEDOUT);
+            goto fail1;
+        }
         fd1 = accept(fd, NULL, NULL);
         if (fd1 < 0) {
             ret = ff_neterrno();
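
The tcp hunks add a listen_timeout URL option: when listening, the socket is poll()ed for at most listen_timeout milliseconds before accept(), and AVERROR(ETIMEDOUT) is returned if nobody connects in time. A minimal caller sketch (the port number and flag choice are just an example, not from the commit):

#include <libavformat/avformat.h>
#include <libavformat/avio.h>

/* Sketch: accept one incoming TCP connection, waiting at most 5 seconds. */
static int wait_for_client(AVIOContext **pb)
{
    return avio_open2(pb, "tcp://0.0.0.0:9999?listen=1&listen_timeout=5000",
                      AVIO_FLAG_READ_WRITE, NULL, NULL);
}

Without the option, listen_timeout stays at -1 and poll() blocks indefinitely, matching the old behavior.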
|
@ -305,6 +305,14 @@ int ff_audio_mix_init(AVAudioResampleContext *avr)
|
|||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (avr->internal_sample_fmt != AV_SAMPLE_FMT_S16P &&
|
||||||
|
avr->internal_sample_fmt != AV_SAMPLE_FMT_FLTP) {
|
||||||
|
av_log(avr, AV_LOG_ERROR, "Unsupported internal format for "
|
||||||
|
"mixing: %s\n",
|
||||||
|
av_get_sample_fmt_name(avr->internal_sample_fmt));
|
||||||
|
return AVERROR(EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
/* build matrix if the user did not already set one */
|
/* build matrix if the user did not already set one */
|
||||||
if (!avr->am->matrix) {
|
if (!avr->am->matrix) {
|
||||||
int i, j;
|
int i, j;
|
||||||
|
@@ -45,6 +45,13 @@ enum AVMixCoeffType {
     AV_MIX_COEFF_TYPE_NB,       /** Number of coeff types. Not part of ABI */
 };
 
+/** Resampling Filter Types */
+enum AVResampleFilterType {
+    AV_RESAMPLE_FILTER_TYPE_CUBIC,              /**< Cubic */
+    AV_RESAMPLE_FILTER_TYPE_BLACKMAN_NUTTALL,   /**< Blackman Nuttall Windowed Sinc */
+    AV_RESAMPLE_FILTER_TYPE_KAISER,             /**< Kaiser Windowed Sinc */
+};
+
 /**
  * Return the LIBAVRESAMPLE_VERSION_INT constant.
  */
@@ -50,6 +50,8 @@ struct AVAudioResampleContext {
     int phase_shift;                        /**< log2 of the number of entries in the resampling polyphase filterbank */
     int linear_interp;                      /**< if 1 then the resampling FIR filter will be linearly interpolated */
     double cutoff;                          /**< resampling cutoff frequency. 1.0 corresponds to half the output sample rate */
+    enum AVResampleFilterType filter_type;  /**< resampling filter type */
+    int kaiser_beta;                        /**< beta value for Kaiser window (only applicable if filter_type == AV_FILTER_TYPE_KAISER) */
 
     int in_channels;                        /**< number of input channels */
     int out_channels;                       /**< number of output channels */
@@ -39,7 +39,7 @@ static const AVOption options[] = {
     { "out_channel_layout", "Output Channel Layout", OFFSET(out_channel_layout), AV_OPT_TYPE_INT64, { 0 }, INT64_MIN, INT64_MAX, PARAM },
     { "out_sample_fmt", "Output Sample Format", OFFSET(out_sample_fmt), AV_OPT_TYPE_INT, { AV_SAMPLE_FMT_S16 }, AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_NB-1, PARAM },
     { "out_sample_rate", "Output Sample Rate", OFFSET(out_sample_rate), AV_OPT_TYPE_INT, { 48000 }, 1, INT_MAX, PARAM },
-    { "internal_sample_fmt", "Internal Sample Format", OFFSET(internal_sample_fmt), AV_OPT_TYPE_INT, { AV_SAMPLE_FMT_FLTP }, AV_SAMPLE_FMT_NONE, AV_SAMPLE_FMT_NB-1, PARAM },
+    { "internal_sample_fmt", "Internal Sample Format", OFFSET(internal_sample_fmt), AV_OPT_TYPE_INT, { AV_SAMPLE_FMT_NONE }, AV_SAMPLE_FMT_NONE, AV_SAMPLE_FMT_NB-1, PARAM },
     { "mix_coeff_type", "Mixing Coefficient Type", OFFSET(mix_coeff_type), AV_OPT_TYPE_INT, { AV_MIX_COEFF_TYPE_FLT }, AV_MIX_COEFF_TYPE_Q8, AV_MIX_COEFF_TYPE_NB-1, PARAM, "mix_coeff_type" },
     { "q8", "16-bit 8.8 Fixed-Point", 0, AV_OPT_TYPE_CONST, { AV_MIX_COEFF_TYPE_Q8 }, INT_MIN, INT_MAX, PARAM, "mix_coeff_type" },
     { "q15", "32-bit 17.15 Fixed-Point", 0, AV_OPT_TYPE_CONST, { AV_MIX_COEFF_TYPE_Q15 }, INT_MIN, INT_MAX, PARAM, "mix_coeff_type" },
@@ -56,6 +56,11 @@ static const AVOption options[] = {
     { "none", "None", 0, AV_OPT_TYPE_CONST, { AV_MATRIX_ENCODING_NONE }, INT_MIN, INT_MAX, PARAM, "matrix_encoding" },
     { "dolby", "Dolby", 0, AV_OPT_TYPE_CONST, { AV_MATRIX_ENCODING_DOLBY }, INT_MIN, INT_MAX, PARAM, "matrix_encoding" },
     { "dplii", "Dolby Pro Logic II", 0, AV_OPT_TYPE_CONST, { AV_MATRIX_ENCODING_DPLII }, INT_MIN, INT_MAX, PARAM, "matrix_encoding" },
+    { "filter_type", "Filter Type", OFFSET(filter_type), AV_OPT_TYPE_INT, { AV_RESAMPLE_FILTER_TYPE_KAISER }, AV_RESAMPLE_FILTER_TYPE_CUBIC, AV_RESAMPLE_FILTER_TYPE_KAISER, PARAM, "filter_type" },
+    { "cubic", "Cubic", 0, AV_OPT_TYPE_CONST, { AV_RESAMPLE_FILTER_TYPE_CUBIC }, INT_MIN, INT_MAX, PARAM, "filter_type" },
+    { "blackman_nuttall", "Blackman Nuttall Windowed Sinc", 0, AV_OPT_TYPE_CONST, { AV_RESAMPLE_FILTER_TYPE_BLACKMAN_NUTTALL }, INT_MIN, INT_MAX, PARAM, "filter_type" },
+    { "kaiser", "Kaiser Windowed Sinc", 0, AV_OPT_TYPE_CONST, { AV_RESAMPLE_FILTER_TYPE_KAISER }, INT_MIN, INT_MAX, PARAM, "filter_type" },
+    { "kaiser_beta", "Kaiser Window Beta", OFFSET(kaiser_beta), AV_OPT_TYPE_INT, { 9 }, 2, 16, PARAM },
     { NULL },
 };
 
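
The new options expose the resampling filter choice and the Kaiser beta through AVOptions, with Kaiser and beta 9 as the defaults. Selecting them programmatically looks roughly like this (sketch; error handling trimmed, helper name invented):

#include <libavresample/avresample.h>
#include <libavutil/opt.h>

/* Sketch: request a Kaiser-windowed sinc with a sharper beta of 12.
 * The option table above allows beta values from 2 to 16. */
static AVAudioResampleContext *alloc_kaiser_resampler(void)
{
    AVAudioResampleContext *avr = avresample_alloc_context();
    if (!avr)
        return NULL;
    av_opt_set_int(avr, "filter_type", AV_RESAMPLE_FILTER_TYPE_KAISER, 0);
    av_opt_set_int(avr, "kaiser_beta", 12, 0);
    /* in/out rates, formats and channel layouts must still be set
     * before calling avresample_open(). */
    return avr;
}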
@@ -24,37 +24,10 @@
 #include "internal.h"
 #include "audio_data.h"
 
-#ifdef CONFIG_RESAMPLE_FLT
-/* float template */
-#define FILTER_SHIFT 0
-#define FELEM        float
-#define FELEM2       float
-#define FELEML       float
-#define WINDOW_TYPE  24
-#elif defined(CONFIG_RESAMPLE_S32)
-/* s32 template */
-#define FILTER_SHIFT 30
-#define FELEM        int32_t
-#define FELEM2       int64_t
-#define FELEML       int64_t
-#define FELEM_MAX    INT32_MAX
-#define FELEM_MIN    INT32_MIN
-#define WINDOW_TYPE  12
-#else
-/* s16 template */
-#define FILTER_SHIFT 15
-#define FELEM        int16_t
-#define FELEM2       int32_t
-#define FELEML       int64_t
-#define FELEM_MAX    INT16_MAX
-#define FELEM_MIN    INT16_MIN
-#define WINDOW_TYPE  9
-#endif
-
 struct ResampleContext {
     AVAudioResampleContext *avr;
     AudioData *buffer;
-    FELEM *filter_bank;
+    uint8_t *filter_bank;
     int filter_length;
     int ideal_dst_incr;
     int dst_incr;
@@ -65,9 +38,35 @@ struct ResampleContext {
     int phase_shift;
     int phase_mask;
     int linear;
+    enum AVResampleFilterType filter_type;
+    int kaiser_beta;
     double factor;
+    void (*set_filter)(void *filter, double *tab, int phase, int tap_count);
+    void (*resample_one)(struct ResampleContext *c, int no_filter, void *dst0,
+                         int dst_index, const void *src0, int src_size,
+                         int index, int frac);
 };
 
+
+/* double template */
+#define CONFIG_RESAMPLE_DBL
+#include "resample_template.c"
+#undef CONFIG_RESAMPLE_DBL
+
+/* float template */
+#define CONFIG_RESAMPLE_FLT
+#include "resample_template.c"
+#undef CONFIG_RESAMPLE_FLT
+
+/* s32 template */
+#define CONFIG_RESAMPLE_S32
+#include "resample_template.c"
+#undef CONFIG_RESAMPLE_S32
+
+/* s16 template */
+#include "resample_template.c"
+
+
 /**
  * 0th order modified bessel function of the first kind.
  */
@@ -95,17 +94,17 @@ static double bessel(double x)
  * @param tap_count    tap count
  * @param phase_count  phase count
  * @param scale        wanted sum of coefficients for each filter
- * @param type         0->cubic
- *                     1->blackman nuttall windowed sinc
- *                     2..16->kaiser windowed sinc beta=2..16
+ * @param filter_type  filter type
+ * @param kaiser_beta  kaiser window beta
  * @return 0 on success, negative AVERROR code on failure
  */
-static int build_filter(FELEM *filter, double factor, int tap_count,
-                        int phase_count, int scale, int type)
+static int build_filter(ResampleContext *c)
 {
     int ph, i;
-    double x, y, w;
+    double x, y, w, factor;
     double *tab;
+    int tap_count   = c->filter_length;
+    int phase_count = 1 << c->phase_shift;
     const int center = (tap_count - 1) / 2;
 
     tab = av_malloc(tap_count * sizeof(*tab));
@@ -113,8 +112,7 @@ static int build_filter(FELEM *filter, double factor, int tap_count,
         return AVERROR(ENOMEM);
 
     /* if upsampling, only need to interpolate, no filter */
-    if (factor > 1.0)
-        factor = 1.0;
+    factor = FFMIN(c->factor, 1.0);
 
     for (ph = 0; ph < phase_count; ph++) {
         double norm = 0;
@@ -122,39 +120,34 @@ static int build_filter(FELEM *filter, double factor, int tap_count,
             x = M_PI * ((double)(i - center) - (double)ph / phase_count) * factor;
             if (x == 0) y = 1.0;
             else        y = sin(x) / x;
-            switch (type) {
-            case 0: {
+            switch (c->filter_type) {
+            case AV_RESAMPLE_FILTER_TYPE_CUBIC: {
                 const float d = -0.5; //first order derivative = -0.5
                 x = fabs(((double)(i - center) - (double)ph / phase_count) * factor);
                 if (x < 1.0) y = 1 - 3 * x*x + 2 * x*x*x + d * ( -x*x + x*x*x);
                 else         y = d * (-4 + 8 * x - 5 * x*x + x*x*x);
                 break;
             }
-            case 1:
+            case AV_RESAMPLE_FILTER_TYPE_BLACKMAN_NUTTALL:
                 w = 2.0 * x / (factor * tap_count) + M_PI;
                 y *= 0.3635819 - 0.4891775 * cos(    w) +
                                  0.1365995 * cos(2 * w) -
                                  0.0106411 * cos(3 * w);
                 break;
-            default:
+            case AV_RESAMPLE_FILTER_TYPE_KAISER:
                 w = 2.0 * x / (factor * tap_count * M_PI);
-                y *= bessel(type * sqrt(FFMAX(1 - w * w, 0)));
+                y *= bessel(c->kaiser_beta * sqrt(FFMAX(1 - w * w, 0)));
                 break;
             }
 
             tab[i] = y;
             norm  += y;
         }
 
         /* normalize so that an uniform color remains the same */
-        for (i = 0; i < tap_count; i++) {
-#ifdef CONFIG_RESAMPLE_FLT
-            filter[ph * tap_count + i] = tab[i] / norm;
-#else
-            filter[ph * tap_count + i] = av_clip(lrintf(tab[i] * scale / norm),
-                                                 FELEM_MIN, FELEM_MAX);
-#endif
-        }
+        for (i = 0; i < tap_count; i++)
+            tab[i] = tab[i] / norm;
+        c->set_filter(c->filter_bank, tab, ph, tap_count);
     }
 
     av_free(tab);
|
|||||||
int in_rate = avr->in_sample_rate;
|
int in_rate = avr->in_sample_rate;
|
||||||
double factor = FFMIN(out_rate * avr->cutoff / in_rate, 1.0);
|
double factor = FFMIN(out_rate * avr->cutoff / in_rate, 1.0);
|
||||||
int phase_count = 1 << avr->phase_shift;
|
int phase_count = 1 << avr->phase_shift;
|
||||||
|
int felem_size;
|
||||||
|
|
||||||
/* TODO: add support for s32 and float internal formats */
|
if (avr->internal_sample_fmt != AV_SAMPLE_FMT_S16P &&
|
||||||
if (avr->internal_sample_fmt != AV_SAMPLE_FMT_S16P) {
|
avr->internal_sample_fmt != AV_SAMPLE_FMT_S32P &&
|
||||||
|
avr->internal_sample_fmt != AV_SAMPLE_FMT_FLTP &&
|
||||||
|
avr->internal_sample_fmt != AV_SAMPLE_FMT_DBLP) {
|
||||||
av_log(avr, AV_LOG_ERROR, "Unsupported internal format for "
|
av_log(avr, AV_LOG_ERROR, "Unsupported internal format for "
|
||||||
"resampling: %s\n",
|
"resampling: %s\n",
|
||||||
av_get_sample_fmt_name(avr->internal_sample_fmt));
|
av_get_sample_fmt_name(avr->internal_sample_fmt));
|
||||||
@ -186,18 +182,40 @@ ResampleContext *ff_audio_resample_init(AVAudioResampleContext *avr)
|
|||||||
c->linear = avr->linear_interp;
|
c->linear = avr->linear_interp;
|
||||||
c->factor = factor;
|
c->factor = factor;
|
||||||
c->filter_length = FFMAX((int)ceil(avr->filter_size / factor), 1);
|
c->filter_length = FFMAX((int)ceil(avr->filter_size / factor), 1);
|
||||||
|
c->filter_type = avr->filter_type;
|
||||||
|
c->kaiser_beta = avr->kaiser_beta;
|
||||||
|
|
||||||
c->filter_bank = av_mallocz(c->filter_length * (phase_count + 1) * sizeof(FELEM));
|
switch (avr->internal_sample_fmt) {
|
||||||
|
case AV_SAMPLE_FMT_DBLP:
|
||||||
|
c->resample_one = resample_one_dbl;
|
||||||
|
c->set_filter = set_filter_dbl;
|
||||||
|
break;
|
||||||
|
case AV_SAMPLE_FMT_FLTP:
|
||||||
|
c->resample_one = resample_one_flt;
|
||||||
|
c->set_filter = set_filter_flt;
|
||||||
|
break;
|
||||||
|
case AV_SAMPLE_FMT_S32P:
|
||||||
|
c->resample_one = resample_one_s32;
|
||||||
|
c->set_filter = set_filter_s32;
|
||||||
|
break;
|
||||||
|
case AV_SAMPLE_FMT_S16P:
|
||||||
|
c->resample_one = resample_one_s16;
|
||||||
|
c->set_filter = set_filter_s16;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
felem_size = av_get_bytes_per_sample(avr->internal_sample_fmt);
|
||||||
|
c->filter_bank = av_mallocz(c->filter_length * (phase_count + 1) * felem_size);
|
||||||
if (!c->filter_bank)
|
if (!c->filter_bank)
|
||||||
goto error;
|
goto error;
|
||||||
|
|
||||||
if (build_filter(c->filter_bank, factor, c->filter_length, phase_count,
|
if (build_filter(c) < 0)
|
||||||
1 << FILTER_SHIFT, WINDOW_TYPE) < 0)
|
|
||||||
goto error;
|
goto error;
|
||||||
|
|
||||||
memcpy(&c->filter_bank[c->filter_length * phase_count + 1],
|
memcpy(&c->filter_bank[(c->filter_length * phase_count + 1) * felem_size],
|
||||||
c->filter_bank, (c->filter_length - 1) * sizeof(FELEM));
|
c->filter_bank, (c->filter_length - 1) * felem_size);
|
||||||
c->filter_bank[c->filter_length * phase_count] = c->filter_bank[c->filter_length - 1];
|
memcpy(&c->filter_bank[c->filter_length * phase_count * felem_size],
|
||||||
|
&c->filter_bank[(c->filter_length - 1) * felem_size], felem_size);
|
||||||
|
|
||||||
c->compensation_distance = 0;
|
c->compensation_distance = 0;
|
||||||
if (!av_reduce(&c->src_incr, &c->dst_incr, out_rate,
|
if (!av_reduce(&c->src_incr, &c->dst_incr, out_rate,
|
||||||
@ -311,10 +329,10 @@ reinit_fail:
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int resample(ResampleContext *c, int16_t *dst, const int16_t *src,
|
static int resample(ResampleContext *c, void *dst, const void *src,
|
||||||
int *consumed, int src_size, int dst_size, int update_ctx)
|
int *consumed, int src_size, int dst_size, int update_ctx)
|
||||||
{
|
{
|
||||||
int dst_index, i;
|
int dst_index;
|
||||||
int index = c->index;
|
int index = c->index;
|
||||||
int frac = c->frac;
|
int frac = c->frac;
|
||||||
int dst_incr_frac = c->dst_incr % c->src_incr;
|
int dst_incr_frac = c->dst_incr % c->src_incr;
|
||||||
@ -334,7 +352,7 @@ static int resample(ResampleContext *c, int16_t *dst, const int16_t *src,
|
|||||||
|
|
||||||
if (dst) {
|
if (dst) {
|
||||||
for(dst_index = 0; dst_index < dst_size; dst_index++) {
|
for(dst_index = 0; dst_index < dst_size; dst_index++) {
|
||||||
dst[dst_index] = src[index2 >> 32];
|
c->resample_one(c, 1, dst, dst_index, src, 0, index2 >> 32, 0);
|
||||||
index2 += incr;
|
index2 += incr;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@ -345,42 +363,14 @@ static int resample(ResampleContext *c, int16_t *dst, const int16_t *src,
|
|||||||
frac = (frac + dst_index * (int64_t)dst_incr_frac) % c->src_incr;
|
frac = (frac + dst_index * (int64_t)dst_incr_frac) % c->src_incr;
|
||||||
} else {
|
} else {
|
||||||
for (dst_index = 0; dst_index < dst_size; dst_index++) {
|
for (dst_index = 0; dst_index < dst_size; dst_index++) {
|
||||||
FELEM *filter = c->filter_bank +
|
|
||||||
c->filter_length * (index & c->phase_mask);
|
|
||||||
int sample_index = index >> c->phase_shift;
|
int sample_index = index >> c->phase_shift;
|
||||||
|
|
||||||
if (!dst && (sample_index + c->filter_length > src_size ||
|
if (sample_index + c->filter_length > src_size ||
|
||||||
-sample_index >= src_size))
|
-sample_index >= src_size)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
if (dst) {
|
if (dst)
|
||||||
FELEM2 val = 0;
|
c->resample_one(c, 0, dst, dst_index, src, src_size, index, frac);
|
||||||
|
|
||||||
if (sample_index < 0) {
|
|
||||||
for (i = 0; i < c->filter_length; i++)
|
|
||||||
val += src[FFABS(sample_index + i) % src_size] *
|
|
||||||
(FELEM2)filter[i];
|
|
||||||
} else if (sample_index + c->filter_length > src_size) {
|
|
||||||
break;
|
|
||||||
} else if (c->linear) {
|
|
||||||
FELEM2 v2 = 0;
|
|
||||||
for (i = 0; i < c->filter_length; i++) {
|
|
||||||
val += src[abs(sample_index + i)] * (FELEM2)filter[i];
|
|
||||||
v2 += src[abs(sample_index + i)] * (FELEM2)filter[i + c->filter_length];
|
|
||||||
}
|
|
||||||
val += (v2 - val) * (FELEML)frac / c->src_incr;
|
|
||||||
} else {
|
|
||||||
for (i = 0; i < c->filter_length; i++)
|
|
||||||
val += src[sample_index + i] * (FELEM2)filter[i];
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef CONFIG_RESAMPLE_FLT
|
|
||||||
dst[dst_index] = av_clip_int16(lrintf(val));
|
|
||||||
#else
|
|
||||||
val = (val + (1<<(FILTER_SHIFT-1)))>>FILTER_SHIFT;
|
|
||||||
dst[dst_index] = av_clip_int16(val);
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
frac += dst_incr_frac;
|
frac += dst_incr_frac;
|
||||||
index += dst_incr;
|
index += dst_incr;
|
||||||
@ -451,8 +441,8 @@ int ff_audio_resample(ResampleContext *c, AudioData *dst, AudioData *src,
|
|||||||
|
|
||||||
/* resample each channel plane */
|
/* resample each channel plane */
|
||||||
for (ch = 0; ch < c->buffer->channels; ch++) {
|
for (ch = 0; ch < c->buffer->channels; ch++) {
|
||||||
out_samples = resample(c, (int16_t *)dst->data[ch],
|
out_samples = resample(c, (void *)dst->data[ch],
|
||||||
(const int16_t *)c->buffer->data[ch], consumed,
|
(const void *)c->buffer->data[ch], consumed,
|
||||||
c->buffer->nb_samples, dst->allocated_samples,
|
c->buffer->nb_samples, dst->allocated_samples,
|
||||||
ch + 1 == c->buffer->channels);
|
ch + 1 == c->buffer->channels);
|
||||||
}
|
}
|
||||||
libavresample/resample_template.c (new file, 102 lines)
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#if defined(CONFIG_RESAMPLE_DBL)
+#define SET_TYPE(func)  func ## _dbl
+#define FELEM  double
+#define FELEM2 double
+#define FELEML double
+#define OUT(d, v) d = v
+#define DBL_TO_FELEM(d, v) d = v
+#elif defined(CONFIG_RESAMPLE_FLT)
+#define SET_TYPE(func)  func ## _flt
+#define FELEM  float
+#define FELEM2 float
+#define FELEML float
+#define OUT(d, v) d = v
+#define DBL_TO_FELEM(d, v) d = v
+#elif defined(CONFIG_RESAMPLE_S32)
+#define SET_TYPE(func)  func ## _s32
+#define FELEM  int32_t
+#define FELEM2 int64_t
+#define FELEML int64_t
+#define OUT(d, v) d = av_clipl_int32((v + (1 << 29)) >> 30)
+#define DBL_TO_FELEM(d, v) d = av_clipl_int32(llrint(v * (1 << 30)));
+#else
+#define SET_TYPE(func)  func ## _s16
+#define FELEM  int16_t
+#define FELEM2 int32_t
+#define FELEML int64_t
+#define OUT(d, v) d = av_clip_int16((v + (1 << 14)) >> 15)
+#define DBL_TO_FELEM(d, v) d = av_clip_int16(lrint(v * (1 << 15)))
+#endif
+
+static void SET_TYPE(resample_one)(ResampleContext *c, int no_filter,
+                                   void *dst0, int dst_index, const void *src0,
+                                   int src_size, int index, int frac)
+{
+    FELEM *dst = dst0;
+    const FELEM *src = src0;
+
+    if (no_filter) {
+        dst[dst_index] = src[index];
+    } else {
+        int i;
+        int sample_index = index >> c->phase_shift;
+        FELEM2 val = 0;
+        FELEM *filter = ((FELEM *)c->filter_bank) +
+                        c->filter_length * (index & c->phase_mask);
+
+        if (sample_index < 0) {
+            for (i = 0; i < c->filter_length; i++)
+                val += src[FFABS(sample_index + i) % src_size] *
+                       (FELEM2)filter[i];
+        } else if (c->linear) {
+            FELEM2 v2 = 0;
+            for (i = 0; i < c->filter_length; i++) {
+                val += src[abs(sample_index + i)] * (FELEM2)filter[i];
+                v2  += src[abs(sample_index + i)] * (FELEM2)filter[i + c->filter_length];
+            }
+            val += (v2 - val) * (FELEML)frac / c->src_incr;
+        } else {
+            for (i = 0; i < c->filter_length; i++)
+                val += src[sample_index + i] * (FELEM2)filter[i];
+        }
+
+        OUT(dst[dst_index], val);
+    }
+}
+
+static void SET_TYPE(set_filter)(void *filter0, double *tab, int phase,
+                                 int tap_count)
+{
+    int i;
+    FELEM *filter = ((FELEM *)filter0) + phase * tap_count;
+    for (i = 0; i < tap_count; i++) {
+        DBL_TO_FELEM(filter[i], tab[i]);
+    }
+}
+
+#undef SET_TYPE
+#undef FELEM
+#undef FELEM2
+#undef FELEML
+#undef OUT
+#undef DBL_TO_FELEM
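
Each #include of resample_template.c above stamps out one resample_one_* / set_filter_* pair per internal sample format. Hand-expanding the default s16 branch gives roughly the following for set_filter (a sketch of the macro expansion, not an extra function in the tree):

/* set_filter_s16 after macro expansion: FELEM is int16_t and
 * DBL_TO_FELEM() quantizes the double taps to Q15 with clipping. */
static void set_filter_s16(void *filter0, double *tab, int phase, int tap_count)
{
    int i;
    int16_t *filter = ((int16_t *)filter0) + phase * tap_count;

    for (i = 0; i < tap_count; i++)
        filter[i] = av_clip_int16(lrint(tab[i] * (1 << 15)));
}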
@@ -57,18 +57,43 @@ int avresample_open(AVAudioResampleContext *avr)
     avr->resample_needed = avr->in_sample_rate != avr->out_sample_rate ||
                            avr->force_resampling;
 
-    /* set sample format conversion parameters */
-    /* override user-requested internal format to avoid unexpected failures
-       TODO: support more internal formats */
-    if (avr->resample_needed && avr->internal_sample_fmt != AV_SAMPLE_FMT_S16P) {
-        av_log(avr, AV_LOG_WARNING, "Using s16p as internal sample format\n");
-        avr->internal_sample_fmt = AV_SAMPLE_FMT_S16P;
-    } else if (avr->mixing_needed &&
-               avr->internal_sample_fmt != AV_SAMPLE_FMT_S16P &&
-               avr->internal_sample_fmt != AV_SAMPLE_FMT_FLTP) {
-        av_log(avr, AV_LOG_WARNING, "Using fltp as internal sample format\n");
-        avr->internal_sample_fmt = AV_SAMPLE_FMT_FLTP;
+    /* select internal sample format if not specified by the user */
+    if (avr->internal_sample_fmt == AV_SAMPLE_FMT_NONE &&
+        (avr->mixing_needed || avr->resample_needed)) {
+        enum AVSampleFormat  in_fmt = av_get_planar_sample_fmt(avr->in_sample_fmt);
+        enum AVSampleFormat out_fmt = av_get_planar_sample_fmt(avr->out_sample_fmt);
+        int max_bps = FFMAX(av_get_bytes_per_sample(in_fmt),
+                            av_get_bytes_per_sample(out_fmt));
+        if (max_bps <= 2) {
+            avr->internal_sample_fmt = AV_SAMPLE_FMT_S16P;
+        } else if (avr->mixing_needed) {
+            avr->internal_sample_fmt = AV_SAMPLE_FMT_FLTP;
+        } else {
+            if (max_bps <= 4) {
+                if (in_fmt  == AV_SAMPLE_FMT_S32P ||
+                    out_fmt == AV_SAMPLE_FMT_S32P) {
+                    if (in_fmt  == AV_SAMPLE_FMT_FLTP ||
+                        out_fmt == AV_SAMPLE_FMT_FLTP) {
+                        /* if one is s32 and the other is flt, use dbl */
+                        avr->internal_sample_fmt = AV_SAMPLE_FMT_DBLP;
+                    } else {
+                        /* if one is s32 and the other is s32, s16, or u8, use s32 */
+                        avr->internal_sample_fmt = AV_SAMPLE_FMT_S32P;
+                    }
+                } else {
+                    /* if one is flt and the other is flt, s16 or u8, use flt */
+                    avr->internal_sample_fmt = AV_SAMPLE_FMT_FLTP;
+                }
+            } else {
+                /* if either is dbl, use dbl */
+                avr->internal_sample_fmt = AV_SAMPLE_FMT_DBLP;
+            }
+        }
+        av_log(avr, AV_LOG_DEBUG, "Using %s as internal sample format\n",
+               av_get_sample_fmt_name(avr->internal_sample_fmt));
     }
 
+    /* set sample format conversion parameters */
     if (avr->in_channels == 1)
         avr->in_sample_fmt = av_get_planar_sample_fmt(avr->in_sample_fmt);
     if (avr->out_channels == 1)
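
avresample_open() now auto-selects the internal format when the user leaves internal_sample_fmt at its new AV_SAMPLE_FMT_NONE default: s16p for 8/16-bit endpoints, fltp whenever mixing is involved, and s32p/fltp/dblp otherwise depending on the wider of the two endpoints. A usage sketch (rates and formats chosen arbitrarily for illustration; error checking omitted):

#include <libavresample/avresample.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>

/* Sketch: leave internal_sample_fmt untouched and let avresample_open()
 * pick it; assuming no channel mixing, s32 input to flt output would
 * resolve to dblp per the logic above. */
static int open_converter(AVAudioResampleContext *avr)
{
    av_opt_set_int(avr, "in_sample_fmt",   AV_SAMPLE_FMT_S32, 0);
    av_opt_set_int(avr, "out_sample_fmt",  AV_SAMPLE_FMT_FLT, 0);
    av_opt_set_int(avr, "in_sample_rate",  44100, 0);
    av_opt_set_int(avr, "out_sample_rate", 48000, 0);
    /* channel layouts must also be configured before opening; omitted here */
    return avresample_open(avr);
}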
@@ -75,7 +75,13 @@ probefmt(){
 }
 
 ffmpeg(){
-    run ffmpeg -nostats -threads $threads -thread_type $thread_type -cpuflags $cpuflags "$@"
+    dec_opts="-threads $threads -thread_type $thread_type"
+    ffmpeg_args="-nostats -cpuflags $cpuflags"
+    for arg in $@; do
+        [ ${arg} = -i ] && ffmpeg_args="${ffmpeg_args} ${dec_opts}"
+        ffmpeg_args="${ffmpeg_args} ${arg}"
+    done
+    run ffmpeg ${ffmpeg_args}
 }
 
 framecrc(){