Merge remote-tracking branch 'qatar/master'
* qatar/master:
  dwt: check malloc calls
  ppc: Drop unused header regs.h
  af_resample: remove an extra space in the log output
  Convert vector_fmul range of functions to YASM and add AVX versions
  lavfi: add an audio split filter
  lavfi: rename vf_split.c to split.c

Conflicts:
	doc/filters.texi
	libavcodec/ppc/regs.h
	libavfilter/Makefile
	libavfilter/allfilters.c
	libavfilter/f_split.c
	libavfilter/split.c
	libavfilter/version.h
	libavfilter/vf_split.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit ea5dab58e0
@@ -32,6 +32,7 @@ version next:
- accept + prefix to -pix_fmt option to disable automatic conversions.
- audio filters support in libavfilter and avconv
- add fps filter
- audio split filter


version 0.10:
@@ -322,6 +322,12 @@ outputs, like in:
[in] asplit=3 [out0][out1][out2]
@end example

@example
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
@end example
will create 5 copies of the input audio.


@section astreamsync

Forward two audio streams and control the order the buffers are forwarded.
@@ -267,8 +267,8 @@ static const int8_t sbr_offset[6][16] = {
};

///< window coefficients for analysis/synthesis QMF banks
static DECLARE_ALIGNED(16, float, sbr_qmf_window_ds)[320];
static DECLARE_ALIGNED(16, float, sbr_qmf_window_us)[640] = {
static DECLARE_ALIGNED(32, float, sbr_qmf_window_ds)[320];
static DECLARE_ALIGNED(32, float, sbr_qmf_window_us)[640] = {
0.0000000000, -0.0005525286, -0.0005617692, -0.0004947518,
-0.0004875227, -0.0004893791, -0.0005040714, -0.0005226564,
-0.0005466565, -0.0005677802, -0.0005870930, -0.0006132747,
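The DECLARE_ALIGNED bumps from 16 to 32 bytes in this and the following hunks exist because the new AVX kernels use aligned 256-bit loads, which require 32-byte-aligned data. A standalone sketch of the idea (my illustration, using C11 _Alignas rather than FFmpeg's DECLARE_ALIGNED macro):

/* Standalone sketch (not FFmpeg code): the tables move from 16- to 32-byte
 * alignment because 256-bit AVX loads (vmovaps ymm, [mem]) fault unless the
 * address is 32-byte aligned.  FFmpeg's DECLARE_ALIGNED expands to a compiler
 * alignment attribute; C11 _Alignas is used here instead. */
#include <stdint.h>
#include <stdio.h>

static _Alignas(32) float window_tab[640];  /* hypothetical stand-in for sbr_qmf_window_us */

int main(void)
{
    /* For a 32-byte aligned object the low 5 address bits are zero. */
    printf("32-byte aligned: %s\n",
           ((uintptr_t)window_tab & 31) == 0 ? "yes" : "no");
    return 0;
}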
@@ -33,8 +33,8 @@

#include <stdint.h>

DECLARE_ALIGNED(16, float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(16, float, ff_aac_kbd_short_128)[128];
DECLARE_ALIGNED(32, float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(32, float, ff_aac_kbd_short_128)[128];

const uint8_t ff_aac_num_swb_1024[] = {
41, 41, 47, 49, 49, 51, 47, 47, 43, 43, 43, 40, 40
@@ -44,8 +44,8 @@
/* @name window coefficients
 * @{
 */
DECLARE_ALIGNED(16, extern float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(16, extern float, ff_aac_kbd_short_128)[128];
DECLARE_ALIGNED(32, extern float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(32, extern float, ff_aac_kbd_short_128)[128];
// @}

/* @name number of scalefactor window bands for long and short transform windows respectively
@@ -402,7 +402,7 @@ typedef struct DSPContext {
/* assume len is a multiple of 4, and arrays are 16-byte aligned */
void (*vorbis_inverse_coupling)(float *mag, float *ang, int blocksize);
void (*ac3_downmix)(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len);
/* assume len is a multiple of 8, and arrays are 16-byte aligned */
/* assume len is a multiple of 16, and arrays are 32-byte aligned */
void (*vector_fmul)(float *dst, const float *src0, const float *src1, int len);
void (*vector_fmul_reverse)(float *dst, const float *src0, const float *src1, int len);
/* assume len is a multiple of 8, and src arrays are 16-byte aligned */
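The comment change above documents the tightened contract for these DSPContext hooks: len is now assumed to be a multiple of 16 and the arrays 32-byte aligned, which is what the AVX versions added later in this merge rely on. For reference, a minimal scalar sketch of what the three pointers compute (my illustration, not the optimized code in this merge):

/* Scalar reference sketch of the vector_fmul family (illustration only). */
static void vector_fmul_c(float *dst, const float *src0,
                          const float *src1, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i];
}

static void vector_fmul_reverse_c(float *dst, const float *src0,
                                  const float *src1, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src0[i] * src1[len - 1 - i];   /* second operand read backwards */
}

static void vector_fmul_add_c(float *dst, const float *src0, const float *src1,
                              const float *src2, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i] + src2[i];
}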
@@ -35,10 +35,24 @@ void ff_slice_buffer_init(slice_buffer *buf, int line_count,
buf->line_width = line_width;
buf->data_count = max_allocated_lines;
buf->line = av_mallocz(sizeof(IDWTELEM *) * line_count);
if (!buf->line)
    return AVERROR(ENOMEM);
buf->data_stack = av_malloc(sizeof(IDWTELEM *) * max_allocated_lines);
if (!buf->data_stack) {
    av_free(buf->line);
    return AVERROR(ENOMEM);
}

for (i = 0; i < max_allocated_lines; i++)
for (i = 0; i < max_allocated_lines; i++) {
    buf->data_stack[i] = av_malloc(sizeof(IDWTELEM) * line_width);
    if (!buf->data_stack[i]) {
        for (i--; i >= 0; i--)
            av_free(buf->data_stack[i]);
        av_free(buf->data_stack);
        av_free(buf->line);
        return AVERROR(ENOMEM);
    }
}

buf->data_stack_top = max_allocated_lines - 1;
}
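The new checks above make ff_slice_buffer_init release everything it has already allocated before returning AVERROR(ENOMEM). A minimal standalone sketch of the same allocate-then-unwind pattern, with hypothetical names rather than the snow/dwt API:

/* Allocate-and-unwind-on-failure sketch (illustrative names, not FFmpeg API). */
#include <errno.h>
#include <stdlib.h>

static int alloc_lines(float ***lines_out, int count, int width)
{
    float **lines = calloc(count, sizeof(*lines));
    if (!lines)
        return -ENOMEM;
    for (int i = 0; i < count; i++) {
        lines[i] = malloc(sizeof(**lines) * width);
        if (!lines[i]) {                    /* unwind the partial work */
            while (i-- > 0)
                free(lines[i]);
            free(lines);
            return -ENOMEM;
        }
    }
    *lines_out = lines;
    return 0;
}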
@@ -1,37 +0,0 @@
/*
* Copyright (c) 2010 Mans Rullgard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#ifndef AVCODEC_PPC_REGS_H
#define AVCODEC_PPC_REGS_H

#include "libavutil/avutil.h"
#include "config.h"

#if HAVE_IBM_ASM
# define r(n) AV_TOSTRING(n)
# define f(n) AV_TOSTRING(n)
# define v(n) AV_TOSTRING(n)
#else
# define r(n) AV_TOSTRING(r ## n)
# define f(n) AV_TOSTRING(f ## n)
# define v(n) AV_TOSTRING(v ## n)
#endif

#endif /* AVCODEC_PPC_REGS_H */
@@ -38,8 +38,8 @@
typedef struct {
AVFrame frame;
DSPContext dsp;
DECLARE_ALIGNED(16, float, sp_lpc)[FFALIGN(36, 8)]; ///< LPC coefficients for speech data (spec: A)
DECLARE_ALIGNED(16, float, gain_lpc)[FFALIGN(10, 8)]; ///< LPC coefficients for gain (spec: GB)
DECLARE_ALIGNED(32, float, sp_lpc)[FFALIGN(36, 16)]; ///< LPC coefficients for speech data (spec: A)
DECLARE_ALIGNED(32, float, gain_lpc)[FFALIGN(10, 16)]; ///< LPC coefficients for gain (spec: GB)

/** speech data history (spec: SB).
* Its first 70 coefficients are updated only at backward filtering.
@@ -133,11 +133,11 @@ static void do_hybrid_window(RA288Context *ractx,
int i;
float buffer1[MAX_BACKWARD_FILTER_ORDER + 1];
float buffer2[MAX_BACKWARD_FILTER_ORDER + 1];
LOCAL_ALIGNED_16(float, work, [FFALIGN(MAX_BACKWARD_FILTER_ORDER +
MAX_BACKWARD_FILTER_LEN +
MAX_BACKWARD_FILTER_NONREC, 8)]);
LOCAL_ALIGNED(32, float, work, [FFALIGN(MAX_BACKWARD_FILTER_ORDER +
MAX_BACKWARD_FILTER_LEN +
MAX_BACKWARD_FILTER_NONREC, 16)]);

ractx->dsp.vector_fmul(work, window, hist, FFALIGN(order + n + non_rec, 8));
ractx->dsp.vector_fmul(work, window, hist, FFALIGN(order + n + non_rec, 16));

convolve(buffer1, work + order , n , order);
convolve(buffer2, work + order + n, non_rec, order);
@@ -164,7 +164,7 @@ static void backward_filter(RA288Context *ractx,
do_hybrid_window(ractx, order, n, non_rec, temp, hist, rec, window);

if (!compute_lpc_coefs(temp, order, lpc, 0, 1, 1))
ractx->dsp.vector_fmul(lpc, lpc, tab, FFALIGN(order, 8));
ractx->dsp.vector_fmul(lpc, lpc, tab, FFALIGN(order, 16));

memmove(hist, hist + n, move_size*sizeof(*hist));
}
@@ -97,7 +97,7 @@ static const int16_t codetable[128][5]={
{ 3746, -606, 53, -269, -3301}, { 606, 2018, -1316, 4064, 398}
};

DECLARE_ALIGNED(16, static const float, syn_window)[FFALIGN(111, 8)]={
DECLARE_ALIGNED(32, static const float, syn_window)[FFALIGN(111, 16)]={
0.576690972, 0.580838025, 0.585013986, 0.589219987, 0.59345597, 0.597723007,
0.602020264, 0.606384277, 0.610748291, 0.615142822, 0.619598389, 0.624084473,
0.628570557, 0.633117676, 0.637695313, 0.642272949, 0.646911621, 0.651580811,
@@ -119,7 +119,7 @@ DECLARE_ALIGNED(16, static const float, syn_window)[FFALIGN(111, 8)]={
0.142852783, 0.0954284668,0.0477600098
};

DECLARE_ALIGNED(16, static const float, gain_window)[FFALIGN(38, 8)]={
DECLARE_ALIGNED(32, static const float, gain_window)[FFALIGN(38, 16)]={
0.505699992, 0.524200022, 0.54339999, 0.563300014, 0.583953857, 0.60534668,
0.627502441, 0.650482178, 0.674316406, 0.699005127, 0.724578857, 0.75112915,
0.778625488, 0.807128906, 0.836669922, 0.86730957, 0.899078369, 0.932006836,
@@ -130,7 +130,7 @@ DECLARE_ALIGNED(16, static const float, gain_window)[FFALIGN(38, 8)]={
};

/** synthesis bandwidth broadening table */
DECLARE_ALIGNED(16, static const float, syn_bw_tab)[FFALIGN(36, 8)] = {
DECLARE_ALIGNED(32, static const float, syn_bw_tab)[FFALIGN(36, 16)] = {
0.98828125, 0.976699829, 0.965254128, 0.953942537, 0.942763507, 0.931715488,
0.920796931, 0.910006344, 0.899342179, 0.888803005, 0.878387332, 0.868093729,
0.857920766, 0.847867012, 0.837931097, 0.828111589, 0.818407178, 0.808816493,
@@ -140,7 +140,7 @@ DECLARE_ALIGNED(16, static const float, syn_bw_tab)[FFALIGN(36, 8)] = {
};

/** gain bandwidth broadening table */
DECLARE_ALIGNED(16, static const float, gain_bw_tab)[FFALIGN(10, 8)] = {
DECLARE_ALIGNED(32, static const float, gain_bw_tab)[FFALIGN(10, 16)] = {
0.90625, 0.821289063, 0.74432373, 0.674499512, 0.61126709,
0.553955078, 0.50201416, 0.454956055, 0.41229248, 0.373657227
};
@@ -78,8 +78,8 @@ typedef struct {
 * @name State variables
 * @{
 */
DECLARE_ALIGNED(16, float, synthesis_filterbank_samples)[SBR_SYNTHESIS_BUF_SIZE];
DECLARE_ALIGNED(16, float, analysis_filterbank_samples) [1312];
DECLARE_ALIGNED(32, float, synthesis_filterbank_samples)[SBR_SYNTHESIS_BUF_SIZE];
DECLARE_ALIGNED(32, float, analysis_filterbank_samples) [1312];
int synthesis_filterbank_samples_offset;
///l_APrev and l_A
int e_a[2];
@@ -31,7 +31,7 @@
#endif

#define SINETABLE(size) \
SINETABLE_CONST DECLARE_ALIGNED(16, float, ff_sine_##size)[size]
SINETABLE_CONST DECLARE_ALIGNED(32, float, ff_sine_##size)[size]

/**
 * Generate a sine window.
@@ -2427,135 +2427,6 @@ static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2],
}
}

static void vector_fmul_3dnow(float *dst, const float *src0, const float *src1,
int len)
{
x86_reg i = (len - 4) * 4;
__asm__ volatile (
"1: \n\t"
"movq (%2, %0), %%mm0 \n\t"
"movq 8(%2, %0), %%mm1 \n\t"
"pfmul (%3, %0), %%mm0 \n\t"
"pfmul 8(%3, %0), %%mm1 \n\t"
"movq %%mm0, (%1, %0) \n\t"
"movq %%mm1, 8(%1, %0) \n\t"
"sub $16, %0 \n\t"
"jge 1b \n\t"
"femms \n\t"
: "+r"(i)
: "r"(dst), "r"(src0), "r"(src1)
: "memory"
);
}

static void vector_fmul_sse(float *dst, const float *src0, const float *src1,
int len)
{
x86_reg i = (len - 8) * 4;
__asm__ volatile (
"1: \n\t"
"movaps (%2, %0), %%xmm0 \n\t"
"movaps 16(%2, %0), %%xmm1 \n\t"
"mulps (%3, %0), %%xmm0 \n\t"
"mulps 16(%3, %0), %%xmm1 \n\t"
"movaps %%xmm0, (%1, %0) \n\t"
"movaps %%xmm1, 16(%1, %0) \n\t"
"sub $32, %0 \n\t"
"jge 1b \n\t"
: "+r"(i)
: "r"(dst), "r"(src0), "r"(src1)
: "memory"
);
}

static void vector_fmul_reverse_3dnow2(float *dst, const float *src0,
const float *src1, int len)
{
x86_reg i = len * 4 - 16;
__asm__ volatile (
"1: \n\t"
"pswapd 8(%1), %%mm0 \n\t"
"pswapd (%1), %%mm1 \n\t"
"pfmul (%3, %0), %%mm0 \n\t"
"pfmul 8(%3, %0), %%mm1 \n\t"
"movq %%mm0, (%2, %0) \n\t"
"movq %%mm1, 8(%2, %0) \n\t"
"add $16, %1 \n\t"
"sub $16, %0 \n\t"
"jge 1b \n\t"
: "+r"(i), "+r"(src1)
: "r"(dst), "r"(src0)
);
__asm__ volatile ("femms");
}

static void vector_fmul_reverse_sse(float *dst, const float *src0,
const float *src1, int len)
{
x86_reg i = len * 4 - 32;
__asm__ volatile (
"1: \n\t"
"movaps 16(%1), %%xmm0 \n\t"
"movaps (%1), %%xmm1 \n\t"
"shufps $0x1b, %%xmm0, %%xmm0 \n\t"
"shufps $0x1b, %%xmm1, %%xmm1 \n\t"
"mulps (%3, %0), %%xmm0 \n\t"
"mulps 16(%3, %0), %%xmm1 \n\t"
"movaps %%xmm0, (%2, %0) \n\t"
"movaps %%xmm1, 16(%2, %0) \n\t"
"add $32, %1 \n\t"
"sub $32, %0 \n\t"
"jge 1b \n\t"
: "+r"(i), "+r"(src1)
: "r"(dst), "r"(src0)
);
}

static void vector_fmul_add_3dnow(float *dst, const float *src0,
const float *src1, const float *src2, int len)
{
x86_reg i = (len - 4) * 4;
__asm__ volatile (
"1: \n\t"
"movq (%2, %0), %%mm0 \n\t"
"movq 8(%2, %0), %%mm1 \n\t"
"pfmul (%3, %0), %%mm0 \n\t"
"pfmul 8(%3, %0), %%mm1 \n\t"
"pfadd (%4, %0), %%mm0 \n\t"
"pfadd 8(%4, %0), %%mm1 \n\t"
"movq %%mm0, (%1, %0) \n\t"
"movq %%mm1, 8(%1, %0) \n\t"
"sub $16, %0 \n\t"
"jge 1b \n\t"
: "+r"(i)
: "r"(dst), "r"(src0), "r"(src1), "r"(src2)
: "memory"
);
__asm__ volatile ("femms");
}

static void vector_fmul_add_sse(float *dst, const float *src0,
const float *src1, const float *src2, int len)
{
x86_reg i = (len - 8) * 4;
__asm__ volatile (
"1: \n\t"
"movaps (%2, %0), %%xmm0 \n\t"
"movaps 16(%2, %0), %%xmm1 \n\t"
"mulps (%3, %0), %%xmm0 \n\t"
"mulps 16(%3, %0), %%xmm1 \n\t"
"addps (%4, %0), %%xmm0 \n\t"
"addps 16(%4, %0), %%xmm1 \n\t"
"movaps %%xmm0, (%1, %0) \n\t"
"movaps %%xmm1, 16(%1, %0) \n\t"
"sub $32, %0 \n\t"
"jge 1b \n\t"
: "+r"(i)
: "r"(dst), "r"(src0), "r"(src1), "r"(src2)
: "memory"
);
}

#if HAVE_6REGS
static void vector_fmul_window_3dnow2(float *dst, const float *src0,
const float *src1, const float *win,
@@ -2710,6 +2581,21 @@ int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src,

float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);

void ff_vector_fmul_sse(float *dst, const float *src0, const float *src1,
int len);
void ff_vector_fmul_avx(float *dst, const float *src0, const float *src1,
int len);

void ff_vector_fmul_reverse_sse(float *dst, const float *src0,
const float *src1, int len);
void ff_vector_fmul_reverse_avx(float *dst, const float *src0,
const float *src1, int len);

void ff_vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
const float *src2, int len);
void ff_vector_fmul_add_avx(float *dst, const float *src0, const float *src1,
const float *src2, int len);

void ff_vector_clip_int32_mmx (int32_t *dst, const int32_t *src,
int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse2 (int32_t *dst, const int32_t *src,
@@ -3000,8 +2886,6 @@ static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
#endif

c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
c->vector_fmul = vector_fmul_3dnow;
c->vector_fmul_add = vector_fmul_add_3dnow;

#if HAVE_7REGS
c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
@@ -3011,7 +2895,6 @@ static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
static void dsputil_init_3dnow2(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
#if HAVE_6REGS
c->vector_fmul_window = vector_fmul_window_3dnow2;
#endif
@@ -3031,11 +2914,11 @@ static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags)

c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
c->ac3_downmix = ac3_downmix_sse;
c->vector_fmul = vector_fmul_sse;
c->vector_fmul_reverse = vector_fmul_reverse_sse;

if (!(mm_flags & AV_CPU_FLAG_3DNOW))
c->vector_fmul_add = vector_fmul_add_sse;
#if HAVE_YASM
c->vector_fmul = ff_vector_fmul_sse;
c->vector_fmul_reverse = ff_vector_fmul_reverse_sse;
c->vector_fmul_add = ff_vector_fmul_add_sse;
#endif

#if HAVE_6REGS
c->vector_fmul_window = vector_fmul_window_sse;
@@ -3194,6 +3077,9 @@ static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
}
}
c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
c->vector_fmul = ff_vector_fmul_avx;
c->vector_fmul_reverse = ff_vector_fmul_reverse_avx;
c->vector_fmul_add = ff_vector_fmul_add_avx;
#endif
}

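These init hunks follow the usual dispatch pattern: the function pointers start at the C (or older SIMD) baseline and are overridden with the YASM ff_*_sse/ff_*_avx symbols when HAVE_YASM and the corresponding CPU flags allow it. A standalone sketch of that pattern, with hypothetical stand-in types and flag values rather than FFmpeg's:

/* Dispatch-pattern sketch (illustrative only). */
typedef struct {
    void (*vector_fmul)(float *dst, const float *src0,
                        const float *src1, int len);
} FloatDSPSketch;

static void fmul_c(float *dst, const float *src0, const float *src1, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i];
}

/* Stand-ins for the assembler versions registered above. */
static void fmul_sse(float *dst, const float *s0, const float *s1, int len) { fmul_c(dst, s0, s1, len); }
static void fmul_avx(float *dst, const float *s0, const float *s1, int len) { fmul_c(dst, s0, s1, len); }

enum { SKETCH_FLAG_SSE = 1, SKETCH_FLAG_AVX = 2 };

static void init_float_dsp_sketch(FloatDSPSketch *c, int cpu_flags)
{
    c->vector_fmul = fmul_c;                 /* baseline */
    if (cpu_flags & SKETCH_FLAG_SSE)
        c->vector_fmul = fmul_sse;
    if (cpu_flags & SKETCH_FLAG_AVX)
        c->vector_fmul = fmul_avx;           /* most capable version wins last */
}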
@@ -1129,6 +1129,111 @@ VECTOR_CLIP_INT32 11, 1, 1, 0
VECTOR_CLIP_INT32 6, 1, 0, 0
%endif

;-----------------------------------------------------------------------------
; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL 0
cglobal vector_fmul, 4,4,2, dst, src0, src1, len
lea lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop
mova m0, [src0q + lenq]
mova m1, [src0q + lenq + mmsize]
mulps m0, m0, [src1q + lenq]
mulps m1, m1, [src1q + lenq + mmsize]
mova [dstq + lenq], m0
mova [dstq + lenq + mmsize], m1

sub lenq, 2*mmsize
jge .loop
%if mmsize == 32
vzeroupper
RET
%else
REP_RET
%endif
%endmacro

INIT_XMM sse
VECTOR_FMUL
INIT_YMM avx
VECTOR_FMUL

;-----------------------------------------------------------------------------
; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
; int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_REVERSE 0
cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
lea lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop
%if cpuflag(avx)
vmovaps xmm0, [src1q + 16]
vinsertf128 m0, m0, [src1q], 1
vshufps m0, m0, m0, q0123
vmovaps xmm1, [src1q + mmsize + 16]
vinsertf128 m1, m1, [src1q + mmsize], 1
vshufps m1, m1, m1, q0123
%else
mova m0, [src1q]
mova m1, [src1q + mmsize]
shufps m0, m0, q0123
shufps m1, m1, q0123
%endif
mulps m0, m0, [src0q + lenq + mmsize]
mulps m1, m1, [src0q + lenq]
mova [dstq + lenq + mmsize], m0
mova [dstq + lenq], m1
add src1q, 2*mmsize
sub lenq, 2*mmsize
jge .loop
%if mmsize == 32
vzeroupper
RET
%else
REP_RET
%endif
%endmacro

INIT_XMM sse
VECTOR_FMUL_REVERSE
INIT_YMM avx
VECTOR_FMUL_REVERSE

;-----------------------------------------------------------------------------
; vector_fmul_add(float *dst, const float *src0, const float *src1,
; const float *src2, int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_ADD 0
cglobal vector_fmul_add, 5,5,2, dst, src0, src1, src2, len
lea lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop
mova m0, [src0q + lenq]
mova m1, [src0q + lenq + mmsize]
mulps m0, m0, [src1q + lenq]
mulps m1, m1, [src1q + lenq + mmsize]
addps m0, m0, [src2q + lenq]
addps m1, m1, [src2q + lenq + mmsize]
mova [dstq + lenq], m0
mova [dstq + lenq + mmsize], m1

sub lenq, 2*mmsize
jge .loop
%if mmsize == 32
vzeroupper
RET
%else
REP_RET
%endif
%endmacro

INIT_XMM sse
VECTOR_FMUL_ADD
INIT_YMM avx
VECTOR_FMUL_ADD

;-----------------------------------------------------------------------------
; void ff_butterflies_float_interleave(float *dst, const float *src0,
; const float *src1, int len);
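In the AVX path of VECTOR_FMUL_REVERSE above, the vmovaps/vinsertf128/vshufps q0123 sequence is used because a 256-bit shuffle only reverses elements within each 128-bit lane; loading the two halves swapped and then reversing per lane leaves eight fully reversed floats in the register. What that load produces, written out in C (my illustration, not the generated code):

/* Net effect of one reversed 8-float load in the AVX path (illustration only). */
static void load_reversed_8(float out[8], const float *src1)
{
    for (int j = 0; j < 8; j++)
        out[j] = src1[7 - j];
}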
@@ -50,7 +50,7 @@ OBJS-$(CONFIG_AMERGE_FILTER) += af_amerge.o
OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o
OBJS-$(CONFIG_ARESAMPLE_FILTER) += af_aresample.o
OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o
OBJS-$(CONFIG_ASPLIT_FILTER) += f_split.o
OBJS-$(CONFIG_ASPLIT_FILTER) += split.o
OBJS-$(CONFIG_ASTREAMSYNC_FILTER) += af_astreamsync.o
OBJS-$(CONFIG_ASYNCTS_FILTER) += af_asyncts.o
OBJS-$(CONFIG_EARWAX_FILTER) += af_earwax.o
@@ -109,7 +109,7 @@ OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o
OBJS-$(CONFIG_SETTB_FILTER) += vf_settb.o
OBJS-$(CONFIG_SHOWINFO_FILTER) += vf_showinfo.o
OBJS-$(CONFIG_SLICIFY_FILTER) += vf_slicify.o
OBJS-$(CONFIG_SPLIT_FILTER) += f_split.o
OBJS-$(CONFIG_SPLIT_FILTER) += split.o
OBJS-$(CONFIG_SUPER2XSAI_FILTER) += vf_super2xsai.o
OBJS-$(CONFIG_SWAPUV_FILTER) += vf_swapuv.o
OBJS-$(CONFIG_THUMBNAIL_FILTER) += vf_thumbnail.o
@@ -119,7 +119,7 @@ static int config_output(AVFilterLink *outlink)
av_get_channel_layout_string(buf2, sizeof(buf2),
-1, outlink->channel_layout);
av_log(ctx, AV_LOG_VERBOSE,
"fmt:%s srate: %d cl:%s -> fmt:%s srate: %d cl:%s\n",
"fmt:%s srate:%d cl:%s -> fmt:%s srate:%d cl:%s\n",
av_get_sample_fmt_name(inlink ->format), inlink ->sample_rate, buf1,
av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf2);

@@ -119,20 +119,16 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
}

AVFilter avfilter_af_asplit = {
.name = "asplit",
.name = "asplit",
.description = NULL_IF_CONFIG_SMALL("Pass on the audio input to N audio outputs."),

.init = split_init,
.uninit = split_uninit,

.inputs = (const AVFilterPad[]) {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
.filter_samples = filter_samples,
},
{ .name = NULL }
},
.inputs = (const AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
.filter_samples = filter_samples },
{ .name = NULL }},
.outputs = (const AVFilterPad[]) {{ .name = NULL }},
};
@@ -29,7 +29,7 @@
#include "libavutil/avutil.h"

#define LIBAVFILTER_VERSION_MAJOR 2
#define LIBAVFILTER_VERSION_MINOR 75
#define LIBAVFILTER_VERSION_MINOR 76
#define LIBAVFILTER_VERSION_MICRO 100

#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \