Merge commit '5cc0bd2cb47cbb1040f2bb0ded8d72a442c79b20'
* commit '5cc0bd2cb47cbb1040f2bb0ded8d72a442c79b20':
  binkaudio: decode directly to the user-provided AVFrame
  atrac3: decode directly to the user-provided AVFrame
  atrac1: decode directly to the user-provided AVFrame
  ape: decode directly to the user-provided AVFrame
  amrwb: decode directly to the user-provided AVFrame

Conflicts:
	libavcodec/amrwbdec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit f03cdbd045
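All five decoders in this merge make the same change: instead of keeping a scratch AVFrame inside the codec's private context, exporting it through avctx->coded_frame, and copying it into the caller's frame at the end of decode() with `*(AVFrame *)data = ...`, the decode callback now treats the `data` argument as the user-provided AVFrame and calls ff_get_buffer() on it directly. A minimal sketch of the before/after shape, assuming the two-argument ff_get_buffer() of this period and a hypothetical MyDecContext (not a name from the diff):

    /* Sketch only, not code from this commit. Assumes the libavcodec-internal
     * headers as used inside the tree and the two-argument ff_get_buffer(). */
    #include "avcodec.h"
    #include "internal.h"

    typedef struct MyDecContext {
        AVFrame frame;                  /* old style: scratch frame in the context */
        /* ... codec state ... */
    } MyDecContext;

    /* Old pattern: decode into the context frame, then copy it out. */
    static int old_decode(AVCodecContext *avctx, void *data,
                          int *got_frame_ptr, AVPacket *avpkt)
    {
        MyDecContext *s = avctx->priv_data;
        int ret;

        s->frame.nb_samples = 1024;                    /* samples in this packet */
        if ((ret = ff_get_buffer(avctx, &s->frame)) < 0)
            return ret;
        /* ... write samples into s->frame.data[] ... */

        *got_frame_ptr   = 1;
        *(AVFrame *)data = s->frame;                   /* struct copy to the caller */
        return avpkt->size;
    }

    /* New pattern: decode directly into the frame the caller passed in. */
    static int new_decode(AVCodecContext *avctx, void *data,
                          int *got_frame_ptr, AVPacket *avpkt)
    {
        AVFrame *frame = data;                         /* user-provided output frame */
        int ret;

        frame->nb_samples = 1024;
        if ((ret = ff_get_buffer(avctx, frame)) < 0)
            return ret;
        /* ... write samples into frame->data[] ... */

        *got_frame_ptr = 1;
        return avpkt->size;
    }

Once the context no longer owns an AVFrame, the avcodec_get_frame_defaults() call and the coded_frame assignment become dead weight, which is why each init function below loses those lines.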
--- a/libavcodec/amrwbdec.c
+++ b/libavcodec/amrwbdec.c
@@ -45,7 +45,6 @@
 #include "mips/amrwbdec_mips.h"
 
 typedef struct {
-    AVFrame avframe; ///< AVFrame for decoded samples
     AMRWBFrame frame; ///< AMRWB parameters decoded from bitstream
     enum Mode fr_cur_mode; ///< mode index of current frame
     uint8_t fr_quality; ///< frame quality index (FQI)
@@ -121,9 +120,6 @@ static av_cold int amrwb_decode_init(AVCodecContext *avctx)
     for (i = 0; i < 4; i++)
         ctx->prediction_error[i] = MIN_ENERGY;
 
-    avcodec_get_frame_defaults(&ctx->avframe);
-    avctx->coded_frame = &ctx->avframe;
-
     ff_acelp_filter_init(&ctx->acelpf_ctx);
     ff_acelp_vectors_init(&ctx->acelpv_ctx);
     ff_celp_filter_init(&ctx->celpf_ctx);
@@ -1097,6 +1093,7 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
 {
     AMRWBContext *ctx = avctx->priv_data;
+    AVFrame *frame = data;
     AMRWBFrame *cf = &ctx->frame;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -1114,12 +1111,12 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
     int sub, i, ret;
 
     /* get output buffer */
-    ctx->avframe.nb_samples = 4 * AMRWB_SFR_SIZE_16k;
-    if ((ret = ff_get_buffer(avctx, &ctx->avframe)) < 0) {
+    frame->nb_samples = 4 * AMRWB_SFR_SIZE_16k;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    buf_out = (float *)ctx->avframe.data[0];
+    buf_out = (float *)frame->data[0];
 
     header_size = decode_mime_header(ctx, buf);
     if (ctx->fr_cur_mode > MODE_SID) {
@@ -1265,8 +1262,7 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
     memcpy(ctx->isp_sub4_past, ctx->isp[3], LP_ORDER * sizeof(ctx->isp[3][0]));
     memcpy(ctx->isf_past_final, ctx->isf_cur, LP_ORDER * sizeof(float));
 
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = ctx->avframe;
+    *got_frame_ptr = 1;
 
     return expected_fr_size;
 }

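For completeness, the caller side is untouched by this change: applications keep handing their own AVFrame to avcodec_decode_audio4() and reading samples from it when got_frame is set. A hedged usage sketch against the decode API of this period (since superseded by avcodec_send_packet()/avcodec_receive_frame()); error handling trimmed:

    #include "libavcodec/avcodec.h"

    /* Decode one audio packet with an already-opened decoder context.
     * Sketch of the era's API; the frame helpers were later renamed
     * (avcodec_alloc_frame -> av_frame_alloc, avcodec_free_frame -> av_frame_free). */
    static int decode_one_packet(AVCodecContext *dec_ctx, AVPacket *pkt)
    {
        AVFrame *frame = avcodec_alloc_frame();
        int got_frame  = 0;
        int ret;

        if (!frame)
            return AVERROR(ENOMEM);

        ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, pkt);
        if (ret >= 0 && got_frame) {
            /* frame->nb_samples samples per channel are now in
             * frame->extended_data[ch] (planar formats) or frame->data[0]. */
        }

        avcodec_free_frame(&frame);
        return ret;
    }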
--- a/libavcodec/apedec.c
+++ b/libavcodec/apedec.c
@@ -129,7 +129,6 @@ typedef struct APEPredictor {
 typedef struct APEContext {
     AVClass *class; ///< class for AVOptions
     AVCodecContext *avctx;
-    AVFrame frame;
     DSPContext dsp;
     int channels;
     int samples; ///< samples left to decode in current frame
@@ -235,9 +234,6 @@ static av_cold int ape_decode_init(AVCodecContext *avctx)
     ff_dsputil_init(&s->dsp, avctx);
     avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 filter_alloc_fail:
     ape_decode_close(avctx);
@@ -826,6 +822,7 @@ static void ape_unpack_stereo(APEContext *ctx, int count)
 static int ape_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     APEContext *s = avctx->priv_data;
     uint8_t *sample8;
@@ -906,8 +903,8 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
     s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8);
 
     /* get output buffer */
-    s->frame.nb_samples = blockstodecode;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = blockstodecode;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -929,21 +926,21 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
     switch (s->bps) {
     case 8:
         for (ch = 0; ch < s->channels; ch++) {
-            sample8 = (uint8_t *)s->frame.data[ch];
+            sample8 = (uint8_t *)frame->data[ch];
             for (i = 0; i < blockstodecode; i++)
                 *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff;
         }
         break;
     case 16:
         for (ch = 0; ch < s->channels; ch++) {
-            sample16 = (int16_t *)s->frame.data[ch];
+            sample16 = (int16_t *)frame->data[ch];
             for (i = 0; i < blockstodecode; i++)
                 *sample16++ = s->decoded[ch][i];
         }
         break;
     case 24:
         for (ch = 0; ch < s->channels; ch++) {
-            sample24 = (int32_t *)s->frame.data[ch];
+            sample24 = (int32_t *)frame->data[ch];
             for (i = 0; i < blockstodecode; i++)
                 *sample24++ = s->decoded[ch][i] << 8;
         }
@@ -952,8 +949,7 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
 
     s->samples -= blockstodecode;
 
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
 
     return !s->samples ? avpkt->size : 0;
 }

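The only APE-specific part of the hunk above is the bit-depth fix-up: the decoder works on int32_t samples internally and converts each channel plane to the output format at the end. Restated as a standalone helper for clarity (illustrative only; the real code keeps this switch inline in ape_decode_frame()):

    #include <stdint.h>

    /* Convert one channel of internally decoded int32_t samples to the
     * output bit depth, writing into that channel's plane of the frame. */
    static void write_channel_samples(uint8_t *dst, const int32_t *decoded,
                                      int nb_samples, int bps)
    {
        int i;

        switch (bps) {
        case 8:                     /* unsigned 8-bit: bias by 0x80 */
            for (i = 0; i < nb_samples; i++)
                dst[i] = (decoded[i] + 0x80) & 0xff;
            break;
        case 16:                    /* signed 16-bit */
            for (i = 0; i < nb_samples; i++)
                ((int16_t *)dst)[i] = decoded[i];
            break;
        case 24:                    /* 24-bit samples carried in 32-bit words */
            for (i = 0; i < nb_samples; i++)
                ((int32_t *)dst)[i] = decoded[i] << 8;
            break;
        }
    }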
--- a/libavcodec/atrac1.c
+++ b/libavcodec/atrac1.c
@@ -73,7 +73,6 @@ typedef struct {
  * The atrac1 context, holds all needed parameters for decoding
  */
 typedef struct {
-    AVFrame frame;
     AT1SUCtx SUs[AT1_MAX_CHANNELS]; ///< channel sound unit
     DECLARE_ALIGNED(32, float, spec)[AT1_SU_SAMPLES]; ///< the mdct spectrum buffer
 
@@ -274,6 +273,7 @@ static void at1_subband_synthesis(AT1Ctx *q, AT1SUCtx* su, float *pOut)
 static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     AT1Ctx *q = avctx->priv_data;
@@ -287,8 +287,8 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    q->frame.nb_samples = AT1_SU_SAMPLES;
-    if ((ret = ff_get_buffer(avctx, &q->frame)) < 0) {
+    frame->nb_samples = AT1_SU_SAMPLES;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -310,11 +310,10 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
         ret = at1_imdct_block(su, q);
         if (ret < 0)
             return ret;
-        at1_subband_synthesis(q, su, (float *)q->frame.extended_data[ch]);
+        at1_subband_synthesis(q, su, (float *)frame->extended_data[ch]);
     }
 
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = q->frame;
+    *got_frame_ptr = 1;
 
     return avctx->block_align;
 }
@@ -375,9 +374,6 @@ static av_cold int atrac1_decode_init(AVCodecContext *avctx)
     q->SUs[1].spectrum[0] = q->SUs[1].spec1;
     q->SUs[1].spectrum[1] = q->SUs[1].spec2;
 
-    avcodec_get_frame_defaults(&q->frame);
-    avctx->coded_frame = &q->frame;
-
     return 0;
 }
 

--- a/libavcodec/atrac3.c
+++ b/libavcodec/atrac3.c
@@ -87,7 +87,6 @@ typedef struct ChannelUnit {
 } ChannelUnit;
 
 typedef struct ATRAC3Context {
-    AVFrame frame;
     GetBitContext gb;
     //@{
     /** stream data */
@@ -799,6 +798,7 @@ static int decode_frame(AVCodecContext *avctx, const uint8_t *databuf,
 static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     ATRAC3Context *q = avctx->priv_data;
@@ -812,8 +812,8 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    q->frame.nb_samples = SAMPLES_PER_FRAME;
-    if ((ret = ff_get_buffer(avctx, &q->frame)) < 0) {
+    frame->nb_samples = SAMPLES_PER_FRAME;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -826,14 +826,13 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
         databuf = buf;
     }
 
-    ret = decode_frame(avctx, databuf, (float **)q->frame.extended_data);
+    ret = decode_frame(avctx, databuf, (float **)frame->extended_data);
     if (ret) {
         av_log(NULL, AV_LOG_ERROR, "Frame decoding error!\n");
         return ret;
     }
 
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = q->frame;
+    *got_frame_ptr = 1;
 
     return avctx->block_align;
 }
@@ -997,9 +996,6 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
         return AVERROR(ENOMEM);
     }
 
-    avcodec_get_frame_defaults(&q->frame);
-    avctx->coded_frame = &q->frame;
-
     return 0;
 }
 

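Both ATRAC decoders hand `(float **)frame->extended_data` to their core routine: for planar float output, ff_get_buffer() leaves one pointer per channel in extended_data, so the core decoder never needs to see the AVFrame at all. A small sketch, assuming a hypothetical core_decode() in place of atrac3's decode_frame() or atrac1's per-channel at1_subband_synthesis():

    #include "libavcodec/avcodec.h"

    /* Hypothetical core routine: fills each channel plane with samples.
     * Real decoders synthesize audio here; zeros keep the sketch simple. */
    static int core_decode(float **out, int channels, int nb_samples)
    {
        int ch, i;
        for (ch = 0; ch < channels; ch++)
            for (i = 0; i < nb_samples; i++)
                out[ch][i] = 0.0f;
        return 0;
    }

    /* Hand a planar-float AVFrame to the core as a plain float**. */
    static int decode_into_frame(AVCodecContext *avctx, AVFrame *frame)
    {
        return core_decode((float **)frame->extended_data,
                           avctx->channels, frame->nb_samples);
    }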
--- a/libavcodec/binkaudio.c
+++ b/libavcodec/binkaudio.c
@@ -47,7 +47,6 @@ static float quant_table[96];
 #define BINK_BLOCK_MAX_SIZE (MAX_CHANNELS << 11)
 
 typedef struct {
-    AVFrame frame;
     GetBitContext gb;
     int version_b; ///< Bink version 'b'
     int first;
@@ -143,9 +142,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     else
         return -1;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
@@ -294,6 +290,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
                         int *got_frame_ptr, AVPacket *avpkt)
 {
     BinkAudioContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     GetBitContext *gb = &s->gb;
     int ret, consumed = 0;
 
@@ -321,22 +318,21 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    s->frame.nb_samples = s->frame_len;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = s->frame_len;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
-    if (decode_block(s, (float **)s->frame.extended_data,
+    if (decode_block(s, (float **)frame->extended_data,
                      avctx->codec->id == AV_CODEC_ID_BINKAUDIO_DCT)) {
         av_log(avctx, AV_LOG_ERROR, "Incomplete packet\n");
         return AVERROR_INVALIDDATA;
     }
     get_bits_align32(gb);
 
-    s->frame.nb_samples = s->block_size / avctx->channels;
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
+    frame->nb_samples = s->block_size / avctx->channels;
+    *got_frame_ptr = 1;
 
     return consumed;
 }
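binkaudio shows one extra wrinkle: it requests a buffer for s->frame_len samples, decodes, and only then sets frame->nb_samples to the count actually produced (s->block_size / channels). Shrinking nb_samples after ff_get_buffer() is safe because it merely narrows the view onto the buffer that was already allocated. A sketch of that allocate-max-then-shrink pattern, with MAX_SAMPLES and decode_some_samples() as illustrative placeholders:

    #include "avcodec.h"
    #include "internal.h"

    enum { MAX_SAMPLES = 2048 };            /* illustrative worst case */

    /* Hypothetical: decodes into frame and returns samples produced. */
    static int decode_some_samples(AVCodecContext *avctx, AVFrame *frame);

    static int decode_shrink(AVCodecContext *avctx, AVFrame *frame,
                             int *got_frame_ptr)
    {
        int ret, produced;

        frame->nb_samples = MAX_SAMPLES;               /* allocate for the maximum */
        if ((ret = ff_get_buffer(avctx, frame)) < 0)
            return ret;

        if ((produced = decode_some_samples(avctx, frame)) < 0)
            return produced;

        frame->nb_samples = produced;                  /* shrink, never grow */
        *got_frame_ptr = 1;
        return 0;
    }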