commit f03cdbd045
Merge commit '5cc0bd2cb47cbb1040f2bb0ded8d72a442c79b20'

* commit '5cc0bd2cb47cbb1040f2bb0ded8d72a442c79b20':
  binkaudio: decode directly to the user-provided AVFrame
  atrac3: decode directly to the user-provided AVFrame
  atrac1: decode directly to the user-provided AVFrame
  ape: decode directly to the user-provided AVFrame
  amrwb: decode directly to the user-provided AVFrame

Conflicts:
	libavcodec/amrwbdec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
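
For orientation, a minimal, self-contained sketch of the pattern this merge applies across the five decoders. This is not libavcodec code: Frame, Context and get_buffer below are stand-ins, not the real API. The point it illustrates is that the decoder's private context no longer embeds its own output AVFrame; the decode callback fills the frame the caller already passed in via `data`, so the trailing `*(AVFrame *)data = ctx->frame;` copy disappears.

    /* sketch only: stand-in types, not libavcodec */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Frame   { int nb_samples; float *data; } Frame;
    typedef struct Context { int frame_len; } Context;   /* no embedded Frame anymore */

    /* stand-in for ff_get_buffer(): allocate sample storage for the frame */
    static int get_buffer(Frame *frame)
    {
        frame->data = calloc(frame->nb_samples, sizeof(*frame->data));
        return frame->data ? 0 : -1;
    }

    /* new-style decode callback: writes straight into the caller's frame */
    static int decode_frame(Context *ctx, void *data, int *got_frame_ptr)
    {
        Frame *frame = data;                 /* user-provided output frame */
        int ret, i;

        frame->nb_samples = ctx->frame_len;
        if ((ret = get_buffer(frame)) < 0)
            return ret;

        for (i = 0; i < frame->nb_samples; i++)
            frame->data[i] = 0.0f;           /* decoded samples would go here */

        *got_frame_ptr = 1;                  /* no copy back into *data needed */
        return 0;
    }

    int main(void)
    {
        Context ctx = { 4 };
        Frame out = { 0 };
        int got = 0;

        if (decode_frame(&ctx, &out, &got) == 0 && got)
            printf("decoded %d samples\n", out.nb_samples);
        free(out.data);
        return 0;
    }
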
libavcodec/amrwbdec.c

@@ -45,7 +45,6 @@
 #include "mips/amrwbdec_mips.h"
 
 typedef struct {
-    AVFrame avframe;                 ///< AVFrame for decoded samples
     AMRWBFrame frame;                ///< AMRWB parameters decoded from bitstream
     enum Mode fr_cur_mode;           ///< mode index of current frame
     uint8_t fr_quality;              ///< frame quality index (FQI)
@@ -121,9 +120,6 @@ static av_cold int amrwb_decode_init(AVCodecContext *avctx)
     for (i = 0; i < 4; i++)
         ctx->prediction_error[i] = MIN_ENERGY;
 
-    avcodec_get_frame_defaults(&ctx->avframe);
-    avctx->coded_frame = &ctx->avframe;
-
     ff_acelp_filter_init(&ctx->acelpf_ctx);
     ff_acelp_vectors_init(&ctx->acelpv_ctx);
     ff_celp_filter_init(&ctx->celpf_ctx);
@@ -1097,6 +1093,7 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
 {
     AMRWBContext *ctx = avctx->priv_data;
+    AVFrame *frame = data;
     AMRWBFrame *cf = &ctx->frame;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -1114,12 +1111,12 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
     int sub, i, ret;
 
     /* get output buffer */
-    ctx->avframe.nb_samples = 4 * AMRWB_SFR_SIZE_16k;
-    if ((ret = ff_get_buffer(avctx, &ctx->avframe)) < 0) {
+    frame->nb_samples = 4 * AMRWB_SFR_SIZE_16k;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    buf_out = (float *)ctx->avframe.data[0];
+    buf_out = (float *)frame->data[0];
 
     header_size = decode_mime_header(ctx, buf);
     if (ctx->fr_cur_mode > MODE_SID) {
@@ -1266,7 +1263,6 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
     memcpy(ctx->isf_past_final, ctx->isf_cur, LP_ORDER * sizeof(float));
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = ctx->avframe;
 
     return expected_fr_size;
 }
libavcodec/apedec.c

@@ -129,7 +129,6 @@ typedef struct APEPredictor {
 typedef struct APEContext {
     AVClass *class;                          ///< class for AVOptions
     AVCodecContext *avctx;
-    AVFrame frame;
     DSPContext dsp;
     int channels;
     int samples;                             ///< samples left to decode in current frame
@@ -235,9 +234,6 @@ static av_cold int ape_decode_init(AVCodecContext *avctx)
     ff_dsputil_init(&s->dsp, avctx);
     avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 filter_alloc_fail:
     ape_decode_close(avctx);
@@ -826,6 +822,7 @@ static void ape_unpack_stereo(APEContext *ctx, int count)
 static int ape_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     APEContext *s = avctx->priv_data;
     uint8_t *sample8;
@@ -906,8 +903,8 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
     s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8);
 
     /* get output buffer */
-    s->frame.nb_samples = blockstodecode;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = blockstodecode;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -929,21 +926,21 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
     switch (s->bps) {
     case 8:
         for (ch = 0; ch < s->channels; ch++) {
-            sample8 = (uint8_t *)s->frame.data[ch];
+            sample8 = (uint8_t *)frame->data[ch];
             for (i = 0; i < blockstodecode; i++)
                 *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff;
         }
         break;
     case 16:
         for (ch = 0; ch < s->channels; ch++) {
-            sample16 = (int16_t *)s->frame.data[ch];
+            sample16 = (int16_t *)frame->data[ch];
             for (i = 0; i < blockstodecode; i++)
                 *sample16++ = s->decoded[ch][i];
         }
         break;
     case 24:
         for (ch = 0; ch < s->channels; ch++) {
-            sample24 = (int32_t *)s->frame.data[ch];
+            sample24 = (int32_t *)frame->data[ch];
             for (i = 0; i < blockstodecode; i++)
                 *sample24++ = s->decoded[ch][i] << 8;
         }
@@ -953,7 +950,6 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
     s->samples -= blockstodecode;
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return !s->samples ? avpkt->size : 0;
 }
libavcodec/atrac1.c

@@ -73,7 +73,6 @@ typedef struct {
  * The atrac1 context, holds all needed parameters for decoding
  */
 typedef struct {
-    AVFrame frame;
     AT1SUCtx SUs[AT1_MAX_CHANNELS];                     ///< channel sound unit
     DECLARE_ALIGNED(32, float, spec)[AT1_SU_SAMPLES];   ///< the mdct spectrum buffer
 
@@ -274,6 +273,7 @@ static void at1_subband_synthesis(AT1Ctx *q, AT1SUCtx* su, float *pOut)
 static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     AT1Ctx *q = avctx->priv_data;
@@ -287,8 +287,8 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    q->frame.nb_samples = AT1_SU_SAMPLES;
-    if ((ret = ff_get_buffer(avctx, &q->frame)) < 0) {
+    frame->nb_samples = AT1_SU_SAMPLES;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -310,11 +310,10 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
         ret = at1_imdct_block(su, q);
         if (ret < 0)
             return ret;
-        at1_subband_synthesis(q, su, (float *)q->frame.extended_data[ch]);
+        at1_subband_synthesis(q, su, (float *)frame->extended_data[ch]);
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = q->frame;
 
     return avctx->block_align;
 }
@@ -375,9 +374,6 @@ static av_cold int atrac1_decode_init(AVCodecContext *avctx)
     q->SUs[1].spectrum[0] = q->SUs[1].spec1;
     q->SUs[1].spectrum[1] = q->SUs[1].spec2;
 
-    avcodec_get_frame_defaults(&q->frame);
-    avctx->coded_frame = &q->frame;
-
     return 0;
 }
 
libavcodec/atrac3.c

@@ -87,7 +87,6 @@ typedef struct ChannelUnit {
 } ChannelUnit;
 
 typedef struct ATRAC3Context {
-    AVFrame frame;
     GetBitContext gb;
     //@{
     /** stream data */
@@ -799,6 +798,7 @@ static int decode_frame(AVCodecContext *avctx, const uint8_t *databuf,
 static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     ATRAC3Context *q = avctx->priv_data;
@@ -812,8 +812,8 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    q->frame.nb_samples = SAMPLES_PER_FRAME;
-    if ((ret = ff_get_buffer(avctx, &q->frame)) < 0) {
+    frame->nb_samples = SAMPLES_PER_FRAME;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -826,14 +826,13 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
         databuf = buf;
     }
 
-    ret = decode_frame(avctx, databuf, (float **)q->frame.extended_data);
+    ret = decode_frame(avctx, databuf, (float **)frame->extended_data);
     if (ret) {
         av_log(NULL, AV_LOG_ERROR, "Frame decoding error!\n");
         return ret;
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = q->frame;
 
     return avctx->block_align;
 }
@@ -997,9 +996,6 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
         return AVERROR(ENOMEM);
     }
 
-    avcodec_get_frame_defaults(&q->frame);
-    avctx->coded_frame = &q->frame;
-
     return 0;
 }
 
libavcodec/binkaudio.c

@@ -47,7 +47,6 @@ static float quant_table[96];
 #define BINK_BLOCK_MAX_SIZE (MAX_CHANNELS << 11)
 
 typedef struct {
-    AVFrame frame;
     GetBitContext gb;
     int version_b;          ///< Bink version 'b'
     int first;
@@ -143,9 +142,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     else
         return -1;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
@@ -294,6 +290,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
                         int *got_frame_ptr, AVPacket *avpkt)
 {
     BinkAudioContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     GetBitContext *gb = &s->gb;
     int ret, consumed = 0;
 
@@ -321,22 +318,21 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    s->frame.nb_samples = s->frame_len;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = s->frame_len;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
-    if (decode_block(s, (float **)s->frame.extended_data,
+    if (decode_block(s, (float **)frame->extended_data,
                      avctx->codec->id == AV_CODEC_ID_BINKAUDIO_DCT)) {
         av_log(avctx, AV_LOG_ERROR, "Incomplete packet\n");
         return AVERROR_INVALIDDATA;
     }
     get_bits_align32(gb);
 
-    s->frame.nb_samples = s->block_size / avctx->channels;
+    frame->nb_samples = s->block_size / avctx->channels;
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return consumed;
 }