commit 08059f6150
Merge commit '5d5c248c3df30fa91a8dde639618c985b9a11c53'

* commit '5d5c248c3df30fa91a8dde639618c985b9a11c53':
  s302m: decode directly to the user-provided AVFrame
  ra288: decode directly to the user-provided AVFrame
  ra144: decode directly to the user-provided AVFrame
  ralf: decode directly to the user-provided AVFrame
  qdm2: decode directly to the user-provided AVFrame

Merged-by: Michael Niedermayer <michaelni@gmx.at>
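The diffs below all apply the same conversion: each decoder stops keeping an AVFrame inside its private context (and stops pointing avctx->coded_frame at it in init), and instead fills the AVFrame the caller passes through the decode callback's data argument. The following is a minimal sketch of that pattern, assuming the libavcodec decode-callback API of this era; MyContext, my_decode_frame and the frame_size field are hypothetical names used only for illustration, not part of the commit.

#include "avcodec.h"
#include "internal.h"   /* ff_get_buffer() */

typedef struct MyContext {
    int frame_size;     /* hypothetical per-codec state */
} MyContext;

static int my_decode_frame(AVCodecContext *avctx, void *data,
                           int *got_frame_ptr, AVPacket *avpkt)
{
    MyContext *s   = avctx->priv_data;
    AVFrame *frame = data;          /* the user-provided output frame */
    int16_t *out;
    int ret;

    /* size the frame, then let ff_get_buffer() allocate frame->data[] */
    frame->nb_samples = s->frame_size;
    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    out = (int16_t *)frame->data[0];

    /* ... decode avpkt->data into out ... */

    *got_frame_ptr = 1;             /* no trailing *(AVFrame *)data = ... copy needed */
    return avpkt->size;
}

Writing into the caller's frame removes the final *(AVFrame *)data = s->frame; copy and the per-decoder avcodec_get_frame_defaults() / coded_frame bookkeeping visible in the removed lines of each hunk.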
@@ -130,8 +130,6 @@ typedef struct {
  * QDM2 decoder context
  */
 typedef struct {
-    AVFrame frame;
-
     /// Parameters from codec header, do not change during playback
     int nb_channels;            ///< number of channels
     int channels;               ///< number of channels
@@ -1879,9 +1877,6 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
 
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
@@ -1962,6 +1957,7 @@ static int qdm2_decode (QDM2Context *q, const uint8_t *in, int16_t *out)
 static int qdm2_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     QDM2Context *s = avctx->priv_data;
@@ -1974,12 +1970,12 @@ static int qdm2_decode_frame(AVCodecContext *avctx, void *data,
         return -1;
 
     /* get output buffer */
-    s->frame.nb_samples = 16 * s->frame_size;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = 16 * s->frame_size;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    out = (int16_t *)s->frame.data[0];
+    out = (int16_t *)frame->data[0];
 
     for (i = 0; i < 16; i++) {
         if (qdm2_decode(s, buf, out) < 0)
@@ -1988,7 +1984,6 @@ static int qdm2_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return s->checksum_size;
 }
@@ -35,7 +35,6 @@
 
 typedef struct RA144Context {
     AVCodecContext *avctx;
-    AVFrame frame;
     LPCContext lpc_ctx;
     AudioFrameQueue afq;
     int last_frame;
@@ -43,9 +43,6 @@ static av_cold int ra144_decode_init(AVCodecContext * avctx)
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
-    avcodec_get_frame_defaults(&ractx->frame);
-    avctx->coded_frame = &ractx->frame;
-
     return 0;
 }
 
@@ -65,6 +62,7 @@ static void do_output_subblock(RA144Context *ractx, const uint16_t *lpc_coefs,
 static int ra144_decode_frame(AVCodecContext * avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     static const uint8_t sizes[LPC_ORDER] = {6, 5, 5, 4, 4, 3, 3, 3, 3, 2};
@@ -80,12 +78,12 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *data,
     GetBitContext gb;
 
     /* get output buffer */
-    ractx->frame.nb_samples = NBLOCKS * BLOCKSIZE;
-    if ((ret = ff_get_buffer(avctx, &ractx->frame)) < 0) {
+    frame->nb_samples = NBLOCKS * BLOCKSIZE;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples = (int16_t *)ractx->frame.data[0];
+    samples = (int16_t *)frame->data[0];
 
     if(buf_size < FRAMESIZE) {
         av_log(avctx, AV_LOG_ERROR,
@@ -125,7 +123,6 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *data,
     FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]);
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = ractx->frame;
 
     return FRAMESIZE;
 }
@@ -38,7 +38,6 @@
 #define RA288_BLOCKS_PER_FRAME 32
 
 typedef struct {
-    AVFrame frame;
     AVFloatDSPContext fdsp;
     DECLARE_ALIGNED(32, float, sp_lpc)[FFALIGN(36, 16)];   ///< LPC coefficients for speech data (spec: A)
     DECLARE_ALIGNED(32, float, gain_lpc)[FFALIGN(10, 16)]; ///< LPC coefficients for gain (spec: GB)
@@ -75,9 +74,6 @@ static av_cold int ra288_decode_init(AVCodecContext *avctx)
 
     avpriv_float_dsp_init(&ractx->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
 
-    avcodec_get_frame_defaults(&ractx->frame);
-    avctx->coded_frame = &ractx->frame;
-
     return 0;
 }
 
@@ -185,6 +181,7 @@ static void backward_filter(RA288Context *ractx,
 static int ra288_decode_frame(AVCodecContext * avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     float *out;
@@ -200,12 +197,12 @@ static int ra288_decode_frame(AVCodecContext * avctx, void *data,
     }
 
     /* get output buffer */
-    ractx->frame.nb_samples = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME;
-    if ((ret = ff_get_buffer(avctx, &ractx->frame)) < 0) {
+    frame->nb_samples = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    out = (float *)ractx->frame.data[0];
+    out = (float *)frame->data[0];
 
     init_get_bits(&gb, buf, avctx->block_align * 8);
 
@@ -228,7 +225,6 @@ static int ra288_decode_frame(AVCodecContext * avctx, void *data,
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = ractx->frame;
 
     return avctx->block_align;
 }
@@ -49,8 +49,6 @@ typedef struct VLCSet {
 #define RALF_MAX_PKT_SIZE 8192
 
 typedef struct RALFContext {
-    AVFrame frame;
-
     int version;
     int max_frame_size;
     VLCSet sets[3];
@@ -154,9 +152,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO
                                                    : AV_CH_LAYOUT_MONO;
 
-    avcodec_get_frame_defaults(&ctx->frame);
-    avctx->coded_frame = &ctx->frame;
-
     ctx->max_frame_size = AV_RB32(avctx->extradata + 16);
     if (ctx->max_frame_size > (1 << 20) || !ctx->max_frame_size) {
         av_log(avctx, AV_LOG_ERROR, "invalid frame size %d\n",
@@ -426,6 +421,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
                         AVPacket *avpkt)
 {
     RALFContext *ctx = avctx->priv_data;
+    AVFrame *frame = data;
     int16_t *samples0;
     int16_t *samples1;
     int ret;
@@ -463,13 +459,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
         src_size = avpkt->size;
     }
 
-    ctx->frame.nb_samples = ctx->max_frame_size;
-    if ((ret = ff_get_buffer(avctx, &ctx->frame)) < 0) {
+    frame->nb_samples = ctx->max_frame_size;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "Me fail get_buffer()? That's unpossible!\n");
         return ret;
     }
-    samples0 = (int16_t *)ctx->frame.data[0];
-    samples1 = (int16_t *)ctx->frame.data[1];
+    samples0 = (int16_t *)frame->data[0];
+    samples1 = (int16_t *)frame->data[1];
 
     if (src_size < 5) {
         av_log(avctx, AV_LOG_ERROR, "too short packets are too short!\n");
@@ -511,9 +507,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
         bytes_left -= ctx->block_size[i];
     }
 
-    ctx->frame.nb_samples = ctx->sample_offset;
+    frame->nb_samples = ctx->sample_offset;
     *got_frame_ptr = ctx->sample_offset > 0;
-    *(AVFrame*)data = ctx->frame;
 
     return avpkt->size;
 }
@@ -28,10 +28,6 @@
 
 #define AES3_HEADER_LEN 4
 
-typedef struct S302MDecodeContext {
-    AVFrame frame;
-} S302MDecodeContext;
-
 static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
                                     int buf_size)
 {
@@ -95,7 +91,7 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
 static int s302m_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
 {
-    S302MDecodeContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     int block_size, ret;
@@ -109,16 +105,16 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     block_size = (avctx->bits_per_coded_sample + 4) / 4;
-    s->frame.nb_samples = 2 * (buf_size / block_size) / avctx->channels;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = 2 * (buf_size / block_size) / avctx->channels;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
-    buf_size = (s->frame.nb_samples * avctx->channels / 2) * block_size;
+    buf_size = (frame->nb_samples * avctx->channels / 2) * block_size;
 
     if (avctx->bits_per_coded_sample == 24) {
-        uint32_t *o = (uint32_t *)s->frame.data[0];
+        uint32_t *o = (uint32_t *)frame->data[0];
         for (; buf_size > 6; buf_size -= 7) {
             *o++ = (ff_reverse[buf[2]] << 24) |
                    (ff_reverse[buf[1]] << 16) |
@@ -130,7 +126,7 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
             buf += 7;
         }
     } else if (avctx->bits_per_coded_sample == 20) {
-        uint32_t *o = (uint32_t *)s->frame.data[0];
+        uint32_t *o = (uint32_t *)frame->data[0];
         for (; buf_size > 5; buf_size -= 6) {
             *o++ = (ff_reverse[buf[2] & 0xf0] << 28) |
                    (ff_reverse[buf[1]] << 20) |
@@ -141,7 +137,7 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
             buf += 6;
         }
     } else {
-        uint16_t *o = (uint16_t *)s->frame.data[0];
+        uint16_t *o = (uint16_t *)frame->data[0];
         for (; buf_size > 4; buf_size -= 5) {
             *o++ = (ff_reverse[buf[1]] << 8) |
                     ff_reverse[buf[0]];
@@ -153,28 +149,14 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return avpkt->size;
 }
 
-static int s302m_decode_init(AVCodecContext *avctx)
-{
-    S302MDecodeContext *s = avctx->priv_data;
-
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
-    return 0;
-}
-
-
 AVCodec ff_s302m_decoder = {
     .name           = "s302m",
     .type           = AVMEDIA_TYPE_AUDIO,
     .id             = AV_CODEC_ID_S302M,
-    .priv_data_size = sizeof(S302MDecodeContext),
-    .init           = s302m_decode_init,
     .decode         = s302m_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("SMPTE 302M"),