Merge remote-tracking branch 'qatar/master'

* qatar/master: (53 commits)
  probe: Restore identification of files with very large id3 tags and no extension.
  probe: Remove id3 tag presence as a criteria to do file extension checking.
  mpegts: MP4 SL support
  mpegts: MP4 OD support
  mpegts: Add support for Sections in PMT
  mpegts: Replace the MP4 descriptor parser with a recursive parser.
  mpegts: Add support for multiple mp4 descriptors
  mpegts: Parse mpeg2 SL descriptors.
  isom: Add MPEG4SYSTEMS dummy object type indication.
  aacdec: allow output reconfiguration on channel changes
  nellymoserenc: take float input samples instead of int16
  nellymoserdec: use dsp functions for overlap and windowing
  nellymoserdec: do not fail if there is extra data in the packet
  nellymoserdec: fail if output buffer is too small
  nellymoserdec: remove pointless buffer size check.
  lavf: add init_put_byte() to the list of visible symbols.
  seek-test: free options dictionary after use
  snow: do not draw_edge if emu_edge is set
  tools/pktdumper: update to recent avformat api
  seek-test: update to recent avformat api
  ...

Conflicts:
    doc/APIchanges
    libavcodec/mpegaudiodec.c
    libavcodec/nellymoserdec.c
    libavcodec/snow.c
    libavcodec/version.h
    libavcodec/wmadec.c
    libavformat/avformat.h
    libavformat/mpegts.c
    libavformat/mxfdec.c
    libavformat/utils.c
    libavformat/wtv.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 6faf0a21e1
@@ -16,6 +16,10 @@ API changes, most recent first:
 2011-10-20 - b35e9e1 - lavu 51.22.0
   Add av_strtok() to avstring.h.
 
+2011-xx-xx - xxxxxxx - lavc 53.15.0
+  Remove avcodec_parse_frame.
+  Deprecate AVCodecContext.parse_only and CODEC_CAP_PARSE_ONLY.
+
 2011-10-xx - xxxxxxx - lavf 53.10.0
   Add avformat_new_stream(). Deprecate av_new_stream().
 
@@ -26,6 +26,7 @@
 #include "get_bits.h"
 #include "bytestream.h"
 #include "libavutil/audioconvert.h"
+#include "libavutil/avassert.h"
 
 /**
  * @file
@@ -163,48 +164,7 @@ typedef struct APEContext {
 
 // TODO: dsputilize
 
-static av_cold int ape_decode_init(AVCodecContext * avctx)
-{
-    APEContext *s = avctx->priv_data;
-    int i;
-
-    if (avctx->extradata_size != 6) {
-        av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n");
-        return -1;
-    }
-    if (avctx->bits_per_coded_sample != 16) {
-        av_log(avctx, AV_LOG_ERROR, "Only 16-bit samples are supported\n");
-        return -1;
-    }
-    if (avctx->channels > 2) {
-        av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n");
-        return -1;
-    }
-    s->avctx = avctx;
-    s->channels = avctx->channels;
-    s->fileversion = AV_RL16(avctx->extradata);
-    s->compression_level = AV_RL16(avctx->extradata + 2);
-    s->flags = AV_RL16(avctx->extradata + 4);
-
-    av_log(avctx, AV_LOG_DEBUG, "Compression Level: %d - Flags: %d\n", s->compression_level, s->flags);
-    if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE) {
-        av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n", s->compression_level);
-        return -1;
-    }
-    s->fset = s->compression_level / 1000 - 1;
-    for (i = 0; i < APE_FILTER_LEVELS; i++) {
-        if (!ape_filter_orders[s->fset][i])
-            break;
-        s->filterbuf[i] = av_malloc((ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4);
-    }
-
-    dsputil_init(&s->dsp, avctx);
-    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
-    avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
-    return 0;
-}
-
-static av_cold int ape_decode_close(AVCodecContext * avctx)
+static av_cold int ape_decode_close(AVCodecContext *avctx)
 {
     APEContext *s = avctx->priv_data;
     int i;
@@ -216,6 +176,54 @@ static av_cold int ape_decode_close(AVCodecContext * avctx)
     return 0;
 }
 
+static av_cold int ape_decode_init(AVCodecContext *avctx)
+{
+    APEContext *s = avctx->priv_data;
+    int i;
+
+    if (avctx->extradata_size != 6) {
+        av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n");
+        return AVERROR(EINVAL);
+    }
+    if (avctx->bits_per_coded_sample != 16) {
+        av_log(avctx, AV_LOG_ERROR, "Only 16-bit samples are supported\n");
+        return AVERROR(EINVAL);
+    }
+    if (avctx->channels > 2) {
+        av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n");
+        return AVERROR(EINVAL);
+    }
+    s->avctx = avctx;
+    s->channels = avctx->channels;
+    s->fileversion = AV_RL16(avctx->extradata);
+    s->compression_level = AV_RL16(avctx->extradata + 2);
+    s->flags = AV_RL16(avctx->extradata + 4);
+
+    av_log(avctx, AV_LOG_DEBUG, "Compression Level: %d - Flags: %d\n",
+           s->compression_level, s->flags);
+    if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE) {
+        av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n",
+               s->compression_level);
+        return AVERROR_INVALIDDATA;
+    }
+    s->fset = s->compression_level / 1000 - 1;
+    for (i = 0; i < APE_FILTER_LEVELS; i++) {
+        if (!ape_filter_orders[s->fset][i])
+            break;
+        FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i],
+                         (ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4,
+                         filter_alloc_fail);
+    }
+
+    dsputil_init(&s->dsp, avctx);
+    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+    avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
+    return 0;
+filter_alloc_fail:
+    ape_decode_close(avctx);
+    return AVERROR(ENOMEM);
+}
+
 /**
  * @name APE range decoding functions
  * @{
@@ -228,7 +236,7 @@ static av_cold int ape_decode_close(AVCodecContext * avctx)
 #define BOTTOM_VALUE (TOP_VALUE >> 8)
 
 /** Start the decoder */
-static inline void range_start_decoding(APEContext * ctx)
+static inline void range_start_decoding(APEContext *ctx)
 {
     ctx->rc.buffer = bytestream_get_byte(&ctx->ptr);
     ctx->rc.low = ctx->rc.buffer >> (8 - EXTRA_BITS);
@@ -236,13 +244,16 @@ static inline void range_start_decoding(APEContext * ctx)
 }
 
 /** Perform normalization */
-static inline void range_dec_normalize(APEContext * ctx)
+static inline void range_dec_normalize(APEContext *ctx)
 {
     while (ctx->rc.range <= BOTTOM_VALUE) {
         ctx->rc.buffer <<= 8;
-        if(ctx->ptr < ctx->data_end)
+        if(ctx->ptr < ctx->data_end) {
             ctx->rc.buffer += *ctx->ptr;
             ctx->ptr++;
+        } else {
+            ctx->error = 1;
+        }
         ctx->rc.low = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
         ctx->rc.range <<= 8;
     }
@@ -254,7 +265,7 @@ static inline void range_dec_normalize(APEContext * ctx)
  * @param tot_f is the total frequency or (code_value)1<<shift
  * @return the culmulative frequency
  */
-static inline int range_decode_culfreq(APEContext * ctx, int tot_f)
+static inline int range_decode_culfreq(APEContext *ctx, int tot_f)
 {
     range_dec_normalize(ctx);
     ctx->rc.help = ctx->rc.range / tot_f;
@@ -266,7 +277,7 @@ static inline int range_decode_culfreq(APEContext * ctx, int tot_f)
  * @param ctx decoder context
  * @param shift number of bits to decode
  */
-static inline int range_decode_culshift(APEContext * ctx, int shift)
+static inline int range_decode_culshift(APEContext *ctx, int shift)
 {
     range_dec_normalize(ctx);
     ctx->rc.help = ctx->rc.range >> shift;
@@ -280,14 +291,14 @@ static inline int range_decode_culshift(APEContext * ctx, int shift)
  * @param sy_f the interval length (frequency of the symbol)
  * @param lt_f the lower end (frequency sum of < symbols)
  */
-static inline void range_decode_update(APEContext * ctx, int sy_f, int lt_f)
+static inline void range_decode_update(APEContext *ctx, int sy_f, int lt_f)
 {
     ctx->rc.low -= ctx->rc.help * lt_f;
     ctx->rc.range = ctx->rc.help * sy_f;
 }
 
 /** Decode n bits (n <= 16) without modelling */
-static inline int range_decode_bits(APEContext * ctx, int n)
+static inline int range_decode_bits(APEContext *ctx, int n)
 {
     int sym = range_decode_culshift(ctx, n);
     range_decode_update(ctx, 1, sym);
@@ -339,7 +350,7 @@ static const uint16_t counts_diff_3980[21] = {
  * @param counts probability range start position
 * @param counts_diff probability range widths
 */
-static inline int range_get_symbol(APEContext * ctx,
+static inline int range_get_symbol(APEContext *ctx,
                                    const uint16_t counts[],
                                    const uint16_t counts_diff[])
 {
@@ -374,7 +385,7 @@ static inline void update_rice(APERice *rice, int x)
         rice->k++;
 }
 
-static inline int ape_decode_value(APEContext * ctx, APERice *rice)
+static inline int ape_decode_value(APEContext *ctx, APERice *rice)
 {
     int x, overflow;
 
@@ -441,7 +452,7 @@ static inline int ape_decode_value(APEContext * ctx, APERice *rice)
     return -(x >> 1);
 }
 
-static void entropy_decode(APEContext * ctx, int blockstodecode, int stereo)
+static void entropy_decode(APEContext *ctx, int blockstodecode, int stereo)
 {
     int32_t *decoded0 = ctx->decoded0;
     int32_t *decoded1 = ctx->decoded1;
@@ -464,9 +475,11 @@ static void entropy_decode(APEContext * ctx, int blockstodecode, int stereo)
         range_dec_normalize(ctx);   /* normalize to use up all bytes */
 }
 
-static void init_entropy_decoder(APEContext * ctx)
+static int init_entropy_decoder(APEContext *ctx)
 {
     /* Read the CRC */
+    if (ctx->data_end - ctx->ptr < 6)
+        return AVERROR_INVALIDDATA;
     ctx->CRC = bytestream_get_be32(&ctx->ptr);
 
     /* Read the frame flags if they exist */
@@ -474,6 +487,8 @@ static void init_entropy_decoder(APEContext * ctx)
     if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) {
         ctx->CRC &= ~0x80000000;
 
+        if (ctx->data_end - ctx->ptr < 6)
+            return AVERROR_INVALIDDATA;
         ctx->frameflags = bytestream_get_be32(&ctx->ptr);
     }
 
@@ -490,13 +505,15 @@ static void init_entropy_decoder(APEContext * ctx)
         ctx->ptr++;
 
     range_start_decoding(ctx);
+
+    return 0;
 }
 
 static const int32_t initial_coeffs[4] = {
     360, 317, -109, 98
 };
 
-static void init_predictor_decoder(APEContext * ctx)
+static void init_predictor_decoder(APEContext *ctx)
 {
     APEPredictor *p = &ctx->predictor;
 
@@ -519,7 +536,10 @@ static inline int APESIGN(int32_t x) {
     return (x < 0) - (x > 0);
 }
 
-static av_always_inline int predictor_update_filter(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, const int adaptA, const int adaptB)
+static av_always_inline int predictor_update_filter(APEPredictor *p,
+                                                    const int decoded, const int filter,
+                                                    const int delayA, const int delayB,
+                                                    const int adaptA, const int adaptB)
 {
     int32_t predictionA, predictionB, sign;
 
@@ -563,7 +583,7 @@ static av_always_inline int predictor_update_filter(APEPredictor *p, const int d
     return p->filterA[filter];
 }
 
-static void predictor_decode_stereo(APEContext * ctx, int count)
+static void predictor_decode_stereo(APEContext *ctx, int count)
 {
     APEPredictor *p = &ctx->predictor;
     int32_t *decoded0 = ctx->decoded0;
@@ -571,9 +591,11 @@ static void predictor_decode_stereo(APEContext * ctx, int count)
 
     while (count--) {
         /* Predictor Y */
-        *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB, YADAPTCOEFFSA, YADAPTCOEFFSB);
+        *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB,
+                                            YADAPTCOEFFSA, YADAPTCOEFFSB);
         decoded0++;
-        *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB, XADAPTCOEFFSA, XADAPTCOEFFSB);
+        *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB,
+                                            XADAPTCOEFFSA, XADAPTCOEFFSB);
         decoded1++;
 
         /* Combined */
@@ -587,7 +609,7 @@ static void predictor_decode_stereo(APEContext * ctx, int count)
     }
 }
 
-static void predictor_decode_mono(APEContext * ctx, int count)
+static void predictor_decode_mono(APEContext *ctx, int count)
 {
     APEPredictor *p = &ctx->predictor;
     int32_t *decoded0 = ctx->decoded0;
@@ -632,7 +654,7 @@ static void predictor_decode_mono(APEContext * ctx, int count)
     p->lastA[0] = currentA;
 }
 
-static void do_init_filter(APEFilter *f, int16_t * buf, int order)
+static void do_init_filter(APEFilter *f, int16_t *buf, int order)
 {
     f->coeffs = buf;
     f->historybuffer = buf + order;
@@ -644,20 +666,23 @@ static void do_init_filter(APEFilter *f, int16_t * buf, int order)
     f->avg = 0;
 }
 
-static void init_filter(APEContext * ctx, APEFilter *f, int16_t * buf, int order)
+static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order)
 {
     do_init_filter(&f[0], buf, order);
     do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order);
 }
 
-static void do_apply_filter(APEContext * ctx, int version, APEFilter *f, int32_t *data, int count, int order, int fracbits)
+static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
+                            int32_t *data, int count, int order, int fracbits)
 {
     int res;
     int absres;
 
     while (count--) {
         /* round fixedpoint scalar product */
-        res = ctx->dsp.scalarproduct_and_madd_int16(f->coeffs, f->delay - order, f->adaptcoeffs - order, order, APESIGN(*data));
+        res = ctx->dsp.scalarproduct_and_madd_int16(f->coeffs, f->delay - order,
                                                     f->adaptcoeffs - order,
                                                     order, APESIGN(*data));
         res = (res + (1 << (fracbits - 1))) >> fracbits;
         res += *data;
         *data++ = res;
@@ -676,7 +701,8 @@ static void do_apply_filter(APEContext * ctx, int version, APEFilter *f, int32_t
         /* Update the adaption coefficients */
         absres = FFABS(res);
         if (absres)
-            *f->adaptcoeffs = ((res & (1<<31)) - (1<<30)) >> (25 + (absres <= f->avg*3) + (absres <= f->avg*4/3));
+            *f->adaptcoeffs = ((res & (1<<31)) - (1<<30)) >>
+                              (25 + (absres <= f->avg*3) + (absres <= f->avg*4/3));
         else
             *f->adaptcoeffs = 0;
 
@@ -699,8 +725,8 @@ static void do_apply_filter(APEContext * ctx, int version, APEFilter *f, int32_t
     }
 }
 
-static void apply_filter(APEContext * ctx, APEFilter *f,
-                         int32_t * data0, int32_t * data1,
+static void apply_filter(APEContext *ctx, APEFilter *f,
+                         int32_t *data0, int32_t *data1,
                          int count, int order, int fracbits)
 {
     do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits);
@@ -708,34 +734,38 @@ static void apply_filter(APEContext * ctx, APEFilter *f,
         do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits);
 }
 
-static void ape_apply_filters(APEContext * ctx, int32_t * decoded0,
-                              int32_t * decoded1, int count)
+static void ape_apply_filters(APEContext *ctx, int32_t *decoded0,
+                              int32_t *decoded1, int count)
 {
     int i;
 
     for (i = 0; i < APE_FILTER_LEVELS; i++) {
         if (!ape_filter_orders[ctx->fset][i])
             break;
-        apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count, ape_filter_orders[ctx->fset][i], ape_filter_fracbits[ctx->fset][i]);
+        apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count,
+                     ape_filter_orders[ctx->fset][i],
+                     ape_filter_fracbits[ctx->fset][i]);
     }
 }
 
-static void init_frame_decoder(APEContext * ctx)
+static int init_frame_decoder(APEContext *ctx)
 {
-    int i;
-    init_entropy_decoder(ctx);
+    int i, ret;
+    if ((ret = init_entropy_decoder(ctx)) < 0)
+        return ret;
     init_predictor_decoder(ctx);
 
     for (i = 0; i < APE_FILTER_LEVELS; i++) {
         if (!ape_filter_orders[ctx->fset][i])
             break;
-        init_filter(ctx, ctx->filters[i], ctx->filterbuf[i], ape_filter_orders[ctx->fset][i]);
+        init_filter(ctx, ctx->filters[i], ctx->filterbuf[i],
                     ape_filter_orders[ctx->fset][i]);
     }
+    return 0;
 }
 
-static void ape_unpack_mono(APEContext * ctx, int count)
+static void ape_unpack_mono(APEContext *ctx, int count)
 {
-    int32_t left;
     int32_t *decoded0 = ctx->decoded0;
     int32_t *decoded1 = ctx->decoded1;
 
@@ -754,14 +784,11 @@ static void ape_unpack_mono(APEContext * ctx, int count)
 
     /* Pseudo-stereo - just copy left channel to right channel */
     if (ctx->channels == 2) {
-        while (count--) {
-            left = *decoded0;
-            *(decoded1++) = *(decoded0++) = left;
-        }
+        memcpy(decoded1, decoded0, count * sizeof(*decoded1));
     }
 }
 
-static void ape_unpack_stereo(APEContext * ctx, int count)
+static void ape_unpack_stereo(APEContext *ctx, int count)
 {
     int32_t left, right;
     int32_t *decoded0 = ctx->decoded0;
@@ -789,7 +816,7 @@ static void ape_unpack_stereo(APEContext * ctx, int count)
     }
 }
 
-static int ape_decode_frame(AVCodecContext * avctx,
+static int ape_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
 {
@@ -797,49 +824,65 @@ static int ape_decode_frame(AVCodecContext * avctx,
     int buf_size = avpkt->size;
     APEContext *s = avctx->priv_data;
     int16_t *samples = data;
-    int nblocks;
-    int i, n;
+    uint32_t nblocks;
+    int i;
     int blockstodecode;
     int bytes_used;
 
-    if (buf_size == 0 && !s->samples) {
-        *data_size = 0;
-        return 0;
-    }
-
     /* should not happen but who knows */
     if (BLOCKS_PER_LOOP * 2 * avctx->channels > *data_size) {
-        av_log (avctx, AV_LOG_ERROR, "Packet size is too big to be handled in lavc! (max is %d where you have %d)\n", *data_size, s->samples * 2 * avctx->channels);
-        return -1;
+        av_log (avctx, AV_LOG_ERROR, "Output buffer is too small.\n");
+        return AVERROR(EINVAL);
     }
 
+    /* this should never be negative, but bad things will happen if it is, so
+       check it just to make sure. */
+    av_assert0(s->samples >= 0);
+
     if(!s->samples){
-        s->data = av_realloc(s->data, (buf_size + 3) & ~3);
+        uint32_t offset;
+        void *tmp_data;
+
+        if (buf_size < 8) {
+            av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
+            return AVERROR_INVALIDDATA;
+        }
+
+        tmp_data = av_realloc(s->data, FFALIGN(buf_size, 4));
+        if (!tmp_data)
+            return AVERROR(ENOMEM);
+        s->data = tmp_data;
         s->dsp.bswap_buf((uint32_t*)s->data, (const uint32_t*)buf, buf_size >> 2);
         s->ptr = s->last_ptr = s->data;
         s->data_end = s->data + buf_size;
 
-        nblocks = s->samples = bytestream_get_be32(&s->ptr);
-        n = bytestream_get_be32(&s->ptr);
-        if(n < 0 || n > 3){
+        nblocks = bytestream_get_be32(&s->ptr);
+        offset = bytestream_get_be32(&s->ptr);
+        if (offset > 3) {
             av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n");
             s->data = NULL;
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
-        s->ptr += n;
+        if (s->data_end - s->ptr < offset) {
+            av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
+            return AVERROR_INVALIDDATA;
+        }
+        s->ptr += offset;
 
-        s->currentframeblocks = nblocks;
-        buf += 4;
-        if (s->samples <= 0) {
-            *data_size = 0;
-            return buf_size;
+        if (!nblocks || nblocks > INT_MAX) {
+            av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %u.\n", nblocks);
+            return AVERROR_INVALIDDATA;
        }
+        s->currentframeblocks = s->samples = nblocks;
 
         memset(s->decoded0, 0, sizeof(s->decoded0));
         memset(s->decoded1, 0, sizeof(s->decoded1));
 
         /* Initialize the frame decoder */
-        init_frame_decoder(s);
+        if (init_frame_decoder(s) < 0) {
+            av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
+            return AVERROR_INVALIDDATA;
+        }
     }
 
     if (!s->data) {
@@ -858,10 +901,10 @@ static int ape_decode_frame(AVCodecContext * avctx,
         ape_unpack_stereo(s, blockstodecode);
     emms_c();
 
-    if(s->error || s->ptr > s->data_end){
+    if (s->error) {
         s->samples=0;
         av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n");
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
 
     for (i = 0; i < blockstodecode; i++) {
@@ -383,6 +383,8 @@ enum CodecID {
 
     CODEC_ID_MPEG2TS= 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
                                 * stream (only used by libavformat) */
+    CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+                                      * stream (only used by libavformat) */
     CODEC_ID_FFMETADATA=0x21000, ///< Dummy codec for streams containing only metadata information.
 };
 
@@ -682,8 +684,10 @@ typedef struct RcOverride{
  * assume the buffer was allocated by avcodec_default_get_buffer.
 */
 #define CODEC_CAP_DR1 0x0002
+#if FF_API_PARSE_FRAME
 /* If 'parse_only' field is true, then avcodec_parse_frame() can be used. */
 #define CODEC_CAP_PARSE_ONLY 0x0004
+#endif
 #define CODEC_CAP_TRUNCATED 0x0008
 /* Codec can export data for HW decoding (XvMC). */
 #define CODEC_CAP_HWACCEL 0x0010
@@ -1590,9 +1594,15 @@ typedef struct AVCodecContext {
      */
     int block_align;
 
-    int parse_only; /* - decoding only: If true, only parsing is done
-                       (function avcodec_parse_frame()). The frame
-                       data is returned. Only MPEG codecs support this now. */
+#if FF_API_PARSE_FRAME
+    /**
+     * If true, only parsing is done. The frame data is returned.
+     * Only MPEG audio decoders support this now.
+     * - encoding: unused
+     * - decoding: Set by user
+     */
+    attribute_deprecated int parse_only;
+#endif
 
     /**
      * 0-> h263 quant 1-> mpeg quant
@@ -4047,10 +4057,6 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
 */
 void avsubtitle_free(AVSubtitle *sub);
 
-int avcodec_parse_frame(AVCodecContext *avctx, uint8_t **pdata,
-                        int *data_size_ptr,
-                        uint8_t *buf, int buf_size);
-
 /**
  * Encode an audio frame from samples into buf.
 *
File diff suppressed because it is too large
@@ -30,7 +30,9 @@ AVCodec ff_mp1float_decoder = {
     .priv_data_size = sizeof(MPADecodeContext),
     .init = decode_init,
     .decode = decode_frame,
+#if FF_API_PARSE_FRAME
     .capabilities = CODEC_CAP_PARSE_ONLY,
+#endif
     .flush = flush,
     .long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
 };
@@ -43,7 +45,9 @@ AVCodec ff_mp2float_decoder = {
     .priv_data_size = sizeof(MPADecodeContext),
     .init = decode_init,
     .decode = decode_frame,
+#if FF_API_PARSE_FRAME
     .capabilities = CODEC_CAP_PARSE_ONLY,
+#endif
     .flush = flush,
     .long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
 };
@@ -56,7 +60,9 @@ AVCodec ff_mp3float_decoder = {
     .priv_data_size = sizeof(MPADecodeContext),
     .init = decode_init,
     .decode = decode_frame,
+#if FF_API_PARSE_FRAME
     .capabilities = CODEC_CAP_PARSE_ONLY,
+#endif
     .flush = flush,
     .long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
 };
@@ -69,7 +75,9 @@ AVCodec ff_mp3adufloat_decoder = {
     .priv_data_size = sizeof(MPADecodeContext),
     .init = decode_init,
     .decode = decode_frame_adu,
+#if FF_API_PARSE_FRAME
     .capabilities = CODEC_CAP_PARSE_ONLY,
+#endif
     .flush = flush,
     .long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
 };
@@ -48,7 +48,7 @@
 typedef struct NellyMoserDecodeContext {
     AVCodecContext* avctx;
     float *float_buf;
-    float state[NELLY_BUF_LEN];
+    DECLARE_ALIGNED(16, float, state)[NELLY_BUF_LEN];
     AVLFG random_state;
     GetBitContext gb;
     float scale_bias;
@@ -58,23 +58,6 @@ typedef struct NellyMoserDecodeContext {
     DECLARE_ALIGNED(32, float, imdct_out)[NELLY_BUF_LEN * 2];
 } NellyMoserDecodeContext;
 
-static void overlap_and_window(NellyMoserDecodeContext *s, float *state, float *audio, float *a_in)
-{
-    int bot, top;
-
-    bot = 0;
-    top = NELLY_BUF_LEN-1;
-
-    while (bot < NELLY_BUF_LEN) {
-        audio[bot] = a_in [bot]*ff_sine_128[bot]
-                    +state[bot]*ff_sine_128[top];
-
-        bot++;
-        top--;
-    }
-    memcpy(state, a_in + NELLY_BUF_LEN, sizeof(float)*NELLY_BUF_LEN);
-}
-
 static void nelly_decode_block(NellyMoserDecodeContext *s,
                                const unsigned char block[NELLY_BLOCK_LEN],
                                float audio[NELLY_SAMPLES])
@@ -125,7 +108,9 @@ static void nelly_decode_block(NellyMoserDecodeContext *s,
         s->imdct_ctx.imdct_calc(&s->imdct_ctx, s->imdct_out, aptr);
         /* XXX: overlapping and windowing should be part of a more
            generic imdct function */
-        overlap_and_window(s, s->state, aptr, s->imdct_out);
+        s->dsp.vector_fmul_reverse(s->state, s->state, ff_sine_128, NELLY_BUF_LEN);
+        s->dsp.vector_fmul_add(aptr, s->imdct_out, ff_sine_128, s->state, NELLY_BUF_LEN);
+        memcpy(s->state, s->imdct_out + NELLY_BUF_LEN, sizeof(float)*NELLY_BUF_LEN);
     }
 }
 
@@ -172,20 +157,21 @@ static int decode_tag(AVCodecContext * avctx,
     float *samples_flt = data;
     *data_size = 0;
 
-    if (buf_size < avctx->block_align) {
-        return buf_size;
-    }
-
-    if (buf_size % NELLY_BLOCK_LEN) {
-        av_log(avctx, AV_LOG_ERROR, "Tag size %d.\n", buf_size);
-        return buf_size;
-    }
     block_size = NELLY_SAMPLES * av_get_bytes_per_sample(avctx->sample_fmt);
-    blocks = FFMIN(buf_size / NELLY_BLOCK_LEN, data_max / block_size);
+    blocks = buf_size / NELLY_BLOCK_LEN;
+
     if (blocks <= 0) {
+        av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
+        return AVERROR_INVALIDDATA;
+    }
+    if (data_max < blocks * block_size) {
         av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
         return AVERROR(EINVAL);
     }
+    if (buf_size % NELLY_BLOCK_LEN) {
+        av_log(avctx, AV_LOG_WARNING, "Leftover bytes: %d.\n",
+               buf_size % NELLY_BLOCK_LEN);
+    }
     /* Normal numbers of blocks for sample rates:
      * 8000 Hz - 1
      * 11025 Hz - 2
@@ -146,7 +146,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
 
     avctx->frame_size = NELLY_SAMPLES;
     s->avctx = avctx;
-    ff_mdct_init(&s->mdct_ctx, 8, 0, 1.0);
+    ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0);
     dsputil_init(&s->dsp, avctx);
 
     /* Generate overlap window */
@@ -352,17 +352,15 @@ static void encode_block(NellyMoserEncodeContext *s, unsigned char *output, int
 static int encode_frame(AVCodecContext *avctx, uint8_t *frame, int buf_size, void *data)
 {
     NellyMoserEncodeContext *s = avctx->priv_data;
-    const int16_t *samples = data;
+    const float *samples = data;
     int i;
 
     if (s->last_frame)
         return 0;
 
     if (data) {
-        for (i = 0; i < avctx->frame_size; i++) {
-            s->buf[s->bufsel][i] = samples[i];
-        }
-        for (; i < NELLY_SAMPLES; i++) {
+        memcpy(s->buf[s->bufsel], samples, avctx->frame_size * sizeof(*samples));
+        for (i = avctx->frame_size; i < NELLY_SAMPLES; i++) {
             s->buf[s->bufsel][i] = 0;
         }
         s->bufsel = 1 - s->bufsel;
@@ -393,5 +391,5 @@ AVCodec ff_nellymoser_encoder = {
     .close = encode_end,
     .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY,
     .long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"),
-    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
 };
@@ -1665,7 +1665,7 @@ static int frame_start(SnowContext *s){
     int w= s->avctx->width; //FIXME round up to x16 ?
     int h= s->avctx->height;
 
-    if(s->current_picture.data[0] && !(s->avctx->flags&CODEC_FLAG_EMU_EDGE)){
+    if (s->current_picture.data[0] && !(s->avctx->flags&CODEC_FLAG_EMU_EDGE)) {
         s->dsp.draw_edges(s->current_picture.data[0],
                           s->current_picture.linesize[0], w , h ,
                           EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
@@ -21,7 +21,7 @@
 #define AVCODEC_VERSION_H
 
 #define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR 23
+#define LIBAVCODEC_VERSION_MINOR 24
 #define LIBAVCODEC_VERSION_MICRO 0
 
 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
@@ -101,5 +101,8 @@
 #ifndef FF_API_GET_ALPHA_INFO
 #define FF_API_GET_ALPHA_INFO (LIBAVCODEC_VERSION_MAJOR < 54)
 #endif
+#ifndef FF_API_PARSE_FRAME
+#define FF_API_PARSE_FRAME (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
 
 #endif /* AVCODEC_VERSION_H */
@@ -816,7 +816,7 @@ static int wma_decode_superframe(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     WMACodecContext *s = avctx->priv_data;
-    int nb_frames, bit_offset, i, pos, len;
+    int nb_frames, bit_offset, i, pos, len, out_size;
     uint8_t *q;
     int16_t *samples;
 
@@ -838,13 +838,19 @@ static int wma_decode_superframe(AVCodecContext *avctx,
     if (s->use_bit_reservoir) {
         /* read super frame header */
         skip_bits(&s->gb, 4); /* super frame index */
-        nb_frames = get_bits(&s->gb, 4) - 1;
+        nb_frames = get_bits(&s->gb, 4) - (s->last_superframe_len <= 0);
+    } else {
+        nb_frames = 1;
+    }
 
-        if((nb_frames+1) * s->nb_channels * s->frame_len * sizeof(int16_t) > *data_size){
-            av_log(s->avctx, AV_LOG_ERROR, "Insufficient output space\n");
-            goto fail;
-        }
+    out_size = nb_frames * s->frame_len * s->nb_channels *
+               av_get_bytes_per_sample(avctx->sample_fmt);
+    if (*data_size < out_size) {
+        av_log(s->avctx, AV_LOG_ERROR, "Insufficient output space\n");
+        goto fail;
+    }
 
+    if (s->use_bit_reservoir) {
         bit_offset = get_bits(&s->gb, s->byte_offset_bits + 3);
 
         if (s->last_superframe_len > 0) {
@@ -873,6 +879,7 @@ static int wma_decode_superframe(AVCodecContext *avctx,
             if (wma_decode_frame(s, samples) < 0)
                 goto fail;
             samples += s->nb_channels * s->frame_len;
+            nb_frames--;
         }
 
         /* read each frame starting from bit_offset */
@@ -901,10 +908,6 @@ static int wma_decode_superframe(AVCodecContext *avctx,
             s->last_superframe_len = len;
             memcpy(s->last_superframe, buf + pos, len);
     } else {
-        if(s->nb_channels * s->frame_len * sizeof(int16_t) > *data_size){
-            av_log(s->avctx, AV_LOG_ERROR, "Insufficient output space\n");
-            goto fail;
-        }
         /* single frame decode */
         if (wma_decode_frame(s, samples) < 0)
             goto fail;
@@ -912,7 +915,7 @@ static int wma_decode_superframe(AVCodecContext *avctx,
     }
 
 //av_log(NULL, AV_LOG_ERROR, "%d %d %d %d outbytes:%d eaten:%d\n", s->frame_len_bits, s->block_len_bits, s->frame_len, s->block_len, (int8_t *)samples - (int8_t *)data, s->block_align);
-    *data_size = (int8_t *)samples - (int8_t *)data;
+    *data_size = out_size;
     return buf_size;
 fail:
     /* when error, we reset the bit reservoir */
@@ -86,12 +86,14 @@
  * subframe in order to reconstruct the output samples.
 */
 
+#include "libavutil/intreadwrite.h"
 #include "avcodec.h"
 #include "internal.h"
 #include "get_bits.h"
 #include "put_bits.h"
 #include "wmaprodata.h"
 #include "dsputil.h"
+#include "fmtconvert.h"
 #include "sinewin.h"
 #include "wma.h"
 
@@ -166,6 +168,7 @@ typedef struct WMAProDecodeCtx {
     /* generic decoder variables */
     AVCodecContext* avctx; ///< codec context for av_log
     DSPContext dsp; ///< accelerated DSP functions
+    FmtConvertContext fmt_conv;
     uint8_t frame_data[MAX_FRAMESIZE +
                        FF_INPUT_BUFFER_PADDING_SIZE];///< compressed frame data
     PutBitContext pb; ///< context for filling the frame_data buffer
@@ -279,6 +282,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
 
     s->avctx = avctx;
     dsputil_init(&s->dsp, avctx);
+    ff_fmt_convert_init(&s->fmt_conv, avctx);
     init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
 
     avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
@@ -767,7 +771,7 @@ static int decode_coeffs(WMAProDecodeCtx *s, int c)
     /* Integers 0..15 as single-precision floats. The table saves a
        costly int to float conversion, and storing the values as
       integers allows fast sign-flipping. */
-    static const int fval_tab[16] = {
+    static const uint32_t fval_tab[16] = {
         0x00000000, 0x3f800000, 0x40000000, 0x40400000,
         0x40800000, 0x40a00000, 0x40c00000, 0x40e00000,
         0x41000000, 0x41100000, 0x41200000, 0x41300000,
@@ -799,7 +803,7 @@ static int decode_coeffs(WMAProDecodeCtx *s, int c)
        4 vector coded large values) */
     while ((s->transmit_num_vec_coeffs || !rl_mode) &&
            (cur_coeff + 3 < ci->num_vec_coeffs)) {
-        int vals[4];
+        uint32_t vals[4];
         int i;
         unsigned int idx;
 
@@ -809,15 +813,15 @@ static int decode_coeffs(WMAProDecodeCtx *s, int c)
         for (i = 0; i < 4; i += 2) {
             idx = get_vlc2(&s->gb, vec2_vlc.table, VLCBITS, VEC2MAXDEPTH);
             if (idx == HUFF_VEC2_SIZE - 1) {
-                int v0, v1;
+                uint32_t v0, v1;
                 v0 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
                 if (v0 == HUFF_VEC1_SIZE - 1)
                     v0 += ff_wma_get_large_val(&s->gb);
                 v1 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH);
                 if (v1 == HUFF_VEC1_SIZE - 1)
                     v1 += ff_wma_get_large_val(&s->gb);
-                ((float*)vals)[i ] = v0;
-                ((float*)vals)[i+1] = v1;
+                vals[i ] = ((av_alias32){ .f32 = v0 }).u32;
+                vals[i+1] = ((av_alias32){ .f32 = v1 }).u32;
             } else {
                 vals[i] = fval_tab[symbol_to_vec2[idx] >> 4 ];
                 vals[i+1] = fval_tab[symbol_to_vec2[idx] & 0xF];
@@ -833,8 +837,8 @@ static int decode_coeffs(WMAProDecodeCtx *s, int c)
         /** decode sign */
         for (i = 0; i < 4; i++) {
             if (vals[i]) {
-                int sign = get_bits1(&s->gb) - 1;
-                *(uint32_t*)&ci->coeffs[cur_coeff] = vals[i] ^ sign<<31;
+                uint32_t sign = get_bits1(&s->gb) - 1;
+                AV_WN32A(&ci->coeffs[cur_coeff], vals[i] ^ sign << 31);
                 num_zeros = 0;
             } else {
                 ci->coeffs[cur_coeff] = 0;
@@ -1281,6 +1285,7 @@ static int decode_frame(WMAProDecodeCtx *s)
     int more_frames = 0;
     int len = 0;
     int i;
+    const float *out_ptr[WMAPRO_MAX_CHANNELS];
 
     /** check for potential output buffer overflow */
     if (s->num_channels * s->samples_per_frame > s->samples_end - s->samples) {
@@ -1356,18 +1361,12 @@ static int decode_frame(WMAProDecodeCtx *s)
     }
 
     /** interleave samples and write them to the output buffer */
+    for (i = 0; i < s->num_channels; i++)
+        out_ptr[i] = s->channel[i].out;
+    s->fmt_conv.float_interleave(s->samples, out_ptr, s->samples_per_frame,
+                                 s->num_channels);
+
     for (i = 0; i < s->num_channels; i++) {
-        float* ptr = s->samples + i;
-        int incr = s->num_channels;
-        float* iptr = s->channel[i].out;
-        float* iend = iptr + s->samples_per_frame;
-
-        // FIXME should create/use a DSP function here
-        while (iptr < iend) {
-            *ptr = *iptr++;
-            ptr += incr;
-        }
-
         /** reuse second half of the IMDCT output for the next frame */
         memcpy(&s->channel[i].out[0],
                &s->channel[i].out[s->samples_per_frame],
@@ -1730,7 +1730,7 @@ static int synth_superframe(AVCodecContext *ctx,
 {
     WMAVoiceContext *s = ctx->priv_data;
     GetBitContext *gb = &s->gb, s_gb;
-    int n, res, n_samples = 480;
+    int n, res, out_size, n_samples = 480;
     double lsps[MAX_FRAMES][MAX_LSPS];
     const double *mean_lsf = s->lsps == 16 ?
         wmavoice_mean_lsf16[s->lsp_def_mode] : wmavoice_mean_lsf10[s->lsp_def_mode];
@@ -1748,7 +1748,10 @@ static int synth_superframe(AVCodecContext *ctx,
         s->sframe_cache_size = 0;
     }
 
-    if ((res = check_bits_for_superframe(gb, s)) == 1) return 1;
+    if ((res = check_bits_for_superframe(gb, s)) == 1) {
+        *data_size = 0;
+        return 1;
+    }
 
     /* First bit is speech/music bit, it differentiates between WMAVoice
      * speech samples (the actual codec) and WMAVoice music samples, which
@@ -1789,6 +1792,14 @@ static int synth_superframe(AVCodecContext *ctx,
         stabilize_lsps(lsps[n], s->lsps);
     }
 
+    out_size = n_samples * av_get_bytes_per_sample(ctx->sample_fmt);
+    if (*data_size < out_size) {
+        av_log(ctx, AV_LOG_ERROR,
+               "Output buffer too small (%d given - %zu needed)\n",
+               *data_size, out_size);
+        return -1;
+    }
+
     /* Parse frames, optionally preceeded by per-frame (independent) LSPs. */
     for (n = 0; n < 3; n++) {
         if (!s->has_residual_lsps) {
@@ -1808,8 +1819,10 @@ static int synth_superframe(AVCodecContext *ctx,
                                &samples[n * MAX_FRAMESIZE],
                                lsps[n], n == 0 ? s->prev_lsps : lsps[n - 1],
                                &excitation[s->history_nsamples + n * MAX_FRAMESIZE],
-                               &synth[s->lsps + n * MAX_FRAMESIZE])))
+                               &synth[s->lsps + n * MAX_FRAMESIZE]))) {
+            *data_size = 0;
             return res;
+        }
     }
 
     /* Statistics? FIXME - we don't check for length, a slight overrun
@@ -1821,7 +1834,7 @@ static int synth_superframe(AVCodecContext *ctx,
     }
 
     /* Specify nr. of output samples */
-    *data_size = n_samples * sizeof(float);
+    *data_size = out_size;
 
     /* Update history */
     memcpy(s->prev_lsps, lsps[2],
@@ -1915,22 +1928,16 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
     GetBitContext *gb = &s->gb;
     int size, res, pos;
 
-    if (*data_size < 480 * sizeof(float)) {
-        av_log(ctx, AV_LOG_ERROR,
-               "Output buffer too small (%d given - %zu needed)\n",
-               *data_size, 480 * sizeof(float));
-        return -1;
-    }
-    *data_size = 0;
-
     /* Packets are sometimes a multiple of ctx->block_align, with a packet
      * header at each ctx->block_align bytes. However, FFmpeg's ASF demuxer
      * feeds us ASF packets, which may concatenate multiple "codec" packets
     * in a single "muxer" packet, so we artificially emulate that by
     * capping the packet size at ctx->block_align. */
     for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align);
-    if (!size)
+    if (!size) {
+        *data_size = 0;
         return 0;
+    }
     init_get_bits(&s->gb, avpkt->data, size << 3);
 
     /* size == ctx->block_align is used to indicate whether we are dealing with
@@ -1297,7 +1297,7 @@ static int asf_read_seek(AVFormatContext *s, int stream_index, int64_t pts, int
         }
     }
     /* no index or seeking by index failed */
-    if(av_seek_frame_binary(s, stream_index, pts, flags)<0)
+    if (ff_seek_frame_binary(s, stream_index, pts, flags) < 0)
         return -1;
     asf_reset_header(s);
     return 0;
@@ -523,8 +523,10 @@ typedef struct AVStream {
     AVRational r_frame_rate;
     void *priv_data;

+#if FF_API_REORDER_PRIVATE
     /* internal data used in av_find_stream_info() */
     int64_t first_dts;
+#endif

     /**
      * encoding: pts generation when outputting stream
@@ -539,7 +541,9 @@ typedef struct AVStream {
      * encoding: set by libavformat in av_write_header
      */
     AVRational time_base;
+#if FF_API_REORDER_PRIVATE
     int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */
+#endif
 #if FF_API_STREAM_COPY
     /* ffmpeg.c private use */
     attribute_deprecated int stream_copy; /**< If set, just copy stream. */
@@ -572,6 +576,7 @@ typedef struct AVStream {
      */
     int64_t duration;

+#if FF_API_REORDER_PRIVATE
     /* av_read_frame() support */
     enum AVStreamParseType need_parsing;
     struct AVCodecParserContext *parser;
@@ -584,14 +589,17 @@ typedef struct AVStream {
                                     support seeking natively. */
     int nb_index_entries;
     unsigned int index_entries_allocated_size;
+#endif

     int64_t nb_frames; ///< number of frames in this stream if known or 0

     int disposition; /**< AV_DISPOSITION_* bit field */

+#if FF_API_REORDER_PRIVATE
     AVProbeData probe_data;
 #define MAX_REORDER_DELAY 16
     int64_t pts_buffer[MAX_REORDER_DELAY+1];
+#endif

     /**
      * sample aspect ratio (0 if unknown)
@@ -602,6 +610,7 @@ typedef struct AVStream {

     AVDictionary *metadata;

+#if FF_API_REORDER_PRIVATE
     /* Intended mostly for av_read_frame() support. Not supposed to be used by */
     /* external applications; try to use something else if at all possible. */
     const uint8_t *cur_ptr;
@@ -630,12 +639,21 @@ typedef struct AVStream {
      * used internally, NOT PART OF PUBLIC API, dont read or write from outside of libav*
      */
     struct AVPacketList *last_in_packet_buffer;
+#endif

     /**
      * Average framerate
      */
     AVRational avg_frame_rate;

+    /*****************************************************************
+     * All fields below this line are not part of the public API. They
+     * may not be used outside of libavformat and can be changed and
+     * removed at will.
+     * New public fields should be added right above.
+     *****************************************************************
+     */

     /**
      * Number of frames that have been demuxed during av_find_stream_info()
      */
@@ -665,6 +683,49 @@ typedef struct AVStream {
      * NOT PART OF PUBLIC API
      */
     int request_probe;
+#if !FF_API_REORDER_PRIVATE
+    const uint8_t *cur_ptr;
+    int cur_len;
+    AVPacket cur_pkt;

+    // Timestamp generation support:
+    /**
+     * Timestamp corresponding to the last dts sync point.
+     *
+     * Initialized when AVCodecParserContext.dts_sync_point >= 0 and
+     * a DTS is received from the underlying container. Otherwise set to
+     * AV_NOPTS_VALUE by default.
+     */
+    int64_t reference_dts;
+    int64_t first_dts;
+    int64_t cur_dts;
+    int last_IP_duration;
+    int64_t last_IP_pts;

+    /**
+     * Number of packets to buffer for codec probing
+     */
+#define MAX_PROBE_PACKETS 2500
+    int probe_packets;

+    /**
+     * last packet in packet_buffer for this stream when muxing.
+     */
+    struct AVPacketList *last_in_packet_buffer;
+    AVProbeData probe_data;
+#define MAX_REORDER_DELAY 16
+    int64_t pts_buffer[MAX_REORDER_DELAY+1];
+    /* av_read_frame() support */
+    enum AVStreamParseType need_parsing;
+    struct AVCodecParserContext *parser;

+    AVIndexEntry *index_entries; /**< Only used if the format does not
+                                    support seeking natively. */
+    int nb_index_entries;
+    unsigned int index_entries_allocated_size;

+    int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */
+#endif
 } AVStream;

 #define AV_PROGRAM_RUNNING 1
@@ -724,6 +785,7 @@ typedef struct AVFormatContext {
 #endif

     int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */
+#if FF_API_REORDER_PRIVATE
     /* private data for pts handling (do not modify directly). */
     /**
      * This buffer is only needed when packets were already buffered but
@@ -731,6 +793,7 @@ typedef struct AVFormatContext {
      * streams.
      */
     struct AVPacketList *packet_buffer;
+#endif

     /**
      * Decoding: position of the first frame of the component, in
@@ -761,11 +824,13 @@ typedef struct AVFormatContext {
      */
     int bit_rate;

+#if FF_API_REORDER_PRIVATE
     /* av_read_frame() support */
     AVStream *cur_st;

     /* av_seek_frame() support */
     int64_t data_offset; /**< offset of the first packet */
+#endif

 #if FF_API_MUXRATE
     /**
@@ -876,6 +941,7 @@ typedef struct AVFormatContext {
     int debug;
 #define FF_FDEBUG_TS 0x0001

+#if FF_API_REORDER_PRIVATE
     /**
      * Raw packets from the demuxer, prior to parsing and decoding.
      * This buffer is used for buffering packets until the codec can
@@ -886,15 +952,18 @@ typedef struct AVFormatContext {
     struct AVPacketList *raw_packet_buffer_end;

     struct AVPacketList *packet_buffer_end;
+#endif

     AVDictionary *metadata;

+#if FF_API_REORDER_PRIVATE
     /**
      * Remaining size available for raw_packet_buffer, in bytes.
      * NOT PART OF PUBLIC API
      */
 #define RAW_PACKET_BUFFER_SIZE 2500000
     int raw_packet_buffer_remaining_size;
+#endif

     /**
      * Start time of the stream in real world time, in microseconds
@@ -923,6 +992,43 @@ typedef struct AVFormatContext {
      * This will be moved into demuxer private options. Thus no API/ABI compatibility
      */
     int ts_id;

+    /*****************************************************************
+     * All fields below this line are not part of the public API. They
+     * may not be used outside of libavformat and can be changed and
+     * removed at will.
+     * New public fields should be added right above.
+     *****************************************************************
+     */
+#if !FF_API_REORDER_PRIVATE
+    /**
+     * Raw packets from the demuxer, prior to parsing and decoding.
+     * This buffer is used for buffering packets until the codec can
+     * be identified, as parsing cannot be done without knowing the
+     * codec.
+     */
+    struct AVPacketList *raw_packet_buffer;
+    struct AVPacketList *raw_packet_buffer_end;
+    /**
+     * Remaining size available for raw_packet_buffer, in bytes.
+     */
+#define RAW_PACKET_BUFFER_SIZE 2500000
+    int raw_packet_buffer_remaining_size;

+    /**
+     * This buffer is only needed when packets were already buffered but
+     * not decoded, for example to get the codec parameters in MPEG
+     * streams.
+     */
+    struct AVPacketList *packet_buffer;
+    struct AVPacketList *packet_buffer_end;

+    /* av_read_frame() support */
+    AVStream *cur_st;

+    /* av_seek_frame() support */
+    int64_t data_offset; /**< offset of the first packet */
+#endif
 } AVFormatContext;

 typedef struct AVPacketList {
@@ -1479,40 +1585,20 @@ int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);
 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
                        int size, int distance, int flags);

-/**
+#if FF_API_SEEK_PUBLIC
- * Perform a binary search using av_index_search_timestamp() and
+attribute_deprecated
- * AVInputFormat.read_timestamp().
- * This is not supposed to be called directly by a user application,
- * but by demuxers.
- * @param target_ts target timestamp in the time base of the given stream
- * @param stream_index stream number
- */
 int av_seek_frame_binary(AVFormatContext *s, int stream_index,
                          int64_t target_ts, int flags);
+attribute_deprecated
-/**
- * Update cur_dts of all streams based on the given timestamp and AVStream.
- *
- * Stream ref_st unchanged, others set cur_dts in their native time base.
- * Only needed for timestamp wrapping or if (dts not set and pts!=dts).
- * @param timestamp new dts expressed in time_base of param ref_st
- * @param ref_st reference stream giving time_base of param timestamp
- */
 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp);
+attribute_deprecated
-/**
- * Perform a binary search using read_timestamp().
- * This is not supposed to be called directly by a user application,
- * but by demuxers.
- * @param target_ts target timestamp in the time base of the given stream
- * @param stream_index stream number
- */
 int64_t av_gen_search(AVFormatContext *s, int stream_index,
                       int64_t target_ts, int64_t pos_min,
                       int64_t pos_max, int64_t pos_limit,
                       int64_t ts_min, int64_t ts_max,
                       int flags, int64_t *ts_ret,
                       int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ));
+#endif

 /**
  * media file output
@@ -251,4 +251,37 @@ enum CodecID ff_guess_image2_codec(const char *filename);
  */
 int64_t ff_iso8601_to_unix_time(const char *datestr);

+/**
+ * Perform a binary search using av_index_search_timestamp() and
+ * AVInputFormat.read_timestamp().
+ *
+ * @param target_ts target timestamp in the time base of the given stream
+ * @param stream_index stream number
+ */
+int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
+                         int64_t target_ts, int flags);

+/**
+ * Update cur_dts of all streams based on the given timestamp and AVStream.
+ *
+ * Stream ref_st unchanged, others set cur_dts in their native time base.
+ * Only needed for timestamp wrapping or if (dts not set and pts!=dts).
+ * @param timestamp new dts expressed in time_base of param ref_st
+ * @param ref_st reference stream giving time_base of param timestamp
+ */
+void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp);

+/**
+ * Perform a binary search using read_timestamp().
+ *
+ * @param target_ts target timestamp in the time base of the given stream
+ * @param stream_index stream number
+ */
+int64_t ff_gen_search(AVFormatContext *s, int stream_index,
+                      int64_t target_ts, int64_t pos_min,
+                      int64_t pos_max, int64_t pos_limit,
+                      int64_t ts_min, int64_t ts_max,
+                      int flags, int64_t *ts_ret,
+                      int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ));

 #endif /* AVFORMAT_INTERNAL_H */
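Not part of the commit: a minimal sketch of how a demuxer inside libavformat might use the new ff_* seeking helpers declared above, in the style the converted demuxers later in this diff use. The demuxer name and the read_timestamp callback body are hypothetical, and this would only compile as part of the libavformat source tree.

#include "avformat.h"
#include "internal.h"

/* Hypothetical timestamp prober: a real demuxer would parse its container
 * at *pos and return the timestamp found there, or AV_NOPTS_VALUE. */
static int64_t hypo_read_timestamp(AVFormatContext *s, int stream_index,
                                   int64_t *pos, int64_t pos_limit)
{
    return AV_NOPTS_VALUE;
}

static int hypo_read_seek(AVFormatContext *s, int stream_index,
                          int64_t target_ts, int flags)
{
    int64_t ts, pos;

    /* Binary search over the file with the demuxer's own prober, then
     * update every stream's cur_dts; ff_gen_search()/ff_update_cur_dts()
     * replace the deprecated av_* names. */
    pos = ff_gen_search(s, stream_index, target_ts,
                        0, avio_size(s->pb), -1,
                        AV_NOPTS_VALUE, AV_NOPTS_VALUE,
                        flags, &ts, hypo_read_timestamp);
    if (pos < 0)
        return -1;
    avio_seek(s->pb, pos, SEEK_SET);
    ff_update_cur_dts(s, s->streams[stream_index], ts);
    return 0;
}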
@@ -61,6 +61,8 @@ const AVCodecTag ff_mp4_obj_type[] = {
     { CODEC_ID_VORBIS , 0xDD }, /* non standard, gpac uses it */
     { CODEC_ID_DVD_SUBTITLE, 0xE0 }, /* non standard, see unsupported-embedded-subs-2.mp4 */
     { CODEC_ID_QCELP , 0xE1 },
+    { CODEC_ID_MPEG4SYSTEMS, 0x01 },
+    { CODEC_ID_MPEG4SYSTEMS, 0x02 },
     { CODEC_ID_NONE , 0 },
 };

@@ -150,10 +150,12 @@ int ff_mp4_read_descr(AVFormatContext *fc, AVIOContext *pb, int *tag);
 int ff_mp4_read_dec_config_descr(AVFormatContext *fc, AVStream *st, AVIOContext *pb);
 void ff_mp4_parse_es_descr(AVIOContext *pb, int *es_id);

+#define MP4ODescrTag 0x01
 #define MP4IODescrTag 0x02
 #define MP4ESDescrTag 0x03
 #define MP4DecConfigDescrTag 0x04
 #define MP4DecSpecificDescrTag 0x05
+#define MP4SLDescrTag 0x06

 int ff_mov_read_esds(AVFormatContext *fc, AVIOContext *pb, MOVAtom atom);
 enum CodecID ff_mov_get_lpcm_codec_id(int bps, int flags);
@@ -2071,7 +2071,7 @@ static int matroska_read_seek(AVFormatContext *s, int stream_index,
     matroska->skip_to_keyframe = !(flags & AVSEEK_FLAG_ANY);
     matroska->skip_to_timecode = st->index_entries[index].timestamp;
     matroska->done = 0;
-    av_update_cur_dts(s, st, st->index_entries[index].timestamp);
+    ff_update_cur_dts(s, st, st->index_entries[index].timestamp);
     return 0;
 }

@@ -29,6 +29,7 @@
 #include "libavutil/opt.h"
 #include "libavutil/avassert.h"
 #include "libavcodec/bytestream.h"
+#include "libavcodec/get_bits.h"
 #include "avformat.h"
 #include "mpegts.h"
 #include "internal.h"
@@ -43,6 +44,8 @@

 #define MAX_PES_PAYLOAD 200*1024

+#define MAX_MP4_DESCR_COUNT 16

 enum MpegTSFilterType {
     MPEGTS_PES,
     MPEGTS_SECTION,
@@ -73,6 +76,7 @@ typedef struct MpegTSSectionFilter {

 struct MpegTSFilter {
     int pid;
+    int es_id;
     int last_cc; /* last cc code (-1 if first packet) */
     enum MpegTSFilterType type;
     union {
@@ -173,6 +177,7 @@ typedef struct PESContext {
     int64_t ts_packet_pos; /**< position of first TS packet of this PES packet */
     uint8_t header[MAX_PES_HEADER_SIZE];
     uint8_t *buffer;
+    SLConfigDescr sl;
 } PESContext;

 extern AVInputFormat ff_mpegts_demuxer;
@@ -327,6 +332,7 @@ static MpegTSFilter *mpegts_open_section_filter(MpegTSContext *ts, unsigned int
     ts->pids[pid] = filter;
     filter->type = MPEGTS_SECTION;
     filter->pid = pid;
+    filter->es_id = -1;
     filter->last_cc = -1;
     sec = &filter->u.section_filter;
     sec->section_cb = section_cb;
@@ -355,6 +361,7 @@ static MpegTSFilter *mpegts_open_pes_filter(MpegTSContext *ts, unsigned int pid,
     ts->pids[pid] = filter;
     filter->type = MPEGTS_PES;
     filter->pid = pid;
+    filter->es_id = -1;
     filter->last_cc = -1;
     pes = &filter->u.pes_filter;
     pes->pes_cb = pes_cb;
@@ -682,6 +689,83 @@ static void new_pes_packet(PESContext *pes, AVPacket *pkt)
     pes->flags = 0;
 }

+static uint64_t get_bits64(GetBitContext *gb, int bits)
+{
+    uint64_t ret = 0;
+    while (bits > 17) {
+        ret <<= 17;
+        ret |= get_bits(gb, 17);
+        bits -= 17;
+    }
+    ret <<= bits;
+    ret |= get_bits(gb, bits);
+    return ret;
+}

+static int read_sl_header(PESContext *pes, SLConfigDescr *sl, const uint8_t *buf, int buf_size)
+{
+    GetBitContext gb;
+    int au_start_flag = 0, au_end_flag = 0, ocr_flag = 0, idle_flag = 0;
+    int padding_flag = 0, padding_bits = 0, inst_bitrate_flag = 0;
+    int dts_flag = -1, cts_flag = -1;
+    int64_t dts = AV_NOPTS_VALUE, cts = AV_NOPTS_VALUE;
+    init_get_bits(&gb, buf, buf_size*8);

+    if (sl->use_au_start)
+        au_start_flag = get_bits1(&gb);
+    if (sl->use_au_end)
+        au_end_flag = get_bits1(&gb);
+    if (!sl->use_au_start && !sl->use_au_end)
+        au_start_flag = au_end_flag = 1;
+    if (sl->ocr_len > 0)
+        ocr_flag = get_bits1(&gb);
+    if (sl->use_idle)
+        idle_flag = get_bits1(&gb);
+    if (sl->use_padding)
+        padding_flag = get_bits1(&gb);
+    if (padding_flag)
+        padding_bits = get_bits(&gb, 3);

+    if (!idle_flag && (!padding_flag || padding_bits != 0)) {
+        if (sl->packet_seq_num_len)
+            skip_bits_long(&gb, sl->packet_seq_num_len);
+        if (sl->degr_prior_len)
+            if (get_bits1(&gb))
+                skip_bits(&gb, sl->degr_prior_len);
+        if (ocr_flag)
+            skip_bits_long(&gb, sl->ocr_len);
+        if (au_start_flag) {
+            if (sl->use_rand_acc_pt)
+                get_bits1(&gb);
+            if (sl->au_seq_num_len > 0)
+                skip_bits_long(&gb, sl->au_seq_num_len);
+            if (sl->use_timestamps) {
+                dts_flag = get_bits1(&gb);
+                cts_flag = get_bits1(&gb);
+            }
+        }
+        if (sl->inst_bitrate_len)
+            inst_bitrate_flag = get_bits1(&gb);
+        if (dts_flag == 1)
+            dts = get_bits64(&gb, sl->timestamp_len);
+        if (cts_flag == 1)
+            cts = get_bits64(&gb, sl->timestamp_len);
+        if (sl->au_len > 0)
+            skip_bits_long(&gb, sl->au_len);
+        if (inst_bitrate_flag)
+            skip_bits_long(&gb, sl->inst_bitrate_len);
+    }

+    if (dts != AV_NOPTS_VALUE)
+        pes->dts = dts;
+    if (cts != AV_NOPTS_VALUE)
+        pes->pts = cts;

+    av_set_pts_info(pes->st, sl->timestamp_len, 1, sl->timestamp_res);

+    return (get_bits_count(&gb) + 7) >> 3;
+}

 /* return non zero if a packet could be constructed */
 static int mpegts_push_data(MpegTSFilter *filter,
                             const uint8_t *buf, int buf_size, int is_start,
@@ -833,6 +917,12 @@ static int mpegts_push_data(MpegTSFilter *filter,
                 /* we got the full header. We parse it and get the payload */
                 pes->state = MPEGTS_PAYLOAD;
                 pes->data_index = 0;
+                if (pes->stream_type == 0x12) {
+                    int sl_header_bytes = read_sl_header(pes, &pes->sl, p, buf_size);
+                    pes->pes_header_size += sl_header_bytes;
+                    p += sl_header_bytes;
+                    buf_size -= sl_header_bytes;
+                }
             }
             break;
         case MPEGTS_PAYLOAD:
@@ -897,48 +987,289 @@ static PESContext *add_pes_stream(MpegTSContext *ts, int pid, int pcr_pid)
     return pes;
 }

-static int mp4_read_iods(AVFormatContext *s, const uint8_t *buf, unsigned size,
+#define MAX_LEVEL 4
-                         int *es_id, uint8_t **dec_config_descr,
+typedef struct {
-                         int *dec_config_descr_size)
+    AVFormatContext *s;
-{
     AVIOContext pb;
-    int tag;
+    Mp4Descr *descr;
-    unsigned len;
+    Mp4Descr *active_descr;
+    int descr_count;
+    int max_descr_count;
+    int level;
+} MP4DescrParseContext;

-    ffio_init_context(&pb, buf, size, 0, NULL, NULL, NULL, NULL);
+static int init_MP4DescrParseContext(
+    MP4DescrParseContext *d, AVFormatContext *s, const uint8_t *buf,
+    unsigned size, Mp4Descr *descr, int max_descr_count)
+{
+    int ret;
+    if (size > (1<<30))
+        return AVERROR_INVALIDDATA;

-    len = ff_mp4_read_descr(s, &pb, &tag);
+    if ((ret = ffio_init_context(&d->pb, (unsigned char*)buf, size, 0,
-    if (tag == MP4IODescrTag) {
+                                 NULL, NULL, NULL, NULL)) < 0)
-        avio_rb16(&pb); // ID
+        return ret;
-        avio_r8(&pb);
-        avio_r8(&pb);
+    d->s = s;
-        avio_r8(&pb);
+    d->level = 0;
-        avio_r8(&pb);
+    d->descr_count = 0;
-        avio_r8(&pb);
+    d->descr = descr;
-        len = ff_mp4_read_descr(s, &pb, &tag);
+    d->active_descr = NULL;
-        if (tag == MP4ESDescrTag) {
+    d->max_descr_count = max_descr_count;
-            ff_mp4_parse_es_descr(&pb, es_id);
-            av_dlog(s, "ES_ID %#x\n", *es_id);
+    return 0;
-            len = ff_mp4_read_descr(s, &pb, &tag);
+}
-            if (tag == MP4DecConfigDescrTag) {
-                *dec_config_descr = av_malloc(len);
+static void update_offsets(AVIOContext *pb, int64_t *off, int *len) {
-                if (!*dec_config_descr)
+    int64_t new_off = avio_tell(pb);
-                    return AVERROR(ENOMEM);
+    (*len) -= new_off - *off;
-                *dec_config_descr_size = len;
+    *off = new_off;
-                avio_read(&pb, *dec_config_descr, len);
+}
-            }
-        }
+static int parse_mp4_descr(MP4DescrParseContext *d, int64_t off, int len,
+                           int target_tag);

+static int parse_mp4_descr_arr(MP4DescrParseContext *d, int64_t off, int len)
+{
+    while (len > 0) {
+        if (parse_mp4_descr(d, off, len, 0) < 0)
+            return -1;
+        update_offsets(&d->pb, &off, &len);
     }
     return 0;
 }

+static int parse_MP4IODescrTag(MP4DescrParseContext *d, int64_t off, int len)
+{
+    avio_rb16(&d->pb); // ID
+    avio_r8(&d->pb);
+    avio_r8(&d->pb);
+    avio_r8(&d->pb);
+    avio_r8(&d->pb);
+    avio_r8(&d->pb);
+    update_offsets(&d->pb, &off, &len);
+    return parse_mp4_descr_arr(d, off, len);
+}

+static int parse_MP4ODescrTag(MP4DescrParseContext *d, int64_t off, int len)
+{
+    int id_flags;
+    if (len < 2)
+        return 0;
+    id_flags = avio_rb16(&d->pb);
+    if (!(id_flags & 0x0020)) { //URL_Flag
+        update_offsets(&d->pb, &off, &len);
+        return parse_mp4_descr_arr(d, off, len); //ES_Descriptor[]
+    } else {
+        return 0;
+    }
+}

+static int parse_MP4ESDescrTag(MP4DescrParseContext *d, int64_t off, int len)
+{
+    int es_id = 0;
+    if (d->descr_count >= d->max_descr_count)
+        return -1;
+    ff_mp4_parse_es_descr(&d->pb, &es_id);
+    d->active_descr = d->descr + (d->descr_count++);

+    d->active_descr->es_id = es_id;
+    update_offsets(&d->pb, &off, &len);
+    parse_mp4_descr(d, off, len, MP4DecConfigDescrTag);
+    update_offsets(&d->pb, &off, &len);
+    if (len > 0)
+        parse_mp4_descr(d, off, len, MP4SLDescrTag);
+    d->active_descr = NULL;
+    return 0;
+}

+static int parse_MP4DecConfigDescrTag(MP4DescrParseContext *d, int64_t off, int len)
+{
+    Mp4Descr *descr = d->active_descr;
+    if (!descr)
+        return -1;
+    d->active_descr->dec_config_descr = av_malloc(len);
+    if (!descr->dec_config_descr)
+        return AVERROR(ENOMEM);
+    descr->dec_config_descr_len = len;
+    avio_read(&d->pb, descr->dec_config_descr, len);
+    return 0;
+}

+static int parse_MP4SLDescrTag(MP4DescrParseContext *d, int64_t off, int len)
+{
+    Mp4Descr *descr = d->active_descr;
+    int predefined;
+    if (!descr)
+        return -1;

+    predefined = avio_r8(&d->pb);
+    if (!predefined) {
+        int lengths;
+        int flags = avio_r8(&d->pb);
+        descr->sl.use_au_start = !!(flags & 0x80);
+        descr->sl.use_au_end = !!(flags & 0x40);
+        descr->sl.use_rand_acc_pt = !!(flags & 0x20);
+        descr->sl.use_padding = !!(flags & 0x08);
+        descr->sl.use_timestamps = !!(flags & 0x04);
+        descr->sl.use_idle = !!(flags & 0x02);
+        descr->sl.timestamp_res = avio_rb32(&d->pb);
+        avio_rb32(&d->pb);
+        descr->sl.timestamp_len = avio_r8(&d->pb);
+        descr->sl.ocr_len = avio_r8(&d->pb);
+        descr->sl.au_len = avio_r8(&d->pb);
+        descr->sl.inst_bitrate_len = avio_r8(&d->pb);
+        lengths = avio_rb16(&d->pb);
+        descr->sl.degr_prior_len = lengths >> 12;
+        descr->sl.au_seq_num_len = (lengths >> 7) & 0x1f;
+        descr->sl.packet_seq_num_len = (lengths >> 2) & 0x1f;
+    } else {
+        av_log_missing_feature(d->s, "Predefined SLConfigDescriptor\n", 0);
+    }
+    return 0;
+}

+static int parse_mp4_descr(MP4DescrParseContext *d, int64_t off, int len,
+                           int target_tag) {
+    int tag;
+    int len1 = ff_mp4_read_descr(d->s, &d->pb, &tag);
+    update_offsets(&d->pb, &off, &len);
+    if (len < 0 || len1 > len || len1 <= 0) {
+        av_log(d->s, AV_LOG_ERROR, "Tag %x length violation new length %d bytes remaining %d\n", tag, len1, len);
+        return -1;
+    }

+    if (d->level++ >= MAX_LEVEL) {
+        av_log(d->s, AV_LOG_ERROR, "Maximum MP4 descriptor level exceeded\n");
+        goto done;
+    }

+    if (target_tag && tag != target_tag) {
+        av_log(d->s, AV_LOG_ERROR, "Found tag %x expected %x\n", tag, target_tag);
+        goto done;
+    }

+    switch (tag) {
+    case MP4IODescrTag:
+        parse_MP4IODescrTag(d, off, len1);
+        break;
+    case MP4ODescrTag:
+        parse_MP4ODescrTag(d, off, len1);
+        break;
+    case MP4ESDescrTag:
+        parse_MP4ESDescrTag(d, off, len1);
+        break;
+    case MP4DecConfigDescrTag:
+        parse_MP4DecConfigDescrTag(d, off, len1);
+        break;
+    case MP4SLDescrTag:
+        parse_MP4SLDescrTag(d, off, len1);
+        break;
+    }

+done:
+    d->level--;
+    avio_seek(&d->pb, off + len1, SEEK_SET);
+    return 0;
+}

+static int mp4_read_iods(AVFormatContext *s, const uint8_t *buf, unsigned size,
+                         Mp4Descr *descr, int *descr_count, int max_descr_count)
+{
+    MP4DescrParseContext d;
+    if (init_MP4DescrParseContext(&d, s, buf, size, descr, max_descr_count) < 0)
+        return -1;

+    parse_mp4_descr(&d, avio_tell(&d.pb), size, MP4IODescrTag);

+    *descr_count = d.descr_count;
+    return 0;
+}

+static int mp4_read_od(AVFormatContext *s, const uint8_t *buf, unsigned size,
+                       Mp4Descr *descr, int *descr_count, int max_descr_count)
+{
+    MP4DescrParseContext d;
+    if (init_MP4DescrParseContext(&d, s, buf, size, descr, max_descr_count) < 0)
+        return -1;

+    parse_mp4_descr_arr(&d, avio_tell(&d.pb), size);

+    *descr_count = d.descr_count;
+    return 0;
+}

+static void m4sl_cb(MpegTSFilter *filter, const uint8_t *section, int section_len)
+{
+    MpegTSContext *ts = filter->u.section_filter.opaque;
+    SectionHeader h;
+    const uint8_t *p, *p_end;
+    AVIOContext pb;
+    Mp4Descr mp4_descr[MAX_MP4_DESCR_COUNT] = {{ 0 }};
+    int mp4_descr_count = 0;
+    int i, pid;
+    AVFormatContext *s = ts->stream;

+    p_end = section + section_len - 4;
+    p = section;
+    if (parse_section_header(&h, &p, p_end) < 0)
+        return;
+    if (h.tid != M4OD_TID)
+        return;

+    mp4_read_od(s, p, (unsigned)(p_end - p), mp4_descr, &mp4_descr_count, MAX_MP4_DESCR_COUNT);

+    for (pid = 0; pid < NB_PID_MAX; pid++) {
+        if (!ts->pids[pid])
+            continue;
+        for (i = 0; i < mp4_descr_count; i++) {
+            PESContext *pes;
+            AVStream *st;
+            if (ts->pids[pid]->es_id != mp4_descr[i].es_id)
+                continue;
+            if (!(ts->pids[pid] && ts->pids[pid]->type == MPEGTS_PES)) {
+                av_log(s, AV_LOG_ERROR, "pid %x is not PES\n", pid);
+                continue;
+            }
+            pes = ts->pids[pid]->u.pes_filter.opaque;
+            st = pes->st;
+            if (!st) {
+                continue;
+            }

+            pes->sl = mp4_descr[i].sl;

+            ffio_init_context(&pb, mp4_descr[i].dec_config_descr,
+                              mp4_descr[i].dec_config_descr_len, 0, NULL, NULL, NULL, NULL);
+            ff_mp4_read_dec_config_descr(s, st, &pb);
+            if (st->codec->codec_id == CODEC_ID_AAC &&
+                st->codec->extradata_size > 0)
+                st->need_parsing = 0;
+            if (st->codec->codec_id == CODEC_ID_H264 &&
+                st->codec->extradata_size > 0)
+                st->need_parsing = 0;

+            if (st->codec->codec_id <= CODEC_ID_NONE) {
+            } else if (st->codec->codec_id < CODEC_ID_FIRST_AUDIO) {
+                st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+            } else if (st->codec->codec_id < CODEC_ID_FIRST_SUBTITLE) {
+                st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
+            } else if (st->codec->codec_id < CODEC_ID_FIRST_UNKNOWN) {
+                st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
+            }
+        }
+    }
+    for (i = 0; i < mp4_descr_count; i++)
+        av_free(mp4_descr[i].dec_config_descr);
+}

 int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type,
                               const uint8_t **pp, const uint8_t *desc_list_end,
-                              int mp4_dec_config_descr_len, int mp4_es_id, int pid,
+                              Mp4Descr *mp4_descr, int mp4_descr_count, int pid,
-                              uint8_t *mp4_dec_config_descr)
+                              MpegTSContext *ts)
 {
     const uint8_t *desc_end;
-    int desc_len, desc_tag;
+    int desc_len, desc_tag, desc_es_id;
     char language[252];
     int i;

@@ -959,13 +1290,31 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
         mpegts_find_stream_type(st, desc_tag, DESC_types);

     switch(desc_tag) {
+    case 0x1E: /* SL descriptor */
+        desc_es_id = get16(pp, desc_end);
+        if (ts && ts->pids[pid])
+            ts->pids[pid]->es_id = desc_es_id;
+        for (i = 0; i < mp4_descr_count; i++)
+            if (mp4_descr[i].dec_config_descr_len &&
+                mp4_descr[i].es_id == desc_es_id) {
+                AVIOContext pb;
+                ffio_init_context(&pb, mp4_descr[i].dec_config_descr,
+                                  mp4_descr[i].dec_config_descr_len, 0, NULL, NULL, NULL, NULL);
+                ff_mp4_read_dec_config_descr(fc, st, &pb);
+                if (st->codec->codec_id == CODEC_ID_AAC &&
+                    st->codec->extradata_size > 0)
+                    st->need_parsing = 0;
+                if (st->codec->codec_id == CODEC_ID_MPEG4SYSTEMS)
+                    mpegts_open_section_filter(ts, pid, m4sl_cb, ts, 1);
+            }
+        break;
     case 0x1F: /* FMC descriptor */
         get16(pp, desc_end);
-        if ((st->codec->codec_id == CODEC_ID_AAC_LATM || st->request_probe>0) &&
+        if (mp4_descr_count > 0 && (st->codec->codec_id == CODEC_ID_AAC_LATM || st->request_probe>0) &&
-            mp4_dec_config_descr_len && mp4_es_id == pid) {
+            mp4_descr->dec_config_descr_len && mp4_descr->es_id == pid) {
             AVIOContext pb;
-            ffio_init_context(&pb, mp4_dec_config_descr,
+            ffio_init_context(&pb, mp4_descr->dec_config_descr,
-                              mp4_dec_config_descr_len, 0, NULL, NULL, NULL, NULL);
+                              mp4_descr->dec_config_descr_len, 0, NULL, NULL, NULL, NULL);
             ff_mp4_read_dec_config_descr(fc, st, &pb);
             if (st->codec->codec_id == CODEC_ID_AAC &&
                 st->codec->extradata_size > 0){
@@ -1054,9 +1403,10 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
     int program_info_length, pcr_pid, pid, stream_type;
     int desc_list_len;
     uint32_t prog_reg_desc = 0; /* registration descriptor */
-    uint8_t *mp4_dec_config_descr = NULL;
-    int mp4_dec_config_descr_len = 0;
+    Mp4Descr mp4_descr[MAX_MP4_DESCR_COUNT] = {{ 0 }};
-    int mp4_es_id = 0;
+    int mp4_descr_count = 0;
+    int i;

     av_dlog(ts->stream, "PMT: len %i\n", section_len);
     hex_dump_debug(ts->stream, (uint8_t *)section, section_len);
@@ -1099,8 +1449,8 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
             get8(&p, p_end); // scope
             get8(&p, p_end); // label
             len -= 2;
-            mp4_read_iods(ts->stream, p, len, &mp4_es_id,
+            mp4_read_iods(ts->stream, p, len, mp4_descr + mp4_descr_count,
-                          &mp4_dec_config_descr, &mp4_dec_config_descr_len);
+                          &mp4_descr_count, MAX_MP4_DESCR_COUNT);
         } else if (tag == 0x05 && len >= 4) { // registration descriptor
             prog_reg_desc = bytestream_get_le32(&p);
             len -= 4;
@@ -1117,6 +1467,7 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len

     for(;;) {
         st = 0;
+        pes = NULL;
         stream_type = get8(&p, p_end);
         if (stream_type < 0)
             break;
@@ -1132,19 +1483,28 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
                 pes->st->id = pes->pid;
             }
             st = pes->st;
-        } else {
+        } else if (stream_type != 0x13) {
             if (ts->pids[pid]) mpegts_close_filter(ts, ts->pids[pid]); //wrongly added sdt filter probably
             pes = add_pes_stream(ts, pid, pcr_pid);
             if (pes) {
                 st = avformat_new_stream(pes->stream, NULL);
                 st->id = pes->pid;
             }
+        } else {
+            int idx = ff_find_stream_index(ts->stream, pid);
+            if (idx >= 0) {
+                st = ts->stream->streams[idx];
+            } else {
+                st = avformat_new_stream(pes->stream, NULL);
+                st->id = pid;
+                st->codec->codec_type = AVMEDIA_TYPE_DATA;
+            }
         }

         if (!st)
             goto out;

-        if (!pes->stream_type)
+        if (pes && !pes->stream_type)
             mpegts_set_stream_info(st, pes, stream_type, prog_reg_desc);

         add_pid_to_pmt(ts, h->id, pid);
@@ -1159,10 +1519,10 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
             break;
         for(;;) {
             if (ff_parse_mpeg2_descriptor(ts->stream, st, stream_type, &p, desc_list_end,
-                mp4_dec_config_descr_len, mp4_es_id, pid, mp4_dec_config_descr) < 0)
+                mp4_descr, mp4_descr_count, pid, ts) < 0)
                 break;

-            if (prog_reg_desc == AV_RL32("HDMV") && stream_type == 0x83 && pes->sub_st) {
+            if (pes && prog_reg_desc == AV_RL32("HDMV") && stream_type == 0x83 && pes->sub_st) {
                 ff_program_add_stream_index(ts->stream, h->id, pes->sub_st->index);
                 pes->sub_st->codec->codec_tag = st->codec->codec_tag;
             }
@@ -1171,7 +1531,8 @@ static void pmt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len
     }

 out:
-    av_free(mp4_dec_config_descr);
+    for (i = 0; i < mp4_descr_count; i++)
+        av_free(mp4_descr[i].dec_config_descr);
 }

 static void pat_cb(MpegTSFilter *filter, const uint8_t *section, int section_len)
@@ -1839,7 +2200,7 @@ static int read_seek2(AVFormatContext *s,
         ts_adj = target_ts;
         stream_index_gen_search = stream_index;
     }
-    pos = av_gen_search(s, stream_index_gen_search, ts_adj,
+    pos = ff_gen_search(s, stream_index_gen_search, ts_adj,
                         0, INT64_MAX, -1,
                         AV_NOPTS_VALUE,
                         AV_NOPTS_VALUE,
@@ -39,6 +39,7 @@
 /* table ids */
 #define PAT_TID 0x00
 #define PMT_TID 0x02
+#define M4OD_TID 0x05
 #define SDT_TID 0x42

 #define STREAM_TYPE_VIDEO_MPEG1 0x01
@@ -64,6 +65,30 @@ int ff_mpegts_parse_packet(MpegTSContext *ts, AVPacket *pkt,
                            const uint8_t *buf, int len);
 void ff_mpegts_parse_close(MpegTSContext *ts);

+typedef struct {
+    int use_au_start;
+    int use_au_end;
+    int use_rand_acc_pt;
+    int use_padding;
+    int use_timestamps;
+    int use_idle;
+    int timestamp_res;
+    int timestamp_len;
+    int ocr_len;
+    int au_len;
+    int inst_bitrate_len;
+    int degr_prior_len;
+    int au_seq_num_len;
+    int packet_seq_num_len;
+} SLConfigDescr;

+typedef struct {
+    int es_id;
+    int dec_config_descr_len;
+    uint8_t *dec_config_descr;
+    SLConfigDescr sl;
+} Mp4Descr;

 /**
  * Parse an MPEG-2 descriptor
  * @param[in] fc Format context (used for logging only)
@@ -79,7 +104,7 @@ void ff_mpegts_parse_close(MpegTSContext *ts);
  */
 int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type,
                               const uint8_t **pp, const uint8_t *desc_list_end,
-                              int mp4_dec_config_descr_len, int mp4_es_id, int pid,
+                              Mp4Descr *mp4_descr, int mp4_descr_count, int pid,
-                              uint8_t *mp4_dec_config_descr);
+                              MpegTSContext *ts);

 #endif /* AVFORMAT_MPEGTS_H */
@@ -49,6 +49,7 @@
 #include "libavutil/mathematics.h"
 #include "libavcodec/bytestream.h"
 #include "avformat.h"
+#include "internal.h"
 #include "mxf.h"

 typedef enum {
@@ -1141,7 +1142,7 @@ static int mxf_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
     seconds = av_rescale(sample_time, st->time_base.num, st->time_base.den);
     if (avio_seek(s->pb, (s->bit_rate * seconds) >> 3, SEEK_SET) < 0)
         return -1;
-    av_update_cur_dts(s, st, sample_time);
+    ff_update_cur_dts(s, st, sample_time);
     return 0;
 }

@@ -874,16 +874,16 @@ static int read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flag
                  (void **) next_node);
     av_log(s, AV_LOG_DEBUG, "%"PRIu64"-%"PRIu64" %"PRId64"-%"PRId64"\n", next_node[0]->pos, next_node[1]->pos,
             next_node[0]->ts , next_node[1]->ts);
-    pos= av_gen_search(s, -1, dummy.ts, next_node[0]->pos, next_node[1]->pos, next_node[1]->pos,
+    pos = ff_gen_search(s, -1, dummy.ts, next_node[0]->pos, next_node[1]->pos, next_node[1]->pos,
                       next_node[0]->ts , next_node[1]->ts, AVSEEK_FLAG_BACKWARD, &ts, nut_read_timestamp);

     if(!(flags & AVSEEK_FLAG_BACKWARD)){
         dummy.pos= pos+16;
         next_node[1]= &nopts_sp;
         av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp,
                      (void **) next_node);
-        pos2= av_gen_search(s, -2, dummy.pos, next_node[0]->pos , next_node[1]->pos, next_node[1]->pos,
+        pos2 = ff_gen_search(s, -2, dummy.pos, next_node[0]->pos , next_node[1]->pos, next_node[1]->pos,
                             next_node[0]->back_ptr, next_node[1]->back_ptr, flags, &ts, nut_read_timestamp);
         if(pos2>=0)
             pos= pos2;
         //FIXME dir but I think it does not matter
@@ -32,6 +32,7 @@
 #include <stdio.h>
 #include "oggdec.h"
 #include "avformat.h"
+#include "internal.h"
 #include "vorbiscomment.h"

 #define MAX_PAGE_SIZE 65307
@@ -661,7 +662,7 @@ static int ogg_read_seek(AVFormatContext *s, int stream_index,
         && !(flags & AVSEEK_FLAG_ANY))
         os->keyframe_seek = 1;

-    ret = av_seek_frame_binary(s, stream_index, timestamp, flags);
+    ret = ff_seek_frame_binary(s, stream_index, timestamp, flags);
     os = ogg->streams + stream_index;
     if (ret < 0)
         os->keyframe_seek = 0;
@@ -64,10 +64,10 @@ int main(int argc, char **argv)
     AVFormatContext *ic = NULL;
     int i, ret, stream_id;
     int64_t timestamp;
-    AVFormatParameters params, *ap= &params;
+    AVDictionary *format_opts = NULL;
-    memset(ap, 0, sizeof(params));
-    ap->channels=1;
+    av_dict_set(&format_opts, "channels", "1", 0);
-    ap->sample_rate= 22050;
+    av_dict_set(&format_opts, "sample_rate", "22050", 0);

     /* initialize libavcodec, and register all codecs and formats */
     av_register_all();
@@ -80,7 +80,8 @@ int main(int argc, char **argv)

     filename = argv[1];

-    ret = av_open_input_file(&ic, filename, NULL, 0, ap);
+    ret = avformat_open_input(&ic, filename, NULL, &format_opts);
+    av_dict_free(&format_opts);
     if (ret < 0) {
         fprintf(stderr, "cannot open %s\n", filename);
         exit(1);
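Not part of the commit: a stand-alone sketch of the AVDictionary-based open pattern the updated test switches to, written against the public libavformat API of this period. The input file name and the option values are placeholders.

#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int main(void)
{
    AVFormatContext *ic = NULL;
    AVDictionary *opts = NULL;
    int ret;

    av_register_all();

    /* Demuxer parameters now travel in a dictionary instead of
     * AVFormatParameters. */
    av_dict_set(&opts, "sample_rate", "22050", 0);
    av_dict_set(&opts, "channels", "1", 0);

    ret = avformat_open_input(&ic, "input.sw", NULL, &opts);
    /* avformat_open_input() leaves unrecognized options in the dict;
     * free whatever remains. */
    av_dict_free(&opts);
    if (ret < 0) {
        fprintf(stderr, "cannot open input: error %d\n", ret);
        return 1;
    }

    printf("opened with %u stream(s)\n", ic->nb_streams);
    av_close_input_file(ic);
    return 0;
}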
@@ -340,6 +340,7 @@ AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score
             fmt = NULL;
         }
     *score_ret= score_max;

     return fmt;
 }

@@ -1427,7 +1428,15 @@ void ff_read_frame_flush(AVFormatContext *s)
     }
 }

-void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
+#if FF_API_SEEK_PUBLIC
+void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
+{
+    return ff_update_cur_dts(s, ref_st, timestamp);
+}
+#endif

+void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
+{
     int i;

     for(i = 0; i < s->nb_streams; i++) {
@@ -1547,7 +1556,14 @@ int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
                                      wanted_timestamp, flags);
 }

+#if FF_API_SEEK_PUBLIC
 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
+    return ff_seek_frame_binary(s, stream_index, target_ts, flags);
+}
+#endif

+int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
+{
     AVInputFormat *avif= s->iformat;
     int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
     int64_t ts_min, ts_max, ts;
@@ -1594,7 +1610,7 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
         }
     }

-    pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
+    pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
     if(pos<0)
         return -1;

@@ -1603,12 +1619,28 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
         return ret;

     ff_read_frame_flush(s);
-    av_update_cur_dts(s, st, ts);
+    ff_update_cur_dts(s, st, ts);

     return 0;
 }

-int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
+#if FF_API_SEEK_PUBLIC
+int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
+                      int64_t pos_min, int64_t pos_max, int64_t pos_limit,
+                      int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
+                      int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
+{
+    return ff_gen_search(s, stream_index, target_ts, pos_min, pos_max,
+                         pos_limit, ts_min, ts_max, flags, ts_ret,
+                         read_timestamp);
+}
+#endif

+int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
+                      int64_t pos_min, int64_t pos_max, int64_t pos_limit,
+                      int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
+                      int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
+{
     int64_t pos, ts;
     int64_t start_pos, filesize;
     int no_change;
@@ -1775,7 +1807,7 @@ static int seek_frame_generic(AVFormatContext *s,
             ie= &st->index_entries[st->nb_index_entries-1];
             if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
                 return ret;
-            av_update_cur_dts(s, st, ie->timestamp);
+            ff_update_cur_dts(s, st, ie->timestamp);
         }else{
             if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
                 return ret;
@@ -1812,7 +1844,7 @@ static int seek_frame_generic(AVFormatContext *s,
     ie = &st->index_entries[index];
     if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
         return ret;
-    av_update_cur_dts(s, st, ie->timestamp);
+    ff_update_cur_dts(s, st, ie->timestamp);

     return 0;
 }
@@ -1853,7 +1885,7 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int f

     if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
         ff_read_frame_flush(s);
-        return av_seek_frame_binary(s, stream_index, timestamp, flags);
+        return ff_seek_frame_binary(s, stream_index, timestamp, flags);
     } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
         ff_read_frame_flush(s);
         return seek_frame_generic(s, stream_index, timestamp, flags);
@@ -107,5 +107,11 @@
 #ifndef FF_API_STREAM_COPY
 #define FF_API_STREAM_COPY (LIBAVFORMAT_VERSION_MAJOR < 54)
 #endif
+#ifndef FF_API_SEEK_PUBLIC
+#define FF_API_SEEK_PUBLIC (LIBAVFORMAT_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_REORDER_PRIVATE
+#define FF_API_REORDER_PRIVATE (LIBAVFORMAT_VERSION_MAJOR < 54)
+#endif

 #endif /* AVFORMAT_VERSION_H */
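Not part of the commit: a minimal sketch of the FF_API_* pattern the two new guards above follow. A deprecated feature keeps its public symbols while LIBAVFORMAT_VERSION_MAJOR is below the planned removal version and disappears after the major bump; the guard name FF_API_EXAMPLE is hypothetical.

#ifndef FF_API_EXAMPLE
#define FF_API_EXAMPLE (LIBAVFORMAT_VERSION_MAJOR < 54)
#endif

#if FF_API_EXAMPLE
/* deprecated public declarations stay compiled in here until the bump */
#endif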
@@ -773,7 +773,7 @@ static int parse_chunks(AVFormatContext *s, int mode, int64_t seekts, int *len_p
                 buf_size = FFMIN(len - consumed, sizeof(buf));
                 avio_read(pb, buf, buf_size);
                 consumed += buf_size;
-                ff_parse_mpeg2_descriptor(s, st, 0, &pbuf, buf + buf_size, 0, 0, 0, 0);
+                ff_parse_mpeg2_descriptor(s, st, 0, &pbuf, buf + buf_size, NULL, 0, 0, NULL);
             }
         } else if (!ff_guidcmp(g, EVENTID_AudioTypeSpanningEvent)) {
             int stream_index = ff_find_stream_index(s, sid);
@@ -44,7 +44,7 @@ int main(int argc, char **argv)
 {
     char fntemplate[PATH_MAX];
     char pktfilename[PATH_MAX];
-    AVFormatContext *fctx;
+    AVFormatContext *fctx = NULL;
     AVPacket pkt;
     int64_t pktnum = 0;
     int64_t maxpkts = 0;
@@ -83,9 +83,9 @@ int main(int argc, char **argv)
     // register all file formats
     av_register_all();

-    err = av_open_input_file(&fctx, argv[1], NULL, 0, NULL);
+    err = avformat_open_input(&fctx, argv[1], NULL, NULL);
     if (err < 0) {
-        fprintf(stderr, "av_open_input_file: error %d\n", err);
+        fprintf(stderr, "cannot open input: error %d\n", err);
         return 1;
     }
