Merge remote-tracking branch 'qatar/master'

* qatar/master:
  fate: add dxtory test
  adx_parser: rewrite.
  adxdec: Validate channel count to fix a division by zero.
  adxdec: Do not require extradata.
  cmdutils: K&R reformatting cosmetics
  alacdec: implement the 2-pass prediction type.
  alacenc: implement the 2-pass prediction type.
  alacenc: do not generate invalid multi-channel ALAC files
  alacdec: fill in missing or guessed info about the extradata format.
  utvideo: proper median prediction for interlaced videos
  lavu: bump lavu minor for av_popcount64
  dca: K&R formatting cosmetics
  dct: K&R formatting cosmetics
  lavf: flush decoders in avformat_find_stream_info().
  win32: detect number of CPUs using affinity
  Add av_popcount64
  snow: Restore three mistakenly removed casts.

Conflicts:
	cmdutils.c
	doc/APIchanges
	libavcodec/adx_parser.c
	libavcodec/adxdec.c
	libavcodec/alacenc.c
	libavutil/avutil.h
	tests/fate/screen.mak

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit ad1c8dd673
Michael Niedermayer, 2012-01-04 01:12:34 +01:00
18 changed files with 764 additions and 573 deletions
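Two of the merged changes pair up directly: the new av_popcount64 helper and the Windows CPU-count detection via the process affinity mask. As a minimal, illustrative usage sketch (not taken from the tree; it only assumes the APIs added and used in the diffs below):

#include <windows.h>
#include "libavutil/avutil.h"   /* pulls in the av_popcount64 helper added below */

/* Count the logical CPUs the current process may run on: one bit per CPU
 * in the affinity mask, counted with the new 64-bit popcount helper. */
static int count_usable_cpus(void)
{
    DWORD_PTR proc_aff, sys_aff;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff))
        return 1;   /* be conservative if the query fails */
    return av_popcount64(proc_aff);
}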

@ -61,7 +61,8 @@ static FILE *report_file;
void init_opts(void)
{
#if CONFIG_SWSCALE
sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC, NULL, NULL, NULL);
sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC,
NULL, NULL, NULL);
#endif
}
@ -94,7 +95,8 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
fflush(report_file);
}
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
double parse_number_or_die(const char *context, const char *numstr, int type,
double min, double max)
{
char *tail;
const char *error;
@ -114,7 +116,8 @@ double parse_number_or_die(const char *context, const char *numstr, int type, do
return 0;
}
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
int64_t parse_time_or_die(const char *context, const char *timestr,
int is_duration)
{
int64_t us;
if (av_parse_time(&us, timestr, is_duration) < 0) {
@ -125,7 +128,8 @@ int64_t parse_time_or_die(const char *context, const char *timestr, int is_durat
return us;
}
void show_help_options(const OptionDef *options, const char *msg, int mask, int value)
void show_help_options(const OptionDef *options, const char *msg, int mask,
int value)
{
const OptionDef *po;
int first;
@ -158,7 +162,8 @@ void show_help_children(const AVClass *class, int flags)
show_help_children(child, flags);
}
static const OptionDef* find_option(const OptionDef *po, const char *name){
static const OptionDef *find_option(const OptionDef *po, const char *name)
{
const char *p = strchr(name, ':');
int len = p ? p - name : strlen(name);
@ -231,8 +236,8 @@ static inline void prepare_app_arguments(int *argc_ptr, char ***argv_ptr)
}
#endif /* WIN32 && !__MINGW32CE__ */
int parse_option(void *optctx, const char *opt, const char *arg, const OptionDef *options)
int parse_option(void *optctx, const char *opt, const char *arg,
const OptionDef *options)
{
const OptionDef *po;
int bool_val = 1;
@ -261,7 +266,8 @@ unknown_opt:
/* new-style options contain an offset into optctx, old-style address of
* a global var*/
dst = po->flags & (OPT_OFFSET|OPT_SPEC) ? (uint8_t*)optctx + po->u.off : po->u.dst_ptr;
dst = po->flags & (OPT_OFFSET | OPT_SPEC) ? (uint8_t *)optctx + po->u.off
: po->u.dst_ptr;
if (po->flags & OPT_SPEC) {
SpecifierOpt **so = dst;
@ -290,10 +296,11 @@ unknown_opt:
} else if (po->flags & OPT_DOUBLE) {
*(double *)dst = parse_number_or_die(opt, arg, OPT_DOUBLE, -INFINITY, INFINITY);
} else if (po->u.func_arg) {
int ret = po->flags & OPT_FUNC2 ? po->u.func2_arg(optctx, opt, arg) :
po->u.func_arg(opt, arg);
int ret = po->flags & OPT_FUNC2 ? po->u.func2_arg(optctx, opt, arg)
: po->u.func_arg(opt, arg);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to set value '%s' for option '%s'\n", arg, opt);
av_log(NULL, AV_LOG_ERROR,
"Failed to set value '%s' for option '%s'\n", arg, opt);
return ret;
}
}
@ -336,7 +343,8 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options
/*
* Return index of option opt in argv or 0 if not found.
*/
static int locate_option(int argc, char **argv, const OptionDef *options, const char *optname)
static int locate_option(int argc, char **argv, const OptionDef *options,
const char *optname)
{
const OptionDef *po;
int i;
@ -419,15 +427,18 @@ int opt_default(const char *opt, const char *arg)
p = opt + strlen(opt);
av_strlcpy(opt_stripped, opt, FFMIN(sizeof(opt_stripped), p - opt + 1));
if ((oc = av_opt_find(&cc, opt_stripped, NULL, 0, AV_OPT_SEARCH_CHILDREN|AV_OPT_SEARCH_FAKE_OBJ)) ||
if ((oc = av_opt_find(&cc, opt_stripped, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) ||
((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') &&
(oc = av_opt_find(&cc, opt + 1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ))))
av_dict_set(&codec_opts, opt, arg, FLAGS(oc));
if ((of = av_opt_find(&fc, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
if ((of = av_opt_find(&fc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
av_dict_set(&format_opts, opt, arg, FLAGS(of));
#if CONFIG_SWSCALE
sc = sws_get_class();
if ((os = av_opt_find(&sc, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
if ((os = av_opt_find(&sc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
// XXX we only support sws_flags, not arbitrary sws options
int ret = av_opt_set(sws_opts, opt, arg, 0);
if (ret < 0) {
@ -604,7 +615,8 @@ void show_banner(int argc, char **argv, const OptionDef *options)
if (idx)
return;
av_log(NULL, AV_LOG_INFO, "%s version " FFMPEG_VERSION ", Copyright (c) %d-%d the FFmpeg developers\n",
av_log(NULL, AV_LOG_INFO,
"%s version " FFMPEG_VERSION ", Copyright (c) %d-%d the FFmpeg developers\n",
program_name, program_birth_year, this_year);
av_log(NULL, AV_LOG_INFO, " built on %s %s with %s %s\n",
__DATE__, __TIME__, CC_TYPE, CC_VERSION);
@ -696,8 +708,7 @@ int opt_formats(const char *opt, const char *arg)
AVOutputFormat *ofmt = NULL;
const char *last_name;
printf(
"File formats:\n"
printf("File formats:\n"
" D. = Demuxing supported\n"
" .E = Muxing supported\n"
" --\n");
@ -730,8 +741,7 @@ int opt_formats(const char *opt, const char *arg)
break;
last_name = name;
printf(
" %s%s %-15s %s\n",
printf(" %s%s %-15s %s\n",
decode ? "D" : " ",
encode ? "E" : " ",
name,
@ -744,8 +754,7 @@ int opt_codecs(const char *opt, const char *arg)
{
AVCodec *p = NULL, *p2;
const char *last_name;
printf(
"Codecs:\n"
printf("Codecs:\n"
" D..... = Decoding supported\n"
" .E.... = Encoding supported\n"
" ..V... = Video codec\n"
@ -770,8 +779,10 @@ int opt_codecs(const char *opt, const char *arg)
decode = encode = cap = 0;
}
if (p2 && strcmp(p->name, p2->name) == 0) {
if(p->decode) decode=1;
if(p->encode) encode=1;
if (p->decode)
decode = 1;
if (p->encode)
encode = 1;
cap |= p->capabilities;
}
}
@ -793,8 +804,7 @@ int opt_codecs(const char *opt, const char *arg)
type_str = "?";
break;
}
printf(
" %s%s%s%s%s%s %-15s %s",
printf(" %s%s%s%s%s%s %-15s %s",
decode ? "D" : (/* p2->decoder ? "d" : */ " "),
encode ? "E" : " ",
type_str,
@ -803,13 +813,14 @@ int opt_codecs(const char *opt, const char *arg)
cap & CODEC_CAP_TRUNCATED ? "T" : " ",
p2->name,
p2->long_name ? p2->long_name : "");
/* if(p2->decoder && decode==0)
printf(" use %s for decoding", p2->decoder->name);*/
#if 0
if (p2->decoder && decode == 0)
printf(" use %s for decoding", p2->decoder->name);
#endif
printf("\n");
}
printf("\n");
printf(
"Note, the names of encoders and decoders do not always match, so there are\n"
printf("Note, the names of encoders and decoders do not always match, so there are\n"
"several cases where the above table shows encoder only or decoder only entries\n"
"even though both encoding and decoding are supported. For example, the h263\n"
"decoder corresponds to the h263 and h263p encoders, for file formats it is even\n"
@ -863,8 +874,7 @@ int opt_pix_fmts(const char *opt, const char *arg)
{
enum PixelFormat pix_fmt;
printf(
"Pixel formats:\n"
printf("Pixel formats:\n"
"I.... = Supported Input format for conversion\n"
".O... = Supported Output format for conversion\n"
"..H.. = Hardware accelerated format\n"
@ -921,7 +931,8 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
FILE *f = fopen(filename, "rb");
if (!f) {
av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename, strerror(errno));
av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename,
strerror(errno));
return AVERROR(errno);
}
fseek(f, 0, SEEK_END);
@ -952,14 +963,14 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
}
FILE *get_preset_file(char *filename, size_t filename_size,
const char *preset_name, int is_path, const char *codec_name)
const char *preset_name, int is_path,
const char *codec_name)
{
FILE *f = NULL;
int i;
const char *base[3] = { getenv("FFMPEG_DATADIR"),
getenv("HOME"),
FFMPEG_DATADIR,
};
FFMPEG_DATADIR, };
if (is_path) {
av_strlcpy(filename, preset_name, filename_size);
@ -985,11 +996,14 @@ FILE *get_preset_file(char *filename, size_t filename_size,
for (i = 0; i < 3 && !f; i++) {
if (!base[i])
continue;
snprintf(filename, filename_size, "%s%s/%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", preset_name);
snprintf(filename, filename_size, "%s%s/%s.ffpreset", base[i],
i != 1 ? "" : "/.ffmpeg", preset_name);
f = fopen(filename, "r");
if (!f && codec_name) {
snprintf(filename, filename_size,
"%s%s/%s-%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", codec_name, preset_name);
"%s%s/%s-%s.ffpreset",
base[i], i != 1 ? "" : "/.ffmpeg", codec_name,
preset_name);
f = fopen(filename, "r");
}
}
@ -1002,7 +1016,8 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
{
if (*spec <= '9' && *spec >= '0') /* opt:index */
return strtol(spec, NULL, 0) == st->index;
else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' || *spec == 't') { /* opt:[vasdt] */
else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
*spec == 't') { /* opt:[vasdt] */
enum AVMediaType type;
switch (*spec++) {
@ -1051,11 +1066,13 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
return AVERROR(EINVAL);
}
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatContext *s, AVStream *st)
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
AVFormatContext *s, AVStream *st)
{
AVDictionary *ret = NULL;
AVDictionaryEntry *t = NULL;
int flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM : AV_OPT_FLAG_DECODING_PARAM;
int flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM
: AV_OPT_FLAG_DECODING_PARAM;
char prefix = 0;
const AVClass *cc = avcodec_get_class();
@ -1063,9 +1080,18 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatCont
return NULL;
switch (codec->type) {
case AVMEDIA_TYPE_VIDEO: prefix = 'v'; flags |= AV_OPT_FLAG_VIDEO_PARAM; break;
case AVMEDIA_TYPE_AUDIO: prefix = 'a'; flags |= AV_OPT_FLAG_AUDIO_PARAM; break;
case AVMEDIA_TYPE_SUBTITLE: prefix = 's'; flags |= AV_OPT_FLAG_SUBTITLE_PARAM; break;
case AVMEDIA_TYPE_VIDEO:
prefix = 'v';
flags |= AV_OPT_FLAG_VIDEO_PARAM;
break;
case AVMEDIA_TYPE_AUDIO:
prefix = 'a';
flags |= AV_OPT_FLAG_AUDIO_PARAM;
break;
case AVMEDIA_TYPE_SUBTITLE:
prefix = 's';
flags |= AV_OPT_FLAG_SUBTITLE_PARAM;
break;
}
while (t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX)) {
@ -1080,9 +1106,13 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatCont
}
if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
(codec && codec->priv_class && av_opt_find(&codec->priv_class, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ)))
(codec && codec->priv_class &&
av_opt_find(&codec->priv_class, t->key, NULL, flags,
AV_OPT_SEARCH_FAKE_OBJ)))
av_dict_set(&ret, t->key, t->value, 0);
else if (t->key[0] == prefix && av_opt_find(&cc, t->key+1, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ))
else if (t->key[0] == prefix &&
av_opt_find(&cc, t->key + 1, NULL, flags,
AV_OPT_SEARCH_FAKE_OBJ))
av_dict_set(&ret, t->key + 1, t->value, 0);
if (p)
@ -1091,7 +1121,8 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatCont
return ret;
}
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
AVDictionary *codec_opts)
{
int i;
AVDictionary **opts;
@ -1100,11 +1131,13 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *cod
return NULL;
opts = av_mallocz(s->nb_streams * sizeof(*opts));
if (!opts) {
av_log(NULL, AV_LOG_ERROR, "Could not alloc memory for stream options.\n");
av_log(NULL, AV_LOG_ERROR,
"Could not alloc memory for stream options.\n");
return NULL;
}
for (i = 0; i < s->nb_streams; i++)
opts[i] = filter_codec_opts(codec_opts, avcodec_find_decoder(s->streams[i]->codec->codec_id), s, s->streams[i]);
opts[i] = filter_codec_opts(codec_opts, avcodec_find_decoder(s->streams[i]->codec->codec_id),
s, s->streams[i]);
return opts;
}

configure

@ -1175,9 +1175,9 @@ HAVE_LIST="
fork
getaddrinfo
gethrtime
GetProcessAffinityMask
GetProcessMemoryInfo
GetProcessTimes
GetSystemInfo
getrusage
gnu_as
ibm_asm
@ -3016,8 +3016,8 @@ check_func_headers windows.h PeekNamedPipe
check_func_headers io.h setmode
check_func_headers lzo/lzo1x.h lzo1x_999_compress
check_lib2 "windows.h psapi.h" GetProcessMemoryInfo -lpsapi
check_func_headers windows.h GetProcessAffinityMask
check_func_headers windows.h GetProcessTimes
check_func_headers windows.h GetSystemInfo
check_func_headers windows.h MapViewOfFile
check_func_headers windows.h VirtualAlloc


@ -31,21 +31,24 @@ API changes, most recent first:
2011-10-20 - b35e9e1 - lavu 51.22.0
Add av_strtok() to avstring.h.
2011-01-03 - b73ec05 - lavu 51.21.0
Add av_popcount64
2011-12-18 - 8400b12 - lavc 53.28.1
Deprecate AVFrame.age. The field is unused.
2011-xx-xx - xxxxxxx - lavf 53.17.0
Add avformat_open_input().
2011-12-12 - 5266045 - lavf 53.17.0
Add avformat_close_input().
Deprecate av_close_input_file() and av_close_input_stream().
2011-xx-xx - xxxxxxx - lavc 53.25.0
2011-12-02 - 0eea212 - lavc 53.25.0
Add nb_samples and extended_data fields to AVFrame.
Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4().
avcodec_decode_audio4() writes output samples to an AVFrame, which allows
audio decoders to use get_buffer().
2011-xx-xx - xxxxxxx - lavc 53.24.0
2011-12-04 - 560f773 - lavc 53.24.0
Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
Change AVCodecContext.error[4] to [8] at next major bump.


@ -58,7 +58,7 @@ int avpriv_adx_decode_header(AVCodecContext *avctx, const uint8_t *buf,
/* channels */
avctx->channels = buf[7];
if (avctx->channels > 2)
if (avctx->channels <= 0 || avctx->channels > 2)
return AVERROR_INVALIDDATA;
/* sample rate */


@ -50,20 +50,24 @@ static int adx_parse(AVCodecParserContext *s1,
if (!s->header_size) {
for (i = 0; i < buf_size; i++) {
state = (state << 8) | buf[i];
if((state&0xFFFF0000FFFFFF00) == 0x8000000003120400ULL && (state&0xFF) && ((state>>32)&0xFFFF)>=4){
s->header_size= ((state>>32)&0xFFFF) + 4;
s->block_size = BLOCK_SIZE * (state&0xFF);
/* check for fixed fields in ADX header for possible match */
if ((state & 0xFFFF0000FFFFFF00) == 0x8000000003120400ULL) {
int channels = state & 0xFF;
int header_size = ((state >> 32) & 0xFFFF) + 4;
if (channels > 0 && header_size >= 8) {
s->header_size = header_size;
s->block_size = BLOCK_SIZE * channels;
s->remaining = i - 7 + s->header_size + s->block_size;
break;
}
}
}
pc->state64 = state;
}
if (s->header_size) {
if (!s->remaining) {
if (!s->remaining)
s->remaining = s->block_size;
}
if (s->remaining <= buf_size) {
next = s->remaining;
s->remaining = 0;


@ -46,6 +46,7 @@ static av_cold int adx_decode_init(AVCodecContext *avctx)
return AVERROR_INVALIDDATA;
}
c->channels = avctx->channels;
c->header_parsed = 1;
}
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
@ -106,21 +107,21 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data,
return buf_size;
}
if(AV_RB16(buf) == 0x8000){
if (!c->header_parsed && buf_size >= 2 && AV_RB16(buf) == 0x8000) {
int header_size;
if ((ret = avpriv_adx_decode_header(avctx, buf,
buf_size, &header_size,
if ((ret = avpriv_adx_decode_header(avctx, buf, buf_size, &header_size,
c->coeff)) < 0) {
av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n");
return AVERROR_INVALIDDATA;
}
c->channels = avctx->channels;
c->header_parsed = 1;
if (buf_size < header_size)
return AVERROR_INVALIDDATA;
buf += header_size;
buf_size -= header_size;
}
if(c->channels <= 0)
if (!c->header_parsed)
return AVERROR_INVALIDDATA;
/* calculate number of blocks in the packet */


@ -25,27 +25,23 @@
* @author 2005 David Hammerton
* @see http://crazney.net/programs/itunes/alac.html
*
* Note: This decoder expects a 36- (0x24-)byte QuickTime atom to be
* Note: This decoder expects a 36-byte QuickTime atom to be
* passed through the extradata[_size] fields. This atom is tacked onto
* the end of an 'alac' stsd atom and has the following format:
* bytes 0-3 atom size (0x24), big-endian
* bytes 4-7 atom type ('alac', not the 'alac' tag from start of stsd)
* bytes 8-35 data bytes needed by decoder
*
* Extradata:
* 32bit size
* 32bit tag (=alac)
* 32bit zero?
* 32bit max sample per frame
* 8bit ?? (zero?)
* 32bit atom size
* 32bit tag ("alac")
* 32bit tag version (0)
* 32bit samples per frame (used when not set explicitly in the frames)
* 8bit compatible version (0)
* 8bit sample size
* 8bit history mult
* 8bit initial history
* 8bit kmodifier
* 8bit channels?
* 16bit ??
* 32bit max coded frame size
* 32bit bitrate?
* 8bit history mult (40)
* 8bit initial history (14)
* 8bit kmodifier (10)
* 8bit channels
* 16bit maxRun (255)
* 32bit max coded frame size (0 means unknown)
* 32bit average bitrate (0 means unknown)
* 32bit samplerate
*/
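For orientation, the 36-byte atom documented above lays out as follows. This is only a picture of the byte layout (all multi-byte fields are big-endian; the struct and field names are invented, and the decoder parses the bytes sequentially rather than reading such a struct):

#include <stdint.h>

typedef struct {
    uint32_t atom_size;             /* 36 (0x24) */
    uint32_t atom_type;             /* 'alac' */
    uint32_t version;               /* 0 */
    uint32_t max_samples_per_frame; /* used when not set in the frames */
    uint8_t  compatible_version;    /* 0 */
    uint8_t  sample_size;
    uint8_t  history_mult;          /* 40 */
    uint8_t  initial_history;       /* 14 */
    uint8_t  k_modifier;            /* 10 */
    uint8_t  channels;
    uint16_t max_run;               /* 255 */
    uint32_t max_coded_frame_size;  /* 0 = unknown */
    uint32_t average_bitrate;       /* 0 = unknown */
    uint32_t sample_rate;
} ALACExtradataLayout;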
@ -464,24 +460,29 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data,
if(ret<0)
return ret;
if (prediction_type[ch] == 0) {
/* adaptive fir */
/* adaptive FIR filter */
if (prediction_type[ch] == 15) {
/* Prediction type 15 runs the adaptive FIR twice.
* The first pass uses the special-case coef_num = 31, while
* the second pass uses the coefs from the bitstream.
*
* However, this prediction type is not currently used by the
* reference encoder.
*/
predictor_decompress_fir_adapt(alac->predicterror_buffer[ch],
alac->predicterror_buffer[ch],
outputsamples, readsamplesize,
NULL, 31, 0);
} else if (prediction_type[ch] > 0) {
av_log(avctx, AV_LOG_WARNING, "unknown prediction type: %i\n",
prediction_type[ch]);
}
predictor_decompress_fir_adapt(alac->predicterror_buffer[ch],
alac->outputsamples_buffer[ch],
outputsamples,
readsamplesize,
outputsamples, readsamplesize,
predictor_coef_table[ch],
predictor_coef_num[ch],
prediction_quantitization[ch]);
} else {
av_log(avctx, AV_LOG_ERROR, "FIXME: unhandled prediction type: %i\n", prediction_type[ch]);
/* I think the only other prediction type (or perhaps this is
* just a boolean?) runs adaptive fir twice.. like:
* predictor_decompress_fir_adapt(predictor_error, tempout, ...)
* predictor_decompress_fir_adapt(predictor_error, outputsamples ...)
* little strange..
*/
}
}
} else {
/* not compressed, easy case */
@ -584,7 +585,7 @@ static int alac_set_info(ALACContext *alac)
ptr += 4; /* size */
ptr += 4; /* alac */
ptr += 4; /* 0 ? */
ptr += 4; /* version */
if(AV_RB32(ptr) >= UINT_MAX/4){
av_log(alac->avctx, AV_LOG_ERROR, "setinfo_max_samples_per_frame too large\n");
@ -593,15 +594,15 @@ static int alac_set_info(ALACContext *alac)
/* buffer size / 2 ? */
alac->setinfo_max_samples_per_frame = bytestream_get_be32(&ptr);
ptr++; /* ??? */
ptr++; /* compatible version */
alac->setinfo_sample_size = *ptr++;
alac->setinfo_rice_historymult = *ptr++;
alac->setinfo_rice_initialhistory = *ptr++;
alac->setinfo_rice_kmodifier = *ptr++;
alac->numchannels = *ptr++;
bytestream_get_be16(&ptr); /* ??? */
bytestream_get_be16(&ptr); /* maxRun */
bytestream_get_be32(&ptr); /* max coded frame size */
bytestream_get_be32(&ptr); /* bitrate ? */
bytestream_get_be32(&ptr); /* average bitrate */
bytestream_get_be32(&ptr); /* samplerate */
return 0;


@ -348,6 +348,7 @@ static void alac_entropy_coder(AlacEncodeContext *s)
static void write_compressed_frame(AlacEncodeContext *s)
{
int i, j;
int prediction_type = 0;
if (s->avctx->channels == 2)
alac_stereo_decorrelation(s);
@ -358,7 +359,7 @@ static void write_compressed_frame(AlacEncodeContext *s)
calc_predictor_params(s, i);
put_bits(&s->pbctx, 4, 0); // prediction type : currently only type 0 has been RE'd
put_bits(&s->pbctx, 4, prediction_type);
put_bits(&s->pbctx, 4, s->lpc[i].lpc_quant);
put_bits(&s->pbctx, 3, s->rc.rice_modifier);
@ -373,6 +374,14 @@ static void write_compressed_frame(AlacEncodeContext *s)
for (i = 0; i < s->avctx->channels; i++) {
alac_linear_predictor(s, i);
// TODO: determine when this will actually help. for now it's not used.
if (prediction_type == 15) {
// 2nd pass 1st order filter
for (j = s->avctx->frame_size - 1; j > 0; j--)
s->predictor_buf[j] -= s->predictor_buf[j - 1];
}
alac_entropy_coder(s);
}
}
@ -391,8 +400,11 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
return -1;
}
/* TODO: Correctly implement multi-channel ALAC.
It is similar to multi-channel AAC, in that it has a series of
single-channel (SCE), channel-pair (CPE), and LFE elements. */
if (avctx->channels > 2) {
av_log(avctx, AV_LOG_ERROR, "channels > 2 not supported\n");
av_log(avctx, AV_LOG_ERROR, "only mono or stereo input is currently supported\n");
return AVERROR_PATCHWELCOME;
}


@ -127,7 +127,6 @@ static const int dca_ext_audio_descr_mask[] = {
* OV -> center back
* All 2 channel configurations -> AV_CH_LAYOUT_STEREO
*/
static const uint64_t dca_core_channel_layout[] = {
AV_CH_FRONT_CENTER, ///< 1, A
AV_CH_LAYOUT_STEREO, ///< 2, A + B (dual mono)
@ -138,13 +137,31 @@ static const uint64_t dca_core_channel_layout[] = {
AV_CH_LAYOUT_STEREO | AV_CH_BACK_CENTER, ///< 3, L + R + S
AV_CH_LAYOUT_STEREO | AV_CH_FRONT_CENTER | AV_CH_BACK_CENTER, ///< 4, C + L + R + S
AV_CH_LAYOUT_STEREO | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT, ///< 4, L + R + SL + SR
AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, ///< 5, C + L + R+ SL+SR
AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER, ///< 6, CL + CR + L + R + SL + SR
AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT|AV_CH_FRONT_CENTER|AV_CH_BACK_CENTER, ///< 6, C + L + R+ LR + RR + OV
AV_CH_FRONT_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_BACK_CENTER|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, ///< 6, CF+ CR+LF+ RF+LR + RR
AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT, ///< 7, CL + C + CR + L + R + SL + SR
AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT, ///< 8, CL + CR + L + R + SL1 + SL2+ SR1 + SR2
AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_BACK_CENTER|AV_CH_SIDE_RIGHT, ///< 8, CL + C+ CR + L + R + SL + S+ SR
AV_CH_LAYOUT_STEREO | AV_CH_FRONT_CENTER | AV_CH_SIDE_LEFT |
AV_CH_SIDE_RIGHT, ///< 5, C + L + R + SL + SR
AV_CH_LAYOUT_STEREO | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT |
AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER, ///< 6, CL + CR + L + R + SL + SR
AV_CH_LAYOUT_STEREO | AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT |
AV_CH_FRONT_CENTER | AV_CH_BACK_CENTER, ///< 6, C + L + R + LR + RR + OV
AV_CH_FRONT_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER |
AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_BACK_CENTER |
AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT, ///< 6, CF + CR + LF + RF + LR + RR
AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_CENTER |
AV_CH_FRONT_RIGHT_OF_CENTER | AV_CH_LAYOUT_STEREO |
AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT, ///< 7, CL + C + CR + L + R + SL + SR
AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_RIGHT_OF_CENTER |
AV_CH_LAYOUT_STEREO | AV_CH_SIDE_LEFT | AV_CH_SIDE_RIGHT |
AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT, ///< 8, CL + CR + L + R + SL1 + SL2 + SR1 + SR2
AV_CH_FRONT_LEFT_OF_CENTER | AV_CH_FRONT_CENTER |
AV_CH_FRONT_RIGHT_OF_CENTER | AV_CH_LAYOUT_STEREO |
AV_CH_SIDE_LEFT | AV_CH_BACK_CENTER | AV_CH_SIDE_RIGHT, ///< 8, CL + C + CR + L + R + SL + S + SR
};
static const int8_t dca_lfe_index[] = {
@ -254,9 +271,11 @@ static BitAlloc dca_tmode; ///< transition mode VLCs
static BitAlloc dca_scalefactor; ///< scalefactor VLCs
static BitAlloc dca_smpl_bitalloc[11]; ///< samples VLCs
static av_always_inline int get_bitalloc(GetBitContext *gb, BitAlloc *ba, int idx)
static av_always_inline int get_bitalloc(GetBitContext *gb, BitAlloc *ba,
int idx)
{
return get_vlc2(gb, ba->vlc[idx].table, ba->vlc[idx].bits, ba->wrap) + ba->offset;
return get_vlc2(gb, ba->vlc[idx].table, ba->vlc[idx].bits, ba->wrap) +
ba->offset;
}
typedef struct {
@ -416,11 +435,13 @@ static av_cold void dca_init_vlcs(void)
for (i = 0; i < 10; i++)
for (j = 0; j < 7; j++) {
if (!bitalloc_codes[i][j]) break;
if (!bitalloc_codes[i][j])
break;
dca_smpl_bitalloc[i + 1].offset = bitalloc_offsets[i];
dca_smpl_bitalloc[i + 1].wrap = 1 + (j > 4);
dca_smpl_bitalloc[i + 1].vlc[j].table = &dca_table[dca_vlc_offs[c]];
dca_smpl_bitalloc[i + 1].vlc[j].table_allocated = dca_vlc_offs[c + 1] - dca_vlc_offs[c];
init_vlc(&dca_smpl_bitalloc[i + 1].vlc[j], bitalloc_maxbits[i][j],
bitalloc_sizes[i],
bitalloc_bits[i][j], 1, 1,
@ -494,16 +515,21 @@ static int dca_parse_audio_coding_header(DCAContext * s, int base_channel)
av_log(s->avctx, AV_LOG_DEBUG, "subframes: %i\n", s->subframes);
av_log(s->avctx, AV_LOG_DEBUG, "prim channels: %i\n", s->prim_channels);
for (i = base_channel; i < s->prim_channels; i++) {
av_log(s->avctx, AV_LOG_DEBUG, "subband activity: %i\n", s->subband_activity[i]);
av_log(s->avctx, AV_LOG_DEBUG, "vq start subband: %i\n", s->vq_start_subband[i]);
av_log(s->avctx, AV_LOG_DEBUG, "joint intensity: %i\n", s->joint_intensity[i]);
av_log(s->avctx, AV_LOG_DEBUG, "transient mode codebook: %i\n", s->transient_huffman[i]);
av_log(s->avctx, AV_LOG_DEBUG, "scale factor codebook: %i\n", s->scalefactor_huffman[i]);
av_log(s->avctx, AV_LOG_DEBUG, "bit allocation quantizer: %i\n", s->bitalloc_huffman[i]);
av_log(s->avctx, AV_LOG_DEBUG, "subband activity: %i\n",
s->subband_activity[i]);
av_log(s->avctx, AV_LOG_DEBUG, "vq start subband: %i\n",
s->vq_start_subband[i]);
av_log(s->avctx, AV_LOG_DEBUG, "joint intensity: %i\n",
s->joint_intensity[i]);
av_log(s->avctx, AV_LOG_DEBUG, "transient mode codebook: %i\n",
s->transient_huffman[i]);
av_log(s->avctx, AV_LOG_DEBUG, "scale factor codebook: %i\n",
s->scalefactor_huffman[i]);
av_log(s->avctx, AV_LOG_DEBUG, "bit allocation quantizer: %i\n",
s->bitalloc_huffman[i]);
av_log(s->avctx, AV_LOG_DEBUG, "quant index huff:");
for (j = 0; j < 11; j++)
av_log(s->avctx, AV_LOG_DEBUG, " %i",
s->quant_index_huffman[i][j]);
av_log(s->avctx, AV_LOG_DEBUG, " %i", s->quant_index_huffman[i][j]);
av_log(s->avctx, AV_LOG_DEBUG, "\n");
av_log(s->avctx, AV_LOG_DEBUG, "scalefac adj:");
for (j = 0; j < 11; j++)
@ -564,7 +590,8 @@ static int dca_parse_frame_header(DCAContext * s)
/* FIXME: channels mixing levels */
s->output = s->amode;
if (s->lfe) s->output |= DCA_LFE;
if (s->lfe)
s->output |= DCA_LFE;
#ifdef TRACE
av_log(s->avctx, AV_LOG_DEBUG, "frame type: %i\n", s->frame_type);
@ -692,7 +719,8 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index
const uint32_t *scale_table;
int scale_sum;
memset(s->scale_factor[j], 0, s->subband_activity[j] * sizeof(s->scale_factor[0][0][0]) * 2);
memset(s->scale_factor[j], 0,
s->subband_activity[j] * sizeof(s->scale_factor[0][0][0]) * 2);
if (s->scalefactor_huffman[j] == 6)
scale_table = scale_factor_quant7;
@ -810,9 +838,11 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index
}
#ifdef TRACE
av_log(s->avctx, AV_LOG_DEBUG, "subsubframes: %i\n", s->subsubframes[s->current_subframe]);
av_log(s->avctx, AV_LOG_DEBUG, "subsubframes: %i\n",
s->subsubframes[s->current_subframe]);
av_log(s->avctx, AV_LOG_DEBUG, "partial samples: %i\n",
s->partial_samples[s->current_subframe]);
for (j = base_channel; j < s->prim_channels; j++) {
av_log(s->avctx, AV_LOG_DEBUG, "prediction mode:");
for (k = 0; k < s->subband_activity[j]; k++)
@ -862,8 +892,10 @@ static int dca_subframe_header(DCAContext * s, int base_channel, int block_index
if (!base_channel && s->prim_channels > 2 && s->downmix) {
av_log(s->avctx, AV_LOG_DEBUG, "Downmix coeffs:\n");
for (j = 0; j < s->prim_channels; j++) {
av_log(s->avctx, AV_LOG_DEBUG, "Channel 0,%d = %f\n", j, dca_downmix_coeffs[s->downmix_coef[j][0]]);
av_log(s->avctx, AV_LOG_DEBUG, "Channel 1,%d = %f\n", j, dca_downmix_coeffs[s->downmix_coef[j][1]]);
av_log(s->avctx, AV_LOG_DEBUG, "Channel 0, %d = %f\n", j,
dca_downmix_coeffs[s->downmix_coef[j][0]]);
av_log(s->avctx, AV_LOG_DEBUG, "Channel 1, %d = %f\n", j,
dca_downmix_coeffs[s->downmix_coef[j][1]]);
}
av_log(s->avctx, AV_LOG_DEBUG, "\n");
}
@ -915,11 +947,11 @@ static void qmf_32_subbands(DCAContext * s, int chans,
}
s->synth.synth_filter_float(&s->imdct,
s->subband_fir_hist[chans], &s->hist_index[chans],
s->subband_fir_hist[chans],
&s->hist_index[chans],
s->subband_fir_noidea[chans], prCoeff,
samples_out, s->raXin, scale);
samples_out += 32;
}
}
@ -949,8 +981,7 @@ static void lfe_interpolation_fir(DCAContext *s, int decimation_select,
}
/* Interpolation */
for (deciindex = 0; deciindex < num_deci_sample; deciindex++) {
s->dcadsp.lfe_fir(samples_out, samples_in, prCoeff, decifactor,
scale);
s->dcadsp.lfe_fir(samples_out, samples_in, prCoeff, decifactor, scale);
samples_in++;
samples_out += 2 * decifactor;
}
@ -1123,7 +1154,8 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
} else {
/* Deal with transients */
int sfi = s->transition_mode[k][l] && subsubframe >= s->transition_mode[k][l];
float rscale = quant_step_size * s->scale_factor[k][l][sfi] * s->scalefactor_adj[k][sel];
float rscale = quant_step_size * s->scale_factor[k][l][sfi] *
s->scalefactor_adj[k][sel];
if (abits >= 11 || !dca_smpl_bitalloc[abits].vlc[sel].table) {
if (abits <= 7) {
@ -1150,7 +1182,8 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
} else {
/* Huffman coded */
for (m = 0; m < 8; m++)
block[m] = get_bitalloc(&s->gb, &dca_smpl_bitalloc[abits], sel);
block[m] = get_bitalloc(&s->gb,
&dca_smpl_bitalloc[abits], sel);
}
s->fmt_conv.int32_to_float_fmul_scalar(subband_samples[k][l],
@ -1171,8 +1204,7 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
else if (s->predictor_history)
subband_samples[k][l][m] +=
(adpcm_vb[s->prediction_vq[k][l]][n - 1] *
s->subband_samples_hist[k][l][m - n +
4] / 8192);
s->subband_samples_hist[k][l][m - n + 4] / 8192);
}
}
}
@ -1186,7 +1218,8 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
int hfvq = s->high_freq_vq[k][l];
if (!s->debug_flag & 0x01) {
av_log(s->avctx, AV_LOG_DEBUG, "Stream with high frequencies VQ coding\n");
av_log(s->avctx, AV_LOG_DEBUG,
"Stream with high frequencies VQ coding\n");
s->debug_flag |= 0x01;
}
@ -1210,7 +1243,8 @@ static int dca_subsubframe(DCAContext * s, int base_channel, int block_index)
/* Backup predictor history for adpcm */
for (k = base_channel; k < s->prim_channels; k++)
for (l = 0; l < s->vq_start_subband[k]; l++)
memcpy(s->subband_samples_hist[k][l], &subband_samples[k][l][4],
memcpy(s->subband_samples_hist[k][l],
&subband_samples[k][l][4],
4 * sizeof(subband_samples[0][0][0]));
return 0;
@ -1223,9 +1257,10 @@ static int dca_filter_channels(DCAContext * s, int block_index)
/* 32 subbands QMF */
for (k = 0; k < s->prim_channels; k++) {
/* static float pcm_to_double[8] =
{32768.0, 32768.0, 524288.0, 524288.0, 0, 8388608.0, 8388608.0};*/
qmf_32_subbands(s, k, subband_samples[k], &s->samples[256 * s->channel_order_tab[k]],
/* static float pcm_to_double[8] = { 32768.0, 32768.0, 524288.0, 524288.0,
0, 8388608.0, 8388608.0 };*/
qmf_32_subbands(s, k, subband_samples[k],
&s->samples[256 * s->channel_order_tab[k]],
M_SQRT1_2 * s->scale_bias /* pcm_to_double[s->source_pcm_res] */);
}
@ -1371,18 +1406,16 @@ static int dca_convert_bitstream(const uint8_t * src, int src_size, uint8_t * ds
static int dca_exss_mask2count(int mask)
{
/* count bits that mean speaker pairs twice */
return av_popcount(mask)
+ av_popcount(mask & (
DCA_EXSS_CENTER_LEFT_RIGHT
| DCA_EXSS_FRONT_LEFT_RIGHT
| DCA_EXSS_FRONT_HIGH_LEFT_RIGHT
| DCA_EXSS_WIDE_LEFT_RIGHT
| DCA_EXSS_SIDE_LEFT_RIGHT
| DCA_EXSS_SIDE_HIGH_LEFT_RIGHT
| DCA_EXSS_SIDE_REAR_LEFT_RIGHT
| DCA_EXSS_REAR_LEFT_RIGHT
| DCA_EXSS_REAR_HIGH_LEFT_RIGHT
));
return av_popcount(mask) +
av_popcount(mask & (DCA_EXSS_CENTER_LEFT_RIGHT |
DCA_EXSS_FRONT_LEFT_RIGHT |
DCA_EXSS_FRONT_HIGH_LEFT_RIGHT |
DCA_EXSS_WIDE_LEFT_RIGHT |
DCA_EXSS_SIDE_LEFT_RIGHT |
DCA_EXSS_SIDE_HIGH_LEFT_RIGHT |
DCA_EXSS_SIDE_REAR_LEFT_RIGHT |
DCA_EXSS_REAR_LEFT_RIGHT |
DCA_EXSS_REAR_HIGH_LEFT_RIGHT));
}
/**
@ -1543,7 +1576,8 @@ static int dca_exss_parse_asset_header(DCAContext *s)
if (!(extensions_mask & DCA_EXT_CORE))
av_log(s->avctx, AV_LOG_WARNING, "DTS core detection mismatch.\n");
if ((extensions_mask & DCA_CORE_EXTS) != s->core_ext_mask)
av_log(s->avctx, AV_LOG_WARNING, "DTS extensions detection mismatch (%d, %d)\n",
av_log(s->avctx, AV_LOG_WARNING,
"DTS extensions detection mismatch (%d, %d)\n",
extensions_mask & DCA_CORE_EXTS, s->core_ext_mask);
return 0;
@ -1720,7 +1754,7 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
s->core_ext_mask |= DCA_EXT_XCH;
/* extension amode should == 1, number of channels in extension */
/* extension amode(number of channels in extension) should be 1 */
/* AFAIK XCh is not used for more channels */
if ((ext_amode = get_bits(&s->gb, 4)) != 1) {
av_log(avctx, AV_LOG_ERROR, "XCh extension amode %d not"
@ -1731,12 +1765,11 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
/* much like core primary audio coding header */
dca_parse_audio_coding_header(s, s->xch_base_channel);
for (i = 0; i < (s->sample_blocks / 8); i++) {
for (i = 0; i < (s->sample_blocks / 8); i++)
if ((ret = dca_decode_block(s, s->xch_base_channel, i))) {
av_log(avctx, AV_LOG_ERROR, "error decoding XCh extension\n");
continue;
}
}
s->xch_present = 1;
break;
@ -1753,7 +1786,8 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
if (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + fsize96)
continue;
av_log(avctx, AV_LOG_DEBUG, "X96 extension found at %d bits\n", get_bits_count(&s->gb));
av_log(avctx, AV_LOG_DEBUG, "X96 extension found at %d bits\n",
get_bits_count(&s->gb));
skip_bits(&s->gb, 12);
av_log(avctx, AV_LOG_DEBUG, "FSIZE96 = %d bytes\n", fsize96);
av_log(avctx, AV_LOG_DEBUG, "REVNO = %d\n", get_bits(&s->gb, 4));
@ -1765,7 +1799,6 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31);
}
} else {
/* no supported extensions, skip the rest of the core substream */
skip_bits_long(&s->gb, core_ss_end - get_bits_count(&s->gb));
@ -1777,8 +1810,8 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
s->profile = FF_PROFILE_DTS_ES;
/* check for ExSS (HD part) */
if (s->dca_buffer_size - s->frame_size > 32
&& get_bits_long(&s->gb, 32) == DCA_HD_MARKER)
if (s->dca_buffer_size - s->frame_size > 32 &&
get_bits_long(&s->gb, 32) == DCA_HD_MARKER)
dca_exss_parse_header(s);
avctx->profile = s->profile;
@ -1868,9 +1901,8 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
/* update lfe history */
lfe_samples = 2 * s->lfe * (s->sample_blocks / 8);
for (i = 0; i < 2 * s->lfe * 4; i++) {
for (i = 0; i < 2 * s->lfe * 4; i++)
s->lfe_data[i] = s->lfe_data[i + lfe_samples];
}
*got_frame_ptr = 1;
*(AVFrame *) data = s->frame;
@ -1949,8 +1981,8 @@ AVCodec ff_dca_decoder = {
.close = dca_decode_end,
.long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"),
.capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
},
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.profiles = NULL_IF_CONFIG_SMALL(profiles),
};


@ -28,6 +28,7 @@
*/
#include <math.h>
#include "libavutil/mathematics.h"
#include "dct.h"
#include "dct32.h"
@ -161,7 +162,6 @@ static void ff_dct_calc_II_c(DCTContext *ctx, FFTSample *data)
float s = SIN(ctx, n, i);
data[i] = c * inr + s * ini;
data[i + 1] = next;
next += s * inr - c * ini;
@ -189,7 +189,6 @@ av_cold int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType inverse)
ff_init_ff_cos_tabs(nbits + 2);
s->costab = ff_cos_tabs[nbits + 2];
s->csc2 = av_malloc(n / 2 * sizeof(FFTSample));
if (ff_rdft_init(&s->rdft, nbits, inverse == DCT_III) < 0) {
@ -209,7 +208,8 @@ av_cold int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType inverse)
}
s->dct32 = ff_dct32_float;
if (HAVE_MMX) ff_dct_init_mmx(s);
if (HAVE_MMX)
ff_dct_init_mmx(s);
return 0;
}


@ -35,7 +35,7 @@
#define _GNU_SOURCE
#include <sched.h>
#endif
#if HAVE_GETSYSTEMINFO
#if HAVE_GETPROCESSAFFINITYMASK
#include <windows.h>
#endif
#if HAVE_SYSCTL
@ -172,10 +172,11 @@ static int get_logical_cpus(AVCodecContext *avctx)
if (!ret) {
nb_cpus = CPU_COUNT(&cpuset);
}
#elif HAVE_GETSYSTEMINFO
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
nb_cpus = sysinfo.dwNumberOfProcessors;
#elif HAVE_GETPROCESSAFFINITYMASK
DWORD_PTR proc_aff, sys_aff;
ret = GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff);
if (ret)
nb_cpus = av_popcount64(proc_aff);
#elif HAVE_SYSCTL && defined(HW_NCPU)
int mib[2] = { CTL_HW, HW_NCPU };
size_t len = sizeof(nb_cpus);


@ -516,9 +516,9 @@ static void halfpel_interpol(SnowContext *s, uint8_t *halfpel[4][4], AVFrame *fr
int ls= frame->linesize[p];
uint8_t *src= frame->data[p];
halfpel[1][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[2][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[3][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[1][p] = (uint8_t*) av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[2][p] = (uint8_t*) av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[3][p] = (uint8_t*) av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[0][p]= src;
for(y=0; y<h; y++){


@ -282,6 +282,77 @@ static void restore_median(uint8_t *src, int step, int stride,
}
}
/* UtVideo interlaced mode treats every two lines as a single one,
* so restoring function should take care of possible padding between
* two parts of the same "line".
*/
static void restore_median_il(uint8_t *src, int step, int stride,
int width, int height, int slices, int rmode)
{
int i, j, slice;
int A, B, C;
uint8_t *bsrc;
int slice_start, slice_height;
const int cmask = ~(rmode ? 3 : 1);
const int stride2 = stride << 1;
for (slice = 0; slice < slices; slice++) {
slice_start = ((slice * height) / slices) & cmask;
slice_height = ((((slice + 1) * height) / slices) & cmask) - slice_start;
slice_height >>= 1;
bsrc = src + slice_start * stride;
// first line - left neighbour prediction
bsrc[0] += 0x80;
A = bsrc[0];
for (i = step; i < width * step; i += step) {
bsrc[i] += A;
A = bsrc[i];
}
for (i = 0; i < width * step; i += step) {
bsrc[stride + i] += A;
A = bsrc[stride + i];
}
bsrc += stride2;
if (slice_height == 1)
continue;
// second line - first element has top predition, the rest uses median
C = bsrc[-stride2];
bsrc[0] += C;
A = bsrc[0];
for (i = step; i < width * step; i += step) {
B = bsrc[i - stride2];
bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
C = B;
A = bsrc[i];
}
for (i = 0; i < width * step; i += step) {
B = bsrc[i - stride];
bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C));
C = B;
A = bsrc[stride + i];
}
bsrc += stride2;
// the rest of lines use continuous median prediction
for (j = 2; j < slice_height; j++) {
for (i = 0; i < width * step; i += step) {
B = bsrc[i - stride2];
bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
C = B;
A = bsrc[i];
}
for (i = 0; i < width * step; i += step) {
B = bsrc[i - stride];
bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C));
C = B;
A = bsrc[i + stride];
}
bsrc += stride2;
}
}
}
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
@ -381,10 +452,18 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN)
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
restore_median(c->pic.data[i], 1, c->pic.linesize[i],
avctx->width >> !!i, avctx->height >> !!i,
c->slices, !i);
} else {
restore_median_il(c->pic.data[i], 1, c->pic.linesize[i],
avctx->width >> !!i,
avctx->height >> !!i,
c->slices, !i);
}
}
}
break;
case PIX_FMT_YUV422P:
@ -395,9 +474,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN)
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
restore_median(c->pic.data[i], 1, c->pic.linesize[i],
avctx->width >> !!i, avctx->height, c->slices, 0);
avctx->width >> !!i, avctx->height,
c->slices, 0);
} else {
restore_median_il(c->pic.data[i], 1, c->pic.linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
}
}
}
break;
}


@ -2235,7 +2235,7 @@ static int has_decode_delay_been_guessed(AVStream *st)
static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
{
AVCodec *codec;
int got_picture, ret = 0;
int got_picture = 1, ret = 0;
AVFrame picture;
AVPacket pkt = *avpkt;
@ -2248,7 +2248,8 @@ static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **option
return ret;
}
while (pkt.size > 0 && ret >= 0 &&
while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
ret >= 0 &&
(!has_codec_parameters(st->codec) ||
!has_decode_delay_been_guessed(st) ||
(!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
@ -2377,14 +2378,9 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
int i, count, ret, read_size, j;
AVStream *st;
AVPacket pkt1, *pkt;
AVDictionary *one_thread_opt = NULL;
int64_t old_offset = avio_tell(ic->pb);
int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
/* this function doesn't flush the decoders, so force thread count
* to 1 to fix behavior when thread count > number of frames in the file */
av_dict_set(&one_thread_opt, "threads", "1", 0);
for(i=0;i<ic->nb_streams;i++) {
AVCodec *codec;
st = ic->streams[i];
@ -2406,21 +2402,15 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
assert(!st->codec->codec);
codec = avcodec_find_decoder(st->codec->codec_id);
/* this function doesn't flush the decoders, so force thread count
* to 1 to fix behavior when thread count > number of frames in the file */
if (options)
av_dict_set(&options[i], "threads", "1", 0);
/* Ensure that subtitle_header is properly set. */
if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
&& codec && !st->codec->codec)
avcodec_open2(st->codec, codec, options ? &options[i] : &one_thread_opt);
avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
//try to just open decoders, in case this is enough to get parameters
if(!has_codec_parameters(st->codec)){
if (codec && !st->codec->codec)
avcodec_open2(st->codec, codec, options ? &options[i]
: &one_thread_opt);
avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
}
}
@ -2487,9 +2477,21 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
if (ret < 0) {
/* EOF or error*/
AVPacket empty_pkt = { 0 };
int err;
av_init_packet(&empty_pkt);
ret = -1; /* we could not have all the codec parameters before EOF */
for(i=0;i<ic->nb_streams;i++) {
st = ic->streams[i];
/* flush the decoders */
while ((err = try_decode_frame(st, &empty_pkt,
(options && i < orig_nb_streams) ?
&options[i] : NULL)) >= 0)
if (has_codec_parameters(st->codec))
break;
if (!has_codec_parameters(st->codec)){
char buf[256];
avcodec_string(buf, sizeof(buf), st->codec, 0);
@ -2562,8 +2564,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
least one frame of codec data, this makes sure the codec initializes
the channel configuration and does not only trust the values from the container.
*/
try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i]
: &one_thread_opt);
try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
st->codec_info_nb_frames++;
count++;
@ -2689,7 +2690,6 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
ic->streams[i]->codec->thread_count = 0;
av_freep(&ic->streams[i]->info);
}
av_dict_free(&one_thread_opt);
return ret;
}
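The EOF handling added above relies on the standard libavcodec drain behaviour of that API generation: a decoder with delayed frames keeps returning output when fed empty packets. A minimal, illustrative helper for the video case (not part of the patch; it only assumes avcodec_decode_video2() as it existed at the time):

#include "libavcodec/avcodec.h"

static void drain_video_decoder(AVCodecContext *avctx, AVFrame *frame)
{
    AVPacket empty_pkt = { 0 };   /* data = NULL, size = 0 */
    int got_frame;

    av_init_packet(&empty_pkt);
    do {
        got_frame = 0;
        avcodec_decode_video2(avctx, frame, &got_frame, &empty_pkt);
    } while (got_frame);          /* stop once no more delayed frames come out */
}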


@ -154,7 +154,7 @@
*/
#define LIBAVUTIL_VERSION_MAJOR 51
#define LIBAVUTIL_VERSION_MINOR 33
#define LIBAVUTIL_VERSION_MINOR 34
#define LIBAVUTIL_VERSION_MICRO 100
#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \


@ -220,6 +220,16 @@ static av_always_inline av_const int av_popcount_c(uint32_t x)
return (x + (x >> 16)) & 0x3F;
}
/**
* Count number of bits set to one in x
* @param x value to count bits of
* @return the number of bits set to one in x
*/
static av_always_inline av_const int av_popcount64_c(uint64_t x)
{
return av_popcount(x) + av_popcount(x >> 32);
}
#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
@ -385,3 +395,6 @@ static av_always_inline av_const int av_popcount_c(uint32_t x)
#ifndef av_popcount
# define av_popcount av_popcount_c
#endif
#ifndef av_popcount64
# define av_popcount64 av_popcount64_c
#endif


@ -1,6 +1,9 @@
FATE_SCREEN += fate-cscd
fate-cscd: CMD = framecrc -i $(SAMPLES)/CSCD/sample_video.avi -an -vsync 0 -pix_fmt rgb24
FATE_SCREEN += fate-dxtory
fate-dxtory: CMD = framecrc -i $(SAMPLES)/dxtory/dxtory_mic.avi
FATE_SCREEN += fate-fraps-v0
fate-fraps-v0: CMD = framecrc -i $(SAMPLES)/fraps/Griffin_Ragdoll01-partial.avi

tests/ref/fate/dxtory (new file)

@ -0,0 +1 @@
0, 0, 1382400, 0x44373645