Merge remote-tracking branch 'qatar/master'

* qatar/master:
  mpegenc: use avctx->slices as number of slices
  v410enc: fix undefined signed left shift caused by integer promotion
  Release notes: mention cleaned up header includes
  fix Changelog file
  Fix a bunch of typos.
  Drop some pointless void* return value casts from av_malloc() invocations.
  wavpack: fix typos in previous cosmetic clean-up commit
  wavpack: cosmetics: K&R pretty-printing
  avconv: remove the 'codec framerate is different from stream' warning
  wavpack: determine sample_fmt before requesting a buffer
  bmv audio: implement new audio decoding API
  mpegaudiodec: skip all channels when skipping granules
  mpegenc: simplify muxrate calculation

Conflicts:
	Changelog
	avconv.c
	doc/RELEASE_NOTES
	libavcodec/h264.c
	libavcodec/mpeg12.c
	libavcodec/mpegaudiodec.c
	libavcodec/mpegvideo.c
	libavformat/mpegenc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2012-01-03 02:25:56 +01:00
commit 7d8f115843
23 changed files with 454 additions and 414 deletions


@@ -13,8 +13,6 @@ version next:
- tinterlace video filter
- astreamsync audio filter
- amerge audio filter
- Indeo 4 decoder
- SMJPEG demuxer
- Automatic thread count based on detection number of (available) CPU cores
- y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
@@ -153,6 +151,8 @@ easier to use. The changes are:
- Dxtory capture format decoder
- cellauto source
- Simple segmenting muxer
- Indeo 4 decoder
- SMJPEG demuxer
version 0.8:


@@ -3196,7 +3196,7 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
*/
static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
{
int i, rfps, rfps_base;
int i;
for (i = 0; i < ic->nb_streams; i++) {
AVStream *st = ic->streams[i];
@@ -3225,19 +3225,10 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
case AVMEDIA_TYPE_VIDEO:
if(!ist->dec)
ist->dec = avcodec_find_decoder(dec->codec_id);
rfps = ic->streams[i]->r_frame_rate.num;
rfps_base = ic->streams[i]->r_frame_rate.den;
if (dec->lowres) {
dec->flags |= CODEC_FLAG_EMU_EDGE;
}
if (dec->time_base.den != rfps * dec->ticks_per_frame || dec->time_base.num != rfps_base) {
av_log(NULL, AV_LOG_INFO,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
i, (float)dec->time_base.den / dec->time_base.num, dec->time_base.den, dec->time_base.num,
(float)rfps / rfps_base, rfps, rfps_base);
}
if (o->video_disable)
st->discard = AVDISCARD_ALL;
else if (video_discard)


@@ -3463,7 +3463,7 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
*/
static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
{
int i, rfps, rfps_base;
int i;
char *next, *codec_tag = NULL;
for (i = 0; i < ic->nb_streams; i++) {
@@ -3501,19 +3501,10 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
case AVMEDIA_TYPE_VIDEO:
if(!ist->dec)
ist->dec = avcodec_find_decoder(dec->codec_id);
rfps = ic->streams[i]->r_frame_rate.num;
rfps_base = ic->streams[i]->r_frame_rate.den;
if (dec->lowres) {
dec->flags |= CODEC_FLAG_EMU_EDGE;
}
if (dec->time_base.den != rfps * dec->ticks_per_frame || dec->time_base.num != rfps_base) {
av_log(NULL, AV_LOG_INFO,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
i, (float)dec->time_base.den / dec->time_base.num, dec->time_base.den, dec->time_base.num,
(float)rfps / rfps_base, rfps, rfps_base);
}
if (o->video_disable)
st->discard = AVDISCARD_ALL;
else if (video_discard)


@@ -285,12 +285,17 @@ static av_cold int decode_end(AVCodecContext *avctx)
return 0;
}
typedef struct BMVAudioDecContext {
AVFrame frame;
} BMVAudioDecContext;
static const int bmv_aud_mults[16] = {
16512, 8256, 4128, 2064, 1032, 516, 258, 192, 129, 88, 64, 56, 48, 40, 36, 32
};
static av_cold int bmv_aud_decode_init(AVCodecContext *avctx)
{
BMVAudioDecContext *c = avctx->priv_data;
if (avctx->channels != 2) {
av_log(avctx, AV_LOG_INFO, "invalid number of channels\n");
@@ -299,17 +304,21 @@ static av_cold int bmv_aud_decode_init(AVCodecContext *avctx)
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avcodec_get_frame_defaults(&c->frame);
avctx->coded_frame = &c->frame;
return 0;
}
static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
AVPacket *avpkt)
static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
BMVAudioDecContext *c = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
int blocks = 0, total_blocks, i;
int out_size;
int16_t *output_samples = data;
int ret;
int16_t *output_samples;
int scale[2];
total_blocks = *buf++;
@@ -318,11 +327,14 @@ static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data, int *data_siz
total_blocks * 65 + 1, buf_size);
return AVERROR_INVALIDDATA;
}
out_size = total_blocks * 64 * sizeof(*output_samples);
if (*data_size < out_size) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
return AVERROR(EINVAL);
/* get output buffer */
c->frame.nb_samples = total_blocks * 32;
if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
output_samples = (int16_t *)c->frame.data[0];
for (blocks = 0; blocks < total_blocks; blocks++) {
uint8_t code = *buf++;
@@ -335,7 +347,9 @@ static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data, int *data_siz
}
}
*data_size = out_size;
*got_frame_ptr = 1;
*(AVFrame *)data = c->frame;
return buf_size;
}
@@ -354,7 +368,9 @@ AVCodec ff_bmv_audio_decoder = {
.name = "bmv_audio",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_BMV_AUDIO,
.priv_data_size = sizeof(BMVAudioDecContext),
.init = bmv_aud_decode_init,
.decode = bmv_aud_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Discworld II BMV audio"),
};

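The hunk above moves bmv_audio to the new audio decoding API: instead of writing into a caller-supplied buffer and reporting the byte count through *data_size, the decoder keeps an AVFrame in its private context, requests storage with avctx->get_buffer(), and returns the frame through the opaque data pointer while setting *got_frame_ptr. A minimal sketch of the same pattern follows; the pass-through codec, MyAudioContext, and the S16 copy are assumptions for illustration only, not part of this commit.

#include <string.h>
#include "avcodec.h"

typedef struct MyAudioContext {
    AVFrame frame;                      /* frame handed back to the caller */
} MyAudioContext;

static av_cold int my_aud_decode_init(AVCodecContext *avctx)
{
    MyAudioContext *c = avctx->priv_data;

    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    avcodec_get_frame_defaults(&c->frame);
    avctx->coded_frame = &c->frame;
    return 0;
}

static int my_aud_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
{
    MyAudioContext *c = avctx->priv_data;
    int ret;

    /* assumed toy payload: packed signed 16-bit samples */
    c->frame.nb_samples = avpkt->size / (2 * avctx->channels);
    if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    memcpy(c->frame.data[0], avpkt->data,
           c->frame.nb_samples * 2 * avctx->channels);

    *got_frame_ptr   = 1;
    *(AVFrame *)data = c->frame;
    return avpkt->size;                 /* bytes consumed from the packet */
}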

@@ -50,7 +50,11 @@ static void decode_mb(MpegEncContext *s, int ref){
h->mb_xy= s->mb_x + s->mb_y*s->mb_stride;
memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
assert(ref>=0);
if(ref >= h->ref_count[0]) //FIXME it is posible albeit uncommon that slice references differ between slices, we take the easy approuch and ignore it for now. If this turns out to have any relevance in practice then correct remapping should be added
/* FIXME: It is posible albeit uncommon that slice references
* differ between slices. We take the easy approach and ignore
* it for now. If this turns out to have any relevance in
* practice then correct remapping should be added. */
if (ref >= h->ref_count[0])
ref=0;
fill_rectangle(&s->current_picture.f.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);


@@ -1315,7 +1315,6 @@ int ff_h264_frame_start(H264Context *h){
MpegEncContext * const s = &h->s;
int i;
const int pixel_shift = h->pixel_shift;
int thread_count = (s->avctx->active_thread_type & FF_THREAD_SLICE) ? s->avctx->thread_count : 1;
if(MPV_frame_start(s, s->avctx) < 0)
return -1;
@@ -1345,7 +1344,7 @@ int ff_h264_frame_start(H264Context *h){
/* can't be in alloc_tables because linesize isn't known there.
* FIXME: redo bipred weight to not require extra buffer? */
for(i = 0; i < thread_count; i++)
for(i = 0; i < s->slice_context_count; i++)
if(h->thread_context[i] && !h->thread_context[i]->s.obmc_scratchpad)
h->thread_context[i]->s.obmc_scratchpad = av_malloc(16*6*s->linesize);
@@ -2852,7 +2851,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
return -1;
}
} else {
for(i = 1; i < s->avctx->thread_count; i++) {
for(i = 1; i < s->slice_context_count; i++) {
H264Context *c;
c = h->thread_context[i] = av_malloc(sizeof(H264Context));
memcpy(c, h->s.thread_context[i], sizeof(MpegEncContext));
@@ -2866,7 +2865,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
clone_tables(c, h, i);
}
for(i = 0; i < s->avctx->thread_count; i++)
for(i = 0; i < s->slice_context_count; i++)
if (context_init(h->thread_context[i]) < 0) {
av_log(h->s.avctx, AV_LOG_ERROR, "context_init() failed.\n");
return -1;
@@ -3782,7 +3781,10 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
h->nal_unit_type= 0;
h->max_contexts = (HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_SLICE)) ? avctx->thread_count : 1;
if(!s->slice_context_count)
s->slice_context_count= 1;
h->max_contexts = s->slice_context_count;
if(!(s->flags2 & CODEC_FLAG2_CHUNKS)){
h->current_slice = 0;
if (!s->first_field)


@@ -885,7 +885,7 @@ static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
uint8_t *src_m1, *src_0, *src_p1, *src_p2;
int y;
uint8_t *buf;
buf = (uint8_t*)av_malloc(width);
buf = av_malloc(width);
src_m1 = src1;
memcpy(buf,src_m1,width);


@@ -2485,7 +2485,9 @@ static int decode_chunks(AVCodecContext *avctx,
}
if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)) {
int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
int threshold = (s2->mb_height * s->slice_count +
s2->slice_context_count / 2) /
s2->slice_context_count;
av_assert0(avctx->thread_count > 1);
if (threshold <= mb_y) {
MpegEncContext *thread_context = s2->thread_context[s->slice_count];


@@ -1427,6 +1427,7 @@ static int mp_decode_layer3(MPADecodeContext *s)
}
if (!s->adu_mode) {
int skip;
const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
assert((get_bits_count(&s->gb) & 7) == 0);
/* now we get bits from the main_data_begin offset */


@@ -638,6 +638,8 @@ void MPV_common_defaults(MpegEncContext *s)
s->picture_range_start = 0;
s->picture_range_end = MAX_PICTURE_COUNT;
s->slice_context_count = 1;
}
/**
@@ -656,12 +658,14 @@ void MPV_decode_defaults(MpegEncContext *s)
*/
av_cold int MPV_common_init(MpegEncContext *s)
{
int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
threads = (s->encoding ||
(HAVE_THREADS &&
s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
int nb_slices = (HAVE_THREADS &&
s->avctx->active_thread_type & FF_THREAD_SLICE) ?
s->avctx->thread_count : 1;
if (s->encoding && s->avctx->slices)
nb_slices = s->avctx->slices;
if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
s->mb_height = (s->height + 31) / 32 * 2;
else if (s->codec_id != CODEC_ID_H264)
@@ -673,14 +677,15 @@ av_cold int MPV_common_init(MpegEncContext *s)
return -1;
}
if ((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
(s->avctx->thread_count > MAX_THREADS ||
(s->avctx->thread_count > s->mb_height && s->mb_height))) {
int max_threads = FFMIN(MAX_THREADS, s->mb_height);
av_log(s->avctx, AV_LOG_WARNING,
"too many threads (%d), reducing to %d\n",
s->avctx->thread_count, max_threads);
threads = max_threads;
if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
int max_slices;
if (s->mb_height)
max_slices = FFMIN(MAX_THREADS, s->mb_height);
else
max_slices = MAX_THREADS;
av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
" reducing to %d\n", nb_slices, max_slices);
nb_slices = max_slices;
}
if ((s->width || s->height) &&
@@ -831,17 +836,20 @@ av_cold int MPV_common_init(MpegEncContext *s)
s->context_initialized = 1;
s->thread_context[0] = s;
if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
for (i = 1; i < threads; i++) {
// if (s->width && s->height) {
if (nb_slices > 1) {
for (i = 1; i < nb_slices; i++) {
s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
}
for (i = 0; i < threads; i++) {
for (i = 0; i < nb_slices; i++) {
if (init_duplicate_context(s->thread_context[i], s) < 0)
goto fail;
s->thread_context[i]->start_mb_y = (s->mb_height*(i ) + s->avctx->thread_count / 2) / s->avctx->thread_count;
s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count / 2) / s->avctx->thread_count;
s->thread_context[i]->start_mb_y =
(s->mb_height * (i) + nb_slices / 2) / nb_slices;
s->thread_context[i]->end_mb_y =
(s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
}
} else {
if (init_duplicate_context(s, s) < 0)
@@ -849,6 +857,8 @@ av_cold int MPV_common_init(MpegEncContext *s)
s->start_mb_y = 0;
s->end_mb_y = s->mb_height;
}
s->slice_context_count = nb_slices;
// }
return 0;
fail:
@@ -861,13 +871,14 @@ void MPV_common_end(MpegEncContext *s)
{
int i, j, k;
if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_SLICE)) {
for (i = 0; i < s->avctx->thread_count; i++) {
if (s->slice_context_count > 1) {
for (i = 0; i < s->slice_context_count; i++) {
free_duplicate_context(s->thread_context[i]);
}
for (i = 1; i < s->avctx->thread_count; i++) {
for (i = 1; i < s->slice_context_count; i++) {
av_freep(&s->thread_context[i]);
}
s->slice_context_count = 1;
} else free_duplicate_context(s);
av_freep(&s->parse_context.buffer);

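For what it's worth, the new start_mb_y/end_mb_y assignment above is simply a rounded partition of the macroblock rows across nb_slices slice contexts. A small standalone illustration of how the rows split; the concrete numbers are arbitrary and not taken from the commit:

#include <stdio.h>

int main(void)
{
    int mb_height = 45, nb_slices = 4, i;   /* example values only */

    for (i = 0; i < nb_slices; i++) {
        int start_mb_y = (mb_height * (i)     + nb_slices / 2) / nb_slices;
        int end_mb_y   = (mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        printf("slice %d: mb rows [%d, %d)\n", i, start_mb_y, end_mb_y);
    }
    return 0;   /* prints [0,11) [11,23) [23,34) [34,45) */
}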

@@ -270,6 +270,7 @@ typedef struct MpegEncContext {
int start_mb_y; ///< start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
int end_mb_y; ///< end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
struct MpegEncContext *thread_context[MAX_THREADS];
int slice_context_count; ///< number of used thread_contexts
/**
* copy of the previous picture structure.


@@ -1435,7 +1435,8 @@ int MPV_encode_picture(AVCodecContext *avctx,
{
MpegEncContext *s = avctx->priv_data;
AVFrame *pic_arg = data;
int i, stuffing_count, context_count = avctx->thread_count;
int i, stuffing_count;
int context_count = s->slice_context_count;
for (i = 0; i < context_count; i++) {
int start_y = s->thread_context[i]->start_mb_y;
@@ -3072,7 +3073,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
{
int i;
int bits;
int context_count = s->avctx->thread_count;
int context_count = s->slice_context_count;
s->picture_number = picture_number;


@@ -507,7 +507,7 @@ static const AVOption options[]={
{"cholesky", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = AV_LPC_TYPE_CHOLESKY }, INT_MIN, INT_MAX, A|E, "lpc_type"},
{"lpc_passes", "deprecated, use flac-specific options", OFFSET(lpc_passes), AV_OPT_TYPE_INT, {.dbl = -1 }, INT_MIN, INT_MAX, A|E},
#endif
{"slices", "number of slices, used in parallelized decoding", OFFSET(slices), AV_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, V|E},
{"slices", "number of slices, used in parallelized encoding", OFFSET(slices), AV_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, V|E},
{"thread_type", "select multithreading type", OFFSET(thread_type), AV_OPT_TYPE_FLAGS, {.dbl = FF_THREAD_SLICE|FF_THREAD_FRAME }, 0, INT_MAX, V|E|D, "thread_type"},
{"slice", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_THREAD_SLICE }, INT_MIN, INT_MAX, V|E|D, "thread_type"},
{"frame", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_THREAD_FRAME }, INT_MIN, INT_MAX, V|E|D, "thread_type"},


@@ -516,9 +516,9 @@ static void halfpel_interpol(SnowContext *s, uint8_t *halfpel[4][4], AVFrame *fr
int ls= frame->linesize[p];
uint8_t *src= frame->data[p];
halfpel[1][p]= (uint8_t*)av_malloc(ls * (h+2*EDGE_WIDTH)) + EDGE_WIDTH*(1+ls);
halfpel[2][p]= (uint8_t*)av_malloc(ls * (h+2*EDGE_WIDTH)) + EDGE_WIDTH*(1+ls);
halfpel[3][p]= (uint8_t*)av_malloc(ls * (h+2*EDGE_WIDTH)) + EDGE_WIDTH*(1+ls);
halfpel[1][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[2][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[3][p] = av_malloc(ls * (h + 2 * EDGE_WIDTH)) + EDGE_WIDTH * (1 + ls);
halfpel[0][p]= src;
for(y=0; y<h; y++){


@@ -67,7 +67,7 @@ static int v410_encode_frame(AVCodecContext *avctx, uint8_t *buf,
for (j = 0; j < avctx->width; j++) {
val = u[j] << 2;
val |= y[j] << 12;
val |= v[j] << 22;
val |= (uint32_t) v[j] << 22;
AV_WL32(dst, val);
dst += 4;
output_size += 4;

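The v410 cast above addresses the "undefined signed left shift caused by integer promotion" entry from the commit list: the 10-bit sample has an unsigned type narrower than int, so it is promoted to signed int before the shift, and any value of 0x200 or more shifted left by 22 lands in the sign bit, which is undefined behaviour in C. Casting to uint32_t first keeps the shift unsigned. A minimal standalone illustration, not code from the tree:

#include <stdint.h>

uint32_t pack_v(uint16_t v)           /* v holds a 10-bit sample */
{
    /* Undefined: v promotes to (signed) int, and for v >= 0x200 the
     * shift by 22 reaches bit 31 of that int. */
    /* return v << 22; */

    /* Defined: the operand is an unsigned 32-bit type before the shift. */
    return (uint32_t)v << 22;
}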

@@ -18,11 +18,13 @@
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#define BITSTREAM_READER_LE
#include "libavutil/audioconvert.h"
#include "avcodec.h"
#include "get_bits.h"
#include "unary.h"
#include "libavutil/audioconvert.h"
/**
* @file
@@ -217,10 +219,12 @@ static av_always_inline int wp_log2(int32_t val)
if (samples && in) { \
if ((samples ^ in) < 0) { \
weight -= delta; \
if(weight < -1024) weight = -1024; \
if (weight < -1024) \
weight = -1024; \
} else { \
weight += delta; \
if(weight > 1024) weight = 1024; \
if (weight > 1024) \
weight = 1024; \
} \
}
@@ -229,13 +233,13 @@ static av_always_inline int get_tail(GetBitContext *gb, int k)
{
int p, e, res;
if(k<1)return 0;
if (k < 1)
return 0;
p = av_log2(k);
e = (1 << (p + 1)) - k - 1;
res = p ? get_bits(gb, p) : 0;
if(res >= e){
if (res >= e)
res = (res << 1) - e + get_bits1(gb);
}
return res;
}
@@ -273,7 +277,8 @@ static void update_error_limit(WavpackFrameContext *ctx)
}
}
static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb, int channel, int *last)
static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb,
int channel, int *last)
{
int t, t2;
int sign, base, add, ret;
@@ -281,7 +286,8 @@ static int wv_get_value(WavpackFrameContext *ctx, GetBitContext *gb, int channel
*last = 0;
if((ctx->ch[0].median[0] < 2U) && (ctx->ch[1].median[0] < 2U) && !ctx->zero && !ctx->one){
if ((ctx->ch[0].median[0] < 2U) && (ctx->ch[1].median[0] < 2U) &&
!ctx->zero && !ctx->one) {
if (ctx->zeroes) {
ctx->zeroes--;
if (ctx->zeroes) {
@@ -391,7 +397,8 @@ error:
return 0;
}
static inline int wv_get_value_integer(WavpackFrameContext *s, uint32_t *crc, int S)
static inline int wv_get_value_integer(WavpackFrameContext *s, uint32_t *crc,
int S)
{
int bit;
@@ -437,26 +444,26 @@ static float wv_get_value_float(WavpackFrameContext *s, uint32_t *crc, int S)
if (sign)
S = -S;
if (S >= 0x1000000) {
if(s->got_extra_bits && get_bits1(&s->gb_extra_bits)){
if (s->got_extra_bits && get_bits1(&s->gb_extra_bits))
S = get_bits(&s->gb_extra_bits, 23);
}else{
else
S = 0;
}
exp = 255;
} else if (exp) {
int shift = 23 - av_log2(S);
exp = s->float_max_exp;
if(exp <= shift){
if (exp <= shift)
shift = --exp;
}
exp -= shift;
if (shift) {
S <<= shift;
if ((s->float_flag & WV_FLT_SHIFT_ONES) ||
(s->got_extra_bits && (s->float_flag & WV_FLT_SHIFT_SAME) && get_bits1(&s->gb_extra_bits)) ){
(s->got_extra_bits && (s->float_flag & WV_FLT_SHIFT_SAME) &&
get_bits1(&s->gb_extra_bits))) {
S |= (1 << shift) - 1;
} else if(s->got_extra_bits && (s->float_flag & WV_FLT_SHIFT_SENT)){
} else if (s->got_extra_bits &&
(s->float_flag & WV_FLT_SHIFT_SENT)) {
S |= get_bits(&s->gb_extra_bits, shift);
}
}
@@ -492,7 +499,8 @@ static void wv_reset_saved_context(WavpackFrameContext *s)
s->sc.crc = s->extra_sc.crc = 0xFFFFFFFF;
}
static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, void *dst, const int type)
static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb,
void *dst, const int type)
{
int i, j, count = 0;
int last, t;
@@ -508,9 +516,11 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
s->one = s->zero = s->zeroes = 0;
do {
L = wv_get_value(s, gb, 0, &last);
if(last) break;
if (last)
break;
R = wv_get_value(s, gb, 1, &last);
if(last) break;
if (last)
break;
for (i = 0; i < s->terms; i++) {
t = s->decorr[i].value;
if (t > 0) {
@@ -611,7 +621,8 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
return count * 2;
}
static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void *dst, const int type)
static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb,
void *dst, const int type)
{
int i, j, count = 0;
int last, t;
@@ -628,7 +639,8 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
do {
T = wv_get_value(s, gb, 0, &last);
S = 0;
if(last) break;
if (last)
break;
for (i = 0; i < s->terms; i++) {
t = s->decorr[i].value;
if (t > 8) {
@@ -646,7 +658,8 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
S = T + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10);
else
S = T + ((s->decorr[i].weightA * A + 512) >> 10);
if(A && T) s->decorr[i].weightA -= ((((T ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
if (A && T)
s->decorr[i].weightA -= ((((T ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
s->decorr[i].samplesA[j] = T = S;
}
pos = (pos + 1) & 7;
@@ -704,7 +717,8 @@ static av_cold int wavpack_decode_init(AVCodecContext *avctx)
else
avctx->sample_fmt = AV_SAMPLE_FMT_S32;
if (avctx->channels <= 2 && !avctx->channel_layout)
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO :
AV_CH_LAYOUT_MONO;
s->multichannel = avctx->channels > 2;
/* lavf demuxer does not provide extradata, Matroska stores 0x403
@@ -744,8 +758,8 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
WavpackFrameContext *s;
void *samples = data;
int samplecount;
int got_terms = 0, got_weights = 0, got_samples = 0, got_entropy = 0, got_bs = 0, got_float = 0;
int got_hybrid = 0;
int got_terms = 0, got_weights = 0, got_samples = 0,
got_entropy = 0, got_bs = 0, got_float = 0, got_hybrid = 0;
const uint8_t *orig_buf = buf;
const uint8_t *buf_end = buf + buf_size;
int i, j, id, size, ssize, weights, t;
@@ -783,13 +797,6 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
s->samples = wc->samples;
}
s->frame_flags = AV_RL32(buf); buf += 4;
if(s->frame_flags&0x80){
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
} else if((s->frame_flags&0x03) <= 1){
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
} else {
avctx->sample_fmt = AV_SAMPLE_FMT_S32;
}
bpp = av_get_bytes_per_sample(avctx->sample_fmt);
samples = (uint8_t*)samples + bpp * wc->ch_offset;
@@ -799,7 +806,8 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
s->hybrid = s->frame_flags & WV_HYBRID_MODE;
s->hybrid_bitrate = s->frame_flags & WV_HYBRID_BITRATE;
s->hybrid_maxclip = (1LL << ((((s->frame_flags & 0x03) + 1) << 3) - 1)) - 1;
s->post_shift = 8 * (bpp-1-(s->frame_flags&0x03)) + ((s->frame_flags >> 13) & 0x1f);
s->post_shift = 8 * (bpp - 1 - (s->frame_flags & 0x03)) +
((s->frame_flags >> 13) & 0x1f);
s->CRC = AV_RL32(buf); buf += 4;
if (wc->mkv_mode)
buf += 4; //skip block size;
@@ -816,7 +824,8 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
}
size <<= 1; // size is specified in words
ssize = size;
if(id & WP_IDF_ODD) size--;
if (id & WP_IDF_ODD)
size--;
if (size < 0) {
av_log(avctx, AV_LOG_ERROR, "Got incorrect block %02X with size %i\n", id, size);
break;
@@ -860,12 +869,14 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
t = (int8_t)(*buf++);
s->decorr[s->terms - i - 1].weightA = t << 3;
if (s->decorr[s->terms - i - 1].weightA > 0)
s->decorr[s->terms - i - 1].weightA += (s->decorr[s->terms - i - 1].weightA + 64) >> 7;
s->decorr[s->terms - i - 1].weightA +=
(s->decorr[s->terms - i - 1].weightA + 64) >> 7;
if (s->stereo_in) {
t = (int8_t)(*buf++);
s->decorr[s->terms - i - 1].weightB = t << 3;
if (s->decorr[s->terms - i - 1].weightB > 0)
s->decorr[s->terms - i - 1].weightB += (s->decorr[s->terms - i - 1].weightB + 64) >> 7;
s->decorr[s->terms - i - 1].weightB +=
(s->decorr[s->terms - i - 1].weightB + 64) >> 7;
}
}
got_weights = 1;
@@ -893,10 +904,9 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
} else {
for (j = 0; j < s->decorr[i].value; j++) {
s->decorr[i].samplesA[j] = wp_exp2(AV_RL16(buf)); buf += 2;
if(s->stereo_in){
if (s->stereo_in)
s->decorr[i].samplesB[j] = wp_exp2(AV_RL16(buf)); buf += 2;
}
}
t += s->decorr[i].value * 2 * (s->stereo_in + 1);
}
}
@@ -904,7 +914,8 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
break;
case WP_ID_ENTROPY:
if (size != 6 * (s->stereo_in + 1)) {
av_log(avctx, AV_LOG_ERROR, "Entropy vars size should be %i, got %i", 6 * (s->stereo_in + 1), size);
av_log(avctx, AV_LOG_ERROR, "Entropy vars size should be %i, "
"got %i", 6 * (s->stereo_in + 1), size);
buf += ssize;
continue;
}
@@ -981,7 +992,8 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
break;
case WP_ID_EXTRABITS:
if (size <= 4) {
av_log(avctx, AV_LOG_ERROR, "Invalid EXTRABITS, size = %i\n", size);
av_log(avctx, AV_LOG_ERROR, "Invalid EXTRABITS, size = %i\n",
size);
buf += size;
continue;
}
@@ -999,30 +1011,24 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
}
chan = *buf++;
switch (size - 2) {
case 0:
chmask = *buf;
break;
case 1:
chmask = AV_RL16(buf);
break;
case 2:
chmask = AV_RL24(buf);
break;
case 3:
chmask = AV_RL32(buf);
break;
case 0: chmask = *buf; break;
case 1: chmask = AV_RL16(buf); break;
case 2: chmask = AV_RL24(buf); break;
case 3: chmask = AV_RL32(buf); break;
case 5:
chan |= (buf[1] & 0xF) << 8;
chmask = AV_RL24(buf + 2);
break;
default:
av_log(avctx, AV_LOG_ERROR, "Invalid channel info size %d\n", size);
av_log(avctx, AV_LOG_ERROR, "Invalid channel info size %d\n",
size);
chan = avctx->channels;
chmask = avctx->channel_layout;
}
if (chan != avctx->channels) {
av_log(avctx, AV_LOG_ERROR, "Block reports total %d channels, decoder believes it's %d channels\n",
chan, avctx->channels);
av_log(avctx, AV_LOG_ERROR, "Block reports total %d channels, "
"decoder believes it's %d channels\n", chan,
avctx->channels);
return -1;
}
if (!avctx->channel_layout)
@@ -1032,7 +1038,8 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
default:
buf += size;
}
if(id & WP_IDF_ODD) buf++;
if (id & WP_IDF_ODD)
buf++;
}
if (!got_terms) {
@@ -1147,7 +1154,7 @@ static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
WavpackContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
int frame_size, ret;
int frame_size, ret, frame_flags;
int samplecount = 0;
s->block = 0;
@@ -1156,11 +1163,15 @@ static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
/* determine number of samples */
if (s->mkv_mode) {
s->samples = AV_RL32(buf); buf += 4;
frame_flags = AV_RL32(buf);
} else {
if (s->multichannel)
if (s->multichannel) {
s->samples = AV_RL32(buf + 4);
else
frame_flags = AV_RL32(buf + 8);
} else {
s->samples = AV_RL32(buf);
frame_flags = AV_RL32(buf + 4);
}
}
if (s->samples <= 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of samples: %d\n",
@@ -1168,6 +1179,14 @@ static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
return AVERROR(EINVAL);
}
if (frame_flags & 0x80) {
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
} else if ((frame_flags & 0x03) <= 1) {
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
} else {
avctx->sample_fmt = AV_SAMPLE_FMT_S32;
}
/* get output buffer */
s->frame.nb_samples = s->samples;
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
@@ -1188,13 +1207,14 @@ static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
}
}
if (frame_size < 0 || frame_size > buf_size) {
av_log(avctx, AV_LOG_ERROR, "Block %d has invalid size (size %d vs. %d bytes left)\n",
s->block, frame_size, buf_size);
av_log(avctx, AV_LOG_ERROR, "Block %d has invalid size (size %d "
"vs. %d bytes left)\n", s->block, frame_size, buf_size);
wavpack_decode_flush(avctx);
return -1;
}
if((samplecount = wavpack_decode_block(avctx, s->block, s->frame.data[0],
got_frame_ptr, buf, frame_size)) < 0) {
if ((samplecount = wavpack_decode_block(avctx, s->block,
s->frame.data[0], got_frame_ptr,
buf, frame_size)) < 0) {
wavpack_decode_flush(avctx);
return -1;
}


@@ -606,7 +606,7 @@ static int mms_read(URLContext *h, uint8_t *buf, int size)
// copy the data to the packet buffer.
result = ff_mms_read_data(mms, buf, size);
if (result == 0) {
av_dlog(NULL, "read asf media paket size is zero!\n");
av_dlog(NULL, "Read ASF media packet size is zero!\n");
break;
}
}


@@ -429,7 +429,7 @@ static int mpeg_mux_init(AVFormatContext *ctx)
if (!s->mux_rate) {
/* we increase slightly the bitrate to take into account the
headers. XXX: compute it exactly */
bitrate += bitrate*5LL/100;
bitrate += bitrate / 20;
bitrate += 10000;
s->mux_rate = (bitrate + (8 * 50) - 1) / (8 * 50);
}


@@ -355,7 +355,7 @@ static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size, int
int i;
/* XXX: suppress this malloc */
buf1= (uint8_t*) av_malloc( size * sizeof(uint8_t) );
buf1 = av_malloc(size * sizeof(uint8_t));
write_packet_header(s, stream, size, !!(flags & AV_PKT_FLAG_KEY));


@@ -36,8 +36,8 @@
int main(int argc, char **argv)
{
int i, funcNum;
uint8_t *srcBuffer= (uint8_t*)av_malloc(SIZE);
uint8_t *dstBuffer= (uint8_t*)av_malloc(SIZE);
uint8_t *srcBuffer = av_malloc(SIZE);
uint8_t *dstBuffer = av_malloc(SIZE);
int failedNum=0;
int passedNum=0;


@@ -62,13 +62,13 @@ fi
if [ -n "$do_mpeg2thread" ] ; then
# mpeg2 encoding interlaced
do_video_encoding mpeg2thread.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 2"
do_video_encoding mpeg2thread.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 2 -slices 2"
do_video_decoding
fi
if [ -n "$do_mpeg2thread_ilace" ]; then
# mpeg2 encoding interlaced using intra vlc
do_video_encoding mpeg2threadivlc.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -flags2 +ivlc -threads 2"
do_video_encoding mpeg2threadivlc.mpg "-qscale 10 -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -flags2 +ivlc -threads 2 -slices 2"
do_video_decoding
# mpeg2 encoding interlaced
@@ -158,7 +158,7 @@ do_video_decoding
fi
if [ -n "$do_mpeg4thread" ] ; then
do_video_encoding mpeg4-thread.avi "-b 500k -flags +mv4+part+aic -trellis 1 -mbd bits -ps 200 -bf 2 -an -vcodec mpeg4 -threads 2"
do_video_encoding mpeg4-thread.avi "-b 500k -flags +mv4+part+aic -trellis 1 -mbd bits -ps 200 -bf 2 -an -vcodec mpeg4 -threads 2 -slices 2"
do_video_decoding
fi


@@ -67,7 +67,7 @@ $EGREP $OPT '^\+ *(const *|)static' $*| $EGREP --color=always '[^=]= *(0|NULL)[^
cat $TMP
hiegrep '# *ifdef * (HAVE|CONFIG)_' 'ifdefs that should be #if' $*
hiegrep '\b(awnser|cant|dont|wont|usefull|successfull|occured|teh|alot|wether|skiped|heigth|informations|colums|loosy|loosing|seperate|preceed)\b' 'common typos' $*
hiegrep '\b(awnser|cant|dont|wont|usefull|successfull|occured|teh|alot|wether|skiped|heigth|informations|colums|loosy|loosing|seperate|preceed|upto|paket)\b' 'common typos' $*
hiegrep 'av_log\( *NULL' 'Missing context in av_log' $*
hiegrep '[^sn]printf' 'Please use av_log' $*