From 0801b5979b118f0f47443e4561d0fa977695d263 Mon Sep 17 00:00:00 2001 From: Justin Ruggles Date: Mon, 17 Sep 2012 16:13:24 -0400 Subject: [PATCH 1/9] binkaudio: use a different value for the coefficient scale for the DCT codec Eliminates the need for vector_fmul_scalar() in each frame. --- libavcodec/binkaudio.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c index 31a6a7caba..915e7aa171 100644 --- a/libavcodec/binkaudio.c +++ b/libavcodec/binkaudio.c @@ -47,7 +47,6 @@ static float quant_table[96]; typedef struct { AVFrame frame; GetBitContext gb; - DSPContext dsp; FmtConvertContext fmt_conv; int version_b; ///< Bink version 'b' int first; @@ -79,7 +78,6 @@ static av_cold int decode_init(AVCodecContext *avctx) int i; int frame_len_bits; - ff_dsputil_init(&s->dsp, avctx); ff_fmt_convert_init(&s->fmt_conv, avctx); /* determine frame length */ @@ -112,7 +110,10 @@ static av_cold int decode_init(AVCodecContext *avctx) s->overlap_len = s->frame_len / 16; s->block_size = (s->frame_len - s->overlap_len) * s->channels; sample_rate_half = (sample_rate + 1) / 2; - s->root = 2.0 / sqrt(s->frame_len); + if (avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT) + s->root = 2.0 / sqrt(s->frame_len); + else + s->root = s->frame_len / sqrt(s->frame_len); for (i = 0; i < 96; i++) { /* constant is result of 0.066399999/log10(M_E) */ quant_table[i] = expf(i * 0.15289164787221953823f) * s->root; @@ -259,7 +260,6 @@ static int decode_block(BinkAudioContext *s, int16_t *out, int use_dct) if (CONFIG_BINKAUDIO_DCT_DECODER && use_dct) { coeffs[0] /= 0.5; s->trans.dct.dct_calc(&s->trans.dct, coeffs); - s->dsp.vector_fmul_scalar(coeffs, coeffs, s->frame_len / 2, s->frame_len); } else if (CONFIG_BINKAUDIO_RDFT_DECODER) s->trans.rdft.rdft_calc(&s->trans.rdft, coeffs); From 7bfd1766d1c18f07b0a2dd042418a874d49ea60d Mon Sep 17 00:00:00 2001 From: Justin Ruggles Date: Sun, 26 Aug 2012 20:41:45 -0400 Subject: [PATCH 2/9] binkaudio: use float sample format Use planar for DCT codec, interleaved for RDFT codec. 
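The two binkaudio changes above share one idea: every constant post-transform factor (the DCT's N/2 scale removed in patch 1, and the 1/32768 int16 scale once the decoder outputs floats in patch 2) is folded into the root value used to build quant_table[] once at init, so no per-frame vector_fmul_scalar() or float_to_int16 pass is needed. A minimal self-contained sketch of that folding, assuming a 512-sample frame and using illustrative names rather than the decoder's own code:

    /* Sketch only: fold constant post-transform scales into the quant table. */
    #include <math.h>
    #include <stdio.h>

    #define FRAME_LEN 512
    #define NUM_QUANT 96

    static float quant_table[NUM_QUANT];

    /* Built once at init; "root" carries the transform scale
     * (and, after patch 2, the 1/32768 int16->float scale as well). */
    static void build_quant_table(double root)
    {
        for (int i = 0; i < NUM_QUANT; i++)
            quant_table[i] = expf(i * 0.15289164787221953823f) * root;
    }

    int main(void)
    {
        /* RDFT variant: 2/sqrt(N); DCT variant: N/sqrt(N), i.e. the old
         * 2/sqrt(N) root times the N/2 factor that used to be applied to
         * every coefficient after each DCT. */
        double rdft_root = 2.0 / (sqrt(FRAME_LEN) * 32768.0);
        double dct_root  = FRAME_LEN / (sqrt(FRAME_LEN) * 32768.0);

        build_quant_table(dct_root);
        printf("dct  quant[10] = %g\n", quant_table[10]);

        build_quant_table(rdft_root);
        printf("rdft quant[10] = %g\n", quant_table[10]);
        return 0;
    }

Built once, the table already carries the scale, so the per-frame work reduces to multiplying each coefficient by its band's quantizer.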
--- libavcodec/binkaudio.c | 56 ++++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 35 deletions(-) diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c index 915e7aa171..957af79c03 100644 --- a/libavcodec/binkaudio.c +++ b/libavcodec/binkaudio.c @@ -47,7 +47,6 @@ static float quant_table[96]; typedef struct { AVFrame frame; GetBitContext gb; - FmtConvertContext fmt_conv; int version_b; ///< Bink version 'b' int first; int channels; @@ -58,10 +57,7 @@ typedef struct { unsigned int *bands; float root; DECLARE_ALIGNED(32, FFTSample, coeffs)[BINK_BLOCK_MAX_SIZE]; - DECLARE_ALIGNED(16, int16_t, previous)[BINK_BLOCK_MAX_SIZE / 16]; ///< coeffs from previous audio block - DECLARE_ALIGNED(16, int16_t, current)[BINK_BLOCK_MAX_SIZE / 16]; - float *coeffs_ptr[MAX_CHANNELS]; ///< pointers to the coeffs arrays for float_to_int16_interleave - float *prev_ptr[MAX_CHANNELS]; ///< pointers to the overlap points in the coeffs array + float previous[MAX_CHANNELS][BINK_BLOCK_MAX_SIZE / 16]; ///< coeffs from previous audio block uint8_t *packet_buffer; union { RDFTContext rdft; @@ -78,8 +74,6 @@ static av_cold int decode_init(AVCodecContext *avctx) int i; int frame_len_bits; - ff_fmt_convert_init(&s->fmt_conv, avctx); - /* determine frame length */ if (avctx->sample_rate < 22050) { frame_len_bits = 9; @@ -98,12 +92,14 @@ static av_cold int decode_init(AVCodecContext *avctx) if (avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT) { // audio is already interleaved for the RDFT format variant + avctx->sample_fmt = AV_SAMPLE_FMT_FLT; sample_rate *= avctx->channels; s->channels = 1; if (!s->version_b) frame_len_bits += av_log2(avctx->channels); } else { s->channels = avctx->channels; + avctx->sample_fmt = AV_SAMPLE_FMT_FLTP; } s->frame_len = 1 << frame_len_bits; @@ -111,9 +107,9 @@ static av_cold int decode_init(AVCodecContext *avctx) s->block_size = (s->frame_len - s->overlap_len) * s->channels; sample_rate_half = (sample_rate + 1) / 2; if (avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT) - s->root = 2.0 / sqrt(s->frame_len); + s->root = 2.0 / (sqrt(s->frame_len) * 32768.0); else - s->root = s->frame_len / sqrt(s->frame_len); + s->root = s->frame_len / (sqrt(s->frame_len) * 32768.0); for (i = 0; i < 96; i++) { /* constant is result of 0.066399999/log10(M_E) */ quant_table[i] = expf(i * 0.15289164787221953823f) * s->root; @@ -135,12 +131,6 @@ static av_cold int decode_init(AVCodecContext *avctx) s->bands[s->num_bands] = s->frame_len; s->first = 1; - avctx->sample_fmt = AV_SAMPLE_FMT_S16; - - for (i = 0; i < s->channels; i++) { - s->coeffs_ptr[i] = s->coeffs + i * s->frame_len; - s->prev_ptr[i] = s->coeffs_ptr[i] + s->frame_len - s->overlap_len; - } if (CONFIG_BINKAUDIO_RDFT_DECODER && avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT) ff_rdft_init(&s->trans.rdft, frame_len_bits, DFT_C2R); @@ -179,7 +169,7 @@ static const uint8_t rle_length_tab[16] = { * @param[out] out Output buffer (must contain s->block_size elements) * @return 0 on success, negative error code on failure */ -static int decode_block(BinkAudioContext *s, int16_t *out, int use_dct) +static int decode_block(BinkAudioContext *s, float **out, int use_dct) { int ch, i, j, k; float q, quant[25]; @@ -190,7 +180,8 @@ static int decode_block(BinkAudioContext *s, int16_t *out, int use_dct) skip_bits(gb, 2); for (ch = 0; ch < s->channels; ch++) { - FFTSample *coeffs = s->coeffs_ptr[ch]; + FFTSample *coeffs = out[ch]; + if (s->version_b) { if (get_bits_left(gb) < 64) return AVERROR_INVALIDDATA; @@ -265,24 +256,19 @@ static int 
decode_block(BinkAudioContext *s, int16_t *out, int use_dct) s->trans.rdft.rdft_calc(&s->trans.rdft, coeffs); } - s->fmt_conv.float_to_int16_interleave(s->current, - (const float **)s->prev_ptr, - s->overlap_len, s->channels); - s->fmt_conv.float_to_int16_interleave(out, (const float **)s->coeffs_ptr, - s->frame_len - s->overlap_len, - s->channels); - - if (!s->first) { + for (ch = 0; ch < s->channels; ch++) { + int j; int count = s->overlap_len * s->channels; - int shift = av_log2(count); - for (i = 0; i < count; i++) { - out[i] = (s->previous[i] * (count - i) + out[i] * i) >> shift; + if (!s->first) { + j = ch; + for (i = 0; i < s->overlap_len; i++, j += s->channels) + out[ch][i] = (s->previous[ch][i] * (count - j) + + out[ch][i] * j) / count; } + memcpy(s->previous[ch], &out[ch][s->frame_len - s->overlap_len], + s->overlap_len * sizeof(*s->previous[ch])); } - memcpy(s->previous, s->current, - s->overlap_len * s->channels * sizeof(*s->previous)); - s->first = 0; return 0; @@ -311,7 +297,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { BinkAudioContext *s = avctx->priv_data; - int16_t *samples; GetBitContext *gb = &s->gb; int ret, consumed = 0; @@ -339,19 +324,20 @@ static int decode_frame(AVCodecContext *avctx, void *data, } /* get output buffer */ - s->frame.nb_samples = s->block_size / avctx->channels; + s->frame.nb_samples = s->frame_len; if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } - samples = (int16_t *)s->frame.data[0]; - if (decode_block(s, samples, avctx->codec->id == AV_CODEC_ID_BINKAUDIO_DCT)) { + if (decode_block(s, (float **)s->frame.extended_data, + avctx->codec->id == AV_CODEC_ID_BINKAUDIO_DCT)) { av_log(avctx, AV_LOG_ERROR, "Incomplete packet\n"); return AVERROR_INVALIDDATA; } get_bits_align32(gb); + s->frame.nb_samples = s->block_size / avctx->channels; *got_frame_ptr = 1; *(AVFrame *)data = s->frame; From ee90119e9ee0e2c54f1017bbe1460bfcd50555d0 Mon Sep 17 00:00:00 2001 From: Justin Ruggles Date: Mon, 17 Sep 2012 16:20:36 -0400 Subject: [PATCH 3/9] binkaudio: remove unneeded GET_BITS_SAFE macro Normal get_bits() already has overread protection. 
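The macro being removed guarded every read with get_bits_left(); that becomes redundant when the bitstream reader itself refuses to fetch bits past the end of the buffer, because a single get_bits_left() test after parsing still catches the overread. A toy reader illustrating that contract (purely a sketch, not the libav implementation):

    /* Toy bit reader: reads past the end return 0, the position still
     * advances, so one bits_left() check after the loop detects overreads. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        const uint8_t *buf;
        int size_bits;
        int index;
    } BitReader;

    static unsigned toy_get_bits(BitReader *br, int n)
    {
        unsigned val = 0;
        for (int i = 0; i < n; i++) {
            int bit = 0;
            if (br->index < br->size_bits)   /* clamp: never read past the end */
                bit = (br->buf[br->index >> 3] >> (7 - (br->index & 7))) & 1;
            br->index++;                     /* position advances regardless */
            val = (val << 1) | bit;
        }
        return val;
    }

    static int toy_bits_left(const BitReader *br)
    {
        return br->size_bits - br->index;
    }

    int main(void)
    {
        static const uint8_t data[2] = { 0xAB, 0xCD };
        BitReader br = { data, 16, 0 };

        for (int i = 0; i < 5; i++)          /* 20 bits requested, 16 available */
            printf("read 4 bits: 0x%X\n", toy_get_bits(&br, 4));

        if (toy_bits_left(&br) < 0)
            printf("overread detected once, after the loop (%d bits)\n",
                   toy_bits_left(&br));
        return 0;
    }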
--- libavcodec/binkaudio.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c index 957af79c03..af56526cca 100644 --- a/libavcodec/binkaudio.c +++ b/libavcodec/binkaudio.c @@ -158,12 +158,6 @@ static const uint8_t rle_length_tab[16] = { 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 32, 64 }; -#define GET_BITS_SAFE(out, nbits) do { \ - if (get_bits_left(gb) < nbits) \ - return AVERROR_INVALIDDATA; \ - out = get_bits(gb, nbits); \ -} while (0) - /** * Decode Bink Audio block * @param[out] out Output buffer (must contain s->block_size elements) @@ -210,10 +204,9 @@ static int decode_block(BinkAudioContext *s, float **out, int use_dct) if (s->version_b) { j = i + 16; } else { - int v; - GET_BITS_SAFE(v, 1); + int v = get_bits1(gb); if (v) { - GET_BITS_SAFE(v, 4); + v = get_bits(gb, 4); j = i + rle_length_tab[v] * 8; } else { j = i + 8; @@ -222,7 +215,7 @@ static int decode_block(BinkAudioContext *s, float **out, int use_dct) j = FFMIN(j, s->frame_len); - GET_BITS_SAFE(width, 4); + width = get_bits(gb, 4); if (width == 0) { memset(coeffs + i, 0, (j - i) * sizeof(*coeffs)); i = j; @@ -232,10 +225,10 @@ static int decode_block(BinkAudioContext *s, float **out, int use_dct) while (i < j) { if (s->bands[k] == i) q = quant[k++]; - GET_BITS_SAFE(coeff, width); + coeff = get_bits(gb, width); if (coeff) { int v; - GET_BITS_SAFE(v, 1); + v = get_bits1(gb); if (v) coeffs[i] = -q * coeff; else From 419ffb239074dbe1ff3c2a2debc95ca48ee42268 Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Fri, 14 Sep 2012 19:18:17 +0200 Subject: [PATCH 4/9] avcodec: cleanup utils.c --- libavcodec/utils.c | 495 ++++++++++++++++++++++++--------------------- 1 file changed, 263 insertions(+), 232 deletions(-) diff --git a/libavcodec/utils.c b/libavcodec/utils.c index f56dd7b793..8c10e12c3c 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -47,23 +47,26 @@ #include #include -static int volatile entangled_thread_counter=0; +static int volatile entangled_thread_counter = 0; static int (*ff_lockmgr_cb)(void **mutex, enum AVLockOp op); static void *codec_mutex; static void *avformat_mutex; void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size) { - if(min_size < *size) + if (min_size < *size) return ptr; - min_size= FFMAX(17*min_size/16 + 32, min_size); + min_size = FFMAX(17 * min_size / 16 + 32, min_size); - ptr= av_realloc(ptr, min_size); - if(!ptr) //we could set this to the unmodified min_size but this is safer if the user lost the ptr and uses NULL now - min_size= 0; + ptr = av_realloc(ptr, min_size); + /* we could set this to the unmodified min_size but this is safer + * if the user lost the ptr and uses NULL now + */ + if (!ptr) + min_size = 0; - *size= min_size; + *size = min_size; return ptr; } @@ -73,11 +76,12 @@ void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size) void **p = ptr; if (min_size < *size) return; - min_size= FFMAX(17*min_size/16 + 32, min_size); + min_size = FFMAX(17 * min_size / 16 + 32, min_size); av_free(*p); *p = av_malloc(min_size); - if (!*p) min_size = 0; - *size= min_size; + if (!*p) + min_size = 0; + *size = min_size; } void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size) @@ -98,8 +102,10 @@ static AVCodec *first_avcodec = NULL; AVCodec *av_codec_next(const AVCodec *c) { - if(c) return c->next; - else return first_avcodec; + if (c) + return c->next; + else + return first_avcodec; } static void avcodec_init(void) @@ -128,8 +134,9 @@ void 
avcodec_register(AVCodec *codec) AVCodec **p; avcodec_init(); p = &first_avcodec; - while (*p != NULL) p = &(*p)->next; - *p = codec; + while (*p != NULL) + p = &(*p)->next; + *p = codec; codec->next = NULL; if (codec->init_static_data) @@ -141,23 +148,24 @@ unsigned avcodec_get_edge_width(void) return EDGE_WIDTH; } -void avcodec_set_dimensions(AVCodecContext *s, int width, int height){ - s->coded_width = width; - s->coded_height= height; - s->width = width; - s->height = height; +void avcodec_set_dimensions(AVCodecContext *s, int width, int height) +{ + s->coded_width = width; + s->coded_height = height; + s->width = width; + s->height = height; } -#define INTERNAL_BUFFER_SIZE (32+1) +#define INTERNAL_BUFFER_SIZE (32 + 1) void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS]) { int i; - int w_align= 1; - int h_align= 1; + int w_align = 1; + int h_align = 1; - switch(s->pix_fmt){ + switch (s->pix_fmt) { case PIX_FMT_YUV420P: case PIX_FMT_YUYV422: case PIX_FMT_UYVY422: @@ -194,58 +202,62 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, break; case PIX_FMT_YUV411P: case PIX_FMT_UYYVYY411: - w_align=32; - h_align=8; + w_align = 32; + h_align = 8; break; case PIX_FMT_YUV410P: - if(s->codec_id == AV_CODEC_ID_SVQ1){ - w_align=64; - h_align=64; + if (s->codec_id == AV_CODEC_ID_SVQ1) { + w_align = 64; + h_align = 64; } case PIX_FMT_RGB555: - if(s->codec_id == AV_CODEC_ID_RPZA){ - w_align=4; - h_align=4; + if (s->codec_id == AV_CODEC_ID_RPZA) { + w_align = 4; + h_align = 4; } case PIX_FMT_PAL8: case PIX_FMT_BGR8: case PIX_FMT_RGB8: - if(s->codec_id == AV_CODEC_ID_SMC){ - w_align=4; - h_align=4; + if (s->codec_id == AV_CODEC_ID_SMC) { + w_align = 4; + h_align = 4; } break; case PIX_FMT_BGR24: - if((s->codec_id == AV_CODEC_ID_MSZH) || (s->codec_id == AV_CODEC_ID_ZLIB)){ - w_align=4; - h_align=4; + if ((s->codec_id == AV_CODEC_ID_MSZH) || + (s->codec_id == AV_CODEC_ID_ZLIB)) { + w_align = 4; + h_align = 4; } break; default: - w_align= 1; - h_align= 1; + w_align = 1; + h_align = 1; break; } - *width = FFALIGN(*width , w_align); - *height= FFALIGN(*height, h_align); + *width = FFALIGN(*width, w_align); + *height = FFALIGN(*height, h_align); if (s->codec_id == AV_CODEC_ID_H264) - *height+=2; // some of the optimized chroma MC reads one line too much + // some of the optimized chroma MC reads one line too much + *height += 2; for (i = 0; i < 4; i++) linesize_align[i] = STRIDE_ALIGN; } -void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){ +void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height) +{ int chroma_shift = av_pix_fmt_descriptors[s->pix_fmt].log2_chroma_w; int linesize_align[AV_NUM_DATA_POINTERS]; int align; + avcodec_align_dimensions2(s, width, height, linesize_align); - align = FFMAX(linesize_align[0], linesize_align[3]); + align = FFMAX(linesize_align[0], linesize_align[3]); linesize_align[1] <<= chroma_shift; linesize_align[2] <<= chroma_shift; - align = FFMAX3(align, linesize_align[1], linesize_align[2]); - *width=FFALIGN(*width, align); + align = FFMAX3(align, linesize_align[1], linesize_align[2]); + *width = FFALIGN(*width, align); } int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, @@ -305,7 +317,7 @@ static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame) buf = avci->buffer; /* if there is a previously-used internal buffer, check its size and - channel count to see if we can reuse it */ + * channel count to see if we can reuse it */ if 
(buf->extended_data) { /* if current buffer is too small, free it */ if (buf->extended_data[0] && buf_size > buf->audio_data_size) { @@ -313,10 +325,10 @@ static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame) if (buf->extended_data != buf->data) av_free(&buf->extended_data); buf->extended_data = NULL; - buf->data[0] = NULL; + buf->data[0] = NULL; } /* if number of channels has changed, reset and/or free extended data - pointers but leave data buffer in buf->data[0] for reuse */ + * pointers but leave data buffer in buf->data[0] for reuse */ if (buf->nb_channels != avctx->channels) { if (buf->extended_data != buf->data) av_free(buf->extended_data); @@ -325,7 +337,7 @@ static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame) } /* if there is no previous buffer or the previous buffer cannot be used - as-is, allocate a new buffer and/or rearrange the channel pointers */ + * as-is, allocate a new buffer and/or rearrange the channel pointers */ if (!buf->extended_data) { if (!buf->data[0]) { if (!(buf->data[0] = av_mallocz(buf_size))) @@ -351,10 +363,12 @@ static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame) memcpy(frame->data, buf->data, sizeof(frame->data)); } - frame->type = FF_BUFFER_TYPE_INTERNAL; + frame->type = FF_BUFFER_TYPE_INTERNAL; - if (avctx->pkt) frame->pkt_pts = avctx->pkt->pts; - else frame->pkt_pts = AV_NOPTS_VALUE; + if (avctx->pkt) + frame->pkt_pts = avctx->pkt->pts; + else + frame->pkt_pts = AV_NOPTS_VALUE; frame->reordered_opaque = avctx->reordered_opaque; frame->sample_rate = avctx->sample_rate; @@ -363,7 +377,7 @@ static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame) if (avctx->debug & FF_DEBUG_BUFFERS) av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p, " - "internal audio buffer used\n", frame); + "internal audio buffer used\n", frame); return 0; } @@ -371,53 +385,53 @@ static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame) static int video_get_buffer(AVCodecContext *s, AVFrame *pic) { int i; - int w= s->width; - int h= s->height; + int w = s->width; + int h = s->height; InternalBuffer *buf; AVCodecInternal *avci = s->internal; - if(pic->data[0]!=NULL) { + if (pic->data[0] != NULL) { av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n"); return -1; } - if(avci->buffer_count >= INTERNAL_BUFFER_SIZE) { + if (avci->buffer_count >= INTERNAL_BUFFER_SIZE) { av_log(s, AV_LOG_ERROR, "buffer_count overflow (missing release_buffer?)\n"); return -1; } - if(av_image_check_size(w, h, 0, s)) + if (av_image_check_size(w, h, 0, s)) return -1; if (!avci->buffer) { - avci->buffer = av_mallocz((INTERNAL_BUFFER_SIZE+1) * + avci->buffer = av_mallocz((INTERNAL_BUFFER_SIZE + 1) * sizeof(InternalBuffer)); } buf = &avci->buffer[avci->buffer_count]; - if(buf->base[0] && (buf->width != w || buf->height != h || buf->pix_fmt != s->pix_fmt)){ + if (buf->base[0] && (buf->width != w || buf->height != h || buf->pix_fmt != s->pix_fmt)) { for (i = 0; i < AV_NUM_DATA_POINTERS; i++) { av_freep(&buf->base[i]); - buf->data[i]= NULL; + buf->data[i] = NULL; } } if (!buf->base[0]) { int h_chroma_shift, v_chroma_shift; - int size[4] = {0}; + int size[4] = { 0 }; int tmpsize; int unaligned; AVPicture picture; int stride_align[AV_NUM_DATA_POINTERS]; - const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1; + const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1 + 1; avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift); 
avcodec_align_dimensions2(s, &w, &h, stride_align); - if(!(s->flags&CODEC_FLAG_EMU_EDGE)){ - w+= EDGE_WIDTH*2; - h+= EDGE_WIDTH*2; + if (!(s->flags & CODEC_FLAG_EMU_EDGE)) { + w += EDGE_WIDTH * 2; + h += EDGE_WIDTH * 2; } do { @@ -425,72 +439,74 @@ static int video_get_buffer(AVCodecContext *s, AVFrame *pic) // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2 av_image_fill_linesizes(picture.linesize, s->pix_fmt, w); // increase alignment of w for next try (rhs gives the lowest bit set in w) - w += w & ~(w-1); + w += w & ~(w - 1); unaligned = 0; - for (i=0; i<4; i++){ + for (i = 0; i < 4; i++) unaligned |= picture.linesize[i] % stride_align[i]; - } } while (unaligned); tmpsize = av_image_fill_pointers(picture.data, s->pix_fmt, h, NULL, picture.linesize); if (tmpsize < 0) return -1; - for (i=0; i<3 && picture.data[i+1]; i++) - size[i] = picture.data[i+1] - picture.data[i]; + for (i = 0; i < 3 && picture.data[i + 1]; i++) + size[i] = picture.data[i + 1] - picture.data[i]; size[i] = tmpsize - (picture.data[i] - picture.data[0]); memset(buf->base, 0, sizeof(buf->base)); memset(buf->data, 0, sizeof(buf->data)); - for(i=0; i<4 && size[i]; i++){ - const int h_shift= i==0 ? 0 : h_chroma_shift; - const int v_shift= i==0 ? 0 : v_chroma_shift; + for (i = 0; i < 4 && size[i]; i++) { + const int h_shift = i == 0 ? 0 : h_chroma_shift; + const int v_shift = i == 0 ? 0 : v_chroma_shift; - buf->linesize[i]= picture.linesize[i]; + buf->linesize[i] = picture.linesize[i]; - buf->base[i]= av_malloc(size[i]+16); //FIXME 16 - if(buf->base[i]==NULL) return -1; + buf->base[i] = av_malloc(size[i] + 16); //FIXME 16 + if (buf->base[i] == NULL) + return -1; memset(buf->base[i], 128, size[i]); // no edge if EDGE EMU or not planar YUV - if((s->flags&CODEC_FLAG_EMU_EDGE) || !size[2]) + if ((s->flags & CODEC_FLAG_EMU_EDGE) || !size[2]) buf->data[i] = buf->base[i]; else - buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i]*EDGE_WIDTH>>v_shift) + (pixel_size*EDGE_WIDTH>>h_shift), stride_align[i]); + buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i] * EDGE_WIDTH >> v_shift) + (pixel_size * EDGE_WIDTH >> h_shift), stride_align[i]); } for (; i < AV_NUM_DATA_POINTERS; i++) { - buf->base[i] = buf->data[i] = NULL; + buf->base[i] = buf->data[i] = NULL; buf->linesize[i] = 0; } - if(size[1] && !size[2]) - ff_set_systematic_pal2((uint32_t*)buf->data[1], s->pix_fmt); - buf->width = s->width; - buf->height = s->height; - buf->pix_fmt= s->pix_fmt; + if (size[1] && !size[2]) + ff_set_systematic_pal2((uint32_t *)buf->data[1], s->pix_fmt); + buf->width = s->width; + buf->height = s->height; + buf->pix_fmt = s->pix_fmt; } - pic->type= FF_BUFFER_TYPE_INTERNAL; + pic->type = FF_BUFFER_TYPE_INTERNAL; for (i = 0; i < AV_NUM_DATA_POINTERS; i++) { - pic->base[i]= buf->base[i]; - pic->data[i]= buf->data[i]; - pic->linesize[i]= buf->linesize[i]; + pic->base[i] = buf->base[i]; + pic->data[i] = buf->data[i]; + pic->linesize[i] = buf->linesize[i]; } pic->extended_data = pic->data; avci->buffer_count++; - pic->width = buf->width; - pic->height = buf->height; - pic->format = buf->pix_fmt; + pic->width = buf->width; + pic->height = buf->height; + pic->format = buf->pix_fmt; pic->sample_aspect_ratio = s->sample_aspect_ratio; - if(s->pkt) pic->pkt_pts= s->pkt->pts; - else pic->pkt_pts= AV_NOPTS_VALUE; - pic->reordered_opaque= s->reordered_opaque; + if (s->pkt) + pic->pkt_pts = s->pkt->pts; + else + pic->pkt_pts = AV_NOPTS_VALUE; + pic->reordered_opaque = s->reordered_opaque; - if(s->debug&FF_DEBUG_BUFFERS) + if (s->debug & 
FF_DEBUG_BUFFERS) av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p, %d " - "buffers used\n", pic, avci->buffer_count); + "buffers used\n", pic, avci->buffer_count); return 0; } @@ -507,14 +523,15 @@ int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame) } } -void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){ +void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic) +{ int i; InternalBuffer *buf, *last; AVCodecInternal *avci = s->internal; assert(s->codec_type == AVMEDIA_TYPE_VIDEO); - assert(pic->type==FF_BUFFER_TYPE_INTERNAL); + assert(pic->type == FF_BUFFER_TYPE_INTERNAL); assert(avci->buffer_count); if (avci->buffer) { @@ -532,25 +549,25 @@ void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){ FFSWAP(InternalBuffer, *buf, *last); } - for (i = 0; i < AV_NUM_DATA_POINTERS; i++) { - pic->data[i]=NULL; + for (i = 0; i < AV_NUM_DATA_POINTERS; i++) + pic->data[i] = NULL; // pic->base[i]=NULL; - } -//printf("R%X\n", pic->opaque); + //printf("R%X\n", pic->opaque); - if(s->debug&FF_DEBUG_BUFFERS) + if (s->debug & FF_DEBUG_BUFFERS) av_log(s, AV_LOG_DEBUG, "default_release_buffer called on pic %p, %d " - "buffers used\n", pic, avci->buffer_count); + "buffers used\n", pic, avci->buffer_count); } -int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){ +int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic) +{ AVFrame temp_pic; int i; assert(s->codec_type == AVMEDIA_TYPE_VIDEO); /* If no picture return a new buffer */ - if(pic->data[0] == NULL) { + if (pic->data[0] == NULL) { /* We will copy from buffer, so must be readable */ pic->buffer_hints |= FF_BUFFER_HINTS_READABLE; return s->get_buffer(s, pic); @@ -559,10 +576,12 @@ int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){ assert(s->pix_fmt == pic->format); /* If internal buffer type return the same buffer */ - if(pic->type == FF_BUFFER_TYPE_INTERNAL) { - if(s->pkt) pic->pkt_pts= s->pkt->pts; - else pic->pkt_pts= AV_NOPTS_VALUE; - pic->reordered_opaque= s->reordered_opaque; + if (pic->type == FF_BUFFER_TYPE_INTERNAL) { + if (s->pkt) + pic->pkt_pts = s->pkt->pts; + else + pic->pkt_pts = AV_NOPTS_VALUE; + pic->reordered_opaque = s->reordered_opaque; return 0; } @@ -570,58 +589,66 @@ int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){ * Not internal type and reget_buffer not overridden, emulate cr buffer */ temp_pic = *pic; - for(i = 0; i < AV_NUM_DATA_POINTERS; i++) + for (i = 0; i < AV_NUM_DATA_POINTERS; i++) pic->data[i] = pic->base[i] = NULL; pic->opaque = NULL; /* Allocate new frame */ if (s->get_buffer(s, pic)) return -1; /* Copy image data from old buffer to new buffer */ - av_picture_copy((AVPicture*)pic, (AVPicture*)&temp_pic, s->pix_fmt, s->width, - s->height); + av_picture_copy((AVPicture *)pic, (AVPicture *)&temp_pic, s->pix_fmt, s->width, + s->height); s->release_buffer(s, &temp_pic); // Release old frame return 0; } -int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size){ +int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size) +{ int i; - for(i=0; ipts= AV_NOPTS_VALUE; - pic->key_frame= 1; - pic->sample_aspect_ratio = (AVRational){0, 1}; - pic->format = -1; /* unknown */ + pic->pts = AV_NOPTS_VALUE; + pic->key_frame = 1; + pic->sample_aspect_ratio = (AVRational) {0, 1 }; + pic->format = -1; /* unknown */ } -AVFrame *avcodec_alloc_frame(void){ - AVFrame *pic= 
av_malloc(sizeof(AVFrame)); +AVFrame *avcodec_alloc_frame(void) +{ + AVFrame *pic = av_malloc(sizeof(AVFrame)); - if(pic==NULL) return NULL; + if (pic == NULL) + return NULL; avcodec_get_frame_defaults(pic); @@ -642,7 +669,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code } if ((codec && avctx->codec && codec != avctx->codec)) { av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, " - "but %s passed to avcodec_open2().\n", avctx->codec->name, codec->name); + "but %s passed to avcodec_open2().\n", avctx->codec->name, codec->name); return AVERROR(EINVAL); } if (!codec) @@ -661,7 +688,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code } entangled_thread_counter++; - if(entangled_thread_counter != 1){ + if (entangled_thread_counter != 1) { av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n"); ret = -1; goto end; @@ -674,28 +701,28 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code } if (codec->priv_data_size > 0) { - if(!avctx->priv_data){ - avctx->priv_data = av_mallocz(codec->priv_data_size); if (!avctx->priv_data) { - ret = AVERROR(ENOMEM); - goto end; + avctx->priv_data = av_mallocz(codec->priv_data_size); + if (!avctx->priv_data) { + ret = AVERROR(ENOMEM); + goto end; + } + if (codec->priv_class) { + *(const AVClass **)avctx->priv_data = codec->priv_class; + av_opt_set_defaults(avctx->priv_data); + } } - if (codec->priv_class) { - *(const AVClass**)avctx->priv_data = codec->priv_class; - av_opt_set_defaults(avctx->priv_data); - } - } - if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0) - goto free_and_end; + if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0) + goto free_and_end; } else { avctx->priv_data = NULL; } if ((ret = av_opt_set_dict(avctx, &tmp)) < 0) goto free_and_end; - if(avctx->coded_width && avctx->coded_height) + if (avctx->coded_width && avctx->coded_height) avcodec_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); - else if(avctx->width && avctx->height) + else if (avctx->width && avctx->height) avcodec_set_dimensions(avctx, avctx->width, avctx->height); if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height) @@ -706,7 +733,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code } /* if the decoder init function was already called previously, - free the already allocated subtitle_header before overwriting it */ + * free the already allocated subtitle_header before overwriting it */ if (av_codec_is_decoder(codec)) av_freep(&avctx->subtitle_header); @@ -723,7 +750,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code avctx->codec_id = codec->id; } if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type - && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) { + && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) { av_log(avctx, AV_LOG_ERROR, "codec type or id mismatches\n"); ret = AVERROR(EINVAL); goto free_and_end; @@ -802,7 +829,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code } } - if(avctx->codec->init && !(avctx->active_thread_type&FF_THREAD_FRAME)){ + if (avctx->codec->init && !(avctx->active_thread_type & FF_THREAD_FRAME)) { ret = avctx->codec->init(avctx); if (ret < 0) { goto free_and_end; @@ -834,7 +861,7 @@ free_and_end: av_dict_free(&tmp); av_freep(&avctx->priv_data); av_freep(&avctx->internal); - avctx->codec= 
NULL; + avctx->codec = NULL; goto end; } @@ -851,7 +878,7 @@ int ff_alloc_packet(AVPacket *avpkt, int size) av_init_packet(avpkt); avpkt->destruct = destruct; - avpkt->size = size; + avpkt->size = size; return 0; } else { return av_new_packet(avpkt, size); @@ -927,7 +954,7 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx, if (av_sample_fmt_is_planar(avctx->sample_fmt) && avctx->channels > AV_NUM_DATA_POINTERS) { av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, " - "with more than %d channels, but extended_data is not set.\n", + "with more than %d channels, but extended_data is not set.\n", AV_NUM_DATA_POINTERS); return AVERROR(EINVAL); } @@ -990,8 +1017,8 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx, } /* NOTE: if we add any audio encoders which output non-keyframe packets, - this needs to be moved to the encoders, but for now we can do it - here to simplify things */ + * this needs to be moved to the encoders, but for now we can do it + * here to simplify things */ avpkt->flags |= AV_PKT_FLAG_KEY; if (padded_frame) { @@ -1026,35 +1053,35 @@ int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, frame->nb_samples = avctx->frame_size; } else { /* if frame_size is not set, the number of samples must be - calculated from the buffer size */ + * calculated from the buffer size */ int64_t nb_samples; if (!av_get_bits_per_sample(avctx->codec_id)) { av_log(avctx, AV_LOG_ERROR, "avcodec_encode_audio() does not " - "support this codec\n"); + "support this codec\n"); return AVERROR(EINVAL); } nb_samples = (int64_t)buf_size * 8 / (av_get_bits_per_sample(avctx->codec_id) * - avctx->channels); + avctx->channels); if (nb_samples >= INT_MAX) return AVERROR(EINVAL); frame->nb_samples = nb_samples; } /* it is assumed that the samples buffer is large enough based on the - relevant parameters */ + * relevant parameters */ samples_size = av_samples_get_buffer_size(NULL, avctx->channels, frame->nb_samples, avctx->sample_fmt, 1); if ((ret = avcodec_fill_audio_frame(frame, avctx->channels, avctx->sample_fmt, - (const uint8_t *) samples, + (const uint8_t *)samples, samples_size, 1))) return ret; /* fabricate frame pts from sample count. - this is needed because the avcodec_encode_audio() API does not have - a way for the user to provide pts */ + * this is needed because the avcodec_encode_audio() API does not have + * a way for the user to provide pts */ frame->pts = ff_samples_to_time_base(avctx, avctx->internal->sample_count); avctx->internal->sample_count += frame->nb_samples; @@ -1082,16 +1109,17 @@ int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, return ret ? ret : pkt.size; } + #endif #if FF_API_OLD_ENCODE_VIDEO int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size, - const AVFrame *pict) + const AVFrame *pict) { AVPacket pkt; int ret, got_packet = 0; - if(buf_size < FF_MIN_BUFFER_SIZE){ + if (buf_size < FF_MIN_BUFFER_SIZE) { av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n"); return -1; } @@ -1117,6 +1145,7 @@ int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf return ret ? 
ret : pkt.size; } + #endif int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, @@ -1132,7 +1161,7 @@ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) { av_free_packet(avpkt); av_init_packet(avpkt); - avpkt->size = 0; + avpkt->size = 0; return 0; } @@ -1168,11 +1197,11 @@ int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub) { int ret; - if(sub->start_display_time) { + if (sub->start_display_time) { av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n"); return -1; } - if(sub->num_rects == 0 || !sub->rects) + if (sub->num_rects == 0 || !sub->rects) return -1; ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub); avctx->frame_number++; @@ -1222,55 +1251,55 @@ static void apply_param_change(AVCodecContext *avctx, AVPacket *avpkt) } int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, - int *got_picture_ptr, - AVPacket *avpkt) + int *got_picture_ptr, + AVPacket *avpkt) { int ret; - *got_picture_ptr= 0; - if((avctx->coded_width||avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx)) + *got_picture_ptr = 0; + if ((avctx->coded_width || avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx)) return -1; avctx->pkt = avpkt; apply_param_change(avctx, avpkt); - if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type&FF_THREAD_FRAME)){ - if (HAVE_THREADS && avctx->active_thread_type&FF_THREAD_FRAME) - ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr, - avpkt); + if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) { + if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) + ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr, + avpkt); else { ret = avctx->codec->decode(avctx, picture, got_picture_ptr, - avpkt); - picture->pkt_dts= avpkt->dts; + avpkt); + picture->pkt_dts = avpkt->dts; picture->sample_aspect_ratio = avctx->sample_aspect_ratio; - picture->width = avctx->width; - picture->height = avctx->height; - picture->format = avctx->pix_fmt; + picture->width = avctx->width; + picture->height = avctx->height; + picture->format = avctx->pix_fmt; } emms_c(); //needed to avoid an emms_c() call before every return; if (*got_picture_ptr) avctx->frame_number++; - }else - ret= 0; + } else + ret = 0; return ret; } #if FF_API_OLD_DECODE_AUDIO int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, - int *frame_size_ptr, - AVPacket *avpkt) + int *frame_size_ptr, + AVPacket *avpkt) { AVFrame frame; int ret, got_frame = 0; if (avctx->get_buffer != avcodec_default_get_buffer) { av_log(avctx, AV_LOG_ERROR, "Custom get_buffer() for use with" - "avcodec_decode_audio3() detected. Overriding with avcodec_default_get_buffer\n"); + "avcodec_decode_audio3() detected. 
Overriding with avcodec_default_get_buffer\n"); av_log(avctx, AV_LOG_ERROR, "Please port your application to " - "avcodec_decode_audio4()\n"); + "avcodec_decode_audio4()\n"); avctx->get_buffer = avcodec_default_get_buffer; } @@ -1278,13 +1307,13 @@ int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *sa if (ret >= 0 && got_frame) { int ch, plane_size; - int planar = av_sample_fmt_is_planar(avctx->sample_fmt); + int planar = av_sample_fmt_is_planar(avctx->sample_fmt); int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels, frame.nb_samples, avctx->sample_fmt, 1); if (*frame_size_ptr < data_size) { av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for " - "the current frame (%d < %d)\n", *frame_size_ptr, data_size); + "the current frame (%d < %d)\n", *frame_size_ptr, data_size); return AVERROR(EINVAL); } @@ -1303,6 +1332,7 @@ int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *sa } return ret; } + #endif int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, @@ -1336,8 +1366,8 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, } int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, - int *got_sub_ptr, - AVPacket *avpkt) + int *got_sub_ptr, + AVPacket *avpkt) { int ret; @@ -1353,8 +1383,7 @@ void avsubtitle_free(AVSubtitle *sub) { int i; - for (i = 0; i < sub->num_rects; i++) - { + for (i = 0; i < sub->num_rects; i++) { av_freep(&sub->rects[i]->pict.data[0]); av_freep(&sub->rects[i]->pict.data[1]); av_freep(&sub->rects[i]->pict.data[2]); @@ -1378,7 +1407,7 @@ av_cold int avcodec_close(AVCodecContext *avctx) } entangled_thread_counter++; - if(entangled_thread_counter != 1){ + if (entangled_thread_counter != 1) { av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n"); entangled_thread_counter--; return -1; @@ -1413,7 +1442,7 @@ av_cold int avcodec_close(AVCodecContext *avctx) AVCodec *avcodec_find_encoder(enum AVCodecID id) { - AVCodec *p, *experimental=NULL; + AVCodec *p, *experimental = NULL; p = first_avcodec; while (p) { if (av_codec_is_encoder(p) && p->id == id) { @@ -1434,7 +1463,7 @@ AVCodec *avcodec_find_encoder_by_name(const char *name) return NULL; p = first_avcodec; while (p) { - if (av_codec_is_encoder(p) && strcmp(name,p->name) == 0) + if (av_codec_is_encoder(p) && strcmp(name, p->name) == 0) return p; p = p->next; } @@ -1460,7 +1489,7 @@ AVCodec *avcodec_find_decoder_by_name(const char *name) return NULL; p = first_avcodec; while (p) { - if (av_codec_is_decoder(p) && strcmp(name,p->name) == 0) + if (av_codec_is_decoder(p) && strcmp(name, p->name) == 0) return p; p = p->next; } @@ -1472,7 +1501,7 @@ static int get_bit_rate(AVCodecContext *ctx) int bit_rate; int bits_per_sample; - switch(ctx->codec_type) { + switch (ctx->codec_type) { case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_DATA: case AVMEDIA_TYPE_SUBTITLE: @@ -1496,11 +1525,11 @@ size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_ta for (i = 0; i < 4; i++) { len = snprintf(buf, buf_size, - isprint(codec_tag&0xFF) ? "%c" : "[%d]", codec_tag&0xFF); - buf += len; - buf_size = buf_size > len ? buf_size - len : 0; - ret += len; - codec_tag>>=8; + isprint(codec_tag & 0xFF) ? "%c" : "[%d]", codec_tag & 0xFF); + buf += len; + buf_size = buf_size > len ? 
buf_size - len : 0; + ret += len; + codec_tag >>= 8; } return ret; } @@ -1526,7 +1555,7 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) profile = av_get_profile_name(p, enc->profile); } else if (enc->codec_id == AV_CODEC_ID_MPEG2TS) { /* fake mpeg2 transport stream codec (currently not - registered) */ + * registered) */ codec_name = "mpeg2ts"; } else if (enc->codec_name[0] != '\0') { codec_name = enc->codec_name; @@ -1538,7 +1567,7 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) codec_name = buf1; } - switch(enc->codec_type) { + switch (enc->codec_type) { case AVMEDIA_TYPE_VIDEO: snprintf(buf, buf_size, "Video: %s%s", @@ -1557,19 +1586,19 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) enc->width, enc->height); if (enc->sample_aspect_ratio.num) { av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, - enc->width*enc->sample_aspect_ratio.num, - enc->height*enc->sample_aspect_ratio.den, - 1024*1024); + enc->width * enc->sample_aspect_ratio.num, + enc->height * enc->sample_aspect_ratio.den, + 1024 * 1024); snprintf(buf + strlen(buf), buf_size - strlen(buf), " [PAR %d:%d DAR %d:%d]", enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den, display_aspect_ratio.num, display_aspect_ratio.den); } - if(av_log_get_level() >= AV_LOG_DEBUG){ - int g= av_gcd(enc->time_base.num, enc->time_base.den); + if (av_log_get_level() >= AV_LOG_DEBUG) { + int g = av_gcd(enc->time_base.num, enc->time_base.den); snprintf(buf + strlen(buf), buf_size - strlen(buf), - ", %d/%d", - enc->time_base.num/g, enc->time_base.den/g); + ", %d/%d", + enc->time_base.num / g, enc->time_base.den / g); } } if (encode) { @@ -1636,9 +1665,9 @@ const char *av_get_profile_name(const AVCodec *codec, int profile) return NULL; } -unsigned avcodec_version( void ) +unsigned avcodec_version(void) { - return LIBAVCODEC_VERSION_INT; + return LIBAVCODEC_VERSION_INT; } const char *avcodec_configuration(void) @@ -1654,9 +1683,9 @@ const char *avcodec_license(void) void avcodec_flush_buffers(AVCodecContext *avctx) { - if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_FRAME) + if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) ff_thread_flush(avctx); - else if(avctx->codec->flush) + else if (avctx->codec->flush) avctx->codec->flush(avctx); } @@ -1671,16 +1700,16 @@ static void video_free_buffers(AVCodecContext *s) if (avci->buffer_count) av_log(s, AV_LOG_WARNING, "Found %i unreleased buffers!\n", avci->buffer_count); - for(i=0; ibuffer[i]; - for(j=0; j<4; j++){ + for (j = 0; j < 4; j++) { av_freep(&buf->base[j]); - buf->data[j]= NULL; + buf->data[j] = NULL; } } av_freep(&avci->buffer); - avci->buffer_count=0; + avci->buffer_count = 0; } static void audio_free_buffers(AVCodecContext *avctx) @@ -1716,7 +1745,7 @@ void avcodec_default_free_buffers(AVCodecContext *avctx) int av_get_exact_bits_per_sample(enum AVCodecID codec_id) { - switch(codec_id){ + switch (codec_id) { case AV_CODEC_ID_ADPCM_CT: case AV_CODEC_ID_ADPCM_IMA_APC: case AV_CODEC_ID_ADPCM_IMA_EA_SEAD: @@ -1923,16 +1952,18 @@ int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes) } #if !HAVE_THREADS -int ff_thread_init(AVCodecContext *s){ +int ff_thread_init(AVCodecContext *s) +{ return -1; } + #endif unsigned int av_xiphlacing(unsigned char *s, unsigned int v) { unsigned int n = 0; - while(v >= 0xff) { + while (v >= 0xff) { *s++ = 0xff; v -= 0xff; n++; @@ -1942,9 +1973,10 @@ unsigned int av_xiphlacing(unsigned char *s, unsigned int v) return n; 
} -int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b){ +int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b) +{ int i; - for(i=0; iid == codec_id + while ((hwaccel = av_hwaccel_next(hwaccel))) + if (hwaccel->id == codec_id && hwaccel->pix_fmt == pix_fmt) return hwaccel; - } return NULL; } @@ -2041,10 +2072,10 @@ int avpriv_unlock_avformat(void) unsigned int avpriv_toupper4(unsigned int x) { - return toupper( x &0xFF) - + (toupper((x>>8 )&0xFF)<<8 ) - + (toupper((x>>16)&0xFF)<<16) - + (toupper((x>>24)&0xFF)<<24); + return toupper(x & 0xFF) + + (toupper((x >> 8) & 0xFF) << 8) + + (toupper((x >> 16) & 0xFF) << 16) + + (toupper((x >> 24) & 0xFF) << 24); } #if !HAVE_THREADS From 9888ffb1ce5e0a17f711b01933d504c72ea29d3b Mon Sep 17 00:00:00 2001 From: Luca Barbato Date: Sun, 16 Sep 2012 20:32:12 +0200 Subject: [PATCH 5/9] mov: check for EOF in long lasting loops A quite widespread pattern in the demuxer is read a 32bit unsigned integer and then loop till this value is reached. Checking for EOF prevents pathological situations. --- libavformat/mov.c | 72 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 56 insertions(+), 16 deletions(-) diff --git a/libavformat/mov.c b/libavformat/mov.c index 87c890ebfc..56a075e0b0 100644 --- a/libavformat/mov.c +++ b/libavformat/mov.c @@ -1035,14 +1035,19 @@ static int mov_read_stco(MOVContext *c, AVIOContext *pb, MOVAtom atom) sc->chunk_count = entries; if (atom.type == MKTAG('s','t','c','o')) - for (i=0; ieof_reached; i++) sc->chunk_offsets[i] = avio_rb32(pb); else if (atom.type == MKTAG('c','o','6','4')) - for (i=0; ieof_reached; i++) sc->chunk_offsets[i] = avio_rb64(pb); else return AVERROR_INVALIDDATA; + sc->chunk_count = i; + + if (pb->eof_reached) + return AVERROR_EOF; + return 0; } @@ -1092,7 +1097,9 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries) st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; - for (pseudo_stream_id=0; pseudo_stream_ideof_reached; + pseudo_stream_id++) { //Parsing Sample description table enum AVCodecID id; int dref_id = 1; @@ -1361,6 +1368,9 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries) avio_skip(pb, a.size); } + if (pb->eof_reached) + return AVERROR_EOF; + if (st->codec->codec_type==AVMEDIA_TYPE_AUDIO && st->codec->sample_rate==0 && sc->time_scale>1) st->codec->sample_rate= sc->time_scale; @@ -1457,13 +1467,18 @@ static int mov_read_stsc(MOVContext *c, AVIOContext *pb, MOVAtom atom) sc->stsc_data = av_malloc(entries * sizeof(*sc->stsc_data)); if (!sc->stsc_data) return AVERROR(ENOMEM); - sc->stsc_count = entries; - for (i=0; ieof_reached; i++) { sc->stsc_data[i].first = avio_rb32(pb); sc->stsc_data[i].count = avio_rb32(pb); sc->stsc_data[i].id = avio_rb32(pb); } + + sc->stsc_count = i; + + if (pb->eof_reached) + return AVERROR_EOF; + return 0; } @@ -1486,13 +1501,17 @@ static int mov_read_stps(MOVContext *c, AVIOContext *pb, MOVAtom atom) sc->stps_data = av_malloc(entries * sizeof(*sc->stps_data)); if (!sc->stps_data) return AVERROR(ENOMEM); - sc->stps_count = entries; - for (i = 0; i < entries; i++) { + for (i = 0; i < entries && !pb->eof_reached; i++) { sc->stps_data[i] = avio_rb32(pb); //av_dlog(c->fc, "stps %d\n", sc->stps_data[i]); } + sc->stps_count = i; + + if (pb->eof_reached) + return AVERROR_EOF; + return 0; } @@ -1524,12 +1543,17 @@ static int mov_read_stss(MOVContext *c, AVIOContext *pb, MOVAtom atom) sc->keyframes = av_malloc(entries * sizeof(int)); if (!sc->keyframes) return AVERROR(ENOMEM); - 
sc->keyframe_count = entries; - for (i=0; ieof_reached; i++) { sc->keyframes[i] = avio_rb32(pb); //av_dlog(c->fc, "keyframes[]=%d\n", sc->keyframes[i]); } + + sc->keyframe_count = i; + + if (pb->eof_reached) + return AVERROR_EOF; + return 0; } @@ -1596,11 +1620,16 @@ static int mov_read_stsz(MOVContext *c, AVIOContext *pb, MOVAtom atom) init_get_bits(&gb, buf, 8*num_bytes); - for (i = 0; i < entries; i++) { + for (i = 0; i < entries && !pb->eof_reached; i++) { sc->sample_sizes[i] = get_bits_long(&gb, field_size); sc->data_size += sc->sample_sizes[i]; } + sc->sample_count = i; + + if (pb->eof_reached) + return AVERROR_EOF; + av_free(buf); return 0; } @@ -1634,9 +1663,7 @@ static int mov_read_stts(MOVContext *c, AVIOContext *pb, MOVAtom atom) if (!sc->stts_data) return AVERROR(ENOMEM); - sc->stts_count = entries; - - for (i=0; ieof_reached; i++) { int sample_duration; int sample_count; @@ -1652,6 +1679,11 @@ static int mov_read_stts(MOVContext *c, AVIOContext *pb, MOVAtom atom) total_sample_count+=sample_count; } + sc->stts_count = i; + + if (pb->eof_reached) + return AVERROR_EOF; + st->nb_frames= total_sample_count; if (duration) st->duration= duration; @@ -1683,9 +1715,8 @@ static int mov_read_ctts(MOVContext *c, AVIOContext *pb, MOVAtom atom) sc->ctts_data = av_malloc(entries * sizeof(*sc->ctts_data)); if (!sc->ctts_data) return AVERROR(ENOMEM); - sc->ctts_count = entries; - for (i=0; ieof_reached; i++) { int count =avio_rb32(pb); int duration =avio_rb32(pb); @@ -1695,6 +1726,11 @@ static int mov_read_ctts(MOVContext *c, AVIOContext *pb, MOVAtom atom) sc->dts_shift = FFMAX(sc->dts_shift, -duration); } + sc->ctts_count = i; + + if (pb->eof_reached) + return AVERROR_EOF; + av_dlog(c->fc, "dts shift %d\n", sc->dts_shift); return 0; @@ -2252,7 +2288,7 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom) offset = frag->base_data_offset + data_offset; distance = 0; av_dlog(c->fc, "first sample flags 0x%x\n", first_sample_flags); - for (i = 0; i < entries; i++) { + for (i = 0; i < entries && !pb->eof_reached; i++) { unsigned sample_size = frag->size; int sample_flags = i ? frag->flags : first_sample_flags; unsigned sample_duration = frag->duration; @@ -2283,6 +2319,10 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom) offset += sample_size; sc->data_size += sample_size; } + + if (pb->eof_reached) + return AVERROR_EOF; + frag->moof_offset = offset; st->duration = sc->track_end = dts + sc->time_offset; return 0; From 84cc314e40dc0a8844a322e54c2827d247f3cc7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Storsj=C3=B6?= Date: Tue, 18 Sep 2012 11:31:44 +0300 Subject: [PATCH 6/9] smoothstreaming: Export the mp4 codec tags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes stream copy from a format that already has incompatible codec tags set. The chained ismv muxer exports this same codec tag list, so set it on this one as well, to allow the caller (and lavf common code) to set them correctly. 
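For context on what the exported table enables: generic muxing code can map a codec ID to the container's tag, or reject a tag the container cannot carry, instead of copying whatever tag the source format used. A self-contained sketch with made-up IDs and tags (the real muxer simply points .codec_tag at ff_mp4_obj_type, as in the diff below):

    /* Sketch of a codec-ID -> container-tag table and lookup. */
    #include <stdio.h>

    enum ToyCodecID { TOY_CODEC_H264 = 1, TOY_CODEC_AAC, TOY_CODEC_MP3 };

    typedef struct { enum ToyCodecID id; unsigned tag; } ToyCodecTag;

    #define MKTAG(a, b, c, d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))

    static const ToyCodecTag toy_mp4_tags[] = {
        { TOY_CODEC_H264, MKTAG('a', 'v', 'c', '1') },
        { TOY_CODEC_AAC,  MKTAG('m', 'p', '4', 'a') },
        { 0, 0 },
    };

    static unsigned toy_codec_get_tag(const ToyCodecTag *tags, enum ToyCodecID id)
    {
        for (; tags->id; tags++)
            if (tags->id == id)
                return tags->tag;
        return 0;   /* codec not representable in this container */
    }

    int main(void)
    {
        printf("H.264 -> tag 0x%08X\n",
               toy_codec_get_tag(toy_mp4_tags, TOY_CODEC_H264));
        if (!toy_codec_get_tag(toy_mp4_tags, TOY_CODEC_MP3))
            printf("MP3 has no tag here; a stream copy would be rejected or retagged\n");
        return 0;
    }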
Signed-off-by: Martin Storsjö --- libavformat/Makefile | 2 +- libavformat/smoothstreamingenc.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/libavformat/Makefile b/libavformat/Makefile index ae168b6ff4..53cc675324 100644 --- a/libavformat/Makefile +++ b/libavformat/Makefile @@ -292,7 +292,7 @@ OBJS-$(CONFIG_SIFF_DEMUXER) += siff.o OBJS-$(CONFIG_SMACKER_DEMUXER) += smacker.o OBJS-$(CONFIG_SMJPEG_DEMUXER) += smjpegdec.o smjpeg.o OBJS-$(CONFIG_SMJPEG_MUXER) += smjpegenc.o smjpeg.o -OBJS-$(CONFIG_SMOOTHSTREAMING_MUXER) += smoothstreamingenc.o +OBJS-$(CONFIG_SMOOTHSTREAMING_MUXER) += smoothstreamingenc.o isom.o OBJS-$(CONFIG_SOL_DEMUXER) += sol.o pcm.o OBJS-$(CONFIG_SOX_DEMUXER) += soxdec.o pcm.o OBJS-$(CONFIG_SOX_MUXER) += soxenc.o diff --git a/libavformat/smoothstreamingenc.c b/libavformat/smoothstreamingenc.c index 5ddba7eb06..08e6526533 100644 --- a/libavformat/smoothstreamingenc.c +++ b/libavformat/smoothstreamingenc.c @@ -30,6 +30,7 @@ #include "os_support.h" #include "avc.h" #include "url.h" +#include "isom.h" #include "libavutil/opt.h" #include "libavutil/avstring.h" @@ -617,5 +618,6 @@ AVOutputFormat ff_smoothstreaming_muxer = { .write_header = ism_write_header, .write_packet = ism_write_packet, .write_trailer = ism_write_trailer, + .codec_tag = (const AVCodecTag* const []){ ff_mp4_obj_type, 0 }, .priv_class = &ism_class, }; From e772f9faec0747715462d801e47058a31133d548 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Storsj=C3=B6?= Date: Tue, 18 Sep 2012 11:34:16 +0300 Subject: [PATCH 7/9] configure: Make the smoothstreaming muxer enable the ismv muxer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This doesn't fix any build failure, but the smoothstreaming muxer opens a chained ismv muxer. Signed-off-by: Martin Storsjö --- configure | 1 + 1 file changed, 1 insertion(+) diff --git a/configure b/configure index 8a59262299..74dbb6cdcc 100755 --- a/configure +++ b/configure @@ -1618,6 +1618,7 @@ rtsp_muxer_select="rtp_muxer http_protocol rtp_protocol" sap_demuxer_select="sdp_demuxer" sap_muxer_select="rtp_muxer rtp_protocol" sdp_demuxer_select="rtpdec" +smoothstreaming_muxer_select="ismv_muxer" spdif_muxer_select="aac_parser" tg2_muxer_select="mov_muxer" tgp_muxer_select="mov_muxer" From aeeb782c2ab6be783f7658dfccf11fa89b33566c Mon Sep 17 00:00:00 2001 From: Mans Rullgard Date: Sun, 16 Sep 2012 22:06:53 +0100 Subject: [PATCH 8/9] configure: add --toolchain option This allows creating canned shorthands for common combinations of cc, ld etc. 
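A usage sketch for the new option, limited to the one shorthand the patch defines (any other name makes configure abort):

    ./configure --toolchain=msvc
    # sets the defaults cc="c99wrap cl", ld="c99wrap link", nm="dumpbin -symbols";
    # tools given explicitly on the command line should still take precedence,
    # since only the *_default variables are set.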
Signed-off-by: Mans Rullgard --- configure | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/configure b/configure index 74dbb6cdcc..2d400c0ced 100755 --- a/configure +++ b/configure @@ -205,6 +205,7 @@ Advanced options (experts only): --target-os=OS compiler targets OS [$target_os] --target-exec=CMD command to run executables on target --target-path=DIR path to view of build directory on target + --toolchain=NAME set tool defaults according to NAME --nm=NM use nm tool --ar=AR use archive tool AR [$ar_default] --as=AS use assembler AS [$as_default] @@ -1327,6 +1328,7 @@ CMDLINE_SET=" target_exec target_os target_path + toolchain " CMDLINE_APPEND=" @@ -2038,6 +2040,17 @@ ranlib="${cross_prefix}${ranlib}" sysinclude_default="${sysroot}/usr/include" +case "$toolchain" in + msvc) + cc_default="c99wrap cl" + ld_default="c99wrap link" + nm_default="dumpbin -symbols" + ;; + ?*) + die "Unknown toolchain $toolchain" + ;; +esac + set_default cc pkg_config sysinclude enabled cross_compile || host_cc_default=$cc set_default host_cc From 1b3439b3055b083df51d7f7838ecc6b3f708b15c Mon Sep 17 00:00:00 2001 From: Janne Grunau Date: Wed, 5 Sep 2012 16:34:05 +0200 Subject: [PATCH 9/9] mpegvideo: move frame size dependent memory management to separate functions This is a preparation for supporting frame size changes during frame-based multithreading. --- libavcodec/mpegvideo.c | 377 ++++++++++++++++++++++------------------- 1 file changed, 204 insertions(+), 173 deletions(-) diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index f51184f9f6..a1e59af73d 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -653,13 +653,168 @@ void ff_MPV_decode_defaults(MpegEncContext *s) ff_MPV_common_defaults(s); } +/** + * Initialize and allocates MpegEncContext fields dependent on the resolution. + */ +static int init_context_frame(MpegEncContext *s) +{ + int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y; + + s->mb_width = (s->width + 15) / 16; + s->mb_stride = s->mb_width + 1; + s->b8_stride = s->mb_width * 2 + 1; + s->b4_stride = s->mb_width * 4 + 1; + mb_array_size = s->mb_height * s->mb_stride; + mv_table_size = (s->mb_height + 2) * s->mb_stride + 1; + + /* set default edge pos, will be overriden + * in decode_header if needed */ + s->h_edge_pos = s->mb_width * 16; + s->v_edge_pos = s->mb_height * 16; + + s->mb_num = s->mb_width * s->mb_height; + + s->block_wrap[0] = + s->block_wrap[1] = + s->block_wrap[2] = + s->block_wrap[3] = s->b8_stride; + s->block_wrap[4] = + s->block_wrap[5] = s->mb_stride; + + y_size = s->b8_stride * (2 * s->mb_height + 1); + c_size = s->mb_stride * (s->mb_height + 1); + yc_size = y_size + 2 * c_size; + + FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), + fail); // error ressilience code looks cleaner with this + for (y = 0; y < s->mb_height; y++) + for (x = 0; x < s->mb_width; x++) + s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride; + + s->mb_index2xy[s->mb_height * s->mb_width] = + (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed? 
+ + if (s->encoding) { + /* Allocate MV tables */ + FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, + mv_table_size * 2 * sizeof(int16_t), fail); + FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, + mv_table_size * 2 * sizeof(int16_t), fail); + FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, + mv_table_size * 2 * sizeof(int16_t), fail); + FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, + mv_table_size * 2 * sizeof(int16_t), fail); + FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, + mv_table_size * 2 * sizeof(int16_t), fail); + FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, + mv_table_size * 2 * sizeof(int16_t), fail); + s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1; + s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1; + s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1; + s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + + s->mb_stride + 1; + s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + + s->mb_stride + 1; + s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1; + + /* Allocate MB type table */ + FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * + sizeof(uint16_t), fail); // needed for encoding + + FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * + sizeof(int), fail); + + FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab, + mb_array_size * sizeof(float), fail); + FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab, + mb_array_size * sizeof(float), fail); + + } + + FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer, + mb_array_size * sizeof(uint8_t), fail); + FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, + mb_array_size * sizeof(uint8_t), fail); + + if (s->codec_id == AV_CODEC_ID_MPEG4 || + (s->flags & CODEC_FLAG_INTERLACED_ME)) { + /* interlaced direct mode decoding tables */ + for (i = 0; i < 2; i++) { + int j, k; + for (j = 0; j < 2; j++) { + for (k = 0; k < 2; k++) { + FF_ALLOCZ_OR_GOTO(s->avctx, + s->b_field_mv_table_base[i][j][k], + mv_table_size * 2 * sizeof(int16_t), + fail); + s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + + s->mb_stride + 1; + } + FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], + mb_array_size * 2 * sizeof(uint8_t), fail); + FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], + mv_table_size * 2 * sizeof(int16_t), fail); + s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + + s->mb_stride + 1; + } + FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], + mb_array_size * 2 * sizeof(uint8_t), fail); + } + } + if (s->out_format == FMT_H263) { + /* cbp values */ + FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail); + s->coded_block = s->coded_block_base + s->b8_stride + 1; + + /* cbp, ac_pred, pred_dir */ + FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table, + mb_array_size * sizeof(uint8_t), fail); + FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, + mb_array_size * sizeof(uint8_t), fail); + } + + if (s->h263_pred || s->h263_plus || !s->encoding) { + /* dc values */ + // MN: we need these for error resilience of intra-frames + FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, + yc_size * sizeof(int16_t), fail); + s->dc_val[0] = s->dc_val_base + s->b8_stride + 1; + s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1; + s->dc_val[2] = s->dc_val[1] + c_size; + for (i = 0; i < yc_size; i++) + s->dc_val_base[i] = 1024; + } + + /* which mb is a intra block */ + FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail); + memset(s->mbintra_table, 1, mb_array_size); + + /* init macroblock skip table */ + 
FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail); + // Note the + 1 is for a quicker mpeg4 slice_end detection + + if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) || + s->avctx->debug_mv) { + s->visualization_buffer[0] = av_malloc((s->mb_width * 16 + + 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH); + s->visualization_buffer[1] = av_malloc((s->mb_width * 16 + + 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH); + s->visualization_buffer[2] = av_malloc((s->mb_width * 16 + + 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH); + } + + return 0; +fail: + return AVERROR(ENOMEM); +} + /** * init common structure for both encoder and decoder. * this assumes that some variables like width/height are already set */ av_cold int ff_MPV_common_init(MpegEncContext *s) { - int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y; + int i, err; int nb_slices = (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_SLICE) ? s->avctx->thread_count : 1; @@ -699,35 +854,10 @@ av_cold int ff_MPV_common_init(MpegEncContext *s) s->flags2 = s->avctx->flags2; if (s->width && s->height) { - s->mb_width = (s->width + 15) / 16; - s->mb_stride = s->mb_width + 1; - s->b8_stride = s->mb_width * 2 + 1; - s->b4_stride = s->mb_width * 4 + 1; - mb_array_size = s->mb_height * s->mb_stride; - mv_table_size = (s->mb_height + 2) * s->mb_stride + 1; - /* set chroma shifts */ avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift); - /* set default edge pos, will be overriden - * in decode_header if needed */ - s->h_edge_pos = s->mb_width * 16; - s->v_edge_pos = s->mb_height * 16; - - s->mb_num = s->mb_width * s->mb_height; - - s->block_wrap[0] = - s->block_wrap[1] = - s->block_wrap[2] = - s->block_wrap[3] = s->b8_stride; - s->block_wrap[4] = - s->block_wrap[5] = s->mb_stride; - - y_size = s->b8_stride * (2 * s->mb_height + 1); - c_size = s->mb_stride * (s->mb_height + 1); - yc_size = y_size + 2 * c_size; - /* convert fourcc to upper case */ s->codec_tag = avpriv_toupper4(s->avctx->codec_tag); @@ -735,42 +865,7 @@ av_cold int ff_MPV_common_init(MpegEncContext *s) s->avctx->coded_frame = &s->current_picture.f; - FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), - fail); // error ressilience code looks cleaner with this - for (y = 0; y < s->mb_height; y++) - for (x = 0; x < s->mb_width; x++) - s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride; - - s->mb_index2xy[s->mb_height * s->mb_width] = - (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed? 
- if (s->encoding) { - /* Allocate MV tables */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, - mv_table_size * 2 * sizeof(int16_t), fail); - FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, - mv_table_size * 2 * sizeof(int16_t), fail); - FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, - mv_table_size * 2 * sizeof(int16_t), fail); - FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, - mv_table_size * 2 * sizeof(int16_t), fail); - FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, - mv_table_size * 2 * sizeof(int16_t), fail); - FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, - mv_table_size * 2 * sizeof(int16_t), fail); - s->p_mv_table = s->p_mv_table_base + - s->mb_stride + 1; - s->b_forw_mv_table = s->b_forw_mv_table_base + - s->mb_stride + 1; - s->b_back_mv_table = s->b_back_mv_table_base + - s->mb_stride + 1; - s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + - s->mb_stride + 1; - s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + - s->mb_stride + 1; - s->b_direct_mv_table = s->b_direct_mv_table_base + - s->mb_stride + 1; - if (s->msmpeg4_version) { FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2 * 2 * (MAX_LEVEL + 1) * @@ -778,13 +873,6 @@ av_cold int ff_MPV_common_init(MpegEncContext *s) } FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail); - /* Allocate MB type table */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * - sizeof(uint16_t), fail); // needed for encoding - - FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * - sizeof(int), fail); - FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, @@ -802,11 +890,6 @@ av_cold int ff_MPV_common_init(MpegEncContext *s) FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail); } - - FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab, - mb_array_size * sizeof(float), fail); - FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab, - mb_array_size * sizeof(float), fail); } } @@ -818,81 +901,10 @@ av_cold int ff_MPV_common_init(MpegEncContext *s) } if (s->width && s->height) { - FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer, - mb_array_size * sizeof(uint8_t), fail); - FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, - mb_array_size * sizeof(uint8_t), fail); - - if (s->codec_id == AV_CODEC_ID_MPEG4 || - (s->flags & CODEC_FLAG_INTERLACED_ME)) { - /* interlaced direct mode decoding tables */ - for (i = 0; i < 2; i++) { - int j, k; - for (j = 0; j < 2; j++) { - for (k = 0; k < 2; k++) { - FF_ALLOCZ_OR_GOTO(s->avctx, - s->b_field_mv_table_base[i][j][k], - mv_table_size * 2 * sizeof(int16_t), - fail); - s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + - s->mb_stride + 1; - } - FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], - mb_array_size * 2 * sizeof(uint8_t), - fail); - FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], - mv_table_size * 2 * sizeof(int16_t), - fail); - s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] - + s->mb_stride + 1; - } - FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], - mb_array_size * 2 * sizeof(uint8_t), - fail); - } - } - if (s->out_format == FMT_H263) { - /* cbp values */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail); - s->coded_block = s->coded_block_base + s->b8_stride + 1; - - /* cbp, ac_pred, pred_dir */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table, - mb_array_size * sizeof(uint8_t), fail); - FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, - mb_array_size * sizeof(uint8_t), fail); - } - - if 
(s->h263_pred || s->h263_plus || !s->encoding) { - /* dc values */ - // MN: we need these for error resilience of intra-frames - FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, - yc_size * sizeof(int16_t), fail); - s->dc_val[0] = s->dc_val_base + s->b8_stride + 1; - s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1; - s->dc_val[2] = s->dc_val[1] + c_size; - for (i = 0; i < yc_size; i++) - s->dc_val_base[i] = 1024; - } - - /* which mb is a intra block */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail); - memset(s->mbintra_table, 1, mb_array_size); - - /* init macroblock skip table */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail); - // Note the + 1 is for a quicker mpeg4 slice_end detection + if ((err = init_context_frame(s))) + goto fail; s->parse_context.state = -1; - if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) || - s->avctx->debug_mv) { - s->visualization_buffer[0] = av_malloc((s->mb_width * 16 + - 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH); - s->visualization_buffer[1] = av_malloc((s->mb_width * 16 + - 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH); - s->visualization_buffer[2] = av_malloc((s->mb_width * 16 + - 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH); - } } s->context_initialized = 1; @@ -928,24 +940,15 @@ av_cold int ff_MPV_common_init(MpegEncContext *s) return -1; } -/* init common structure for both encoder and decoder */ -void ff_MPV_common_end(MpegEncContext *s) +/** + * Frees and resets MpegEncContext fields depending on the resolution. + * Is used during resolution changes to avoid a full reinitialization of the + * codec. + */ +static int free_context_frame(MpegEncContext *s) { int i, j, k; - if (s->slice_context_count > 1) { - for (i = 0; i < s->slice_context_count; i++) { - free_duplicate_context(s->thread_context[i]); - } - for (i = 1; i < s->slice_context_count; i++) { - av_freep(&s->thread_context[i]); - } - s->slice_context_count = 1; - } else free_duplicate_context(s); - - av_freep(&s->parse_context.buffer); - s->parse_context.buffer_size = 0; - av_freep(&s->mb_type); av_freep(&s->p_mv_table_base); av_freep(&s->b_forw_mv_table_base); @@ -979,15 +982,49 @@ void ff_MPV_common_end(MpegEncContext *s) av_freep(&s->pred_dir_table); av_freep(&s->mbskip_table); + + av_freep(&s->error_status_table); + av_freep(&s->er_temp_buffer); + av_freep(&s->mb_index2xy); + av_freep(&s->lambda_table); + av_freep(&s->cplx_tab); + av_freep(&s->bits_tab); + + s->linesize = s->uvlinesize = 0; + + for (i = 0; i < 3; i++) + av_freep(&s->visualization_buffer[i]); + + if (!(s->avctx->active_thread_type & FF_THREAD_FRAME)) + avcodec_default_free_buffers(s->avctx); + + return 0; +} + +/* init common structure for both encoder and decoder */ +void ff_MPV_common_end(MpegEncContext *s) +{ + int i; + + if (s->slice_context_count > 1) { + for (i = 0; i < s->slice_context_count; i++) { + free_duplicate_context(s->thread_context[i]); + } + for (i = 1; i < s->slice_context_count; i++) { + av_freep(&s->thread_context[i]); + } + s->slice_context_count = 1; + } else free_duplicate_context(s); + + av_freep(&s->parse_context.buffer); + s->parse_context.buffer_size = 0; + av_freep(&s->bitstream_buffer); s->allocated_bitstream_buffer_size = 0; av_freep(&s->avctx->stats_out); av_freep(&s->ac_stats); - av_freep(&s->error_status_table); - av_freep(&s->er_temp_buffer); - av_freep(&s->mb_index2xy); - av_freep(&s->lambda_table); + av_freep(&s->q_intra_matrix); av_freep(&s->q_inter_matrix); av_freep(&s->q_intra_matrix16); 
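The point of factoring the allocations into init_context_frame() and free_context_frame() is that a later change can rebuild only the resolution-dependent tables instead of tearing down and re-creating the whole MpegEncContext, which is what the commit message means by "preparation for supporting frame size changes during frame-based multithreading". A minimal sketch of such a caller follows; it is not part of this patch series, and the function name and exact ordering of steps are assumptions for illustration only.

    /* Illustrative only, not part of the patch: a hypothetical helper that
     * reacts to a coded-frame-size change by recreating just the
     * resolution-dependent state. */
    static int frame_size_change_sketch(MpegEncContext *s, int new_width, int new_height)
    {
        int err;

        /* Release every table that was sized for the old mb_width/mb_height. */
        free_context_frame(s);

        s->width  = new_width;
        s->height = new_height;

        /* Recompute mb_width, mb_stride, etc. and reallocate the tables.
         * init_context_frame() returns 0 on success or AVERROR(ENOMEM). */
        if ((err = init_context_frame(s)) < 0)
            return err;

        return 0;
    }
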
@@ -995,8 +1032,8 @@ void ff_MPV_common_end(MpegEncContext *s) av_freep(&s->input_picture); av_freep(&s->reordered_input_picture); av_freep(&s->dct_offset); - av_freep(&s->cplx_tab); - av_freep(&s->bits_tab); + + free_context_frame(s); if (s->picture && !s->avctx->internal->is_copy) { for (i = 0; i < s->picture_count; i++) { @@ -1009,12 +1046,6 @@ void ff_MPV_common_end(MpegEncContext *s) s->next_picture_ptr = s->current_picture_ptr = NULL; s->linesize = s->uvlinesize = 0; - - for (i = 0; i < 3; i++) - av_freep(&s->visualization_buffer[i]); - - if (!(s->avctx->active_thread_type & FF_THREAD_FRAME)) - avcodec_default_free_buffers(s->avctx); } void ff_init_rl(RLTable *rl,