lavc decoders: work with refcounted frames.

Anton Khirnov 2012-11-21 21:34:46 +01:00
parent 6e7b50b427
commit 759001c534
239 changed files with 3904 additions and 4335 deletions

View File

@@ -57,6 +57,11 @@ which re-allocates them for other threads.
 Add CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
 speed gain at this point but it should work.
 
+If there are inter-frame dependencies, so the codec calls
+ff_thread_report/await_progress(), set AVCodecInternal.allocate_progress. The
+frames must then be freed with ff_thread_release_buffer().
+Otherwise leave it at zero and decode directly into the user-supplied frames.
+
 Call ff_thread_report_progress() after some part of the current picture has decoded.
 A good place to put this is where draw_horiz_band() is called - add this if it isn't
 called anywhere, as it's useful too and the implementation is trivial when you're

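The progress calls mentioned above pair up across threads. As a rough sketch of the pattern (decode_slice() and the ctx fields are hypothetical; whether the first argument is the AVFrame itself or a wrapper around it depends on the lavc version in use):

    /* producer: announce each finished stripe of the current picture */
    for (y = 0; y < height; y += 16) {
        decode_slice(ctx, y);                          /* hypothetical */
        ff_thread_report_progress(&ctx->cur, y + 15, 0);
    }

    /* consumer: wait until the rows we predict from are complete */
    ff_thread_await_progress(&ctx->ref, mv_row + 16, 0);
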
View File

@@ -24,6 +24,7 @@
  * 4XM codec.
  */
 
+#include "libavutil/frame.h"
 #include "libavutil/intreadwrite.h"
 #include "avcodec.h"
 #include "bytestream.h"
@@ -131,7 +132,7 @@ typedef struct CFrameBuffer {
 typedef struct FourXContext {
     AVCodecContext *avctx;
     DSPContext dsp;
-    AVFrame *current_picture, *last_picture;
+    AVFrame *last_picture;
     GetBitContext pre_gb;          ///< ac/dc prefix
     GetBitContext gb;
     GetByteContext g;
@@ -256,15 +257,15 @@ static av_cold void init_vlcs(FourXContext *f)
     }
 }
 
-static void init_mv(FourXContext *f)
+static void init_mv(FourXContext *f, int linesize)
 {
     int i;
 
     for (i = 0; i < 256; i++) {
         if (f->version > 1)
-            f->mv[i] = mv[i][0] + mv[i][1] * f->current_picture->linesize[0] / 2;
+            f->mv[i] = mv[i][0] + mv[i][1] * linesize / 2;
         else
-            f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * f->current_picture->linesize[0] / 2;
+            f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * linesize / 2;
     }
 }
@@ -385,14 +386,15 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
     }
 }
 
-static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
+static int decode_p_frame(FourXContext *f, AVFrame *frame,
+                          const uint8_t *buf, int length)
 {
     int x, y;
     const int width  = f->avctx->width;
     const int height = f->avctx->height;
     uint16_t *src    = (uint16_t *)f->last_picture->data[0];
-    uint16_t *dst    = (uint16_t *)f->current_picture->data[0];
-    const int stride =             f->current_picture->linesize[0] >> 1;
+    uint16_t *dst    = (uint16_t *)frame->data[0];
+    const int stride =             frame->linesize[0] >> 1;
     unsigned int bitstream_size, bytestream_size, wordstream_size, extra,
                  bytestream_offset, wordstream_offset;
@@ -435,7 +437,7 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
     bytestream2_init(&f->g, buf + bytestream_offset,
                      length - bytestream_offset);
 
-    init_mv(f);
+    init_mv(f, frame->linesize[0]);
 
     for (y = 0; y < height; y += 8) {
         for (x = 0; x < width; x += 8)
@@ -494,12 +496,12 @@ static int decode_i_block(FourXContext *f, int16_t *block)
     return 0;
 }
 
-static inline void idct_put(FourXContext *f, int x, int y)
+static inline void idct_put(FourXContext *f, AVFrame *frame, int x, int y)
 {
     int16_t (*block)[64] = f->block;
-    int stride           = f->current_picture->linesize[0] >> 1;
+    int stride           = frame->linesize[0] >> 1;
     int i;
-    uint16_t *dst = ((uint16_t*)f->current_picture->data[0]) + y * stride + x;
+    uint16_t *dst = ((uint16_t*)frame->data[0]) + y * stride + x;
 
     for (i = 0; i < 4; i++) {
         block[i][0] += 0x80 * 8 * 8;
@@ -647,14 +649,14 @@ static int mix(int c0, int c1)
     return red / 3 * 1024 + green / 3 * 32 + blue / 3;
 }
 
-static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
+static int decode_i2_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, int length)
 {
     int x, y, x2, y2;
     const int width  = f->avctx->width;
     const int height = f->avctx->height;
     const int mbs    = (FFALIGN(width, 16) >> 4) * (FFALIGN(height, 16) >> 4);
-    uint16_t *dst    = (uint16_t*)f->current_picture->data[0];
-    const int stride =            f->current_picture->linesize[0]>>1;
+    uint16_t *dst    = (uint16_t*)frame->data[0];
+    const int stride =            frame->linesize[0]>>1;
     GetByteContext g3;
 
     if (length < mbs * 8) {
@@ -693,7 +695,7 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
     return 0;
 }
 
-static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
+static int decode_i_frame(FourXContext *f, AVFrame *frame, const uint8_t *buf, int length)
 {
     int x, y, ret;
     const int width  = f->avctx->width;
@@ -747,7 +749,7 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
             if ((ret = decode_i_mb(f)) < 0)
                 return ret;
 
-            idct_put(f, x, y);
+            idct_put(f, frame, x, y);
         }
     }
@@ -764,7 +766,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     int buf_size          = avpkt->size;
     FourXContext *const f = avctx->priv_data;
     AVFrame *picture      = data;
-    AVFrame *p;
     int i, frame_4cc, frame_size, ret;
 
     frame_4cc = AV_RL32(buf);
@@ -825,43 +826,34 @@ static int decode_frame(AVCodecContext *avctx, void *data,
         frame_size = buf_size - 12;
     }
 
-    FFSWAP(AVFrame*, f->current_picture, f->last_picture);
-    p                  = f->current_picture;
-    avctx->coded_frame = p;
-
     // alternatively we would have to use our own buffer management
     avctx->flags |= CODEC_FLAG_EMU_EDGE;
 
-    if (p->data[0])
-        avctx->release_buffer(avctx, p);
-
-    p->reference = 1;
-    if ((ret = ff_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_get_buffer(avctx, picture, AV_GET_BUFFER_FLAG_REF)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
     if (frame_4cc == AV_RL32("ifr2")) {
-        p->pict_type = AV_PICTURE_TYPE_I;
-        if ((ret = decode_i2_frame(f, buf - 4, frame_size + 4)) < 0)
+        picture->pict_type = AV_PICTURE_TYPE_I;
+        if ((ret = decode_i2_frame(f, picture, buf - 4, frame_size + 4)) < 0)
             return ret;
     } else if (frame_4cc == AV_RL32("ifrm")) {
-        p->pict_type = AV_PICTURE_TYPE_I;
-        if ((ret = decode_i_frame(f, buf, frame_size)) < 0)
+        picture->pict_type = AV_PICTURE_TYPE_I;
+        if ((ret = decode_i_frame(f, picture, buf, frame_size)) < 0)
             return ret;
     } else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) {
         if (!f->last_picture->data[0]) {
-            f->last_picture->reference = 1;
-            if ((ret = ff_get_buffer(avctx, f->last_picture)) < 0) {
+            if ((ret = ff_get_buffer(avctx, f->last_picture,
+                                     AV_GET_BUFFER_FLAG_REF)) < 0) {
                 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                 return ret;
             }
             memset(f->last_picture->data[0], 0, avctx->height * FFABS(f->last_picture->linesize[0]));
         }
 
-        p->pict_type = AV_PICTURE_TYPE_P;
-        if ((ret = decode_p_frame(f, buf, frame_size)) < 0)
+        picture->pict_type = AV_PICTURE_TYPE_P;
+        if ((ret = decode_p_frame(f, picture, buf, frame_size)) < 0)
            return ret;
     } else if (frame_4cc == AV_RL32("snd_")) {
         av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n",
@@ -871,9 +863,11 @@ static int decode_frame(AVCodecContext *avctx, void *data,
                buf_size);
     }
 
-    p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
+    picture->key_frame = picture->pict_type == AV_PICTURE_TYPE_I;
 
-    *picture   = *p;
+    av_frame_unref(f->last_picture);
+    if ((ret = av_frame_ref(f->last_picture, picture)) < 0)
+        return ret;
     *got_frame = 1;
 
     emms_c();
@@ -900,13 +894,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
     else
         avctx->pix_fmt = AV_PIX_FMT_BGR555;
 
-    f->current_picture = avcodec_alloc_frame();
-    f->last_picture    = avcodec_alloc_frame();
-    if (!f->current_picture || !f->last_picture) {
-        avcodec_free_frame(&f->current_picture);
-        avcodec_free_frame(&f->last_picture);
+    f->last_picture = av_frame_alloc();
+    if (!f->last_picture)
         return AVERROR(ENOMEM);
-    }
 
     return 0;
 }
@@ -924,12 +914,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
         f->cfrm[i].allocated_size = 0;
     }
     ff_free_vlc(&f->pre_vlc);
-    if (f->current_picture->data[0])
-        avctx->release_buffer(avctx, f->current_picture);
-    if (f->last_picture->data[0])
-        avctx->release_buffer(avctx, f->last_picture);
-    avcodec_free_frame(&f->current_picture);
-    avcodec_free_frame(&f->last_picture);
+    av_frame_free(&f->last_picture);
 
     return 0;
 }

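The 4xm hunks above show the ownership model this commit introduces for inter-coded decoders: decode into the caller's frame, then retain a private reference for prediction. Stripped to its skeleton (a sketch of the recurring pattern, not the exact 4xm code):

    /* AV_GET_BUFFER_FLAG_REF: we will keep the frame beyond this call */
    if ((ret = ff_get_buffer(avctx, picture, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    /* ... decode into picture, reading from f->last_picture ... */

    av_frame_unref(f->last_picture);               /* drop the previous ref */
    if ((ret = av_frame_ref(f->last_picture, picture)) < 0)
        return ret;                                /* now share the new one */
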
View File

@@ -46,7 +46,6 @@ static const enum AVPixelFormat pixfmt_rgb24[] = {
 
 typedef struct EightBpsContext {
     AVCodecContext *avctx;
-    AVFrame pic;
 
     unsigned char planes;
     unsigned char planemap[4];
@@ -57,6 +56,7 @@ typedef struct EightBpsContext {
 static int decode_frame(AVCodecContext *avctx, void *data,
                         int *got_frame, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
     EightBpsContext * const c = avctx->priv_data;
@@ -71,12 +71,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     unsigned char *planemap = c->planemap;
     int ret;
 
-    if (c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
-
-    c->pic.reference    = 0;
-    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
-    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -96,8 +91,8 @@ static int decode_frame(AVCodecContext *avctx, void *data,
 
         /* Decode a plane */
         for (row = 0; row < height; row++) {
-            pixptr     = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
-            pixptr_end = pixptr + c->pic.linesize[0];
+            pixptr     = frame->data[0] + row * frame->linesize[0] + planemap[p];
+            pixptr_end = pixptr + frame->linesize[0];
             dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
             /* Decode a row of this plane */
             while (dlen > 0) {
@@ -134,15 +129,14 @@ static int decode_frame(AVCodecContext *avctx, void *data,
                                              AV_PKT_DATA_PALETTE,
                                              NULL);
         if (pal) {
-            c->pic.palette_has_changed = 1;
+            frame->palette_has_changed = 1;
             memcpy(c->pal, pal, AVPALETTE_SIZE);
         }
 
-        memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
+        memcpy (frame->data[1], c->pal, AVPALETTE_SIZE);
     }
 
     *got_frame = 1;
-    *(AVFrame*)data = c->pic;
 
     /* always report that the buffer was completely consumed */
     return buf_size;
@@ -153,7 +147,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     EightBpsContext * const c = avctx->priv_data;
 
     c->avctx = avctx;
-    c->pic.data[0] = NULL;
 
     switch (avctx->bits_per_coded_sample) {
     case 8:
@@ -192,23 +185,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
     return 0;
 }
 
-static av_cold int decode_end(AVCodecContext *avctx)
-{
-    EightBpsContext * const c = avctx->priv_data;
-
-    if (c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
-
-    return 0;
-}
-
 AVCodec ff_eightbps_decoder = {
     .name           = "8bps",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_8BPS,
     .priv_data_size = sizeof(EightBpsContext),
     .init           = decode_init,
-    .close          = decode_end,
     .decode         = decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("QuickTime 8BPS video"),

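With refcounting, a decoder with no inter-frame state needs no AVFrame in its private context and no .close cleanup at all; the whole 8bps-style pattern reduces to something like this minimal sketch:

    static int decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
    {
        AVFrame *frame = data;    /* caller-supplied, refcounted by lavc */
        int ret;

        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
            return ret;
        /* ... write pixels through frame->data[] / frame->linesize[] ... */
        *got_frame = 1;
        return avpkt->size;
    }
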
View File

@@ -137,7 +137,7 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = buf_size * (is_compr + 1);
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

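For audio decoders the only mechanical change is the new third argument to ff_get_buffer(); the sizing contract is unchanged: set AVFrame.nb_samples first and get_buffer2() allocates the channel planes to match. Sketch (the sample count is hypothetical):

    frame->nb_samples = samples_this_packet;        /* hypothetical count */
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    /* frame->extended_data[ch] now has room for nb_samples samples each */
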
View File

@@ -179,8 +179,9 @@ static int frame_configure_elements(AVCodecContext *avctx)
     }
 
     /* get output buffer */
+    av_frame_unref(ac->frame);
     ac->frame->nb_samples = 2048;
-    if ((ret = ff_get_buffer(avctx, ac->frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, ac->frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -29,12 +29,13 @@
 #include <string.h>
 
 #include "avcodec.h"
+#include "internal.h"
 #include "msrledec.h"
 
 typedef struct AascContext {
     AVCodecContext *avctx;
     GetByteContext gb;
-    AVFrame frame;
+    AVFrame *frame;
 } AascContext;
 
 static av_cold int aasc_decode_init(AVCodecContext *avctx)
@@ -45,6 +46,10 @@ static av_cold int aasc_decode_init(AVCodecContext *avctx)
 
     avctx->pix_fmt = AV_PIX_FMT_BGR24;
 
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
+
     return 0;
 }
@@ -57,9 +62,7 @@ static int aasc_decode_frame(AVCodecContext *avctx,
     AascContext *s = avctx->priv_data;
     int compr, i, stride, ret;
 
-    s->frame.reference = 1;
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
-    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
@@ -71,13 +74,13 @@ static int aasc_decode_frame(AVCodecContext *avctx,
     case 0:
         stride = (avctx->width * 3 + 3) & ~3;
         for (i = avctx->height - 1; i >= 0; i--) {
-            memcpy(s->frame.data[0] + i * s->frame.linesize[0], buf, avctx->width * 3);
+            memcpy(s->frame->data[0] + i * s->frame->linesize[0], buf, avctx->width * 3);
             buf += stride;
         }
         break;
     case 1:
         bytestream2_init(&s->gb, buf, buf_size);
-        ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb);
+        ff_msrle_decode(avctx, (AVPicture*)s->frame, 8, &s->gb);
         break;
     default:
         av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
@@ -85,7 +88,8 @@ static int aasc_decode_frame(AVCodecContext *avctx,
     }
 
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
+        return ret;
 
     /* report that the buffer was completely consumed */
     return buf_size;
@@ -95,9 +99,7 @@ static av_cold int aasc_decode_end(AVCodecContext *avctx)
 {
     AascContext *s = avctx->priv_data;
 
-    /* release the last frame */
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_free(&s->frame);
 
     return 0;
 }

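ff_reget_buffer() is the internal successor to avctx->reget_buffer(): it preserves the previous contents for delta updates while making the frame writable (copying the data if the buffer is still shared with the caller). The recurring shape, sketched:

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;
    /* ... apply this packet's changes on top of the old image ... */
    if ((ret = av_frame_ref(data, s->frame)) < 0)  /* hand out a new ref */
        return ret;
    *got_frame = 1;
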
View File

@@ -1371,7 +1371,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = s->num_blocks * 256;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -609,7 +609,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = nb_samples;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -141,7 +141,7 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = num_blocks * BLOCK_SAMPLES;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -283,7 +283,7 @@ static int decode_element(AVCodecContext *avctx, AVFrame *frame, int ch_index,
     if (!alac->nb_samples) {
         /* get output buffer */
         frame->nb_samples = output_samples;
-        if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return ret;
         }

View File

@@ -1462,7 +1462,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
 
     /* get output buffer */
     frame->nb_samples = ctx->cur_frame_length;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -945,7 +945,7 @@ static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = AMR_BLOCK_SIZE;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -1092,7 +1092,7 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = 4 * AMRWB_SFR_SIZE_16k;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -26,9 +26,10 @@
 
 #include "avcodec.h"
 #include "bytestream.h"
+#include "internal.h"
 
 typedef struct AnmContext {
-    AVFrame frame;
+    AVFrame *frame;
     int palette[AVPALETTE_COUNT];
     GetByteContext gb;
     int x;  ///< x coordinate position
@@ -41,7 +42,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
 
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
 
-    s->frame.reference = 1;
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
+
     bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
     if (bytestream2_get_bytes_left(&s->gb) < 16 * 8 + 4 * 256)
         return AVERROR_INVALIDDATA;
@@ -113,12 +117,12 @@ static int decode_frame(AVCodecContext *avctx,
     uint8_t *dst, *dst_end;
     int count, ret;
 
-    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0){
+    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0){
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    dst     = s->frame.data[0];
-    dst_end = s->frame.data[0] + s->frame.linesize[0]*avctx->height;
+    dst     = s->frame->data[0];
+    dst_end = s->frame->data[0] + s->frame->linesize[0]*avctx->height;
 
     bytestream2_init(&s->gb, avpkt->data, buf_size);
@@ -136,7 +140,7 @@ static int decode_frame(AVCodecContext *avctx,
     do {
         /* if statements are ordered by probability */
 #define OP(gb, pixel, count) \
-    op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame.linesize[0])
+    op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame->linesize[0])
 
         int type = bytestream2_get_byte(&s->gb);
         count    = type & 0x7F;
@@ -168,18 +172,20 @@ static int decode_frame(AVCodecContext *avctx,
         }
     } while (bytestream2_get_bytes_left(&s->gb) > 0);
 
-    memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+    memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);
 
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
+        return ret;
+
     return buf_size;
 }
 
 static av_cold int decode_end(AVCodecContext *avctx)
 {
     AnmContext *s = avctx->priv_data;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+
+    av_frame_free(&s->frame);
+
     return 0;
 }

View File

@@ -25,6 +25,7 @@
  */
 
 #include "libavutil/common.h"
+#include "libavutil/frame.h"
 #include "libavutil/lfg.h"
 #include "avcodec.h"
 #include "cga_data.h"
@@ -49,7 +50,7 @@ static const uint8_t ansi_to_cga[16] = {
 };
 
 typedef struct {
-    AVFrame frame;
+    AVFrame *frame;
     int x;                /**< x cursor position (pixels) */
     int y;                /**< y cursor position (pixels) */
    int sx;               /**< saved x cursor position (pixels) */
@@ -77,6 +78,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
     AnsiContext *s = avctx->priv_data;
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
 
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
+
     /* defaults */
     s->font        = ff_vga16_font;
     s->font_height = 16;
@@ -101,11 +106,11 @@ static void hscroll(AVCodecContext *avctx)
     i = 0;
 
     for (; i < avctx->height - s->font_height; i++)
-        memcpy(s->frame.data[0] + i * s->frame.linesize[0],
-               s->frame.data[0] + (i + s->font_height) * s->frame.linesize[0],
+        memcpy(s->frame->data[0] + i * s->frame->linesize[0],
+               s->frame->data[0] + (i + s->font_height) * s->frame->linesize[0],
                avctx->width);
     for (; i < avctx->height; i++)
-        memset(s->frame.data[0] + i * s->frame.linesize[0],
+        memset(s->frame->data[0] + i * s->frame->linesize[0],
             DEFAULT_BG_COLOR, avctx->width);
 }
@@ -114,7 +119,7 @@ static void erase_line(AVCodecContext * avctx, int xoffset, int xlength)
     AnsiContext *s = avctx->priv_data;
     int i;
     for (i = 0; i < s->font_height; i++)
-        memset(s->frame.data[0] + (s->y + i)*s->frame.linesize[0] + xoffset,
+        memset(s->frame->data[0] + (s->y + i)*s->frame->linesize[0] + xoffset,
             DEFAULT_BG_COLOR, xlength);
 }
@@ -123,7 +128,7 @@ static void erase_screen(AVCodecContext *avctx)
     AnsiContext *s = avctx->priv_data;
     int i;
     for (i = 0; i < avctx->height; i++)
-        memset(s->frame.data[0] + i * s->frame.linesize[0], DEFAULT_BG_COLOR, avctx->width);
+        memset(s->frame->data[0] + i * s->frame->linesize[0], DEFAULT_BG_COLOR, avctx->width);
     s->x = s->y = 0;
 }
@@ -144,8 +149,8 @@ static void draw_char(AVCodecContext *avctx, int c)
         FFSWAP(int, fg, bg);
     if ((s->attributes & ATTR_CONCEALED))
         fg = bg;
-    ff_draw_pc_font(s->frame.data[0] + s->y * s->frame.linesize[0] + s->x,
-                    s->frame.linesize[0], s->font, s->font_height, c, fg, bg);
+    ff_draw_pc_font(s->frame->data[0] + s->y * s->frame->linesize[0] + s->x,
+                    s->frame->linesize[0], s->font, s->font_height, c, fg, bg);
     s->x += FONT_WIDTH;
     if (s->x >= avctx->width) {
         s->x = 0;
@@ -220,17 +225,16 @@ static int execute_code(AVCodecContext * avctx, int c)
             av_log_ask_for_sample(avctx, "unsupported screen mode\n");
         }
         if (width != avctx->width || height != avctx->height) {
-            if (s->frame.data[0])
-                avctx->release_buffer(avctx, &s->frame);
+            av_frame_unref(s->frame);
             avcodec_set_dimensions(avctx, width, height);
-            ret = ff_get_buffer(avctx, &s->frame);
+            ret = ff_get_buffer(avctx, s->frame, AV_GET_BUFFER_FLAG_REF);
             if (ret < 0) {
                 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                 return ret;
             }
-            s->frame.pict_type           = AV_PICTURE_TYPE_I;
-            s->frame.palette_has_changed = 1;
-            memcpy(s->frame.data[1], ff_cga_palette, 16 * 4);
+            s->frame->pict_type           = AV_PICTURE_TYPE_I;
+            s->frame->palette_has_changed = 1;
+            memcpy(s->frame->data[1], ff_cga_palette, 16 * 4);
             erase_screen(avctx);
         } else if (c == 'l') {
             erase_screen(avctx);
@@ -241,13 +245,13 @@ static int execute_code(AVCodecContext * avctx, int c)
         case 0:
             erase_line(avctx, s->x, avctx->width - s->x);
             if (s->y < avctx->height - s->font_height)
-                memset(s->frame.data[0] + (s->y + s->font_height)*s->frame.linesize[0],
-                    DEFAULT_BG_COLOR, (avctx->height - s->y - s->font_height)*s->frame.linesize[0]);
+                memset(s->frame->data[0] + (s->y + s->font_height)*s->frame->linesize[0],
+                    DEFAULT_BG_COLOR, (avctx->height - s->y - s->font_height)*s->frame->linesize[0]);
             break;
         case 1:
             erase_line(avctx, 0, s->x);
             if (s->y > 0)
-                memset(s->frame.data[0], DEFAULT_BG_COLOR, s->y * s->frame.linesize[0]);
+                memset(s->frame->data[0], DEFAULT_BG_COLOR, s->y * s->frame->linesize[0]);
             break;
         case 2:
             erase_screen(avctx);
@@ -320,19 +324,19 @@ static int decode_frame(AVCodecContext *avctx,
     const uint8_t *buf_end   = buf+buf_size;
     int ret, i, count;
 
-    ret = avctx->reget_buffer(avctx, &s->frame);
+    ret = ff_reget_buffer(avctx, s->frame);
     if (ret < 0){
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
     if (!avctx->frame_number) {
-        memset(s->frame.data[0], 0, avctx->height * FFABS(s->frame.linesize[0]));
-        memset(s->frame.data[1], 0, AVPALETTE_SIZE);
+        memset(s->frame->data[0], 0, avctx->height * FFABS(s->frame->linesize[0]));
+        memset(s->frame->data[1], 0, AVPALETTE_SIZE);
     }
 
-    s->frame.pict_type           = AV_PICTURE_TYPE_I;
-    s->frame.palette_has_changed = 1;
-    memcpy(s->frame.data[1], ff_cga_palette, 16 * 4);
+    s->frame->pict_type           = AV_PICTURE_TYPE_I;
+    s->frame->palette_has_changed = 1;
+    memcpy(s->frame->data[1], ff_cga_palette, 16 * 4);
 
     while(buf < buf_end) {
         switch(s->state) {
@@ -416,15 +420,16 @@ static int decode_frame(AVCodecContext *avctx,
     }
 
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
+        return ret;
     return buf_size;
 }
 
 static av_cold int decode_close(AVCodecContext *avctx)
 {
     AnsiContext *s = avctx->priv_data;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+
+    av_frame_free(&s->frame);
     return 0;
 }

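A mid-stream resize can no longer recycle the old buffer in place: the decoder's frame is unreferenced and re-allocated at the new size, again with AV_GET_BUFFER_FLAG_REF because the decoder holds on to it between packets. The distilled pattern from the ansi hunk above:

    av_frame_unref(s->frame);
    avcodec_set_dimensions(avctx, width, height);
    if ((ret = ff_get_buffer(avctx, s->frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;
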
View File

@@ -907,7 +907,7 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = blockstodecode;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -183,14 +183,14 @@ static inline int decode_mb(ASV1Context *a, int16_t block[6][64])
     return 0;
 }
 
-static inline void idct_put(ASV1Context *a, int mb_x, int mb_y)
+static inline void idct_put(ASV1Context *a, AVFrame *frame, int mb_x, int mb_y)
 {
     int16_t (*block)[64] = a->block;
-    int linesize = a->picture.linesize[0];
+    int linesize = frame->linesize[0];
 
-    uint8_t *dest_y  = a->picture.data[0] + (mb_y * 16 * linesize)              + mb_x * 16;
-    uint8_t *dest_cb = a->picture.data[1] + (mb_y *  8 * a->picture.linesize[1]) + mb_x *  8;
-    uint8_t *dest_cr = a->picture.data[2] + (mb_y *  8 * a->picture.linesize[2]) + mb_x *  8;
+    uint8_t *dest_y  = frame->data[0] + (mb_y * 16 * linesize)           + mb_x * 16;
+    uint8_t *dest_cb = frame->data[1] + (mb_y *  8 * frame->linesize[1]) + mb_x *  8;
+    uint8_t *dest_cr = frame->data[2] + (mb_y *  8 * frame->linesize[2]) + mb_x *  8;
 
     a->dsp.idct_put(dest_y                 , linesize, block[0]);
     a->dsp.idct_put(dest_y              + 8, linesize, block[1]);
@@ -198,8 +198,8 @@ static inline void idct_put(ASV1Context *a, int mb_x, int mb_y)
     a->dsp.idct_put(dest_y + 8*linesize + 8, linesize, block[3]);
 
     if (!(a->avctx->flags&CODEC_FLAG_GRAY)) {
-        a->dsp.idct_put(dest_cb, a->picture.linesize[1], block[4]);
-        a->dsp.idct_put(dest_cr, a->picture.linesize[2], block[5]);
+        a->dsp.idct_put(dest_cb, frame->linesize[1], block[4]);
+        a->dsp.idct_put(dest_cr, frame->linesize[2], block[5]);
     }
 }
@@ -210,15 +210,10 @@ static int decode_frame(AVCodecContext *avctx,
     ASV1Context * const a = avctx->priv_data;
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
-    AVFrame *picture   = data;
-    AVFrame * const p  = &a->picture;
+    AVFrame * const p  = data;
     int mb_x, mb_y, ret;
 
-    if (p->data[0])
-        avctx->release_buffer(avctx, p);
-
-    p->reference = 0;
-    if ((ret = ff_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -245,7 +240,7 @@ static int decode_frame(AVCodecContext *avctx,
             if ((ret = decode_mb(a, a->block)) < 0)
                 return ret;
 
-            idct_put(a, mb_x, mb_y);
+            idct_put(a, p, mb_x, mb_y);
         }
     }
@@ -255,7 +250,7 @@ static int decode_frame(AVCodecContext *avctx,
             if ((ret = decode_mb(a, a->block)) < 0)
                 return ret;
 
-            idct_put(a, mb_x, mb_y);
+            idct_put(a, p, mb_x, mb_y);
         }
     }
@@ -265,11 +260,10 @@ static int decode_frame(AVCodecContext *avctx,
             if ((ret = decode_mb(a, a->block)) < 0)
                 return ret;
 
-            idct_put(a, mb_x, mb_y);
+            idct_put(a, p, mb_x, mb_y);
         }
     }
 
-    *picture   = a->picture;
     *got_frame = 1;
 
     emms_c();
@@ -280,7 +274,6 @@ static int decode_frame(AVCodecContext *avctx,
 static av_cold int decode_init(AVCodecContext *avctx)
 {
     ASV1Context * const a = avctx->priv_data;
-    AVFrame *p = &a->picture;
     const int scale = avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
     int i;
@@ -304,11 +297,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
         a->intra_matrix[i] = 64 * scale * ff_mpeg1_default_intra_matrix[index] / a->inv_qscale;
     }
 
-    p->qstride      = a->mb_width;
-    p->qscale_table = av_malloc(p->qstride * a->mb_height);
-    p->quality      = (32 * scale + a->inv_qscale / 2) / a->inv_qscale;
-    memset(p->qscale_table, p->quality, p->qstride * a->mb_height);
-
     return 0;
 }
@@ -317,12 +305,8 @@ static av_cold int decode_end(AVCodecContext *avctx)
     ASV1Context * const a = avctx->priv_data;
 
     av_freep(&a->bitstream_buffer);
-    av_freep(&a->picture.qscale_table);
     a->bitstream_buffer_size = 0;
 
-    if (a->picture.data[0])
-        avctx->release_buffer(avctx, &a->picture);
-
     return 0;
 }

View File

@@ -287,7 +287,7 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = AT1_SU_SAMPLES;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -812,7 +812,7 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
 
     /* get output buffer */
     frame->nb_samples = SAMPLES_PER_FRAME;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -27,16 +27,8 @@
 #include "internal.h"
 #include "libavutil/internal.h"
 
-typedef struct AuraDecodeContext {
-    AVCodecContext *avctx;
-    AVFrame frame;
-} AuraDecodeContext;
-
 static av_cold int aura_decode_init(AVCodecContext *avctx)
 {
-    AuraDecodeContext *s = avctx->priv_data;
-    s->avctx = avctx;
-
     /* width needs to be divisible by 4 for this codec to work */
     if (avctx->width & 0x3)
         return AVERROR(EINVAL);
@@ -49,7 +41,7 @@ static int aura_decode_frame(AVCodecContext *avctx,
                              void *data, int *got_frame,
                              AVPacket *pkt)
 {
-    AuraDecodeContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     uint8_t *Y, *U, *V;
     uint8_t val;
     int x, y, ret;
@@ -67,19 +59,14 @@ static int aura_decode_frame(AVCodecContext *avctx,
     /* pixel data starts 48 bytes in, after 3x16-byte tables */
     buf += 48;
 
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
-    s->frame.reference = 0;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
-    Y = s->frame.data[0];
-    U = s->frame.data[1];
-    V = s->frame.data[2];
+    Y = frame->data[0];
+    U = frame->data[1];
+    V = frame->data[2];
 
     /* iterate through each line in the height */
     for (y = 0; y < avctx->height; y++) {
@@ -102,34 +89,21 @@ static int aura_decode_frame(AVCodecContext *avctx,
             Y[1] = Y[ 0] + delta_table[val & 0xF];
             Y   += 2; U++; V++;
         }
-        Y += s->frame.linesize[0] -  avctx->width;
-        U += s->frame.linesize[1] - (avctx->width >> 1);
-        V += s->frame.linesize[2] - (avctx->width >> 1);
+        Y += frame->linesize[0] -  avctx->width;
+        U += frame->linesize[1] - (avctx->width >> 1);
+        V += frame->linesize[2] - (avctx->width >> 1);
     }
 
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
 
     return pkt->size;
 }
 
-static av_cold int aura_decode_end(AVCodecContext *avctx)
-{
-    AuraDecodeContext *s = avctx->priv_data;
-
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-
-    return 0;
-}
-
 AVCodec ff_aura2_decoder = {
     .name           = "aura2",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_AURA2,
-    .priv_data_size = sizeof(AuraDecodeContext),
     .init           = aura_decode_init,
-    .close          = aura_decode_end,
     .decode         = aura_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Auravision Aura 2"),

View File

@@ -823,6 +823,7 @@ typedef struct AVPanScan{
 #define FF_QSCALE_TYPE_H264  2
 #define FF_QSCALE_TYPE_VP56  3
 
+#if FF_API_GET_BUFFER
 #define FF_BUFFER_TYPE_INTERNAL 1
 #define FF_BUFFER_TYPE_USER     2 ///< direct rendering buffers (image is (de)allocated by user)
 #define FF_BUFFER_TYPE_SHARED   4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
@@ -832,6 +833,12 @@ typedef struct AVPanScan{
 #define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
 #define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
 #define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
+#endif
+
+/**
+ * The decoder will keep a reference to the frame and may reuse it later.
+ */
+#define AV_GET_BUFFER_FLAG_REF (1 << 0)
 
 /**
  * @defgroup lavc_packet AVPacket
@@ -1859,6 +1866,7 @@ typedef struct AVCodecContext {
      */
     enum AVSampleFormat request_sample_fmt;
 
+#if FF_API_GET_BUFFER
     /**
      * Called at the beginning of each frame to get a buffer for it.
     *
@@ -1918,7 +1926,10 @@ typedef struct AVCodecContext {
      *
      * - encoding: unused
      * - decoding: Set by libavcodec, user can override.
+     *
+     * @deprecated use get_buffer2()
      */
+    attribute_deprecated
     int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
 
     /**
@@ -1929,7 +1940,10 @@ typedef struct AVCodecContext {
      * but not by more than one thread at once, so does not need to be reentrant.
      * - encoding: unused
     * - decoding: Set by libavcodec, user can override.
+     *
+     * @deprecated custom freeing callbacks should be set from get_buffer2()
     */
+    attribute_deprecated
     void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);
 
     /**
@@ -1944,8 +1958,100 @@ typedef struct AVCodecContext {
      * - encoding: unused
     * - decoding: Set by libavcodec, user can override.
      */
+    attribute_deprecated
     int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);
+#endif
+
+    /**
+     * This callback is called at the beginning of each frame to get data
+     * buffer(s) for it. There may be one contiguous buffer for all the data or
+     * there may be a buffer per each data plane or anything in between. Each
+     * buffer must be reference-counted using the AVBuffer API.
+     *
+     * The following fields will be set in the frame before this callback is
+     * called:
+     * - format
+     * - width, height (video only)
+     * - sample_rate, channel_layout, nb_samples (audio only)
+     * Their values may differ from the corresponding values in
+     * AVCodecContext. This callback must use the frame values, not the codec
+     * context values, to calculate the required buffer size.
+     *
+     * This callback must fill the following fields in the frame:
+     * - data[]
+     * - linesize[]
+     * - extended_data:
+     *   * if the data is planar audio with more than 8 channels, then this
+     *     callback must allocate and fill extended_data to contain all pointers
+     *     to all data planes. data[] must hold as many pointers as it can.
+     *     extended_data must be allocated with av_malloc() and will be freed in
+     *     av_frame_unref().
+     *   * otherwise extended_data must point to data
+     * - buf[] must contain references to the buffers that contain the frame
+     *   data.
+     * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+     *   this callback and filled with the extra buffers if there are more
+     *   buffers than buf[] can hold. extended_buf will be freed in
+     *   av_frame_unref().
+     *
+     * If CODEC_CAP_DR1 is not set then get_buffer2() must call
+     * avcodec_default_get_buffer2() instead of providing buffers allocated by
+     * some other means.
+     *
+     * Each data plane must be aligned to the maximum required by the target
+     * CPU.
+     *
+     * @see avcodec_default_get_buffer2()
+     *
+     * Video:
+     *
+     * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+     * (read and/or written to if it is writable) later by libavcodec.
+     *
+     * If CODEC_FLAG_EMU_EDGE is not set in s->flags, the buffer must contain an
+     * edge of the size returned by avcodec_get_edge_width() on all sides.
+     *
+     * avcodec_align_dimensions2() should be used to find the required width and
+     * height, as they normally need to be rounded up to the next multiple of 16.
+     *
+     * If frame multithreading is used and thread_safe_callbacks is set,
+     * this callback may be called from a different thread, but not from more
+     * than one at once. Does not need to be reentrant.
+     *
+     * @see avcodec_align_dimensions2()
+     *
+     * Audio:
+     *
+     * Decoders request a buffer of a particular size by setting
+     * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
+     * however, utilize only part of the buffer by setting AVFrame.nb_samples
+     * to a smaller value in the output frame.
+     *
+     * As a convenience, av_samples_get_buffer_size() and
+     * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
+     * functions to find the required data size and to fill data pointers and
+     * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+     * since all planes must be the same size.
+     *
+     * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+     *
+     * - encoding: unused
+     * - decoding: Set by libavcodec, user can override.
+     */
+    int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
+
+    /**
+     * If non-zero, the decoded audio and video frames returned from
+     * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted
+     * and are valid indefinitely. The caller must free them with
+     * av_frame_unref() when they are not needed anymore.
+     * Otherwise, the decoded frames must not be freed by the caller and are
+     * only valid until the next decode call.
+     *
+     * - encoding: unused
+     * - decoding: set by the caller before avcodec_open2().
+     */
+    int refcounted_frames;
 
     /* - encoding parameters */
     float qcompress;      ///< amount of qscale change between easy & hard scenes (0.0-1.0)
@@ -3209,9 +3315,18 @@ AVCodec *avcodec_find_decoder(enum AVCodecID id);
 */
 AVCodec *avcodec_find_decoder_by_name(const char *name);
 
-int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
-void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
-int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+#if FF_API_GET_BUFFER
+attribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+#endif
+
+/**
+ * The default callback for AVCodecContext.get_buffer2(). It is made public so
+ * it can be called by custom get_buffer2() implementations for decoders without
+ * CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);
 
 /**
  * Return the amount of padding in pixels which the get_buffer callback must
@@ -4147,8 +4262,6 @@ int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
  */
 void avcodec_flush_buffers(AVCodecContext *avctx);
 
-void avcodec_default_free_buffers(AVCodecContext *s);
-
 /**
  * Return codec bits per sample.
 *

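Two usage sketches for the new API surface (my_get_buffer2 is of course a hypothetical name). A custom allocator that does not want to manage buffers itself can simply observe the request and defer to the default implementation, which the documentation above explicitly permits:

    static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
    {
        if (s->codec_type == AVMEDIA_TYPE_VIDEO)
            av_log(s, AV_LOG_DEBUG, "frame request: %dx%d fmt %d, flags %d\n",
                   frame->width, frame->height, frame->format, flags);
        return avcodec_default_get_buffer2(s, frame, flags);
    }

And the caller side of refcounted_frames: with it set, a decoded frame stays valid until the caller unreferences it, instead of only until the next decode call:

    AVFrame *frame = av_frame_alloc();
    int got_frame;

    avctx->refcounted_frames = 1;       /* must be set before avcodec_open2() */
    avctx->get_buffer2       = my_get_buffer2;

    avcodec_decode_video2(avctx, frame, &got_frame, &pkt);
    if (got_frame) {
        /* ... use frame; it remains valid here indefinitely ... */
        av_frame_unref(frame);          /* release when done */
    }
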
View File

@@ -21,6 +21,7 @@
 
 #include "avcodec.h"
 #include "get_bits.h"
+#include "internal.h"
 
 typedef struct {
@@ -59,11 +60,10 @@ avs_decode_frame(AVCodecContext * avctx,
     AvsBlockType type;
     GetBitContext change_map;
 
-    if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, p)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
-    p->reference = 1;
     p->pict_type = AV_PICTURE_TYPE_P;
     p->key_frame = 0;
@@ -149,7 +149,8 @@ avs_decode_frame(AVCodecContext * avctx,
         align_get_bits(&change_map);
     }
 
-    *picture   = avs->picture;
+    if ((ret = av_frame_ref(picture, &avs->picture)) < 0)
+        return ret;
     *got_frame = 1;
 
     return buf_size;
@@ -165,8 +166,7 @@ static av_cold int avs_decode_init(AVCodecContext * avctx)
 static av_cold int avs_decode_end(AVCodecContext *avctx)
 {
     AvsContext *s = avctx->priv_data;
-    if (s->picture.data[0])
-        avctx->release_buffer(avctx, &s->picture);
+    av_frame_unref(&s->picture);
     return 0;
 }

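avs.c illustrates the cleanup split this commit settles on: an AVFrame embedded directly in the private context is only unreferenced in .close (the struct itself is not heap-allocated), while a context holding an av_frame_alloc()'ed pointer frees it. In short:

    av_frame_unref(&s->picture);  /* embedded member: drop buffer refs only  */
    av_frame_free(&s->frame);     /* allocated pointer: unref, free, NULL it */
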
View File

@@ -31,6 +31,7 @@
 #include "avcodec.h"
 #include "bethsoftvideo.h"
 #include "bytestream.h"
+#include "internal.h"
 
 typedef struct BethsoftvidContext {
     AVFrame frame;
@@ -40,9 +41,6 @@ typedef struct BethsoftvidContext {
 static av_cold int bethsoftvid_decode_init(AVCodecContext *avctx)
 {
     BethsoftvidContext *vid = avctx->priv_data;
-    vid->frame.reference = 1;
-    vid->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
-        FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
     return 0;
 }
@@ -75,7 +73,7 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
     int code, ret;
     int yoffset;
 
-    if ((ret = avctx->reget_buffer(avctx, &vid->frame)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, &vid->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
@@ -136,8 +134,10 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
     }
   end:
 
+    if ((ret = av_frame_ref(data, &vid->frame)) < 0)
+        return ret;
+
     *got_frame = 1;
-    *(AVFrame*)data = vid->frame;
 
     return avpkt->size;
 }
@@ -145,8 +145,7 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
 static av_cold int bethsoftvid_decode_end(AVCodecContext *avctx)
 {
     BethsoftvidContext * vid = avctx->priv_data;
-    if(vid->frame.data[0])
-        avctx->release_buffer(avctx, &vid->frame);
+    av_frame_unref(&vid->frame);
     return 0;
 }

View File

@@ -33,7 +33,6 @@
 
 typedef struct BFIContext {
     AVCodecContext *avctx;
-    AVFrame frame;
     uint8_t *dst;
 } BFIContext;
@@ -48,6 +47,7 @@ static av_cold int bfi_decode_init(AVCodecContext *avctx)
 static int bfi_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     GetByteContext g;
     int buf_size    = avpkt->size;
     BFIContext *bfi = avctx->priv_data;
@@ -57,12 +57,7 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data,
     uint32_t *pal;
     int i, j, ret, height = avctx->height;
 
-    if (bfi->frame.data[0])
-        avctx->release_buffer(avctx, &bfi->frame);
-
-    bfi->frame.reference = 1;
-
-    if ((ret = ff_get_buffer(avctx, &bfi->frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -71,14 +66,14 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data,
 
     /* Set frame parameters and palette, if necessary */
     if (!avctx->frame_number) {
-        bfi->frame.pict_type = AV_PICTURE_TYPE_I;
-        bfi->frame.key_frame = 1;
+        frame->pict_type = AV_PICTURE_TYPE_I;
+        frame->key_frame = 1;
         /* Setting the palette */
         if (avctx->extradata_size > 768) {
             av_log(NULL, AV_LOG_ERROR, "Palette is too large.\n");
             return AVERROR_INVALIDDATA;
         }
-        pal = (uint32_t *)bfi->frame.data[1];
+        pal = (uint32_t *)frame->data[1];
         for (i = 0; i < avctx->extradata_size / 3; i++) {
             int shift = 16;
             *pal = 0;
@@ -87,10 +82,10 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data,
                         (avctx->extradata[i * 3 + j] >> 4)) << shift;
             pal++;
         }
-        bfi->frame.palette_has_changed = 1;
+        frame->palette_has_changed = 1;
     } else {
-        bfi->frame.pict_type = AV_PICTURE_TYPE_P;
-        bfi->frame.key_frame = 0;
+        frame->pict_type = AV_PICTURE_TYPE_P;
+        frame->key_frame = 0;
     }
 
     bytestream2_skip(&g, 4); // Unpacked size, not required.
@@ -158,22 +153,20 @@ static int bfi_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     src = bfi->dst;
-    dst = bfi->frame.data[0];
+    dst = frame->data[0];
     while (height--) {
         memcpy(dst, src, avctx->width);
         src += avctx->width;
-        dst += bfi->frame.linesize[0];
+        dst += frame->linesize[0];
     }
     *got_frame = 1;
-    *(AVFrame *)data = bfi->frame;
+
     return buf_size;
 }
 
 static av_cold int bfi_decode_close(AVCodecContext *avctx)
 {
     BFIContext *bfi = avctx->priv_data;
-    if (bfi->frame.data[0])
-        avctx->release_buffer(avctx, &bfi->frame);
     av_free(bfi->dst);
     return 0;
 }

View File

@@ -113,7 +113,7 @@ typedef struct BinkContext {
     AVCodecContext *avctx;
     DSPContext dsp;
     BinkDSPContext bdsp;
-    AVFrame *pic, *last;
+    AVFrame *last;
     int version;              ///< internal Bink file version
     int has_alpha;
     int swap_planes;
@@ -792,8 +792,8 @@ static inline void put_pixels8x8_overlapped(uint8_t *dst, uint8_t *src, int stri
         memcpy(dst + i*stride, tmp + i*8, 8);
 }
-static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
-                              int is_key, int is_chroma)
+static int binkb_decode_plane(BinkContext *c, AVFrame *frame, GetBitContext *gb,
+                              int plane_idx, int is_key, int is_chroma)
 {
     int blk, ret;
     int i, j, bx, by;
@@ -807,13 +807,13 @@ static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
     int ybias = is_key ? -15 : 0;
     int qp;
-    const int stride = c->pic->linesize[plane_idx];
+    const int stride = frame->linesize[plane_idx];
     int bw = is_chroma ? (c->avctx->width  + 15) >> 4 : (c->avctx->width  + 7) >> 3;
     int bh = is_chroma ? (c->avctx->height + 15) >> 4 : (c->avctx->height + 7) >> 3;
     binkb_init_bundles(c);
-    ref_start = c->pic->data[plane_idx];
-    ref_end   = c->pic->data[plane_idx] + (bh * c->pic->linesize[plane_idx] + bw) * 8;
+    ref_start = frame->data[plane_idx];
+    ref_end   = frame->data[plane_idx] + (bh * frame->linesize[plane_idx] + bw) * 8;
     for (i = 0; i < 64; i++)
         coordmap[i] = (i & 7) + (i >> 3) * stride;
@@ -824,7 +824,7 @@ static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
                 return ret;
         }
-        dst = c->pic->data[plane_idx] + 8*by*stride;
+        dst = frame->data[plane_idx] + 8*by*stride;
         for (bx = 0; bx < bw; bx++, dst += 8) {
             blk = binkb_get_value(c, BINKB_SRC_BLOCK_TYPES);
             switch (blk) {
@@ -938,8 +938,8 @@ static int binkb_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
     return 0;
 }
-static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
-                             int is_chroma)
+static int bink_decode_plane(BinkContext *c, AVFrame *frame, GetBitContext *gb,
+                             int plane_idx, int is_chroma)
 {
     int blk, ret;
     int i, j, bx, by;
@@ -952,7 +952,7 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
     LOCAL_ALIGNED_16(int32_t, dctblock, [64]);
     int coordmap[64];
-    const int stride = c->pic->linesize[plane_idx];
+    const int stride = frame->linesize[plane_idx];
     int bw = is_chroma ? (c->avctx->width  + 15) >> 4 : (c->avctx->width  + 7) >> 3;
     int bh = is_chroma ? (c->avctx->height + 15) >> 4 : (c->avctx->height + 7) >> 3;
     int width = c->avctx->width >> is_chroma;
@@ -962,7 +962,7 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
         read_bundle(gb, c, i);
     ref_start = c->last->data[plane_idx] ? c->last->data[plane_idx]
-                                         : c->pic->data[plane_idx];
+                                         : frame->data[plane_idx];
     ref_end   = ref_start
                 + (bw - 1 + c->last->linesize[plane_idx] * (bh - 1)) * 8;
@@ -991,9 +991,9 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
         if (by == bh)
             break;
-        dst  = c->pic->data[plane_idx]  + 8*by*stride;
+        dst  = frame->data[plane_idx]  + 8*by*stride;
         prev = (c->last->data[plane_idx] ? c->last->data[plane_idx]
-                                         : c->pic->data[plane_idx]) + 8*by*stride;
+                                         : frame->data[plane_idx]) + 8*by*stride;
         for (bx = 0; bx < bw; bx++, dst += 8, prev += 8) {
             blk = get_value(c, BINK_SRC_BLOCK_TYPES);
             // 16x16 block type on odd line means part of the already decoded block, so skip it
@@ -1165,30 +1165,30 @@ static int bink_decode_plane(BinkContext *c, GetBitContext *gb, int plane_idx,
 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
 {
     BinkContext * const c = avctx->priv_data;
+    AVFrame *frame = data;
     GetBitContext gb;
     int plane, plane_idx, ret;
     int bits_count = pkt->size << 3;
     if (c->version > 'b') {
-        if(c->pic->data[0])
-            avctx->release_buffer(avctx, c->pic);
-        if ((ret = ff_get_buffer(avctx, c->pic)) < 0) {
+        if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return ret;
         }
     } else {
-        if ((ret = avctx->reget_buffer(avctx, c->pic)) < 0) {
+        if ((ret = ff_reget_buffer(avctx, c->last)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
             return ret;
         }
+        if ((ret = av_frame_ref(frame, c->last)) < 0)
+            return ret;
     }
     init_get_bits(&gb, pkt->data, bits_count);
     if (c->has_alpha) {
         if (c->version >= 'i')
             skip_bits_long(&gb, 32);
-        if ((ret = bink_decode_plane(c, &gb, 3, 0)) < 0)
+        if ((ret = bink_decode_plane(c, frame, &gb, 3, 0)) < 0)
             return ret;
     }
     if (c->version >= 'i')
@@ -1198,10 +1198,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
         plane_idx = (!plane || !c->swap_planes) ? plane : (plane ^ 3);
         if (c->version > 'b') {
-            if ((ret = bink_decode_plane(c, &gb, plane_idx, !!plane)) < 0)
+            if ((ret = bink_decode_plane(c, frame, &gb, plane_idx, !!plane)) < 0)
                 return ret;
         } else {
-            if ((ret = binkb_decode_plane(c, &gb, plane_idx,
+            if ((ret = binkb_decode_plane(c, frame, &gb, plane_idx,
                                           !avctx->frame_number, !!plane)) < 0)
                 return ret;
         }
@@ -1210,11 +1210,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
     }
     emms_c();
-    *got_frame = 1;
-    *(AVFrame*)data = *c->pic;
+    if (c->version > 'b') {
+        av_frame_unref(c->last);
+        if ((ret = av_frame_ref(c->last, frame)) < 0)
+            return ret;
+    }
-    if (c->version > 'b')
-        FFSWAP(AVFrame*, c->pic, c->last);
+    *got_frame = 1;
     /* always report that the buffer was completely consumed */
     return pkt->size;
@@ -1293,13 +1295,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
     }
     c->avctx = avctx;
-    c->pic  = avcodec_alloc_frame();
-    c->last = avcodec_alloc_frame();
-    if (!c->pic || !c->last) {
-        avcodec_free_frame(&c->pic);
-        avcodec_free_frame(&c->last);
+    c->last = av_frame_alloc();
+    if (!c->last)
         return AVERROR(ENOMEM);
-    }
     if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
         return ret;
@@ -1325,12 +1323,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
 {
     BinkContext * const c = avctx->priv_data;
-    if (c->pic->data[0])
-        avctx->release_buffer(avctx, c->pic);
-    if (c->last->data[0])
-        avctx->release_buffer(avctx, c->last);
-    avcodec_free_frame(&c->pic);
-    avcodec_free_frame(&c->last);
+    av_frame_free(&c->last);
     free_bundles(c);
     return 0;
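Bink predicts from the previous picture, so it shows the referencing half of the API: buffers that outlive the call are requested with AV_GET_BUFFER_FLAG_REF, and the context keeps its own reference with av_frame_ref() instead of swapping frame pointers. A condensed sketch of the pattern, assuming a context that keeps one reference frame named last:

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    /* ... decode, predicting from c->last->data[] ... */

    av_frame_unref(c->last);                      /* drop the old reference */
    if ((ret = av_frame_ref(c->last, frame)) < 0) /* keep one for the next packet */
        return ret;
    *got_frame = 1;

The old FFSWAP of c->pic and c->last becomes unnecessary once frames are refcounted: the context only ever holds references, never the buffers themselves, so c->pic disappears entirely.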

View File

@@ -318,7 +318,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     /* get output buffer */
     frame->nb_samples = s->frame_len;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
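The audio decoders in this commit need only one change: ff_get_buffer() gained a flags argument, and an output frame the decoder does not keep is requested with flags 0. A sketch of the shared shape (field names as in the hunks):

    /* applies likewise to the audio paths below: cng, cook, dca, dpcm, bmv audio */
    frame->nb_samples = s->frame_len;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;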

View File

@@ -25,25 +25,13 @@
 #include "internal.h"
 #include "msrledec.h"
-static av_cold int bmp_decode_init(AVCodecContext *avctx)
-{
-    BMPContext *s = avctx->priv_data;
-    avcodec_get_frame_defaults(&s->picture);
-    avctx->coded_frame = &s->picture;
-    return 0;
-}
 static int bmp_decode_frame(AVCodecContext *avctx,
                             void *data, int *got_frame,
                             AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
-    BMPContext *s      = avctx->priv_data;
-    AVFrame *picture   = data;
-    AVFrame *p         = &s->picture;
+    AVFrame *p         = data;
     unsigned int fsize, hsize;
     int width, height;
     unsigned int depth;
@@ -205,11 +193,7 @@ static int bmp_decode_frame(AVCodecContext *avctx,
         return AVERROR_INVALIDDATA;
     }
-    if (p->data[0])
-        avctx->release_buffer(avctx, p);
-    p->reference = 0;
-    if ((ret = ff_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -350,29 +334,15 @@ static int bmp_decode_frame(AVCodecContext *avctx,
         }
     }
-    *picture = s->picture;
     *got_frame = 1;
     return buf_size;
 }
-static av_cold int bmp_decode_end(AVCodecContext *avctx)
-{
-    BMPContext* c = avctx->priv_data;
-    if (c->picture.data[0])
-        avctx->release_buffer(avctx, &c->picture);
-    return 0;
-}
 AVCodec ff_bmp_decoder = {
     .name           = "bmp",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_BMP,
-    .priv_data_size = sizeof(BMPContext),
-    .init           = bmp_decode_init,
-    .close          = bmp_decode_end,
     .decode         = bmp_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("BMP (Windows and OS/2 bitmap)"),

View File

@@ -43,7 +43,6 @@ enum BMVFlags{
 typedef struct BMVDecContext {
     AVCodecContext *avctx;
-    AVFrame pic;
     uint8_t *frame, frame_base[SCREEN_WIDE * (SCREEN_HIGH + 1)];
     uint32_t pal[256];
@@ -198,6 +197,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                         AVPacket *pkt)
 {
     BMVDecContext * const c = avctx->priv_data;
+    AVFrame *frame = data;
     int type, scr_off;
     int i, ret;
     uint8_t *srcptr, *outptr;
@@ -240,11 +240,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         scr_off = 0;
     }
-    if (c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
-    c->pic.reference = 3;
-    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -254,20 +250,19 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         return AVERROR_INVALIDDATA;
     }
-    memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
-    c->pic.palette_has_changed = type & BMV_PALETTE;
-    outptr = c->pic.data[0];
+    memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
+    frame->palette_has_changed = type & BMV_PALETTE;
+    outptr = frame->data[0];
     srcptr = c->frame;
     for (i = 0; i < avctx->height; i++) {
         memcpy(outptr, srcptr, avctx->width);
         srcptr += avctx->width;
-        outptr += c->pic.linesize[0];
+        outptr += frame->linesize[0];
     }
     *got_frame = 1;
-    *(AVFrame*)data = c->pic;
     /* always report that the buffer was completely consumed */
     return pkt->size;
@@ -285,16 +280,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     return 0;
 }
-static av_cold int decode_end(AVCodecContext *avctx)
-{
-    BMVDecContext *c = avctx->priv_data;
-    if (c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
-    return 0;
-}
 static const int bmv_aud_mults[16] = {
     16512, 8256, 4128, 2064, 1032, 516, 258, 192, 129, 88, 64, 56, 48, 40, 36, 32
 };
@@ -328,7 +313,7 @@ static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data,
     /* get output buffer */
     frame->nb_samples = total_blocks * 32;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -356,7 +341,6 @@ AVCodec ff_bmv_video_decoder = {
     .id             = AV_CODEC_ID_BMV_VIDEO,
     .priv_data_size = sizeof(BMVDecContext),
     .init           = decode_init,
-    .close          = decode_end,
     .decode         = decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Discworld II BMV video"),

View File

@@ -21,6 +21,7 @@
 #include "avcodec.h"
 #include "bytestream.h"
+#include "internal.h"
 typedef struct {
     AVFrame pictures[2];
@@ -55,10 +56,9 @@ static av_cold int decode_end(AVCodecContext *avctx)
 {
     C93DecoderContext * const c93 = avctx->priv_data;
-    if (c93->pictures[0].data[0])
-        avctx->release_buffer(avctx, &c93->pictures[0]);
-    if (c93->pictures[1].data[0])
-        avctx->release_buffer(avctx, &c93->pictures[1]);
+    av_frame_unref(&c93->pictures[0]);
+    av_frame_unref(&c93->pictures[1]);
     return 0;
 }
@@ -120,17 +120,13 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     C93DecoderContext * const c93 = avctx->priv_data;
     AVFrame * const newpic = &c93->pictures[c93->currentpic];
     AVFrame * const oldpic = &c93->pictures[c93->currentpic^1];
-    AVFrame *picture = data;
     GetByteContext gb;
     uint8_t *out;
     int stride, ret, i, x, y, b, bt = 0;
     c93->currentpic ^= 1;
-    newpic->reference = 1;
-    newpic->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
-                           FF_BUFFER_HINTS_REUSABLE | FF_BUFFER_HINTS_READABLE;
-    if ((ret = avctx->reget_buffer(avctx, newpic)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, newpic)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
@@ -239,7 +235,8 @@ static int decode_frame(AVCodecContext *avctx, void *data,
         memcpy(newpic->data[1], oldpic->data[1], 256 * 4);
     }
-    *picture = *newpic;
+    if ((ret = av_frame_ref(data, newpic)) < 0)
+        return ret;
     *got_frame = 1;
     return buf_size;
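c93 ping-pongs between two persistent pictures, so it keeps them in the context but switches to ff_reget_buffer(), which supersedes avctx->reget_buffer() and the buffer_hints flags, and then hands the caller a separate reference with av_frame_ref(). A condensed sketch, assuming the two-picture layout above:

    if ((ret = ff_reget_buffer(avctx, newpic)) < 0)
        return ret;

    /* ... decode into newpic, copying unchanged blocks from oldpic ... */

    if ((ret = av_frame_ref(data, newpic)) < 0) /* caller gets its own reference */
        return ret;
    *got_frame = 1;

av_frame_ref() duplicates only the reference, not the pixels, so the struct copy it replaces ('*picture = *newpic') goes away at no cost.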

View File

@@ -736,9 +736,9 @@ av_cold int ff_cavs_init(AVCodecContext *avctx) {
     h->avctx = avctx;
     avctx->pix_fmt= AV_PIX_FMT_YUV420P;
-    h->cur.f    = avcodec_alloc_frame();
-    h->DPB[0].f = avcodec_alloc_frame();
-    h->DPB[1].f = avcodec_alloc_frame();
+    h->cur.f    = av_frame_alloc();
+    h->DPB[0].f = av_frame_alloc();
+    h->DPB[1].f = av_frame_alloc();
     if (!h->cur.f || !h->DPB[0].f || !h->DPB[1].f) {
         ff_cavs_end(avctx);
         return AVERROR(ENOMEM);
@@ -769,15 +769,9 @@ av_cold int ff_cavs_end(AVCodecContext *avctx) {
 av_cold int ff_cavs_end(AVCodecContext *avctx) {
     AVSContext *h = avctx->priv_data;
-    if (h->cur.f->data[0])
-        avctx->release_buffer(avctx, h->cur.f);
-    if (h->DPB[0].f->data[0])
-        avctx->release_buffer(avctx, h->DPB[0].f);
-    if (h->DPB[1].f->data[0])
-        avctx->release_buffer(avctx, h->DPB[1].f);
-    avcodec_free_frame(&h->cur.f);
-    avcodec_free_frame(&h->DPB[0].f);
-    avcodec_free_frame(&h->DPB[1].f);
+    av_frame_free(&h->cur.f);
+    av_frame_free(&h->DPB[0].f);
+    av_frame_free(&h->DPB[1].f);
     av_free(h->top_qp);
     av_free(h->top_mv[0]);

View File

@@ -931,6 +931,8 @@ static int decode_pic(AVSContext *h)
     int skip_count    = -1;
     enum cavs_mb mb_type;
+    av_frame_unref(h->cur.f);
     skip_bits(&h->gb, 16);//bbv_dwlay
     if (h->stc == PIC_PB_START_CODE) {
         h->cur.f->pict_type = get_bits(&h->gb, 2) + AV_PICTURE_TYPE_I;
@@ -956,11 +958,9 @@ static int decode_pic(AVSContext *h)
         if (h->stream_revision > 0)
             skip_bits(&h->gb, 1); //marker_bit
     }
-    /* release last B frame */
-    if (h->cur.f->data[0])
-        h->avctx->release_buffer(h->avctx, h->cur.f);
-    ff_get_buffer(h->avctx, h->cur.f);
+    ff_get_buffer(h->avctx, h->cur.f, h->cur.f->pict_type == AV_PICTURE_TYPE_B ?
+                  0 : AV_GET_BUFFER_FLAG_REF);
     if (!h->edge_emu_buffer) {
         int alloc_size = FFALIGN(FFABS(h->cur.f->linesize[0]) + 32, 32);
@@ -1056,8 +1056,7 @@ static int decode_pic(AVSContext *h)
         } while (ff_cavs_next_mb(h));
     }
     if (h->cur.f->pict_type != AV_PICTURE_TYPE_B) {
-        if (h->DPB[1].f->data[0])
-            h->avctx->release_buffer(h->avctx, h->DPB[1].f);
+        av_frame_unref(h->DPB[1].f);
         FFSWAP(AVSFrame, h->cur, h->DPB[1]);
         FFSWAP(AVSFrame, h->DPB[0], h->DPB[1]);
     }
@@ -1119,19 +1118,15 @@ static int cavs_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     AVSContext *h      = avctx->priv_data;
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
-    AVFrame *picture   = data;
     uint32_t stc       = -1;
-    int input_size;
+    int input_size, ret;
     const uint8_t *buf_end;
     const uint8_t *buf_ptr;
     if (buf_size == 0) {
         if (!h->low_delay && h->DPB[0].f->data[0]) {
             *got_frame = 1;
-            *picture = *h->DPB[0].f;
-            if (h->cur.f->data[0])
-                avctx->release_buffer(avctx, h->cur.f);
-            FFSWAP(AVSFrame, h->cur, h->DPB[0]);
+            av_frame_move_ref(data, h->DPB[0].f);
         }
         return 0;
     }
@@ -1150,10 +1145,8 @@ static int cavs_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
             break;
         case PIC_I_START_CODE:
             if (!h->got_keyframe) {
-                if(h->DPB[0].f->data[0])
-                    avctx->release_buffer(avctx, h->DPB[0].f);
-                if(h->DPB[1].f->data[0])
-                    avctx->release_buffer(avctx, h->DPB[1].f);
+                av_frame_unref(h->DPB[0].f);
+                av_frame_unref(h->DPB[1].f);
                 h->got_keyframe = 1;
             }
         case PIC_PB_START_CODE:
@@ -1167,12 +1160,14 @@ static int cavs_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
             *got_frame = 1;
             if (h->cur.f->pict_type != AV_PICTURE_TYPE_B) {
                 if (h->DPB[1].f->data[0]) {
-                    *picture = *h->DPB[1].f;
+                    if ((ret = av_frame_ref(data, h->DPB[1].f)) < 0)
+                        return ret;
                 } else {
                     *got_frame = 0;
                 }
-            } else
-                *picture = *h->cur.f;
+            } else {
+                av_frame_move_ref(data, h->cur.f);
+            }
             break;
         case EXT_START_CODE:
             //mpeg_decode_extension(avctx, buf_ptr, input_size);
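cavs illustrates how a small DPB interacts with refcounting: frames that enter the DPB are allocated with AV_GET_BUFFER_FLAG_REF, slots are cleared with av_frame_unref(), and output leaves the decoder either as an additional reference or by transferring ownership. A sketch of the output step under those assumptions:

    if (h->cur.f->pict_type != AV_PICTURE_TYPE_B) {
        /* delayed output: the DPB keeps its copy, the caller gets a new ref */
        if ((ret = av_frame_ref(data, h->DPB[1].f)) < 0)
            return ret;
    } else {
        /* a B frame is never referenced again: give the frame away */
        av_frame_move_ref(data, h->cur.f);
    }

av_frame_move_ref() leaves the source frame empty, which is exactly what the decoder wants for frames it will not touch again.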

View File

@@ -64,26 +64,18 @@
 #define CDG_PALETTE_SIZE 16
 typedef struct CDGraphicsContext {
-    AVFrame frame;
+    AVFrame *frame;
     int hscroll;
     int vscroll;
 } CDGraphicsContext;
-static void cdg_init_frame(AVFrame *frame)
-{
-    avcodec_get_frame_defaults(frame);
-    frame->reference = 3;
-    frame->buffer_hints = FF_BUFFER_HINTS_VALID |
-                          FF_BUFFER_HINTS_READABLE |
-                          FF_BUFFER_HINTS_PRESERVE |
-                          FF_BUFFER_HINTS_REUSABLE;
-}
 static av_cold int cdg_decode_init(AVCodecContext *avctx)
 {
     CDGraphicsContext *cc = avctx->priv_data;
-    cdg_init_frame(&cc->frame);
+    cc->frame = av_frame_alloc();
+    if (!cc->frame)
+        return AVERROR(ENOMEM);
     avctx->width  = CDG_FULL_WIDTH;
     avctx->height = CDG_FULL_HEIGHT;
@@ -95,8 +87,8 @@ static av_cold int cdg_decode_init(AVCodecContext *avctx)
 static void cdg_border_preset(CDGraphicsContext *cc, uint8_t *data)
 {
     int y;
-    int lsize    = cc->frame.linesize[0];
-    uint8_t *buf = cc->frame.data[0];
+    int lsize    = cc->frame->linesize[0];
+    uint8_t *buf = cc->frame->data[0];
     int color    = data[0] & 0x0F;
     if (!(data[1] & 0x0F)) {
@@ -120,7 +112,7 @@ static void cdg_load_palette(CDGraphicsContext *cc, uint8_t *data, int low)
     uint16_t color;
     int i;
     int array_offset  = low ? 0 : 8;
-    uint32_t *palette = (uint32_t *) cc->frame.data[1];
+    uint32_t *palette = (uint32_t *) cc->frame->data[1];
     for (i = 0; i < 8; i++) {
         color = (data[2 * i] << 6) + (data[2 * i + 1] & 0x3F);
@@ -129,7 +121,7 @@ static void cdg_load_palette(CDGraphicsContext *cc, uint8_t *data, int low)
         b = ((color     ) & 0x000F) * 17;
         palette[i + array_offset] = r << 16 | g << 8 | b;
     }
-    cc->frame.palette_has_changed = 1;
+    cc->frame->palette_has_changed = 1;
 }
 static int cdg_tile_block(CDGraphicsContext *cc, uint8_t *data, int b)
@@ -138,8 +130,8 @@ static int cdg_tile_block(CDGraphicsContext *cc, uint8_t *data, int b)
     int color;
     int x, y;
     int ai;
-    int stride   = cc->frame.linesize[0];
-    uint8_t *buf = cc->frame.data[0];
+    int stride   = cc->frame->linesize[0];
+    uint8_t *buf = cc->frame->data[0];
     ri = (data[2] & 0x1F) * CDG_TILE_HEIGHT + cc->vscroll;
     ci = (data[3] & 0x3F) * CDG_TILE_WIDTH  + cc->hscroll;
@@ -210,8 +202,8 @@ static void cdg_scroll(CDGraphicsContext *cc, uint8_t *data,
     int color;
     int hscmd, h_off, hinc, vscmd, v_off, vinc;
     int y;
-    int stride   = cc->frame.linesize[0];
-    uint8_t *in  = cc->frame.data[0];
+    int stride   = cc->frame->linesize[0];
+    uint8_t *in  = cc->frame->data[0];
     uint8_t *out = new_frame->data[0];
     color = data[0] & 0x0F;
@@ -239,7 +231,7 @@ static void cdg_scroll(CDGraphicsContext *cc, uint8_t *data,
     if (!hinc && !vinc)
         return;
-    memcpy(new_frame->data[1], cc->frame.data[1], CDG_PALETTE_SIZE * 4);
+    memcpy(new_frame->data[1], cc->frame->data[1], CDG_PALETTE_SIZE * 4);
     for (y = FFMAX(0, vinc); y < FFMIN(CDG_FULL_HEIGHT + vinc, CDG_FULL_HEIGHT); y++)
         memcpy(out + FFMAX(0, hinc) + stride * y,
@@ -274,7 +266,7 @@ static int cdg_decode_frame(AVCodecContext *avctx,
     int ret;
     uint8_t command, inst;
     uint8_t cdg_data[CDG_DATA_SIZE];
-    AVFrame new_frame;
+    AVFrame *frame = data;
     CDGraphicsContext *cc = avctx->priv_data;
     if (buf_size < CDG_MINIMUM_PKT_SIZE) {
@@ -282,13 +274,13 @@ static int cdg_decode_frame(AVCodecContext *avctx,
         return AVERROR(EINVAL);
     }
-    ret = avctx->reget_buffer(avctx, &cc->frame);
+    ret = ff_reget_buffer(avctx, cc->frame);
     if (ret) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
     if (!avctx->frame_number)
-        memset(cc->frame.data[0], 0, cc->frame.linesize[0] * avctx->height);
+        memset(cc->frame->data[0], 0, cc->frame->linesize[0] * avctx->height);
     command = bytestream_get_byte(&buf);
     inst    = bytestream_get_byte(&buf);
@@ -300,8 +292,8 @@ static int cdg_decode_frame(AVCodecContext *avctx,
     switch (inst) {
     case CDG_INST_MEMORY_PRESET:
         if (!(cdg_data[1] & 0x0F))
-            memset(cc->frame.data[0], cdg_data[0] & 0x0F,
-                   cc->frame.linesize[0] * CDG_FULL_HEIGHT);
+            memset(cc->frame->data[0], cdg_data[0] & 0x0F,
+                   cc->frame->linesize[0] * CDG_FULL_HEIGHT);
         break;
     case CDG_INST_LOAD_PAL_LO:
     case CDG_INST_LOAD_PAL_HIGH:
@@ -335,28 +327,33 @@ static int cdg_decode_frame(AVCodecContext *avctx,
             return AVERROR(EINVAL);
         }
-        cdg_init_frame(&new_frame);
-        ret = ff_get_buffer(avctx, &new_frame);
+        ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
         if (ret) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return ret;
         }
-        cdg_scroll(cc, cdg_data, &new_frame, inst == CDG_INST_SCROLL_COPY);
-        avctx->release_buffer(avctx, &cc->frame);
-        cc->frame = new_frame;
+        cdg_scroll(cc, cdg_data, frame, inst == CDG_INST_SCROLL_COPY);
+        av_frame_unref(cc->frame);
+        ret = av_frame_ref(cc->frame, frame);
+        if (ret < 0)
+            return ret;
         break;
     default:
         break;
     }
+    if (!frame->data[0]) {
+        ret = av_frame_ref(frame, cc->frame);
+        if (ret < 0)
+            return ret;
+    }
     *got_frame = 1;
     } else {
         *got_frame = 0;
         buf_size   = 0;
     }
-    *(AVFrame *) data = cc->frame;
     return buf_size;
 }
@@ -364,8 +361,7 @@ static av_cold int cdg_decode_end(AVCodecContext *avctx)
 {
     CDGraphicsContext *cc = avctx->priv_data;
-    if (cc->frame.data[0])
-        avctx->release_buffer(avctx, &cc->frame);
+    av_frame_free(&cc->frame);
     return 0;
 }
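cdgraphics keeps a persistent canvas across packets, so the context frame becomes a heap-allocated AVFrame (av_frame_alloc() in init, av_frame_free() in close) that ff_reget_buffer() refreshes per packet; output is then a reference to the canvas rather than a struct copy. A sketch of the non-scroll path under those assumptions:

    if ((ret = ff_reget_buffer(avctx, cc->frame)) < 0)
        return ret;
    /* ... draw the packet's tiles into cc->frame ... */
    if ((ret = av_frame_ref(frame, cc->frame)) < 0)  /* export a reference */
        return ret;
    *got_frame = 1;

The scroll path allocates the new canvas with AV_GET_BUFFER_FLAG_REF because the context immediately takes a reference to it for the next packet.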

View File

@@ -49,7 +49,6 @@ static av_cold int cdxl_decode_init(AVCodecContext *avctx)
 {
     CDXLVideoContext *c = avctx->priv_data;
-    avcodec_get_frame_defaults(&c->frame);
     c->new_video_size = 0;
     c->avctx          = avctx;
@@ -113,15 +112,15 @@ static void import_format(CDXLVideoContext *c, int linesize, uint8_t *out)
     }
 }
-static void cdxl_decode_rgb(CDXLVideoContext *c)
+static void cdxl_decode_rgb(CDXLVideoContext *c, AVFrame *frame)
 {
-    uint32_t *new_palette = (uint32_t *)c->frame.data[1];
+    uint32_t *new_palette = (uint32_t *)frame->data[1];
     import_palette(c, new_palette);
-    import_format(c, c->frame.linesize[0], c->frame.data[0]);
+    import_format(c, frame->linesize[0], frame->data[0]);
 }
-static void cdxl_decode_ham6(CDXLVideoContext *c)
+static void cdxl_decode_ham6(CDXLVideoContext *c, AVFrame *frame)
 {
     AVCodecContext *avctx = c->avctx;
     uint32_t new_palette[16], r, g, b;
@@ -129,7 +128,7 @@ static void cdxl_decode_ham6(CDXLVideoContext *c)
     int x, y;
     ptr = c->new_video;
-    out = c->frame.data[0];
+    out = frame->data[0];
     import_palette(c, new_palette);
     import_format(c, avctx->width, c->new_video);
@@ -160,11 +159,11 @@ static void cdxl_decode_ham6(CDXLVideoContext *c)
             }
             AV_WL24(out + x * 3, r | g | b);
         }
-        out += c->frame.linesize[0];
+        out += frame->linesize[0];
     }
 }
-static void cdxl_decode_ham8(CDXLVideoContext *c)
+static void cdxl_decode_ham8(CDXLVideoContext *c, AVFrame *frame)
 {
     AVCodecContext *avctx = c->avctx;
     uint32_t new_palette[64], r, g, b;
@@ -172,7 +171,7 @@ static void cdxl_decode_ham8(CDXLVideoContext *c)
     int x, y;
     ptr = c->new_video;
-    out = c->frame.data[0];
+    out = frame->data[0];
     import_palette(c, new_palette);
     import_format(c, avctx->width, c->new_video);
@@ -203,7 +202,7 @@ static void cdxl_decode_ham8(CDXLVideoContext *c)
             }
             AV_WL24(out + x * 3, r | g | b);
         }
-        out += c->frame.linesize[0];
+        out += frame->linesize[0];
     }
 }
@@ -211,7 +210,7 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame, AVPacket *pkt)
 {
     CDXLVideoContext *c = avctx->priv_data;
-    AVFrame * const p = &c->frame;
+    AVFrame * const p = data;
     int ret, w, h, encoding, aligned_width, buf_size = pkt->size;
     const uint8_t *buf = pkt->data;
@@ -259,11 +258,7 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_PATCHWELCOME;
     }
-    if (p->data[0])
-        avctx->release_buffer(avctx, p);
-    p->reference = 0;
-    if ((ret = ff_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -275,14 +270,13 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
         if (!c->new_video)
             return AVERROR(ENOMEM);
         if (c->bpp == 8)
-            cdxl_decode_ham8(c);
+            cdxl_decode_ham8(c, p);
         else
-            cdxl_decode_ham6(c);
+            cdxl_decode_ham6(c, p);
     } else {
-        cdxl_decode_rgb(c);
+        cdxl_decode_rgb(c, p);
     }
     *got_frame = 1;
-    *(AVFrame*)data = c->frame;
     return buf_size;
 }
@@ -292,8 +286,6 @@ static av_cold int cdxl_decode_end(AVCodecContext *avctx)
     CDXLVideoContext *c = avctx->priv_data;
     av_free(c->new_video);
-    if (c->frame.data[0])
-        avctx->release_buffer(avctx, &c->frame);
     return 0;
 }

View File

@@ -37,6 +37,7 @@
 #include "libavutil/common.h"
 #include "libavutil/intreadwrite.h"
 #include "avcodec.h"
+#include "internal.h"
 typedef struct {
@@ -429,10 +430,7 @@ static int cinepak_decode_frame(AVCodecContext *avctx,
     s->data = buf;
     s->size = buf_size;
-    s->frame.reference = 1;
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
-                            FF_BUFFER_HINTS_REUSABLE;
-    if ((ret = avctx->reget_buffer(avctx, &s->frame))) {
+    if ((ret = ff_reget_buffer(avctx, &s->frame))) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
@@ -450,8 +448,10 @@ static int cinepak_decode_frame(AVCodecContext *avctx,
     if (s->palette_video)
         memcpy (s->frame.data[1], s->pal, AVPALETTE_SIZE);
+    if ((ret = av_frame_ref(data, &s->frame)) < 0)
+        return ret;
     *got_frame = 1;
-    *(AVFrame*)data = s->frame;
     /* report that the buffer was completely consumed */
     return buf_size;
@@ -461,8 +461,7 @@ static av_cold int cinepak_decode_end(AVCodecContext *avctx)
 {
     CinepakContext *s = avctx->priv_data;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_unref(&s->frame);
     return 0;
 }
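cinepak shows that an AVFrame embedded by value in the context still works under the new API: ff_reget_buffer() takes it directly, av_frame_ref(data, &s->frame) exports a reference, and close merely drops the context's reference. A sketch of the resulting close callback (context name illustrative):

    static av_cold int xyz_decode_end(AVCodecContext *avctx)
    {
        XYZContext *s = avctx->priv_data;
        av_frame_unref(&s->frame); /* embedded frame: unref, nothing to free */
        return 0;
    }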

View File

@@ -29,19 +29,6 @@
 #include "internal.h"
 #include "put_bits.h"
-typedef struct CLJRContext {
-    AVFrame picture;
-} CLJRContext;
-static av_cold int common_init(AVCodecContext *avctx)
-{
-    CLJRContext * const a = avctx->priv_data;
-    avctx->coded_frame = &a->picture;
-    return 0;
-}
 #if CONFIG_CLJR_DECODER
 static int decode_frame(AVCodecContext *avctx,
                         void *data, int *got_frame,
@@ -49,15 +36,10 @@ static int decode_frame(AVCodecContext *avctx,
 {
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
-    CLJRContext * const a = avctx->priv_data;
     GetBitContext gb;
-    AVFrame *picture = data;
-    AVFrame * const p = &a->picture;
+    AVFrame * const p = data;
     int x, y, ret;
-    if (p->data[0])
-        avctx->release_buffer(avctx, p);
     if (avctx->height <= 0 || avctx->width <= 0) {
         av_log(avctx, AV_LOG_ERROR, "Invalid width or height\n");
         return AVERROR_INVALIDDATA;
@@ -69,8 +51,7 @@ static int decode_frame(AVCodecContext *avctx,
         return AVERROR_INVALIDDATA;
     }
-    p->reference = 0;
-    if ((ret = ff_get_buffer(avctx, p)) < 0) {
+    if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -80,9 +61,9 @@ static int decode_frame(AVCodecContext *avctx,
     init_get_bits(&gb, buf, buf_size * 8);
     for (y = 0; y < avctx->height; y++) {
-        uint8_t *luma = &a->picture.data[0][y * a->picture.linesize[0]];
-        uint8_t *cb   = &a->picture.data[1][y * a->picture.linesize[1]];
-        uint8_t *cr   = &a->picture.data[2][y * a->picture.linesize[2]];
+        uint8_t *luma = &p->data[0][y * p->linesize[0]];
+        uint8_t *cb   = &p->data[1][y * p->linesize[1]];
+        uint8_t *cr   = &p->data[2][y * p->linesize[2]];
         for (x = 0; x < avctx->width; x += 4) {
             luma[3] = get_bits(&gb, 5) << 3;
             luma[2] = get_bits(&gb, 5) << 3;
@@ -94,7 +75,6 @@ static int decode_frame(AVCodecContext *avctx,
         }
     }
-    *picture = a->picture;
     *got_frame = 1;
     return buf_size;
@@ -103,15 +83,6 @@ static int decode_frame(AVCodecContext *avctx,
 static av_cold int decode_init(AVCodecContext *avctx)
 {
     avctx->pix_fmt = AV_PIX_FMT_YUV411P;
-    return common_init(avctx);
-}
-static av_cold int decode_end(AVCodecContext *avctx)
-{
-    CLJRContext *a = avctx->priv_data;
-    if (a->picture.data[0])
-        avctx->release_buffer(avctx, &a->picture);
     return 0;
 }
@@ -119,9 +90,7 @@ AVCodec ff_cljr_decoder = {
     .name           = "cljr",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_CLJR,
-    .priv_data_size = sizeof(CLJRContext),
     .init           = decode_init,
-    .close          = decode_end,
     .decode         = decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Cirrus Logic AccuPak"),
@@ -129,6 +98,19 @@ AVCodec ff_cljr_decoder = {
 #endif
 #if CONFIG_CLJR_ENCODER
+typedef struct CLJRContext {
+    AVFrame picture;
+} CLJRContext;
+static av_cold int encode_init(AVCodecContext *avctx)
+{
+    CLJRContext * const a = avctx->priv_data;
+    avctx->coded_frame = &a->picture;
+    return 0;
+}
 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                         const AVFrame *p, int *got_packet)
 {
@@ -173,7 +155,7 @@ AVCodec ff_cljr_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_CLJR,
     .priv_data_size = sizeof(CLJRContext),
-    .init           = common_init,
+    .init           = encode_init,
     .encode2        = encode_frame,
     .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV411P,
                                                      AV_PIX_FMT_NONE },
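With no cached picture the cljr decoder needs no private state at all, so priv_data_size, init and close drop out of the decoder while the encoder keeps a CLJRContext for avctx->coded_frame. The decoder registration that results is about as small as an AVCodec gets; a sketch with placeholder names:

    AVCodec ff_xyz_decoder = {
        .name         = "xyz",              /* placeholder */
        .type         = AVMEDIA_TYPE_VIDEO,
        .id           = AV_CODEC_ID_NONE,   /* placeholder */
        .decode       = decode_frame,       /* no .priv_data_size/.init/.close */
        .capabilities = CODEC_CAP_DR1,
    };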

View File

@@ -271,18 +271,13 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_picture_ptr, AVPacket *avpkt)
 {
     CLLCContext *ctx = avctx->priv_data;
-    AVFrame *pic = avctx->coded_frame;
+    AVFrame *pic = data;
     uint8_t *src = avpkt->data;
     uint32_t info_tag, info_offset;
     int data_size;
     GetBitContext gb;
     int coding_type, ret;
-    if (pic->data[0])
-        avctx->release_buffer(avctx, pic);
-    pic->reference = 0;
     /* Skip the INFO header if present */
     info_offset = 0;
     info_tag    = AV_RL32(src);
@@ -334,7 +329,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
         avctx->pix_fmt             = AV_PIX_FMT_RGB24;
         avctx->bits_per_raw_sample = 8;
-        ret = ff_get_buffer(avctx, pic);
+        ret = ff_get_buffer(avctx, pic, 0);
         if (ret < 0) {
             av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
             return ret;
@@ -349,7 +344,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
         avctx->pix_fmt             = AV_PIX_FMT_ARGB;
         avctx->bits_per_raw_sample = 8;
-        ret = ff_get_buffer(avctx, pic);
+        ret = ff_get_buffer(avctx, pic, 0);
         if (ret < 0) {
             av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
             return ret;
@@ -369,7 +364,6 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
     pic->pict_type = AV_PICTURE_TYPE_I;
     *got_picture_ptr = 1;
-    *(AVFrame *)data = *pic;
     return avpkt->size;
 }
@@ -378,10 +372,6 @@ static av_cold int cllc_decode_close(AVCodecContext *avctx)
 {
     CLLCContext *ctx = avctx->priv_data;
-    if (avctx->coded_frame->data[0])
-        avctx->release_buffer(avctx, avctx->coded_frame);
-    av_freep(&avctx->coded_frame);
     av_freep(&ctx->swapped_buf);
     return 0;
@@ -398,12 +388,6 @@ static av_cold int cllc_decode_init(AVCodecContext *avctx)
     ff_dsputil_init(&ctx->dsp, avctx);
-    avctx->coded_frame = avcodec_alloc_frame();
-    if (!avctx->coded_frame) {
-        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
-        return AVERROR(ENOMEM);
-    }
     return 0;
 }

View File

@@ -142,7 +142,7 @@ static int cng_decode_frame(AVCodecContext *avctx, void *data,
                               p->excitation, avctx->frame_size, p->order);
     frame->nb_samples = avctx->frame_size;
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -958,7 +958,7 @@ static int cook_decode_frame(AVCodecContext *avctx, void *data,
     /* get output buffer */
     if (q->discarded_packets >= 2) {
         frame->nb_samples = q->samples_per_channel;
-        if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return ret;
         }

View File

@@ -31,7 +31,6 @@
 #include "libavutil/lzo.h"
 typedef struct {
-    AVFrame pic;
     int linelen, height, bpp;
     unsigned int decomp_size;
     unsigned char* decomp_buf;
@@ -150,12 +149,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         return AVERROR_INVALIDDATA;
     }
-    if (c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
-    c->pic.reference = 1;
-    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE |
-                          FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
-    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
+    if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -186,36 +180,35 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     // flip upside down, add difference frame
     if (buf[0] & 1) { // keyframe
-        c->pic.pict_type = AV_PICTURE_TYPE_I;
-        c->pic.key_frame = 1;
+        picture->pict_type = AV_PICTURE_TYPE_I;
+        picture->key_frame = 1;
         switch (c->bpp) {
         case 16:
-            copy_frame_16(&c->pic, c->decomp_buf, c->linelen, c->height);
+            copy_frame_16(picture, c->decomp_buf, c->linelen, c->height);
            break;
         case 32:
-            copy_frame_32(&c->pic, c->decomp_buf, c->linelen, c->height);
+            copy_frame_32(picture, c->decomp_buf, c->linelen, c->height);
            break;
         default:
-            copy_frame_default(&c->pic, c->decomp_buf, FFALIGN(c->linelen, 4),
+            copy_frame_default(picture, c->decomp_buf, FFALIGN(c->linelen, 4),
                                c->linelen, c->height);
         }
     } else {
-        c->pic.pict_type = AV_PICTURE_TYPE_P;
-        c->pic.key_frame = 0;
+        picture->pict_type = AV_PICTURE_TYPE_P;
+        picture->key_frame = 0;
         switch (c->bpp) {
         case 16:
-            add_frame_16(&c->pic, c->decomp_buf, c->linelen, c->height);
+            add_frame_16(picture, c->decomp_buf, c->linelen, c->height);
            break;
         case 32:
-            add_frame_32(&c->pic, c->decomp_buf, c->linelen, c->height);
+            add_frame_32(picture, c->decomp_buf, c->linelen, c->height);
            break;
         default:
-            add_frame_default(&c->pic, c->decomp_buf, FFALIGN(c->linelen, 4),
+            add_frame_default(picture, c->decomp_buf, FFALIGN(c->linelen, 4),
                               c->linelen, c->height);
         }
     }
-    *picture = c->pic;
     *got_frame = 1;
     return buf_size;
 }
@@ -234,7 +227,6 @@ static av_cold int decode_init(AVCodecContext *avctx) {
         return AVERROR_INVALIDDATA;
     }
     c->bpp = avctx->bits_per_coded_sample;
-    c->pic.data[0] = NULL;
     c->linelen = avctx->width * avctx->bits_per_coded_sample / 8;
     c->height = avctx->height;
     stride = c->linelen;
@@ -252,8 +244,6 @@ static av_cold int decode_end(AVCodecContext *avctx) {
 static av_cold int decode_end(AVCodecContext *avctx) {
     CamStudioContext *c = avctx->priv_data;
     av_freep(&c->decomp_buf);
-    if (c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
     return 0;
 }

View File

@@ -40,7 +40,6 @@
 typedef struct CyuvDecodeContext {
     AVCodecContext *avctx;
     int width, height;
-    AVFrame frame;
 } CyuvDecodeContext;
 static av_cold int cyuv_decode_init(AVCodecContext *avctx)
@@ -65,6 +64,7 @@ static int cyuv_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
     CyuvDecodeContext *s=avctx->priv_data;
+    AVFrame *frame = data;
     unsigned char *y_plane;
     unsigned char *u_plane;
@@ -101,26 +101,21 @@ static int cyuv_decode_frame(AVCodecContext *avctx,
     /* pixel data starts 48 bytes in, after 3x16-byte tables */
     stream_ptr = 48;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
-    s->frame.reference = 0;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    y_plane = s->frame.data[0];
-    u_plane = s->frame.data[1];
-    v_plane = s->frame.data[2];
+    y_plane = frame->data[0];
+    u_plane = frame->data[1];
+    v_plane = frame->data[2];
     /* iterate through each line in the height */
     for (y_ptr = 0, u_ptr = 0, v_ptr = 0;
-         y_ptr < (s->height * s->frame.linesize[0]);
-         y_ptr += s->frame.linesize[0] - s->width,
-         u_ptr += s->frame.linesize[1] - s->width / 4,
-         v_ptr += s->frame.linesize[2] - s->width / 4) {
+         y_ptr < (s->height * frame->linesize[0]);
+         y_ptr += frame->linesize[0] - s->width,
+         u_ptr += frame->linesize[1] - s->width / 4,
+         v_ptr += frame->linesize[2] - s->width / 4) {
         /* reset predictors */
         cur_byte = buf[stream_ptr++];
@@ -164,21 +159,10 @@ static int cyuv_decode_frame(AVCodecContext *avctx,
     }
     *got_frame = 1;
-    *(AVFrame*)data= s->frame;
     return buf_size;
 }
-static av_cold int cyuv_decode_end(AVCodecContext *avctx)
-{
-    CyuvDecodeContext *s = avctx->priv_data;
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
-    return 0;
-}
 #if CONFIG_AURA_DECODER
 AVCodec ff_aura_decoder = {
     .name           = "aura",
@@ -186,7 +170,6 @@ AVCodec ff_aura_decoder = {
     .id             = AV_CODEC_ID_AURA,
     .priv_data_size = sizeof(CyuvDecodeContext),
     .init           = cyuv_decode_init,
-    .close          = cyuv_decode_end,
     .decode         = cyuv_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Auravision AURA"),
@@ -200,7 +183,6 @@ AVCodec ff_cyuv_decoder = {
     .id             = AV_CODEC_ID_CYUV,
     .priv_data_size = sizeof(CyuvDecodeContext),
     .init           = cyuv_decode_init,
-    .close          = cyuv_decode_end,
     .decode         = cyuv_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("Creative YUV (CYUV)"),

View File

@@ -1836,7 +1836,7 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
     /* get output buffer */
     frame->nb_samples = 256 * (s->sample_blocks / 8);
-    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

View File

@@ -28,8 +28,6 @@
 #include "libavutil/mem.h"
 typedef struct DfaContext {
-    AVFrame pic;
     uint32_t pal[256];
     uint8_t *frame_buf;
 } DfaContext;
@@ -311,6 +309,7 @@ static int dfa_decode_frame(AVCodecContext *avctx,
                             void *data, int *got_frame,
                             AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     DfaContext *s = avctx->priv_data;
     GetByteContext gb;
     const uint8_t *buf = avpkt->data;
@@ -319,10 +318,7 @@
     int ret;
     int i, pal_elems;
-    if (s->pic.data[0])
-        avctx->release_buffer(avctx, &s->pic);
-    if ((ret = ff_get_buffer(avctx, &s->pic))) {
+    if ((ret = ff_get_buffer(avctx, frame, 0))) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -340,7 +336,7 @@
             s->pal[i] = bytestream2_get_be24(&gb) << 2;
             s->pal[i] |= (s->pal[i] >> 6) & 0x333;
         }
-        s->pic.palette_has_changed = 1;
+        frame->palette_has_changed = 1;
     } else if (chunk_type <= 9) {
         if (decoder[chunk_type - 2](&gb, s->frame_buf, avctx->width, avctx->height)) {
             av_log(avctx, AV_LOG_ERROR, "Error decoding %s chunk\n",
@@ -355,16 +351,15 @@
     }
     buf = s->frame_buf;
-    dst = s->pic.data[0];
+    dst = frame->data[0];
     for (i = 0; i < avctx->height; i++) {
         memcpy(dst, buf, avctx->width);
-        dst += s->pic.linesize[0];
+        dst += frame->linesize[0];
         buf += avctx->width;
     }
-    memcpy(s->pic.data[1], s->pal, sizeof(s->pal));
+    memcpy(frame->data[1], s->pal, sizeof(s->pal));
     *got_frame = 1;
-    *(AVFrame*)data = s->pic;
     return avpkt->size;
 }
@@ -373,9 +368,6 @@ static av_cold int dfa_decode_end(AVCodecContext *avctx)
 {
     DfaContext *s = avctx->priv_data;
-    if (s->pic.data[0])
-        avctx->release_buffer(avctx, &s->pic);
     av_freep(&s->frame_buf);
     return 0;

View File

@ -34,7 +34,6 @@
typedef struct DNXHDContext { typedef struct DNXHDContext {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame picture;
GetBitContext gb; GetBitContext gb;
int cid; ///< compression id int cid; ///< compression id
unsigned int width, height; unsigned int width, height;
@ -63,9 +62,6 @@ static av_cold int dnxhd_decode_init(AVCodecContext *avctx)
DNXHDContext *ctx = avctx->priv_data; DNXHDContext *ctx = avctx->priv_data;
ctx->avctx = avctx; ctx->avctx = avctx;
avctx->coded_frame = &ctx->picture;
ctx->picture.type = AV_PICTURE_TYPE_I;
ctx->picture.key_frame = 1;
return 0; return 0;
} }
@ -100,7 +96,8 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, int cid)
return 0; return 0;
} }
static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_size, int first_field) static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
const uint8_t *buf, int buf_size, int first_field)
{ {
static const uint8_t header_prefix[] = { 0x00, 0x00, 0x02, 0x80, 0x01 }; static const uint8_t header_prefix[] = { 0x00, 0x00, 0x02, 0x80, 0x01 };
int i, cid; int i, cid;
@ -114,8 +111,8 @@ static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_si
} }
if (buf[5] & 2) { /* interlaced */ if (buf[5] & 2) { /* interlaced */
ctx->cur_field = buf[5] & 1; ctx->cur_field = buf[5] & 1;
ctx->picture.interlaced_frame = 1; frame->interlaced_frame = 1;
ctx->picture.top_field_first = first_field ^ ctx->cur_field; frame->top_field_first = first_field ^ ctx->cur_field;
av_log(ctx->avctx, AV_LOG_DEBUG, "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field); av_log(ctx->avctx, AV_LOG_DEBUG, "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field);
} }
@ -158,11 +155,11 @@ static int dnxhd_decode_header(DNXHDContext *ctx, const uint8_t *buf, int buf_si
av_dlog(ctx->avctx, "mb width %d, mb height %d\n", ctx->mb_width, ctx->mb_height); av_dlog(ctx->avctx, "mb width %d, mb height %d\n", ctx->mb_width, ctx->mb_height);
if ((ctx->height+15)>>4 == ctx->mb_height && ctx->picture.interlaced_frame) if ((ctx->height+15)>>4 == ctx->mb_height && frame->interlaced_frame)
ctx->height <<= 1; ctx->height <<= 1;
if (ctx->mb_height > 68 || if (ctx->mb_height > 68 ||
(ctx->mb_height<<ctx->picture.interlaced_frame) > (ctx->height+15)>>4) { (ctx->mb_height << frame->interlaced_frame) > (ctx->height+15)>>4) {
av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height); av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height);
return -1; return -1;
} }
@ -262,11 +259,11 @@ static void dnxhd_decode_dct_block_10(DNXHDContext *ctx, int16_t *block,
dnxhd_decode_dct_block(ctx, block, n, qscale, 6, 8, 4); dnxhd_decode_dct_block(ctx, block, n, qscale, 6, 8, 4);
} }
static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y) static int dnxhd_decode_macroblock(DNXHDContext *ctx, AVFrame *frame, int x, int y)
{ {
int shift1 = ctx->bit_depth == 10; int shift1 = ctx->bit_depth == 10;
int dct_linesize_luma = ctx->picture.linesize[0]; int dct_linesize_luma = frame->linesize[0];
int dct_linesize_chroma = ctx->picture.linesize[1]; int dct_linesize_chroma = frame->linesize[1];
uint8_t *dest_y, *dest_u, *dest_v; uint8_t *dest_y, *dest_u, *dest_v;
int dct_y_offset, dct_x_offset; int dct_y_offset, dct_x_offset;
int qscale, i; int qscale, i;
@ -279,19 +276,19 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y)
ctx->decode_dct_block(ctx, ctx->blocks[i], i, qscale); ctx->decode_dct_block(ctx, ctx->blocks[i], i, qscale);
} }
if (ctx->picture.interlaced_frame) { if (frame->interlaced_frame) {
dct_linesize_luma <<= 1; dct_linesize_luma <<= 1;
dct_linesize_chroma <<= 1; dct_linesize_chroma <<= 1;
} }
dest_y = ctx->picture.data[0] + ((y * dct_linesize_luma) << 4) + (x << (4 + shift1)); dest_y = frame->data[0] + ((y * dct_linesize_luma) << 4) + (x << (4 + shift1));
dest_u = ctx->picture.data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1)); dest_u = frame->data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
dest_v = ctx->picture.data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1)); dest_v = frame->data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1));
if (ctx->cur_field) { if (ctx->cur_field) {
dest_y += ctx->picture.linesize[0]; dest_y += frame->linesize[0];
dest_u += ctx->picture.linesize[1]; dest_u += frame->linesize[1];
dest_v += ctx->picture.linesize[2]; dest_v += frame->linesize[2];
} }
dct_y_offset = dct_linesize_luma << 3; dct_y_offset = dct_linesize_luma << 3;
@ -312,7 +309,8 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, int x, int y)
return 0; return 0;
} }
static int dnxhd_decode_macroblocks(DNXHDContext *ctx, const uint8_t *buf, int buf_size) static int dnxhd_decode_macroblocks(DNXHDContext *ctx, AVFrame *frame,
const uint8_t *buf, int buf_size)
{ {
int x, y; int x, y;
for (y = 0; y < ctx->mb_height; y++) { for (y = 0; y < ctx->mb_height; y++) {
@ -322,7 +320,7 @@ static int dnxhd_decode_macroblocks(DNXHDContext *ctx, const uint8_t *buf, int b
init_get_bits(&ctx->gb, buf + ctx->mb_scan_index[y], (buf_size - ctx->mb_scan_index[y]) << 3); init_get_bits(&ctx->gb, buf + ctx->mb_scan_index[y], (buf_size - ctx->mb_scan_index[y]) << 3);
for (x = 0; x < ctx->mb_width; x++) { for (x = 0; x < ctx->mb_width; x++) {
//START_TIMER; //START_TIMER;
dnxhd_decode_macroblock(ctx, x, y); dnxhd_decode_macroblock(ctx, frame, x, y);
//STOP_TIMER("decode macroblock"); //STOP_TIMER("decode macroblock");
} }
} }
@ -337,11 +335,12 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
DNXHDContext *ctx = avctx->priv_data; DNXHDContext *ctx = avctx->priv_data;
AVFrame *picture = data; AVFrame *picture = data;
int first_field = 1; int first_field = 1;
int ret;
av_dlog(avctx, "frame size %d\n", buf_size); av_dlog(avctx, "frame size %d\n", buf_size);
decode_coding_unit: decode_coding_unit:
if (dnxhd_decode_header(ctx, buf, buf_size, first_field) < 0) if (dnxhd_decode_header(ctx, picture, buf, buf_size, first_field) < 0)
return -1; return -1;
if ((avctx->width || avctx->height) && if ((avctx->width || avctx->height) &&
@ -356,24 +355,23 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
avcodec_set_dimensions(avctx, ctx->width, ctx->height); avcodec_set_dimensions(avctx, ctx->width, ctx->height);
if (first_field) { if (first_field) {
if (ctx->picture.data[0]) if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) {
avctx->release_buffer(avctx, &ctx->picture);
if (ff_get_buffer(avctx, &ctx->picture) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return ret;
} }
picture->pict_type = AV_PICTURE_TYPE_I;
picture->key_frame = 1;
} }
dnxhd_decode_macroblocks(ctx, buf + 0x280, buf_size - 0x280); dnxhd_decode_macroblocks(ctx, picture, buf + 0x280, buf_size - 0x280);
if (first_field && ctx->picture.interlaced_frame) { if (first_field && picture->interlaced_frame) {
buf += ctx->cid_table->coding_unit_size; buf += ctx->cid_table->coding_unit_size;
buf_size -= ctx->cid_table->coding_unit_size; buf_size -= ctx->cid_table->coding_unit_size;
first_field = 0; first_field = 0;
goto decode_coding_unit; goto decode_coding_unit;
} }
*picture = ctx->picture;
*got_frame = 1; *got_frame = 1;
return buf_size; return buf_size;
} }
@ -382,8 +380,6 @@ static av_cold int dnxhd_decode_close(AVCodecContext *avctx)
{ {
DNXHDContext *ctx = avctx->priv_data; DNXHDContext *ctx = avctx->priv_data;
if (ctx->picture.data[0])
avctx->release_buffer(avctx, &ctx->picture);
ff_free_vlc(&ctx->ac_vlc); ff_free_vlc(&ctx->ac_vlc);
ff_free_vlc(&ctx->dc_vlc); ff_free_vlc(&ctx->dc_vlc);
ff_free_vlc(&ctx->run_vlc); ff_free_vlc(&ctx->run_vlc);
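
The DNXHD change above is the template for most files in this commit: a decoder that cached an AVFrame in its private context now decodes straight into the frame the caller passes in, and all the release_buffer() bookkeeping disappears. A minimal sketch of the resulting shape, with hypothetical names (the decode helpers are stand-ins for the codec's own):

    #include "avcodec.h"
    #include "internal.h"

    static int decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
    {
        AVFrame *frame = data; /* caller-supplied output frame */
        int ret;

        /* no private AVFrame, no release_buffer(): just request a
         * buffer for this frame and write into it */
        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        frame->pict_type = AV_PICTURE_TYPE_I;
        frame->key_frame = 1;

        /* ... decode avpkt->data into frame->data[] ... */

        *got_frame = 1;
        return avpkt->size;
    }

The dpx, dxtory, eatgq and eatqi hunks below apply the same shape; only the decoders that keep references need more, as sketched after their hunks.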


@ -208,7 +208,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */ /* get output buffer */
frame->nb_samples = out / avctx->channels; frame->nb_samples = out / avctx->channels;
if ((ret = ff_get_buffer(avctx, frame)) < 0) { if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }
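
Audio call sites change the same way, with nb_samples set before the request and 0 passed as the new flags argument since the decoder keeps no reference. A fragment of the pattern, where out_samples is assumed to come from the codec's own packet parsing:

    /* request an output buffer for exactly out_samples samples */
    frame->nb_samples = out_samples;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }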


@ -25,11 +25,6 @@
#include "avcodec.h" #include "avcodec.h"
#include "internal.h" #include "internal.h"
typedef struct DPXContext {
AVFrame picture;
} DPXContext;
static unsigned int read32(const uint8_t **ptr, int is_big) static unsigned int read32(const uint8_t **ptr, int is_big)
{ {
unsigned int temp; unsigned int temp;
@ -58,9 +53,7 @@ static int decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = avpkt->data + avpkt->size; const uint8_t *buf_end = avpkt->data + avpkt->size;
int buf_size = avpkt->size; int buf_size = avpkt->size;
DPXContext *const s = avctx->priv_data; AVFrame *const p = data;
AVFrame *picture = data;
AVFrame *const p = &s->picture;
uint8_t *ptr; uint8_t *ptr;
unsigned int offset; unsigned int offset;
@ -154,13 +147,11 @@ static int decode_frame(AVCodecContext *avctx,
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
if (s->picture.data[0])
avctx->release_buffer(avctx, &s->picture);
if ((ret = av_image_check_size(w, h, 0, avctx)) < 0) if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
return ret; return ret;
if (w != avctx->width || h != avctx->height) if (w != avctx->width || h != avctx->height)
avcodec_set_dimensions(avctx, w, h); avcodec_set_dimensions(avctx, w, h);
if ((ret = ff_get_buffer(avctx, p)) < 0) { if ((ret = ff_get_buffer(avctx, p, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }
@ -212,36 +203,15 @@ static int decode_frame(AVCodecContext *avctx,
break; break;
} }
*picture = s->picture;
*got_frame = 1; *got_frame = 1;
return buf_size; return buf_size;
} }
static av_cold int decode_init(AVCodecContext *avctx)
{
DPXContext *s = avctx->priv_data;
avcodec_get_frame_defaults(&s->picture);
avctx->coded_frame = &s->picture;
return 0;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
DPXContext *s = avctx->priv_data;
if (s->picture.data[0])
avctx->release_buffer(avctx, &s->picture);
return 0;
}
AVCodec ff_dpx_decoder = { AVCodec ff_dpx_decoder = {
.name = "dpx", .name = "dpx",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_DPX, .id = AV_CODEC_ID_DPX,
.priv_data_size = sizeof(DPXContext),
.init = decode_init,
.close = decode_end,
.decode = decode_frame, .decode = decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("DPX image"), .long_name = NULL_IF_CONFIG_SMALL("DPX image"),
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,


@ -281,10 +281,9 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
break; break;
} }
cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if ((res = ff_reget_buffer(avctx, &cin->frame)) < 0) {
if (avctx->reget_buffer(avctx, &cin->frame)) {
av_log(cin->avctx, AV_LOG_ERROR, "delphinecinvideo: reget_buffer() failed to allocate a frame\n"); av_log(cin->avctx, AV_LOG_ERROR, "delphinecinvideo: reget_buffer() failed to allocate a frame\n");
return -1; return res;
} }
memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette)); memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette));
@ -296,8 +295,10 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_table[CIN_PRE_BMP]); FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_table[CIN_PRE_BMP]);
if ((res = av_frame_ref(data, &cin->frame)) < 0)
return res;
*got_frame = 1; *got_frame = 1;
*(AVFrame *)data = cin->frame;
return buf_size; return buf_size;
} }
@ -307,8 +308,7 @@ static av_cold int cinvideo_decode_end(AVCodecContext *avctx)
CinVideoContext *cin = avctx->priv_data; CinVideoContext *cin = avctx->priv_data;
int i; int i;
if (cin->frame.data[0]) av_frame_unref(&cin->frame);
avctx->release_buffer(avctx, &cin->frame);
for (i = 0; i < 3; ++i) for (i = 0; i < 3; ++i)
av_free(cin->bitmap_table[i]); av_free(cin->bitmap_table[i]);
@ -341,7 +341,7 @@ static int cinaudio_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */ /* get output buffer */
frame->nb_samples = avpkt->size - cin->initial_decode_frame; frame->nb_samples = avpkt->size - cin->initial_decode_frame;
if ((ret = ff_get_buffer(avctx, frame)) < 0) { if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }
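
Delphine CIN shows the pattern for decoders that keep drawing into one persistent frame: the buffer_hints/reget_buffer() pair becomes ff_reget_buffer(), and the struct copy into the output becomes a new reference. A sketch under those assumptions (MyContext is hypothetical):

    #include "avcodec.h"
    #include "internal.h"
    #include "libavutil/frame.h"

    typedef struct MyContext {
        AVFrame frame; /* persistent, updated in place each packet */
    } MyContext;

    static int decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
    {
        MyContext *s = avctx->priv_data;
        int ret;

        /* make the persistent frame writable, reallocating if the
         * old buffer is still referenced elsewhere */
        if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
            return ret;

        /* ... apply this packet's changes to s->frame ... */

        /* give the caller its own reference; s->frame keeps one */
        if ((ret = av_frame_ref(data, &s->frame)) < 0)
            return ret;

        *got_frame = 1;
        return avpkt->size;
    }

    static av_cold int decode_end(AVCodecContext *avctx)
    {
        MyContext *s = avctx->priv_data;
        av_frame_unref(&s->frame); /* drop the last reference */
        return 0;
    }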


@ -326,16 +326,12 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
return -1; /* NOTE: we only accept several full frames */ return -1; /* NOTE: we only accept several full frames */
} }
if (s->picture.data[0])
avctx->release_buffer(avctx, &s->picture);
s->picture.reference = 0;
s->picture.key_frame = 1; s->picture.key_frame = 1;
s->picture.pict_type = AV_PICTURE_TYPE_I; s->picture.pict_type = AV_PICTURE_TYPE_I;
avctx->pix_fmt = s->sys->pix_fmt; avctx->pix_fmt = s->sys->pix_fmt;
avctx->time_base = s->sys->time_base; avctx->time_base = s->sys->time_base;
avcodec_set_dimensions(avctx, s->sys->width, s->sys->height); avcodec_set_dimensions(avctx, s->sys->width, s->sys->height);
if (ff_get_buffer(avctx, &s->picture) < 0) { if (ff_get_buffer(avctx, &s->picture, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return -1;
} }
@ -350,7 +346,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
/* return image */ /* return image */
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = s->picture; av_frame_move_ref(data, &s->picture);
/* Determine the codec's sample_aspect ratio from the packet */ /* Determine the codec's sample_aspect ratio from the packet */
vsc_pack = buf + 80*5 + 48 + 5; vsc_pack = buf + 80*5 + 48 + 5;
@ -367,8 +363,7 @@ static int dvvideo_close(AVCodecContext *c)
{ {
DVVideoContext *s = c->priv_data; DVVideoContext *s = c->priv_data;
if (s->picture.data[0]) av_frame_unref(&s->picture);
c->release_buffer(c, &s->picture);
return 0; return 0;
} }
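
DV stages into s->picture but never reads it back, so it can hand the reference over outright instead of duplicating it. A fragment of the new tail of decode_frame():

    *got_frame = 1;
    /* transfer ownership to the caller's frame; s->picture is left
     * blank for the next packet, so no unref is needed here */
    av_frame_move_ref(data, &s->picture);

av_frame_move_ref() is the cheaper choice when the decoder will not touch the frame again; av_frame_ref(), as in the dsicinav hunk above, is for frames the decoder must also keep.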


@ -38,7 +38,7 @@
* Decoder context * Decoder context
*/ */
typedef struct DxaDecContext { typedef struct DxaDecContext {
AVFrame pic, prev; AVFrame prev;
int dsize; int dsize;
uint8_t *decomp_buf; uint8_t *decomp_buf;
@ -48,12 +48,12 @@ typedef struct DxaDecContext {
static const int shift1[6] = { 0, 8, 8, 8, 4, 4 }; static const int shift1[6] = { 0, 8, 8, 8, 4, 4 };
static const int shift2[6] = { 0, 0, 8, 4, 0, 4 }; static const int shift2[6] = { 0, 0, 8, 4, 0, 4 };
static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint8_t *src, uint8_t *ref) static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst,
int stride, uint8_t *src, uint8_t *ref)
{ {
uint8_t *code, *data, *mv, *msk, *tmp, *tmp2; uint8_t *code, *data, *mv, *msk, *tmp, *tmp2;
int i, j, k; int i, j, k;
int type, x, y, d, d2; int type, x, y, d, d2;
int stride = c->pic.linesize[0];
uint32_t mask; uint32_t mask;
code = src + 12; code = src + 12;
@ -191,6 +191,7 @@ static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{ {
AVFrame *frame = data;
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
DxaDecContext * const c = avctx->priv_data; DxaDecContext * const c = avctx->priv_data;
@ -216,17 +217,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
buf_size -= 768+4; buf_size -= 768+4;
} }
if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) { if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }
memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE); memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
c->pic.palette_has_changed = pc; frame->palette_has_changed = pc;
outptr = c->pic.data[0]; outptr = frame->data[0];
srcptr = c->decomp_buf; srcptr = c->decomp_buf;
tmpptr = c->prev.data[0]; tmpptr = c->prev.data[0];
stride = c->pic.linesize[0]; stride = frame->linesize[0];
if(buf[0]=='N' && buf[1]=='U' && buf[2]=='L' && buf[3]=='L') if(buf[0]=='N' && buf[1]=='U' && buf[2]=='L' && buf[3]=='L')
compr = -1; compr = -1;
@ -240,22 +241,22 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
} }
switch(compr){ switch(compr){
case -1: case -1:
c->pic.key_frame = 0; frame->key_frame = 0;
c->pic.pict_type = AV_PICTURE_TYPE_P; frame->pict_type = AV_PICTURE_TYPE_P;
if(c->prev.data[0]) if(c->prev.data[0])
memcpy(c->pic.data[0], c->prev.data[0], c->pic.linesize[0] * avctx->height); memcpy(frame->data[0], c->prev.data[0], frame->linesize[0] * avctx->height);
else{ // Should happen only when first frame is 'NULL' else{ // Should happen only when first frame is 'NULL'
memset(c->pic.data[0], 0, c->pic.linesize[0] * avctx->height); memset(frame->data[0], 0, frame->linesize[0] * avctx->height);
c->pic.key_frame = 1; frame->key_frame = 1;
c->pic.pict_type = AV_PICTURE_TYPE_I; frame->pict_type = AV_PICTURE_TYPE_I;
} }
break; break;
case 2: case 2:
case 3: case 3:
case 4: case 4:
case 5: case 5:
c->pic.key_frame = !(compr & 1); frame->key_frame = !(compr & 1);
c->pic.pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I; frame->pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
for(j = 0; j < avctx->height; j++){ for(j = 0; j < avctx->height; j++){
if(compr & 1){ if(compr & 1){
for(i = 0; i < avctx->width; i++) for(i = 0; i < avctx->width; i++)
@ -269,21 +270,20 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
break; break;
case 12: // ScummVM coding case 12: // ScummVM coding
case 13: case 13:
c->pic.key_frame = 0; frame->key_frame = 0;
c->pic.pict_type = AV_PICTURE_TYPE_P; frame->pict_type = AV_PICTURE_TYPE_P;
decode_13(avctx, c, c->pic.data[0], srcptr, c->prev.data[0]); decode_13(avctx, c, frame->data[0], frame->linesize[0], srcptr, c->prev.data[0]);
break; break;
default: default:
av_log(avctx, AV_LOG_ERROR, "Unknown/unsupported compression type %d\n", buf[4]); av_log(avctx, AV_LOG_ERROR, "Unknown/unsupported compression type %d\n", buf[4]);
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
FFSWAP(AVFrame, c->pic, c->prev); av_frame_unref(&c->prev);
if(c->pic.data[0]) if ((ret = av_frame_ref(&c->prev, frame)) < 0)
avctx->release_buffer(avctx, &c->pic); return ret;
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = c->prev;
/* always report that the buffer was completely consumed */ /* always report that the buffer was completely consumed */
return orig_buf_size; return orig_buf_size;
@ -309,10 +309,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
DxaDecContext * const c = avctx->priv_data; DxaDecContext * const c = avctx->priv_data;
av_freep(&c->decomp_buf); av_freep(&c->decomp_buf);
if(c->prev.data[0]) av_frame_unref(&c->prev);
avctx->release_buffer(avctx, &c->prev);
if(c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
return 0; return 0;
} }
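
DXA is the reference-keeping case: the decoder holds on to the previous output, so it must request an explicitly refcounted buffer and retain its own reference afterwards. Sketch (MyContext hypothetical):

    #include "avcodec.h"
    #include "internal.h"
    #include "libavutil/frame.h"

    typedef struct MyContext {
        AVFrame prev; /* reference to the previously output frame */
    } MyContext;

    static int decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
    {
        AVFrame *frame = data;
        MyContext *s = avctx->priv_data;
        int ret;

        /* the frame outlives this call, so ask for a refcounted
         * buffer explicitly */
        if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
            return ret;

        /* ... predict from s->prev.data[], write frame->data[] ... */

        /* swap the kept reference to the frame just decoded */
        av_frame_unref(&s->prev);
        if ((ret = av_frame_ref(&s->prev, frame)) < 0)
            return ret;

        *got_frame = 1;
        return avpkt->size;
    }

The old FFSWAP-the-structs shuffle is gone: references are copied and dropped instead of whole frames being juggled between fields.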


@ -28,9 +28,6 @@
static av_cold int decode_init(AVCodecContext *avctx) static av_cold int decode_init(AVCodecContext *avctx)
{ {
avctx->pix_fmt = AV_PIX_FMT_YUV420P; avctx->pix_fmt = AV_PIX_FMT_YUV420P;
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
return 0; return 0;
} }
@ -39,21 +36,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt) AVPacket *avpkt)
{ {
int h, w; int h, w;
AVFrame *pic = avctx->coded_frame; AVFrame *pic = data;
const uint8_t *src = avpkt->data; const uint8_t *src = avpkt->data;
uint8_t *Y1, *Y2, *U, *V; uint8_t *Y1, *Y2, *U, *V;
int ret; int ret;
if (pic->data[0])
avctx->release_buffer(avctx, pic);
if (avpkt->size < avctx->width * avctx->height * 3 / 2 + 16) { if (avpkt->size < avctx->width * avctx->height * 3 / 2 + 16) {
av_log(avctx, AV_LOG_ERROR, "packet too small\n"); av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
pic->reference = 0; if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
if ((ret = ff_get_buffer(avctx, pic)) < 0)
return ret; return ret;
pic->pict_type = AV_PICTURE_TYPE_I; pic->pict_type = AV_PICTURE_TYPE_I;
@ -84,28 +77,16 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
} }
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = *pic;
return avpkt->size; return avpkt->size;
} }
static av_cold int decode_close(AVCodecContext *avctx)
{
AVFrame *pic = avctx->coded_frame;
if (pic->data[0])
avctx->release_buffer(avctx, pic);
av_freep(&avctx->coded_frame);
return 0;
}
AVCodec ff_dxtory_decoder = { AVCodec ff_dxtory_decoder = {
.name = "dxtory", .name = "dxtory",
.long_name = NULL_IF_CONFIG_SMALL("Dxtory"), .long_name = NULL_IF_CONFIG_SMALL("Dxtory"),
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_DXTORY, .id = AV_CODEC_ID_DXTORY,
.init = decode_init, .init = decode_init,
.close = decode_close,
.decode = decode_frame, .decode = decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
}; };


@ -69,15 +69,15 @@ static void fill_picture_parameters(struct dxva_context *ctx, const H264Context
ff_dxva2_get_surface_index(ctx, r), ff_dxva2_get_surface_index(ctx, r),
r->long_ref != 0); r->long_ref != 0);
if ((r->f.reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX) if ((r->reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
pp->FieldOrderCntList[i][0] = r->field_poc[0]; pp->FieldOrderCntList[i][0] = r->field_poc[0];
if ((r->f.reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX) if ((r->reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX)
pp->FieldOrderCntList[i][1] = r->field_poc[1]; pp->FieldOrderCntList[i][1] = r->field_poc[1];
pp->FrameNumList[i] = r->long_ref ? r->pic_id : r->frame_num; pp->FrameNumList[i] = r->long_ref ? r->pic_id : r->frame_num;
if (r->f.reference & PICT_TOP_FIELD) if (r->reference & PICT_TOP_FIELD)
pp->UsedForReferenceFlags |= 1 << (2*i + 0); pp->UsedForReferenceFlags |= 1 << (2*i + 0);
if (r->f.reference & PICT_BOTTOM_FIELD) if (r->reference & PICT_BOTTOM_FIELD)
pp->UsedForReferenceFlags |= 1 << (2*i + 1); pp->UsedForReferenceFlags |= 1 << (2*i + 1);
} else { } else {
pp->RefFrameList[i].bPicEntry = 0xff; pp->RefFrameList[i].bPicEntry = 0xff;
@ -230,7 +230,7 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
unsigned plane; unsigned plane;
fill_picture_entry(&slice->RefPicList[list][i], fill_picture_entry(&slice->RefPicList[list][i],
ff_dxva2_get_surface_index(ctx, r), ff_dxva2_get_surface_index(ctx, r),
r->f.reference == PICT_BOTTOM_FIELD); r->reference == PICT_BOTTOM_FIELD);
for (plane = 0; plane < 3; plane++) { for (plane = 0; plane < 3; plane++) {
int w, o; int w, o;
if (plane == 0 && h->luma_weight_flag[list]) { if (plane == 0 && h->luma_weight_flag[list]) {
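
The dxva2 hunk is a mechanical rename that recurs throughout the commit: per-picture metadata moves off the embedded AVFrame onto the Picture itself, so accesses drop the ".f". A trimmed sketch of the fields involved, limited to the members this commit's hunks actually touch:

    typedef struct Picture {
        struct AVFrame f;                 /* the pixel data proper */
        ThreadFrame    tf;                /* progress is reported via this */
        uint32_t      *mb_type;           /* was f.mb_type */
        int16_t      (*motion_val[2])[2]; /* was f.motion_val */
        int8_t        *ref_index[2];      /* was f.ref_index */
        int            reference;         /* was f.reference */
    } Picture;

The error_resilience hunks at the end of this section are the same rename applied wholesale.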


@ -36,9 +36,8 @@
typedef struct CmvContext { typedef struct CmvContext {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame; ///< current AVFrame *last_frame; ///< last
AVFrame last_frame; ///< last AVFrame *last2_frame; ///< second-last
AVFrame last2_frame; ///< second-last
int width, height; int width, height;
unsigned int palette[AVPALETTE_COUNT]; unsigned int palette[AVPALETTE_COUNT];
} CmvContext; } CmvContext;
@ -47,16 +46,27 @@ static av_cold int cmv_decode_init(AVCodecContext *avctx){
CmvContext *s = avctx->priv_data; CmvContext *s = avctx->priv_data;
s->avctx = avctx; s->avctx = avctx;
avctx->pix_fmt = AV_PIX_FMT_PAL8; avctx->pix_fmt = AV_PIX_FMT_PAL8;
s->last_frame = av_frame_alloc();
s->last2_frame = av_frame_alloc();
if (!s->last_frame || !s->last2_frame) {
av_frame_free(&s->last_frame);
av_frame_free(&s->last2_frame);
return AVERROR(ENOMEM);
}
return 0; return 0;
} }
static void cmv_decode_intra(CmvContext * s, const uint8_t *buf, const uint8_t *buf_end){ static void cmv_decode_intra(CmvContext * s, AVFrame *frame,
unsigned char *dst = s->frame.data[0]; const uint8_t *buf, const uint8_t *buf_end)
{
unsigned char *dst = frame->data[0];
int i; int i;
for (i=0; i < s->avctx->height && buf_end - buf >= s->avctx->width; i++) { for (i=0; i < s->avctx->height && buf_end - buf >= s->avctx->width; i++) {
memcpy(dst, buf, s->avctx->width); memcpy(dst, buf, s->avctx->width);
dst += s->frame.linesize[0]; dst += frame->linesize[0];
buf += s->avctx->width; buf += s->avctx->width;
} }
} }
@ -80,7 +90,9 @@ static void cmv_motcomp(unsigned char *dst, int dst_stride,
} }
} }
static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *buf_end){ static void cmv_decode_inter(CmvContext *s, AVFrame *frame, const uint8_t *buf,
const uint8_t *buf_end)
{
const uint8_t *raw = buf + (s->avctx->width*s->avctx->height/16); const uint8_t *raw = buf + (s->avctx->width*s->avctx->height/16);
int x,y,i; int x,y,i;
@ -88,28 +100,28 @@ static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *
for(y=0; y<s->avctx->height/4; y++) for(y=0; y<s->avctx->height/4; y++)
for(x=0; x<s->avctx->width/4 && buf_end - buf > i; x++) { for(x=0; x<s->avctx->width/4 && buf_end - buf > i; x++) {
if (buf[i]==0xFF) { if (buf[i]==0xFF) {
unsigned char *dst = s->frame.data[0] + (y*4)*s->frame.linesize[0] + x*4; unsigned char *dst = frame->data[0] + (y*4)*frame->linesize[0] + x*4;
if (raw+16<buf_end && *raw==0xFF) { /* intra */ if (raw+16<buf_end && *raw==0xFF) { /* intra */
raw++; raw++;
memcpy(dst, raw, 4); memcpy(dst, raw, 4);
memcpy(dst+s->frame.linesize[0], raw+4, 4); memcpy(dst + frame->linesize[0], raw+4, 4);
memcpy(dst+2*s->frame.linesize[0], raw+8, 4); memcpy(dst + 2 * frame->linesize[0], raw+8, 4);
memcpy(dst+3*s->frame.linesize[0], raw+12, 4); memcpy(dst + 3 * frame->linesize[0], raw+12, 4);
raw+=16; raw+=16;
}else if(raw<buf_end) { /* inter using second-last frame as reference */ }else if(raw<buf_end) { /* inter using second-last frame as reference */
int xoffset = (*raw & 0xF) - 7; int xoffset = (*raw & 0xF) - 7;
int yoffset = ((*raw >> 4)) - 7; int yoffset = ((*raw >> 4)) - 7;
if (s->last2_frame.data[0]) if (s->last2_frame->data[0])
cmv_motcomp(s->frame.data[0], s->frame.linesize[0], cmv_motcomp(frame->data[0], frame->linesize[0],
s->last2_frame.data[0], s->last2_frame.linesize[0], s->last2_frame->data[0], s->last2_frame->linesize[0],
x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height); x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
raw++; raw++;
} }
}else{ /* inter using last frame as reference */ }else{ /* inter using last frame as reference */
int xoffset = (buf[i] & 0xF) - 7; int xoffset = (buf[i] & 0xF) - 7;
int yoffset = ((buf[i] >> 4)) - 7; int yoffset = ((buf[i] >> 4)) - 7;
cmv_motcomp(s->frame.data[0], s->frame.linesize[0], cmv_motcomp(frame->data[0], frame->linesize[0],
s->last_frame.data[0], s->last_frame.linesize[0], s->last_frame->data[0], s->last_frame->linesize[0],
x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height); x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
} }
i++; i++;
@ -154,6 +166,8 @@ static int cmv_decode_frame(AVCodecContext *avctx,
int buf_size = avpkt->size; int buf_size = avpkt->size;
CmvContext *s = avctx->priv_data; CmvContext *s = avctx->priv_data;
const uint8_t *buf_end = buf + buf_size; const uint8_t *buf_end = buf + buf_size;
AVFrame *frame = data;
int ret;
if (buf_end - buf < EA_PREAMBLE_SIZE) if (buf_end - buf < EA_PREAMBLE_SIZE)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
@ -166,46 +180,39 @@ static int cmv_decode_frame(AVCodecContext *avctx,
if (av_image_check_size(s->width, s->height, 0, s->avctx)) if (av_image_check_size(s->width, s->height, 0, s->avctx))
return -1; return -1;
/* shuffle */ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
if (s->last2_frame.data[0])
avctx->release_buffer(avctx, &s->last2_frame);
FFSWAP(AVFrame, s->last_frame, s->last2_frame);
FFSWAP(AVFrame, s->frame, s->last_frame);
s->frame.reference = 1;
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
if (ff_get_buffer(avctx, &s->frame)<0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return ret;
} }
memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); memcpy(frame->data[1], s->palette, AVPALETTE_SIZE);
buf += EA_PREAMBLE_SIZE; buf += EA_PREAMBLE_SIZE;
if ((buf[0]&1)) { // subtype if ((buf[0]&1)) { // subtype
cmv_decode_inter(s, buf+2, buf_end); cmv_decode_inter(s, frame, buf+2, buf_end);
s->frame.key_frame = 0; frame->key_frame = 0;
s->frame.pict_type = AV_PICTURE_TYPE_P; frame->pict_type = AV_PICTURE_TYPE_P;
}else{ }else{
s->frame.key_frame = 1; frame->key_frame = 1;
s->frame.pict_type = AV_PICTURE_TYPE_I; frame->pict_type = AV_PICTURE_TYPE_I;
cmv_decode_intra(s, buf+2, buf_end); cmv_decode_intra(s, frame, buf+2, buf_end);
} }
av_frame_unref(s->last2_frame);
av_frame_move_ref(s->last2_frame, s->last_frame);
if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
return ret;
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = s->frame;
return buf_size; return buf_size;
} }
static av_cold int cmv_decode_end(AVCodecContext *avctx){ static av_cold int cmv_decode_end(AVCodecContext *avctx){
CmvContext *s = avctx->priv_data; CmvContext *s = avctx->priv_data;
if (s->frame.data[0])
s->avctx->release_buffer(avctx, &s->frame); av_frame_free(&s->last_frame);
if (s->last_frame.data[0]) av_frame_free(&s->last2_frame);
s->avctx->release_buffer(avctx, &s->last_frame);
if (s->last2_frame.data[0])
s->avctx->release_buffer(avctx, &s->last2_frame);
return 0; return 0;
} }
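
EA CMV keeps two past frames, so its shuffle becomes reference rotation on heap-allocated AVFrames rather than FFSWAP on structs. The lifecycle, condensed from the hunks above:

    /* init: allocate the bookkeeping frames (no image buffers yet) */
    s->last_frame  = av_frame_alloc();
    s->last2_frame = av_frame_alloc();
    if (!s->last_frame || !s->last2_frame)
        return AVERROR(ENOMEM);

    /* per packet, after decoding into the caller's frame:
     * rotate last -> last2, current -> last */
    av_frame_unref(s->last2_frame);                   /* drop oldest */
    av_frame_move_ref(s->last2_frame, s->last_frame); /* shift down */
    if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
        return ret;

    /* close: frees the structs and drops any held references */
    av_frame_free(&s->last_frame);
    av_frame_free(&s->last2_frame);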


@ -45,7 +45,6 @@
typedef struct MadContext { typedef struct MadContext {
AVCodecContext *avctx; AVCodecContext *avctx;
DSPContext dsp; DSPContext dsp;
AVFrame frame;
AVFrame last_frame; AVFrame last_frame;
GetBitContext gb; GetBitContext gb;
void *bitstream_buf; void *bitstream_buf;
@ -78,34 +77,36 @@ static inline void comp(unsigned char *dst, int dst_stride,
dst[j*dst_stride + i] = av_clip_uint8(src[j*src_stride + i] + add); dst[j*dst_stride + i] = av_clip_uint8(src[j*src_stride + i] + add);
} }
static inline void comp_block(MadContext *t, int mb_x, int mb_y, static inline void comp_block(MadContext *t, AVFrame *frame,
int mb_x, int mb_y,
int j, int mv_x, int mv_y, int add) int j, int mv_x, int mv_y, int add)
{ {
if (j < 4) { if (j < 4) {
comp(t->frame.data[0] + (mb_y*16 + ((j&2)<<2))*t->frame.linesize[0] + mb_x*16 + ((j&1)<<3), comp(frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3),
t->frame.linesize[0], frame->linesize[0],
t->last_frame.data[0] + (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame.linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x, t->last_frame.data[0] + (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame.linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x,
t->last_frame.linesize[0], add); t->last_frame.linesize[0], add);
} else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) { } else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) {
int index = j - 3; int index = j - 3;
comp(t->frame.data[index] + (mb_y*8)*t->frame.linesize[index] + mb_x * 8, comp(frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x * 8,
t->frame.linesize[index], frame->linesize[index],
t->last_frame.data[index] + (mb_y * 8 + (mv_y/2))*t->last_frame.linesize[index] + mb_x * 8 + (mv_x/2), t->last_frame.data[index] + (mb_y * 8 + (mv_y/2))*t->last_frame.linesize[index] + mb_x * 8 + (mv_x/2),
t->last_frame.linesize[index], add); t->last_frame.linesize[index], add);
} }
} }
static inline void idct_put(MadContext *t, int16_t *block, int mb_x, int mb_y, int j) static inline void idct_put(MadContext *t, AVFrame *frame, int16_t *block,
int mb_x, int mb_y, int j)
{ {
if (j < 4) { if (j < 4) {
ff_ea_idct_put_c( ff_ea_idct_put_c(
t->frame.data[0] + (mb_y*16 + ((j&2)<<2))*t->frame.linesize[0] + mb_x*16 + ((j&1)<<3), frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3),
t->frame.linesize[0], block); frame->linesize[0], block);
} else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) { } else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) {
int index = j - 3; int index = j - 3;
ff_ea_idct_put_c( ff_ea_idct_put_c(
t->frame.data[index] + (mb_y*8)*t->frame.linesize[index] + mb_x*8, frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x*8,
t->frame.linesize[index], block); frame->linesize[index], block);
} }
} }
@ -179,7 +180,7 @@ static int decode_motion(GetBitContext *gb)
return value; return value;
} }
static void decode_mb(MadContext *s, int inter) static void decode_mb(MadContext *s, AVFrame *frame, int inter)
{ {
int mv_map = 0; int mv_map = 0;
int mv_x, mv_y; int mv_x, mv_y;
@ -199,11 +200,11 @@ static void decode_mb(MadContext *s, int inter)
for (j=0; j<6; j++) { for (j=0; j<6; j++) {
if (mv_map & (1<<j)) { // mv_x and mv_y are guarded by mv_map if (mv_map & (1<<j)) { // mv_x and mv_y are guarded by mv_map
int add = 2*decode_motion(&s->gb); int add = 2*decode_motion(&s->gb);
comp_block(s, s->mb_x, s->mb_y, j, mv_x, mv_y, add); comp_block(s, frame, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
} else { } else {
s->dsp.clear_block(s->block); s->dsp.clear_block(s->block);
decode_block_intra(s, s->block); decode_block_intra(s, s->block);
idct_put(s, s->block, s->mb_x, s->mb_y, j); idct_put(s, frame, s->block, s->mb_x, s->mb_y, j);
} }
} }
} }
@ -225,9 +226,10 @@ static int decode_frame(AVCodecContext *avctx,
int buf_size = avpkt->size; int buf_size = avpkt->size;
const uint8_t *buf_end = buf+buf_size; const uint8_t *buf_end = buf+buf_size;
MadContext *s = avctx->priv_data; MadContext *s = avctx->priv_data;
AVFrame *frame = data;
int width, height; int width, height;
int chunk_type; int chunk_type;
int inter; int inter, ret;
if (buf_size < 17) { if (buf_size < 17) {
av_log(avctx, AV_LOG_ERROR, "Input buffer too small\n"); av_log(avctx, AV_LOG_ERROR, "Input buffer too small\n");
@ -251,16 +253,12 @@ static int decode_frame(AVCodecContext *avctx,
if (av_image_check_size(width, height, 0, avctx) < 0) if (av_image_check_size(width, height, 0, avctx) < 0)
return -1; return -1;
avcodec_set_dimensions(avctx, width, height); avcodec_set_dimensions(avctx, width, height);
if (s->frame.data[0]) av_frame_unref(&s->last_frame);
avctx->release_buffer(avctx, &s->frame);
} }
s->frame.reference = 1; if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
if (!s->frame.data[0]) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
if (ff_get_buffer(avctx, &s->frame) < 0) { return ret;
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
} }
av_fast_padded_malloc(&s->bitstream_buf, &s->bitstream_buf_size, av_fast_padded_malloc(&s->bitstream_buf, &s->bitstream_buf_size,
@ -272,13 +270,15 @@ static int decode_frame(AVCodecContext *avctx,
for (s->mb_y=0; s->mb_y < (avctx->height+15)/16; s->mb_y++) for (s->mb_y=0; s->mb_y < (avctx->height+15)/16; s->mb_y++)
for (s->mb_x=0; s->mb_x < (avctx->width +15)/16; s->mb_x++) for (s->mb_x=0; s->mb_x < (avctx->width +15)/16; s->mb_x++)
decode_mb(s, inter); decode_mb(s, frame, inter);
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = s->frame;
if (chunk_type != MADe_TAG) if (chunk_type != MADe_TAG) {
FFSWAP(AVFrame, s->frame, s->last_frame); av_frame_unref(&s->last_frame);
if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
return ret;
}
return buf_size; return buf_size;
} }
@ -286,10 +286,7 @@ static int decode_frame(AVCodecContext *avctx,
static av_cold int decode_end(AVCodecContext *avctx) static av_cold int decode_end(AVCodecContext *avctx)
{ {
MadContext *t = avctx->priv_data; MadContext *t = avctx->priv_data;
if (t->frame.data[0]) av_frame_unref(&t->last_frame);
avctx->release_buffer(avctx, &t->frame);
if (t->last_frame.data[0])
avctx->release_buffer(avctx, &t->last_frame);
av_free(t->bitstream_buf); av_free(t->bitstream_buf);
return 0; return 0;
} }


@ -39,7 +39,6 @@
typedef struct TgqContext { typedef struct TgqContext {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame;
int width, height; int width, height;
ScanTable scantable; ScanTable scantable;
int qtable[64]; int qtable[64];
@ -105,21 +104,21 @@ static void tgq_decode_block(TgqContext *s, int16_t block[64], GetBitContext *gb
block[0] += 128 << 4; block[0] += 128 << 4;
} }
static void tgq_idct_put_mb(TgqContext *s, int16_t (*block)[64], static void tgq_idct_put_mb(TgqContext *s, int16_t (*block)[64], AVFrame *frame,
int mb_x, int mb_y) int mb_x, int mb_y)
{ {
int linesize = s->frame.linesize[0]; int linesize = frame->linesize[0];
uint8_t *dest_y = s->frame.data[0] + (mb_y * 16 * linesize) + mb_x * 16; uint8_t *dest_y = frame->data[0] + (mb_y * 16 * linesize) + mb_x * 16;
uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8; uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8; uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
ff_ea_idct_put_c(dest_y , linesize, block[0]); ff_ea_idct_put_c(dest_y , linesize, block[0]);
ff_ea_idct_put_c(dest_y + 8, linesize, block[1]); ff_ea_idct_put_c(dest_y + 8, linesize, block[1]);
ff_ea_idct_put_c(dest_y + 8 * linesize , linesize, block[2]); ff_ea_idct_put_c(dest_y + 8 * linesize , linesize, block[2]);
ff_ea_idct_put_c(dest_y + 8 * linesize + 8, linesize, block[3]); ff_ea_idct_put_c(dest_y + 8 * linesize + 8, linesize, block[3]);
if (!(s->avctx->flags & CODEC_FLAG_GRAY)) { if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
ff_ea_idct_put_c(dest_cb, s->frame.linesize[1], block[4]); ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]);
ff_ea_idct_put_c(dest_cr, s->frame.linesize[2], block[5]); ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]);
} }
} }
@ -132,23 +131,24 @@ static inline void tgq_dconly(TgqContext *s, unsigned char *dst,
memset(dst + j * dst_stride, level, 8); memset(dst + j * dst_stride, level, 8);
} }
static void tgq_idct_put_mb_dconly(TgqContext *s, int mb_x, int mb_y, const int8_t *dc) static void tgq_idct_put_mb_dconly(TgqContext *s, AVFrame *frame,
int mb_x, int mb_y, const int8_t *dc)
{ {
int linesize = s->frame.linesize[0]; int linesize = frame->linesize[0];
uint8_t *dest_y = s->frame.data[0] + (mb_y * 16 * linesize) + mb_x * 16; uint8_t *dest_y = frame->data[0] + (mb_y * 16 * linesize) + mb_x * 16;
uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8; uint8_t *dest_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8; uint8_t *dest_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
tgq_dconly(s, dest_y, linesize, dc[0]); tgq_dconly(s, dest_y, linesize, dc[0]);
tgq_dconly(s, dest_y + 8, linesize, dc[1]); tgq_dconly(s, dest_y + 8, linesize, dc[1]);
tgq_dconly(s, dest_y + 8 * linesize, linesize, dc[2]); tgq_dconly(s, dest_y + 8 * linesize, linesize, dc[2]);
tgq_dconly(s, dest_y + 8 * linesize + 8, linesize, dc[3]); tgq_dconly(s, dest_y + 8 * linesize + 8, linesize, dc[3]);
if (!(s->avctx->flags & CODEC_FLAG_GRAY)) { if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
tgq_dconly(s, dest_cb, s->frame.linesize[1], dc[4]); tgq_dconly(s, dest_cb, frame->linesize[1], dc[4]);
tgq_dconly(s, dest_cr, s->frame.linesize[2], dc[5]); tgq_dconly(s, dest_cr, frame->linesize[2], dc[5]);
} }
} }
static void tgq_decode_mb(TgqContext *s, int mb_y, int mb_x) static void tgq_decode_mb(TgqContext *s, AVFrame *frame, int mb_y, int mb_x)
{ {
int mode; int mode;
int i; int i;
@ -160,7 +160,7 @@ static void tgq_decode_mb(TgqContext *s, int mb_y, int mb_x)
init_get_bits(&gb, s->gb.buffer, FFMIN(s->gb.buffer_end - s->gb.buffer, mode) * 8); init_get_bits(&gb, s->gb.buffer, FFMIN(s->gb.buffer_end - s->gb.buffer, mode) * 8);
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++)
tgq_decode_block(s, s->block[i], &gb); tgq_decode_block(s, s->block[i], &gb);
tgq_idct_put_mb(s, s->block, mb_x, mb_y); tgq_idct_put_mb(s, s->block, frame, mb_x, mb_y);
bytestream2_skip(&s->gb, mode); bytestream2_skip(&s->gb, mode);
} else { } else {
if (mode == 3) { if (mode == 3) {
@ -177,7 +177,7 @@ static void tgq_decode_mb(TgqContext *s, int mb_y, int mb_x)
} else { } else {
av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode); av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode);
} }
tgq_idct_put_mb_dconly(s, mb_x, mb_y, dc); tgq_idct_put_mb_dconly(s, frame, mb_x, mb_y, dc);
} }
} }
@ -199,6 +199,7 @@ static int tgq_decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
TgqContext *s = avctx->priv_data; TgqContext *s = avctx->priv_data;
AVFrame *frame = data;
int x, y, ret; int x, y, ret;
int big_endian = AV_RL32(&buf[4]) > 0x000FFFFF; int big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
@ -217,47 +218,32 @@ static int tgq_decode_frame(AVCodecContext *avctx,
if (s->avctx->width!=s->width || s->avctx->height!=s->height) { if (s->avctx->width!=s->width || s->avctx->height!=s->height) {
avcodec_set_dimensions(s->avctx, s->width, s->height); avcodec_set_dimensions(s->avctx, s->width, s->height);
if (s->frame.data[0])
avctx->release_buffer(avctx, &s->frame);
} }
tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb)); tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb));
bytestream2_skip(&s->gb, 3); bytestream2_skip(&s->gb, 3);
if (!s->frame.data[0]) { if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
s->frame.key_frame = 1; av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
s->frame.pict_type = AV_PICTURE_TYPE_I; return ret;
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
} }
frame->key_frame = 1;
frame->pict_type = AV_PICTURE_TYPE_I;
for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++) for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++)
for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++) for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++)
tgq_decode_mb(s, y, x); tgq_decode_mb(s, frame, y, x);
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = s->frame;
return avpkt->size; return avpkt->size;
} }
static av_cold int tgq_decode_end(AVCodecContext *avctx)
{
TgqContext *s = avctx->priv_data;
if (s->frame.data[0])
s->avctx->release_buffer(avctx, &s->frame);
return 0;
}
AVCodec ff_eatgq_decoder = { AVCodec ff_eatgq_decoder = {
.name = "eatgq", .name = "eatgq",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_TGQ, .id = AV_CODEC_ID_TGQ,
.priv_data_size = sizeof(TgqContext), .priv_data_size = sizeof(TgqContext),
.init = tgq_decode_init, .init = tgq_decode_init,
.close = tgq_decode_end,
.decode = tgq_decode_frame, .decode = tgq_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGQ video"), .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGQ video"),


@ -31,6 +31,7 @@
#include "avcodec.h" #include "avcodec.h"
#define BITSTREAM_READER_LE #define BITSTREAM_READER_LE
#include "get_bits.h" #include "get_bits.h"
#include "internal.h"
#include "libavutil/imgutils.h" #include "libavutil/imgutils.h"
#include "libavutil/mem.h" #include "libavutil/mem.h"
@ -39,8 +40,8 @@
typedef struct TgvContext { typedef struct TgvContext {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame;
AVFrame last_frame; AVFrame last_frame;
uint8_t *frame_buffer;
int width,height; int width,height;
uint32_t palette[AVPALETTE_COUNT]; uint32_t palette[AVPALETTE_COUNT];
@ -138,8 +139,8 @@ static int unpack(const uint8_t *src, const uint8_t *src_end,
* Decode inter-frame * Decode inter-frame
* @return 0 on success, -1 on critical buffer underflow * @return 0 on success, -1 on critical buffer underflow
*/ */
static int tgv_decode_inter(TgvContext *s, const uint8_t *buf, static int tgv_decode_inter(TgvContext *s, AVFrame *frame,
const uint8_t *buf_end) const uint8_t *buf, const uint8_t *buf_end)
{ {
int num_mvs; int num_mvs;
int num_blocks_raw; int num_blocks_raw;
@ -237,22 +238,13 @@ static int tgv_decode_inter(TgvContext *s, const uint8_t *buf,
for (j = 0; j < 4; j++) for (j = 0; j < 4; j++)
for (i = 0; i < 4; i++) for (i = 0; i < 4; i++)
s->frame.data[0][(y * 4 + j) * s->frame.linesize[0] + (x * 4 + i)] = frame->data[0][(y * 4 + j) * frame->linesize[0] + (x * 4 + i)] =
src[j * src_stride + i]; src[j * src_stride + i];
} }
return 0; return 0;
} }
/** release AVFrame buffers if allocated */
static void cond_release_buffer(AVFrame *pic)
{
if (pic->data[0]) {
av_freep(&pic->data[0]);
av_free(pic->data[1]);
}
}
static int tgv_decode_frame(AVCodecContext *avctx, static int tgv_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame, void *data, int *got_frame,
AVPacket *avpkt) AVPacket *avpkt)
@ -261,6 +253,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
int buf_size = avpkt->size; int buf_size = avpkt->size;
TgvContext *s = avctx->priv_data; TgvContext *s = avctx->priv_data;
const uint8_t *buf_end = buf + buf_size; const uint8_t *buf_end = buf + buf_size;
AVFrame *frame = data;
int chunk_type, ret; int chunk_type, ret;
chunk_type = AV_RL32(&buf[0]); chunk_type = AV_RL32(&buf[0]);
@ -277,8 +270,8 @@ static int tgv_decode_frame(AVCodecContext *avctx,
s->height = AV_RL16(&buf[2]); s->height = AV_RL16(&buf[2]);
if (s->avctx->width != s->width || s->avctx->height != s->height) { if (s->avctx->width != s->width || s->avctx->height != s->height) {
avcodec_set_dimensions(s->avctx, s->width, s->height); avcodec_set_dimensions(s->avctx, s->width, s->height);
cond_release_buffer(&s->frame); av_freep(&s->frame_buffer);
cond_release_buffer(&s->last_frame); av_frame_unref(&s->last_frame);
} }
pal_count = AV_RL16(&buf[6]); pal_count = AV_RL16(&buf[6]);
@ -292,46 +285,46 @@ static int tgv_decode_frame(AVCodecContext *avctx,
if ((ret = av_image_check_size(s->width, s->height, 0, avctx)) < 0) if ((ret = av_image_check_size(s->width, s->height, 0, avctx)) < 0)
return ret; return ret;
/* shuffle */ if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
FFSWAP(AVFrame, s->frame, s->last_frame); return ret;
if (!s->frame.data[0]) {
s->frame.reference = 1;
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
s->frame.linesize[0] = s->width;
s->frame.data[0] = av_malloc(s->width * s->height); memcpy(frame->data[1], s->palette, AVPALETTE_SIZE);
if (!s->frame.data[0])
return AVERROR(ENOMEM);
s->frame.data[1] = av_malloc(AVPALETTE_SIZE);
if (!s->frame.data[1]) {
av_freep(&s->frame.data[0]);
return AVERROR(ENOMEM);
}
}
memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
if (chunk_type == kVGT_TAG) { if (chunk_type == kVGT_TAG) {
s->frame.key_frame = 1; int y;
s->frame.pict_type = AV_PICTURE_TYPE_I; frame->key_frame = 1;
if (unpack(buf, buf_end, s->frame.data[0], s->avctx->width, s->avctx->height) < 0) { frame->pict_type = AV_PICTURE_TYPE_I;
if (!s->frame_buffer &&
!(s->frame_buffer = av_malloc(s->width * s->height)))
return AVERROR(ENOMEM);
if (unpack(buf, buf_end, s->frame_buffer, s->avctx->width, s->avctx->height) < 0) {
av_log(avctx, AV_LOG_WARNING, "truncated intra frame\n"); av_log(avctx, AV_LOG_WARNING, "truncated intra frame\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
for (y = 0; y < s->height; y++)
memcpy(frame->data[0] + y * frame->linesize[0],
s->frame_buffer + y * s->width,
s->width);
} else { } else {
if (!s->last_frame.data[0]) { if (!s->last_frame.data[0]) {
av_log(avctx, AV_LOG_WARNING, "inter frame without corresponding intra frame\n"); av_log(avctx, AV_LOG_WARNING, "inter frame without corresponding intra frame\n");
return buf_size; return buf_size;
} }
s->frame.key_frame = 0; frame->key_frame = 0;
s->frame.pict_type = AV_PICTURE_TYPE_P; frame->pict_type = AV_PICTURE_TYPE_P;
if (tgv_decode_inter(s, buf, buf_end) < 0) { if (tgv_decode_inter(s, frame, buf, buf_end) < 0) {
av_log(avctx, AV_LOG_WARNING, "truncated inter frame\n"); av_log(avctx, AV_LOG_WARNING, "truncated inter frame\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
} }
av_frame_unref(&s->last_frame);
if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
return ret;
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = s->frame;
return buf_size; return buf_size;
} }
@ -339,8 +332,8 @@ static int tgv_decode_frame(AVCodecContext *avctx,
static av_cold int tgv_decode_end(AVCodecContext *avctx) static av_cold int tgv_decode_end(AVCodecContext *avctx)
{ {
TgvContext *s = avctx->priv_data; TgvContext *s = avctx->priv_data;
cond_release_buffer(&s->frame); av_frame_unref(&s->last_frame);
cond_release_buffer(&s->last_frame); av_freep(&s->frame_buffer);
av_free(s->mv_codebook); av_free(s->mv_codebook);
av_free(s->block_codebook); av_free(s->block_codebook);
return 0; return 0;
@ -355,4 +348,5 @@ AVCodec ff_eatgv_decoder = {
.close = tgv_decode_end, .close = tgv_decode_end,
.decode = tgv_decode_frame, .decode = tgv_decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGV video"), .long_name = NULL_IF_CONFIG_SMALL("Electronic Arts TGV video"),
.capabilities = CODEC_CAP_DR1,
}; };
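
EA TGV used to fake an AVFrame with av_malloc()ed planes; with refcounted buffers it instead unpacks intra frames into a plain side buffer and copies rows into the allocator's frame, which may be padded. The copy, assuming frame_buffer holds width*height tightly packed pixels:

    /* the refcounted buffer's linesize is chosen by the allocator and
     * can exceed the visible width, hence the per-row copy */
    for (y = 0; y < s->height; y++)
        memcpy(frame->data[0] + y * frame->linesize[0],
               s->frame_buffer + y * s->width,
               s->width);

Note the decoder also gains CODEC_CAP_DR1 at the end of the hunk, since it now goes through the generic buffer allocator.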


@ -36,7 +36,6 @@
typedef struct TqiContext { typedef struct TqiContext {
MpegEncContext s; MpegEncContext s;
AVFrame frame;
void *bitstream_buf; void *bitstream_buf;
unsigned int bitstream_buf_size; unsigned int bitstream_buf_size;
DECLARE_ALIGNED(16, int16_t, block)[6][64]; DECLARE_ALIGNED(16, int16_t, block)[6][64];
@ -68,21 +67,21 @@ static int tqi_decode_mb(MpegEncContext *s, int16_t (*block)[64])
return 0; return 0;
} }
static inline void tqi_idct_put(TqiContext *t, int16_t (*block)[64]) static inline void tqi_idct_put(TqiContext *t, AVFrame *frame, int16_t (*block)[64])
{ {
MpegEncContext *s = &t->s; MpegEncContext *s = &t->s;
int linesize= t->frame.linesize[0]; int linesize = frame->linesize[0];
uint8_t *dest_y = t->frame.data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16; uint8_t *dest_y = frame->data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16;
uint8_t *dest_cb = t->frame.data[1] + (s->mb_y * 8 * t->frame.linesize[1]) + s->mb_x * 8; uint8_t *dest_cb = frame->data[1] + (s->mb_y * 8 * frame->linesize[1]) + s->mb_x * 8;
uint8_t *dest_cr = t->frame.data[2] + (s->mb_y * 8 * t->frame.linesize[2]) + s->mb_x * 8; uint8_t *dest_cr = frame->data[2] + (s->mb_y * 8 * frame->linesize[2]) + s->mb_x * 8;
ff_ea_idct_put_c(dest_y , linesize, block[0]); ff_ea_idct_put_c(dest_y , linesize, block[0]);
ff_ea_idct_put_c(dest_y + 8, linesize, block[1]); ff_ea_idct_put_c(dest_y + 8, linesize, block[1]);
ff_ea_idct_put_c(dest_y + 8*linesize , linesize, block[2]); ff_ea_idct_put_c(dest_y + 8*linesize , linesize, block[2]);
ff_ea_idct_put_c(dest_y + 8*linesize + 8, linesize, block[3]); ff_ea_idct_put_c(dest_y + 8*linesize + 8, linesize, block[3]);
if(!(s->avctx->flags&CODEC_FLAG_GRAY)) { if(!(s->avctx->flags&CODEC_FLAG_GRAY)) {
ff_ea_idct_put_c(dest_cb, t->frame.linesize[1], block[4]); ff_ea_idct_put_c(dest_cb, frame->linesize[1], block[4]);
ff_ea_idct_put_c(dest_cr, t->frame.linesize[2], block[5]); ff_ea_idct_put_c(dest_cr, frame->linesize[2], block[5]);
} }
} }
@ -104,21 +103,20 @@ static int tqi_decode_frame(AVCodecContext *avctx,
const uint8_t *buf_end = buf+buf_size; const uint8_t *buf_end = buf+buf_size;
TqiContext *t = avctx->priv_data; TqiContext *t = avctx->priv_data;
MpegEncContext *s = &t->s; MpegEncContext *s = &t->s;
AVFrame *frame = data;
int ret;
s->width = AV_RL16(&buf[0]); s->width = AV_RL16(&buf[0]);
s->height = AV_RL16(&buf[2]); s->height = AV_RL16(&buf[2]);
tqi_calculate_qtable(s, buf[4]); tqi_calculate_qtable(s, buf[4]);
buf += 8; buf += 8;
if (t->frame.data[0])
avctx->release_buffer(avctx, &t->frame);
if (s->avctx->width!=s->width || s->avctx->height!=s->height) if (s->avctx->width!=s->width || s->avctx->height!=s->height)
avcodec_set_dimensions(s->avctx, s->width, s->height); avcodec_set_dimensions(s->avctx, s->width, s->height);
if(ff_get_buffer(avctx, &t->frame) < 0) { if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return ret;
} }
av_fast_padded_malloc(&t->bitstream_buf, &t->bitstream_buf_size, av_fast_padded_malloc(&t->bitstream_buf, &t->bitstream_buf_size,
@ -134,19 +132,16 @@ static int tqi_decode_frame(AVCodecContext *avctx,
{ {
if (tqi_decode_mb(s, t->block) < 0) if (tqi_decode_mb(s, t->block) < 0)
break; break;
tqi_idct_put(t, t->block); tqi_idct_put(t, frame, t->block);
} }
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = t->frame;
return buf_size; return buf_size;
} }
static av_cold int tqi_decode_end(AVCodecContext *avctx) static av_cold int tqi_decode_end(AVCodecContext *avctx)
{ {
TqiContext *t = avctx->priv_data; TqiContext *t = avctx->priv_data;
if(t->frame.data[0])
avctx->release_buffer(avctx, &t->frame);
av_free(t->bitstream_buf); av_free(t->bitstream_buf);
return 0; return 0;
} }


@ -143,7 +143,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride; mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
error = s->error_status_table[mb_index]; error = s->error_status_table[mb_index];
if (IS_INTER(s->cur_pic->f.mb_type[mb_index])) if (IS_INTER(s->cur_pic->mb_type[mb_index]))
continue; // inter continue; // inter
if (!(error & ER_DC_ERROR)) if (!(error & ER_DC_ERROR))
continue; // dc-ok continue; // dc-ok
@ -152,7 +152,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
for (j = b_x + 1; j < w; j++) { for (j = b_x + 1; j < w; j++) {
int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride; int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
int error_j = s->error_status_table[mb_index_j]; int error_j = s->error_status_table[mb_index_j];
int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]); int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
if (intra_j == 0 || !(error_j & ER_DC_ERROR)) { if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
color[0] = dc[j + b_y * stride]; color[0] = dc[j + b_y * stride];
distance[0] = j - b_x; distance[0] = j - b_x;
@ -164,7 +164,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
for (j = b_x - 1; j >= 0; j--) { for (j = b_x - 1; j >= 0; j--) {
int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride; int mb_index_j = (j >> is_luma) + (b_y >> is_luma) * s->mb_stride;
int error_j = s->error_status_table[mb_index_j]; int error_j = s->error_status_table[mb_index_j];
int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]); int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
if (intra_j == 0 || !(error_j & ER_DC_ERROR)) { if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
color[1] = dc[j + b_y * stride]; color[1] = dc[j + b_y * stride];
distance[1] = b_x - j; distance[1] = b_x - j;
@ -176,7 +176,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
for (j = b_y + 1; j < h; j++) { for (j = b_y + 1; j < h; j++) {
int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride; int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
int error_j = s->error_status_table[mb_index_j]; int error_j = s->error_status_table[mb_index_j];
int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]); int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
if (intra_j == 0 || !(error_j & ER_DC_ERROR)) { if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
color[2] = dc[b_x + j * stride]; color[2] = dc[b_x + j * stride];
@ -189,7 +189,7 @@ static void guess_dc(ERContext *s, int16_t *dc, int w,
for (j = b_y - 1; j >= 0; j--) { for (j = b_y - 1; j >= 0; j--) {
int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride; int mb_index_j = (b_x >> is_luma) + (j >> is_luma) * s->mb_stride;
int error_j = s->error_status_table[mb_index_j]; int error_j = s->error_status_table[mb_index_j];
int intra_j = IS_INTRA(s->cur_pic->f.mb_type[mb_index_j]); int intra_j = IS_INTRA(s->cur_pic->mb_type[mb_index_j]);
if (intra_j == 0 || !(error_j & ER_DC_ERROR)) { if (intra_j == 0 || !(error_j & ER_DC_ERROR)) {
color[3] = dc[b_x + j * stride]; color[3] = dc[b_x + j * stride];
distance[3] = b_y - j; distance[3] = b_y - j;
@ -229,13 +229,13 @@ static void h_block_filter(ERContext *s, uint8_t *dst, int w,
int y; int y;
int left_status = s->error_status_table[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]; int left_status = s->error_status_table[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]; int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
int left_intra = IS_INTRA(s->cur_pic->f.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]); int left_intra = IS_INTRA(s->cur_pic->mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
int right_intra = IS_INTRA(s->cur_pic->f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]); int right_intra = IS_INTRA(s->cur_pic->mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
int left_damage = left_status & ER_MB_ERROR; int left_damage = left_status & ER_MB_ERROR;
int right_damage = right_status & ER_MB_ERROR; int right_damage = right_status & ER_MB_ERROR;
int offset = b_x * 8 + b_y * stride * 8; int offset = b_x * 8 + b_y * stride * 8;
int16_t *left_mv = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x]; int16_t *left_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
int16_t *right_mv = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)]; int16_t *right_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
if (!(left_damage || right_damage)) if (!(left_damage || right_damage))
continue; // both undamaged continue; // both undamaged
if ((!left_intra) && (!right_intra) && if ((!left_intra) && (!right_intra) &&
@ -297,14 +297,14 @@ static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h,
int x; int x;
int top_status = s->error_status_table[(b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]; int top_status = s->error_status_table[(b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]; int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
int top_intra = IS_INTRA(s->cur_pic->f.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]); int top_intra = IS_INTRA(s->cur_pic->mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
int bottom_intra = IS_INTRA(s->cur_pic->f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]); int bottom_intra = IS_INTRA(s->cur_pic->mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
int top_damage = top_status & ER_MB_ERROR; int top_damage = top_status & ER_MB_ERROR;
int bottom_damage = bottom_status & ER_MB_ERROR; int bottom_damage = bottom_status & ER_MB_ERROR;
int offset = b_x * 8 + b_y * stride * 8; int offset = b_x * 8 + b_y * stride * 8;
int16_t *top_mv = s->cur_pic->f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x]; int16_t *top_mv = s->cur_pic->motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
int16_t *bottom_mv = s->cur_pic->f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x]; int16_t *bottom_mv = s->cur_pic->motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
if (!(top_damage || bottom_damage)) if (!(top_damage || bottom_damage))
continue; // both undamaged continue; // both undamaged
@ -369,7 +369,7 @@ static void guess_mv(ERContext *s)
int f = 0; int f = 0;
int error = s->error_status_table[mb_xy]; int error = s->error_status_table[mb_xy];
if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy])) if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
f = MV_FROZEN; // intra // FIXME check f = MV_FROZEN; // intra // FIXME check
if (!(error & ER_MV_ERROR)) if (!(error & ER_MV_ERROR))
f = MV_FROZEN; // inter with undamaged MV f = MV_FROZEN; // inter with undamaged MV
@ -386,7 +386,7 @@ static void guess_mv(ERContext *s)
const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_xy = mb_x + mb_y * s->mb_stride;
int mv_dir = (s->last_pic && s->last_pic->f.data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD; int mv_dir = (s->last_pic && s->last_pic->f.data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy])) if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
continue; continue;
if (!(s->error_status_table[mb_xy] & ER_MV_ERROR)) if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
continue; continue;
@ -427,7 +427,7 @@ static void guess_mv(ERContext *s)
if (fixed[mb_xy] == MV_FROZEN) if (fixed[mb_xy] == MV_FROZEN)
continue; continue;
assert(!IS_INTRA(s->cur_pic->f.mb_type[mb_xy])); assert(!IS_INTRA(s->cur_pic->mb_type[mb_xy]));
assert(s->last_pic && s->last_pic->f.data[0]); assert(s->last_pic && s->last_pic->f.data[0]);
j = 0; j = 0;
@ -458,38 +458,38 @@ static void guess_mv(ERContext *s)
if (mb_x > 0 && fixed[mb_xy - 1]) { if (mb_x > 0 && fixed[mb_xy - 1]) {
mv_predictor[pred_count][0] = mv_predictor[pred_count][0] =
s->cur_pic->f.motion_val[0][mot_index - mot_step][0]; s->cur_pic->motion_val[0][mot_index - mot_step][0];
mv_predictor[pred_count][1] = mv_predictor[pred_count][1] =
s->cur_pic->f.motion_val[0][mot_index - mot_step][1]; s->cur_pic->motion_val[0][mot_index - mot_step][1];
ref[pred_count] = ref[pred_count] =
s->cur_pic->f.ref_index[0][4 * (mb_xy - 1)]; s->cur_pic->ref_index[0][4 * (mb_xy - 1)];
pred_count++; pred_count++;
} }
if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) { if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) {
mv_predictor[pred_count][0] = mv_predictor[pred_count][0] =
s->cur_pic->f.motion_val[0][mot_index + mot_step][0]; s->cur_pic->motion_val[0][mot_index + mot_step][0];
mv_predictor[pred_count][1] = mv_predictor[pred_count][1] =
s->cur_pic->f.motion_val[0][mot_index + mot_step][1]; s->cur_pic->motion_val[0][mot_index + mot_step][1];
ref[pred_count] = ref[pred_count] =
s->cur_pic->f.ref_index[0][4 * (mb_xy + 1)]; s->cur_pic->ref_index[0][4 * (mb_xy + 1)];
pred_count++; pred_count++;
} }
if (mb_y > 0 && fixed[mb_xy - mb_stride]) { if (mb_y > 0 && fixed[mb_xy - mb_stride]) {
mv_predictor[pred_count][0] = mv_predictor[pred_count][0] =
s->cur_pic->f.motion_val[0][mot_index - mot_stride * mot_step][0]; s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][0];
mv_predictor[pred_count][1] = mv_predictor[pred_count][1] =
s->cur_pic->f.motion_val[0][mot_index - mot_stride * mot_step][1]; s->cur_pic->motion_val[0][mot_index - mot_stride * mot_step][1];
ref[pred_count] = ref[pred_count] =
s->cur_pic->f.ref_index[0][4 * (mb_xy - s->mb_stride)]; s->cur_pic->ref_index[0][4 * (mb_xy - s->mb_stride)];
pred_count++; pred_count++;
} }
if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride]) { if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride]) {
mv_predictor[pred_count][0] = mv_predictor[pred_count][0] =
s->cur_pic->f.motion_val[0][mot_index + mot_stride * mot_step][0]; s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][0];
mv_predictor[pred_count][1] = mv_predictor[pred_count][1] =
s->cur_pic->f.motion_val[0][mot_index + mot_stride * mot_step][1]; s->cur_pic->motion_val[0][mot_index + mot_stride * mot_step][1];
ref[pred_count] = ref[pred_count] =
s->cur_pic->f.ref_index[0][4 * (mb_xy + s->mb_stride)]; s->cur_pic->ref_index[0][4 * (mb_xy + s->mb_stride)];
pred_count++; pred_count++;
} }
if (pred_count == 0) if (pred_count == 0)
@ -547,19 +547,19 @@ skip_mean_and_median:
if (s->avctx->codec_id == AV_CODEC_ID_H264) { if (s->avctx->codec_id == AV_CODEC_ID_H264) {
// FIXME // FIXME
} else { } else {
ff_thread_await_progress(&s->last_pic->f, ff_thread_await_progress(&s->last_pic->tf,
mb_y, 0); mb_y, 0);
} }
if (!s->last_pic->f.motion_val[0] || if (!s->last_pic->motion_val[0] ||
!s->last_pic->f.ref_index[0]) !s->last_pic->ref_index[0])
goto skip_last_mv; goto skip_last_mv;
prev_x = s->last_pic->f.motion_val[0][mot_index][0]; prev_x = s->last_pic->motion_val[0][mot_index][0];
prev_y = s->last_pic->f.motion_val[0][mot_index][1]; prev_y = s->last_pic->motion_val[0][mot_index][1];
prev_ref = s->last_pic->f.ref_index[0][4 * mb_xy]; prev_ref = s->last_pic->ref_index[0][4 * mb_xy];
} else { } else {
prev_x = s->cur_pic->f.motion_val[0][mot_index][0]; prev_x = s->cur_pic->motion_val[0][mot_index][0];
prev_y = s->cur_pic->f.motion_val[0][mot_index][1]; prev_y = s->cur_pic->motion_val[0][mot_index][1];
prev_ref = s->cur_pic->f.ref_index[0][4 * mb_xy]; prev_ref = s->cur_pic->ref_index[0][4 * mb_xy];
} }
/* last MV */ /* last MV */
@ -576,9 +576,9 @@ skip_last_mv:
uint8_t *src = s->cur_pic->f.data[0] + uint8_t *src = s->cur_pic->f.data[0] +
mb_x * 16 + mb_y * 16 * linesize[0]; mb_x * 16 + mb_y * 16 * linesize[0];
s->cur_pic->f.motion_val[0][mot_index][0] = s->cur_pic->motion_val[0][mot_index][0] =
s->mv[0][0][0] = mv_predictor[j][0]; s->mv[0][0][0] = mv_predictor[j][0];
s->cur_pic->f.motion_val[0][mot_index][1] = s->cur_pic->motion_val[0][mot_index][1] =
s->mv[0][0][1] = mv_predictor[j][1]; s->mv[0][0][1] = mv_predictor[j][1];
// predictor intra or otherwise not available // predictor intra or otherwise not available
@ -623,8 +623,8 @@ skip_last_mv:
for (i = 0; i < mot_step; i++) for (i = 0; i < mot_step; i++)
for (j = 0; j < mot_step; j++) { for (j = 0; j < mot_step; j++) {
s->cur_pic->f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0]; s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
s->cur_pic->f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1]; s->cur_pic->motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
} }
s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD, s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD,
@ -706,7 +706,7 @@ static int is_intra_more_likely(ERContext *s)
if (s->avctx->codec_id == AV_CODEC_ID_H264) { if (s->avctx->codec_id == AV_CODEC_ID_H264) {
// FIXME // FIXME
} else { } else {
ff_thread_await_progress(&s->last_pic->f, mb_y, 0); ff_thread_await_progress(&s->last_pic->tf, mb_y, 0);
} }
is_intra_likely += s->dsp->sad[0](NULL, last_mb_ptr, mb_ptr, is_intra_likely += s->dsp->sad[0](NULL, last_mb_ptr, mb_ptr,
linesize[0], 16); linesize[0], 16);
@ -714,7 +714,7 @@ static int is_intra_more_likely(ERContext *s)
last_mb_ptr + linesize[0] * 16, last_mb_ptr + linesize[0] * 16,
linesize[0], 16); linesize[0], 16);
} else { } else {
if (IS_INTRA(s->cur_pic->f.mb_type[mb_xy])) if (IS_INTRA(s->cur_pic->mb_type[mb_xy]))
is_intra_likely++; is_intra_likely++;
else else
is_intra_likely--; is_intra_likely--;
@ -831,13 +831,25 @@ void ff_er_frame_end(ERContext *s)
return; return;
}; };
-    if (s->cur_pic->f.motion_val[0] == NULL) {
+    if (s->cur_pic->motion_val[0] == NULL) {
         av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
 
         for (i = 0; i < 2; i++) {
-            s->cur_pic->f.ref_index[i]     = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
-            s->cur_pic->motion_val_base[i] = av_mallocz((size + 4) * 2 * sizeof(uint16_t));
-            s->cur_pic->f.motion_val[i]    = s->cur_pic->motion_val_base[i] + 4;
+            s->cur_pic->ref_index_buf[i]  = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
+            s->cur_pic->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
+            if (!s->cur_pic->ref_index_buf[i] || !s->cur_pic->motion_val_buf[i])
+                break;
+            s->cur_pic->ref_index[i]  = s->cur_pic->ref_index_buf[i]->data;
+            s->cur_pic->motion_val[i] = (int16_t (*)[2])s->cur_pic->motion_val_buf[i]->data + 4;
+        }
+        if (i < 2) {
+            for (i = 0; i < 2; i++) {
+                av_buffer_unref(&s->cur_pic->ref_index_buf[i]);
+                av_buffer_unref(&s->cur_pic->motion_val_buf[i]);
+                s->cur_pic->ref_index[i]  = NULL;
+                s->cur_pic->motion_val[i] = NULL;
+            }
+            return;
         }
         s->cur_pic->f.motion_subsample_log2 = 3;
     }
@ -997,9 +1009,9 @@ void ff_er_frame_end(ERContext *s)
continue; continue;
if (is_intra_likely) if (is_intra_likely)
s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_INTRA4x4; s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4;
else else
s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0; s->cur_pic->mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
} }
// change inter to intra blocks if no reference frames are available // change inter to intra blocks if no reference frames are available
@ -1007,15 +1019,15 @@ void ff_er_frame_end(ERContext *s)
!(s->next_pic && s->next_pic->f.data[0])) !(s->next_pic && s->next_pic->f.data[0]))
for (i = 0; i < s->mb_num; i++) { for (i = 0; i < s->mb_num; i++) {
const int mb_xy = s->mb_index2xy[i]; const int mb_xy = s->mb_index2xy[i];
if (!IS_INTRA(s->cur_pic->f.mb_type[mb_xy])) if (!IS_INTRA(s->cur_pic->mb_type[mb_xy]))
s->cur_pic->f.mb_type[mb_xy] = MB_TYPE_INTRA4x4; s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4;
} }
/* handle inter blocks with damaged AC */ /* handle inter blocks with damaged AC */
for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
for (mb_x = 0; mb_x < s->mb_width; mb_x++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_xy = mb_x + mb_y * s->mb_stride;
const int mb_type = s->cur_pic->f.mb_type[mb_xy]; const int mb_type = s->cur_pic->mb_type[mb_xy];
const int dir = !(s->last_pic && s->last_pic->f.data[0]); const int dir = !(s->last_pic && s->last_pic->f.data[0]);
const int mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD; const int mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
int mv_type; int mv_type;
@ -1034,13 +1046,13 @@ void ff_er_frame_end(ERContext *s)
int j; int j;
mv_type = MV_TYPE_8X8; mv_type = MV_TYPE_8X8;
for (j = 0; j < 4; j++) { for (j = 0; j < 4; j++) {
s->mv[0][j][0] = s->cur_pic->f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0]; s->mv[0][j][0] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
s->mv[0][j][1] = s->cur_pic->f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1]; s->mv[0][j][1] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
} }
} else { } else {
mv_type = MV_TYPE_16X16; mv_type = MV_TYPE_16X16;
s->mv[0][0][0] = s->cur_pic->f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0]; s->mv[0][0][0] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
s->mv[0][0][1] = s->cur_pic->f.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1]; s->mv[0][0][1] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
} }
s->decode_mb(s->opaque, 0 /* FIXME h264 partitioned slices need this set */, s->decode_mb(s->opaque, 0 /* FIXME h264 partitioned slices need this set */,
@ -1054,7 +1066,7 @@ void ff_er_frame_end(ERContext *s)
for (mb_x = 0; mb_x < s->mb_width; mb_x++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
int xy = mb_x * 2 + mb_y * 2 * s->b8_stride; int xy = mb_x * 2 + mb_y * 2 * s->b8_stride;
const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_xy = mb_x + mb_y * s->mb_stride;
const int mb_type = s->cur_pic->f.mb_type[mb_xy]; const int mb_type = s->cur_pic->mb_type[mb_xy];
int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
error = s->error_status_table[mb_xy]; error = s->error_status_table[mb_xy];
@ -1075,12 +1087,12 @@ void ff_er_frame_end(ERContext *s)
int time_pp = s->pp_time; int time_pp = s->pp_time;
int time_pb = s->pb_time; int time_pb = s->pb_time;
ff_thread_await_progress(&s->next_pic->f, mb_y, 0); ff_thread_await_progress(&s->next_pic->tf, mb_y, 0);
s->mv[0][0][0] = s->next_pic->f.motion_val[0][xy][0] * time_pb / time_pp; s->mv[0][0][0] = s->next_pic->motion_val[0][xy][0] * time_pb / time_pp;
s->mv[0][0][1] = s->next_pic->f.motion_val[0][xy][1] * time_pb / time_pp; s->mv[0][0][1] = s->next_pic->motion_val[0][xy][1] * time_pb / time_pp;
s->mv[1][0][0] = s->next_pic->f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp; s->mv[1][0][0] = s->next_pic->motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
s->mv[1][0][1] = s->next_pic->f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp; s->mv[1][0][1] = s->next_pic->motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
} else { } else {
s->mv[0][0][0] = 0; s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0; s->mv[0][0][1] = 0;
@ -1105,7 +1117,7 @@ void ff_er_frame_end(ERContext *s)
int16_t *dc_ptr; int16_t *dc_ptr;
uint8_t *dest_y, *dest_cb, *dest_cr; uint8_t *dest_y, *dest_cb, *dest_cr;
const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_xy = mb_x + mb_y * s->mb_stride;
const int mb_type = s->cur_pic->f.mb_type[mb_xy]; const int mb_type = s->cur_pic->mb_type[mb_xy];
error = s->error_status_table[mb_xy]; error = s->error_status_table[mb_xy];
@ -1156,7 +1168,7 @@ void ff_er_frame_end(ERContext *s)
for (mb_x = 0; mb_x < s->mb_width; mb_x++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
uint8_t *dest_y, *dest_cb, *dest_cr; uint8_t *dest_y, *dest_cb, *dest_cr;
const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_xy = mb_x + mb_y * s->mb_stride;
const int mb_type = s->cur_pic->f.mb_type[mb_xy]; const int mb_type = s->cur_pic->mb_type[mb_xy];
error = s->error_status_table[mb_xy]; error = s->error_status_table[mb_xy];
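
A note on the pattern in the er.c hunks above: decode-progress tracking for
frame threading now goes through the ThreadFrame embedded in the Picture,
not through the AVFrame itself. A minimal sketch of the call, assuming the
internal Picture/ERContext types from the surrounding code (the helper name
is illustrative, not something the commit adds):

/* block until field 0 of the reference picture has decoded row mb_y;
 * h264 is skipped here, matching the FIXME in the hunks above */
static void wait_for_reference_row(ERContext *s, Picture *ref, int mb_y)
{
    if (s->avctx->codec_id == AV_CODEC_ID_H264)
        return;
    ff_thread_await_progress(&ref->tf, mb_y, 0);
}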

View File

@ -78,8 +78,7 @@ static av_cold int escape124_decode_close(AVCodecContext *avctx)
for (i = 0; i < 3; i++) for (i = 0; i < 3; i++)
av_free(s->codebooks[i].blocks); av_free(s->codebooks[i].blocks);
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_unref(&s->frame);
return 0; return 0;
} }
@ -203,6 +202,7 @@ static int escape124_decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
Escape124Context *s = avctx->priv_data; Escape124Context *s = avctx->priv_data;
AVFrame *frame = data;
GetBitContext gb; GetBitContext gb;
unsigned frame_flags, frame_size; unsigned frame_flags, frame_size;
@ -214,8 +214,7 @@ static int escape124_decode_frame(AVCodecContext *avctx,
uint16_t* old_frame_data, *new_frame_data; uint16_t* old_frame_data, *new_frame_data;
unsigned old_stride, new_stride; unsigned old_stride, new_stride;
int ret;
AVFrame new_frame = { { 0 } };
init_get_bits(&gb, buf, buf_size * 8); init_get_bits(&gb, buf, buf_size * 8);
@ -230,10 +229,14 @@ static int escape124_decode_frame(AVCodecContext *avctx,
// Leave last frame unchanged // Leave last frame unchanged
// FIXME: Is this necessary? I haven't seen it in any real samples // FIXME: Is this necessary? I haven't seen it in any real samples
if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) { if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) {
+        if (!s->frame.data[0])
+            return AVERROR_INVALIDDATA;
+
         av_log(NULL, AV_LOG_DEBUG, "Skipping frame\n");
 
         *got_frame = 1;
-        *(AVFrame*)data = s->frame;
+        if ((ret = av_frame_ref(frame, &s->frame)) < 0)
+            return ret;
return frame_size; return frame_size;
} }
@ -266,14 +269,13 @@ static int escape124_decode_frame(AVCodecContext *avctx,
} }
} }
-    new_frame.reference = 3;
-    if (ff_get_buffer(avctx, &new_frame)) {
+    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }
 
-    new_frame_data = (uint16_t*)new_frame.data[0];
-    new_stride     = new_frame.linesize[0] / 2;
+    new_frame_data = (uint16_t*)frame->data[0];
+    new_stride     = frame->linesize[0] / 2;
old_frame_data = (uint16_t*)s->frame.data[0]; old_frame_data = (uint16_t*)s->frame.data[0];
old_stride = s->frame.linesize[0] / 2; old_stride = s->frame.linesize[0] / 2;
@ -354,10 +356,10 @@ static int escape124_decode_frame(AVCodecContext *avctx,
"Escape sizes: %i, %i, %i\n", "Escape sizes: %i, %i, %i\n",
frame_size, buf_size, get_bits_count(&gb) / 8); frame_size, buf_size, get_bits_count(&gb) / 8);
-    if (s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_unref(&s->frame);
+    if ((ret = av_frame_ref(&s->frame, frame)) < 0)
+        return ret;
 
-    *(AVFrame*)data = s->frame = new_frame;
*got_frame = 1; *got_frame = 1;
return frame_size; return frame_size;
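
The escape124 changes above follow the ownership pattern used throughout
this commit: the decoder keeps its own reference to the last output frame
and hands the caller an independent one. A minimal sketch of that pattern
with illustrative names (DemoContext/output_frame are not the actual
escape124 symbols):

#include "libavutil/frame.h"

typedef struct DemoContext {
    AVFrame prev;   /* decoder-owned reference to the last output frame */
} DemoContext;

static int output_frame(DemoContext *s, AVFrame *dst, AVFrame *decoded)
{
    int ret;

    /* drop our old reference, keep one to the new frame */
    av_frame_unref(&s->prev);
    if ((ret = av_frame_ref(&s->prev, decoded)) < 0)
        return ret;

    /* give the caller its own reference; the pixel buffers are only
     * freed once the last reference is unreferenced */
    return av_frame_ref(dst, decoded);
}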

View File

@ -266,10 +266,7 @@ av_cold int ffv1_close(AVCodecContext *avctx)
FFV1Context *s = avctx->priv_data; FFV1Context *s = avctx->priv_data;
int i, j; int i, j;
-    if (avctx->codec->decode && s->picture.data[0])
-        avctx->release_buffer(avctx, &s->picture);
-    if (avctx->codec->decode && s->last_picture.data[0])
-        avctx->release_buffer(avctx, &s->last_picture);
+    av_frame_unref(&s->last_picture);
for (j = 0; j < s->slice_count; j++) { for (j = 0; j < s->slice_count; j++) {
FFV1Context *fs = s->slice_context[j]; FFV1Context *fs = s->slice_context[j];

View File

@ -80,6 +80,8 @@ typedef struct FFV1Context {
int flags; int flags;
int picture_number; int picture_number;
AVFrame picture, last_picture; AVFrame picture, last_picture;
AVFrame *cur;
int plane_count; int plane_count;
int ac; // 1 = range coder <-> 0 = golomb rice int ac; // 1 = range coder <-> 0 = golomb rice
int ac_byte_count; // number of bytes used for AC coding int ac_byte_count; // number of bytes used for AC coding

View File

@ -316,16 +316,16 @@ static int decode_slice_header(FFV1Context *f, FFV1Context *fs)
ps = get_symbol(c, state, 0); ps = get_symbol(c, state, 0);
if (ps == 1) { if (ps == 1) {
f->picture.interlaced_frame = 1; f->cur->interlaced_frame = 1;
f->picture.top_field_first = 1; f->cur->top_field_first = 1;
} else if (ps == 2) { } else if (ps == 2) {
f->picture.interlaced_frame = 1; f->cur->interlaced_frame = 1;
f->picture.top_field_first = 0; f->cur->top_field_first = 0;
} else if (ps == 3) { } else if (ps == 3) {
f->picture.interlaced_frame = 0; f->cur->interlaced_frame = 0;
} }
f->picture.sample_aspect_ratio.num = get_symbol(c, state, 0); f->cur->sample_aspect_ratio.num = get_symbol(c, state, 0);
f->picture.sample_aspect_ratio.den = get_symbol(c, state, 0); f->cur->sample_aspect_ratio.den = get_symbol(c, state, 0);
return 0; return 0;
} }
@ -338,7 +338,7 @@ static int decode_slice(AVCodecContext *c, void *arg)
const int ps = (av_pix_fmt_desc_get(c->pix_fmt)->flags & PIX_FMT_PLANAR) const int ps = (av_pix_fmt_desc_get(c->pix_fmt)->flags & PIX_FMT_PLANAR)
? (c->bits_per_raw_sample > 8) + 1 ? (c->bits_per_raw_sample > 8) + 1
: 4; : 4;
AVFrame *const p = &f->picture; AVFrame *const p = f->cur;
if (f->version > 2) { if (f->version > 2) {
if (decode_slice_header(f, fs) < 0) { if (decode_slice_header(f, fs) < 0) {
@ -348,7 +348,7 @@ static int decode_slice(AVCodecContext *c, void *arg)
} }
if ((ret = ffv1_init_slice_state(f, fs)) < 0) if ((ret = ffv1_init_slice_state(f, fs)) < 0)
return ret; return ret;
if (f->picture.key_frame) if (f->cur->key_frame)
ffv1_clear_slice_state(f, fs); ffv1_clear_slice_state(f, fs);
width = fs->slice_width; width = fs->slice_width;
height = fs->slice_height; height = fs->slice_height;
@ -799,16 +799,12 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data,
int buf_size = avpkt->size; int buf_size = avpkt->size;
FFV1Context *f = avctx->priv_data; FFV1Context *f = avctx->priv_data;
RangeCoder *const c = &f->slice_context[0]->c; RangeCoder *const c = &f->slice_context[0]->c;
-    AVFrame *const p = &f->picture;
     int i, ret;
     uint8_t keystate = 128;
     const uint8_t *buf_p;
+    AVFrame *const p = data;
 
-    AVFrame *picture = data;
+    f->cur = p;
 
-    /* release previously stored data */
-    if (p->data[0])
-        avctx->release_buffer(avctx, p);
ff_init_range_decoder(c, buf, buf_size); ff_init_range_decoder(c, buf, buf_size);
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8); ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
@ -829,8 +825,7 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data,
p->key_frame = 0; p->key_frame = 0;
} }
p->reference = 3; //for error concealment if ((ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF)) < 0) {
if ((ret = ff_get_buffer(avctx, p)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }
@ -869,6 +864,8 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data,
ff_init_range_decoder(&fs->c, buf_p, v); ff_init_range_decoder(&fs->c, buf_p, v);
} else } else
fs->c.bytestream_end = (uint8_t *)(buf_p + v); fs->c.bytestream_end = (uint8_t *)(buf_p + v);
fs->cur = p;
} }
avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL,
@ -884,13 +881,13 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data,
for (j = 0; j < 4; j++) { for (j = 0; j < 4; j++) {
int sh = (j == 1 || j == 2) ? f->chroma_h_shift : 0; int sh = (j == 1 || j == 2) ? f->chroma_h_shift : 0;
int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0; int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0;
dst[j] = f->picture.data[j] + f->picture.linesize[j] * dst[j] = p->data[j] + p->linesize[j] *
(fs->slice_y >> sv) + (fs->slice_x >> sh); (fs->slice_y >> sv) + (fs->slice_x >> sh);
src[j] = f->last_picture.data[j] + src[j] = f->last_picture.data[j] +
f->last_picture.linesize[j] * f->last_picture.linesize[j] *
(fs->slice_y >> sv) + (fs->slice_x >> sh); (fs->slice_y >> sv) + (fs->slice_x >> sh);
} }
av_image_copy(dst, f->picture.linesize, (const uint8_t **)src, av_image_copy(dst, p->linesize, (const uint8_t **)src,
f->last_picture.linesize, f->last_picture.linesize,
avctx->pix_fmt, fs->slice_width, avctx->pix_fmt, fs->slice_width,
fs->slice_height); fs->slice_height);
@ -899,10 +896,12 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data,
f->picture_number++; f->picture_number++;
-    *picture = *p;
-    *got_frame = 1;
+    av_frame_unref(&f->last_picture);
+    if ((ret = av_frame_ref(&f->last_picture, p)) < 0)
+        return ret;
+    f->cur = NULL;
 
-    FFSWAP(AVFrame, f->picture, f->last_picture);
+    *got_frame = 1;
return buf_size; return buf_size;
} }
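
Since the output frame is no longer embedded in FFV1Context, every slice
context needs a pointer to the frame being decoded before the slice threads
run; the hunks above set fs->cur in the slice loop. A sketch of the same
idea with illustrative names (SliceContext/set_current_frame are not the
real ffv1 symbols):

#include "libavutil/frame.h"

typedef struct SliceContext { AVFrame *cur; } SliceContext;

static void set_current_frame(SliceContext **slices, int n, AVFrame *p)
{
    int i;
    for (i = 0; i < n; i++)
        slices[i]->cur = p;   /* p is the caller-supplied AVFrame */
}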

View File

@ -529,7 +529,7 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */ /* get output buffer */
frame->nb_samples = s->blocksize; frame->nb_samples = s->blocksize;
if ((ret = ff_get_buffer(avctx, frame)) < 0) { if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }
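
ff_get_buffer() gained a flags argument in this commit: decoders that keep
the frame as a long-lived reference pass AV_GET_BUFFER_FLAG_REF, one-shot
decoders such as this audio path pass 0. A small sketch of a wrapper
(alloc_output is an illustrative name):

#include "avcodec.h"
#include "internal.h"

static int alloc_output(AVCodecContext *avctx, AVFrame *frame, int kept)
{
    /* request a long-lived buffer only if we will keep a reference */
    return ff_get_buffer(avctx, frame, kept ? AV_GET_BUFFER_FLAG_REF : 0);
}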

View File

@ -41,6 +41,7 @@
#include "avcodec.h" #include "avcodec.h"
#include "bytestream.h" #include "bytestream.h"
#include "get_bits.h" #include "get_bits.h"
#include "internal.h"
typedef struct BlockInfo { typedef struct BlockInfo {
uint8_t *pos; uint8_t *pos;
@ -238,7 +239,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
{ {
int buf_size = avpkt->size; int buf_size = avpkt->size;
FlashSVContext *s = avctx->priv_data; FlashSVContext *s = avctx->priv_data;
int h_blocks, v_blocks, h_part, v_part, i, j; int h_blocks, v_blocks, h_part, v_part, i, j, ret;
GetBitContext gb; GetBitContext gb;
/* no supplementary picture */ /* no supplementary picture */
@ -327,13 +328,9 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
s->image_width, s->image_height, s->block_width, s->block_height, s->image_width, s->image_height, s->block_width, s->block_height,
h_blocks, v_blocks, h_part, v_part); h_blocks, v_blocks, h_part, v_part);
-    s->frame.reference    = 3;
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
-                            FF_BUFFER_HINTS_PRESERVE |
-                            FF_BUFFER_HINTS_REUSABLE;
-    if (avctx->reget_buffer(avctx, &s->frame) < 0) {
+    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
-        return -1;
+        return ret;
     }
/* loop over all block columns */ /* loop over all block columns */
@ -358,8 +355,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
s->diff_height = cur_blk_height; s->diff_height = cur_blk_height;
if (8 * size > get_bits_left(&gb)) { if (8 * size > get_bits_left(&gb)) {
avctx->release_buffer(avctx, &s->frame); av_frame_unref(&s->frame);
s->frame.data[0] = NULL;
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
@ -441,8 +437,10 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height); memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height);
} }
if ((ret = av_frame_ref(data, &s->frame)) < 0)
return ret;
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = s->frame;
if ((get_bits_count(&gb) / 8) != buf_size) if ((get_bits_count(&gb) / 8) != buf_size)
av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n", av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n",
@ -458,8 +456,7 @@ static av_cold int flashsv_decode_end(AVCodecContext *avctx)
FlashSVContext *s = avctx->priv_data; FlashSVContext *s = avctx->priv_data;
inflateEnd(&s->zstream); inflateEnd(&s->zstream);
/* release the frame if needed */ /* release the frame if needed */
if (s->frame.data[0]) av_frame_unref(&s->frame);
avctx->release_buffer(avctx, &s->frame);
/* free the tmpblock */ /* free the tmpblock */
av_free(s->tmpblock); av_free(s->tmpblock);
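
ff_reget_buffer() replaces the old reget_buffer()+buffer_hints pattern for
decoders that update the previous frame in place: if the stored frame is
still referenced elsewhere, a new buffer is allocated and the contents
copied, so the decoder always ends up with a writable frame. A sketch
(get_writable_frame is an illustrative name):

#include "avcodec.h"
#include "internal.h"

static int get_writable_frame(AVCodecContext *avctx, AVFrame *frame)
{
    int ret = ff_reget_buffer(avctx, frame);
    if (ret < 0)
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
    return ret;
}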

View File

@ -42,6 +42,7 @@
#include "libavutil/intreadwrite.h" #include "libavutil/intreadwrite.h"
#include "avcodec.h" #include "avcodec.h"
#include "bytestream.h" #include "bytestream.h"
#include "internal.h"
#include "mathops.h" #include "mathops.h"
#define FLI_256_COLOR 4 #define FLI_256_COLOR 4
@ -167,9 +168,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
bytestream2_init(&g2, buf, buf_size); bytestream2_init(&g2, buf, buf_size);
s->frame.reference = 1; if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret; return ret;
} }
@ -424,8 +423,10 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
s->new_palette = 0; s->new_palette = 0;
} }
if ((ret = av_frame_ref(data, &s->frame)) < 0)
return ret;
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = s->frame;
return buf_size; return buf_size;
} }
@ -463,9 +464,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
bytestream2_init(&g2, buf, buf_size); bytestream2_init(&g2, buf, buf_size);
s->frame.reference = 1; if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret; return ret;
} }
@ -685,9 +684,10 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \ av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
"and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2)); "and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));
if ((ret = av_frame_ref(data, &s->frame)) < 0)
return ret;
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = s->frame;
return buf_size; return buf_size;
} }
@ -733,8 +733,7 @@ static av_cold int flic_decode_end(AVCodecContext *avctx)
{ {
FlicDecodeContext *s = avctx->priv_data; FlicDecodeContext *s = avctx->priv_data;
if (s->frame.data[0]) av_frame_unref(&s->frame);
avctx->release_buffer(avctx, &s->frame);
return 0; return 0;
} }

View File

@ -36,6 +36,7 @@
#include "huffman.h" #include "huffman.h"
#include "bytestream.h" #include "bytestream.h"
#include "dsputil.h" #include "dsputil.h"
#include "internal.h"
#define FPS_TAG MKTAG('F', 'P', 'S', 'x') #define FPS_TAG MKTAG('F', 'P', 'S', 'x')
@ -60,7 +61,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
{ {
FrapsContext * const s = avctx->priv_data; FrapsContext * const s = avctx->priv_data;
avctx->coded_frame = &s->frame;
avctx->pix_fmt = AV_PIX_FMT_NONE; /* set in decode_frame */ avctx->pix_fmt = AV_PIX_FMT_NONE; /* set in decode_frame */
s->avctx = avctx; s->avctx = avctx;
@ -161,7 +161,7 @@ static int decode_frame(AVCodecContext *avctx,
pix_fmt = version & 1 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_YUVJ420P; pix_fmt = version & 1 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_YUVJ420P;
if (avctx->pix_fmt != pix_fmt && f->data[0]) { if (avctx->pix_fmt != pix_fmt && f->data[0]) {
avctx->release_buffer(avctx, f); av_frame_unref(f);
} }
avctx->pix_fmt = pix_fmt; avctx->pix_fmt = pix_fmt;
@ -184,11 +184,7 @@ static int decode_frame(AVCodecContext *avctx,
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
f->reference = 1; if ((ret = ff_reget_buffer(avctx, f)) < 0) {
f->buffer_hints = FF_BUFFER_HINTS_VALID |
FF_BUFFER_HINTS_PRESERVE |
FF_BUFFER_HINTS_REUSABLE;
if ((ret = avctx->reget_buffer(avctx, f)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret; return ret;
} }
@ -225,11 +221,7 @@ static int decode_frame(AVCodecContext *avctx,
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
f->reference = 1; if ((ret = ff_reget_buffer(avctx, f)) < 0) {
f->buffer_hints = FF_BUFFER_HINTS_VALID |
FF_BUFFER_HINTS_PRESERVE |
FF_BUFFER_HINTS_REUSABLE;
if ((ret = avctx->reget_buffer(avctx, f)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret; return ret;
} }
@ -252,11 +244,7 @@ static int decode_frame(AVCodecContext *avctx,
* Fraps v4 is virtually the same * Fraps v4 is virtually the same
*/ */
planes = 3; planes = 3;
f->reference = 1; if ((ret = ff_reget_buffer(avctx, f)) < 0) {
f->buffer_hints = FF_BUFFER_HINTS_VALID |
FF_BUFFER_HINTS_PRESERVE |
FF_BUFFER_HINTS_REUSABLE;
if ((ret = avctx->reget_buffer(avctx, f)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret; return ret;
} }
@ -300,11 +288,7 @@ static int decode_frame(AVCodecContext *avctx,
case 5: case 5:
/* Virtually the same as version 4, but is for RGB24 */ /* Virtually the same as version 4, but is for RGB24 */
planes = 3; planes = 3;
f->reference = 1; if ((ret = ff_reget_buffer(avctx, f)) < 0) {
f->buffer_hints = FF_BUFFER_HINTS_VALID |
FF_BUFFER_HINTS_PRESERVE |
FF_BUFFER_HINTS_REUSABLE;
if ((ret = avctx->reget_buffer(avctx, f)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret; return ret;
} }
@ -350,7 +334,8 @@ static int decode_frame(AVCodecContext *avctx,
break; break;
} }
-    *frame = *f;
+    if ((ret = av_frame_ref(frame, f)) < 0)
+        return ret;
*got_frame = 1; *got_frame = 1;
return buf_size; return buf_size;
@ -366,8 +351,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
{ {
FrapsContext *s = (FrapsContext*)avctx->priv_data; FrapsContext *s = (FrapsContext*)avctx->priv_data;
if (s->frame.data[0]) av_frame_unref(&s->frame);
avctx->release_buffer(avctx, &s->frame);
av_freep(&s->tmpbuf); av_freep(&s->tmpbuf);
return 0; return 0;

View File

@ -32,10 +32,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
} }
avctx->pix_fmt = AV_PIX_FMT_UYVY422; avctx->pix_fmt = AV_PIX_FMT_UYVY422;
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
return 0; return 0;
} }
@ -43,13 +39,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt) AVPacket *avpkt)
{ {
int field, ret; int field, ret;
AVFrame *pic = avctx->coded_frame; AVFrame *pic = data;
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = buf + avpkt->size; const uint8_t *buf_end = buf + avpkt->size;
if (pic->data[0])
avctx->release_buffer(avctx, pic);
if (avpkt->size < avctx->width * 2 * avctx->height + 4 + 2*8) { if (avpkt->size < avctx->width * 2 * avctx->height + 4 + 2*8) {
av_log(avctx, AV_LOG_ERROR, "Packet is too small.\n"); av_log(avctx, AV_LOG_ERROR, "Packet is too small.\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
@ -59,8 +52,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
pic->reference = 0; if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) {
if ((ret = ff_get_buffer(avctx, pic)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }
@ -98,27 +90,15 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
} }
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = *pic;
return avpkt->size; return avpkt->size;
} }
static av_cold int decode_close(AVCodecContext *avctx)
{
AVFrame *pic = avctx->coded_frame;
if (pic->data[0])
avctx->release_buffer(avctx, pic);
av_freep(&avctx->coded_frame);
return 0;
}
AVCodec ff_frwu_decoder = { AVCodec ff_frwu_decoder = {
.name = "frwu", .name = "frwu",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_FRWU, .id = AV_CODEC_ID_FRWU,
.init = decode_init, .init = decode_init,
.close = decode_close,
.decode = decode_frame, .decode = decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Forward Uncompressed"), .long_name = NULL_IF_CONFIG_SMALL("Forward Uncompressed"),
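
With the output frame supplied by the caller, a stateless intra-only decoder
like FRWU needs no coded_frame and no close() callback at all. A minimal
sketch of the resulting shape (demo_decode_frame is an illustrative name):

static int demo_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    AVFrame *pic = data;
    int ret;

    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
        return ret;

    /* ... decode avpkt->data into pic->data[] ... */

    *got_frame = 1;
    return avpkt->size;
}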

View File

@ -94,7 +94,7 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */ /* get output buffer */
frame->nb_samples = avpkt->size * 2; frame->nb_samples = avpkt->size * 2;
if ((ret = ff_get_buffer(avctx, frame)) < 0) { if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }

View File

@ -1218,7 +1218,7 @@ static int g723_1_decode_frame(AVCodecContext *avctx, void *data,
} }
frame->nb_samples = FRAME_LEN; frame->nb_samples = FRAME_LEN;
if ((ret = ff_get_buffer(avctx, frame)) < 0) { if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }

View File

@ -451,7 +451,7 @@ static int g726_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */ /* get output buffer */
frame->nb_samples = out_samples; frame->nb_samples = out_samples;
if ((ret = ff_get_buffer(avctx, frame)) < 0) { if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }

View File

@ -34,7 +34,6 @@
#define GCE_DISPOSAL_RESTORE 3 #define GCE_DISPOSAL_RESTORE 3
typedef struct GifState { typedef struct GifState {
AVFrame picture;
int screen_width; int screen_width;
int screen_height; int screen_height;
int bits_per_pixel; int bits_per_pixel;
@ -63,7 +62,7 @@ typedef struct GifState {
static const uint8_t gif87a_sig[6] = "GIF87a"; static const uint8_t gif87a_sig[6] = "GIF87a";
static const uint8_t gif89a_sig[6] = "GIF89a"; static const uint8_t gif89a_sig[6] = "GIF89a";
static int gif_read_image(GifState *s) static int gif_read_image(GifState *s, AVFrame *frame)
{ {
int left, top, width, height, bits_per_pixel, code_size, flags; int left, top, width, height, bits_per_pixel, code_size, flags;
int is_interleaved, has_local_palette, y, pass, y1, linesize, n, i; int is_interleaved, has_local_palette, y, pass, y1, linesize, n, i;
@ -112,8 +111,8 @@ static int gif_read_image(GifState *s)
s->bytestream_end - s->bytestream, FF_LZW_GIF); s->bytestream_end - s->bytestream, FF_LZW_GIF);
/* read all the image */ /* read all the image */
linesize = s->picture.linesize[0]; linesize = frame->linesize[0];
ptr1 = s->picture.data[0] + top * linesize + left; ptr1 = frame->data[0] + top * linesize + left;
ptr = ptr1; ptr = ptr1;
pass = 0; pass = 0;
y1 = 0; y1 = 0;
@ -245,7 +244,7 @@ static int gif_read_header1(GifState *s)
return 0; return 0;
} }
static int gif_parse_next_image(GifState *s) static int gif_parse_next_image(GifState *s, AVFrame *frame)
{ {
while (s->bytestream < s->bytestream_end) { while (s->bytestream < s->bytestream_end) {
int code = bytestream_get_byte(&s->bytestream); int code = bytestream_get_byte(&s->bytestream);
@ -255,7 +254,7 @@ static int gif_parse_next_image(GifState *s)
switch (code) { switch (code) {
case ',': case ',':
return gif_read_image(s); return gif_read_image(s, frame);
case '!': case '!':
if ((ret = gif_read_extension(s)) < 0) if ((ret = gif_read_extension(s)) < 0)
return ret; return ret;
@ -276,9 +275,6 @@ static av_cold int gif_decode_init(AVCodecContext *avctx)
s->avctx = avctx; s->avctx = avctx;
avcodec_get_frame_defaults(&s->picture);
avctx->coded_frame= &s->picture;
s->picture.data[0] = NULL;
ff_lzw_decode_open(&s->lzw); ff_lzw_decode_open(&s->lzw);
return 0; return 0;
} }
@ -302,18 +298,15 @@ static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
return ret; return ret;
avcodec_set_dimensions(avctx, s->screen_width, s->screen_height); avcodec_set_dimensions(avctx, s->screen_width, s->screen_height);
if (s->picture.data[0]) if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) {
avctx->release_buffer(avctx, &s->picture);
if ((ret = ff_get_buffer(avctx, &s->picture)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }
s->image_palette = (uint32_t *)s->picture.data[1]; s->image_palette = (uint32_t *)picture->data[1];
ret = gif_parse_next_image(s); ret = gif_parse_next_image(s, picture);
if (ret < 0) if (ret < 0)
return ret; return ret;
*picture = s->picture;
*got_frame = 1; *got_frame = 1;
return s->bytestream - buf; return s->bytestream - buf;
} }
@ -323,8 +316,6 @@ static av_cold int gif_decode_close(AVCodecContext *avctx)
GifState *s = avctx->priv_data; GifState *s = avctx->priv_data;
ff_lzw_decode_close(&s->lzw); ff_lzw_decode_close(&s->lzw);
if(s->picture.data[0])
avctx->release_buffer(avctx, &s->picture);
return 0; return 0;
} }

View File

@ -69,7 +69,7 @@ static int gsm_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */ /* get output buffer */
frame->nb_samples = avctx->frame_size; frame->nb_samples = avctx->frame_size;
if ((res = ff_get_buffer(avctx, frame)) < 0) { if ((res = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res; return res;
} }

View File

@ -214,7 +214,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 )
s->mv_dir = MV_DIR_FORWARD; s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16; s->mv_type = MV_TYPE_16X16;
s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0; s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0; s->mv[0][0][1] = 0;
s->mb_skipped = 1; s->mb_skipped = 1;
@ -322,14 +322,14 @@ static int h261_decode_mb(H261Context *h){
} }
if(s->mb_intra){ if(s->mb_intra){
s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA; s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
goto intra; goto intra;
} }
//set motion vectors //set motion vectors
s->mv_dir = MV_DIR_FORWARD; s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16; s->mv_type = MV_TYPE_16X16;
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation
s->mv[0][0][1] = h->current_mv_y * 2; s->mv[0][0][1] = h->current_mv_y * 2;
@ -624,8 +624,9 @@ retry:
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type); assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type); assert(s->current_picture.f.pict_type == s->pict_type);
-    *pict = s->current_picture_ptr->f;
-    ff_print_debug_info(s, pict);
+    if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+        return ret;
+    ff_print_debug_info(s, s->current_picture_ptr);
 
     *got_frame = 1;

View File

@ -51,7 +51,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
const int wrap = s->b8_stride; const int wrap = s->b8_stride;
const int xy = s->block_index[0]; const int xy = s->block_index[0];
s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped; s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;
if(s->mv_type != MV_TYPE_8X8){ if(s->mv_type != MV_TYPE_8X8){
int motion_x, motion_y; int motion_x, motion_y;
@ -70,30 +70,30 @@ void ff_h263_update_motion_val(MpegEncContext * s){
s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0]; s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1]; s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
} }
s->current_picture.f.ref_index[0][4*mb_xy ] = s->current_picture.ref_index[0][4*mb_xy ] =
s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0]; s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
s->current_picture.f.ref_index[0][4*mb_xy + 2] = s->current_picture.ref_index[0][4*mb_xy + 2] =
s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1]; s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
} }
/* no update if 8X8 because it has been done during parsing */ /* no update if 8X8 because it has been done during parsing */
s->current_picture.f.motion_val[0][xy][0] = motion_x; s->current_picture.motion_val[0][xy][0] = motion_x;
s->current_picture.f.motion_val[0][xy][1] = motion_y; s->current_picture.motion_val[0][xy][1] = motion_y;
s->current_picture.f.motion_val[0][xy + 1][0] = motion_x; s->current_picture.motion_val[0][xy + 1][0] = motion_x;
s->current_picture.f.motion_val[0][xy + 1][1] = motion_y; s->current_picture.motion_val[0][xy + 1][1] = motion_y;
s->current_picture.f.motion_val[0][xy + wrap][0] = motion_x; s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
s->current_picture.f.motion_val[0][xy + wrap][1] = motion_y; s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x; s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y; s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
} }
if(s->encoding){ //FIXME encoding MUST be cleaned up if(s->encoding){ //FIXME encoding MUST be cleaned up
if (s->mv_type == MV_TYPE_8X8) if (s->mv_type == MV_TYPE_8X8)
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8; s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
else if(s->mb_intra) else if(s->mb_intra)
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA; s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
else else
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16; s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
} }
} }
@ -153,7 +153,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
Diag Top Diag Top
Left Center Left Center
*/ */
if (!IS_SKIP(s->current_picture.f.mb_type[xy])) { if (!IS_SKIP(s->current_picture.mb_type[xy])) {
qp_c= s->qscale; qp_c= s->qscale;
s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c); s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c);
s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c); s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
@ -163,10 +163,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
if(s->mb_y){ if(s->mb_y){
int qp_dt, qp_tt, qp_tc; int qp_dt, qp_tt, qp_tc;
if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride])) if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride]))
qp_tt=0; qp_tt=0;
else else
qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride]; qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];
if(qp_c) if(qp_c)
qp_tc= qp_c; qp_tc= qp_c;
@ -186,10 +186,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt); s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
if(s->mb_x){ if(s->mb_x){
if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride])) if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride]))
qp_dt= qp_tt; qp_dt= qp_tt;
else else
qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride]; qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];
if(qp_dt){ if(qp_dt){
const int chroma_qp= s->chroma_qscale_table[qp_dt]; const int chroma_qp= s->chroma_qscale_table[qp_dt];
@ -208,10 +208,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
if(s->mb_x){ if(s->mb_x){
int qp_lc; int qp_lc;
if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1])) if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1]))
qp_lc= qp_c; qp_lc= qp_c;
else else
qp_lc = s->current_picture.f.qscale_table[xy - 1]; qp_lc = s->current_picture.qscale_table[xy - 1];
if(qp_lc){ if(qp_lc){
s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc); s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
@ -320,7 +320,7 @@ int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
static const int off[4]= {2, 1, 1, -1}; static const int off[4]= {2, 1, 1, -1};
wrap = s->b8_stride; wrap = s->b8_stride;
mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block]; mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
A = mot_val[ - 1]; A = mot_val[ - 1];
/* special case for first (slice) line */ /* special case for first (slice) line */
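
The h263 hunks above reflect a struct change assumed throughout this commit:
the per-macroblock tables (mb_type, qscale_table, motion_val, ref_index) now
hang off Picture itself rather than its AVFrame, so they can be refcounted
separately from the pixel buffers. An illustrative accessor, assuming the
MpegEncContext layout used above:

static int top_neighbour_qscale(MpegEncContext *s, int mb_xy)
{
    /* qscale of the macroblock above, read from the Picture-level table */
    return s->current_picture.qscale_table[mb_xy - s->mb_stride];
}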

View File

@ -359,7 +359,8 @@ uint64_t time= rdtsc();
if (buf_size == 0) { if (buf_size == 0) {
/* special case for last picture */ /* special case for last picture */
if (s->low_delay==0 && s->next_picture_ptr) { if (s->low_delay==0 && s->next_picture_ptr) {
*pict = s->next_picture_ptr->f; if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
return ret;
s->next_picture_ptr= NULL; s->next_picture_ptr= NULL;
*got_frame = 1; *got_frame = 1;
@ -722,14 +723,17 @@ intrax8_decoded:
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type); assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type); assert(s->current_picture.f.pict_type == s->pict_type);
     if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
-        *pict = s->current_picture_ptr->f;
+        if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+            return ret;
+        ff_print_debug_info(s, s->current_picture_ptr);
     } else if (s->last_picture_ptr != NULL) {
-        *pict = s->last_picture_ptr->f;
+        if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+            return ret;
+        ff_print_debug_info(s, s->last_picture_ptr);
     }
 
     if(s->last_picture_ptr || s->low_delay){
         *got_frame = 1;
-        ff_print_debug_info(s, pict);
     }
#ifdef PRINT_FRAME_TIME #ifdef PRINT_FRAME_TIME
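
Returning the picture now takes a new reference on the DPB entry instead of
copying the AVFrame struct, and ff_print_debug_info() operates on the
Picture. A sketch of the shared output step (output_picture is an
illustrative helper, not a function added by the commit):

static int output_picture(MpegEncContext *s, AVFrame *pict, Picture *p)
{
    int ret;
    if ((ret = av_frame_ref(pict, &p->f)) < 0)
        return ret;
    ff_print_debug_info(s, p);
    return 0;
}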

View File

@ -113,7 +113,7 @@ static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
* practice then correct remapping should be added. */ * practice then correct remapping should be added. */
if (ref >= h->ref_count[0]) if (ref >= h->ref_count[0])
ref = 0; ref = 0;
fill_rectangle(&h->cur_pic.f.ref_index[0][4 * h->mb_xy], fill_rectangle(&h->cur_pic.ref_index[0][4 * h->mb_xy],
2, 2, 2, ref, 1); 2, 2, 2, ref, 1);
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1); fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8, fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
@ -166,28 +166,25 @@ void ff_h264_draw_horiz_band(H264Context *h, int y, int height)
} }
} }
-static void free_frame_buffer(H264Context *h, Picture *pic)
-{
-    ff_thread_release_buffer(h->avctx, &pic->f);
-    av_freep(&pic->f.hwaccel_picture_private);
-}
-
-static void free_picture(H264Context *h, Picture *pic)
+static void unref_picture(H264Context *h, Picture *pic)
 {
+    int off = offsetof(Picture, tf) + sizeof(pic->tf);
     int i;
 
-    if (pic->f.data[0])
-        free_frame_buffer(h, pic);
+    if (!pic->f.data[0])
+        return;
+
+    ff_thread_release_buffer(h->avctx, &pic->tf);
+    av_buffer_unref(&pic->hwaccel_priv_buf);
 
-    av_freep(&pic->qscale_table_base);
-    pic->f.qscale_table = NULL;
-    av_freep(&pic->mb_type_base);
-    pic->f.mb_type = NULL;
+    av_buffer_unref(&pic->qscale_table_buf);
+    av_buffer_unref(&pic->mb_type_buf);
     for (i = 0; i < 2; i++) {
-        av_freep(&pic->motion_val_base[i]);
-        av_freep(&pic->f.ref_index[i]);
-        pic->f.motion_val[i] = NULL;
+        av_buffer_unref(&pic->motion_val_buf[i]);
+        av_buffer_unref(&pic->ref_index_buf[i]);
     }
+
+    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
 }
static void release_unused_pictures(H264Context *h, int remove_current) static void release_unused_pictures(H264Context *h, int remove_current)
@ -195,15 +192,74 @@ static void release_unused_pictures(H264Context *h, int remove_current)
int i; int i;
/* release non reference frames */ /* release non reference frames */
for (i = 0; i < h->picture_count; i++) { for (i = 0; i < MAX_PICTURE_COUNT; i++) {
if (h->DPB[i].f.data[0] && !h->DPB[i].f.reference && if (h->DPB[i].f.data[0] && !h->DPB[i].reference &&
(!h->DPB[i].owner2 || h->DPB[i].owner2 == h) &&
(remove_current || &h->DPB[i] != h->cur_pic_ptr)) { (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
free_frame_buffer(h, &h->DPB[i]); unref_picture(h, &h->DPB[i]);
} }
} }
} }
static int ref_picture(H264Context *h, Picture *dst, Picture *src)
{
int ret, i;
av_assert0(!dst->f.buf[0]);
av_assert0(src->f.buf[0]);
src->tf.f = &src->f;
dst->tf.f = &dst->f;
ret = ff_thread_ref_frame(&dst->tf, &src->tf);
if (ret < 0)
goto fail;
dst->qscale_table_buf = av_buffer_ref(src->qscale_table_buf);
dst->mb_type_buf = av_buffer_ref(src->mb_type_buf);
if (!dst->qscale_table_buf || !dst->mb_type_buf)
goto fail;
dst->qscale_table = src->qscale_table;
dst->mb_type = src->mb_type;
for (i = 0; i < 2; i ++) {
dst->motion_val_buf[i] = av_buffer_ref(src->motion_val_buf[i]);
dst->ref_index_buf[i] = av_buffer_ref(src->ref_index_buf[i]);
if (!dst->motion_val_buf[i] || !dst->ref_index_buf[i])
goto fail;
dst->motion_val[i] = src->motion_val[i];
dst->ref_index[i] = src->ref_index[i];
}
if (src->hwaccel_picture_private) {
dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
if (!dst->hwaccel_priv_buf)
goto fail;
dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
}
for (i = 0; i < 2; i++)
dst->field_poc[i] = src->field_poc[i];
memcpy(dst->ref_poc, src->ref_poc, sizeof(src->ref_poc));
memcpy(dst->ref_count, src->ref_count, sizeof(src->ref_count));
dst->poc = src->poc;
dst->frame_num = src->frame_num;
dst->mmco_reset = src->mmco_reset;
dst->pic_id = src->pic_id;
dst->long_ref = src->long_ref;
dst->mbaff = src->mbaff;
dst->field_picture = src->field_picture;
dst->needs_realloc = src->needs_realloc;
dst->reference = src->reference;
return 0;
fail:
unref_picture(h, dst);
return ret;
}
static int alloc_scratch_buffers(H264Context *h, int linesize) static int alloc_scratch_buffers(H264Context *h, int linesize)
{ {
int alloc_size = FFALIGN(FFABS(linesize) + 32, 32); int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
@ -229,60 +285,86 @@ static int alloc_scratch_buffers(H264Context *h, int linesize)
return 0; return 0;
} }
-static int alloc_picture(H264Context *h, Picture *pic)
+static int init_table_pools(H264Context *h)
{ {
const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1; const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
const int mb_array_size = h->mb_stride * h->mb_height; const int mb_array_size = h->mb_stride * h->mb_height;
const int b4_stride = h->mb_width * 4 + 1; const int b4_stride = h->mb_width * 4 + 1;
const int b4_array_size = b4_stride * h->mb_height * 4; const int b4_array_size = b4_stride * h->mb_height * 4;
h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
av_buffer_allocz);
h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
sizeof(uint32_t), av_buffer_allocz);
h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
sizeof(int16_t), av_buffer_allocz);
h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
!h->ref_index_pool) {
av_buffer_pool_uninit(&h->qscale_table_pool);
av_buffer_pool_uninit(&h->mb_type_pool);
av_buffer_pool_uninit(&h->motion_val_pool);
av_buffer_pool_uninit(&h->ref_index_pool);
return AVERROR(ENOMEM);
}
return 0;
}
static int alloc_picture(H264Context *h, Picture *pic)
{
int i, ret = 0; int i, ret = 0;
av_assert0(!pic->f.data[0]); av_assert0(!pic->f.data[0]);
if (h->avctx->hwaccel) { if (h->avctx->hwaccel) {
const AVHWAccel *hwaccel = h->avctx->hwaccel; const AVHWAccel *hwaccel = h->avctx->hwaccel;
av_assert0(!pic->f.hwaccel_picture_private); av_assert0(!pic->hwaccel_picture_private);
if (hwaccel->priv_data_size) { if (hwaccel->priv_data_size) {
pic->f.hwaccel_picture_private = av_mallocz(hwaccel->priv_data_size); pic->hwaccel_priv_buf = av_buffer_allocz(hwaccel->priv_data_size);
if (!pic->f.hwaccel_picture_private) if (!pic->hwaccel_priv_buf)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
} }
} }
-    ret = ff_thread_get_buffer(h->avctx, &pic->f);
+    pic->tf.f = &pic->f;
+    ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
+                               AV_GET_BUFFER_FLAG_REF : 0);
if (ret < 0) if (ret < 0)
goto fail; goto fail;
h->linesize = pic->f.linesize[0]; h->linesize = pic->f.linesize[0];
h->uvlinesize = pic->f.linesize[1]; h->uvlinesize = pic->f.linesize[1];
if (pic->f.qscale_table == NULL) { if (!h->qscale_table_pool) {
FF_ALLOCZ_OR_GOTO(h->avctx, pic->qscale_table_base, ret = init_table_pools(h);
(big_mb_num + h->mb_stride) * sizeof(uint8_t), if (ret < 0)
fail) goto fail;
FF_ALLOCZ_OR_GOTO(h->avctx, pic->mb_type_base,
(big_mb_num + h->mb_stride) * sizeof(uint32_t),
fail)
pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
pic->f.qscale_table = pic->qscale_table_base + 2 * h->mb_stride + 1;
for (i = 0; i < 2; i++) {
FF_ALLOCZ_OR_GOTO(h->avctx, pic->motion_val_base[i],
2 * (b4_array_size + 4) * sizeof(int16_t),
fail)
pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
FF_ALLOCZ_OR_GOTO(h->avctx, pic->f.ref_index[i],
4 * mb_array_size * sizeof(uint8_t), fail)
}
pic->f.motion_subsample_log2 = 2;
pic->f.qstride = h->mb_stride;
} }
pic->owner2 = h; pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
if (!pic->qscale_table_buf || !pic->mb_type_buf)
goto fail;
pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
for (i = 0; i < 2; i++) {
pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
goto fail;
pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
pic->ref_index[i] = pic->ref_index_buf[i]->data;
}
pic->f.motion_subsample_log2 = 2;
return 0; return 0;
fail: fail:
free_frame_buffer(h, pic); unref_picture(h, pic);
return (ret < 0) ? ret : AVERROR(ENOMEM); return (ret < 0) ? ret : AVERROR(ENOMEM);
} }
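
The pools introduced above mean the per-picture tables are recycled rather
than reallocated for every frame. A self-contained sketch of the
AVBufferPool lifecycle with a toy size:

#include "libavutil/buffer.h"
#include "libavutil/error.h"

static int pool_demo(void)
{
    AVBufferPool *pool = av_buffer_pool_init(1024, av_buffer_allocz);
    AVBufferRef  *buf;

    if (!pool)
        return AVERROR(ENOMEM);

    buf = av_buffer_pool_get(pool);  /* freshly allocated, zeroed by av_buffer_allocz */
    if (!buf) {
        av_buffer_pool_uninit(&pool);
        return AVERROR(ENOMEM);
    }

    av_buffer_unref(&buf);           /* hands the buffer back to the pool */
    av_buffer_pool_uninit(&pool);    /* memory is freed once all buffers return */
    return 0;
}
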
@ -290,9 +372,8 @@ static inline int pic_is_unused(H264Context *h, Picture *pic)
{ {
if (pic->f.data[0] == NULL) if (pic->f.data[0] == NULL)
return 1; return 1;
if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF)) if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
if (!pic->owner2 || pic->owner2 == h) return 1;
return 1;
return 0; return 0;
} }
@ -300,17 +381,16 @@ static int find_unused_picture(H264Context *h)
{ {
int i; int i;
for (i = h->picture_range_start; i < h->picture_range_end; i++) { for (i = 0; i < MAX_PICTURE_COUNT; i++) {
if (pic_is_unused(h, &h->DPB[i])) if (pic_is_unused(h, &h->DPB[i]))
break; break;
} }
if (i == h->picture_range_end) if (i == MAX_PICTURE_COUNT)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
if (h->DPB[i].needs_realloc) { if (h->DPB[i].needs_realloc) {
h->DPB[i].needs_realloc = 0; h->DPB[i].needs_realloc = 0;
free_picture(h, &h->DPB[i]); unref_picture(h, &h->DPB[i]);
avcodec_get_frame_defaults(&h->DPB[i].f);
} }
return i; return i;
@ -561,8 +641,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
// Error resilience puts the current picture in the ref list. // Error resilience puts the current picture in the ref list.
// Don't try to wait on these as it will cause a deadlock. // Don't try to wait on these as it will cause a deadlock.
// Fields can wait on each other, though. // Fields can wait on each other, though.
if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque || if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
(ref->f.reference & 3) != h->picture_structure) { (ref->reference & 3) != h->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0); my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
if (refs[0][ref_n] < 0) if (refs[0][ref_n] < 0)
nrefs[0] += 1; nrefs[0] += 1;
@ -574,8 +654,8 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
int ref_n = h->ref_cache[1][scan8[n]]; int ref_n = h->ref_cache[1][scan8[n]];
Picture *ref = &h->ref_list[1][ref_n]; Picture *ref = &h->ref_list[1][ref_n];
if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque || if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
(ref->f.reference & 3) != h->picture_structure) { (ref->reference & 3) != h->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1); my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
if (refs[1][ref_n] < 0) if (refs[1][ref_n] < 0)
nrefs[1] += 1; nrefs[1] += 1;
@ -592,7 +672,7 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
static void await_references(H264Context *h) static void await_references(H264Context *h)
{ {
const int mb_xy = h->mb_xy; const int mb_xy = h->mb_xy;
const int mb_type = h->cur_pic.f.mb_type[mb_xy]; const int mb_type = h->cur_pic.mb_type[mb_xy];
int refs[2][48]; int refs[2][48];
int nrefs[2] = { 0 }; int nrefs[2] = { 0 };
int ref, list; int ref, list;
@ -664,7 +744,7 @@ static void await_references(H264Context *h)
int row = refs[list][ref]; int row = refs[list][ref];
if (row >= 0) { if (row >= 0) {
Picture *ref_pic = &h->ref_list[list][ref]; Picture *ref_pic = &h->ref_list[list][ref];
int ref_field = ref_pic->f.reference - 1; int ref_field = ref_pic->reference - 1;
int ref_field_picture = ref_pic->field_picture; int ref_field_picture = ref_pic->field_picture;
int pic_height = 16 * h->mb_height >> ref_field_picture; int pic_height = 16 * h->mb_height >> ref_field_picture;
@ -672,24 +752,24 @@ static void await_references(H264Context *h)
nrefs[list]--; nrefs[list]--;
if (!FIELD_PICTURE && ref_field_picture) { // frame referencing two fields if (!FIELD_PICTURE && ref_field_picture) { // frame referencing two fields
ff_thread_await_progress(&ref_pic->f, ff_thread_await_progress(&ref_pic->tf,
FFMIN((row >> 1) - !(row & 1), FFMIN((row >> 1) - !(row & 1),
pic_height - 1), pic_height - 1),
1); 1);
ff_thread_await_progress(&ref_pic->f, ff_thread_await_progress(&ref_pic->tf,
FFMIN((row >> 1), pic_height - 1), FFMIN((row >> 1), pic_height - 1),
0); 0);
} else if (FIELD_PICTURE && !ref_field_picture) { // field referencing one field of a frame } else if (FIELD_PICTURE && !ref_field_picture) { // field referencing one field of a frame
ff_thread_await_progress(&ref_pic->f, ff_thread_await_progress(&ref_pic->tf,
FFMIN(row * 2 + ref_field, FFMIN(row * 2 + ref_field,
pic_height - 1), pic_height - 1),
0); 0);
} else if (FIELD_PICTURE) { } else if (FIELD_PICTURE) {
ff_thread_await_progress(&ref_pic->f, ff_thread_await_progress(&ref_pic->tf,
FFMIN(row, pic_height - 1), FFMIN(row, pic_height - 1),
ref_field); ref_field);
} else { } else {
ff_thread_await_progress(&ref_pic->f, ff_thread_await_progress(&ref_pic->tf,
FFMIN(row, pic_height - 1), FFMIN(row, pic_height - 1),
0); 0);
} }
@ -781,7 +861,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
ysh = 3 - (chroma_idc == 2 /* yuv422 */);
if (chroma_idc == 1 /* yuv420 */ && MB_FIELD) {
// chroma offset when predicting from a field of opposite parity
-my += 2 * ((h->mb_y & 1) - (pic->f.reference - 1));
+my += 2 * ((h->mb_y & 1) - (pic->reference - 1));
emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
}
@ -1009,13 +1089,17 @@ static void free_tables(H264Context *h, int free_rbsp)
av_freep(&h->mb2b_xy);
av_freep(&h->mb2br_xy);
-if (free_rbsp) {
-    for (i = 0; i < h->picture_count && !h->avctx->internal->is_copy; i++)
-        free_picture(h, &h->DPB[i]);
+av_buffer_pool_uninit(&h->qscale_table_pool);
+av_buffer_pool_uninit(&h->mb_type_pool);
+av_buffer_pool_uninit(&h->motion_val_pool);
+av_buffer_pool_uninit(&h->ref_index_pool);
+if (free_rbsp && h->DPB) {
+    for (i = 0; i < MAX_PICTURE_COUNT; i++)
+        unref_picture(h, &h->DPB[i]);
av_freep(&h->DPB);
-h->picture_count = 0;
} else if (h->DPB) {
-for (i = 0; i < h->picture_count; i++)
+for (i = 0; i < MAX_PICTURE_COUNT; i++)
h->DPB[i].needs_realloc = 1;
}
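The per-picture metadata (qscale table, mb_type, motion vectors, ref indices) now comes from AVBufferPools, so the tables are recycled across frames and shared between Pictures by reference instead of being copied or reallocated. A minimal sketch of the pool lifecycle under the public libavutil API (get_qscale_table and table_size are illustrative placeholders):

#include "libavutil/buffer.h"
#include "libavutil/error.h"

/* hypothetical helper: hand out one table from a lazily created pool */
static int get_qscale_table(AVBufferPool **pool, AVBufferRef **out, int table_size)
{
    if (!*pool) {
        *pool = av_buffer_pool_init(table_size, NULL); /* NULL => av_buffer_alloc */
        if (!*pool)
            return AVERROR(ENOMEM);
    }
    *out = av_buffer_pool_get(*pool); /* recycled buffer if one is free */
    if (!*out)
        return AVERROR(ENOMEM);
    return 0;
}
/* av_buffer_unref(&buf) returns a buffer to its pool; av_buffer_pool_uninit(&pool)
 * frees the pool once every outstanding buffer has come back. */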
@ -1164,11 +1248,10 @@ int ff_h264_alloc_tables(H264Context *h)
init_dequant_tables(h);
if (!h->DPB) {
-h->picture_count = MAX_PICTURE_COUNT * FFMAX(1, h->avctx->thread_count);
-h->DPB = av_mallocz_array(h->picture_count, sizeof(*h->DPB));
+h->DPB = av_mallocz_array(MAX_PICTURE_COUNT, sizeof(*h->DPB));
if (!h->DPB)
return AVERROR(ENOMEM);
-for (i = 0; i < h->picture_count; i++)
+for (i = 0; i < MAX_PICTURE_COUNT; i++)
avcodec_get_frame_defaults(&h->DPB[i].f);
avcodec_get_frame_defaults(&h->cur_pic.f);
}
@ -1367,8 +1450,6 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
common_init(h);
h->picture_structure = PICT_FRAME;
-h->picture_range_start = 0;
-h->picture_range_end = MAX_PICTURE_COUNT;
h->slice_context_count = 1;
h->workaround_bugs = avctx->workaround_bugs;
h->flags = avctx->flags;
@ -1408,6 +1489,8 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
h->low_delay = 0;
}
+avctx->internal->allocate_progress = 1;
return 0;
}
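Setting avctx->internal->allocate_progress tells the generic frame-threading code to attach a progress array to every ThreadFrame it hands out; h264 needs this because it reports and awaits row progress on reference frames. A sketch of the same opt-in in a hypothetical decoder's init function (mydec_init is illustrative):

static av_cold int mydec_init(AVCodecContext *avctx)
{
    /* only decoders with inter-frame dependencies need the progress buffer;
     * without it, frames are decoded directly into user-supplied buffers */
    avctx->internal->allocate_progress = 1;
    return 0;
}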
@ -1415,7 +1498,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
#undef REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \
((pic && pic >= old_ctx->DPB && \
-    pic < old_ctx->DPB + old_ctx->picture_count) ? \
+    pic < old_ctx->DPB + MAX_PICTURE_COUNT) ? \
&new_ctx->DPB[pic - old_ctx->DPB] : NULL)
static void copy_picture_range(Picture **to, Picture **from, int count,
@ -1427,7 +1510,7 @@ static void copy_picture_range(Picture **to, Picture **from, int count,
for (i = 0; i < count; i++) {
assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
        IN_RANGE(from[i], old_base->DPB,
-                sizeof(Picture) * old_base->picture_count) ||
+                sizeof(Picture) * MAX_PICTURE_COUNT) ||
        !from[i]));
to[i] = REBASE_PICTURE(from[i], new_base, old_base);
}
@ -1476,7 +1559,7 @@ static int decode_update_thread_context(AVCodecContext *dst,
H264Context *h = dst->priv_data, *h1 = src->priv_data;
int inited = h->context_initialized, err = 0;
int context_reinitialized = 0;
-int i;
+int i, ret;
if (dst == src || !h1->context_initialized)
return 0;
@ -1529,12 +1612,16 @@ static int decode_update_thread_context(AVCodecContext *dst,
memset(&h->me, 0, sizeof(h->me));
h->context_initialized = 0;
-h->picture_range_start += MAX_PICTURE_COUNT;
-h->picture_range_end += MAX_PICTURE_COUNT;
+memset(&h->cur_pic, 0, sizeof(h->cur_pic));
+avcodec_get_frame_defaults(&h->cur_pic.f);
+h->cur_pic.tf.f = &h->cur_pic.f;
h->avctx = dst;
h->DPB = NULL;
-h->cur_pic.f.extended_data = h->cur_pic.f.data;
+h->qscale_table_pool = NULL;
+h->mb_type_pool = NULL;
+h->ref_index_pool = NULL;
+h->motion_val_pool = NULL;
if (ff_h264_alloc_tables(h) < 0) {
av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
@ -1568,15 +1655,17 @@ static int decode_update_thread_context(AVCodecContext *dst,
h->data_partitioning = h1->data_partitioning;
h->low_delay = h1->low_delay;
-memcpy(h->DPB, h1->DPB, h1->picture_count * sizeof(*h1->DPB));
-
-// reset s->picture[].f.extended_data to s->picture[].f.data
-for (i = 0; i < h->picture_count; i++)
-    h->DPB[i].f.extended_data = h->DPB[i].f.data;
+for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+    unref_picture(h, &h->DPB[i]);
+    if (h1->DPB[i].f.data[0] &&
+        (ret = ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
+        return ret;
+}
h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
-h->cur_pic = h1->cur_pic;
-h->cur_pic.f.extended_data = h->cur_pic.f.data;
+unref_picture(h, &h->cur_pic);
+if ((ret = ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0)
+    return ret;
h->workaround_bugs = h1->workaround_bugs;
h->low_delay = h1->low_delay;
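ref_picture()/unref_picture() replace the old raw struct copies: each thread context now holds its own references to the frame buffers and to the pooled metadata, so no thread can free data another thread is still reading. A minimal sketch of the underlying idea using only the public AVFrame API (share_picture is illustrative; the private helpers additionally re-reference the metadata buffers, omitted here):

#include "libavutil/frame.h"

static int share_picture(AVFrame *dst, const AVFrame *src)
{
    av_frame_unref(dst);            /* drop whatever dst held; the buffers live
                                     * on as long as any other reference exists */
    return av_frame_ref(dst, src);  /* take new references; no pixel data copied */
}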
@ -1660,7 +1749,7 @@ int ff_h264_frame_start(H264Context *h)
}
pic = &h->DPB[i];
-pic->f.reference = h->droppable ? 0 : h->picture_structure;
+pic->reference = h->droppable ? 0 : h->picture_structure;
pic->f.coded_picture_number = h->coded_picture_number++;
pic->field_picture = h->picture_structure != PICT_FRAME;
/*
@ -1675,8 +1764,9 @@ int ff_h264_frame_start(H264Context *h)
return ret;
h->cur_pic_ptr = pic;
-h->cur_pic = *h->cur_pic_ptr;
-h->cur_pic.f.extended_data = h->cur_pic.f.data;
+unref_picture(h, &h->cur_pic);
+if ((ret = ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
+    return ret;
ff_er_frame_start(&h->er);
@ -1717,7 +1807,7 @@ int ff_h264_frame_start(H264Context *h)
 * get released even with set reference, besides SVQ3 and others do not
 * mark frames as reference later "naturally". */
if (h->avctx->codec_id != AV_CODEC_ID_SVQ3)
-h->cur_pic_ptr->f.reference = 0;
+h->cur_pic_ptr->reference = 0;
h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
@ -1743,7 +1833,6 @@ static void decode_postinit(H264Context *h, int setup_finished)
int i, pics, out_of_order, out_idx;
int invalid = 0, cnt = 0;
-h->cur_pic_ptr->f.qscale_type = FF_QSCALE_TYPE_H264;
h->cur_pic_ptr->f.pict_type = h->pict_type;
if (h->next_output_pic)
@ -1847,8 +1936,8 @@ static void decode_postinit(H264Context *h, int setup_finished)
assert(pics <= MAX_DELAYED_PIC_COUNT);
h->delayed_pic[pics++] = cur;
-if (cur->f.reference == 0)
-    cur->f.reference = DELAYED_PIC_REF;
+if (cur->reference == 0)
+    cur->reference = DELAYED_PIC_REF;
/* Frame reordering. This code takes pictures from coding order and sorts
 * them by their incremental POC value into display order. It supports POC
@ -1913,10 +2002,9 @@ static void decode_postinit(H264Context *h, int setup_finished)
}
if (pics > h->avctx->has_b_frames) {
-out->f.reference &= ~DELAYED_PIC_REF;
+out->reference &= ~DELAYED_PIC_REF;
// for frame threading, the owner must be the second field's thread or
// else the first thread can release the picture and reuse it unsafely
-out->owner2 = h;
for (i = out_idx; h->delayed_pic[i]; i++)
h->delayed_pic[i] = h->delayed_pic[i + 1];
}
@ -2350,7 +2438,7 @@ static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type,
void ff_h264_hl_decode_mb(H264Context *h)
{
const int mb_xy = h->mb_xy;
-const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+const int mb_type = h->cur_pic.mb_type[mb_xy];
int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || h->qscale == 0;
if (CHROMA444) {
@ -2516,7 +2604,7 @@ static void flush_change(H264Context *h)
h->prev_interlaced_frame = 1;
idr(h);
if (h->cur_pic_ptr)
-h->cur_pic_ptr->f.reference = 0;
+h->cur_pic_ptr->reference = 0;
h->first_field = 0;
memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
@ -2533,17 +2621,16 @@ static void flush_dpb(AVCodecContext *avctx)
for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
if (h->delayed_pic[i])
-h->delayed_pic[i]->f.reference = 0;
+h->delayed_pic[i]->reference = 0;
h->delayed_pic[i] = NULL;
}
flush_change(h);
-for (i = 0; i < h->picture_count; i++) {
-    if (h->DPB[i].f.data[0])
-        free_frame_buffer(h, &h->DPB[i]);
-}
+for (i = 0; i < MAX_PICTURE_COUNT; i++)
+    unref_picture(h, &h->DPB[i]);
h->cur_pic_ptr = NULL;
+unref_picture(h, &h->cur_pic);
h->mb_x = h->mb_y = 0;
@ -2676,7 +2763,7 @@ static int field_end(H264Context *h, int in_setup)
h->mb_y = 0;
if (!in_setup && !h->droppable)
-ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                          h->picture_structure == PICT_BOTTOM_FIELD);
if (CONFIG_H264_VDPAU_DECODER &&
@ -3019,9 +3106,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h0->current_slice = 0;
if (!h0->first_field) {
-if (h->cur_pic_ptr && !h->droppable &&
-    h->cur_pic_ptr->owner2 == h) {
-    ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+if (h->cur_pic_ptr && !h->droppable) {
+    ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                              h->picture_structure == PICT_BOTTOM_FIELD);
}
h->cur_pic_ptr = NULL;
@ -3240,20 +3326,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
if (h0->first_field) {
assert(h0->cur_pic_ptr);
assert(h0->cur_pic_ptr->f.data[0]);
-assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
+assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
-/* Mark old field/frame as completed */
-if (!last_pic_droppable && h0->cur_pic_ptr->owner2 == h0) {
-    ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
-                              last_pic_structure == PICT_BOTTOM_FIELD);
-}
/* figure out if we have a complementary field pair */
if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
/* Previous field is unmatched. Don't display it, but let it
 * remain for reference if marked as such. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
-ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
+ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
                          last_pic_structure == PICT_TOP_FIELD);
}
} else {
@ -3263,7 +3343,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
 * pair. Throw away previous field except for reference
 * purposes. */
if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
-ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
+ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
                          last_pic_structure == PICT_TOP_FIELD);
}
} else {
@ -3286,14 +3366,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h->droppable = last_pic_droppable;
return AVERROR_PATCHWELCOME;
}
-/* Take ownership of this buffer. Note that if another thread owned
- * the first field of this buffer, we're not operating on that pointer,
- * so the original thread is still responsible for reporting progress
- * on that first field (or if that was us, we just did that above).
- * By taking ownership, we assign responsibility to ourselves to
- * report progress on the second field. */
-h0->cur_pic_ptr->owner2 = h0;
}
}
}
@ -3308,8 +3380,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
h->prev_frame_num++;
h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
h->cur_pic_ptr->frame_num = h->prev_frame_num;
-ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, 0);
-ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, 1);
+ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
+ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 &&
    h->avctx->err_recognition & AV_EF_EXPLODE)
    return ret;
@ -3339,7 +3411,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
if (h0->first_field) {
assert(h0->cur_pic_ptr);
assert(h0->cur_pic_ptr->f.data[0]);
-assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
+assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
/* figure out if we have a complementary field pair */
if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
@ -3606,16 +3678,16 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j];
for (i = 0; i < 16; i++) {
id_list[i] = 60;
-if (h->ref_list[j][i].f.data[0]) {
+if (j < h->list_count && i < h->ref_count[j] && h->ref_list[j][i].f.buf[0]) {
int k;
-uint8_t *base = h->ref_list[j][i].f.base[0];
+AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer;
for (k = 0; k < h->short_ref_count; k++)
-if (h->short_ref[k]->f.base[0] == base) {
+if (h->short_ref[k]->f.buf[0]->buffer == buf) {
id_list[i] = k;
break;
}
for (k = 0; k < h->long_ref_count; k++)
-if (h->long_ref[k] && h->long_ref[k]->f.base[0] == base) {
+if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) {
id_list[i] = h->short_ref_count + k;
break;
}
@ -3626,12 +3698,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
ref2frm[1] = -1;
for (i = 0; i < 16; i++)
ref2frm[i + 2] = 4 * id_list[i] +
-                 (h->ref_list[j][i].f.reference & 3);
+                 (h->ref_list[j][i].reference & 3);
ref2frm[18 + 0] =
ref2frm[18 + 1] = -1;
for (i = 16; i < 48; i++)
ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
-                 (h->ref_list[j][i].f.reference & 3);
+                 (h->ref_list[j][i].reference & 3);
}
if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
@ -3691,11 +3763,11 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
const int b8_xy = 4 * top_xy + 2;
int (*ref2frm)[64] = h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
-AV_COPY128(mv_dst - 1 * 8, h->cur_pic.f.motion_val[list][b_xy + 0]);
+AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
ref_cache[0 - 1 * 8] =
-ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 0]];
+ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 0]];
ref_cache[2 - 1 * 8] =
-ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 1]];
+ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 1]];
} else {
AV_ZERO128(mv_dst - 1 * 8);
AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
@ -3706,14 +3778,14 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
const int b8_xy = 4 * left_xy[LTOP] + 1;
int (*ref2frm)[64] = h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
-AV_COPY32(mv_dst - 1 + 0, h->cur_pic.f.motion_val[list][b_xy + b_stride * 0]);
-AV_COPY32(mv_dst - 1 + 8, h->cur_pic.f.motion_val[list][b_xy + b_stride * 1]);
-AV_COPY32(mv_dst - 1 + 16, h->cur_pic.f.motion_val[list][b_xy + b_stride * 2]);
-AV_COPY32(mv_dst - 1 + 24, h->cur_pic.f.motion_val[list][b_xy + b_stride * 3]);
+AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
+AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
+AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
+AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
ref_cache[-1 + 0] =
-ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 2 * 0]];
+ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
ref_cache[-1 + 16] =
-ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 2 * 1]];
+ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
} else {
AV_ZERO32(mv_dst - 1 + 0);
AV_ZERO32(mv_dst - 1 + 8);
@ -3737,7 +3809,7 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
}
{
-int8_t *ref = &h->cur_pic.f.ref_index[list][4 * mb_xy];
+int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
int (*ref2frm)[64] = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
@ -3748,7 +3820,7 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
}
{
-int16_t(*mv_src)[2] = &h->cur_pic.f.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
+int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
@ -3775,7 +3847,7 @@ static int fill_filter_caches(H264Context *h, int mb_type)
left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
if (FRAME_MBAFF) {
-const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.f.mb_type[mb_xy - 1]);
+const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
if (h->mb_y & 1) {
if (left_mb_field_flag != curr_mb_field_flag)
@ -3783,7 +3855,7 @@ static int fill_filter_caches(H264Context *h, int mb_type)
} else {
if (curr_mb_field_flag)
top_xy += h->mb_stride &
-          (((h->cur_pic.f.mb_type[top_xy] >> 7) & 1) - 1);
+          (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
if (left_mb_field_flag != curr_mb_field_flag)
left_xy[LBOT] += h->mb_stride;
}
@ -3797,25 +3869,25 @@ static int fill_filter_caches(H264Context *h, int mb_type)
 * This is a conservative estimate: could also check beta_offset
 * and more accurate chroma_qp. */
int qp_thresh = h->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
-int qp = h->cur_pic.f.qscale_table[mb_xy];
+int qp = h->cur_pic.qscale_table[mb_xy];
if (qp <= qp_thresh &&
    (left_xy[LTOP] < 0 ||
-    ((qp + h->cur_pic.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
+    ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
    (top_xy < 0 ||
-    ((qp + h->cur_pic.f.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
+    ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
if (!FRAME_MBAFF)
return 1;
if ((left_xy[LTOP] < 0 ||
-     ((qp + h->cur_pic.f.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
+     ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
    (top_xy < h->mb_stride ||
-     ((qp + h->cur_pic.f.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
+     ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
return 1;
}
}
-top_type = h->cur_pic.f.mb_type[top_xy];
-left_type[LTOP] = h->cur_pic.f.mb_type[left_xy[LTOP]];
-left_type[LBOT] = h->cur_pic.f.mb_type[left_xy[LBOT]];
+top_type = h->cur_pic.mb_type[top_xy];
+left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
+left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
if (h->deblocking_filter == 2) {
if (h->slice_table[top_xy] != h->slice_num)
top_type = 0;
@ -3920,7 +3992,7 @@ static void loop_filter(H264Context *h, int start_x, int end_x)
int mb_xy, mb_type;
mb_xy = h->mb_xy = mb_x + mb_y * h->mb_stride;
h->slice_num = h->slice_table[mb_xy];
-mb_type = h->cur_pic.f.mb_type[mb_xy];
+mb_type = h->cur_pic.mb_type[mb_xy];
h->list_count = h->list_counts[mb_xy];
if (FRAME_MBAFF)
@ -3955,8 +4027,8 @@ static void loop_filter(H264Context *h, int start_x, int end_x)
            uvlinesize, 0);
if (fill_filter_caches(h, mb_type))
continue;
-h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mb_xy]);
-h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mb_xy]);
+h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.qscale_table[mb_xy]);
+h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.qscale_table[mb_xy]);
if (FRAME_MBAFF) {
ff_h264_filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr,
@ -3978,9 +4050,9 @@ static void predict_field_decoding_flag(H264Context *h)
{
const int mb_xy = h->mb_x + h->mb_y * h->mb_stride;
int mb_type = (h->slice_table[mb_xy - 1] == h->slice_num) ?
-              h->cur_pic.f.mb_type[mb_xy - 1] :
+              h->cur_pic.mb_type[mb_xy - 1] :
              (h->slice_table[mb_xy - h->mb_stride] == h->slice_num) ?
-              h->cur_pic.f.mb_type[mb_xy - h->mb_stride] : 0;
+              h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
}
@ -4014,7 +4086,7 @@ static void decode_finish_row(H264Context *h)
if (h->droppable)
return;
-ff_thread_report_progress(&h->cur_pic_ptr->f, top + height - 1,
+ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
                          h->picture_structure == PICT_BOTTOM_FIELD);
}
@ -4513,9 +4585,8 @@ again:
end:
/* clean up */
-if (h->cur_pic_ptr && h->cur_pic_ptr->owner2 == h &&
-    !h->droppable) {
-    ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
+if (h->cur_pic_ptr && !h->droppable) {
+    ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                              h->picture_structure == PICT_BOTTOM_FIELD);
}
@ -4543,6 +4614,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
H264Context *h = avctx->priv_data;
AVFrame *pict = data;
int buf_index = 0;
+int ret;
h->flags = avctx->flags;
@ -4571,8 +4643,9 @@ out:
h->delayed_pic[i] = h->delayed_pic[i + 1];
if (out) {
+if ((ret = av_frame_ref(pict, &out->f)) < 0)
+    return ret;
*got_frame = 1;
-*pict = out->f;
}
return buf_index;
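With av_frame_ref() the decoder hands the caller an independent reference instead of aliasing its internal Picture, so the caller may keep or free the frame without caring what happens inside the DPB. A sketch of the caller's side under the refcounted API (consume_one_frame is illustrative):

#include "libavcodec/avcodec.h"
#include "libavutil/frame.h"

static int consume_one_frame(AVCodecContext *avctx, AVPacket *pkt)
{
    AVFrame *pict = av_frame_alloc();
    int got_frame = 0, ret;

    if (!pict)
        return AVERROR(ENOMEM);
    ret = avcodec_decode_video2(avctx, pict, &got_frame, pkt);
    if (ret >= 0 && got_frame) {
        /* ... read pict->data[] / pict->linesize[] ... */
    }
    av_frame_free(&pict); /* drops only this reference; the DPB keeps its own */
    return ret;
}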
@ -4605,8 +4678,9 @@ out:
/* Wait for second field. */
*got_frame = 0;
} else {
+if ((ret = av_frame_ref(pict, &h->next_output_pic->f)) < 0)
+    return ret;
*got_frame = 1;
-*pict = h->next_output_pic->f;
}
}
@ -4635,13 +4709,15 @@ static av_cold int h264_decode_end(AVCodecContext *avctx)
ff_h264_free_context(h);
-if (h->DPB && !h->avctx->internal->is_copy) {
-    for (i = 0; i < h->picture_count; i++) {
-        free_picture(h, &h->DPB[i]);
+if (h->DPB) {
+    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+        unref_picture(h, &h->DPB[i]);
    }
}
av_freep(&h->DPB);
+unref_picture(h, &h->cur_pic);
return 0;
}

View File

@ -266,8 +266,6 @@ typedef struct H264Context {
Picture *DPB;
Picture *cur_pic_ptr;
Picture cur_pic;
-int picture_count;
-int picture_range_start, picture_range_end;
int pixel_shift; ///< 0 for 8-bit H264, 1 for high-bit-depth H264
int chroma_qp[2]; // QPc
@ -621,6 +619,11 @@ typedef struct H264Context {
uint8_t *bipred_scratchpad;
uint8_t *edge_emu_buffer;
int16_t *dc_val_base;
+
+AVBufferPool *qscale_table_pool;
+AVBufferPool *mb_type_pool;
+AVBufferPool *motion_val_pool;
+AVBufferPool *ref_index_pool;
} H264Context;
extern const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM + 1]; ///< One chroma qp table for each supported bit depth (8, 9, 10).
@ -876,7 +879,7 @@ static av_always_inline void write_back_motion_list(H264Context *h,
                                                    int b_xy, int b8_xy,
                                                    int mb_type, int list)
{
-int16_t(*mv_dst)[2] = &h->cur_pic.f.motion_val[list][b_xy];
+int16_t(*mv_dst)[2] = &h->cur_pic.motion_val[list][b_xy];
int16_t(*mv_src)[2] = &h->mv_cache[list][scan8[0]];
AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0);
AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1);
@ -897,7 +900,7 @@ static av_always_inline void write_back_motion_list(H264Context *h,
}
{
-int8_t *ref_index = &h->cur_pic.f.ref_index[list][b8_xy];
+int8_t *ref_index = &h->cur_pic.ref_index[list][b8_xy];
int8_t *ref_cache = h->ref_cache[list];
ref_index[0 + 0 * 2] = ref_cache[scan8[0]];
ref_index[1 + 0 * 2] = ref_cache[scan8[4]];
@ -915,7 +918,7 @@ static av_always_inline void write_back_motion(H264Context *h, int mb_type)
if (USES_LIST(mb_type, 0)) {
write_back_motion_list(h, b_stride, b_xy, b8_xy, mb_type, 0);
} else {
-fill_rectangle(&h->cur_pic.f.ref_index[0][b8_xy],
+fill_rectangle(&h->cur_pic.ref_index[0][b8_xy],
               2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
}
if (USES_LIST(mb_type, 1))

View File

@ -1283,8 +1283,8 @@ static int decode_cabac_field_decoding_flag(H264Context *h) {
unsigned long ctx = 0;
-ctx += h->mb_field_decoding_flag & !!h->mb_x; //for FMO:(s->current_picture.f.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
-ctx += (h->cur_pic.f.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num);
+ctx += h->mb_field_decoding_flag & !!h->mb_x; //for FMO:(s->current_picture.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
+ctx += (h->cur_pic.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num);
return get_cabac_noinline( &h->cabac, &(h->cabac_state+70)[ctx] );
}
@ -1328,13 +1328,13 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
mba_xy = mb_xy - 1;
if( (mb_y&1)
    && h->slice_table[mba_xy] == h->slice_num
-    && MB_FIELD == !!IS_INTERLACED( h->cur_pic.f.mb_type[mba_xy] ) )
+    && MB_FIELD == !!IS_INTERLACED( h->cur_pic.mb_type[mba_xy] ) )
mba_xy += h->mb_stride;
if( MB_FIELD ){
mbb_xy = mb_xy - h->mb_stride;
if( !(mb_y&1)
    && h->slice_table[mbb_xy] == h->slice_num
-    && IS_INTERLACED( h->cur_pic.f.mb_type[mbb_xy] ) )
+    && IS_INTERLACED( h->cur_pic.mb_type[mbb_xy] ) )
mbb_xy -= h->mb_stride;
}else
mbb_xy = mb_x + (mb_y-1)*h->mb_stride;
@ -1344,9 +1344,9 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
mbb_xy = mb_xy - (h->mb_stride << FIELD_PICTURE);
}
-if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP(h->cur_pic.f.mb_type[mba_xy] ))
+if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP(h->cur_pic.mb_type[mba_xy] ))
ctx++;
-if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP(h->cur_pic.f.mb_type[mbb_xy] ))
+if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP(h->cur_pic.mb_type[mbb_xy] ))
ctx++;
if( h->slice_type_nos == AV_PICTURE_TYPE_B )
@ -1893,7 +1893,7 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
/* read skip flags */
if( skip ) {
if( FRAME_MBAFF && (h->mb_y&1)==0 ){
-h->cur_pic.f.mb_type[mb_xy] = MB_TYPE_SKIP;
+h->cur_pic.mb_type[mb_xy] = MB_TYPE_SKIP;
h->next_mb_skipped = decode_cabac_mb_skip( h, h->mb_x, h->mb_y+1 );
if(!h->next_mb_skipped)
h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
@ -2012,10 +2012,10 @@ decode_intra_mb:
h->cbp_table[mb_xy] = 0xf7ef;
h->chroma_pred_mode_table[mb_xy] = 0;
// In deblocking, the quantizer is 0
-h->cur_pic.f.qscale_table[mb_xy] = 0;
+h->cur_pic.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
-h->cur_pic.f.mb_type[mb_xy] = mb_type;
+h->cur_pic.mb_type[mb_xy] = mb_type;
h->last_qscale_diff = 0;
return 0;
}
@ -2309,7 +2309,7 @@ decode_intra_mb:
AV_WN32A(&nnz_cache[4+8*10], top_empty);
}
}
-h->cur_pic.f.mb_type[mb_xy] = mb_type;
+h->cur_pic.mb_type[mb_xy] = mb_type;
if( cbp || IS_INTRA16x16( mb_type ) ) {
const uint8_t *scan, *scan8x8;
@ -2411,7 +2411,7 @@ decode_intra_mb:
h->last_qscale_diff = 0;
}
-h->cur_pic.f.qscale_table[mb_xy] = h->qscale;
+h->cur_pic.qscale_table[mb_xy] = h->qscale;
write_back_non_zero_count(h);
return 0;

View File

@ -769,11 +769,11 @@ decode_intra_mb:
skip_bits_long(&h->gb, mb_size);
// In deblocking, the quantizer is 0
-h->cur_pic.f.qscale_table[mb_xy] = 0;
+h->cur_pic.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
-h->cur_pic.f.mb_type[mb_xy] = mb_type;
+h->cur_pic.mb_type[mb_xy] = mb_type;
return 0;
}
@ -1068,7 +1068,7 @@ decode_intra_mb:
}
h->cbp=
h->cbp_table[mb_xy]= cbp;
-h->cur_pic.f.mb_type[mb_xy] = mb_type;
+h->cur_pic.mb_type[mb_xy] = mb_type;
if(cbp || IS_INTRA16x16(mb_type)){
int i4x4, i8x8, chroma_idx;
@ -1168,7 +1168,7 @@ decode_intra_mb:
fill_rectangle(&h->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
fill_rectangle(&h->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
}
-h->cur_pic.f.qscale_table[mb_xy] = h->qscale;
+h->cur_pic.qscale_table[mb_xy] = h->qscale;
write_back_non_zero_count(h);
return 0;

View File

@ -87,7 +87,7 @@ static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field,
poc= (poc&~3) + rfield + 1;
for(j=start; j<end; j++){
-if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) {
+if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].reference & 3) == poc) {
int cur_ref= mbafi ? (j-16)^field : j;
if (ref1->mbaff)
map[list][2 * old_ref + (rfield^field) + 16] = cur_ref;
@ -105,12 +105,12 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
Picture * const cur = h->cur_pic_ptr;
int list, j, field;
int sidx= (h->picture_structure&1)^1;
-int ref1sidx = (ref1->f.reference&1)^1;
+int ref1sidx = (ref1->reference&1)^1;
for(list=0; list<2; list++){
cur->ref_count[sidx][list] = h->ref_count[list];
for(j=0; j<h->ref_count[list]; j++)
-cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3);
+cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference & 3);
}
if(h->picture_structure == PICT_FRAME){
@ -126,8 +126,8 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
int *col_poc = h->ref_list[1]->field_poc;
h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc));
ref1sidx=sidx= h->col_parity;
-} else if (!(h->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
-    h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3;
+} else if (!(h->picture_structure & h->ref_list[1][0].reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
+    h->col_fieldoff = 2 * h->ref_list[1][0].reference - 3;
}
if (h->slice_type_nos != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
@ -143,7 +143,7 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
{
-int ref_field = ref->f.reference - 1;
+int ref_field = ref->reference - 1;
int ref_field_picture = ref->field_picture;
int ref_height = 16*h->mb_height >> ref_field_picture;
@ -153,7 +153,7 @@ static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y
//FIXME it can be safe to access mb stuff
//even if pixels aren't deblocked yet
-ff_thread_await_progress(&ref->f,
+ff_thread_await_progress(&ref->tf,
                         FFMIN(16 * mb_y >> ref_field_picture, ref_height - 1),
                         ref_field_picture && ref_field);
}
@ -172,7 +172,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
int mv[2];
int list;
-assert(h->ref_list[1][0].f.reference & 3);
+assert(h->ref_list[1][0].reference & 3);
await_reference_mb_row(h, &h->ref_list[1][0], h->mb_y + !!IS_INTERLACED(*mb_type));
@ -234,7 +234,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
return;
}
-if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
+if (IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
mb_y = (h->mb_y&~1) + h->col_parity;
mb_xy= h->mb_x + ((h->mb_y&~1) + h->col_parity)*h->mb_stride;
@ -248,8 +248,8 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
mb_y = h->mb_y&~1;
mb_xy= h->mb_x + (h->mb_y&~1)*h->mb_stride;
-mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
-mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + h->mb_stride];
+mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
+mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
b8_stride = 2+4*h->mb_stride;
b4_stride *= 6;
if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
@ -268,7 +268,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
}else{ // AFR/FR -> AFR/FR
single_col:
mb_type_col[0] =
-mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];
+mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
@ -288,10 +288,10 @@ single_col:
await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
-l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
-l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
-l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
-l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
+l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
+l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
+l1ref0 = &h->ref_list[1][0].ref_index [0][4 * mb_xy];
+l1ref1 = &h->ref_list[1][0].ref_index [1][4 * mb_xy];
if(!b8_stride){
if(h->mb_y&1){
l1ref0 += 2;
@ -419,11 +419,11 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
unsigned int sub_mb_type;
int i8, i4;
-assert(h->ref_list[1][0].f.reference & 3);
+assert(h->ref_list[1][0].reference & 3);
await_reference_mb_row(h, &h->ref_list[1][0], h->mb_y + !!IS_INTERLACED(*mb_type));
-if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
+if (IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
mb_y = (h->mb_y&~1) + h->col_parity;
mb_xy= h->mb_x + ((h->mb_y&~1) + h->col_parity)*h->mb_stride;
@ -437,8 +437,8 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
mb_y = h->mb_y&~1;
mb_xy= h->mb_x + (h->mb_y&~1)*h->mb_stride;
-mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
-mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + h->mb_stride];
+mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
+mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
b8_stride = 2+4*h->mb_stride;
b4_stride *= 6;
if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
@ -458,7 +458,7 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
}else{ // AFR/FR -> AFR/FR
single_col:
mb_type_col[0] =
-mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];
+mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
@ -478,10 +478,10 @@ single_col:
await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
-l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
-l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
-l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
-l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
+l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
+l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
+l1ref0 = &h->ref_list[1][0].ref_index [0][4 * mb_xy];
+l1ref1 = &h->ref_list[1][0].ref_index [1][4 * mb_xy];
if(!b8_stride){
if(h->mb_y&1){
l1ref0 += 2;

View File

@ -256,10 +256,10 @@ static av_always_inline void h264_filter_mb_fast_internal(H264Context *h,
int a = h->slice_alpha_c0_offset - qp_bd_offset;
int b = h->slice_beta_offset - qp_bd_offset;
-int mb_type = h->cur_pic.f.mb_type[mb_xy];
-int qp      = h->cur_pic.f.qscale_table[mb_xy];
-int qp0     = h->cur_pic.f.qscale_table[mb_xy - 1];
-int qp1     = h->cur_pic.f.qscale_table[h->top_mb_xy];
+int mb_type = h->cur_pic.mb_type[mb_xy];
+int qp      = h->cur_pic.qscale_table[mb_xy];
+int qp0     = h->cur_pic.qscale_table[mb_xy - 1];
+int qp1     = h->cur_pic.qscale_table[h->top_mb_xy];
int qpc = get_chroma_qp( h, 0, qp );
int qpc0 = get_chroma_qp( h, 0, qp0 );
int qpc1 = get_chroma_qp( h, 0, qp1 );
@ -497,10 +497,10 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
for(j=0; j<2; j++, mbn_xy += h->mb_stride){
DECLARE_ALIGNED(8, int16_t, bS)[4];
int qp;
-if (IS_INTRA(mb_type | h->cur_pic.f.mb_type[mbn_xy])) {
+if (IS_INTRA(mb_type | h->cur_pic.mb_type[mbn_xy])) {
AV_WN64A(bS, 0x0003000300030003ULL);
} else {
-if (!CABAC && IS_8x8DCT(h->cur_pic.f.mb_type[mbn_xy])) {
+if (!CABAC && IS_8x8DCT(h->cur_pic.mb_type[mbn_xy])) {
bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]);
bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]);
bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]);
@ -515,12 +515,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
}
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
-qp = (h->cur_pic.f.qscale_table[mb_xy] + h->cur_pic.f.qscale_table[mbn_xy] + 1) >> 1;
+qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbn_xy] + 1) >> 1;
tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
{ int i; for (i = 0; i < 4; i++) tprintf(h->avctx, " bS[%d]:%d", i, bS[i]); tprintf(h->avctx, "\n"); }
filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a, b, h, 0 );
-chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mbn_xy]) + 1) >> 1;
-chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mbn_xy]) + 1) >> 1;
+chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
+chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
if (chroma) {
if (chroma444) {
filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
@ -580,10 +580,10 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
if(bS[0]+bS[1]+bS[2]+bS[3]){
-qp = (h->cur_pic.f.qscale_table[mb_xy] + h->cur_pic.f.qscale_table[mbm_xy] + 1) >> 1;
+qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbm_xy] + 1) >> 1;
tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
-chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mbm_xy]) + 1) >> 1;
-chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mbm_xy]) + 1) >> 1;
+chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
+chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
if( dir == 0 ) {
filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 );
if (chroma) {
@ -663,7 +663,7 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
/* Filter edge */
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
-qp = h->cur_pic.f.qscale_table[mb_xy];
+qp = h->cur_pic.qscale_table[mb_xy];
tprintf(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
if( dir == 0 ) {
filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, a, b, h, 0 );
@ -702,7 +702,7 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
const int mb_xy= mb_x + mb_y*h->mb_stride;
-const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+const int mb_type = h->cur_pic.mb_type[mb_xy];
const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
int first_vertical_edge_done = 0;
av_unused int dir;
@ -758,9 +758,9 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
}
}
-mb_qp   = h->cur_pic.f.qscale_table[mb_xy];
-mbn0_qp = h->cur_pic.f.qscale_table[h->left_mb_xy[0]];
-mbn1_qp = h->cur_pic.f.qscale_table[h->left_mb_xy[1]];
+mb_qp   = h->cur_pic.qscale_table[mb_xy];
+mbn0_qp = h->cur_pic.qscale_table[h->left_mb_xy[0]];
+mbn1_qp = h->cur_pic.qscale_table[h->left_mb_xy[1]];
qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
           get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;

View File

@ -43,7 +43,7 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h)
const int mb_x = h->mb_x;
const int mb_y = h->mb_y;
const int mb_xy = h->mb_xy;
-const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+const int mb_type = h->cur_pic.mb_type[mb_xy];
uint8_t *dest_y, *dest_cb, *dest_cr;
int linesize, uvlinesize /*dct_offset*/;
int i, j;
@ -279,7 +279,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(H264Context *h)
const int mb_x = h->mb_x;
const int mb_y = h->mb_y;
const int mb_xy = h->mb_xy;
-const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+const int mb_type = h->cur_pic.mb_type[mb_xy];
uint8_t *dest[3];
int linesize;
int i, j, p;

View File

@ -68,7 +68,7 @@ static void MCFUNC(hl_motion)(H264Context *h, uint8_t *dest_y,
                              h264_biweight_func *weight_avg)
{
const int mb_xy = h->mb_xy;
-const int mb_type = h->cur_pic.f.mb_type[mb_xy];
+const int mb_type = h->cur_pic.mb_type[mb_xy];
assert(IS_INTER(mb_type));

View File

@ -48,15 +48,15 @@ static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C,
const int mb_type = mb_types[xy + (y4 >> 2) * h->mb_stride]; \
if (!USES_LIST(mb_type, list)) \
    return LIST_NOT_USED; \
-mv = h->cur_pic_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
+mv = h->cur_pic_ptr->motion_val[list][h->mb2b_xy[xy] + 3 + y4 * h->b_stride]; \
h->mv_cache[list][scan8[0] - 2][0] = mv[0]; \
h->mv_cache[list][scan8[0] - 2][1] = mv[1] MV_OP; \
-return h->cur_pic_ptr->f.ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;
+return h->cur_pic_ptr->ref_index[list][4 * xy + 1 + (y4 & ~1)] REF_OP;
if (topright_ref == PART_NOT_AVAILABLE
    && i >= scan8[0] + 8 && (i & 7) == 4
    && h->ref_cache[list][scan8[0] - 1] != PART_NOT_AVAILABLE) {
-const uint32_t *mb_types = h->cur_pic_ptr->f.mb_type;
+const uint32_t *mb_types = h->cur_pic_ptr->mb_type;
const int16_t *mv;
AV_ZERO32(h->mv_cache[list][scan8[0] - 2]);
*C = h->mv_cache[list][scan8[0] - 2];
@ -253,8 +253,8 @@ static av_always_inline void pred_pskip_motion(H264Context *const h)
{
DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = { 0 };
DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
-int8_t *ref = h->cur_pic.f.ref_index[0];
-int16_t(*mv)[2] = h->cur_pic.f.motion_val[0];
+int8_t *ref = h->cur_pic.ref_index[0];
+int16_t(*mv)[2] = h->cur_pic.motion_val[0];
int top_ref, left_ref, diagonal_ref, match_count, mx, my;
const int16_t *A, *B, *C;
int b_stride = h->b_stride;
@ -370,7 +370,7 @@ static void fill_decode_neighbors(H264Context *h, int mb_type)
left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
h->left_block = left_block_options[0];
if (FRAME_MBAFF) {
-const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.f.mb_type[mb_xy - 1]);
+const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
if (h->mb_y & 1) {
if (left_mb_field_flag != curr_mb_field_flag) {
@ -388,9 +388,9 @@ static void fill_decode_neighbors(H264Context *h, int mb_type)
}
} else {
if (curr_mb_field_flag) {
-topleft_xy  += h->mb_stride & (((h->cur_pic.f.mb_type[top_xy - 1] >> 7) & 1) - 1);
-topright_xy += h->mb_stride & (((h->cur_pic.f.mb_type[top_xy + 1] >> 7) & 1) - 1);
-top_xy      += h->mb_stride & (((h->cur_pic.f.mb_type[top_xy] >> 7) & 1) - 1);
+topleft_xy  += h->mb_stride & (((h->cur_pic.mb_type[top_xy - 1] >> 7) & 1) - 1);
+topright_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy + 1] >> 7) & 1) - 1);
+top_xy      += h->mb_stride & (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
}
if (left_mb_field_flag != curr_mb_field_flag) {
if (curr_mb_field_flag) {
@ -410,11 +410,11 @@ static void fill_decode_neighbors(H264Context *h, int mb_type)
h->left_mb_xy[LBOT] = left_xy[LBOT];
//FIXME do we need all in the context?
-h->topleft_type    = h->cur_pic.f.mb_type[topleft_xy];
-h->top_type        = h->cur_pic.f.mb_type[top_xy];
-h->topright_type   = h->cur_pic.f.mb_type[topright_xy];
-h->left_type[LTOP] = h->cur_pic.f.mb_type[left_xy[LTOP]];
-h->left_type[LBOT] = h->cur_pic.f.mb_type[left_xy[LBOT]];
+h->topleft_type    = h->cur_pic.mb_type[topleft_xy];
+h->top_type        = h->cur_pic.mb_type[top_xy];
+h->topright_type   = h->cur_pic.mb_type[topright_xy];
+h->left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
+h->left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
if (FMO) {
if (h->slice_table[topleft_xy] != h->slice_num)
@ -480,7 +480,7 @@ static void fill_decode_caches(H264Context *h, int mb_type)
h->left_samples_available &= 0xFF5F;
}
} else {
-int left_typei = h->cur_pic.f.mb_type[left_xy[LTOP] + h->mb_stride];
+int left_typei = h->cur_pic.mb_type[left_xy[LTOP] + h->mb_stride];
assert(left_xy[LTOP] == left_xy[LBOT]);
if (!((left_typei & type_mask) && (left_type[LTOP] & type_mask))) {
@ -602,9 +602,9 @@ static void fill_decode_caches(H264Context *h, int mb_type)
int b_stride = h->b_stride;
for (list = 0; list < h->list_count; list++) {
int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
-int8_t *ref = h->cur_pic.f.ref_index[list];
+int8_t *ref = h->cur_pic.ref_index[list];
int16_t(*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
-int16_t(*mv)[2] = h->cur_pic.f.motion_val[list];
+int16_t(*mv)[2] = h->cur_pic.motion_val[list];
if (!USES_LIST(mb_type, list))
continue;
assert(!(IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred));
@ -821,8 +821,8 @@ static void av_unused decode_mb_skip(H264Context *h)
} }
write_back_motion(h, mb_type); write_back_motion(h, mb_type);
h->cur_pic.f.mb_type[mb_xy] = mb_type; h->cur_pic.mb_type[mb_xy] = mb_type;
h->cur_pic.f.qscale_table[mb_xy] = h->qscale; h->cur_pic.qscale_table[mb_xy] = h->qscale;
h->slice_table[mb_xy] = h->slice_num; h->slice_table[mb_xy] = h->slice_num;
h->prev_mb_skipped = 1; h->prev_mb_skipped = 1;
} }
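A note on the pattern running through the h264 hunks in this commit: the per-macroblock side tables (mb_type, qscale_table, motion_val, ref_index) and the reference field used to be reached through the Picture's embedded AVFrame and are now direct Picture members, since with refcounted frames the AVFrame no longer carries codec-private tables. Purely illustrative, both forms appear in the hunks above:

    /* before: table reached through the embedded frame */
    mb_type = h->cur_pic.f.mb_type[mb_xy];
    /* after: same table, now a direct Picture member */
    mb_type = h->cur_pic.mb_type[mb_xy];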

View File

@ -33,13 +33,20 @@
//#undef NDEBUG //#undef NDEBUG
#include <assert.h> #include <assert.h>
#define COPY_PICTURE(dst, src) \
do {\
*(dst) = *(src);\
(dst)->f.extended_data = (dst)->f.data;\
(dst)->tf.f = &(dst)->f;\
} while (0)
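Why a plain struct assignment is no longer enough here: Picture now embeds an AVFrame (f) and a ThreadFrame (tf) whose members point back into the Picture itself, so a naive copy leaves those pointers aiming at the source. A toy, compilable analogue of the hazard and of the two fix-ups (simplified stand-in types, not the real lavc structs):

    #include <stdio.h>

    /* Toy stand-ins, only to show the self-reference problem. */
    typedef struct Frame { int data[4]; int *extended_data; } Frame;
    typedef struct Pic   { Frame f; Frame *tf_f; /* like tf.f */ } Pic;

    int main(void)
    {
        Pic a = { .f.data = { 1, 2, 3, 4 } };
        a.f.extended_data = a.f.data;
        a.tf_f            = &a.f;

        Pic b = a; /* plain copy: b's pointers still aim into a */
        printf("b aliases a: %d\n", b.f.extended_data == a.f.data);

        /* the two fix-ups COPY_PICTURE() performs */
        b.f.extended_data = b.f.data;
        b.tf_f            = &b.f;
        printf("b self-contained: %d\n", b.tf_f == &b.f);
        return 0;
    }

The same hazard explains two changes further down: the memcpy() over the ref_list arrays becomes an element-wise COPY_PICTURE() loop, and the FFSWAP() is spelled out through a temporary.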
static void pic_as_field(Picture *pic, const int parity){ static void pic_as_field(Picture *pic, const int parity){
int i; int i;
for (i = 0; i < 4; ++i) { for (i = 0; i < 4; ++i) {
if (parity == PICT_BOTTOM_FIELD) if (parity == PICT_BOTTOM_FIELD)
pic->f.data[i] += pic->f.linesize[i]; pic->f.data[i] += pic->f.linesize[i];
pic->f.reference = parity; pic->reference = parity;
pic->f.linesize[i] *= 2; pic->f.linesize[i] *= 2;
} }
pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD]; pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD];
@ -47,10 +54,10 @@ static void pic_as_field(Picture *pic, const int parity){
static int split_field_copy(Picture *dest, Picture *src, static int split_field_copy(Picture *dest, Picture *src,
int parity, int id_add){ int parity, int id_add){
int match = !!(src->f.reference & parity); int match = !!(src->reference & parity);
if (match) { if (match) {
*dest = *src; COPY_PICTURE(dest, src);
if(parity != PICT_FRAME){ if(parity != PICT_FRAME){
pic_as_field(dest, parity); pic_as_field(dest, parity);
dest->pic_id *= 2; dest->pic_id *= 2;
@ -66,9 +73,9 @@ static int build_def_list(Picture *def, Picture **in, int len, int is_long, int
int index=0; int index=0;
while(i[0]<len || i[1]<len){ while(i[0]<len || i[1]<len){
while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->f.reference & sel))) while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->reference & sel)))
i[0]++; i[0]++;
while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3)))) while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->reference & (sel^3))))
i[1]++; i[1]++;
if(i[0] < len){ if(i[0] < len){
in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num; in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
@ -132,8 +139,12 @@ int ff_h264_fill_default_ref_list(H264Context *h){
if(lens[0] == lens[1] && lens[1] > 1){ if(lens[0] == lens[1] && lens[1] > 1){
for (i = 0; h->default_ref_list[0][i].f.data[0] == h->default_ref_list[1][i].f.data[0] && i < lens[0]; i++); for (i = 0; h->default_ref_list[0][i].f.data[0] == h->default_ref_list[1][i].f.data[0] && i < lens[0]; i++);
if(i == lens[0]) if (i == lens[0]) {
FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]); Picture tmp;
COPY_PICTURE(&tmp, &h->default_ref_list[1][0]);
COPY_PICTURE(&h->default_ref_list[1][0], &h->default_ref_list[1][1]);
COPY_PICTURE(&h->default_ref_list[1][1], &tmp);
}
} }
}else{ }else{
len = build_def_list(h->default_ref_list[0] , h->short_ref, h->short_ref_count, 0, h->picture_structure); len = build_def_list(h->default_ref_list[0] , h->short_ref, h->short_ref_count, 0, h->picture_structure);
@ -181,13 +192,14 @@ static int pic_num_extract(H264Context *h, int pic_num, int *structure){
} }
int ff_h264_decode_ref_pic_list_reordering(H264Context *h){ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
int list, index, pic_structure; int list, index, pic_structure, i;
print_short_term(h); print_short_term(h);
print_long_term(h); print_long_term(h);
for(list=0; list<h->list_count; list++){ for(list=0; list<h->list_count; list++){
memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]); for (i = 0; i < h->ref_count[list]; i++)
COPY_PICTURE(&h->ref_list[list][i], &h->default_ref_list[list][i]);
if(get_bits1(&h->gb)){ if(get_bits1(&h->gb)){
int pred= h->curr_pic_num; int pred= h->curr_pic_num;
@ -224,11 +236,11 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
for(i= h->short_ref_count-1; i>=0; i--){ for(i= h->short_ref_count-1; i>=0; i--){
ref = h->short_ref[i]; ref = h->short_ref[i];
assert(ref->f.reference); assert(ref->reference);
assert(!ref->long_ref); assert(!ref->long_ref);
if( if(
ref->frame_num == frame_num && ref->frame_num == frame_num &&
(ref->f.reference & pic_structure) (ref->reference & pic_structure)
) )
break; break;
} }
@ -245,8 +257,8 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
return -1; return -1;
} }
ref = h->long_ref[long_idx]; ref = h->long_ref[long_idx];
assert(!(ref && !ref->f.reference)); assert(!(ref && !ref->reference));
if (ref && (ref->f.reference & pic_structure)) { if (ref && (ref->reference & pic_structure)) {
ref->pic_id= pic_id; ref->pic_id= pic_id;
assert(ref->long_ref); assert(ref->long_ref);
i=0; i=0;
@ -264,9 +276,9 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
break; break;
} }
for(; i > index; i--){ for(; i > index; i--){
h->ref_list[list][i]= h->ref_list[list][i-1]; COPY_PICTURE(&h->ref_list[list][i], &h->ref_list[list][i - 1]);
} }
h->ref_list[list][index]= *ref; COPY_PICTURE(&h->ref_list[list][index], ref);
if (FIELD_PICTURE){ if (FIELD_PICTURE){
pic_as_field(&h->ref_list[list][index], pic_structure); pic_as_field(&h->ref_list[list][index], pic_structure);
} }
@ -283,7 +295,7 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
if (!h->ref_list[list][index].f.data[0]) { if (!h->ref_list[list][index].f.data[0]) {
av_log(h->avctx, AV_LOG_ERROR, "Missing reference picture\n"); av_log(h->avctx, AV_LOG_ERROR, "Missing reference picture\n");
if (h->default_ref_list[list][0].f.data[0]) if (h->default_ref_list[list][0].f.data[0])
h->ref_list[list][index]= h->default_ref_list[list][0]; COPY_PICTURE(&h->ref_list[list][index], &h->default_ref_list[list][0]);
else else
return -1; return -1;
} }
@ -299,15 +311,15 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){
for(i=0; i<h->ref_count[list]; i++){ for(i=0; i<h->ref_count[list]; i++){
Picture *frame = &h->ref_list[list][i]; Picture *frame = &h->ref_list[list][i];
Picture *field = &h->ref_list[list][16+2*i]; Picture *field = &h->ref_list[list][16+2*i];
field[0] = *frame; COPY_PICTURE(field, frame);
for(j=0; j<3; j++) for(j=0; j<3; j++)
field[0].f.linesize[j] <<= 1; field[0].f.linesize[j] <<= 1;
field[0].f.reference = PICT_TOP_FIELD; field[0].reference = PICT_TOP_FIELD;
field[0].poc= field[0].field_poc[0]; field[0].poc= field[0].field_poc[0];
field[1] = field[0]; COPY_PICTURE(field + 1, field);
for(j=0; j<3; j++) for(j=0; j<3; j++)
field[1].f.data[j] += frame->f.linesize[j]; field[1].f.data[j] += frame->f.linesize[j];
field[1].f.reference = PICT_BOTTOM_FIELD; field[1].reference = PICT_BOTTOM_FIELD;
field[1].poc= field[1].field_poc[1]; field[1].poc= field[1].field_poc[1];
h->luma_weight[16+2*i][list][0] = h->luma_weight[16+2*i+1][list][0] = h->luma_weight[i][list][0]; h->luma_weight[16+2*i][list][0] = h->luma_weight[16+2*i+1][list][0] = h->luma_weight[i][list][0];
@ -333,12 +345,12 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){
*/ */
static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){ static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){
int i; int i;
if (pic->f.reference &= refmask) { if (pic->reference &= refmask) {
return 0; return 0;
} else { } else {
for(i = 0; h->delayed_pic[i]; i++) for(i = 0; h->delayed_pic[i]; i++)
if(pic == h->delayed_pic[i]){ if(pic == h->delayed_pic[i]){
pic->f.reference = DELAYED_PIC_REF; pic->reference = DELAYED_PIC_REF;
break; break;
} }
return 1; return 1;
@ -490,7 +502,7 @@ int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
if (h->short_ref_count && if (h->short_ref_count &&
h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count && h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
!(FIELD_PICTURE && !h->first_field && h->cur_pic_ptr->f.reference)) { !(FIELD_PICTURE && !h->first_field && h->cur_pic_ptr->reference)) {
mmco[0].opcode = MMCO_SHORT2UNUSED; mmco[0].opcode = MMCO_SHORT2UNUSED;
mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num; mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num;
mmco_index = 1; mmco_index = 1;
@ -584,7 +596,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
h->long_ref_count++; h->long_ref_count++;
} }
h->cur_pic_ptr->f.reference |= h->picture_structure; h->cur_pic_ptr->reference |= h->picture_structure;
current_ref_assigned=1; current_ref_assigned=1;
break; break;
case MMCO_SET_MAX_LONG: case MMCO_SET_MAX_LONG:
@ -619,7 +631,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
*/ */
if (h->short_ref_count && h->short_ref[0] == h->cur_pic_ptr) { if (h->short_ref_count && h->short_ref[0] == h->cur_pic_ptr) {
/* Just mark the second field valid */ /* Just mark the second field valid */
h->cur_pic_ptr->f.reference = PICT_FRAME; h->cur_pic_ptr->reference = PICT_FRAME;
} else if (h->cur_pic_ptr->long_ref) { } else if (h->cur_pic_ptr->long_ref) {
av_log(h->avctx, AV_LOG_ERROR, "illegal short term reference " av_log(h->avctx, AV_LOG_ERROR, "illegal short term reference "
"assignment for second field " "assignment for second field "
@ -638,7 +650,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
h->short_ref[0]= h->cur_pic_ptr; h->short_ref[0]= h->cur_pic_ptr;
h->short_ref_count++; h->short_ref_count++;
h->cur_pic_ptr->f.reference |= h->picture_structure; h->cur_pic_ptr->reference |= h->picture_structure;
} }
} }

View File

@ -241,7 +241,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
ff_huffyuv_common_init(avctx); ff_huffyuv_common_init(avctx);
memset(s->vlc, 0, 3 * sizeof(VLC)); memset(s->vlc, 0, 3 * sizeof(VLC));
avctx->coded_frame = &s->picture;
s->interlaced = s->height > 288; s->interlaced = s->height > 288;
s->bgr32 = 1; s->bgr32 = 1;
@ -337,7 +336,6 @@ static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
HYuvContext *s = avctx->priv_data; HYuvContext *s = avctx->priv_data;
int i; int i;
avctx->coded_frame= &s->picture;
ff_huffyuv_alloc_temp(s); ff_huffyuv_alloc_temp(s);
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++)
@ -443,7 +441,7 @@ static void decode_bgr_bitstream(HYuvContext *s, int count)
} }
} }
static void draw_slice(HYuvContext *s, int y) static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
{ {
int h, cy, i; int h, cy, i;
int offset[AV_NUM_DATA_POINTERS]; int offset[AV_NUM_DATA_POINTERS];
@ -460,14 +458,14 @@ static void draw_slice(HYuvContext *s, int y)
cy = y; cy = y;
} }
offset[0] = s->picture.linesize[0]*y; offset[0] = frame->linesize[0] * y;
offset[1] = s->picture.linesize[1]*cy; offset[1] = frame->linesize[1] * cy;
offset[2] = s->picture.linesize[2]*cy; offset[2] = frame->linesize[2] * cy;
for (i = 3; i < AV_NUM_DATA_POINTERS; i++) for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
offset[i] = 0; offset[i] = 0;
emms_c(); emms_c();
s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h); s->avctx->draw_horiz_band(s->avctx, frame, offset, y, 3, h);
s->last_slice_end = y + h; s->last_slice_end = y + h;
} }
@ -482,11 +480,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
const int width2 = s->width>>1; const int width2 = s->width>>1;
const int height = s->height; const int height = s->height;
int fake_ystride, fake_ustride, fake_vstride; int fake_ystride, fake_ustride, fake_vstride;
AVFrame * const p = &s->picture; ThreadFrame frame = { .f = data };
AVFrame * const p = data;
int table_size = 0; int table_size = 0;
AVFrame *picture = data;
av_fast_malloc(&s->bitstream_buffer, av_fast_malloc(&s->bitstream_buffer,
&s->bitstream_buffer_size, &s->bitstream_buffer_size,
buf_size + FF_INPUT_BUFFER_PADDING_SIZE); buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
@ -497,11 +494,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer,
(const uint32_t*)buf, buf_size / 4); (const uint32_t*)buf, buf_size / 4);
if (p->data[0]) if (ff_thread_get_buffer(avctx, &frame, 0) < 0) {
ff_thread_release_buffer(avctx, p);
p->reference = 0;
if (ff_thread_get_buffer(avctx, p) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return -1;
} }
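Consolidated from the hunk above, the new huffyuv allocation path: the output frame belongs to the caller and is merely wrapped in a stack ThreadFrame for the thread-aware allocator, so the old release-then-reallocate sequence disappears:

    ThreadFrame frame = { .f = data }; /* wrap the caller's AVFrame */
    AVFrame * const p = data;

    if (ff_thread_get_buffer(avctx, &frame, 0) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    /* decode into p; there is no previous frame to release, and the
     * final *picture = *p copy-out is gone as well */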
@ -572,7 +565,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
if (y >= s->height) break; if (y >= s->height) break;
} }
draw_slice(s, y); draw_slice(s, p, y);
ydst = p->data[0] + p->linesize[0]*y; ydst = p->data[0] + p->linesize[0]*y;
udst = p->data[1] + p->linesize[1]*cy; udst = p->data[1] + p->linesize[1]*cy;
@ -594,7 +587,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
} }
} }
} }
draw_slice(s, height); draw_slice(s, p, height);
break; break;
case MEDIAN: case MEDIAN:
@ -651,7 +644,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
} }
if (y >= height) break; if (y >= height) break;
} }
draw_slice(s, y); draw_slice(s, p, y);
decode_422_bitstream(s, width); decode_422_bitstream(s, width);
@ -666,7 +659,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
} }
} }
draw_slice(s, height); draw_slice(s, p, height);
break; break;
} }
} }
@ -710,7 +703,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
} }
} }
// just 1 large slice as this is not possible in reverse order // just 1 large slice as this is not possible in reverse order
draw_slice(s, height); draw_slice(s, p, height);
break; break;
default: default:
av_log(avctx, AV_LOG_ERROR, av_log(avctx, AV_LOG_ERROR,
@ -724,7 +717,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
} }
emms_c(); emms_c();
*picture = *p;
*got_frame = 1; *got_frame = 1;
return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size; return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
@ -735,9 +727,6 @@ static av_cold int decode_end(AVCodecContext *avctx)
HYuvContext *s = avctx->priv_data; HYuvContext *s = avctx->priv_data;
int i; int i;
if (s->picture.data[0])
avctx->release_buffer(avctx, &s->picture);
ff_huffyuv_common_end(s); ff_huffyuv_common_end(s);
av_freep(&s->bitstream_buffer); av_freep(&s->bitstream_buffer);

View File

@ -66,7 +66,6 @@ typedef struct
typedef struct IdcinContext { typedef struct IdcinContext {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame;
const unsigned char *buf; const unsigned char *buf;
int size; int size;
@ -168,12 +167,10 @@ static av_cold int idcin_decode_init(AVCodecContext *avctx)
huff_build_tree(s, i); huff_build_tree(s, i);
} }
avcodec_get_frame_defaults(&s->frame);
return 0; return 0;
} }
static void idcin_decode_vlcs(IdcinContext *s) static void idcin_decode_vlcs(IdcinContext *s, AVFrame *frame)
{ {
hnode *hnodes; hnode *hnodes;
long x, y; long x, y;
@ -182,8 +179,8 @@ static void idcin_decode_vlcs(IdcinContext *s)
int bit_pos, node_num, dat_pos; int bit_pos, node_num, dat_pos;
prev = bit_pos = dat_pos = 0; prev = bit_pos = dat_pos = 0;
for (y = 0; y < (s->frame.linesize[0] * s->avctx->height); for (y = 0; y < (frame->linesize[0] * s->avctx->height);
y += s->frame.linesize[0]) { y += frame->linesize[0]) {
for (x = y; x < y + s->avctx->width; x++) { for (x = y; x < y + s->avctx->width; x++) {
node_num = s->num_huff_nodes[prev]; node_num = s->num_huff_nodes[prev];
hnodes = s->huff_nodes[prev]; hnodes = s->huff_nodes[prev];
@ -203,7 +200,7 @@ static void idcin_decode_vlcs(IdcinContext *s)
bit_pos--; bit_pos--;
} }
s->frame.data[0][x] = node_num; frame->data[0][x] = node_num;
prev = node_num; prev = node_num;
} }
} }
@ -217,51 +214,38 @@ static int idcin_decode_frame(AVCodecContext *avctx,
int buf_size = avpkt->size; int buf_size = avpkt->size;
IdcinContext *s = avctx->priv_data; IdcinContext *s = avctx->priv_data;
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL); const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
AVFrame *frame = data;
int ret;
s->buf = buf; s->buf = buf;
s->size = buf_size; s->size = buf_size;
if (s->frame.data[0]) if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
avctx->release_buffer(avctx, &s->frame);
if (ff_get_buffer(avctx, &s->frame)) {
av_log(avctx, AV_LOG_ERROR, " id CIN Video: get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, " id CIN Video: get_buffer() failed\n");
return -1; return ret;
} }
idcin_decode_vlcs(s); idcin_decode_vlcs(s, frame);
if (pal) { if (pal) {
s->frame.palette_has_changed = 1; frame->palette_has_changed = 1;
memcpy(s->pal, pal, AVPALETTE_SIZE); memcpy(s->pal, pal, AVPALETTE_SIZE);
} }
/* make the palette available on the way out */ /* make the palette available on the way out */
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE); memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = s->frame;
/* report that the buffer was completely consumed */ /* report that the buffer was completely consumed */
return buf_size; return buf_size;
} }
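Read straight through, the converted idcin decode path (all lines taken from the hunks above) decodes directly into the caller-owned frame, which is also why the .close callback and its frame release can be dropped below:

    AVFrame *frame = data;
    int ret;

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, " id CIN Video: get_buffer() failed\n");
        return ret;
    }
    idcin_decode_vlcs(s, frame);
    if (pal) {
        frame->palette_has_changed = 1;
        memcpy(s->pal, pal, AVPALETTE_SIZE);
    }
    /* make the palette available on the way out */
    memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);
    *got_frame = 1;
    return buf_size;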
static av_cold int idcin_decode_end(AVCodecContext *avctx)
{
IdcinContext *s = avctx->priv_data;
if (s->frame.data[0])
avctx->release_buffer(avctx, &s->frame);
return 0;
}
AVCodec ff_idcin_decoder = { AVCodec ff_idcin_decoder = {
.name = "idcinvideo", .name = "idcinvideo",
.type = AVMEDIA_TYPE_VIDEO, .type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_IDCIN, .id = AV_CODEC_ID_IDCIN,
.priv_data_size = sizeof(IdcinContext), .priv_data_size = sizeof(IdcinContext),
.init = idcin_decode_init, .init = idcin_decode_init,
.close = idcin_decode_end,
.decode = idcin_decode_frame, .decode = idcin_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("id Quake II CIN video"), .long_name = NULL_IF_CONFIG_SMALL("id Quake II CIN video"),

View File

@ -168,8 +168,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
if (!s->planebuf) if (!s->planebuf)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
s->frame.reference = 1;
return 0; return 0;
} }
@ -256,15 +254,11 @@ static int decode_frame_ilbm(AVCodecContext *avctx,
const uint8_t *buf_end = buf+buf_size; const uint8_t *buf_end = buf+buf_size;
int y, plane, res; int y, plane, res;
if (s->init) { if ((res = ff_reget_buffer(avctx, &s->frame)) < 0)
if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return res;
}
} else if ((res = ff_get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res; return res;
} else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != AV_PIX_FMT_GRAY8) {
if (!s->init && avctx->bits_per_coded_sample <= 8 &&
avctx->pix_fmt != AV_PIX_FMT_GRAY8) {
if ((res = cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0) if ((res = cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
return res; return res;
} }
@ -298,8 +292,11 @@ static int decode_frame_ilbm(AVCodecContext *avctx,
} }
} }
if ((res = av_frame_ref(data, &s->frame)) < 0)
return res;
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = s->frame;
return buf_size; return buf_size;
} }
@ -313,15 +310,11 @@ static int decode_frame_byterun1(AVCodecContext *avctx,
const uint8_t *buf_end = buf+buf_size; const uint8_t *buf_end = buf+buf_size;
int y, plane, res; int y, plane, res;
if (s->init) { if ((res = ff_reget_buffer(avctx, &s->frame)) < 0)
if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return res;
}
} else if ((res = ff_get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res; return res;
} else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != AV_PIX_FMT_GRAY8) {
if (!s->init && avctx->bits_per_coded_sample <= 8 &&
avctx->pix_fmt != AV_PIX_FMT_GRAY8) {
if ((res = cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0) if ((res = cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
return res; return res;
} }
@ -354,16 +347,18 @@ static int decode_frame_byterun1(AVCodecContext *avctx,
} }
} }
if ((res = av_frame_ref(data, &s->frame)) < 0)
return res;
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = s->frame;
return buf_size; return buf_size;
} }
static av_cold int decode_end(AVCodecContext *avctx) static av_cold int decode_end(AVCodecContext *avctx)
{ {
IffContext *s = avctx->priv_data; IffContext *s = avctx->priv_data;
if (s->frame.data[0]) av_frame_unref(&s->frame);
avctx->release_buffer(avctx, &s->frame);
av_freep(&s->planebuf); av_freep(&s->planebuf);
return 0; return 0;
} }

View File

@ -940,7 +940,7 @@ static int imc_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */ /* get output buffer */
frame->nb_samples = COEFFS; frame->nb_samples = COEFFS;
if ((ret = ff_get_buffer(avctx, frame)) < 0) { if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }

View File

@ -29,6 +29,7 @@
#include "avcodec.h" #include "avcodec.h"
#include "get_bits.h" #include "get_bits.h"
#include "indeo2data.h" #include "indeo2data.h"
#include "internal.h"
#include "mathops.h" #include "mathops.h"
typedef struct Ir2Context{ typedef struct Ir2Context{
@ -148,12 +149,7 @@ static int ir2_decode_frame(AVCodecContext *avctx,
AVFrame * const p = &s->picture; AVFrame * const p = &s->picture;
int start, ret; int start, ret;
if (p->data[0]) if ((ret = ff_reget_buffer(avctx, p)) < 0) {
avctx->release_buffer(avctx, p);
p->reference = 1;
p->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); av_log(s->avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret; return ret;
} }
@ -206,7 +202,9 @@ static int ir2_decode_frame(AVCodecContext *avctx,
return ret; return ret;
} }
*picture = s->picture; if ((ret = av_frame_ref(picture, &s->picture)) < 0)
return ret;
*got_frame = 1; *got_frame = 1;
return buf_size; return buf_size;
@ -241,8 +239,7 @@ static av_cold int ir2_decode_end(AVCodecContext *avctx)
Ir2Context * const ic = avctx->priv_data; Ir2Context * const ic = avctx->priv_data;
AVFrame *pic = &ic->picture; AVFrame *pic = &ic->picture;
if (pic->data[0]) av_frame_unref(pic);
avctx->release_buffer(avctx, pic);
return 0; return 0;
} }

View File

@ -81,7 +81,6 @@ typedef struct Cell {
typedef struct Indeo3DecodeContext { typedef struct Indeo3DecodeContext {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame;
DSPContext dsp; DSPContext dsp;
GetBitContext gb; GetBitContext gb;
@ -1034,6 +1033,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
Indeo3DecodeContext *ctx = avctx->priv_data; Indeo3DecodeContext *ctx = avctx->priv_data;
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
AVFrame *frame = data;
int res; int res;
res = decode_frame_headers(ctx, avctx, buf, buf_size); res = decode_frame_headers(ctx, avctx, buf, buf_size);
@ -1070,27 +1070,22 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
if ((res = decode_plane(ctx, avctx, &ctx->planes[2], ctx->v_data_ptr, ctx->v_data_size, 10))) if ((res = decode_plane(ctx, avctx, &ctx->planes[2], ctx->v_data_ptr, ctx->v_data_size, 10)))
return res; return res;
if (ctx->frame.data[0]) if ((res = ff_get_buffer(avctx, frame, 0)) < 0) {
avctx->release_buffer(avctx, &ctx->frame);
ctx->frame.reference = 0;
if ((res = ff_get_buffer(avctx, &ctx->frame)) < 0) {
av_log(ctx->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(ctx->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res; return res;
} }
output_plane(&ctx->planes[0], ctx->buf_sel, output_plane(&ctx->planes[0], ctx->buf_sel,
ctx->frame.data[0], ctx->frame.linesize[0], frame->data[0], frame->linesize[0],
avctx->height); avctx->height);
output_plane(&ctx->planes[1], ctx->buf_sel, output_plane(&ctx->planes[1], ctx->buf_sel,
ctx->frame.data[1], ctx->frame.linesize[1], frame->data[1], frame->linesize[1],
(avctx->height + 3) >> 2); (avctx->height + 3) >> 2);
output_plane(&ctx->planes[2], ctx->buf_sel, output_plane(&ctx->planes[2], ctx->buf_sel,
ctx->frame.data[2], ctx->frame.linesize[2], frame->data[2], frame->linesize[2],
(avctx->height + 3) >> 2); (avctx->height + 3) >> 2);
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = ctx->frame;
return buf_size; return buf_size;
} }
@ -1098,13 +1093,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
static av_cold int decode_close(AVCodecContext *avctx) static av_cold int decode_close(AVCodecContext *avctx)
{ {
Indeo3DecodeContext *ctx = avctx->priv_data;
free_frame_buffers(avctx->priv_data); free_frame_buffers(avctx->priv_data);
if (ctx->frame.data[0])
avctx->release_buffer(avctx, &ctx->frame);
return 0; return 0;
} }

View File

@ -26,34 +26,33 @@
#include <stdint.h> #include <stdint.h>
#include "libavutil/buffer.h"
#include "libavutil/mathematics.h" #include "libavutil/mathematics.h"
#include "libavutil/pixfmt.h" #include "libavutil/pixfmt.h"
#include "avcodec.h" #include "avcodec.h"
#define FF_SANE_NB_CHANNELS 128U #define FF_SANE_NB_CHANNELS 128U
typedef struct InternalBuffer { typedef struct FramePool {
uint8_t *base[AV_NUM_DATA_POINTERS]; /**
uint8_t *data[AV_NUM_DATA_POINTERS]; * Pools for each data plane. For audio all the planes have the same size,
int linesize[AV_NUM_DATA_POINTERS]; * so only pools[0] is used.
int width; */
int height; AVBufferPool *pools[4];
enum AVPixelFormat pix_fmt;
} InternalBuffer; /*
* Pool parameters
*/
int format;
int width, height;
int stride_align[AV_NUM_DATA_POINTERS];
int linesize[4];
int planes;
int channels;
int samples;
} FramePool;
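How the pool is consumed is not shown in this excerpt, so the following is an assumption about the intended use rather than code from the commit: the default buffer allocator can serve each plane of a frame from its pool, so frames of identical geometry reuse allocations instead of hitting malloc/free once per frame:

    /* Hypothetical sketch; 'frame' and the surrounding function are
     * assumed, av_buffer_pool_get() is the real libavutil call. */
    FramePool *pool = avctx->internal->pool;
    int i;

    for (i = 0; i < pool->planes; i++) {
        frame->buf[i] = av_buffer_pool_get(pool->pools[i]);
        if (!frame->buf[i])
            return AVERROR(ENOMEM);
        frame->data[i]     = frame->buf[i]->data;
        frame->linesize[i] = pool->linesize[i];
    }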
typedef struct AVCodecInternal { typedef struct AVCodecInternal {
/**
* internal buffer count
* used by default get/release/reget_buffer().
*/
int buffer_count;
/**
* internal buffers
* used by default get/release/reget_buffer().
*/
InternalBuffer *buffer;
/** /**
* Whether the parent AVCodecContext is a copy of the context which had * Whether the parent AVCodecContext is a copy of the context which had
* init() called on it. * init() called on it.
@ -62,6 +61,21 @@ typedef struct AVCodecInternal {
*/ */
int is_copy; int is_copy;
/**
* Whether to allocate progress for frame threading.
*
* The codec must set it to 1 if it uses ff_thread_await/report_progress();
* progress will then be allocated in ff_thread_get_buffer(). The frames
* then MUST be freed with ff_thread_release_buffer().
*
* If the codec does not need to call the progress functions (there are no
* dependencies between the frames), it should leave this at 0. Then it can
* decode straight to the user-provided frames (which the user will then
* free with av_frame_unref()); there is no need to call
* ff_thread_release_buffer().
*/
int allocate_progress;
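For illustration only (hypothetical codec, not part of this commit): a frame-threaded decoder with inter-frame dependencies would set the flag at init time and pair the progress calls per row, roughly:

    /* Sketch; assumes thread.h for the ff_thread_*_progress()
     * prototypes and a codec with CODEC_CAP_FRAME_THREADS. All "foo"
     * names are hypothetical. */
    typedef struct FooContext { ThreadFrame cur, ref; } FooContext;

    static av_cold int foo_init(AVCodecContext *avctx)
    {
        avctx->internal->allocate_progress = 1; /* frames carry progress */
        return 0;
    }

    static void foo_decode_row(FooContext *s, int row)
    {
        /* block until the reference frame has decoded this far */
        ff_thread_await_progress(&s->ref, row, 0);
        /* ... reconstruct the row from s->ref ... */
        ff_thread_report_progress(&s->cur, row, 0);
    }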
#if FF_API_OLD_ENCODE_AUDIO #if FF_API_OLD_ENCODE_AUDIO
/** /**
* Internal sample count used by avcodec_encode_audio() to fabricate pts. * Internal sample count used by avcodec_encode_audio() to fabricate pts.
@ -76,11 +90,9 @@ typedef struct AVCodecInternal {
*/ */
int last_audio_frame; int last_audio_frame;
/** AVFrame to_free;
* The data for the last allocated audio frame.
* Stored here so we can free it. FramePool *pool;
*/
uint8_t *audio_data;
} AVCodecInternal; } AVCodecInternal;
struct AVCodecDefault { struct AVCodecDefault {
@ -149,6 +161,12 @@ static av_always_inline int64_t ff_samples_to_time_base(AVCodecContext *avctx,
* AVCodecContext.get_buffer() and should be used instead calling get_buffer() * AVCodecContext.get_buffer() and should be used instead calling get_buffer()
* directly. * directly.
*/ */
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame); int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags);
/**
* Identical in function to av_frame_make_writable(), except it uses
* ff_get_buffer() to allocate the buffer when needed.
*/
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame);
#endif /* AVCODEC_INTERNAL_H */ #endif /* AVCODEC_INTERNAL_H */
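The new ff_reget_buffer() collapses the old if-first-frame/else boilerplate into one call; a sketch of the resulting decoder shape (hypothetical codec "foo" holding a persistent AVFrame, as the indeo2 and iff conversions above do):

    static int foo_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame, AVPacket *avpkt)
    {
        FooContext *s = avctx->priv_data; /* assumed context type */
        int ret;

        /* makes s->picture writable, allocating it on first use */
        if ((ret = ff_reget_buffer(avctx, &s->picture)) < 0)
            return ret;

        /* ... decode the packet into s->picture ... */

        /* hand the caller its own reference; s->picture stays valid */
        if ((ret = av_frame_ref(data, &s->picture)) < 0)
            return ret;
        *got_frame = 1;
        return avpkt->size;
    }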

View File

@ -51,9 +51,8 @@ typedef struct IpvideoContext {
AVCodecContext *avctx; AVCodecContext *avctx;
DSPContext dsp; DSPContext dsp;
AVFrame second_last_frame; AVFrame *second_last_frame;
AVFrame last_frame; AVFrame *last_frame;
AVFrame current_frame;
const unsigned char *decoding_map; const unsigned char *decoding_map;
int decoding_map_size; int decoding_map_size;
@ -67,10 +66,10 @@ typedef struct IpvideoContext {
uint32_t pal[256]; uint32_t pal[256];
} IpvideoContext; } IpvideoContext;
static int copy_from(IpvideoContext *s, AVFrame *src, int delta_x, int delta_y) static int copy_from(IpvideoContext *s, AVFrame *src, AVFrame *dst, int delta_x, int delta_y)
{ {
int current_offset = s->pixel_ptr - s->current_frame.data[0]; int current_offset = s->pixel_ptr - dst->data[0];
int motion_offset = current_offset + delta_y * s->current_frame.linesize[0] int motion_offset = current_offset + delta_y * dst->linesize[0]
+ delta_x * (1 + s->is_16bpp); + delta_x * (1 + s->is_16bpp);
if (motion_offset < 0) { if (motion_offset < 0) {
av_log(s->avctx, AV_LOG_ERROR, " Interplay video: motion offset < 0 (%d)\n", motion_offset); av_log(s->avctx, AV_LOG_ERROR, " Interplay video: motion offset < 0 (%d)\n", motion_offset);
@ -85,21 +84,21 @@ static int copy_from(IpvideoContext *s, AVFrame *src, int delta_x, int delta_y)
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
s->dsp.put_pixels_tab[!s->is_16bpp][0](s->pixel_ptr, src->data[0] + motion_offset, s->dsp.put_pixels_tab[!s->is_16bpp][0](s->pixel_ptr, src->data[0] + motion_offset,
s->current_frame.linesize[0], 8); dst->linesize[0], 8);
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0x0(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x0(IpvideoContext *s, AVFrame *frame)
{ {
return copy_from(s, &s->last_frame, 0, 0); return copy_from(s, s->last_frame, frame, 0, 0);
} }
static int ipvideo_decode_block_opcode_0x1(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x1(IpvideoContext *s, AVFrame *frame)
{ {
return copy_from(s, &s->second_last_frame, 0, 0); return copy_from(s, s->second_last_frame, frame, 0, 0);
} }
static int ipvideo_decode_block_opcode_0x2(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x2(IpvideoContext *s, AVFrame *frame)
{ {
unsigned char B; unsigned char B;
int x, y; int x, y;
@ -120,10 +119,10 @@ static int ipvideo_decode_block_opcode_0x2(IpvideoContext *s)
} }
av_dlog(NULL, " motion byte = %d, (x, y) = (%d, %d)\n", B, x, y); av_dlog(NULL, " motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
return copy_from(s, &s->second_last_frame, x, y); return copy_from(s, s->second_last_frame, frame, x, y);
} }
static int ipvideo_decode_block_opcode_0x3(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x3(IpvideoContext *s, AVFrame *frame)
{ {
unsigned char B; unsigned char B;
int x, y; int x, y;
@ -146,10 +145,10 @@ static int ipvideo_decode_block_opcode_0x3(IpvideoContext *s)
} }
av_dlog(NULL, " motion byte = %d, (x, y) = (%d, %d)\n", B, x, y); av_dlog(NULL, " motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
return copy_from(s, &s->current_frame, x, y); return copy_from(s, frame, frame, x, y);
} }
static int ipvideo_decode_block_opcode_0x4(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x4(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
unsigned char B, BL, BH; unsigned char B, BL, BH;
@ -167,10 +166,10 @@ static int ipvideo_decode_block_opcode_0x4(IpvideoContext *s)
y = -8 + BH; y = -8 + BH;
av_dlog(NULL, " motion byte = %d, (x, y) = (%d, %d)\n", B, x, y); av_dlog(NULL, " motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
return copy_from(s, &s->last_frame, x, y); return copy_from(s, s->last_frame, frame, x, y);
} }
static int ipvideo_decode_block_opcode_0x5(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x5(IpvideoContext *s, AVFrame *frame)
{ {
signed char x, y; signed char x, y;
@ -180,10 +179,10 @@ static int ipvideo_decode_block_opcode_0x5(IpvideoContext *s)
y = bytestream2_get_byte(&s->stream_ptr); y = bytestream2_get_byte(&s->stream_ptr);
av_dlog(NULL, " motion bytes = %d, %d\n", x, y); av_dlog(NULL, " motion bytes = %d, %d\n", x, y);
return copy_from(s, &s->last_frame, x, y); return copy_from(s, s->last_frame, frame, x, y);
} }
static int ipvideo_decode_block_opcode_0x6(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x6(IpvideoContext *s, AVFrame *frame)
{ {
/* mystery opcode? skip multiple blocks? */ /* mystery opcode? skip multiple blocks? */
av_log(s->avctx, AV_LOG_ERROR, " Interplay video: Help! Mystery opcode 0x6 seen\n"); av_log(s->avctx, AV_LOG_ERROR, " Interplay video: Help! Mystery opcode 0x6 seen\n");
@ -192,7 +191,7 @@ static int ipvideo_decode_block_opcode_0x6(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0x7(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x7(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
unsigned char P[2]; unsigned char P[2];
@ -231,7 +230,7 @@ static int ipvideo_decode_block_opcode_0x7(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0x8(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x8(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
unsigned char P[4]; unsigned char P[4];
@ -304,7 +303,7 @@ static int ipvideo_decode_block_opcode_0x8(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
unsigned char P[4]; unsigned char P[4];
@ -369,7 +368,7 @@ static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0xA(IpvideoContext *s) static int ipvideo_decode_block_opcode_0xA(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
unsigned char P[8]; unsigned char P[8];
@ -430,7 +429,7 @@ static int ipvideo_decode_block_opcode_0xA(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0xB(IpvideoContext *s) static int ipvideo_decode_block_opcode_0xB(IpvideoContext *s, AVFrame *frame)
{ {
int y; int y;
@ -444,7 +443,7 @@ static int ipvideo_decode_block_opcode_0xB(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0xC(IpvideoContext *s) static int ipvideo_decode_block_opcode_0xC(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
@ -463,7 +462,7 @@ static int ipvideo_decode_block_opcode_0xC(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0xD(IpvideoContext *s) static int ipvideo_decode_block_opcode_0xD(IpvideoContext *s, AVFrame *frame)
{ {
int y; int y;
unsigned char P[2]; unsigned char P[2];
@ -483,7 +482,7 @@ static int ipvideo_decode_block_opcode_0xD(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0xE(IpvideoContext *s) static int ipvideo_decode_block_opcode_0xE(IpvideoContext *s, AVFrame *frame)
{ {
int y; int y;
unsigned char pix; unsigned char pix;
@ -500,7 +499,7 @@ static int ipvideo_decode_block_opcode_0xE(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0xF(IpvideoContext *s) static int ipvideo_decode_block_opcode_0xF(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
unsigned char sample[2]; unsigned char sample[2];
@ -521,7 +520,7 @@ static int ipvideo_decode_block_opcode_0xF(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0x6_16(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x6_16(IpvideoContext *s, AVFrame *frame)
{ {
signed char x, y; signed char x, y;
@ -530,10 +529,10 @@ static int ipvideo_decode_block_opcode_0x6_16(IpvideoContext *s)
y = bytestream2_get_byte(&s->stream_ptr); y = bytestream2_get_byte(&s->stream_ptr);
av_dlog(NULL, " motion bytes = %d, %d\n", x, y); av_dlog(NULL, " motion bytes = %d, %d\n", x, y);
return copy_from(s, &s->second_last_frame, x, y); return copy_from(s, s->second_last_frame, frame, x, y);
} }
static int ipvideo_decode_block_opcode_0x7_16(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x7_16(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
uint16_t P[2]; uint16_t P[2];
@ -570,7 +569,7 @@ static int ipvideo_decode_block_opcode_0x7_16(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0x8_16(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x8_16(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
uint16_t P[4]; uint16_t P[4];
@ -646,7 +645,7 @@ static int ipvideo_decode_block_opcode_0x8_16(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0x9_16(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x9_16(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
uint16_t P[4]; uint16_t P[4];
@ -713,7 +712,7 @@ static int ipvideo_decode_block_opcode_0x9_16(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0xA_16(IpvideoContext *s) static int ipvideo_decode_block_opcode_0xA_16(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
uint16_t P[8]; uint16_t P[8];
@ -779,7 +778,7 @@ static int ipvideo_decode_block_opcode_0xA_16(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0xB_16(IpvideoContext *s) static int ipvideo_decode_block_opcode_0xB_16(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr; uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;
@ -795,7 +794,7 @@ static int ipvideo_decode_block_opcode_0xB_16(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0xC_16(IpvideoContext *s) static int ipvideo_decode_block_opcode_0xC_16(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr; uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;
@ -815,7 +814,7 @@ static int ipvideo_decode_block_opcode_0xC_16(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0xD_16(IpvideoContext *s) static int ipvideo_decode_block_opcode_0xD_16(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
uint16_t P[2]; uint16_t P[2];
@ -836,7 +835,7 @@ static int ipvideo_decode_block_opcode_0xD_16(IpvideoContext *s)
return 0; return 0;
} }
static int ipvideo_decode_block_opcode_0xE_16(IpvideoContext *s) static int ipvideo_decode_block_opcode_0xE_16(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
uint16_t pix; uint16_t pix;
@ -855,7 +854,7 @@ static int ipvideo_decode_block_opcode_0xE_16(IpvideoContext *s)
return 0; return 0;
} }
static int (* const ipvideo_decode_block[])(IpvideoContext *s) = { static int (* const ipvideo_decode_block[])(IpvideoContext *s, AVFrame *frame) = {
ipvideo_decode_block_opcode_0x0, ipvideo_decode_block_opcode_0x1, ipvideo_decode_block_opcode_0x0, ipvideo_decode_block_opcode_0x1,
ipvideo_decode_block_opcode_0x2, ipvideo_decode_block_opcode_0x3, ipvideo_decode_block_opcode_0x2, ipvideo_decode_block_opcode_0x3,
ipvideo_decode_block_opcode_0x4, ipvideo_decode_block_opcode_0x5, ipvideo_decode_block_opcode_0x4, ipvideo_decode_block_opcode_0x5,
@ -866,7 +865,7 @@ static int (* const ipvideo_decode_block[])(IpvideoContext *s) = {
ipvideo_decode_block_opcode_0xE, ipvideo_decode_block_opcode_0xF, ipvideo_decode_block_opcode_0xE, ipvideo_decode_block_opcode_0xF,
}; };
static int (* const ipvideo_decode_block16[])(IpvideoContext *s) = { static int (* const ipvideo_decode_block16[])(IpvideoContext *s, AVFrame *frame) = {
ipvideo_decode_block_opcode_0x0, ipvideo_decode_block_opcode_0x1, ipvideo_decode_block_opcode_0x0, ipvideo_decode_block_opcode_0x1,
ipvideo_decode_block_opcode_0x2, ipvideo_decode_block_opcode_0x3, ipvideo_decode_block_opcode_0x2, ipvideo_decode_block_opcode_0x3,
ipvideo_decode_block_opcode_0x4, ipvideo_decode_block_opcode_0x5, ipvideo_decode_block_opcode_0x4, ipvideo_decode_block_opcode_0x5,
@ -877,7 +876,7 @@ static int (* const ipvideo_decode_block16[])(IpvideoContext *s) = {
ipvideo_decode_block_opcode_0xE_16, ipvideo_decode_block_opcode_0x1, ipvideo_decode_block_opcode_0xE_16, ipvideo_decode_block_opcode_0x1,
}; };
static void ipvideo_decode_opcodes(IpvideoContext *s) static void ipvideo_decode_opcodes(IpvideoContext *s, AVFrame *frame)
{ {
int x, y; int x, y;
unsigned char opcode; unsigned char opcode;
@ -887,16 +886,16 @@ static void ipvideo_decode_opcodes(IpvideoContext *s)
bytestream2_skip(&s->stream_ptr, 14); /* data starts 14 bytes in */ bytestream2_skip(&s->stream_ptr, 14); /* data starts 14 bytes in */
if (!s->is_16bpp) { if (!s->is_16bpp) {
/* this is PAL8, so make the palette available */ /* this is PAL8, so make the palette available */
memcpy(s->current_frame.data[1], s->pal, AVPALETTE_SIZE); memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);
s->stride = s->current_frame.linesize[0]; s->stride = frame->linesize[0];
} else { } else {
s->stride = s->current_frame.linesize[0] >> 1; s->stride = frame->linesize[0] >> 1;
s->mv_ptr = s->stream_ptr; s->mv_ptr = s->stream_ptr;
bytestream2_skip(&s->mv_ptr, bytestream2_get_le16(&s->stream_ptr)); bytestream2_skip(&s->mv_ptr, bytestream2_get_le16(&s->stream_ptr));
} }
s->line_inc = s->stride - 8; s->line_inc = s->stride - 8;
s->upper_motion_limit_offset = (s->avctx->height - 8) * s->current_frame.linesize[0] s->upper_motion_limit_offset = (s->avctx->height - 8) * frame->linesize[0]
+ (s->avctx->width - 8) * (1 + s->is_16bpp); + (s->avctx->width - 8) * (1 + s->is_16bpp);
init_get_bits(&gb, s->decoding_map, s->decoding_map_size * 8); init_get_bits(&gb, s->decoding_map, s->decoding_map_size * 8);
@ -909,13 +908,13 @@ static void ipvideo_decode_opcodes(IpvideoContext *s)
x, y, opcode, bytestream2_tell(&s->stream_ptr)); x, y, opcode, bytestream2_tell(&s->stream_ptr));
if (!s->is_16bpp) { if (!s->is_16bpp) {
s->pixel_ptr = s->current_frame.data[0] + x s->pixel_ptr = frame->data[0] + x
+ y*s->current_frame.linesize[0]; + y*frame->linesize[0];
ret = ipvideo_decode_block[opcode](s); ret = ipvideo_decode_block[opcode](s, frame);
} else { } else {
s->pixel_ptr = s->current_frame.data[0] + x*2 s->pixel_ptr = frame->data[0] + x*2
+ y*s->current_frame.linesize[0]; + y*frame->linesize[0];
ret = ipvideo_decode_block16[opcode](s); ret = ipvideo_decode_block16[opcode](s, frame);
} }
if (ret != 0) { if (ret != 0) {
av_log(s->avctx, AV_LOG_ERROR, " Interplay video: decode problem on frame %d, @ block (%d, %d)\n", av_log(s->avctx, AV_LOG_ERROR, " Interplay video: decode problem on frame %d, @ block (%d, %d)\n",
@ -942,8 +941,13 @@ static av_cold int ipvideo_decode_init(AVCodecContext *avctx)
ff_dsputil_init(&s->dsp, avctx); ff_dsputil_init(&s->dsp, avctx);
s->current_frame.data[0] = s->last_frame.data[0] = s->last_frame = av_frame_alloc();
s->second_last_frame.data[0] = NULL; s->second_last_frame = av_frame_alloc();
if (!s->last_frame || !s->second_last_frame) {
av_frame_free(&s->last_frame);
av_frame_free(&s->second_last_frame);
return AVERROR(ENOMEM);
}
return 0; return 0;
} }
@ -955,6 +959,7 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
IpvideoContext *s = avctx->priv_data; IpvideoContext *s = avctx->priv_data;
AVFrame *frame = data;
int ret; int ret;
/* decoding map contains 4 bits of information per 8x8 block */ /* decoding map contains 4 bits of information per 8x8 block */
@ -969,8 +974,7 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
bytestream2_init(&s->stream_ptr, buf + s->decoding_map_size, bytestream2_init(&s->stream_ptr, buf + s->decoding_map_size,
buf_size - s->decoding_map_size); buf_size - s->decoding_map_size);
s->current_frame.reference = 3; if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) {
if ((ret = ff_get_buffer(avctx, &s->current_frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, " Interplay Video: get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, " Interplay Video: get_buffer() failed\n");
return ret; return ret;
} }
@ -978,22 +982,20 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
if (!s->is_16bpp) { if (!s->is_16bpp) {
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL); const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
if (pal) { if (pal) {
s->current_frame.palette_has_changed = 1; frame->palette_has_changed = 1;
memcpy(s->pal, pal, AVPALETTE_SIZE); memcpy(s->pal, pal, AVPALETTE_SIZE);
} }
} }
ipvideo_decode_opcodes(s); ipvideo_decode_opcodes(s, frame);
*got_frame = 1; *got_frame = 1;
*(AVFrame*)data = s->current_frame;
/* shuffle frames */ /* shuffle frames */
if (s->second_last_frame.data[0]) av_frame_unref(s->second_last_frame);
avctx->release_buffer(avctx, &s->second_last_frame); FFSWAP(AVFrame*, s->second_last_frame, s->last_frame);
s->second_last_frame = s->last_frame; if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
s->last_frame = s->current_frame; return ret;
s->current_frame.data[0] = NULL; /* catch any access attempts */
/* report that the buffer was completely consumed */ /* report that the buffer was completely consumed */
return buf_size; return buf_size;
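Consolidated from the hunk above, the new Interplay frame rotation: with refcounted frames the two-frame history is maintained by moving references, never pixel data:

    av_frame_unref(s->second_last_frame);  /* drop the oldest reference */
    FFSWAP(AVFrame*, s->second_last_frame, s->last_frame);
    if ((ret = av_frame_ref(s->last_frame, frame)) < 0) /* keep current */
        return ret;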
@ -1003,11 +1005,8 @@ static av_cold int ipvideo_decode_end(AVCodecContext *avctx)
{ {
IpvideoContext *s = avctx->priv_data; IpvideoContext *s = avctx->priv_data;
/* release the last frame */ av_frame_free(&s->last_frame);
if (s->last_frame.data[0]) av_frame_free(&s->second_last_frame);
avctx->release_buffer(avctx, &s->last_frame);
if (s->second_last_frame.data[0])
avctx->release_buffer(avctx, &s->second_last_frame);
return 0; return 0;
} }

View File

@ -774,7 +774,7 @@ int ff_intrax8_decode_picture(IntraX8Context * const w, int dquant, int quant_of
/*emulate MB info in the relevant tables*/ /*emulate MB info in the relevant tables*/
s->mbskip_table [mb_xy]=0; s->mbskip_table [mb_xy]=0;
s->mbintra_table[mb_xy]=1; s->mbintra_table[mb_xy]=1;
s->current_picture.f.qscale_table[mb_xy] = w->quant; s->current_picture.qscale_table[mb_xy] = w->quant;
mb_xy++; mb_xy++;
} }
s->dest[0]+= 8; s->dest[0]+= 8;

View File

@ -352,20 +352,20 @@ static void preview_obmc(MpegEncContext *s){
do{ do{
if (get_bits1(&s->gb)) { if (get_bits1(&s->gb)) {
/* skip mb */ /* skip mb */
mot_val = s->current_picture.f.motion_val[0][s->block_index[0]]; mot_val = s->current_picture.motion_val[0][s->block_index[0]];
mot_val[0 ]= mot_val[2 ]= mot_val[0 ]= mot_val[2 ]=
mot_val[0+stride]= mot_val[2+stride]= 0; mot_val[0+stride]= mot_val[2+stride]= 0;
mot_val[1 ]= mot_val[3 ]= mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= 0; mot_val[1+stride]= mot_val[3+stride]= 0;
s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
goto end; goto end;
} }
cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
}while(cbpc == 20); }while(cbpc == 20);
if(cbpc & 4){ if(cbpc & 4){
s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA; s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
}else{ }else{
get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if (cbpc & 8) { if (cbpc & 8) {
@ -377,7 +377,7 @@ static void preview_obmc(MpegEncContext *s){
} }
if ((cbpc & 16) == 0) { if ((cbpc & 16) == 0) {
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */ /* 16x16 motion prediction */
mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus) if (s->umvplus)
@ -395,7 +395,7 @@ static void preview_obmc(MpegEncContext *s){
mot_val[1 ]= mot_val[3 ]= mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= my; mot_val[1+stride]= mot_val[3+stride]= my;
} else { } else {
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0; s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) { for(i=0;i<4;i++) {
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y); mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
if (s->umvplus) if (s->umvplus)
@ -617,7 +617,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->block_last_index[i] = -1; s->block_last_index[i] = -1;
s->mv_dir = MV_DIR_FORWARD; s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16; s->mv_type = MV_TYPE_16X16;
s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0; s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0; s->mv[0][0][1] = 0;
s->mb_skipped = !(s->obmc | s->loop_filter); s->mb_skipped = !(s->obmc | s->loop_filter);
@ -650,7 +650,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->mv_dir = MV_DIR_FORWARD; s->mv_dir = MV_DIR_FORWARD;
if ((cbpc & 16) == 0) { if ((cbpc & 16) == 0) {
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */ /* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16; s->mv_type = MV_TYPE_16X16;
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
@ -675,7 +675,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1) if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
} else { } else {
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0; s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8; s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) { for(i=0;i<4;i++) {
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y); mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
@ -703,8 +703,8 @@ int ff_h263_decode_mb(MpegEncContext *s,
} else if(s->pict_type==AV_PICTURE_TYPE_B) { } else if(s->pict_type==AV_PICTURE_TYPE_B) {
int mb_type; int mb_type;
const int stride= s->b8_stride; const int stride= s->b8_stride;
int16_t *mot_val0 = s->current_picture.f.motion_val[0][2 * (s->mb_x + s->mb_y * stride)]; int16_t *mot_val0 = s->current_picture.motion_val[0][2 * (s->mb_x + s->mb_y * stride)];
int16_t *mot_val1 = s->current_picture.f.motion_val[1][2 * (s->mb_x + s->mb_y * stride)]; int16_t *mot_val1 = s->current_picture.motion_val[1][2 * (s->mb_x + s->mb_y * stride)];
// const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride; // const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride;
//FIXME ugly //FIXME ugly
@ -787,7 +787,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
} }
} }
s->current_picture.f.mb_type[xy] = mb_type; s->current_picture.mb_type[xy] = mb_type;
} else { /* I-Frame */ } else { /* I-Frame */
do{ do{
cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
@ -802,11 +802,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
dquant = cbpc & 4; dquant = cbpc & 4;
s->mb_intra = 1; s->mb_intra = 1;
intra: intra:
s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA; s->current_picture.mb_type[xy] = MB_TYPE_INTRA;
if (s->h263_aic) { if (s->h263_aic) {
s->ac_pred = get_bits1(&s->gb); s->ac_pred = get_bits1(&s->gb);
if(s->ac_pred){ if(s->ac_pred){
s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED; s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
s->h263_aic_dir = get_bits1(&s->gb); s->h263_aic_dir = get_bits1(&s->gb);
} }

View File

@ -274,7 +274,7 @@ void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line)
*/ */
void ff_clean_h263_qscales(MpegEncContext *s){ void ff_clean_h263_qscales(MpegEncContext *s){
int i; int i;
int8_t * const qscale_table = s->current_picture.f.qscale_table; int8_t * const qscale_table = s->current_picture.qscale_table;
ff_init_qscale_tab(s); ff_init_qscale_tab(s);
@ -528,8 +528,8 @@ void ff_h263_encode_mb(MpegEncContext * s,
/* motion vectors: 8x8 mode*/ /* motion vectors: 8x8 mode*/
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y); ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
motion_x = s->current_picture.f.motion_val[0][s->block_index[i]][0]; motion_x = s->current_picture.motion_val[0][s->block_index[i]][0];
motion_y = s->current_picture.f.motion_val[0][s->block_index[i]][1]; motion_y = s->current_picture.motion_val[0][s->block_index[i]][1];
if (!s->umvplus) { if (!s->umvplus) {
ff_h263_encode_motion_vector(s, motion_x - pred_x, ff_h263_encode_motion_vector(s, motion_x - pred_x,
motion_y - pred_y, 1); motion_y - pred_y, 1);

View File

@@ -811,6 +811,7 @@ int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 {
     IVI45DecContext *ctx = avctx->priv_data;
     const uint8_t *buf = avpkt->data;
+    AVFrame *frame = data;
     int buf_size = avpkt->size;
     int result, p, b;
@@ -861,30 +862,25 @@ int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         av_log(avctx, AV_LOG_ERROR, "Buffer contains IP frames!\n");
     }
 
-    if (ctx->frame.data[0])
-        avctx->release_buffer(avctx, &ctx->frame);
-
-    ctx->frame.reference = 0;
     avcodec_set_dimensions(avctx, ctx->planes[0].width, ctx->planes[0].height);
-    if ((result = ff_get_buffer(avctx, &ctx->frame)) < 0) {
+    if ((result = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return result;
     }
 
     if (ctx->is_scalable) {
         if (avctx->codec_id == AV_CODEC_ID_INDEO4)
-            ff_ivi_recompose_haar(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0]);
+            ff_ivi_recompose_haar(&ctx->planes[0], frame->data[0], frame->linesize[0]);
         else
-            ff_ivi_recompose53(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0]);
+            ff_ivi_recompose53(&ctx->planes[0], frame->data[0], frame->linesize[0]);
     } else {
-        ivi_output_plane(&ctx->planes[0], ctx->frame.data[0], ctx->frame.linesize[0]);
+        ivi_output_plane(&ctx->planes[0], frame->data[0], frame->linesize[0]);
     }
 
-    ivi_output_plane(&ctx->planes[2], ctx->frame.data[1], ctx->frame.linesize[1]);
-    ivi_output_plane(&ctx->planes[1], ctx->frame.data[2], ctx->frame.linesize[2]);
+    ivi_output_plane(&ctx->planes[2], frame->data[1], frame->linesize[1]);
+    ivi_output_plane(&ctx->planes[1], frame->data[2], frame->linesize[2]);
 
     *got_frame = 1;
-    *(AVFrame*)data = ctx->frame;
 
     return buf_size;
 }
@@ -901,9 +897,6 @@ av_cold int ff_ivi_decode_close(AVCodecContext *avctx)
     if (ctx->mb_vlc.cust_tab.table)
         ff_free_vlc(&ctx->mb_vlc.cust_tab);
 
-    if (ctx->frame.data[0])
-        avctx->release_buffer(avctx, &ctx->frame);
-
 #if IVI4_STREAM_ANALYSER
     if (avctx->codec_id == AV_CODEC_ID_INDEO4) {
     if (ctx->is_scalable)
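
This is the simplest shape of the conversion: the decoder no longer keeps an AVFrame in its private context, decodes straight into the caller-supplied frame, and the matching release_buffer() calls vanish because the caller owns the reference. A minimal sketch of that skeleton (hypothetical decoder, assuming the post-commit API from avcodec.h/internal.h):

    /* Sketch: decode directly into the user-supplied frame; nothing is
     * retained in the private context and nothing is released manually. */
    static int decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
    {
        AVFrame *frame = data;   /* the frame the caller will receive */
        int ret;

        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
            return ret;          /* nothing to clean up on failure */

        /* ... decode avpkt->data into frame->data[] ... */

        *got_frame = 1;          /* ownership passes to the caller */
        return avpkt->size;
    }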

libavcodec/ivi_common.h

@@ -195,7 +195,6 @@ typedef struct IVIPicConfig {
 typedef struct IVI45DecContext {
     GetBitContext   gb;
-    AVFrame         frame;
     RVMapDesc       rvmap_tabs[9];  ///< local corrected copy of the static rvmap tables
 
     uint32_t        frame_num;

libavcodec/jvdec.c

@@ -28,6 +28,7 @@
 #include "avcodec.h"
 #include "dsputil.h"
 #include "get_bits.h"
+#include "internal.h"
 #include "libavutil/intreadwrite.h"
 
 typedef struct JvContext {
@@ -136,16 +137,16 @@ static int decode_frame(AVCodecContext *avctx,
     int buf_size = avpkt->size;
     const uint8_t *buf = avpkt->data;
     const uint8_t *buf_end = buf + buf_size;
-    int video_size, video_type, i, j;
+    int video_size, video_type, i, j, ret;
 
     video_size = AV_RL32(buf);
     video_type = buf[4];
     buf += 5;
 
     if (video_size) {
-        if (avctx->reget_buffer(avctx, &s->frame) < 0) {
+        if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-            return -1;
+            return ret;
         }
 
         if (video_type == 0 || video_type == 1) {
@@ -185,8 +186,9 @@ static int decode_frame(AVCodecContext *avctx,
             s->palette_has_changed = 0;
         memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
 
+        if ((ret = av_frame_ref(data, &s->frame)) < 0)
+            return ret;
         *got_frame = 1;
-        *(AVFrame*)data = s->frame;
     }
 
     return buf_size;
@@ -196,8 +198,7 @@ static av_cold int decode_close(AVCodecContext *avctx)
 {
     JvContext *s = avctx->priv_data;
 
-    if(s->frame.data[0])
-        avctx->release_buffer(avctx, &s->frame);
+    av_frame_unref(&s->frame);
 
     return 0;
 }
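
JV shows the pattern for codecs that patch their previous output in place: one context-owned frame is updated with ff_reget_buffer(), and the caller then gets its own reference via av_frame_ref() instead of a raw struct copy. A minimal sketch, assuming the JvContext member names used in the hunks above:

    /* Sketch: keep one internal frame, update it in place, hand out a
     * new reference; the old "*(AVFrame*)data = s->frame" struct copy is
     * gone because ownership must now be explicit. */
    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
        return ret;
    /* ... apply this packet's delta onto s->frame ... */
    if ((ret = av_frame_ref(data, &s->frame)) < 0)
        return ret;
    *got_frame = 1;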

libavcodec/kgv1dec.c

@@ -32,20 +32,20 @@
 typedef struct {
     AVCodecContext *avctx;
-    AVFrame prev, cur;
+    AVFrame prev;
 } KgvContext;
 
 static void decode_flush(AVCodecContext *avctx)
 {
     KgvContext * const c = avctx->priv_data;
 
-    if (c->prev.data[0])
-        avctx->release_buffer(avctx, &c->prev);
+    av_frame_unref(&c->prev);
 }
 
 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                         AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     const uint8_t *buf_end = buf + avpkt->size;
     KgvContext * const c = avctx->priv_data;
@@ -65,17 +65,15 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         return res;
 
     if (w != avctx->width || h != avctx->height) {
-        if (c->prev.data[0])
-            avctx->release_buffer(avctx, &c->prev);
+        av_frame_unref(&c->prev);
         avcodec_set_dimensions(avctx, w, h);
     }
 
     maxcnt = w * h;
 
-    c->cur.reference = 3;
-    if ((res = ff_get_buffer(avctx, &c->cur)) < 0)
+    if ((res = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
         return res;
-    out = (uint16_t *) c->cur.data[0];
+    out = (uint16_t *) frame->data[0];
     if (c->prev.data[0]) {
         prev = (uint16_t *) c->prev.data[0];
     } else {
@@ -156,12 +154,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     if (outcnt - maxcnt)
         av_log(avctx, AV_LOG_DEBUG, "frame finished with %d diff\n", outcnt - maxcnt);
 
-    *got_frame = 1;
-    *(AVFrame*)data = c->cur;
-
-    if (c->prev.data[0])
-        avctx->release_buffer(avctx, &c->prev);
-    FFSWAP(AVFrame, c->cur, c->prev);
+    av_frame_unref(&c->prev);
+    if ((res = av_frame_ref(&c->prev, frame)) < 0)
+        return res;
+
+    *got_frame = 1;
 
     return avpkt->size;
 }
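
KGV1 illustrates the other way to keep a reference frame: decode into the caller's frame, but request the buffer with AV_GET_BUFFER_FLAG_REF so the decoder may hold on to it across calls, then swap the saved reference rather than FFSWAP-ing two owned frames. Sketch, using the names from the hunks above:

    /* Sketch: the flag tells the allocator the decoder will keep a
     * reference; c->prev then merely tracks the last output frame. */
    if ((res = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return res;
    /* ... predict from c->prev (if set), write pixels into frame ... */
    av_frame_unref(&c->prev);
    if ((res = av_frame_ref(&c->prev, frame)) < 0)
        return res;
    *got_frame = 1;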

libavcodec/kmvc.c

@@ -41,7 +41,6 @@
  */
 typedef struct KmvcContext {
     AVCodecContext *avctx;
-    AVFrame pic;
 
     int setpal;
     int palsize;
@@ -247,6 +246,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame,
                         AVPacket *avpkt)
 {
     KmvcContext *const ctx = avctx->priv_data;
+    AVFrame *frame = data;
     uint8_t *out, *src;
     int i, ret;
     int header;
@@ -254,12 +254,8 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame,
     const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
 
     bytestream2_init(&ctx->g, avpkt->data, avpkt->size);
-    if (ctx->pic.data[0])
-        avctx->release_buffer(avctx, &ctx->pic);
 
-    ctx->pic.reference = 1;
-    ctx->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
-    if ((ret = ff_get_buffer(avctx, &ctx->pic)) < 0) {
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -277,15 +273,15 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame,
     }
 
     if (header & KMVC_KEYFRAME) {
-        ctx->pic.key_frame = 1;
-        ctx->pic.pict_type = AV_PICTURE_TYPE_I;
+        frame->key_frame = 1;
+        frame->pict_type = AV_PICTURE_TYPE_I;
     } else {
-        ctx->pic.key_frame = 0;
-        ctx->pic.pict_type = AV_PICTURE_TYPE_P;
+        frame->key_frame = 0;
+        frame->pict_type = AV_PICTURE_TYPE_P;
     }
 
     if (header & KMVC_PALETTE) {
-        ctx->pic.palette_has_changed = 1;
+        frame->palette_has_changed = 1;
         // palette starts from index 1 and has 127 entries
         for (i = 1; i <= ctx->palsize; i++) {
             ctx->pal[i] = bytestream2_get_be24(&ctx->g);
@@ -293,17 +289,17 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame,
     }
 
     if (pal) {
-        ctx->pic.palette_has_changed = 1;
+        frame->palette_has_changed = 1;
         memcpy(ctx->pal, pal, AVPALETTE_SIZE);
     }
 
     if (ctx->setpal) {
         ctx->setpal = 0;
-        ctx->pic.palette_has_changed = 1;
+        frame->palette_has_changed = 1;
     }
 
     /* make the palette available on the way out */
-    memcpy(ctx->pic.data[1], ctx->pal, 1024);
+    memcpy(frame->data[1], ctx->pal, 1024);
 
     blocksize = bytestream2_get_byte(&ctx->g);
@@ -328,12 +324,12 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame,
         return AVERROR_INVALIDDATA;
     }
 
-    out = ctx->pic.data[0];
+    out = frame->data[0];
     src = ctx->cur;
     for (i = 0; i < avctx->height; i++) {
         memcpy(out, src, avctx->width);
         src += 320;
-        out += ctx->pic.linesize[0];
+        out += frame->linesize[0];
     }
 
     /* flip buffers */
@@ -346,7 +342,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame,
     }
 
     *got_frame = 1;
-    *(AVFrame *) data = ctx->pic;
 
     /* always report that the buffer was completely consumed */
     return avpkt->size;
@@ -415,8 +410,6 @@ static av_cold int decode_end(AVCodecContext * avctx)
     av_freep(&c->frm0);
     av_freep(&c->frm1);
-    if (c->pic.data[0])
-        avctx->release_buffer(avctx, &c->pic);
 
     return 0;
 }
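
A side effect of decoding into the caller's frame is that per-frame properties are set on it directly, which lets the KMVC_KEYFRAME branches above collapse. An equivalent compact sketch (same names as the hunks):

    /* Sketch: frame properties travel with the output frame itself. */
    frame->key_frame = !!(header & KMVC_KEYFRAME);
    frame->pict_type = (header & KMVC_KEYFRAME) ? AV_PICTURE_TYPE_I
                                                : AV_PICTURE_TYPE_P;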

libavcodec/lagarith.c

@@ -48,7 +48,6 @@ enum LagarithFrameType {
 typedef struct LagarithContext {
     AVCodecContext *avctx;
-    AVFrame picture;
     DSPContext dsp;
     int zeros;      /**< number of consecutive zero bytes encountered */
     int zeros_rem;  /**< number of zero bytes remaining to output */
@@ -502,19 +501,14 @@ static int lag_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     LagarithContext *l = avctx->priv_data;
-    AVFrame *const p = &l->picture;
+    ThreadFrame frame = { .f = data };
+    AVFrame *const p = data;
     uint8_t frametype = 0;
     uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
     uint32_t offs[4];
     uint8_t *srcs[4], *dst;
     int i, j, planes = 3;
-    AVFrame *picture = data;
-
-    if (p->data[0])
-        ff_thread_release_buffer(avctx, p);
-
-    p->reference = 0;
     p->key_frame = 1;
 
     frametype = buf[0];
@@ -526,7 +520,7 @@
     case FRAME_SOLID_RGBA:
         avctx->pix_fmt = AV_PIX_FMT_RGB32;
 
-        if (ff_thread_get_buffer(avctx, p) < 0) {
+        if (ff_thread_get_buffer(avctx, &frame, 0) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return -1;
         }
@@ -548,7 +542,7 @@
         if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
             avctx->pix_fmt = AV_PIX_FMT_RGB24;
 
-        if (ff_thread_get_buffer(avctx, p) < 0) {
+        if (ff_thread_get_buffer(avctx, &frame, 0) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return -1;
         }
@@ -608,7 +602,7 @@
     case FRAME_ARITH_YUY2:
         avctx->pix_fmt = AV_PIX_FMT_YUV422P;
 
-        if (ff_thread_get_buffer(avctx, p) < 0) {
+        if (ff_thread_get_buffer(avctx, &frame, 0) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return -1;
         }
@@ -634,7 +628,7 @@
     case FRAME_ARITH_YV12:
         avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 
-        if (ff_thread_get_buffer(avctx, p) < 0) {
+        if (ff_thread_get_buffer(avctx, &frame, 0) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return -1;
         }
@@ -663,7 +657,6 @@
         return -1;
     }
 
-    *picture = *p;
     *got_frame = 1;
 
     return buf_size;
@@ -683,8 +676,6 @@ static av_cold int lag_decode_end(AVCodecContext *avctx)
 {
     LagarithContext *l = avctx->priv_data;
 
-    if (l->picture.data[0])
-        ff_thread_release_buffer(avctx, &l->picture);
     av_freep(&l->rgb_planes);
 
     return 0;

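Frame-threaded decoders such as Lagarith go through the ThreadFrame wrapper instead: the caller's AVFrame is wrapped once at the top of decode_frame and every allocation runs through ff_thread_get_buffer(), with no private copy kept or released. A sketch of that skeleton (assuming the post-commit threading API from thread.h):

    /* Sketch: wrap the output frame for the frame-threading machinery;
     * p aliases the same AVFrame for pixel access. */
    ThreadFrame frame = { .f = data };
    AVFrame *const p  = data;

    if (ff_thread_get_buffer(avctx, &frame, 0) < 0)
        return -1;
    /* ... decode into p->data[] ... */
    *got_frame = 1;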
Some files were not shown because too many files have changed in this diff.