cosmetics: indentation, prettyprinting, K&R coding style

Originally committed as revision 19652 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diego Biurrun 2009-08-15 11:42:15 +00:00
parent c64380009d
commit 0ebe3b8e2b
8 changed files with 267 additions and 304 deletions


@@ -45,18 +45,17 @@ static const FfmpegDiracSchroVideoFormatInfo ff_dirac_schro_video_format_info[]
    { 4096, 2160, 24, 1 },
};

unsigned int ff_dirac_schro_get_video_format_idx(AVCodecContext *avccontext)
{
    unsigned int ret_idx = 0;
    unsigned int idx;
    unsigned int num_formats = sizeof(ff_dirac_schro_video_format_info) /
                               sizeof(ff_dirac_schro_video_format_info[0]);

    for (idx = 1; idx < num_formats; ++idx) {
        const FfmpegDiracSchroVideoFormatInfo *vf = &ff_dirac_schro_video_format_info[idx];
        if (avccontext->width == vf->width &&
            avccontext->height == vf->height) {
            ret_idx = idx;
            if (avccontext->time_base.den == vf->frame_rate_num &&
                avccontext->time_base.num == vf->frame_rate_denom)

@@ -66,23 +65,22 @@ unsigned int ff_dirac_schro_get_video_format_idx (AVCodecContext *avccontext)
    return ret_idx;
}

void ff_dirac_schro_queue_init(FfmpegDiracSchroQueue *queue)
{
    queue->p_head = queue->p_tail = NULL;
    queue->size = 0;
}

void ff_dirac_schro_queue_free(FfmpegDiracSchroQueue *queue,
                               void (*free_func)(void *))
{
    while (queue->p_head)
        free_func(ff_dirac_schro_queue_pop(queue));
}

int ff_dirac_schro_queue_push_back(FfmpegDiracSchroQueue *queue, void *p_data)
{
    FfmpegDiracSchroQueueElement *p_new = av_mallocz(sizeof(FfmpegDiracSchroQueueElement));

    if (!p_new)
        return -1;

@@ -99,7 +97,7 @@ int ff_dirac_schro_queue_push_back (FfmpegDiracSchroQueue *queue, void *p_data)
    return 0;
}

void *ff_dirac_schro_queue_pop(FfmpegDiracSchroQueue *queue)
{
    FfmpegDiracSchroQueueElement *top = queue->p_head;

@@ -107,7 +105,7 @@ void *ff_dirac_schro_queue_pop (FfmpegDiracSchroQueue *queue)
        void *data = top->data;
        queue->p_head = queue->p_head->next;
        --queue->size;
        av_freep(&top);
        return data;
    }
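A small worked example of the format-table lookup above, assuming the 4096x2160 rows shown in the hunk are ordinary table entries (the rest of the table is not visible here): when width and height match a row but the frame rate matches no entry exactly, the function still returns the last width/height match rather than falling back to index 0. The snippet is purely illustrative, not part of this commit.

    /* hypothetical caller: 4096x2160 input at 25 fps */
    AVCodecContext ctx = { 0 };
    unsigned int idx;

    ctx.width     = 4096;
    ctx.height    = 2160;
    ctx.time_base = (AVRational){ 1, 25 };   /* 25 fps matches neither 24/1 row */

    idx = ff_dirac_schro_get_video_format_idx(&ctx);
    /* idx is the last 4096x2160 row: ret_idx is kept on a width/height match
     * even when time_base does not equal frame_rate_num/frame_rate_denom. */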


@@ -28,8 +28,7 @@
#include "avcodec.h"

typedef struct {
    uint16_t width;
    uint16_t height;
    uint16_t frame_rate_num;

@@ -39,13 +38,12 @@ typedef struct
/**
 * Returns the index into the Dirac Schro common video format info table
 */
unsigned int ff_dirac_schro_get_video_format_idx(AVCodecContext *avccontext);

/**
 * contains a single encoded frame returned from Dirac or Schroedinger
 */
typedef struct FfmpegDiracSchroEncodedFrame {
    /** encoded frame data */
    uint8_t *p_encbuf;

@@ -62,8 +60,7 @@ typedef struct FfmpegDiracSchroEncodedFrame
/**
 * queue element
 */
typedef struct FfmpegDiracSchroQueueElement {
    /** Data to be stored in queue*/
    void *data;
    /** Pointer to next element queue */

@@ -74,8 +71,7 @@ typedef struct FfmpegDiracSchroQueueElement
/**
 * A simple queue implementation used in libdirac and libschroedinger
 */
typedef struct FfmpegDiracSchroQueue {
    /** Pointer to head of queue */
    FfmpegDiracSchroQueueElement *p_head;
    /** Pointer to tail of queue */

@@ -92,12 +88,12 @@ void ff_dirac_schro_queue_init(FfmpegDiracSchroQueue *queue);
/**
 * Add an element to the end of the queue
 */
int ff_dirac_schro_queue_push_back(FfmpegDiracSchroQueue *queue, void *p_data);

/**
 * Return the first element in the queue
 */
void *ff_dirac_schro_queue_pop(FfmpegDiracSchroQueue *queue);

/**
 * Free the queue resources. free_func is a function supplied by the caller to
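The queue declared above is a plain head/tail singly linked FIFO shared by the libdirac and libschroedinger wrappers. A minimal usage sketch follows, assuming the declarations from this header are available on the include path (the header's file name is not shown in this diff, and the demo payload plus the use of plain malloc are illustrative only):

    #include <stdlib.h>

    static void free_payload(void *p)
    {
        free(p);
    }

    static void queue_demo(void)
    {
        FfmpegDiracSchroQueue q;
        int *payload = malloc(sizeof(*payload));

        ff_dirac_schro_queue_init(&q);               /* empty queue: head = tail = NULL, size = 0 */

        if (payload && ff_dirac_schro_queue_push_back(&q, payload) < 0)
            free(payload);                           /* push_back returns -1 if it cannot allocate an element */

        free(ff_dirac_schro_queue_pop(&q));          /* pop returns the oldest payload, or NULL when empty */

        ff_dirac_schro_queue_free(&q, free_payload); /* remaining payloads are released via the callback */
    }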


@@ -36,8 +36,7 @@
#include <libdirac_decoder/dirac_parser.h>

/** contains a single frame returned from Dirac */
typedef struct FfmpegDiracDecoderParams {
    /** decoder handle */
    dirac_decoder_t* p_decoder;

@@ -64,13 +63,13 @@ static enum PixelFormat GetFfmpegChromaFormat(dirac_chroma_t dirac_pix_fmt)
static av_cold int libdirac_decode_init(AVCodecContext *avccontext)
{
    FfmpegDiracDecoderParams *p_dirac_params = avccontext->priv_data;
    p_dirac_params->p_decoder = dirac_decoder_init(avccontext->debug);

    if (!p_dirac_params->p_decoder)
        return -1;

    return 0;
}

static int libdirac_decode_frame(AVCodecContext *avccontext,

@@ -88,25 +87,23 @@ static int libdirac_decode_frame(AVCodecContext *avccontext,
    *data_size = 0;

    if (buf_size > 0) {
        /* set data to decode into buffer */
        dirac_buffer(p_dirac_params->p_decoder, buf, buf + buf_size);
        if ((buf[4] & 0x08) == 0x08 && (buf[4] & 0x03))
            avccontext->has_b_frames = 1;
    }
    while (1) {
        /* parse data and process result */
        DecoderState state = dirac_parse(p_dirac_params->p_decoder);
        switch (state) {
        case STATE_BUFFER:
            return buf_size;

        case STATE_SEQUENCE:
        {
            /* tell FFmpeg about sequence details */
            dirac_sourceparams_t *src_params = &p_dirac_params->p_decoder->src_params;

            if (avcodec_check_dimensions(avccontext, src_params->width,
                                         src_params->height) < 0) {

@@ -121,9 +118,9 @@ static int libdirac_decode_frame(AVCodecContext *avccontext,
            avccontext->pix_fmt = GetFfmpegChromaFormat(src_params->chroma);
            if (avccontext->pix_fmt == PIX_FMT_NONE) {
                av_log(avccontext, AV_LOG_ERROR,
                       "Dirac chroma format %d not supported currently\n",
                       src_params->chroma);
                return -1;
            }

@@ -140,7 +137,7 @@ static int libdirac_decode_frame(AVCodecContext *avccontext,
            /* allocate output buffer */
            if (!p_dirac_params->p_out_frame_buf)
                p_dirac_params->p_out_frame_buf = av_malloc(pict_size);
            buffer[0] = p_dirac_params->p_out_frame_buf;
            buffer[1] = p_dirac_params->p_out_frame_buf +
                        pic.linesize[0] * avccontext->height;

@@ -177,20 +174,20 @@ static int libdirac_decode_frame(AVCodecContext *avccontext,
static av_cold int libdirac_decode_close(AVCodecContext *avccontext)
{
    FfmpegDiracDecoderParams *p_dirac_params = avccontext->priv_data;
    dirac_decoder_close(p_dirac_params->p_decoder);

    av_freep(&p_dirac_params->p_out_frame_buf);

    return 0;
}

static void libdirac_flush(AVCodecContext *avccontext)
{
    /* Got a seek request. We will need free memory held in the private
     * context and free the current Dirac decoder handle and then open
     * a new decoder handle. */
    libdirac_decode_close(avccontext);
    libdirac_decode_init(avccontext);
    return;
}

@@ -208,4 +205,4 @@ AVCodec libdirac_decoder = {
    CODEC_CAP_DELAY,
    .flush = libdirac_flush,
    .long_name = NULL_IF_CONFIG_SMALL("libdirac Dirac 2.2"),
};


@@ -38,8 +38,7 @@
#include <libdirac_encoder/dirac_encoder.h>

/** Dirac encoder private data */
typedef struct FfmpegDiracEncoderParams {
    /** Dirac encoder context */
    dirac_encoder_context_t enc_ctx;

@@ -114,12 +113,12 @@ static const VideoFormat ff_dirac_video_formats[]={
 * Returns the video format preset matching the input video dimensions and
 * time base.
 */
static VideoFormat GetDiracVideoFormatPreset(AVCodecContext *avccontext)
{
    unsigned int num_formats = sizeof(ff_dirac_video_formats) /
                               sizeof(ff_dirac_video_formats[0]);

    unsigned int idx = ff_dirac_schro_get_video_format_idx(avccontext);

    return (idx < num_formats) ?
           ff_dirac_video_formats[idx] : VIDEO_FORMAT_CUSTOM;

@@ -130,30 +129,27 @@ static av_cold int libdirac_encode_init(AVCodecContext *avccontext)
    FfmpegDiracEncoderParams* p_dirac_params = avccontext->priv_data;
    int no_local = 1;
    int verbose = avccontext->debug;
    VideoFormat preset;

    /* get Dirac preset */
    preset = GetDiracVideoFormatPreset(avccontext);

    /* initialize the encoder context */
    dirac_encoder_context_init(&(p_dirac_params->enc_ctx), preset);

    p_dirac_params->enc_ctx.src_params.chroma = GetDiracChromaFormat(avccontext->pix_fmt);

    if (p_dirac_params->enc_ctx.src_params.chroma == formatNK) {
        av_log(avccontext, AV_LOG_ERROR,
               "Unsupported pixel format %d. This codec supports only "
               "Planar YUV formats (yuv420p, yuv422p, yuv444p\n",
               avccontext->pix_fmt);
        return -1;
    }

    p_dirac_params->enc_ctx.src_params.frame_rate.numerator = avccontext->time_base.den;
    p_dirac_params->enc_ctx.src_params.frame_rate.denominator = avccontext->time_base.num;

    p_dirac_params->enc_ctx.src_params.width = avccontext->width;
    p_dirac_params->enc_ctx.src_params.height = avccontext->height;

@@ -182,20 +178,20 @@ static av_cold int libdirac_encode_init(AVCodecContext *avccontext)
    if (avccontext->flags & CODEC_FLAG_QSCALE) {
        if (avccontext->global_quality) {
            p_dirac_params->enc_ctx.enc_params.qf = avccontext->global_quality
                                                    / (FF_QP2LAMBDA * 10.0);
            /* if it is not default bitrate then send target rate. */
            if (avccontext->bit_rate >= 1000 &&
                avccontext->bit_rate != 200000)
                p_dirac_params->enc_ctx.enc_params.trate = avccontext->bit_rate
                                                           / 1000;
        } else
            p_dirac_params->enc_ctx.enc_params.lossless = 1;
    } else if (avccontext->bit_rate >= 1000)
        p_dirac_params->enc_ctx.enc_params.trate = avccontext->bit_rate / 1000;

    if ((preset > VIDEO_FORMAT_QCIF || preset < VIDEO_FORMAT_QSIF525) &&
        avccontext->bit_rate == 200000)
        p_dirac_params->enc_ctx.enc_params.trate = 0;

    if (avccontext->flags & CODEC_FLAG_INTERLACED_ME)

@@ -203,8 +199,8 @@ static av_cold int libdirac_encode_init(AVCodecContext *avccontext)
     * irrespective of the type of source material */
    p_dirac_params->enc_ctx.enc_params.picture_coding_mode = 1;

    p_dirac_params->p_encoder = dirac_encoder_init(&(p_dirac_params->enc_ctx),
                                                   verbose);

    if (!p_dirac_params->p_encoder) {
        av_log(avccontext, AV_LOG_ERROR,

@@ -218,14 +214,14 @@ static av_cold int libdirac_encode_init(AVCodecContext *avccontext)
    /* initialize the encoded frame queue */
    ff_dirac_schro_queue_init(&p_dirac_params->enc_frame_queue);

    return 0;
}

static void DiracFreeFrame(void *data)
{
    FfmpegDiracSchroEncodedFrame *enc_frame = data;

    av_freep(&(enc_frame->p_encbuf));
    av_free(enc_frame);
}

@@ -236,7 +232,7 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
    int enc_size = 0;
    dirac_encoder_state_t state;
    FfmpegDiracEncoderParams* p_dirac_params = avccontext->priv_data;
    FfmpegDiracSchroEncodedFrame* p_frame_output = NULL;
    FfmpegDiracSchroEncodedFrame* p_next_output_frame = NULL;
    int go = 1;
    int last_frame_in_sequence = 0;

@@ -244,7 +240,7 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
    if (!data) {
        /* push end of sequence if not already signalled */
        if (!p_dirac_params->eos_signalled) {
            dirac_encoder_end_sequence(p_dirac_params->p_encoder);
            p_dirac_params->eos_signalled = 1;
        }
    } else {

@@ -253,15 +249,15 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
         * Input line size may differ from what the codec supports,
         * especially when transcoding from one format to another.
         * So use avpicture_layout to copy the frame. */
        avpicture_layout((AVPicture *)data, avccontext->pix_fmt,
                         avccontext->width, avccontext->height,
                         p_dirac_params->p_in_frame_buf,
                         p_dirac_params->frame_size);

        /* load next frame */
        if (dirac_encoder_load(p_dirac_params->p_encoder,
                               p_dirac_params->p_in_frame_buf,
                               p_dirac_params->frame_size) < 0) {
            av_log(avccontext, AV_LOG_ERROR, "Unrecoverable Encoder Error."
                   " dirac_encoder_load failed...\n");
            return -1;

@@ -271,34 +267,30 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
    if (p_dirac_params->eos_pulled)
        go = 0;

    while (go) {
        p_dirac_params->p_encoder->enc_buf.buffer = frame;
        p_dirac_params->p_encoder->enc_buf.size = buf_size;
        /* process frame */
        state = dirac_encoder_output(p_dirac_params->p_encoder);

        switch (state) {
        case ENC_STATE_AVAIL:
        case ENC_STATE_EOS:
            assert(p_dirac_params->p_encoder->enc_buf.size > 0);

            /* All non-frame data is prepended to actual frame data to
             * be able to set the pts correctly. So we don't write data
             * to the frame output queue until we actually have a frame
             */
            p_dirac_params->enc_buf = av_realloc(p_dirac_params->enc_buf,
                                                 p_dirac_params->enc_buf_size +
                                                 p_dirac_params->p_encoder->enc_buf.size);
            memcpy(p_dirac_params->enc_buf + p_dirac_params->enc_buf_size,
                   p_dirac_params->p_encoder->enc_buf.buffer,
                   p_dirac_params->p_encoder->enc_buf.size);

            p_dirac_params->enc_buf_size += p_dirac_params->p_encoder->enc_buf.size;

            if (state == ENC_STATE_EOS) {
                p_dirac_params->eos_pulled = 1;

@@ -313,17 +305,16 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
            /* create output frame */
            p_frame_output = av_mallocz(sizeof(FfmpegDiracSchroEncodedFrame));
            /* set output data */
            p_frame_output->size = p_dirac_params->enc_buf_size;
            p_frame_output->p_encbuf = p_dirac_params->enc_buf;
            p_frame_output->frame_num = p_dirac_params->p_encoder->enc_pparams.pnum;

            if (p_dirac_params->p_encoder->enc_pparams.ptype == INTRA_PICTURE &&
                p_dirac_params->p_encoder->enc_pparams.rtype == REFERENCE_PICTURE)
                p_frame_output->key_frame = 1;

            ff_dirac_schro_queue_push_back(&p_dirac_params->enc_frame_queue,
                                           p_frame_output);

            p_dirac_params->enc_buf_size = 0;
            p_dirac_params->enc_buf = NULL;

@@ -346,12 +337,10 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
    /* copy 'next' frame in queue */

    if (p_dirac_params->enc_frame_queue.size == 1 && p_dirac_params->eos_pulled)
        last_frame_in_sequence = 1;

    p_next_output_frame = ff_dirac_schro_queue_pop(&p_dirac_params->enc_frame_queue);

    if (!p_next_output_frame)
        return 0;

@@ -366,12 +355,11 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
    /* Append the end of sequence information to the last frame in the
     * sequence. */
    if (last_frame_in_sequence && p_dirac_params->enc_buf_size > 0) {
        memcpy(frame + enc_size, p_dirac_params->enc_buf,
               p_dirac_params->enc_buf_size);

        enc_size += p_dirac_params->enc_buf_size;
        av_freep(&p_dirac_params->enc_buf);
        p_dirac_params->enc_buf_size = 0;
    }

@@ -386,7 +374,7 @@ static av_cold int libdirac_encode_close(AVCodecContext *avccontext)
    FfmpegDiracEncoderParams* p_dirac_params = avccontext->priv_data;

    /* close the encoder */
    dirac_encoder_close(p_dirac_params->p_encoder);

    /* free data in the output frame queue */
    ff_dirac_schro_queue_free(&p_dirac_params->enc_frame_queue,

@@ -399,7 +387,7 @@ static av_cold int libdirac_encode_close(AVCodecContext *avccontext)
    /* free the input frame buffer */
    av_freep(&p_dirac_params->p_in_frame_buf);

    return 0;
}

@@ -411,7 +399,7 @@ AVCodec libdirac_encoder = {
    libdirac_encode_init,
    libdirac_encode_frame,
    libdirac_encode_close,
    .capabilities = CODEC_CAP_DELAY,
    .pix_fmts = (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("libdirac Dirac 2.2"),
};
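As a worked example of the quality mapping in libdirac_encode_init above: a caller that sets CODEC_FLAG_QSCALE typically has global_quality = qscale * FF_QP2LAMBDA (that is how ffmpeg's -qscale option fills the field), so the assignments reduce to

    qf    = (qscale * FF_QP2LAMBDA) / (FF_QP2LAMBDA * 10.0) = qscale / 10.0
    trate = bit_rate / 1000   /* kbit/s; only sent when bit_rate >= 1000 and differs from the 200000 default */

so, for instance, -qscale 5 requests qf = 0.5 from the Dirac encoder; the FF_QP2LAMBDA factor cancels out entirely.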


@@ -57,8 +57,8 @@ SchroVideoFormatEnum ff_get_schro_video_format_preset(AVCodecContext *avccontext
    unsigned int idx = ff_dirac_schro_get_video_format_idx (avccontext);

    return (idx < num_formats) ? ff_schro_video_formats[idx] :
                                 SCHRO_VIDEO_FORMAT_CUSTOM;
}

int ff_get_schro_frame_format (SchroChromaFormat schro_pix_fmt,

@@ -71,8 +71,7 @@ int ff_get_schro_frame_format (SchroChromaFormat schro_pix_fmt,
    for (idx = 0; idx < num_formats; ++idx) {
        if (ffmpeg_schro_pixel_format_map[idx].schro_pix_fmt == schro_pix_fmt) {
            *schro_frame_fmt = ffmpeg_schro_pixel_format_map[idx].schro_frame_fmt;
            return 0;
        }
    }


@@ -50,7 +50,7 @@ SchroVideoFormatEnum ff_get_schro_video_format_preset (AVCodecContext *avccontex
 * Sets the Schroedinger frame format corresponding to the Schro chroma format
 * passed. Returns 0 on success, -1 on failure.
 */
int ff_get_schro_frame_format(SchroChromaFormat schro_chroma_fmt,
                              SchroFrameFormat *schro_frame_fmt);

#endif /* AVCODEC_LIBSCHROEDINGER_H */


@@ -40,8 +40,7 @@
#include <schroedinger/schrovideoformat.h>

/** libschroedinger decoder private data */
typedef struct FfmpegSchroDecoderParams {
    /** Schroedinger video format */
    SchroVideoFormat *format;

@@ -64,24 +63,23 @@ typedef struct FfmpegSchroDecoderParams
    AVPicture dec_pic;
} FfmpegSchroDecoderParams;

typedef struct FfmpegSchroParseUnitContext {
    const uint8_t *buf;
    int buf_size;
} FfmpegSchroParseUnitContext;

static void libschroedinger_decode_buffer_free(SchroBuffer *schro_buf,
                                               void *priv);

static void FfmpegSchroParseContextInit(FfmpegSchroParseUnitContext *parse_ctx,
                                        const uint8_t *buf, int buf_size)
{
    parse_ctx->buf = buf;
    parse_ctx->buf_size = buf_size;
}

static SchroBuffer* FfmpegFindNextSchroParseUnit(FfmpegSchroParseUnitContext *parse_ctx)
{
    SchroBuffer *enc_buf = NULL;
    int next_pu_offset = 0;

@@ -107,12 +105,12 @@ static SchroBuffer* FfmpegFindNextSchroParseUnit (FfmpegSchroParseUnitContext *p
        return NULL;

    in_buf = av_malloc(next_pu_offset);
    memcpy(in_buf, parse_ctx->buf, next_pu_offset);
    enc_buf = schro_buffer_new_with_data(in_buf, next_pu_offset);
    enc_buf->free = libschroedinger_decode_buffer_free;
    enc_buf->priv = in_buf;

    parse_ctx->buf += next_pu_offset;
    parse_ctx->buf_size -= next_pu_offset;

    return enc_buf;

@@ -136,29 +134,29 @@ static enum PixelFormat GetFfmpegChromaFormat(SchroChromaFormat schro_pix_fmt)
static av_cold int libschroedinger_decode_init(AVCodecContext *avccontext)
{
    FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;
    /* First of all, initialize our supporting libraries. */
    schro_init();

    schro_debug_set_level(avccontext->debug);
    p_schro_params->decoder = schro_decoder_new();
    schro_decoder_set_skip_ratio(p_schro_params->decoder, 1);

    if (!p_schro_params->decoder)
        return -1;

    /* Initialize the decoded frame queue. */
    ff_dirac_schro_queue_init(&p_schro_params->dec_frame_queue);
    return 0;
}

static void libschroedinger_decode_buffer_free(SchroBuffer *schro_buf,
                                               void *priv)
{
    av_freep(&priv);
}

static void libschroedinger_decode_frame_free(void *frame)
{
    schro_frame_unref(frame);
}

@@ -168,11 +166,11 @@ static void libschroedinger_handle_first_access_unit(AVCodecContext *avccontext)
    FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;
    SchroDecoder *decoder = p_schro_params->decoder;

    p_schro_params->format = schro_decoder_get_video_format(decoder);

    /* Tell FFmpeg about sequence details. */
    if (avcodec_check_dimensions(avccontext, p_schro_params->format->width,
                                 p_schro_params->format->height) < 0) {
        av_log(avccontext, AV_LOG_ERROR, "invalid dimensions (%dx%d)\n",
               p_schro_params->format->width, p_schro_params->format->height);
        avccontext->height = avccontext->width = 0;

@@ -180,14 +178,13 @@ static void libschroedinger_handle_first_access_unit(AVCodecContext *avccontext)
    }

    avccontext->height = p_schro_params->format->height;
    avccontext->width = p_schro_params->format->width;
    avccontext->pix_fmt = GetFfmpegChromaFormat(p_schro_params->format->chroma_format);

    if (ff_get_schro_frame_format(p_schro_params->format->chroma_format,
                                  &p_schro_params->frame_format) == -1) {
        av_log(avccontext, AV_LOG_ERROR,
               "This codec currently only supports planar YUV 4:2:0, 4:2:2 "
               "and 4:4:4 formats.\n");
        return;
    }

@@ -221,7 +218,7 @@ static int libschroedinger_decode_frame(AVCodecContext *avccontext,
    *data_size = 0;

    FfmpegSchroParseContextInit(&parse_ctx, buf, buf_size);
    if (!buf_size) {
        if (!p_schro_params->eos_signalled) {
            state = schro_decoder_push_end_of_stream(decoder);

@@ -236,77 +233,74 @@ static int libschroedinger_decode_frame(AVCodecContext *avccontext,
            if (SCHRO_PARSE_CODE_IS_PICTURE(enc_buf->data[4]) &&
                SCHRO_PARSE_CODE_NUM_REFS(enc_buf->data[4]) > 0)
                avccontext->has_b_frames = 1;
            state = schro_decoder_push(decoder, enc_buf);
            if (state == SCHRO_DECODER_FIRST_ACCESS_UNIT)
                libschroedinger_handle_first_access_unit(avccontext);
            go = 1;
        } else
            outer = 0;
        format = p_schro_params->format;

        while (go) {
            /* Parse data and process result. */
            state = schro_decoder_wait(decoder);
            switch (state) {
            case SCHRO_DECODER_FIRST_ACCESS_UNIT:
                libschroedinger_handle_first_access_unit(avccontext);
                break;

            case SCHRO_DECODER_NEED_BITS:
                /* Need more input data - stop iterating over what we have. */
                go = 0;
                break;

            case SCHRO_DECODER_NEED_FRAME:
                /* Decoder needs a frame - create one and push it in. */
                frame = schro_frame_new_and_alloc(NULL,
                                                  p_schro_params->frame_format,
                                                  format->width,
                                                  format->height);
                schro_decoder_add_output_picture(decoder, frame);
                break;

            case SCHRO_DECODER_OK:
                /* Pull a frame out of the decoder. */
                frame = schro_decoder_pull(decoder);

                if (frame)
                    ff_dirac_schro_queue_push_back(&p_schro_params->dec_frame_queue,
                                                   frame);
                break;
            case SCHRO_DECODER_EOS:
                go = 0;
                p_schro_params->eos_pulled = 1;
                schro_decoder_reset(decoder);
                outer = 0;
                break;

            case SCHRO_DECODER_ERROR:
                return -1;
                break;
            }
        }
    } while (outer);

    /* Grab next frame to be returned from the top of the queue. */
    frame = ff_dirac_schro_queue_pop(&p_schro_params->dec_frame_queue);

    if (frame) {
        memcpy(p_schro_params->dec_pic.data[0],
               frame->components[0].data,
               frame->components[0].length);

        memcpy(p_schro_params->dec_pic.data[1],
               frame->components[1].data,
               frame->components[1].length);

        memcpy(p_schro_params->dec_pic.data[2],
               frame->components[2].data,
               frame->components[2].length);

        /* Fill picture with current buffer data from Schroedinger. */
        avpicture_fill(picture, p_schro_params->dec_pic.data[0],

@@ -316,7 +310,7 @@ static int libschroedinger_decode_frame(AVCodecContext *avccontext,
        *data_size = sizeof(AVPicture);

        /* Now free the frame resources. */
        libschroedinger_decode_frame_free(frame);
    }
    return buf_size;
}

@@ -326,36 +320,36 @@ static av_cold int libschroedinger_decode_close(AVCodecContext *avccontext)
{
    FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;
    /* Free the decoder. */
    schro_decoder_free(p_schro_params->decoder);
    av_freep(&p_schro_params->format);

    avpicture_free(&p_schro_params->dec_pic);

    /* Free data in the output frame queue. */
    ff_dirac_schro_queue_free(&p_schro_params->dec_frame_queue,
                              libschroedinger_decode_frame_free);

    return 0;
}

static void libschroedinger_flush(AVCodecContext *avccontext)
{
    /* Got a seek request. Free the decoded frames queue and then reset
     * the decoder */
    FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;

    /* Free data in the output frame queue. */
    ff_dirac_schro_queue_free(&p_schro_params->dec_frame_queue,
                              libschroedinger_decode_frame_free);
    ff_dirac_schro_queue_init(&p_schro_params->dec_frame_queue);
    schro_decoder_reset(p_schro_params->decoder);

    p_schro_params->eos_pulled = 0;
    p_schro_params->eos_signalled = 0;
}

AVCodec libschroedinger_decoder = {
    "libschroedinger",
    CODEC_TYPE_VIDEO,
    CODEC_ID_DIRAC,
    sizeof(FfmpegSchroDecoderParams),
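Because the decoder above queues decoded frames internally and advertises CODEC_CAP_DELAY (note the eos_signalled/eos_pulled handling when buf_size is 0), a caller has to keep feeding it empty packets at end of stream until no more pictures come out. A minimal drain sketch against the public libavcodec decoding API of this period, assuming avctx was opened by the caller and ignoring error reporting:

    AVPacket pkt;
    AVFrame *picture  = avcodec_alloc_frame();
    int got_picture   = 0;

    av_init_packet(&pkt);
    pkt.data = NULL;   /* empty packet: the wrapper pushes end-of-stream once */
    pkt.size = 0;

    do {
        if (avcodec_decode_video2(avctx, picture, &got_picture, &pkt) < 0)
            break;
        if (got_picture) {
            /* consume one delayed picture from the wrapper's frame queue */
        }
    } while (got_picture);

    av_free(picture);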


@@ -40,8 +40,7 @@
/** libschroedinger encoder private data */
typedef struct FfmpegSchroEncoderParams {
    /** Schroedinger video format */
    SchroVideoFormat *format;

@@ -86,16 +85,16 @@ static int SetSchroChromaFormat(AVCodecContext *avccontext)
    for (idx = 0; idx < num_formats; ++idx) {
        if (ffmpeg_schro_pixel_format_map[idx].ff_pix_fmt ==
            avccontext->pix_fmt) {
            p_schro_params->format->chroma_format =
                ffmpeg_schro_pixel_format_map[idx].schro_pix_fmt;
            return 0;
        }
    }

    av_log(avccontext, AV_LOG_ERROR,
           "This codec currently only supports planar YUV 4:2:0, 4:2:2"
           " and 4:4:4 formats.\n");

    return -1;
}

@@ -121,8 +120,8 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext)
    preset = ff_get_schro_video_format_preset(avccontext);
    p_schro_params->format =
        schro_encoder_get_video_format(p_schro_params->encoder);
    schro_video_format_set_std_video_format(p_schro_params->format, preset);
    p_schro_params->format->width = avccontext->width;
    p_schro_params->format->height = avccontext->height;

    if (SetSchroChromaFormat(avccontext) == -1)

@@ -130,9 +129,9 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext)
    if (ff_get_schro_frame_format(p_schro_params->format->chroma_format,
                                  &p_schro_params->frame_format) == -1) {
        av_log(avccontext, AV_LOG_ERROR,
               "This codec currently supports only planar YUV 4:2:0, 4:2:2"
               " and 4:4:4 formats.\n");
        return -1;
    }

@@ -146,18 +145,17 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext)
    avccontext->coded_frame = &p_schro_params->picture;

    if (!avccontext->gop_size) {
        schro_encoder_setting_set_double(p_schro_params->encoder,
                                         "gop_structure",
                                         SCHRO_ENCODER_GOP_INTRA_ONLY);

        if (avccontext->coder_type == FF_CODER_TYPE_VLC)
            schro_encoder_setting_set_double(p_schro_params->encoder,
                                             "enable_noarith", 1);
    } else {
        schro_encoder_setting_set_double(p_schro_params->encoder,
                                         "gop_structure",
                                         SCHRO_ENCODER_GOP_BIREF);
        avccontext->has_b_frames = 1;
    }

@@ -165,39 +163,38 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext)
    if (avccontext->flags & CODEC_FLAG_QSCALE) {
        if (!avccontext->global_quality) {
            /* lossless coding */
            schro_encoder_setting_set_double(p_schro_params->encoder,
                                             "rate_control",
                                             SCHRO_ENCODER_RATE_CONTROL_LOSSLESS);
        } else {
            int noise_threshold;
            schro_encoder_setting_set_double(p_schro_params->encoder,
                                             "rate_control",
                                             SCHRO_ENCODER_RATE_CONTROL_CONSTANT_NOISE_THRESHOLD);

            noise_threshold = avccontext->global_quality / FF_QP2LAMBDA;
            if (noise_threshold > 100)
                noise_threshold = 100;

            schro_encoder_setting_set_double(p_schro_params->encoder,
                                             "noise_threshold",
                                             noise_threshold);
        }
    } else {
        schro_encoder_setting_set_double(p_schro_params->encoder,
                                         "rate_control",
                                         SCHRO_ENCODER_RATE_CONTROL_CONSTANT_BITRATE);

        schro_encoder_setting_set_double(p_schro_params->encoder,
                                         "bitrate",
                                         avccontext->bit_rate);
    }

    if (avccontext->flags & CODEC_FLAG_INTERLACED_ME)
        /* All material can be coded as interlaced or progressive
           irrespective of the type of source material. */
        schro_encoder_setting_set_double(p_schro_params->encoder,
                                         "interlaced_coding", 1);

    /* FIXME: Signal range hardcoded to 8-bit data until both libschroedinger
     * and libdirac support other bit-depth data. */

@@ -209,32 +206,32 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext)
                                        p_schro_params->format);

    /* Set the debug level. */
    schro_debug_set_level(avccontext->debug);

    schro_encoder_start(p_schro_params->encoder);

    /* Initialize the encoded frame queue. */
    ff_dirac_schro_queue_init(&p_schro_params->enc_frame_queue);
    return 0;
}

static SchroFrame *libschroedinger_frame_from_data(AVCodecContext *avccontext,
                                                   void *in_data)
{
    FfmpegSchroEncoderParams* p_schro_params = avccontext->priv_data;
    SchroFrame *in_frame;
    /* Input line size may differ from what the codec supports. Especially
     * when transcoding from one format to another. So use avpicture_layout
     * to copy the frame. */
    in_frame = schro_frame_new_and_alloc(NULL,
                                         p_schro_params->frame_format,
                                         p_schro_params->format->width,
                                         p_schro_params->format->height);

    avpicture_layout((AVPicture *)in_data, avccontext->pix_fmt,
                     avccontext->width, avccontext->height,
                     in_frame->components[0].data,
                     p_schro_params->frame_size);

    return in_frame;
}

@@ -243,7 +240,7 @@ static void SchroedingerFreeFrame(void *data)
{
    FfmpegDiracSchroEncodedFrame *enc_frame = data;

    av_freep(&(enc_frame->p_encbuf));
    av_free(enc_frame);
}

@@ -269,8 +266,8 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
        }
    } else {
        /* Allocate frame data to schro input buffer. */
        SchroFrame *in_frame = libschroedinger_frame_from_data(avccontext,
                                                               data);
        /* Load next frame. */
        schro_encoder_push_frame(encoder, in_frame);
    }

@@ -280,28 +277,24 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
    /* Now check to see if we have any output from the encoder. */
    while (go) {
        SchroStateEnum state;
        state = schro_encoder_wait(encoder);
        switch (state) {
        case SCHRO_STATE_HAVE_BUFFER:
        case SCHRO_STATE_END_OF_STREAM:
            enc_buf = schro_encoder_pull(encoder, &presentation_frame);
            assert(enc_buf->length > 0);
            assert(enc_buf->length <= buf_size);
            parse_code = enc_buf->data[4];

            /* All non-frame data is prepended to actual frame data to
             * be able to set the pts correctly. So we don't write data
             * to the frame output queue until we actually have a frame
             */
            p_schro_params->enc_buf = av_realloc(p_schro_params->enc_buf,
                                                 p_schro_params->enc_buf_size + enc_buf->length);

            memcpy(p_schro_params->enc_buf + p_schro_params->enc_buf_size,
                   enc_buf->data, enc_buf->length);

            p_schro_params->enc_buf_size += enc_buf->length;

@@ -312,7 +305,7 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
            }

            if (!SCHRO_PARSE_CODE_IS_PICTURE(parse_code)) {
                schro_buffer_unref(enc_buf);
                break;
            }

@@ -332,12 +325,12 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
                                       (enc_buf->data[15] << 8) +
                                        enc_buf->data[16];

            ff_dirac_schro_queue_push_back(&p_schro_params->enc_frame_queue,
                                           p_frame_output);

            p_schro_params->enc_buf_size = 0;
            p_schro_params->enc_buf = NULL;

            schro_buffer_unref(enc_buf);

            break;

@@ -360,8 +353,7 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
        p_schro_params->eos_pulled)
        last_frame_in_sequence = 1;

    p_frame_output = ff_dirac_schro_queue_pop(&p_schro_params->enc_frame_queue);

    if (!p_frame_output)
        return 0;

@@ -376,17 +368,16 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
    /* Append the end of sequence information to the last frame in the
     * sequence. */
    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) {
        memcpy(frame + enc_size, p_schro_params->enc_buf,
               p_schro_params->enc_buf_size);

        enc_size += p_schro_params->enc_buf_size;
        av_freep(&p_schro_params->enc_buf);
        p_schro_params->enc_buf_size = 0;
    }

    /* free frame */
    SchroedingerFreeFrame(p_frame_output);

    return enc_size;
}

@@ -397,12 +388,12 @@ static int libschroedinger_encode_close(AVCodecContext *avccontext)
    FfmpegSchroEncoderParams* p_schro_params = avccontext->priv_data;

    /* Close the encoder. */
    schro_encoder_free(p_schro_params->encoder);

    /* Free data in the output frame queue. */
    ff_dirac_schro_queue_free(&p_schro_params->enc_frame_queue,
                              SchroedingerFreeFrame);

    /* Free the encoder buffer. */

@@ -412,7 +403,7 @@ static int libschroedinger_encode_close(AVCodecContext *avccontext)
    /* Free the video format structure. */
    av_freep(&p_schro_params->format);

    return 0;
}

@@ -424,7 +415,7 @@ AVCodec libschroedinger_encoder = {
    libschroedinger_encode_init,
    libschroedinger_encode_frame,
    libschroedinger_encode_close,
    .capabilities = CODEC_CAP_DELAY,
    .pix_fmts = (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("libschroedinger Dirac 2.2"),
};
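Both wrapper encoders in this commit buffer encoded pictures in the shared queue and set CODEC_CAP_DELAY, so a stream only ends cleanly if the caller flushes them with NULL frames until nothing more is returned. A sketch of that flush loop against the encoding API of this period; avctx, outbuf, outbuf_size and outfile are assumed to have been set up by the caller and are not part of this commit:

    int out_size;

    /* A NULL picture makes the wrapper signal end-of-sequence once and then
     * drain its internal frame queue; it returns 0 once the queue is empty. */
    do {
        out_size = avcodec_encode_video(avctx, outbuf, outbuf_size, NULL);
        if (out_size > 0)
            fwrite(outbuf, 1, out_size, outfile);   /* hypothetical output file */
    } while (out_size > 0);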