mpegvideo_enc: K&R cosmetics

Signed-off-by: Ronald S. Bultje <rsbultje@gmail.com>
Author:    Aneesh Dogra
Committer: Ronald S. Bultje
Date:      2011-12-27 11:38:07 +05:30
Parent:    2702ec2ef4
Commit:    bd96be6e27


--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -62,8 +62,10 @@ static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int
 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
 
-void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
-                       const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
+void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
+                       uint16_t (*qmat16)[2][64],
+                       const uint16_t *quant_matrix,
+                       int bias, int qmin, int qmax, int intra)
 {
     int qscale;
     int shift = 0;
@@ -78,10 +80,11 @@ void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][6
             ) {
             for (i = 0; i < 64; i++) {
                 const int j = dsp->idct_permutation[i];
-                /* 16 <= qscale * quant_matrix[i] <= 7905 */
-                /* 19952 <= ff_aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
-                /* (1 << 36) / 19952 >= (1 << 36) / (ff_aanscales[i] * qscale * quant_matrix[i]) >= (1 << 36) / 249205026 */
-                /* 3444240 >= (1 << 36) / (ff_aanscales[i] * qscale * quant_matrix[i]) >= 275 */
+                /* 16 <= qscale * quant_matrix[i] <= 7905
+                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
+                 *             19952 <=              x  <= 249205026
+                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
+                 *           3444240 >= (1 << 36) / (x) >= 275 */
                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
                                         (qscale * quant_matrix[j]));
 
@@ -93,28 +96,37 @@ void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][6
             ) {
             for (i = 0; i < 64; i++) {
                 const int j = dsp->idct_permutation[i];
-                /* 16 <= qscale * quant_matrix[i] <= 7905 */
-                /* 19952 <= ff_aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
-                /* (1 << 36) / 19952 >= (1 << 36) / (ff_aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
-                /* 3444240 >= (1 << 36) / (ff_aanscales[i] * qscale * quant_matrix[i]) >= 275 */
+                /* 16 <= qscale * quant_matrix[i] <= 7905
+                 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
+                 *             19952 <=              x  <= 249205026
+                 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
+                 *           3444240 >= (1 << 36) / (x) >= 275 */
                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
-                                        (ff_aanscales[i] * qscale * quant_matrix[j]));
+                                        (ff_aanscales[i] * qscale *
+                                         quant_matrix[j]));
             }
         } else {
             for (i = 0; i < 64; i++) {
                 const int j = dsp->idct_permutation[i];
                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
-                   So 16 <= qscale * quant_matrix[i] <= 7905
-                   so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
-                   so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
-                */
-                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
-//                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
-                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
-                if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
-                qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
+                 * Assume x = qscale * quant_matrix[i]
+                 * So 16 <= x <= 7905
+                 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
+                 * so 32768 >= (1 << 19) / (x) >= 67 */
+                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
+                                        (qscale * quant_matrix[j]));
+                //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
+                //                    (qscale * quant_matrix[i]);
+                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
+                                       (qscale * quant_matrix[j]);
+                if (qmat16[qscale][0][i] == 0 ||
+                    qmat16[qscale][0][i] == 128 * 256)
+                    qmat16[qscale][0][i] = 128 * 256 - 1;
+                qmat16[qscale][1][i] =
+                    ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
+                                qmat16[qscale][0][i]);
             }
         }
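
Note: the comment block above is the whole safety argument for the reciprocal-multiply quantization in ff_convert_matrix(): instead of dividing every DCT coefficient by qscale * quant_matrix[i], the encoder multiplies by a precomputed fixed-point reciprocal and shifts. The following standalone sketch is illustrative only and is not part of the commit; it assumes QMAT_SHIFT == 22, which is what the (1 << 36) == 1 << (QMAT_SHIFT + 14) pairing in the comments implies.

/* Standalone illustration (not part of this commit): reciprocal-multiply
 * quantization, assuming QMAT_SHIFT == 22. */
#include <stdint.h>
#include <stdio.h>

#define QMAT_SHIFT 22

int main(void)
{
    /* per the comment above: 16 <= qscale * quant_matrix[i] <= 7905 */
    for (int d = 16; d <= 7905; d++) {
        int32_t qmat  = (int32_t)((UINT64_C(1) << QMAT_SHIFT) / d);
        int     coef  = 2048;                      /* example DCT coefficient */
        int     exact = coef / d;                  /* true integer quotient   */
        int     fast  = (int)(((int64_t)coef * qmat) >> QMAT_SHIFT);
        /* the truncated reciprocal can only round the result down, by at most 1 */
        if (fast > exact || exact - fast > 1)
            printf("unexpected mismatch at d=%d: %d vs %d\n", d, exact, fast);
    }
    return 0;
}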
@@ -133,18 +145,24 @@ void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][6
         }
     }
     if (shift) {
-        av_log(NULL, AV_LOG_INFO, "Warning, QMAT_SHIFT is larger than %d, overflows possible\n", QMAT_SHIFT - shift);
+        av_log(NULL, AV_LOG_INFO,
+               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
+               QMAT_SHIFT - shift);
     }
 }
 
-static inline void update_qscale(MpegEncContext *s){
-    s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
+static inline void update_qscale(MpegEncContext *s)
+{
+    s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
+                (FF_LAMBDA_SHIFT + 7);
     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
 
-    s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
+    s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
+                 FF_LAMBDA_SHIFT;
 }
 
-void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix){
+void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
+{
     int i;
 
     if (matrix) {
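
Note: the update_qscale() expression above is the lambda-to-quantizer mapping. With FF_LAMBDA_SHIFT == 7 and FF_LAMBDA_SCALE == 128 (values assumed from avcodec.h, they are not shown in this diff), (lambda * 139 + 128 * 64) >> 14 is roughly lambda / 117.9 rounded, i.e. the inverse of scaling a quantizer by FF_QP2LAMBDA (118). A tiny standalone check, illustrative only:

/* Standalone illustration (not part of this commit): round-tripping
 * qscale -> lambda -> qscale through the expression in update_qscale().
 * The FF_LAMBDA_* and FF_QP2LAMBDA values are assumptions, not taken
 * from this diff. */
#include <stdio.h>

#define FF_LAMBDA_SHIFT 7
#define FF_LAMBDA_SCALE (1 << FF_LAMBDA_SHIFT)

int main(void)
{
    for (int qscale = 1; qscale <= 31; qscale++) {
        int lambda = qscale * 118;                     /* FF_QP2LAMBDA == 118 */
        int back   = (lambda * 139 + FF_LAMBDA_SCALE * 64) >>
                     (FF_LAMBDA_SHIFT + 7);
        /* 139 / 2^14 is roughly 1/118, so the round trip recovers qscale */
        printf("qscale %2d -> lambda %4d -> qscale %2d\n", qscale, lambda, back);
    }
    return 0;
}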
@@ -159,18 +177,23 @@ void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix){
 /**
  * init s->current_picture.qscale_table from s->lambda_table
  */
-void ff_init_qscale_tab(MpegEncContext *s){
+void ff_init_qscale_tab(MpegEncContext *s)
+{
     int8_t * const qscale_table = s->current_picture.f.qscale_table;
     int i;
 
     for (i = 0; i < s->mb_num; i++) {
         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
-        qscale_table[ s->mb_index2xy[i] ]= av_clip(qp, s->avctx->qmin, s->avctx->qmax);
+        qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
+                                                  s->avctx->qmax);
     }
 }
 
-static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){
+static void copy_picture_attributes(MpegEncContext *s,
+                                    AVFrame *dst,
+                                    AVFrame *src)
+{
     int i;
 
     dst->pict_type = src->pict_type;
@@ -190,26 +213,34 @@ static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *sr
         if (!src->ref_index[0])
             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
         if (src->motion_subsample_log2 != dst->motion_subsample_log2)
-            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
+            av_log(s->avctx, AV_LOG_ERROR,
+                   "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
                    src->motion_subsample_log2, dst->motion_subsample_log2);
 
-        memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
+        memcpy(dst->mb_type, src->mb_type,
+               s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
 
         for (i = 0; i < 2; i++) {
-            int stride= ((16*s->mb_width )>>src->motion_subsample_log2) + 1;
+            int stride = ((16 * s->mb_width ) >>
+                          src->motion_subsample_log2) + 1;
             int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
 
-            if(src->motion_val[i] && src->motion_val[i] != dst->motion_val[i]){
-                memcpy(dst->motion_val[i], src->motion_val[i], 2*stride*height*sizeof(int16_t));
+            if (src->motion_val[i] &&
+                src->motion_val[i] != dst->motion_val[i]) {
+                memcpy(dst->motion_val[i], src->motion_val[i],
+                       2 * stride * height * sizeof(int16_t));
             }
             if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
-                memcpy(dst->ref_index[i], src->ref_index[i], s->mb_stride*4*s->mb_height*sizeof(int8_t));
+                memcpy(dst->ref_index[i], src->ref_index[i],
+                       s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
             }
         }
     }
 }
 
-static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src){
+static void update_duplicate_context_after_me(MpegEncContext *dst,
+                                              MpegEncContext *src)
+{
 #define COPY(a) dst->a= src->a
     COPY(pict_type);
     COPY(current_picture);
@@ -230,7 +261,8 @@ static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContex
  * Set the given MpegEncContext to defaults for encoding.
  * the changed fields will not depend upon the prior state of the MpegEncContext.
  */
-static void MPV_encode_defaults(MpegEncContext *s){
+static void MPV_encode_defaults(MpegEncContext *s)
+{
     int i;
     MPV_common_defaults(s);
@@ -252,21 +284,32 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
     switch (avctx->codec_id) {
     case CODEC_ID_MPEG2VIDEO:
-        if(avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P){
-            av_log(avctx, AV_LOG_ERROR, "only YUV420 and YUV422 are supported\n");
+        if (avctx->pix_fmt != PIX_FMT_YUV420P &&
+            avctx->pix_fmt != PIX_FMT_YUV422P) {
+            av_log(avctx, AV_LOG_ERROR,
+                   "only YUV420 and YUV422 are supported\n");
             return -1;
         }
         break;
     case CODEC_ID_LJPEG:
-        if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUVJ422P && avctx->pix_fmt != PIX_FMT_YUVJ444P && avctx->pix_fmt != PIX_FMT_BGRA &&
-           ((avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P && avctx->pix_fmt != PIX_FMT_YUV444P) || avctx->strict_std_compliance>FF_COMPLIANCE_UNOFFICIAL)){
+        if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
+            avctx->pix_fmt != PIX_FMT_YUVJ422P &&
+            avctx->pix_fmt != PIX_FMT_YUVJ444P &&
+            avctx->pix_fmt != PIX_FMT_BGRA &&
+            ((avctx->pix_fmt != PIX_FMT_YUV420P &&
+              avctx->pix_fmt != PIX_FMT_YUV422P &&
+              avctx->pix_fmt != PIX_FMT_YUV444P) ||
+             avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
             return -1;
         }
         break;
     case CODEC_ID_MJPEG:
-        if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUVJ422P &&
-           ((avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P) || avctx->strict_std_compliance>FF_COMPLIANCE_UNOFFICIAL)){
+        if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
+            avctx->pix_fmt != PIX_FMT_YUVJ422P &&
+            ((avctx->pix_fmt != PIX_FMT_YUV420P &&
+              avctx->pix_fmt != PIX_FMT_YUV422P) ||
+             avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
             return -1;
         }
@@ -293,8 +336,10 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
     s->bit_rate = avctx->bit_rate;
     s->width = avctx->width;
     s->height = avctx->height;
-    if(avctx->gop_size > 600 && avctx->strict_std_compliance>FF_COMPLIANCE_EXPERIMENTAL){
-        av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n");
+    if (avctx->gop_size > 600 &&
+        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Warning keyframe interval too large! reducing it ...\n");
         avctx->gop_size = 600;
     }
     s->gop_size = avctx->gop_size;
@@ -328,14 +373,14 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
     /* Fixed QSCALE */
     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
 
-    s->adaptive_quant= (   s->avctx->lumi_masking
-                        || s->avctx->dark_masking
-                        || s->avctx->temporal_cplx_masking
-                        || s->avctx->spatial_cplx_masking
-                        || s->avctx->p_masking
-                        || s->avctx->border_masking
-                        || (s->flags&CODEC_FLAG_QP_RD))
-                       && !s->fixed_qscale;
+    s->adaptive_quant = (s->avctx->lumi_masking ||
+                         s->avctx->dark_masking ||
+                         s->avctx->temporal_cplx_masking ||
+                         s->avctx->spatial_cplx_masking ||
+                         s->avctx->p_masking ||
+                         s->avctx->border_masking ||
+                         (s->flags & CODEC_FLAG_QP_RD)) &&
+                        !s->fixed_qscale;
 
     s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
 
 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
@@ -346,12 +391,15 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
 #endif
 
     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
-        av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
+        av_log(avctx, AV_LOG_ERROR,
+               "a vbv buffer size is needed, "
+               "for encoding with a maximum bitrate\n");
         return -1;
     }
 
     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
-        av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
+        av_log(avctx, AV_LOG_INFO,
+               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
     }
 
     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
@@ -364,40 +412,55 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
         return -1;
     }
 
-    if(avctx->rc_max_rate && avctx->rc_max_rate == avctx->bit_rate && avctx->rc_max_rate != avctx->rc_min_rate){
-        av_log(avctx, AV_LOG_INFO, "impossible bitrate constraints, this will fail\n");
+    if (avctx->rc_max_rate &&
+        avctx->rc_max_rate == avctx->bit_rate &&
+        avctx->rc_max_rate != avctx->rc_min_rate) {
+        av_log(avctx, AV_LOG_INFO,
+               "impossible bitrate constraints, this will fail\n");
     }
 
-    if(avctx->rc_buffer_size && avctx->bit_rate*(int64_t)avctx->time_base.num > avctx->rc_buffer_size * (int64_t)avctx->time_base.den){
+    if (avctx->rc_buffer_size &&
+        avctx->bit_rate * (int64_t)avctx->time_base.num >
+            avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
         return -1;
     }
 
-    if(!s->fixed_qscale && avctx->bit_rate*av_q2d(avctx->time_base) > avctx->bit_rate_tolerance){
-        av_log(avctx, AV_LOG_ERROR, "bitrate tolerance too small for bitrate\n");
+    if (!s->fixed_qscale &&
+        avctx->bit_rate * av_q2d(avctx->time_base) >
+            avctx->bit_rate_tolerance) {
+        av_log(avctx, AV_LOG_ERROR,
+               "bitrate tolerance too small for bitrate\n");
         return -1;
     }
 
-    if( s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate
-        && (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO)
-        && 90000LL * (avctx->rc_buffer_size-1) > s->avctx->rc_max_rate*0xFFFFLL){
-
-        av_log(avctx, AV_LOG_INFO, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!\n");
+    if (s->avctx->rc_max_rate &&
+        s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
+        (s->codec_id == CODEC_ID_MPEG1VIDEO ||
+         s->codec_id == CODEC_ID_MPEG2VIDEO) &&
+        90000LL * (avctx->rc_buffer_size - 1) >
+            s->avctx->rc_max_rate * 0xFFFFLL) {
+        av_log(avctx, AV_LOG_INFO,
+               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
+               "specified vbv buffer is too large for the given bitrate!\n");
     }
 
-    if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4
-       && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P && s->codec_id != CODEC_ID_FLV1){
+    if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4 &&
+        s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P &&
+        s->codec_id != CODEC_ID_FLV1) {
         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
         return -1;
     }
 
     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
-        av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decision\n");
+        av_log(avctx, AV_LOG_ERROR,
+               "OBMC is only supported with simple mb decision\n");
         return -1;
     }
 
 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
-    if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
+    if (s->obmc && s->codec_id != CODEC_ID_H263 &&
+        s->codec_id != CODEC_ID_H263P) {
         av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
         return -1;
     }
@@ -410,32 +473,42 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
     if (s->data_partitioning && s->codec_id != CODEC_ID_MPEG4) {
-        av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n");
+        av_log(avctx, AV_LOG_ERROR,
+               "data partitioning not supported by codec\n");
         return -1;
     }
 #endif
 
-    if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
+    if (s->max_b_frames &&
+        s->codec_id != CODEC_ID_MPEG4 &&
+        s->codec_id != CODEC_ID_MPEG1VIDEO &&
+        s->codec_id != CODEC_ID_MPEG2VIDEO) {
         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
         return -1;
     }
 
-    if ((s->codec_id == CODEC_ID_MPEG4 || s->codec_id == CODEC_ID_H263 ||
-         s->codec_id == CODEC_ID_H263P) &&
-        (avctx->sample_aspect_ratio.num > 255 || avctx->sample_aspect_ratio.den > 255)) {
-        av_log(avctx, AV_LOG_ERROR, "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
+    if ((s->codec_id == CODEC_ID_MPEG4 ||
+         s->codec_id == CODEC_ID_H263 ||
+         s->codec_id == CODEC_ID_H263P) &&
+        (avctx->sample_aspect_ratio.num > 255 ||
+         avctx->sample_aspect_ratio.den > 255)) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
         return -1;
     }
 
-    if((s->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN))
-       && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO){
+    if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME |
+                     CODEC_FLAG_ALT_SCAN)) &&
+        s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO) {
         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
         return -1;
     }
 
-    if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
-        av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supported by codec\n");
+    // FIXME mpeg2 uses that too
+    if (s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4) {
+        av_log(avctx, AV_LOG_ERROR,
+               "mpeg2 style quantization not supported by codec\n");
         return -1;
     }
@@ -444,28 +517,36 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
         return -1;
     }
 
-    if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){
+    if ((s->flags & CODEC_FLAG_QP_RD) &&
+        s->avctx->mb_decision != FF_MB_DECISION_RD) {
         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
         return -1;
     }
 
-    if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){
-        av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection are not supported yet, set threshold to 1000000000\n");
+    if (s->avctx->scenechange_threshold < 1000000000 &&
+        (s->flags & CODEC_FLAG_CLOSED_GOP)) {
+        av_log(avctx, AV_LOG_ERROR,
+               "closed gop with scene change detection are not supported yet, "
+               "set threshold to 1000000000\n");
         return -1;
     }
 
-    if((s->flags2 & CODEC_FLAG2_INTRA_VLC) && s->codec_id != CODEC_ID_MPEG2VIDEO){
-        av_log(avctx, AV_LOG_ERROR, "intra vlc table not supported by codec\n");
+    if ((s->flags2 & CODEC_FLAG2_INTRA_VLC) &&
+        s->codec_id != CODEC_ID_MPEG2VIDEO) {
+        av_log(avctx, AV_LOG_ERROR,
+               "intra vlc table not supported by codec\n");
         return -1;
     }
 
     if (s->flags & CODEC_FLAG_LOW_DELAY) {
         if (s->codec_id != CODEC_ID_MPEG2VIDEO) {
-            av_log(avctx, AV_LOG_ERROR, "low delay forcing is only available for mpeg2\n");
+            av_log(avctx, AV_LOG_ERROR,
+                   "low delay forcing is only available for mpeg2\n");
             return -1;
         }
         if (s->max_b_frames != 0) {
-            av_log(avctx, AV_LOG_ERROR, "b frames cannot be used with low delay\n");
+            av_log(avctx, AV_LOG_ERROR,
+                   "b frames cannot be used with low delay\n");
             return -1;
         }
     }
@@ -473,25 +554,33 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
     if (s->q_scale_type == 1) {
 #if FF_API_MPEGVIDEO_GLOBAL_OPTS
         if (s->codec_id != CODEC_ID_MPEG2VIDEO) {
-            av_log(avctx, AV_LOG_ERROR, "non linear quant is only available for mpeg2\n");
+            av_log(avctx, AV_LOG_ERROR,
+                   "non linear quant is only available for mpeg2\n");
             return -1;
         }
 #endif
         if (avctx->qmax > 12) {
-            av_log(avctx, AV_LOG_ERROR, "non linear quant only supports qmax <= 12 currently\n");
+            av_log(avctx, AV_LOG_ERROR,
+                   "non linear quant only supports qmax <= 12 currently\n");
             return -1;
         }
     }
 
-    if(s->avctx->thread_count > 1 && s->codec_id != CODEC_ID_MPEG4
-       && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO
-       && (s->codec_id != CODEC_ID_H263P || !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT))){
-        av_log(avctx, AV_LOG_ERROR, "multi threaded encoding not supported by codec\n");
+    if (s->avctx->thread_count > 1 &&
+        s->codec_id != CODEC_ID_MPEG4 &&
+        s->codec_id != CODEC_ID_MPEG1VIDEO &&
+        s->codec_id != CODEC_ID_MPEG2VIDEO &&
+        (s->codec_id != CODEC_ID_H263P ||
+         !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT))) {
+        av_log(avctx, AV_LOG_ERROR,
+               "multi threaded encoding not supported by codec\n");
         return -1;
     }
 
     if (s->avctx->thread_count < 1) {
-        av_log(avctx, AV_LOG_ERROR, "automatic thread number detection not supported by codec, patch welcome\n");
+        av_log(avctx, AV_LOG_ERROR,
+               "automatic thread number detection not supported by codec,"
+               "patch welcome\n");
         return -1;
     }
@@ -505,16 +594,19 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
     i = (INT_MAX / 2 + 128) >> 8;
     if (avctx->me_threshold >= i) {
-        av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n", i - 1);
+        av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
+               i - 1);
         return -1;
     }
 
     if (avctx->mb_threshold >= i) {
-        av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n", i - 1);
+        av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
+               i - 1);
         return -1;
     }
 
     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
-        av_log(avctx, AV_LOG_INFO, "notice: b_frame_strategy only affects the first pass\n");
+        av_log(avctx, AV_LOG_INFO,
+               "notice: b_frame_strategy only affects the first pass\n");
         avctx->b_frame_strategy = 0;
     }
@@ -526,12 +618,15 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
         //return -1;
     }
 
-    if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO || s->codec_id==CODEC_ID_MJPEG){
-        s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
+    if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG1VIDEO ||
+        s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG) {
+        // (a + x * 3 / 8) / x
+        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
         s->inter_quant_bias = 0;
     } else {
         s->intra_quant_bias = 0;
-        s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
+        // (a - x / 4) / x
+        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
     }
 
     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
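
Note: the "(a + x * 3 / 8) / x" and "(a - x / 4) / x" comments above describe the rounding offsets the two bias values encode, expressed as fractions of one quantization step: +3/8 of a step for intra blocks and -1/4 of a step for inter blocks. The standalone snippet below just prints those fractions; it is illustrative only and assumes QUANT_BIAS_SHIFT == 8, which is not shown in this diff.

/* Standalone illustration (not part of this commit): the default quant
 * biases as fractions of a quantization step, assuming QUANT_BIAS_SHIFT == 8
 * (i.e. biases are expressed in 1/256ths of a step). */
#include <stdio.h>

#define QUANT_BIAS_SHIFT 8

int main(void)
{
    int intra_bias = 3 << (QUANT_BIAS_SHIFT - 3);     /* +3/8 of a step */
    int inter_bias = -(1 << (QUANT_BIAS_SHIFT - 2));  /* -1/4 of a step */

    printf("intra bias: %d/%d = %+.3f\n", intra_bias, 1 << QUANT_BIAS_SHIFT,
           intra_bias / (double)(1 << QUANT_BIAS_SHIFT));
    printf("inter bias: %d/%d = %+.3f\n", inter_bias, 1 << QUANT_BIAS_SHIFT,
           inter_bias / (double)(1 << QUANT_BIAS_SHIFT));
    return 0;
}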
@@ -539,12 +634,16 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
         s->inter_quant_bias = avctx->inter_quant_bias;
 
-    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
+    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
+                                  &chroma_v_shift);
 
-    if(avctx->codec_id == CODEC_ID_MPEG4 && s->avctx->time_base.den > (1<<16)-1){
-        av_log(avctx, AV_LOG_ERROR, "timebase %d/%d not supported by MPEG 4 standard, "
-               "the maximum admitted value for the timebase denominator is %d\n",
-               s->avctx->time_base.num, s->avctx->time_base.den, (1<<16)-1);
+    if (avctx->codec_id == CODEC_ID_MPEG4 &&
+        s->avctx->time_base.den > (1 << 16) - 1) {
+        av_log(avctx, AV_LOG_ERROR,
+               "timebase %d/%d not supported by MPEG 4 standard, "
+               "the maximum admitted value for the timebase denominator "
+               "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
+               (1 << 16) - 1);
         return -1;
     }
     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
@@ -565,7 +664,8 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
     case CODEC_ID_MJPEG:
         s->out_format = FMT_MJPEG;
         s->intra_only = 1; /* force intra only for jpeg */
-        if(avctx->codec->id == CODEC_ID_LJPEG && avctx->pix_fmt == PIX_FMT_BGRA){
+        if (avctx->codec->id == CODEC_ID_LJPEG &&
+            avctx->pix_fmt == PIX_FMT_BGRA) {
             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
@@ -577,16 +677,20 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
         }
-        if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER)
-            || ff_mjpeg_encode_init(s) < 0)
+        if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
+            ff_mjpeg_encode_init(s) < 0)
             return -1;
         avctx->delay = 0;
         s->low_delay = 1;
         break;
     case CODEC_ID_H261:
-        if (!CONFIG_H261_ENCODER) return -1;
+        if (!CONFIG_H261_ENCODER)
+            return -1;
         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
-            av_log(avctx, AV_LOG_ERROR, "The specified picture size of %dx%d is not valid for the H.261 codec.\nValid sizes are 176x144, 352x288\n", s->width, s->height);
+            av_log(avctx, AV_LOG_ERROR,
+                   "The specified picture size of %dx%d is not valid for the "
+                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
+                   s->width, s->height);
             return -1;
         }
         s->out_format = FMT_H261;
@@ -594,9 +698,15 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
         s->low_delay = 1;
         break;
     case CODEC_ID_H263:
-        if (!CONFIG_H263_ENCODER) return -1;
-        if (ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format), s->width, s->height) == 8) {
-            av_log(avctx, AV_LOG_INFO, "The specified picture size of %dx%d is not valid for the H.263 codec.\nValid sizes are 128x96, 176x144, 352x288, 704x576, and 1408x1152. Try H.263+.\n", s->width, s->height);
+        if (!CONFIG_H263_ENCODER)
+            return -1;
+        if (ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format),
+                             s->width, s->height) == 8) {
+            av_log(avctx, AV_LOG_INFO,
+                   "The specified picture size of %dx%d is not valid for "
+                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
+                   "352x288, 704x576, and 1408x1152."
+                   "Try H.263+.\n", s->width, s->height);
             return -1;
         }
         s->out_format = FMT_H263;
@@ -699,7 +809,9 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
     s->encoding = 1;
 
     s->progressive_frame =
-    s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN));
+    s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
+                                                CODEC_FLAG_INTERLACED_ME |
+                                                CODEC_FLAG_ALT_SCAN));
 
     /* init */
     if (MPV_common_init(s) < 0)
@@ -734,14 +846,15 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
     /* init q matrix */
     for (i = 0; i < 64; i++) {
         int j = s->dsp.idct_permutation[i];
-        if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
+        if (CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4 &&
+            s->mpeg_quant) {
             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
             s->intra_matrix[j] =
             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
-        }else
-        { /* mpeg1/2 */
+        } else {
+            /* mpeg1/2 */
             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
         }
@@ -755,9 +868,11 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
     /* for mjpeg, we do include qscale in the matrix */
     if (s->out_format != FMT_MJPEG) {
         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
-                          s->intra_matrix, s->intra_quant_bias, avctx->qmin, 31, 1);
+                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
+                          31, 1);
         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
-                          s->inter_matrix, s->inter_quant_bias, avctx->qmin, 31, 0);
+                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,
+                          31, 0);
     }
 
     if (ff_rate_control_init(s) < 0)
@@ -773,7 +888,8 @@ av_cold int MPV_encode_end(AVCodecContext *avctx)
     ff_rate_control_uninit(s);
 
     MPV_common_end(s);
-    if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) && s->out_format == FMT_MJPEG)
+    if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
+        s->out_format == FMT_MJPEG)
         ff_mjpeg_encode_close(s);
 
     av_freep(&avctx->extradata);
@@ -781,7 +897,8 @@ av_cold int MPV_encode_end(AVCodecContext *avctx)
     return 0;
 }
 
-static int get_sae(uint8_t *src, int ref, int stride){
+static int get_sae(uint8_t *src, int ref, int stride)
+{
     int x,y;
     int acc = 0;
 
@@ -794,7 +911,9 @@ static int get_sae(uint8_t *src, int ref, int stride){
     return acc;
 }
 
-static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
+static int get_intra_count(MpegEncContext *s, uint8_t *src,
+                           uint8_t *ref, int stride)
+{
     int x, y, w, h;
     int acc = 0;
 
@@ -804,7 +923,8 @@ static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int st
     for (y = 0; y < h; y += 16) {
         for (x = 0; x < w; x += 16) {
             int offset = x + y * stride;
-            int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16);
+            int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
+                                    16);
             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
             int sae = get_sae(src + offset, mean, stride);
 
@@ -815,7 +935,8 @@ static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int st
 }
 
-static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
+static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
+{
     AVFrame *pic = NULL;
     int64_t pts;
     int i;
@@ -832,7 +953,9 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
             int64_t last = s->user_specified_pts;
 
             if (time <= last) {
-                av_log(s->avctx, AV_LOG_ERROR, "Error, Invalid timestamp=%"PRId64", last=%"PRId64"\n", pts, s->user_specified_pts);
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "Error, Invalid timestamp=%"PRId64", "
+                       "last=%"PRId64"\n", pts, s->user_specified_pts);
                 return -1;
             }
         }
@@ -841,7 +964,9 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
         if (s->user_specified_pts != AV_NOPTS_VALUE) {
             s->user_specified_pts =
             pts = s->user_specified_pts + 1;
-            av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n", pts);
+            av_log(s->avctx, AV_LOG_INFO,
+                   "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
+                   pts);
         } else {
             pts = pic_arg->display_picture_number;
         }
@@ -849,12 +974,17 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
     }
 
     if (pic_arg) {
-        if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
-        if(pic_arg->linesize[0] != s->linesize) direct=0;
-        if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
-        if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
+        if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
+            direct = 0;
+        if (pic_arg->linesize[0] != s->linesize)
+            direct = 0;
+        if (pic_arg->linesize[1] != s->uvlinesize)
+            direct = 0;
+        if (pic_arg->linesize[2] != s->uvlinesize)
+            direct = 0;
 
-//        av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
+        //av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0],
+        //       pic_arg->linesize[1], s->linesize, s->uvlinesize);
 
         if (direct) {
             i = ff_find_unused_picture(s, 1);
@@ -883,13 +1013,14 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
                 return -1;
             }
 
-            if( pic->data[0] + INPLACE_OFFSET == pic_arg->data[0]
-               && pic->data[1] + INPLACE_OFFSET == pic_arg->data[1]
-               && pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]){
+            if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
+                pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
+                pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                 // empty
             } else {
                 int h_chroma_shift, v_chroma_shift;
-                avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
+                avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift,
+                                              &v_chroma_shift);
 
                 for (i = 0; i < 3; i++) {
                     int src_stride = pic_arg->linesize[i];
@@ -929,7 +1060,8 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
     return 0;
 }
 
-static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){
+static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
+{
     int x, y, plane;
     int score = 0;
     int64_t score64 = 0;
@@ -940,7 +1072,9 @@ static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){
         for (y = 0; y < s->mb_height * bw; y++) {
             for (x = 0; x < s->mb_width * bw; x++) {
                 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
-                int v = s->dsp.frame_skip_cmp[1](s, p->f.data[plane] + 8*(x + y*stride)+off, ref->f.data[plane] + 8*(x + y*stride), stride, 8);
+                uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
+                uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
+                int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
 
                 switch (s->avctx->frame_skip_exp) {
                 case 0: score = FFMAX(score, v); break;
@@ -953,7 +1087,8 @@ static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){
             }
         }
     }
-    if(score) score64= score;
+    if (score)
+        score64 = score;
 
     if (score64 < s->avctx->frame_skip_threshold)
         return 1;
@@ -962,7 +1097,8 @@ static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){
     return 0;
 }
 
-static int estimate_best_b_count(MpegEncContext *s){
+static int estimate_best_b_count(MpegEncContext *s)
+{
     AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
     AVCodecContext *c = avcodec_alloc_context3(NULL);
     AVFrame input[FF_MAX_B_FRAMES + 2];
@@ -976,14 +1112,19 @@ static int estimate_best_b_count(MpegEncContext *s)
     assert(scale >= 0 && scale <= 3);
 
     //emms_c();
-    p_lambda= s->last_lambda_for[AV_PICTURE_TYPE_P]; //s->next_picture_ptr->quality;
-    b_lambda= s->last_lambda_for[AV_PICTURE_TYPE_B]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
-    if(!b_lambda) b_lambda= p_lambda; //FIXME we should do this somewhere else
-    lambda2= (b_lambda*b_lambda + (1<<FF_LAMBDA_SHIFT)/2 ) >> FF_LAMBDA_SHIFT;
+    //s->next_picture_ptr->quality;
+    p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
+    //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
+    b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
+    if (!b_lambda) // FIXME we should do this somewhere else
+        b_lambda = p_lambda;
+    lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
+              FF_LAMBDA_SHIFT;
 
     c->width = s->width >> scale;
     c->height = s->height >> scale;
-    c->flags= CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR | CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
+    c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
+               CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
     c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
     c->mb_decision = s->avctx->mb_decision;
     c->me_cmp = s->avctx->me_cmp;