Merge remote-tracking branch 'qatar/master'
* qatar/master:
  lavf: don't guess r_frame_rate from either stream or codec timebase.
  avconv: set discard on input streams automatically.
  Fix parser not to clobber has_b_frames when extradata is set.
  lavf: don't set codec timebase in avformat_find_stream_info().
  avconv: saner output video timebase.
  rawdec: set timebase to 1/fps.
  avconv: refactor vsync code.
  FATE: remove a bunch of useless -vsync 0
  cdxl: bit line plane arrangement support
  cdxl: remove early check for bpp
  cdxl: set pix_fmt PAL8 only if palette is available

Conflicts:
	ffmpeg.c
	libavcodec/h264_parser.c
	libavformat/rawdec.c
	tests/fate/demux.mak
	tests/fate/ea.mak
	tests/fate/h264.mak
	tests/fate/prores.mak
	tests/fate/video.mak
	tests/ref/fate/bethsoft-vid
	tests/ref/fate/creatureshock-avs
	tests/ref/fate/ea-cmv
	tests/ref/fate/interplay-mve-16bit
	tests/ref/fate/interplay-mve-8bit
	tests/ref/fate/nuv
	tests/ref/fate/prores-alpha
	tests/ref/fate/qtrle-16bit
	tests/ref/fate/qtrle-1bit
	tests/ref/fate/real-rv40
	tests/ref/fate/rpza
	tests/ref/fate/wmv8-drm

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 574dcb5baf
doc/ffmpeg.texi

@@ -279,7 +279,8 @@ attachments.
 @item -vframes @var{number} (@emph{output})
 Set the number of video frames to record. This is an alias for @code{-frames:v}.
 @item -r[:@var{stream_specifier}] @var{fps} (@emph{input/output,per-stream})
-Set frame rate (Hz value, fraction or abbreviation), (default = 25).
+Set frame rate (Hz value, fraction or abbreviation), (default = 25). For output
+streams implies @code{-vsync cfr}.
 @item -s[:@var{stream_specifier}] @var{size} (@emph{input/output,per-stream})
 Set frame size. The format is @samp{wxh} (default - same as source).
 The following abbreviations are recognized:
ffmpeg.c
@@ -1506,7 +1506,7 @@ static void do_video_out(AVFormatContext *s, OutputStream *ost,
     int nb_frames, i, ret, format_video_sync;
     AVFrame *final_picture;
     AVCodecContext *enc;
-    double sync_ipts;
+    double sync_ipts, delta;
     double duration = 0;
     int frame_size = 0;
     float quality = same_quant ? in_picture->quality
@@ -1523,6 +1523,7 @@ static void do_video_out(AVFormatContext *s, OutputStream *ost,
     }

     sync_ipts = get_sync_ipts(ost, in_picture->pts) / av_q2d(enc->time_base);
+    delta = sync_ipts - ost->sync_opts + duration;

     /* by default, we output a single frame */
     nb_frames = 1;
@@ -1531,31 +1532,37 @@ static void do_video_out(AVFormatContext *s, OutputStream *ost,
     if (format_video_sync == VSYNC_AUTO)
         format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : 1;

-    if (format_video_sync != VSYNC_PASSTHROUGH && format_video_sync != VSYNC_DROP) {
-        double vdelta = sync_ipts - ost->sync_opts + duration;
+    switch (format_video_sync) {
+    case VSYNC_CFR:
         // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
-        if (vdelta < -1.1)
+        if (delta < -1.1)
             nb_frames = 0;
-        else if (format_video_sync == VSYNC_VFR) {
-            if (vdelta <= -0.6) {
-                nb_frames = 0;
-            } else if (vdelta > 0.6)
-                ost->sync_opts = lrintf(sync_ipts);
-        } else if (vdelta > 1.1)
-            nb_frames = lrintf(vdelta);
-        if (nb_frames == 0) {
-            ++nb_frames_drop;
-            av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
-        } else if (nb_frames > 1) {
-            nb_frames_dup += nb_frames - 1;
-            av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
-        }
-    } else
-        ost->sync_opts = lrintf(sync_ipts);
+        else if (delta > 1.1)
+            nb_frames = lrintf(delta);
+        break;
+    case VSYNC_VFR:
+        if (delta <= -0.6)
+            nb_frames = 0;
+        else if (delta > 0.6)
+            ost->sync_opts = lrintf(sync_ipts);
+        break;
+    case VSYNC_DROP:
+    case VSYNC_PASSTHROUGH:
+        ost->sync_opts = lrintf(sync_ipts);
+        break;
+    default:
+        av_assert0(0);
+    }

     nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
-    if (nb_frames <= 0)
+    if (nb_frames == 0) {
+        nb_frames_drop++;
+        av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
         return;
+    } else if (nb_frames > 1) {
+        nb_frames_dup += nb_frames - 1;
+        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
+    }

     do_video_resample(ost, ist, in_picture, &final_picture);

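The refactored switch above reduces every sync mode to a decision on delta, the distance in encoder time-base units between where the next frame should land (sync_ipts) and where the output currently is (ost->sync_opts). The following is a minimal standalone sketch of that decision, using hypothetical names rather than ffmpeg.c's actual VSYNC_* enum and OutputStream fields; it illustrates the merged behaviour, it is not ffmpeg's API.

#include <math.h>
#include <stdio.h>

/* Hypothetical stand-ins for ffmpeg.c's VSYNC_* values (illustration only). */
enum { MODE_CFR, MODE_VFR, MODE_PASSTHROUGH };

/* How many copies of the current frame to emit; 0 means drop it.
 * delta is (ideal position of the next frame) - (current output position),
 * measured in encoder time-base units. */
static int frames_to_emit(int mode, double delta)
{
    int nb_frames = 1;                  /* by default, output a single frame */

    switch (mode) {
    case MODE_CFR:
        if (delta < -1.1)               /* frame arrives far too early: drop */
            nb_frames = 0;
        else if (delta > 1.1)           /* a gap opened up: duplicate frames */
            nb_frames = (int)lrint(delta);
        break;
    case MODE_VFR:
        if (delta <= -0.6)              /* too early even for VFR: drop */
            nb_frames = 0;
        /* large positive delta is handled by retiming the frame, not by
         * duplication; that part is not modeled in this sketch */
        break;
    default:                            /* passthrough/drop: keep the frame */
        break;
    }
    return nb_frames;
}

int main(void)
{
    printf("%d\n", frames_to_emit(MODE_CFR,  2.4));        /* 2: fill the gap */
    printf("%d\n", frames_to_emit(MODE_CFR, -1.5));        /* 0: drop */
    printf("%d\n", frames_to_emit(MODE_VFR, -0.7));        /* 0: drop */
    printf("%d\n", frames_to_emit(MODE_PASSTHROUGH, 5.0)); /* 1: always keep */
    return 0;
}

With CFR output a gap of 2.4 time-base units is therefore filled with two copies of the frame, while anything arriving more than 1.1 units early is dropped; VFR only ever drops, never duplicates.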
@@ -3523,6 +3530,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
         ist->st = st;
         ist->file_index = nb_input_files;
         ist->discard = 1;
+        st->discard = AVDISCARD_ALL;
         ist->opts = filter_codec_opts(codec_opts, choose_decoder(o, ic, st), ic, st);

         ist->ts_scale = 1.0;
@@ -3539,12 +3547,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
         ist->dec = choose_decoder(o, ic, st);

         switch (dec->codec_type) {
-        case AVMEDIA_TYPE_AUDIO:
-            if (!ist->dec)
-                ist->dec = avcodec_find_decoder(dec->codec_id);
-            if (o->audio_disable)
-                st->discard = AVDISCARD_ALL;
-            break;
         case AVMEDIA_TYPE_VIDEO:
             if(!ist->dec)
                 ist->dec = avcodec_find_decoder(dec->codec_id);
@@ -3552,20 +3554,12 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
                 dec->flags |= CODEC_FLAG_EMU_EDGE;
             }

-            if (o->video_disable)
-                st->discard = AVDISCARD_ALL;
-            else if (video_discard)
-                st->discard = video_discard;
             break;
+        case AVMEDIA_TYPE_AUDIO:
         case AVMEDIA_TYPE_DATA:
-            if (o->data_disable)
-                st->discard= AVDISCARD_ALL;
-            break;
         case AVMEDIA_TYPE_SUBTITLE:
             if(!ist->dec)
                 ist->dec = avcodec_find_decoder(dec->codec_id);
-            if(o->subtitle_disable)
-                st->discard = AVDISCARD_ALL;
             break;
         case AVMEDIA_TYPE_ATTACHMENT:
         case AVMEDIA_TYPE_UNKNOWN:
@@ -4328,6 +4322,7 @@ static void opt_output_file(void *optctx, const char *filename)
             ost->source_index = index;\
             ost->sync_ist = &input_streams[index];\
             input_streams[index].discard = 0;\
+            input_streams[index].st->discard = AVDISCARD_NONE;\
         }

         /* video: highest resolution */
@@ -4400,6 +4395,7 @@ static void opt_output_file(void *optctx, const char *filename)
                 ost->sync_ist = &input_streams[input_files[map->sync_file_index].ist_index +
                                                map->sync_stream_index];
                 ist->discard = 0;
+                ist->st->discard = AVDISCARD_NONE;
             }
         }

libavcodec/cdxl.c

@@ -24,10 +24,18 @@
 #include "avcodec.h"
 #include "get_bits.h"

+#define BIT_PLANAR 0x00
+#define BYTE_PLANAR 0x20
+#define CHUNKY 0x40
+#define BIT_LINE 0x80
+#define BYTE_LINE 0xC0
+
 typedef struct {
     AVCodecContext *avctx;
     AVFrame frame;
     int bpp;
+    int format;
+    int padded_bits;
     const uint8_t *palette;
     int palette_size;
     const uint8_t *video;
@@ -62,27 +70,54 @@ static void import_palette(CDXLVideoContext *c, uint32_t *new_palette)

 static void bitplanar2chunky(CDXLVideoContext *c, int linesize, uint8_t *out)
 {
-    int skip = FFALIGN(c->avctx->width, 16) - c->avctx->width;
     GetBitContext gb;
     int x, y, plane;

     init_get_bits(&gb, c->video, c->video_size * 8);
-    memset(out, 0, linesize * c->avctx->height);
     for (plane = 0; plane < c->bpp; plane++) {
         for (y = 0; y < c->avctx->height; y++) {
             for (x = 0; x < c->avctx->width; x++)
                 out[linesize * y + x] |= get_bits1(&gb) << plane;
-            skip_bits(&gb, skip);
+            skip_bits(&gb, c->padded_bits);
         }
     }
 }

+static void bitline2chunky(CDXLVideoContext *c, int linesize, uint8_t *out)
+{
+    GetBitContext gb;
+    int x, y, plane;
+
+    init_get_bits(&gb, c->video, c->video_size * 8);
+    for (y = 0; y < c->avctx->height; y++) {
+        for (plane = 0; plane < c->bpp; plane++) {
+            for (x = 0; x < c->avctx->width; x++)
+                out[linesize * y + x] |= get_bits1(&gb) << plane;
+            skip_bits(&gb, c->padded_bits);
+        }
+    }
+}
+
+static void import_format(CDXLVideoContext *c, int linesize, uint8_t *out)
+{
+    memset(out, 0, linesize * c->avctx->height);
+
+    switch (c->format) {
+    case BIT_PLANAR:
+        bitplanar2chunky(c, linesize, out);
+        break;
+    case BIT_LINE:
+        bitline2chunky(c, linesize, out);
+        break;
+    }
+}
+
 static void cdxl_decode_rgb(CDXLVideoContext *c)
 {
     uint32_t *new_palette = (uint32_t *)c->frame.data[1];

     import_palette(c, new_palette);
-    bitplanar2chunky(c, c->frame.linesize[0], c->frame.data[0]);
+    import_format(c, c->frame.linesize[0], c->frame.data[0]);
 }

 static void cdxl_decode_ham6(CDXLVideoContext *c)
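The new bitline2chunky() differs from bitplanar2chunky() only in loop nesting: bit-planar CDXL data stores each bit plane as a complete image, while bit-line data stores all planes of one scan line before moving to the next line, and import_format() selects between the two from the format byte. The following is a small self-contained sketch, with a toy MSB-first bit reader standing in for GetBitContext (the helper names are illustrative, not the decoder's API), decoding the same 8x2, 2-plane image from both layouts.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy MSB-first bit reader (stand-in for GetBitContext in this sketch). */
typedef struct { const uint8_t *buf; int pos; } BitReader;

static int read_bit(BitReader *br)
{
    int bit = (br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1;
    br->pos++;
    return bit;
}

/* Bit-planar: every plane is stored as a whole image before the next plane. */
static void planar_to_chunky(BitReader *br, int w, int h, int bpp, uint8_t *out)
{
    for (int plane = 0; plane < bpp; plane++)
        for (int y = 0; y < h; y++)
            for (int x = 0; x < w; x++)
                out[y * w + x] |= read_bit(br) << plane;
}

/* Bit-line: all planes of one scan line are stored before the next line. */
static void line_to_chunky(BitReader *br, int w, int h, int bpp, uint8_t *out)
{
    for (int y = 0; y < h; y++)
        for (int plane = 0; plane < bpp; plane++)
            for (int x = 0; x < w; x++)
                out[y * w + x] |= read_bit(br) << plane;
}

int main(void)
{
    /* The same 8x2 image with pixel values
     *   row 0: 0 1 2 3 3 2 1 0
     *   row 1: 3 3 0 0 1 1 2 2
     * packed once plane-major and once line-major (2 bit planes, no padding). */
    static const uint8_t planar[] = { 0x5A, 0xCC, 0x3C, 0xC3 };
    static const uint8_t line[]   = { 0x5A, 0x3C, 0xCC, 0xC3 };
    uint8_t a[16] = { 0 }, b[16] = { 0 };
    BitReader ra = { planar, 0 }, rb = { line, 0 };

    planar_to_chunky(&ra, 8, 2, 2, a);
    line_to_chunky(&rb, 8, 2, 2, b);

    for (int i = 0; i < 16; i++)
        printf("%d ", a[i]);            /* 0 1 2 3 3 2 1 0 3 3 0 0 1 1 2 2 */
    printf("\nsame result: %s\n", memcmp(a, b, sizeof(a)) ? "no" : "yes");
    return 0;
}

Both calls produce the same sixteen chunky pixel values, which is the point: the two layouts carry identical data and only the traversal order changes.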
@@ -96,7 +131,7 @@ static void cdxl_decode_ham6(CDXLVideoContext *c)
     out = c->frame.data[0];

     import_palette(c, new_palette);
-    bitplanar2chunky(c, avctx->width, c->new_video);
+    import_format(c, avctx->width, c->new_video);

     for (y = 0; y < avctx->height; y++) {
         r = new_palette[0] & 0xFF0000;
@@ -139,7 +174,7 @@ static void cdxl_decode_ham8(CDXLVideoContext *c)
     out = c->frame.data[0];

     import_palette(c, new_palette);
-    bitplanar2chunky(c, avctx->width, c->new_video);
+    import_format(c, avctx->width, c->new_video);

     for (y = 0; y < avctx->height; y++) {
         r = new_palette[0] & 0xFF0000;
@@ -176,13 +211,13 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
 {
     CDXLVideoContext *c = avctx->priv_data;
     AVFrame * const p = &c->frame;
-    int ret, w, h, encoding, format, buf_size = pkt->size;
+    int ret, w, h, encoding, aligned_width, buf_size = pkt->size;
     const uint8_t *buf = pkt->data;

     if (buf_size < 32)
         return AVERROR_INVALIDDATA;
     encoding = buf[1] & 7;
-    format = buf[1] & 0xE0;
+    c->format = buf[1] & 0xE0;
     w = AV_RB16(&buf[14]);
     h = AV_RB16(&buf[16]);
     c->bpp = buf[19];
@@ -197,12 +232,8 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_INVALIDDATA;
     if (c->bpp < 1)
         return AVERROR_INVALIDDATA;
-    if (c->bpp > 8) {
-        av_log_ask_for_sample(avctx, "unsupported pixel size: %d\n", c->bpp);
-        return AVERROR_PATCHWELCOME;
-    }
-    if (format) {
-        av_log_ask_for_sample(avctx, "unsupported pixel format: %d\n", format);
+    if (c->format != BIT_PLANAR && c->format != BIT_LINE) {
+        av_log_ask_for_sample(avctx, "unsupported pixel format: 0x%0x\n", c->format);
         return AVERROR_PATCHWELCOME;
     }

@@ -211,9 +242,11 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
     if (w != avctx->width || h != avctx->height)
         avcodec_set_dimensions(avctx, w, h);

-    if (c->video_size < FFALIGN(avctx->width, 16) * avctx->height * c->bpp / 8)
+    aligned_width = FFALIGN(c->avctx->width, 16);
+    c->padded_bits = aligned_width - c->avctx->width;
+    if (c->video_size < aligned_width * avctx->height * c->bpp / 8)
         return AVERROR_INVALIDDATA;
-    if (encoding == 0) {
+    if (!encoding && c->palette_size && c->bpp <= 8) {
         avctx->pix_fmt = PIX_FMT_PAL8;
     } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) {
         if (c->palette_size != (1 << (c->bpp - 1)))
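As a worked example of the new padding bookkeeping: CDXL plane rows are padded to a multiple of 16 pixels, so a 300-pixel-wide image gives aligned_width = FFALIGN(300, 16) = 304 and c->padded_bits = 4, the four padding bits skipped by skip_bits() at the end of every plane row, and the packet must hold at least 304 * height * bpp / 8 bytes of video data; a 320-pixel-wide image needs no skipping (padded_bits = 0).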
tests/fate/h264.mak

@@ -375,7 +375,7 @@ fate-h264-conformance-sva_fm1_e: CMD = framecrc -vsync drop -i $(SAMPLES)/h264-c
 fate-h264-conformance-sva_nl1_b: CMD = framecrc -vsync drop -i $(SAMPLES)/h264-conformance/SVA_NL1_B.264
 fate-h264-conformance-sva_nl2_e: CMD = framecrc -vsync drop -i $(SAMPLES)/h264-conformance/SVA_NL2_E.264

-fate-h264-interlace-crop: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264/interlaced_crop.mp4 -vframes 3
-fate-h264-lossless: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264/lossless.h264
-fate-h264-extreme-plane-pred: CMD = framemd5 -vsync 0 -i $(SAMPLES)/h264/extreme-plane-pred.h264
+fate-h264-interlace-crop: CMD = framecrc -i $(SAMPLES)/h264/interlaced_crop.mp4 -vframes 3
+fate-h264-lossless: CMD = framecrc -i $(SAMPLES)/h264/lossless.h264
+fate-h264-extreme-plane-pred: CMD = framemd5 -i $(SAMPLES)/h264/extreme-plane-pred.h264
 fate-h264-bsf-mp4toannexb: CMD = md5 -i $(SAMPLES)/h264/interlaced_crop.mp4 -vcodec copy -bsf h264_mp4toannexb -f h264