ffmpeg: use intermediary variables in reap_filters, increase readability
parent 68c5ba1f05
commit 9651239f67
ffmpeg.c: 23 changed lines
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -1084,10 +1084,13 @@ static int reap_filters(void)
     for (i = 0; i < nb_output_streams; i++) {
         OutputStream *ost = output_streams[i];
         OutputFile *of = output_files[ost->file_index];
+        AVFilterContext *filter;
+        AVCodecContext *enc = ost->st->codec;
         int ret = 0;
 
         if (!ost->filter)
             continue;
+        filter = ost->filter->filter;
 
         if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
             return AVERROR(ENOMEM);
@@ -1095,7 +1098,7 @@ static int reap_filters(void)
         filtered_frame = ost->filtered_frame;
 
         while (1) {
-            ret = av_buffersink_get_frame_flags(ost->filter->filter, filtered_frame,
+            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
             if (ret < 0) {
                 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
@@ -1111,29 +1114,25 @@ static int reap_filters(void)
             frame_pts = AV_NOPTS_VALUE;
             if (filtered_frame->pts != AV_NOPTS_VALUE) {
                 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
-                filtered_frame->pts = frame_pts = av_rescale_q(filtered_frame->pts,
-                                                    ost->filter->filter->inputs[0]->time_base,
-                                                    ost->st->codec->time_base) -
-                                        av_rescale_q(start_time,
-                                                    AV_TIME_BASE_Q,
-                                                    ost->st->codec->time_base);
+                filtered_frame->pts = frame_pts =
+                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
+                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
             }
             //if (ost->source_index >= 0)
             //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
 
-            switch (ost->filter->filter->inputs[0]->type) {
+            switch (filter->inputs[0]->type) {
             case AVMEDIA_TYPE_VIDEO:
                 filtered_frame->pts = frame_pts;
                 if (!ost->frame_aspect_ratio.num)
-                    ost->st->codec->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
+                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
 
                 do_video_out(of->ctx, ost, filtered_frame);
                 break;
             case AVMEDIA_TYPE_AUDIO:
                 filtered_frame->pts = frame_pts;
-                if (!(ost->st->codec->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
-                    ost->st->codec->channels != av_frame_get_channels(filtered_frame)) {
+                if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
+                    enc->channels != av_frame_get_channels(filtered_frame)) {
                     av_log(NULL, AV_LOG_ERROR,
                            "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                     break;
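Side note: the refactor above is just hoisting repeated pointer chains into short-lived local aliases. A minimal standalone sketch of that pattern follows; the struct names are hypothetical simplified stand-ins, not the real OutputStream/AVStream/AVCodecContext layout from ffmpeg.c.

    #include <stdint.h>

    /* Hypothetical, simplified stand-ins for OutputStream / AVStream / AVCodecContext. */
    typedef struct CodecCtx  { int64_t tb_num, tb_den; } CodecCtx;
    typedef struct Stream    { CodecCtx *codec; } Stream;
    typedef struct OutStream { Stream *st; } OutStream;

    /* Before: every access repeats the full chain, so the expression wraps. */
    static int64_t rescale_pts_long(OutStream *ost, int64_t pts)
    {
        return pts * ost->st->codec->tb_num /
                     ost->st->codec->tb_den;
    }

    /* After: one alias up front, and the arithmetic reads in a single line. */
    static int64_t rescale_pts_short(OutStream *ost, int64_t pts)
    {
        CodecCtx *enc = ost->st->codec;  /* same object, shorter name */
        return pts * enc->tb_num / enc->tb_den;
    }

The alias points at the same object, so behaviour is unchanged; only the call sites get shorter, which is what the hunks above do with filter and enc.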