examples: demuxing: simplify audio output
There is no reason why this should copy the audio data in such a complicated way. Also, write only the first plane instead of the whole buffer. This is more helpful in the context of the example: a user can clearly confirm that it works by playing the written data back as raw audio.
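(Purely as an illustration, not part of the commit: for a packed signed 16-bit stream at 44100 Hz the written file could be played back with something like "ffplay -f s16le -ac 1 -ar 44100 audio_out"; the actual sample format, channel count and rate depend on the decoded stream, and -ac 1 reflects that only the first channel/plane is written for planar input. The demuxing example itself prints a suggested ffplay command at the end of a successful run.)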
@@ -47,10 +47,6 @@ static uint8_t *video_dst_data[4] = {NULL};
 static int video_dst_linesize[4];
 static int video_dst_bufsize;
 
-static uint8_t **audio_dst_data = NULL;
-static int audio_dst_linesize;
-static int audio_dst_bufsize;
-
 static int video_stream_idx = -1, audio_stream_idx = -1;
 static AVFrame *frame = NULL;
 static AVPacket pkt;
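(These destination-buffer globals go away because, with the change below, the sample data is written straight from the decoded frame instead of being copied into a separately allocated buffer first.)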
@@ -99,31 +95,21 @@ static int decode_packet(int *got_frame, int cached)
         decoded = FFMIN(ret, pkt.size);
 
         if (*got_frame) {
+            size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
             printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
                    cached ? "(cached)" : "",
                    audio_frame_count++, frame->nb_samples,
                    av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
 
-            ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, av_frame_get_channels(frame),
-                                   frame->nb_samples, frame->format, 1);
-            if (ret < 0) {
-                fprintf(stderr, "Could not allocate audio buffer\n");
-                return AVERROR(ENOMEM);
-            }
-
-            /* TODO: extend return code of the av_samples_* functions so that this call is not needed */
-            audio_dst_bufsize =
-                av_samples_get_buffer_size(NULL, av_frame_get_channels(frame),
-                                           frame->nb_samples, frame->format, 1);
-
-            /* copy audio data to destination buffer:
-             * this is required since rawaudio expects non aligned data */
-            av_samples_copy(audio_dst_data, frame->data, 0, 0,
-                            frame->nb_samples, av_frame_get_channels(frame), frame->format);
-
-            /* write to rawaudio file */
-            fwrite(audio_dst_data[0], 1, audio_dst_bufsize, audio_dst_file);
-            av_freep(&audio_dst_data[0]);
+            /* Write the raw audio data samples of the first plane. This works
+             * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
+             * most audio decoders output planar audio, which uses a separate
+             * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
+             * In other words, this code will write only the first audio channel
+             * in these cases.
+             * You should use libswresample or libavfilter to convert the frame
+             * to packed data. */
+            fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
         }
     }
 
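Note that the added unpadded_linesize is nb_samples * bytes-per-sample, i.e. exactly the valid sample data in one plane; frame->linesize[0] is not used for the write because it may include alignment padding. For readers who want all channels rather than only the first plane, the new comment points at libswresample. The following is a minimal sketch of such a conversion, not part of the commit: it assumes the pre-AVChannelLayout API that the example itself uses (av_frame_get_channels(), av_get_default_channel_layout()), the helper name write_packed_samples is made up for the illustration, and error handling is kept to a minimum (a real program would also create the SwrContext once rather than per frame).

#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/error.h>
#include <libavutil/samplefmt.h>
#include <libavutil/channel_layout.h>
#include <libswresample/swresample.h>

/* Hypothetical helper (sketch only): interleave one decoded, possibly planar,
 * frame into packed samples and write them to outfile. */
static int write_packed_samples(AVFrame *frame, FILE *outfile)
{
    enum AVSampleFormat packed_fmt = av_get_packed_sample_fmt(frame->format);
    int channels      = av_frame_get_channels(frame);
    int64_t ch_layout = frame->channel_layout ?
                        frame->channel_layout : av_get_default_channel_layout(channels);
    uint8_t *out_data = NULL;
    int out_linesize, ret;

    /* same rate and layout in and out; only the sample format is packed */
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
                                                ch_layout, packed_fmt,    frame->sample_rate,
                                                ch_layout, frame->format, frame->sample_rate,
                                                0, NULL);
    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr);
        return AVERROR(EINVAL);
    }

    ret = av_samples_alloc(&out_data, &out_linesize, channels,
                           frame->nb_samples, packed_fmt, 0);
    if (ret >= 0) {
        /* convert all planes of the frame into the single packed buffer */
        ret = swr_convert(swr, &out_data, frame->nb_samples,
                          (const uint8_t **)frame->extended_data, frame->nb_samples);
        if (ret > 0)
            fwrite(out_data, 1,
                   ret * channels * av_get_bytes_per_sample(packed_fmt), outfile);
    }

    av_freep(&out_data);
    swr_free(&swr);
    return ret < 0 ? ret : 0;
}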
@@ -250,8 +236,6 @@ int main (int argc, char **argv)
     }
 
     if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
-        int nb_planes;
-
         audio_stream = fmt_ctx->streams[audio_stream_idx];
         audio_dec_ctx = audio_stream->codec;
         audio_dst_file = fopen(audio_dst_filename, "wb");
@@ -260,15 +244,6 @@ int main (int argc, char **argv)
             ret = 1;
             goto end;
         }
-
-        nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ?
-            audio_dec_ctx->channels : 1;
-        audio_dst_data = av_mallocz(sizeof(uint8_t *) * nb_planes);
-        if (!audio_dst_data) {
-            fprintf(stderr, "Could not allocate audio data buffers\n");
-            ret = AVERROR(ENOMEM);
-            goto end;
-        }
     }
 
     /* dump input information to stderr */
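(The nb_planes computation and the av_mallocz() of audio_dst_data are dropped for the same reason: no intermediate destination buffer is needed any more, so the matching av_free(audio_dst_data) also disappears in the cleanup hunk below.)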
@@ -349,7 +324,6 @@ end:
         fclose(audio_dst_file);
     av_free(frame);
     av_free(video_dst_data[0]);
-    av_free(audio_dst_data);
 
     return ret < 0;
 }