Add support for WAV output in audioproc

The default output is a WAV file, unless the --raw_output flag is set (--raw in the unpack tool).

BUG=webrtc:3359
R=bjornv@webrtc.org, kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/18359004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7069 4adac7df-926f-26a2-2b94-8c16560cd09d
aluebs@webrtc.org 2014-09-04 18:12:00 +00:00
parent 52055a276d
commit 021e76fd39
3 changed files with 147 additions and 121 deletions
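
For context, a minimal caller-side sketch (not part of the commit) of how the new WAV/raw output selection is meant to work. It uses the WavFile writer and the RawFile/WriteIntData() helpers that appear in the diff below; the WriteOutputFrame() function itself and its parameter list are illustrative only.

#include <string>

#include "webrtc/common_audio/wav_writer.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/typedefs.h"

// RawFile and WriteIntData() are the test helpers added in this commit.

namespace webrtc {

// Illustrative helper: exactly one of the two sinks gets created, depending
// on the --raw_output flag, and WriteIntData() forwards the samples to
// whichever sink is non-null.
void WriteOutputFrame(const int16_t* samples,
                      size_t num_samples,
                      int sample_rate_hz,
                      int num_channels,
                      bool raw_output,
                      const std::string& out_filename,
                      scoped_ptr<WavFile>* wav_file,
                      scoped_ptr<RawFile>* raw_file) {
  if (raw_output) {
    // Raw PCM has no header, so the file can be created once and reused.
    if (!raw_file->get())
      raw_file->reset(new RawFile(out_filename + ".pcm"));
  } else if (!wav_file->get()) {
    // A WAV header fixes the sample rate and channel count; the commit
    // therefore recreates the writer on every Init event, while this sketch
    // only creates it lazily.
    wav_file->reset(new WavFile(out_filename + ".wav",
                                sample_rate_hz,
                                num_channels));
  }
  WriteIntData(samples, num_samples, wav_file->get(), raw_file->get());
}

}  // namespace webrtc

Because raw output goes to OUT_FILE.pcm (int16 data) or OUT_FILE.float (float data) while WAV output goes to OUT_FILE.wav, the usage text now asks for OUT_FILE without an extension.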


@ -59,7 +59,7 @@ void usage() {
"when -ir or -i is used, the specified files will be processed directly in\n"
"a simulation mode. Otherwise the full set of legacy test files is expected\n"
"to be present in the working directory. OUT_FILE should be specified\n"
"without extension to support both int and float output.\n\n");
"without extension to support both raw and wav output.\n\n");
printf("Options\n");
printf("General configuration (only used for the simulation mode):\n");
printf(" -fs SAMPLE_RATE_HZ\n");
@ -112,6 +112,7 @@ void usage() {
printf(" --perf Measure performance.\n");
printf(" --quiet Suppress text output.\n");
printf(" --no_progress Suppress progress.\n");
printf(" --raw_output Raw output instead of WAV file.\n");
printf(" --debug_file FILE Dump a debug recording.\n");
}
@ -167,6 +168,7 @@ void void_main(int argc, char* argv[]) {
bool perf_testing = false;
bool verbose = true;
bool progress = true;
bool raw_output = false;
int extra_delay_ms = 0;
int override_delay_ms = 0;
@ -427,6 +429,9 @@ void void_main(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--no_progress") == 0) {
progress = false;
} else if (strcmp(argv[i], "--raw_output") == 0) {
raw_output = true;
} else if (strcmp(argv[i], "--debug_file") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify filename after --debug_file";
@ -464,8 +469,6 @@ void void_main(int argc, char* argv[]) {
if (out_filename.size() == 0) {
out_filename = out_path + "out";
}
std::string out_float_filename = out_filename + ".float";
out_filename += ".pcm";
if (!vad_out_filename) {
vad_out_filename = vad_file_default.c_str();
@ -486,6 +489,9 @@ void void_main(int argc, char* argv[]) {
FILE* aecm_echo_path_in_file = NULL;
FILE* aecm_echo_path_out_file = NULL;
scoped_ptr<WavFile> output_wav_file;
scoped_ptr<RawFile> output_raw_file;
if (pb_filename) {
pb_file = OpenFile(pb_filename, "rb");
} else {
@ -628,6 +634,14 @@ void void_main(int argc, char* argv[]) {
printf(" Reverse channels: %d\n", msg.num_reverse_channels());
}
if (!raw_output) {
// The WAV file needs to be reset every time, because it can't change
// its sample rate or number of channels.
output_wav_file.reset(new WavFile(out_filename + ".wav",
output_sample_rate,
msg.num_output_channels()));
}
} else if (event_msg.type() == Event::REVERSE_STREAM) {
ASSERT_TRUE(event_msg.has_reverse_stream());
ReverseStream msg = event_msg.reverse_stream();
@ -772,20 +786,24 @@ void void_main(int argc, char* argv[]) {
}
}
size_t num_samples =
apm->num_output_channels() * output_sample_rate / 100;
const size_t samples_per_channel = output_sample_rate / 100;
if (msg.has_input_data()) {
static FILE* out_file = OpenFile(out_filename, "wb");
ASSERT_EQ(num_samples, fwrite(near_frame.data_,
sizeof(*near_frame.data_),
num_samples,
out_file));
if (raw_output && !output_raw_file) {
output_raw_file.reset(new RawFile(out_filename + ".pcm"));
}
WriteIntData(near_frame.data_,
apm->num_output_channels() * samples_per_channel,
output_wav_file.get(),
output_raw_file.get());
} else {
static FILE* out_float_file = OpenFile(out_float_filename, "wb");
ASSERT_EQ(num_samples, fwrite(primary_cb->data(),
sizeof(*primary_cb->data()),
num_samples,
out_float_file));
if (raw_output && !output_raw_file) {
output_raw_file.reset(new RawFile(out_filename + ".float"));
}
WriteFloatData(primary_cb->channels(),
samples_per_channel,
apm->num_output_channels(),
output_wav_file.get(),
output_raw_file.get());
}
}
}
@ -855,6 +873,14 @@ void void_main(int argc, char* argv[]) {
near_frame.sample_rate_hz_ = sample_rate_hz;
near_frame.samples_per_channel_ = samples_per_channel;
if (!raw_output) {
// The WAV file needs to be reset every time, because it can't change
// its sample rate or number of channels.
output_wav_file.reset(new WavFile(out_filename + ".wav",
sample_rate_hz,
num_capture_output_channels));
}
if (verbose) {
printf("Init at frame: %d (primary), %d (reverse)\n",
primary_count, reverse_count);
@ -999,12 +1025,13 @@ void void_main(int argc, char* argv[]) {
}
}
size = samples_per_channel * near_frame.num_channels_;
static FILE* out_file = OpenFile(out_filename, "wb");
ASSERT_EQ(size, fwrite(near_frame.data_,
sizeof(int16_t),
size,
out_file));
if (raw_output && !output_raw_file) {
output_raw_file.reset(new RawFile(out_filename + ".pcm"));
}
WriteIntData(near_frame.data_,
size,
output_wav_file.get(),
output_raw_file.get());
}
else {
FAIL() << "Event " << event << " is unrecognized";


@ -8,7 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include <limits>
#include "webrtc/audio_processing/debug.pb.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/wav_writer.h"
#include "webrtc/modules/audio_processing/common.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/interface/module_common_types.h"
@ -19,6 +23,64 @@ namespace webrtc {
static const AudioProcessing::Error kNoErr = AudioProcessing::kNoError;
#define EXPECT_NOERR(expr) EXPECT_EQ(kNoErr, (expr))
class RawFile {
public:
RawFile(const std::string& filename)
: file_handle_(fopen(filename.c_str(), "wb")) {}
~RawFile() {
fclose(file_handle_);
}
void WriteSamples(const int16_t* samples, size_t num_samples) {
#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
#error "Need to convert samples to little-endian when writing to PCM file"
#endif
fwrite(samples, sizeof(*samples), num_samples, file_handle_);
}
void WriteSamples(const float* samples, size_t num_samples) {
fwrite(samples, sizeof(*samples), num_samples, file_handle_);
}
private:
FILE* file_handle_;
};
static inline void WriteIntData(const int16_t* data,
size_t length,
WavFile* wav_file,
RawFile* raw_file) {
if (wav_file) {
wav_file->WriteSamples(data, length);
}
if (raw_file) {
raw_file->WriteSamples(data, length);
}
}
static inline void WriteFloatData(const float* const* data,
size_t samples_per_channel,
int num_channels,
WavFile* wav_file,
RawFile* raw_file) {
size_t length = num_channels * samples_per_channel;
scoped_ptr<float[]> buffer(new float[length]);
Interleave(data, samples_per_channel, num_channels, buffer.get());
if (raw_file) {
raw_file->WriteSamples(buffer.get(), length);
}
// TODO(aluebs): Use ScaleToInt16Range() from audio_util
for (size_t i = 0; i < length; ++i) {
buffer[i] = buffer[i] > 0 ?
buffer[i] * std::numeric_limits<int16_t>::max() :
-buffer[i] * std::numeric_limits<int16_t>::min();
}
if (wav_file) {
wav_file->WriteSamples(buffer.get(), length);
}
}
// Exits on failure; do not use in unit tests.
static inline FILE* OpenFile(const std::string& filename, const char* mode) {
FILE* file = fopen(filename.c_str(), mode);


@ -14,28 +14,19 @@
// to unpack the file into its component parts: audio and other data.
#include <stdio.h>
#include <limits>
#include "gflags/gflags.h"
#include "webrtc/audio_processing/debug.pb.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/wav_writer.h"
#include "webrtc/modules/audio_processing/test/test_utils.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/typedefs.h"
// TODO(andrew): unpack more of the data.
DEFINE_string(input_file, "input.pcm", "The name of the input stream file.");
DEFINE_string(input_wav_file, "input.wav",
"The name of the WAV input stream file.");
DEFINE_string(output_file, "ref_out.pcm",
DEFINE_string(input_file, "input", "The name of the input stream file.");
DEFINE_string(output_file, "ref_out",
"The name of the reference output stream file.");
DEFINE_string(output_wav_file, "ref_out.wav",
"The name of the WAV reference output stream file.");
DEFINE_string(reverse_file, "reverse.pcm",
DEFINE_string(reverse_file, "reverse",
"The name of the reverse input stream file.");
DEFINE_string(reverse_wav_file, "reverse.wav",
"The name of the WAV reverse input stream file.");
DEFINE_string(delay_file, "delay.int32", "The name of the delay file.");
DEFINE_string(drift_file, "drift.int32", "The name of the drift file.");
DEFINE_string(level_file, "level.int32", "The name of the level file.");
@ -43,7 +34,7 @@ DEFINE_string(keypress_file, "keypress.bool", "The name of the keypress file.");
DEFINE_string(settings_file, "settings.txt", "The name of the settings file.");
DEFINE_bool(full, false,
"Unpack the full set of files (normally not needed).");
DEFINE_bool(pcm, false, "Write to PCM instead of WAV file.");
DEFINE_bool(raw, false, "Write raw data instead of a WAV file.");
namespace webrtc {
@ -52,36 +43,6 @@ using audioproc::ReverseStream;
using audioproc::Stream;
using audioproc::Init;
class PcmFile {
public:
PcmFile(const std::string& filename)
: file_handle_(fopen(filename.c_str(), "wb")) {}
~PcmFile() {
fclose(file_handle_);
}
void WriteSamples(const int16_t* samples, size_t num_samples) {
#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
#error "Need to convert samples to little-endian when writing to PCM file"
#endif
fwrite(samples, sizeof(*samples), num_samples, file_handle_);
}
void WriteSamples(const float* samples, size_t num_samples) {
static const size_t kChunksize = 4096 / sizeof(uint16_t);
for (size_t i = 0; i < num_samples; i += kChunksize) {
int16_t isamples[kChunksize];
const size_t chunk = std::min(kChunksize, num_samples - i);
RoundToInt16(samples + i, chunk, isamples);
WriteSamples(isamples, chunk);
}
}
private:
FILE* file_handle_;
};
void WriteData(const void* data, size_t size, FILE* file,
const std::string& filename) {
if (fwrite(data, size, 1, file) != 1) {
@ -90,40 +51,6 @@ void WriteData(const void* data, size_t size, FILE* file,
}
}
void WriteIntData(const int16_t* data,
size_t length,
WavFile* wav_file,
PcmFile* pcm_file) {
if (wav_file) {
wav_file->WriteSamples(data, length);
}
if (pcm_file) {
pcm_file->WriteSamples(data, length);
}
}
void WriteFloatData(const float* const* data,
size_t samples_per_channel,
int num_channels,
WavFile* wav_file,
PcmFile* pcm_file) {
size_t length = num_channels * samples_per_channel;
scoped_ptr<float[]> buffer(new float[length]);
Interleave(data, samples_per_channel, num_channels, buffer.get());
// TODO(aluebs): Use ScaleToInt16Range() from audio_util
for (size_t i = 0; i < length; ++i) {
buffer[i] = buffer[i] > 0 ?
buffer[i] * std::numeric_limits<int16_t>::max() :
-buffer[i] * std::numeric_limits<int16_t>::min();
}
if (wav_file) {
wav_file->WriteSamples(buffer.get(), length);
}
if (pcm_file) {
pcm_file->WriteSamples(buffer.get(), length);
}
}
int do_main(int argc, char* argv[]) {
std::string program_name = argv[0];
std::string usage = "Commandline tool to unpack audioproc debug files.\n"
@ -149,9 +76,9 @@ int do_main(int argc, char* argv[]) {
scoped_ptr<WavFile> reverse_wav_file;
scoped_ptr<WavFile> input_wav_file;
scoped_ptr<WavFile> output_wav_file;
scoped_ptr<PcmFile> reverse_pcm_file;
scoped_ptr<PcmFile> input_pcm_file;
scoped_ptr<PcmFile> output_pcm_file;
scoped_ptr<RawFile> reverse_raw_file;
scoped_ptr<RawFile> input_raw_file;
scoped_ptr<RawFile> output_raw_file;
while (ReadMessageFromFile(debug_file, &event_msg)) {
if (event_msg.type() == Event::REVERSE_STREAM) {
if (!event_msg.has_reverse_stream()) {
@ -161,6 +88,9 @@ int do_main(int argc, char* argv[]) {
const ReverseStream msg = event_msg.reverse_stream();
if (msg.has_data()) {
if (FLAGS_raw && !reverse_raw_file) {
reverse_raw_file.reset(new RawFile(FLAGS_reverse_file + ".pcm"));
}
// TODO(aluebs): Replace "num_reverse_channels *
// reverse_samples_per_channel" with "msg.data().size() /
// sizeof(int16_t)" and so on when this fix in audio_processing has made
@ -168,8 +98,11 @@ int do_main(int argc, char* argv[]) {
WriteIntData(reinterpret_cast<const int16_t*>(msg.data().data()),
num_reverse_channels * reverse_samples_per_channel,
reverse_wav_file.get(),
reverse_pcm_file.get());
reverse_raw_file.get());
} else if (msg.channel_size() > 0) {
if (FLAGS_raw && !reverse_raw_file) {
reverse_raw_file.reset(new RawFile(FLAGS_reverse_file + ".float"));
}
scoped_ptr<const float*[]> data(new const float*[num_reverse_channels]);
for (int i = 0; i < num_reverse_channels; ++i) {
data[i] = reinterpret_cast<const float*>(msg.channel(i).data());
@ -178,7 +111,7 @@ int do_main(int argc, char* argv[]) {
reverse_samples_per_channel,
num_reverse_channels,
reverse_wav_file.get(),
reverse_pcm_file.get());
reverse_raw_file.get());
}
} else if (event_msg.type() == Event::STREAM) {
frame_count++;
@ -189,11 +122,17 @@ int do_main(int argc, char* argv[]) {
const Stream msg = event_msg.stream();
if (msg.has_input_data()) {
if (FLAGS_raw && !input_raw_file) {
input_raw_file.reset(new RawFile(FLAGS_input_file + ".pcm"));
}
WriteIntData(reinterpret_cast<const int16_t*>(msg.input_data().data()),
num_input_channels * input_samples_per_channel,
input_wav_file.get(),
input_pcm_file.get());
input_raw_file.get());
} else if (msg.input_channel_size() > 0) {
if (FLAGS_raw && !input_raw_file) {
input_raw_file.reset(new RawFile(FLAGS_input_file + ".float"));
}
scoped_ptr<const float*[]> data(new const float*[num_input_channels]);
for (int i = 0; i < num_input_channels; ++i) {
data[i] = reinterpret_cast<const float*>(msg.input_channel(i).data());
@ -202,15 +141,21 @@ int do_main(int argc, char* argv[]) {
input_samples_per_channel,
num_input_channels,
input_wav_file.get(),
input_pcm_file.get());
input_raw_file.get());
}
if (msg.has_output_data()) {
if (FLAGS_raw && !output_raw_file) {
output_raw_file.reset(new RawFile(FLAGS_output_file + ".pcm"));
}
WriteIntData(reinterpret_cast<const int16_t*>(msg.output_data().data()),
num_output_channels * output_samples_per_channel,
output_wav_file.get(),
output_pcm_file.get());
output_raw_file.get());
} else if (msg.output_channel_size() > 0) {
if (FLAGS_raw && !output_raw_file) {
output_raw_file.reset(new RawFile(FLAGS_output_file + ".float"));
}
scoped_ptr<const float*[]> data(new const float*[num_output_channels]);
for (int i = 0; i < num_output_channels; ++i) {
data[i] =
@ -220,7 +165,7 @@ int do_main(int argc, char* argv[]) {
output_samples_per_channel,
num_output_channels,
output_wav_file.get(),
output_pcm_file.get());
output_raw_file.get());
}
if (FLAGS_full) {
@ -287,24 +232,16 @@ int do_main(int argc, char* argv[]) {
input_samples_per_channel = input_sample_rate / 100;
output_samples_per_channel = output_sample_rate / 100;
if (FLAGS_pcm) {
if (!reverse_pcm_file.get()) {
reverse_pcm_file.reset(new PcmFile(FLAGS_reverse_file));
}
if (!input_pcm_file.get()) {
input_pcm_file.reset(new PcmFile(FLAGS_input_file));
}
if (!output_pcm_file.get()) {
output_pcm_file.reset(new PcmFile(FLAGS_output_file));
}
} else {
reverse_wav_file.reset(new WavFile(FLAGS_reverse_wav_file,
if (!FLAGS_raw) {
// The WAV files need to be reset every time, because they can't change
// their sample rate or number of channels.
reverse_wav_file.reset(new WavFile(FLAGS_reverse_file + ".wav",
reverse_sample_rate,
num_reverse_channels));
input_wav_file.reset(new WavFile(FLAGS_input_wav_file,
input_wav_file.reset(new WavFile(FLAGS_input_file + ".wav",
input_sample_rate,
num_input_channels));
output_wav_file.reset(new WavFile(FLAGS_output_wav_file,
output_wav_file.reset(new WavFile(FLAGS_output_file + ".wav",
output_sample_rate,
num_output_channels));
}
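
A note on the float path: the scaling loop in WriteFloatData() maps float samples in [-1, 1] to the int16 range before they reach the WAV writer (raw .float files keep the unscaled values). A standalone sketch of that mapping, with an illustrative function name, pending the ScaleToInt16Range() helper mentioned in the TODO:

#include <limits>

#include "webrtc/typedefs.h"

// Mirrors the per-sample scaling in WriteFloatData(): positive samples are
// multiplied by int16 max (32767); negative samples end up multiplied by
// 32768 (the magnitude of int16 min), so -1.0f maps to -32768.
static inline float ScaleFloatSampleToInt16Range(float sample) {
  return sample > 0 ? sample * std::numeric_limits<int16_t>::max()
                    : -sample * std::numeric_limits<int16_t>::min();
}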