Add tests and modify tools for new float deinterleaved interface.
- Add an Initialize() overload to allow specification of format parameters.
  This is mainly useful for testing, but could be used in the cases where a
  consumer knows the format before the streams arrive.
- Add a reverse_sample_rate_hz_ parameter to prepare for mismatched capture
  and render rates. There is no functional change as it is currently
  constrained to match the capture rate.
- Fix a bug in the float dump: we need to use add_ rather than set_.
- Add a debug dump test for both int and float interfaces.
- Enable unpacking of float dumps.
- Enable audioproc to read float dumps.
- Move more shared functionality to test_utils.h, and generally tidy up a bit
  by consolidating repeated code.

BUG=2894
TESTED=Verified that the output produced by the float debug dump test is
correct. Processed the resulting debug dump file with audioproc and ensured
that we get identical output. (This is crucial, as we need to be able to
exactly reproduce online results offline.)

R=aluebs@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/9489004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5676 4adac7df-926f-26a2-2b94-8c16560cd09d
parent 3046b843b2
commit a8b97373d5
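
A note on the add_-versus-set_ fix called out above: for a proto2 repeated
bytes field, the generated set_foo(index, ...) accessor only overwrites an
element that already exists, while add_foo(...) appends a new one. The dump
code fills a freshly cleared audioproc::Stream message, so
set_input_channel(i, ...) indexed into an empty repeated field. A minimal
sketch of the distinction, assuming only the generated proto2 API for the
messages in debug.proto (the helper name is illustrative):

    #include "webrtc/audio_processing/debug.pb.h"

    // Illustrative helper: dump deinterleaved float channels into a Stream
    // message. On a fresh message the repeated field is empty, so elements
    // must be appended with add_, not indexed with set_.
    void DumpChannels(const float* const* data, int num_channels,
                      size_t channel_size, webrtc::audioproc::Stream* msg) {
      for (int i = 0; i < num_channels; ++i) {
        // Buggy: msg->set_input_channel(i, data[i], channel_size) assumes
        // element i already exists. Appending grows the field correctly:
        msg->add_input_channel(data[i], channel_size);
      }
    }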
resources/ref03.aecdump.sha1 (new file)

@@ -0,0 +1 @@
+04f9f47938efa99d0389672ff2d83c10f04a1752
audio_processing_impl.cc

@@ -104,8 +104,11 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config)
       event_msg_(new audioproc::Event()),
 #endif
       sample_rate_hz_(kSampleRate16kHz),
+      reverse_sample_rate_hz_(kSampleRate16kHz),
       split_sample_rate_hz_(kSampleRate16kHz),
       samples_per_channel_(kChunkSizeMs * sample_rate_hz_ / 1000),
+      reverse_samples_per_channel_(
+          kChunkSizeMs * reverse_sample_rate_hz_ / 1000),
       stream_delay_ms_(0),
       delay_offset_ms_(0),
       was_stream_delay_set_(false),
@@ -178,6 +181,19 @@ int AudioProcessingImpl::Initialize() {
   return InitializeLocked();
 }
 
+int AudioProcessingImpl::Initialize(int sample_rate_hz,
+                                    int reverse_sample_rate_hz,
+                                    int num_input_channels,
+                                    int num_output_channels,
+                                    int num_reverse_channels) {
+  CriticalSectionScoped crit_scoped(crit_);
+  return InitializeLocked(sample_rate_hz,
+                          reverse_sample_rate_hz,
+                          num_input_channels,
+                          num_output_channels,
+                          num_reverse_channels);
+}
+
 int AudioProcessingImpl::InitializeLocked() {
   if (render_audio_ != NULL) {
     delete render_audio_;
@@ -190,7 +206,7 @@ int AudioProcessingImpl::InitializeLocked() {
   }
 
   render_audio_ = new AudioBuffer(num_reverse_channels_,
-                                  samples_per_channel_);
+                                  reverse_samples_per_channel_);
   capture_audio_ = new AudioBuffer(num_input_channels_,
                                    samples_per_channel_);
 
@@ -215,6 +231,79 @@ int AudioProcessingImpl::InitializeLocked() {
   return kNoError;
 }
 
+int AudioProcessingImpl::InitializeLocked(int sample_rate_hz,
+                                          int reverse_sample_rate_hz,
+                                          int num_input_channels,
+                                          int num_output_channels,
+                                          int num_reverse_channels) {
+  if (sample_rate_hz != kSampleRate8kHz &&
+      sample_rate_hz != kSampleRate16kHz &&
+      sample_rate_hz != kSampleRate32kHz) {
+    return kBadSampleRateError;
+  }
+  if (reverse_sample_rate_hz != kSampleRate8kHz &&
+      reverse_sample_rate_hz != kSampleRate16kHz &&
+      reverse_sample_rate_hz != kSampleRate32kHz) {
+    return kBadSampleRateError;
+  }
+  // TODO(ajm): The reverse sample rate is constrained to be identical to the
+  // forward rate for now.
+  if (reverse_sample_rate_hz != sample_rate_hz) {
+    return kBadSampleRateError;
+  }
+  if (num_output_channels > num_input_channels) {
+    return kBadNumberChannelsError;
+  }
+  // Only mono and stereo supported currently.
+  if (num_input_channels > 2 || num_input_channels < 1 ||
+      num_output_channels > 2 || num_output_channels < 1 ||
+      num_reverse_channels > 2 || num_reverse_channels < 1) {
+    return kBadNumberChannelsError;
+  }
+  if (echo_control_mobile_->is_enabled() && sample_rate_hz > kSampleRate16kHz) {
+    LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
+    return kUnsupportedComponentError;
+  }
+
+  sample_rate_hz_ = sample_rate_hz;
+  reverse_sample_rate_hz_ = reverse_sample_rate_hz;
+  reverse_samples_per_channel_ = kChunkSizeMs * reverse_sample_rate_hz / 1000;
+  samples_per_channel_ = kChunkSizeMs * sample_rate_hz / 1000;
+  num_input_channels_ = num_input_channels;
+  num_output_channels_ = num_output_channels;
+  num_reverse_channels_ = num_reverse_channels;
+
+  if (sample_rate_hz_ == kSampleRate32kHz) {
+    split_sample_rate_hz_ = kSampleRate16kHz;
+  } else {
+    split_sample_rate_hz_ = sample_rate_hz_;
+  }
+
+  return InitializeLocked();
+}
+
+// Calls InitializeLocked() if any of the audio parameters have changed from
+// their current values.
+int AudioProcessingImpl::MaybeInitializeLocked(int sample_rate_hz,
+                                               int reverse_sample_rate_hz,
+                                               int num_input_channels,
+                                               int num_output_channels,
+                                               int num_reverse_channels) {
+  if (sample_rate_hz == sample_rate_hz_ &&
+      reverse_sample_rate_hz == reverse_sample_rate_hz_ &&
+      num_input_channels == num_input_channels_ &&
+      num_output_channels == num_output_channels_ &&
+      num_reverse_channels == num_reverse_channels_) {
+    return kNoError;
+  }
+
+  return InitializeLocked(sample_rate_hz,
+                          reverse_sample_rate_hz,
+                          num_input_channels,
+                          num_output_channels,
+                          num_reverse_channels);
+}
+
 void AudioProcessingImpl::SetExtraOptions(const Config& config) {
   CriticalSectionScoped crit_scoped(crit_);
   std::list<ProcessingComponent*>::iterator it;
@@ -316,51 +405,6 @@ bool AudioProcessingImpl::output_will_be_muted() const {
   return output_will_be_muted_;
 }
 
-// Calls InitializeLocked() if any of the audio parameters have changed from
-// their current values.
-int AudioProcessingImpl::MaybeInitializeLocked(int sample_rate_hz,
-    int num_input_channels, int num_output_channels, int num_reverse_channels) {
-  if (sample_rate_hz == sample_rate_hz_ &&
-      num_input_channels == num_input_channels_ &&
-      num_output_channels == num_output_channels_ &&
-      num_reverse_channels == num_reverse_channels_) {
-    return kNoError;
-  }
-
-  if (sample_rate_hz != kSampleRate8kHz &&
-      sample_rate_hz != kSampleRate16kHz &&
-      sample_rate_hz != kSampleRate32kHz) {
-    return kBadSampleRateError;
-  }
-  if (num_output_channels > num_input_channels) {
-    return kBadNumberChannelsError;
-  }
-  // Only mono and stereo supported currently.
-  if (num_input_channels > 2 || num_input_channels < 1 ||
-      num_output_channels > 2 || num_output_channels < 1 ||
-      num_reverse_channels > 2 || num_reverse_channels < 1) {
-    return kBadNumberChannelsError;
-  }
-  if (echo_control_mobile_->is_enabled() && sample_rate_hz > kSampleRate16kHz) {
-    LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
-    return kUnsupportedComponentError;
-  }
-
-  sample_rate_hz_ = sample_rate_hz;
-  samples_per_channel_ = kChunkSizeMs * sample_rate_hz / 1000;
-  num_input_channels_ = num_input_channels;
-  num_output_channels_ = num_output_channels;
-  num_reverse_channels_ = num_reverse_channels;
-
-  if (sample_rate_hz_ == kSampleRate32kHz) {
-    split_sample_rate_hz_ = kSampleRate16kHz;
-  } else {
-    split_sample_rate_hz_ = sample_rate_hz_;
-  }
-
-  return InitializeLocked();
-}
-
 int AudioProcessingImpl::ProcessStream(float* const* data,
                                        int samples_per_channel,
                                        int sample_rate_hz,
@@ -374,7 +418,9 @@ int AudioProcessingImpl::ProcessStream(float* const* data,
   const int num_input_channels = ChannelsFromLayout(input_layout);
   // TODO(ajm): We now always set the output channels equal to the input
   // channels here. Restore the ability to downmix.
-  RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz,
+  // TODO(ajm): The reverse sample rate is constrained to be identical to the
+  // forward rate for now.
+  RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz, sample_rate_hz,
       num_input_channels, num_input_channels, num_reverse_channels_));
   if (samples_per_channel != samples_per_channel_) {
     return kBadDataLengthError;
@@ -386,7 +432,7 @@ int AudioProcessingImpl::ProcessStream(float* const* data,
     audioproc::Stream* msg = event_msg_->mutable_stream();
     const size_t channel_size = sizeof(float) * samples_per_channel;
     for (int i = 0; i < num_input_channels; ++i)
-      msg->set_input_channel(i, data[i], channel_size);
+      msg->add_input_channel(data[i], channel_size);
   }
 #endif
 
@@ -401,7 +447,7 @@ int AudioProcessingImpl::ProcessStream(float* const* data,
     audioproc::Stream* msg = event_msg_->mutable_stream();
     const size_t channel_size = sizeof(float) * samples_per_channel;
     for (int i = 0; i < num_output_channels_; ++i)
-      msg->set_output_channel(i, data[i], channel_size);
+      msg->add_output_channel(data[i], channel_size);
     RETURN_ON_ERR(WriteMessageToDebugFile());
   }
 #endif
@@ -417,8 +463,11 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
 
   // TODO(ajm): We now always set the output channels equal to the input
   // channels here. Restore the ability to downmix.
+  // TODO(ajm): The reverse sample rate is constrained to be identical to the
+  // forward rate for now.
   RETURN_ON_ERR(MaybeInitializeLocked(frame->sample_rate_hz_,
-      frame->num_channels_, frame->num_channels_, num_reverse_channels_));
+      frame->sample_rate_hz_, frame->num_channels_, frame->num_channels_,
+      num_reverse_channels_));
   if (frame->samples_per_channel_ != samples_per_channel_) {
     return kBadDataLengthError;
   }
@@ -526,9 +575,11 @@ int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
   }
 
   const int num_channels = ChannelsFromLayout(layout);
-  RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz_, num_input_channels_,
-                                      num_output_channels_, num_channels));
-  if (samples_per_channel != samples_per_channel_) {
+  // TODO(ajm): The reverse sample rate is constrained to be identical to the
+  // forward rate for now.
+  RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz_, sample_rate_hz_,
+      num_input_channels_, num_output_channels_, num_channels));
+  if (samples_per_channel != reverse_samples_per_channel_) {
     return kBadDataLengthError;
   }
 
@@ -538,7 +589,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
     audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
     const size_t channel_size = sizeof(float) * samples_per_channel;
     for (int i = 0; i < num_channels; ++i)
-      msg->set_channel(i, data[i], channel_size);
+      msg->add_channel(data[i], channel_size);
     RETURN_ON_ERR(WriteMessageToDebugFile());
   }
 #endif
@@ -555,9 +606,12 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
   if (frame->sample_rate_hz_ != sample_rate_hz_) {
     return kBadSampleRateError;
   }
-  RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz_, num_input_channels_,
-                                      num_output_channels_, frame->num_channels_));
-  if (frame->samples_per_channel_ != samples_per_channel_) {
+
+  // TODO(ajm): The reverse sample rate is constrained to be identical to the
+  // forward rate for now.
+  RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz_, sample_rate_hz_,
+      num_input_channels_, num_output_channels_, frame->num_channels_));
+  if (frame->samples_per_channel_ != reverse_samples_per_channel_) {
     return kBadDataLengthError;
   }
 
@@ -832,6 +886,7 @@ int AudioProcessingImpl::WriteInitMessage() {
   msg->set_num_input_channels(num_input_channels_);
   msg->set_num_output_channels(num_output_channels_);
   msg->set_num_reverse_channels(num_reverse_channels_);
+  msg->set_reverse_sample_rate(reverse_sample_rate_hz_);
 
   int err = WriteMessageToDebugFile();
   if (err != kNoError) {
audio_processing_impl.h

@@ -46,6 +46,11 @@ class AudioProcessingImpl : public AudioProcessing {
 
   // AudioProcessing methods.
   virtual int Initialize() OVERRIDE;
+  virtual int Initialize(int sample_rate_hz,
+                         int reverse_sample_rate_hz,
+                         int num_input_channels,
+                         int num_output_channels,
+                         int num_reverse_channels) OVERRIDE;
   virtual void SetExtraOptions(const Config& config) OVERRIDE;
   virtual int EnableExperimentalNs(bool enable) OVERRIDE;
   virtual bool experimental_ns_enabled() const OVERRIDE {
@@ -93,11 +98,20 @@ class AudioProcessingImpl : public AudioProcessing {
   virtual VoiceDetection* voice_detection() const OVERRIDE;
 
  protected:
  // Overridden in a mock.
   virtual int InitializeLocked();
 
  private:
-  int MaybeInitializeLocked(int sample_rate_hz, int num_input_channels,
-                            int num_output_channels, int num_reverse_channels);
+  int InitializeLocked(int sample_rate_hz,
+                       int reverse_sample_rate_hz,
+                       int num_input_channels,
+                       int num_output_channels,
+                       int num_reverse_channels);
+  int MaybeInitializeLocked(int sample_rate_hz,
+                            int reverse_sample_rate_hz,
+                            int num_input_channels,
+                            int num_output_channels,
+                            int num_reverse_channels);
   int ProcessStreamLocked();
   int AnalyzeReverseStreamLocked();
 
@@ -129,8 +143,10 @@ class AudioProcessingImpl : public AudioProcessing {
 #endif
 
   int sample_rate_hz_;
+  int reverse_sample_rate_hz_;
   int split_sample_rate_hz_;
   int samples_per_channel_;
+  int reverse_samples_per_channel_;
   int stream_delay_ms_;
   int delay_offset_ms_;
   bool was_stream_delay_set_;
audio_processing_impl_unittest.cc

@@ -10,6 +10,7 @@
 
 #include "webrtc/modules/audio_processing/audio_processing_impl.h"
 
 #include "webrtc/config.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_processing/test/test_utils.h"
@@ -44,23 +45,23 @@ TEST(AudioProcessingImplTest, AudioParameterChangeTriggersInit) {
   SetFrameSampleRate(&frame, 16000);
   EXPECT_CALL(mock, InitializeLocked())
       .Times(0);
-  EXPECT_EQ(kNoErr, mock.ProcessStream(&frame));
-  EXPECT_EQ(kNoErr, mock.AnalyzeReverseStream(&frame));
+  EXPECT_NOERR(mock.ProcessStream(&frame));
+  EXPECT_NOERR(mock.AnalyzeReverseStream(&frame));
 
   // New sample rate. (Only impacts ProcessStream).
   SetFrameSampleRate(&frame, 32000);
   EXPECT_CALL(mock, InitializeLocked())
       .Times(1);
-  EXPECT_EQ(kNoErr, mock.ProcessStream(&frame));
+  EXPECT_NOERR(mock.ProcessStream(&frame));
 
   // New number of channels.
   frame.num_channels_ = 2;
   EXPECT_CALL(mock, InitializeLocked())
       .Times(2);
-  EXPECT_EQ(kNoErr, mock.ProcessStream(&frame));
+  EXPECT_NOERR(mock.ProcessStream(&frame));
   // ProcessStream sets num_channels_ == num_output_channels.
   frame.num_channels_ = 2;
-  EXPECT_EQ(kNoErr, mock.AnalyzeReverseStream(&frame));
+  EXPECT_NOERR(mock.AnalyzeReverseStream(&frame));
 
   // A new sample rate passed to AnalyzeReverseStream should be an error and
   // not cause an init.
debug.proto

@@ -8,6 +8,7 @@ message Init {
   optional int32 num_input_channels = 3;
   optional int32 num_output_channels = 4;
   optional int32 num_reverse_channels = 5;
+  optional int32 reverse_sample_rate = 6;
 }
 
 // May contain interleaved or deinterleaved data, but don't store both formats.
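
Since the new field is optional, dumps recorded before this change simply lack
it, and proto2 readers can detect that and fall back to the forward rate
(which the two rates are currently constrained to match). Both consumers
updated in this change use the pattern below; the free-standing helper is just
a sketch:

    #include "webrtc/audio_processing/debug.pb.h"

    // Backward-compatible read: older dumps carry no reverse_sample_rate,
    // so fall back to the forward rate recorded in the same Init message.
    int ReverseSampleRate(const webrtc::audioproc::Init& msg) {
      return msg.has_reverse_sample_rate() ? msg.reverse_sample_rate()
                                           : msg.sample_rate();
    }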
include/audio_processing.h

@@ -164,6 +164,11 @@ class AudioProcessing {
   // rate and number of channels) have changed. Passing updated parameters
   // directly to |ProcessStream()| and |AnalyzeReverseStream()| is permissible.
   virtual int Initialize() = 0;
+  virtual int Initialize(int sample_rate_hz,
+                         int reverse_sample_rate_hz,
+                         int num_input_channels,
+                         int num_output_channels,
+                         int num_reverse_channels) = 0;
 
   // Pass down additional options which don't have explicit setters. This
   // ensures the options are applied immediately.
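
A usage sketch for the new overload, with illustrative values: per the checks
in InitializeLocked(), each rate must be 8000, 16000, or 32000 Hz, the reverse
rate must currently equal the forward rate, and channel counts must be 1 or 2.

    #include "webrtc/modules/audio_processing/include/audio_processing.h"

    // Sketch: fix the format up front instead of letting the first
    // ProcessStream()/AnalyzeReverseStream() call trigger the init.
    void ConfigureApm(webrtc::AudioProcessing* apm) {
      int err = apm->Initialize(32000,  // sample_rate_hz
                                32000,  // reverse_sample_rate_hz (must match)
                                2,      // num_input_channels
                                2,      // num_output_channels
                                2);     // num_reverse_channels
      if (err != webrtc::AudioProcessing::kNoError) {
        // Handle the error, e.g. fall back to the parameter-free Initialize().
      }
    }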
include/mock_audio_processing.h

@@ -181,6 +181,12 @@ class MockAudioProcessing : public AudioProcessing {
 
   MOCK_METHOD0(Initialize,
       int());
+  MOCK_METHOD5(Initialize,
+      int(int sample_rate_hz,
+          int reverse_sample_rate_hz,
+          int num_input_channels,
+          int num_output_channels,
+          int num_reverse_channels));
   MOCK_METHOD1(SetExtraOptions,
       void(const Config& config));
   MOCK_METHOD1(EnableExperimentalNs,
test/audio_processing_unittest.cc

@@ -9,7 +9,6 @@
  */
 
-#include <stdio.h>
 
 #include <algorithm>
 #include <queue>
 
@@ -36,8 +35,6 @@
 # define WEBRTC_AUDIOPROC_BIT_EXACT
 #endif
 
-#define EXPECT_NOERR(expr) EXPECT_EQ(AudioProcessing::kNoError, expr)
-
 namespace webrtc {
 namespace {
 
@@ -64,27 +61,17 @@ const int kProcessSampleRates[] = {8000, 16000, 32000};
 const size_t kProcessSampleRatesSize = sizeof(kProcessSampleRates) /
     sizeof(*kProcessSampleRates);
 
-// Helper to encapsulate a contiguous data buffer with access to a pointer
-// array of the deinterleaved channels.
-template <typename T>
-class ChannelBuffer {
- public:
-  ChannelBuffer(int samples_per_channel, int num_channels)
-      : data_(new T[samples_per_channel * num_channels]),
-        channels_(new T*[num_channels]) {
-    memset(data_.get(), 0, sizeof(T) * samples_per_channel * num_channels);
-    for (int i = 0; i < num_channels; ++i)
-      channels_[i] = &data_[i * samples_per_channel];
-  }
-  ~ChannelBuffer() {}
-
-  T* data() { return data_.get(); }
-  T** channels() { return channels_.get(); }
-
- private:
-  scoped_ptr<T[]> data_;
-  scoped_ptr<T*[]> channels_;
-};
+void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) {
+  ChannelBuffer<int16_t> cb_int(frame.samples_per_channel_,
+                                frame.num_channels_);
+  Deinterleave(frame.data_,
+               frame.samples_per_channel_,
+               frame.num_channels_,
+               cb_int.channels());
+  ScaleToFloat(cb_int.data(),
+               frame.samples_per_channel_ * frame.num_channels_,
+               cb->data());
+}
 
 int TruncateToMultipleOf10(int value) {
   return (value / 10) * 10;
@@ -148,18 +135,6 @@ bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
   return true;
 }
 
-AudioProcessing::ChannelLayout LayoutFromChannels(int num_channels) {
-  switch (num_channels) {
-    case 1:
-      return AudioProcessing::kMono;
-    case 2:
-      return AudioProcessing::kStereo;
-    default:
-      assert(false);
-      return AudioProcessing::kMono;
-  }
-}
-
 void EnableAllAPComponents(AudioProcessing* ap) {
 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
   EXPECT_NOERR(ap->echo_control_mobile()->Enable(true));
@@ -211,48 +186,36 @@ void TestStats(const AudioProcessing::Statistic& test,
 }
 
 void WriteStatsMessage(const AudioProcessing::Statistic& output,
-                       audioproc::Test::Statistic* message) {
-  message->set_instant(output.instant);
-  message->set_average(output.average);
-  message->set_maximum(output.maximum);
-  message->set_minimum(output.minimum);
+                       audioproc::Test::Statistic* msg) {
+  msg->set_instant(output.instant);
+  msg->set_average(output.average);
+  msg->set_maximum(output.maximum);
+  msg->set_minimum(output.minimum);
 }
-#endif
 
-void WriteMessageLiteToFile(const std::string filename,
-                            const ::google::protobuf::MessageLite& message) {
+void OpenFileAndWriteMessage(const std::string filename,
+                             const ::google::protobuf::MessageLite& msg) {
   FILE* file = fopen(filename.c_str(), "wb");
-  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
-  int size = message.ByteSize();
+  ASSERT_TRUE(file != NULL);
+
+  int32_t size = msg.ByteSize();
   ASSERT_GT(size, 0);
-  unsigned char* array = new unsigned char[size];
-  ASSERT_TRUE(message.SerializeToArray(array, size));
+  scoped_ptr<uint8_t[]> array(new uint8_t[size]);
+  ASSERT_TRUE(msg.SerializeToArray(array.get(), size));
 
-  ASSERT_EQ(1u, fwrite(&size, sizeof(int), 1, file));
+  ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file));
   ASSERT_EQ(static_cast<size_t>(size),
-      fwrite(array, sizeof(unsigned char), size, file));
-
-  delete [] array;
+      fwrite(array.get(), sizeof(array[0]), size, file));
   fclose(file);
 }
+#endif  // WEBRTC_AUDIOPROC_BIT_EXACT
 
-void ReadMessageLiteFromFile(const std::string filename,
-                             ::google::protobuf::MessageLite* message) {
-  assert(message != NULL);
-
+void OpenFileAndReadMessage(const std::string filename,
+                            ::google::protobuf::MessageLite* msg) {
   FILE* file = fopen(filename.c_str(), "rb");
-  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
-  int size = 0;
-  ASSERT_EQ(1u, fread(&size, sizeof(int), 1, file));
-  ASSERT_GT(size, 0);
-  unsigned char* array = new unsigned char[size];
-  ASSERT_EQ(static_cast<size_t>(size),
-      fread(array, sizeof(unsigned char), size, file));
-
-  ASSERT_TRUE(message->ParseFromArray(array, size));
-
-  delete [] array;
+  ASSERT_TRUE(file != NULL);
+  ReadMessageFromFile(file, msg);
   fclose(file);
 }
@@ -272,8 +235,17 @@ class ApmTest : public ::testing::Test {
     Trace::ReturnTrace();
   }
 
-  void Init(int sample_rate_hz, int num_reverse_channels,
-            int num_input_channels, int num_output_channels,
+  // Used to select between int and float interface tests.
+  enum Format {
+    kIntFormat,
+    kFloatFormat
+  };
+
+  void Init(int sample_rate_hz,
+            int reverse_sample_rate_hz,
+            int num_reverse_channels,
+            int num_input_channels,
+            int num_output_channels,
             bool open_output_file);
   void Init(AudioProcessing* ap);
   std::string ResourceFilePath(std::string name, int sample_rate_hz);
@@ -295,10 +267,14 @@ class ApmTest : public ::testing::Test {
                                   AudioProcessing::Error expected_return);
   void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate);
   void RunManualVolumeChangeIsPossibleTest(int sample_rate);
-  void StreamParametersTest(bool int_format);
-  void SampleRatesTest(bool int_format);
-  int ProcessStreamChooser(bool int_format);
-  int AnalyzeReverseStreamChooser(bool int_format);
+  void StreamParametersTest(Format format);
+  void SampleRatesTest(Format format);
+  int ProcessStreamChooser(Format format);
+  int AnalyzeReverseStreamChooser(Format format);
+  void ProcessDebugDump(const std::string& in_filename,
+                        const std::string& out_filename,
+                        Format format);
+  void VerifyDebugDumpTest(Format format);
 
   const std::string output_path_;
   const std::string ref_path_;
@@ -308,6 +284,7 @@ class ApmTest : public ::testing::Test {
   AudioFrame* revframe_;
   scoped_ptr<ChannelBuffer<float> > float_cb_;
   scoped_ptr<ChannelBuffer<float> > revfloat_cb_;
+  int num_output_channels_;
   FILE* far_file_;
   FILE* near_file_;
   FILE* out_file_;
@@ -323,6 +300,7 @@ ApmTest::ApmTest()
 #endif
       frame_(NULL),
       revframe_(NULL),
+      num_output_channels_(0),
      far_file_(NULL),
       near_file_(NULL),
       out_file_(NULL) {
@@ -338,9 +316,9 @@ void ApmTest::SetUp() {
   revframe_ = new AudioFrame();
 
 #if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
-  Init(16000, 2, 2, 2, false);
+  Init(16000, 16000, 2, 2, 2, false);
 #else
-  Init(32000, 2, 2, 2, false);
+  Init(32000, 32000, 2, 2, 2, false);
 #endif
 }
 
@@ -400,29 +378,24 @@ std::string ApmTest::OutputFilePath(std::string name,
 }
 
 void ApmTest::Init(AudioProcessing* ap) {
-  // Make one process call to ensure the audio parameters are set. It might
-  // result in a stream error which we can safely ignore.
-  int err = ap->ProcessStream(frame_);
-  ASSERT_TRUE(err == kNoErr || err == apm_->kStreamParameterNotSetError);
-  ASSERT_EQ(ap->kNoError, ap->Initialize());
+  ASSERT_EQ(ap->kNoError, ap->Initialize(frame_->sample_rate_hz_,
+                                         revframe_->sample_rate_hz_,
+                                         frame_->num_channels_,
+                                         num_output_channels_,
+                                         revframe_->num_channels_));
 }
 
-void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
-                   int num_input_channels, int num_output_channels,
+void ApmTest::Init(int sample_rate_hz,
+                   int reverse_sample_rate_hz,
+                   int num_input_channels,
+                   int num_output_channels,
+                   int num_reverse_channels,
                    bool open_output_file) {
-  // We always use 10 ms frames.
-  const int samples_per_channel = kChunkSizeMs * sample_rate_hz / 1000;
-  frame_->samples_per_channel_ = samples_per_channel;
-  frame_->num_channels_ = num_input_channels;
-  frame_->sample_rate_hz_ = sample_rate_hz;
-  float_cb_.reset(new ChannelBuffer<float>(samples_per_channel,
-                                           num_input_channels));
-  revframe_->samples_per_channel_ = samples_per_channel;
-  revframe_->num_channels_ = num_reverse_channels;
-  revframe_->sample_rate_hz_ = sample_rate_hz;
-  revfloat_cb_.reset(new ChannelBuffer<float>(samples_per_channel,
-                                              num_reverse_channels));
+  SetContainerFormat(sample_rate_hz, num_input_channels, frame_, &float_cb_);
+  num_output_channels_ = num_output_channels;
+
+  SetContainerFormat(reverse_sample_rate_hz, num_reverse_channels, revframe_,
+                     &revfloat_cb_);
   Init(apm_.get());
 
   if (far_file_) {
@@ -476,17 +449,8 @@ bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame,
                               frame->samples_per_channel_);
   }
 
-  // Convert to deinterleaved float.
   if (cb) {
-    ChannelBuffer<int16_t> cb_int(frame->samples_per_channel_,
-                                  frame->num_channels_);
-    Deinterleave(frame->data_,
-                 frame->samples_per_channel_,
-                 frame->num_channels_,
-                 cb_int.channels());
-    ScaleToFloat(cb_int.data(),
-                 frame->samples_per_channel_ * frame->num_channels_,
-                 cb->data());
+    ConvertToFloat(*frame, cb);
   }
   return true;
 }
@@ -517,8 +481,8 @@ void ApmTest::ProcessWithDefaultStreamParameters(AudioFrame* frame) {
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame));
 }
 
-int ApmTest::ProcessStreamChooser(bool int_format) {
-  if (int_format) {
+int ApmTest::ProcessStreamChooser(Format format) {
+  if (format == kIntFormat) {
     return apm_->ProcessStream(frame_);
   }
   // TODO(ajm): Update to match the number of output channels when supported.
@@ -529,11 +493,10 @@
       LayoutFromChannels(frame_->num_channels_));
 }
 
-int ApmTest::AnalyzeReverseStreamChooser(bool int_format) {
-  if (int_format) {
+int ApmTest::AnalyzeReverseStreamChooser(Format format) {
+  if (format == kIntFormat) {
     return apm_->AnalyzeReverseStream(revframe_);
   }
-  // TODO(ajm): Update to match the number of output channels when supported.
   return apm_->AnalyzeReverseStream(
       revfloat_cb_->channels(),
       revframe_->samples_per_channel_,
@@ -628,21 +591,21 @@ void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
   EXPECT_LE(expected_median_low, median);
 }
 
-void ApmTest::StreamParametersTest(bool int_format) {
+void ApmTest::StreamParametersTest(Format format) {
   // No errors when the components are disabled.
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(int_format));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
 
   // -- Missing AGC level --
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            ProcessStreamChooser(int_format));
+            ProcessStreamChooser(format));
 
   // Resets after successful ProcessStream().
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_stream_analog_level(127));
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(frame_));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            ProcessStreamChooser(int_format));
+            ProcessStreamChooser(format));
 
   // Other stream parameters set correctly.
   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
@@ -651,22 +614,22 @@
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
   apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            ProcessStreamChooser(int_format));
+            ProcessStreamChooser(format));
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_cancellation()->enable_drift_compensation(false));
 
   // -- Missing delay --
   EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(int_format));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            ProcessStreamChooser(int_format));
+            ProcessStreamChooser(format));
 
   // Resets after successful ProcessStream().
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(int_format));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            ProcessStreamChooser(int_format));
+            ProcessStreamChooser(format));
 
   // Other stream parameters set correctly.
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
@@ -676,19 +639,19 @@
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_stream_analog_level(127));
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            ProcessStreamChooser(int_format));
+            ProcessStreamChooser(format));
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
 
   // -- Missing drift --
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            ProcessStreamChooser(int_format));
+            ProcessStreamChooser(format));
 
   // Resets after successful ProcessStream().
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
   apm_->echo_cancellation()->set_stream_drift_samples(0);
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(int_format));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            ProcessStreamChooser(int_format));
+            ProcessStreamChooser(format));
 
   // Other stream parameters set correctly.
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
@@ -696,28 +659,28 @@
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_stream_analog_level(127));
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            ProcessStreamChooser(int_format));
+            ProcessStreamChooser(format));
 
   // -- No stream parameters --
   EXPECT_EQ(apm_->kNoError,
-            AnalyzeReverseStreamChooser(int_format));
+            AnalyzeReverseStreamChooser(format));
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
-            ProcessStreamChooser(int_format));
+            ProcessStreamChooser(format));
 
   // -- All there --
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
   apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_stream_analog_level(127));
-  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(int_format));
+  EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(format));
 }
 
 TEST_F(ApmTest, StreamParametersInt) {
-  StreamParametersTest(true);
+  StreamParametersTest(kIntFormat);
 }
 
 TEST_F(ApmTest, StreamParametersFloat) {
-  StreamParametersTest(false);
+  StreamParametersTest(kFloatFormat);
 }
 
 TEST_F(ApmTest, DefaultDelayOffsetIsZero) {
@@ -763,25 +726,25 @@ TEST_F(ApmTest, Channels) {
   }
 }
 
-void ApmTest::SampleRatesTest(bool int_format) {
+void ApmTest::SampleRatesTest(Format format) {
   // Testing invalid sample rates
-  SetFrameSampleRate(frame_, 10000);
-  EXPECT_EQ(apm_->kBadSampleRateError, ProcessStreamChooser(int_format));
+  SetContainerFormat(10000, 2, frame_, &float_cb_);
+  EXPECT_EQ(apm_->kBadSampleRateError, ProcessStreamChooser(format));
   // Testing valid sample rates
   int fs[] = {8000, 16000, 32000};
   for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
-    SetFrameSampleRate(frame_, fs[i]);
-    EXPECT_EQ(kNoErr, ProcessStreamChooser(int_format));
+    SetContainerFormat(fs[i], 2, frame_, &float_cb_);
+    EXPECT_NOERR(ProcessStreamChooser(format));
     EXPECT_EQ(fs[i], apm_->sample_rate_hz());
   }
 }
 
 TEST_F(ApmTest, SampleRatesInt) {
-  SampleRatesTest(true);
+  SampleRatesTest(kIntFormat);
 }
 
 TEST_F(ApmTest, SampleRatesFloat) {
-  SampleRatesTest(false);
+  SampleRatesTest(kFloatFormat);
 }
 
 TEST_F(ApmTest, EchoCancellation) {
@@ -882,7 +845,7 @@ TEST_F(ApmTest, EchoCancellationReportsCorrectDelays) {
   // within a valid region (set to +-1.5 blocks). Note that these cases are
   // sampling frequency dependent.
   for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
-    Init(kProcessSampleRates[i], 2, 2, 2, false);
+    Init(kProcessSampleRates[i], kProcessSampleRates[i], 2, 2, 2, false);
     // Sampling frequency dependent variables.
     const int num_ms_per_block = std::max(4,
                                           640 / frame_->samples_per_channel_);
@@ -924,18 +887,18 @@
 TEST_F(ApmTest, EchoControlMobile) {
   // AECM won't use super-wideband.
   SetFrameSampleRate(frame_, 32000);
-  EXPECT_EQ(kNoErr, apm_->ProcessStream(frame_));
+  EXPECT_NOERR(apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kBadSampleRateError,
             apm_->echo_control_mobile()->Enable(true));
   SetFrameSampleRate(frame_, 16000);
-  EXPECT_EQ(kNoErr, apm_->ProcessStream(frame_));
+  EXPECT_NOERR(apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_control_mobile()->Enable(true));
   SetFrameSampleRate(frame_, 32000);
   EXPECT_EQ(apm_->kUnsupportedComponentError, apm_->ProcessStream(frame_));
 
   // Turn AECM on (and AEC off)
-  Init(16000, 2, 2, 2, false);
+  Init(16000, 16000, 2, 2, 2, false);
   EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
   EXPECT_TRUE(apm_->echo_control_mobile()->is_enabled());
 
@@ -1098,7 +1061,7 @@ TEST_F(ApmTest, GainControl) {
 }
 
 void ApmTest::RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate) {
-  Init(sample_rate, 2, 2, 2, false);
+  Init(sample_rate, sample_rate, 2, 2, 2, false);
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
@@ -1129,7 +1092,7 @@ TEST_F(ApmTest, QuantizedVolumeDoesNotGetStuck) {
 }
 
 void ApmTest::RunManualVolumeChangeIsPossibleTest(int sample_rate) {
-  Init(sample_rate, 2, 2, 2, false);
+  Init(sample_rate, sample_rate, 2, 2, 2, false);
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
@@ -1351,7 +1314,7 @@ TEST_F(ApmTest, AllProcessingDisabledByDefault) {
 
 TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) {
   for (size_t i = 0; i < kSampleRatesSize; i++) {
-    Init(kSampleRates[i], 2, 2, 2, false);
+    Init(kSampleRates[i], kSampleRates[i], 2, 2, 2, false);
     SetFrameTo(frame_, 1000, 2000);
     AudioFrame frame_copy;
     frame_copy.CopyFrom(*frame_);
@@ -1366,7 +1329,7 @@ TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
   EnableAllComponents();
 
   for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
-    Init(kProcessSampleRates[i], 2, 2, 2, false);
+    Init(kProcessSampleRates[i], kProcessSampleRates[i], 2, 2, 2, false);
     int analog_level = 127;
     EXPECT_EQ(0, feof(far_file_));
     EXPECT_EQ(0, feof(near_file_));
@@ -1463,6 +1426,128 @@ TEST_F(ApmTest, SplittingFilter) {
   EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
 }
 
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+void ApmTest::ProcessDebugDump(const std::string& in_filename,
+                               const std::string& out_filename,
+                               Format format) {
+  FILE* in_file = fopen(in_filename.c_str(), "rb");
+  ASSERT_TRUE(in_file != NULL);
+  audioproc::Event event_msg;
+  bool first_init = true;
+
+  while (ReadMessageFromFile(in_file, &event_msg)) {
+    if (event_msg.type() == audioproc::Event::INIT) {
+      const audioproc::Init msg = event_msg.init();
+      int reverse_sample_rate = msg.sample_rate();
+      if (msg.has_reverse_sample_rate()) {
+        reverse_sample_rate = msg.reverse_sample_rate();
+      }
+      Init(msg.sample_rate(),
+           reverse_sample_rate,
+           msg.num_input_channels(),
+           msg.num_output_channels(),
+           msg.num_reverse_channels(),
+           false);
+      if (first_init) {
+        // StartDebugRecording() writes an additional init message. Don't start
+        // recording until after the first init to avoid the extra message.
+        EXPECT_NOERR(apm_->StartDebugRecording(out_filename.c_str()));
+        first_init = false;
+      }
+
+    } else if (event_msg.type() == audioproc::Event::REVERSE_STREAM) {
+      const audioproc::ReverseStream msg = event_msg.reverse_stream();
+
+      if (msg.channel_size() > 0) {
+        ASSERT_EQ(revframe_->num_channels_, msg.channel_size());
+        for (int i = 0; i < msg.channel_size(); ++i) {
+          memcpy(revfloat_cb_->channel(i), msg.channel(i).data(),
+                 msg.channel(i).size());
+        }
+      } else {
+        memcpy(revframe_->data_, msg.data().data(), msg.data().size());
+        if (format == kFloatFormat) {
+          // We're using an int16 input file; convert to float.
+          ConvertToFloat(*revframe_, revfloat_cb_.get());
+        }
+      }
+      AnalyzeReverseStreamChooser(format);
+
+    } else if (event_msg.type() == audioproc::Event::STREAM) {
+      const audioproc::Stream msg = event_msg.stream();
+      // ProcessStream could have changed this for the output frame.
+      frame_->num_channels_ = apm_->num_input_channels();
+
+      EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(msg.level()));
+      EXPECT_NOERR(apm_->set_stream_delay_ms(msg.delay()));
+      apm_->echo_cancellation()->set_stream_drift_samples(msg.drift());
+      if (msg.has_keypress()) {
+        apm_->set_stream_key_pressed(msg.keypress());
+      } else {
+        apm_->set_stream_key_pressed(true);
+      }
+
+      if (msg.input_channel_size() > 0) {
+        ASSERT_EQ(frame_->num_channels_, msg.input_channel_size());
+        for (int i = 0; i < msg.input_channel_size(); ++i) {
+          memcpy(float_cb_->channel(i), msg.input_channel(i).data(),
+                 msg.input_channel(i).size());
+        }
+      } else {
+        memcpy(frame_->data_, msg.input_data().data(), msg.input_data().size());
+        if (format == kFloatFormat) {
+          // We're using an int16 input file; convert to float.
+          ConvertToFloat(*frame_, float_cb_.get());
+        }
+      }
+      ProcessStreamChooser(format);
+    }
+  }
+  EXPECT_NOERR(apm_->StopDebugRecording());
+  fclose(in_file);
+}
+
+void ApmTest::VerifyDebugDumpTest(Format format) {
+  const std::string in_filename = test::ResourcePath("ref03", "aecdump");
+  const std::string ref_filename = test::OutputPath() + "ref.aecdump";
+  const std::string out_filename = test::OutputPath() + "out.aecdump";
+  EnableAllComponents();
+  ProcessDebugDump(in_filename, ref_filename, format);
+  ProcessDebugDump(ref_filename, out_filename, format);
+
+  FILE* ref_file = fopen(ref_filename.c_str(), "rb");
+  FILE* out_file = fopen(out_filename.c_str(), "rb");
+  ASSERT_TRUE(ref_file != NULL);
+  ASSERT_TRUE(out_file != NULL);
+  scoped_ptr<uint8_t[]> ref_bytes;
+  scoped_ptr<uint8_t[]> out_bytes;
+
+  size_t ref_size = ReadMessageBytesFromFile(ref_file, &ref_bytes);
+  size_t out_size = ReadMessageBytesFromFile(out_file, &out_bytes);
+  size_t bytes_read = 0;
+  while (ref_size > 0 && out_size > 0) {
+    bytes_read += ref_size;
+    EXPECT_EQ(ref_size, out_size);
+    EXPECT_EQ(0, memcmp(ref_bytes.get(), out_bytes.get(), ref_size));
+    ref_size = ReadMessageBytesFromFile(ref_file, &ref_bytes);
+    out_size = ReadMessageBytesFromFile(out_file, &out_bytes);
+  }
+  EXPECT_GT(bytes_read, 0u);
+  EXPECT_NE(0, feof(ref_file));
+  EXPECT_NE(0, feof(out_file));
+  ASSERT_EQ(0, fclose(ref_file));
+  ASSERT_EQ(0, fclose(out_file));
+}
+
+TEST_F(ApmTest, VerifyDebugDumpInt) {
+  VerifyDebugDumpTest(kIntFormat);
+}
+
+TEST_F(ApmTest, VerifyDebugDumpFloat) {
+  VerifyDebugDumpTest(kFloatFormat);
+}
+#endif
 
 // TODO(andrew): expand test to verify output.
 TEST_F(ApmTest, DebugDump) {
   const std::string filename = test::OutputPath() + "debug.aec";
@@ -1530,7 +1615,7 @@ TEST_F(ApmTest, DebugDumpFromFileHandle) {
 
 TEST_F(ApmTest, FloatAndIntInterfacesGiveIdenticalResults) {
   audioproc::OutputData ref_data;
-  ReadMessageLiteFromFile(ref_filename_, &ref_data);
+  OpenFileAndReadMessage(ref_filename_, &ref_data);
 
   Config config;
   config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
@@ -1551,8 +1636,8 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveIdenticalResults) {
     const int samples_per_channel = test->sample_rate() * kChunkSizeMs / 1000;
     const int output_length = samples_per_channel * num_output_channels;
 
-    Init(test->sample_rate(), num_render_channels, num_input_channels,
-         num_output_channels, true);
+    Init(test->sample_rate(), test->sample_rate(), num_input_channels,
+         num_output_channels, num_render_channels, true);
     Init(fapm.get());
 
     ChannelBuffer<int16_t> output_cb(samples_per_channel, num_input_channels);
@@ -1623,7 +1708,7 @@ TEST_F(ApmTest, DISABLED_ON_ANDROID(Process)) {
   audioproc::OutputData ref_data;
 
   if (!write_ref_data) {
-    ReadMessageLiteFromFile(ref_filename_, &ref_data);
+    OpenFileAndReadMessage(ref_filename_, &ref_data);
   } else {
     // Write the desired tests to the protobuf reference file.
     for (size_t i = 0; i < kChannelsSize; i++) {
@@ -1650,8 +1735,8 @@ TEST_F(ApmTest, DISABLED_ON_ANDROID(Process)) {
     if (test->num_input_channels() != test->num_output_channels())
      continue;
 
-    Init(test->sample_rate(), test->num_reverse_channels(),
-         test->num_input_channels(), test->num_output_channels(), true);
+    Init(test->sample_rate(), test->sample_rate(), test->num_input_channels(),
+         test->num_output_channels(), test->num_reverse_channels(), true);
 
     int frame_count = 0;
     int has_echo_count = 0;
@@ -1791,7 +1876,7 @@ TEST_F(ApmTest, DISABLED_ON_ANDROID(Process)) {
   }
 
   if (write_ref_data) {
-    WriteMessageLiteToFile(ref_filename_, ref_data);
+    OpenFileAndWriteMessage(ref_filename_, ref_data);
   }
 }
 #endif  // WEBRTC_AUDIOPROC_BIT_EXACT
@ -19,6 +19,7 @@
|
||||
|
||||
#include "webrtc/common.h"
|
||||
#include "webrtc/modules/audio_processing/include/audio_processing.h"
|
||||
#include "webrtc/modules/audio_processing/test/test_utils.h"
|
||||
#include "webrtc/modules/interface/module_common_types.h"
|
||||
#include "webrtc/system_wrappers/interface/cpu_features_wrapper.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
@ -33,18 +34,7 @@
|
||||
#include "webrtc/audio_processing/debug.pb.h"
|
||||
#endif
|
||||
|
||||
using webrtc::AudioFrame;
|
||||
using webrtc::AudioProcessing;
|
||||
using webrtc::Config;
|
||||
using webrtc::DelayCorrection;
|
||||
using webrtc::EchoCancellation;
|
||||
using webrtc::GainControl;
|
||||
using webrtc::NoiseSuppression;
|
||||
using webrtc::scoped_array;
|
||||
using webrtc::scoped_ptr;
|
||||
using webrtc::TickInterval;
|
||||
using webrtc::TickTime;
|
||||
using webrtc::VoiceDetection;
|
||||
namespace webrtc {
|
||||
|
||||
using webrtc::audioproc::Event;
|
||||
using webrtc::audioproc::Init;
|
||||
@ -52,28 +42,6 @@ using webrtc::audioproc::ReverseStream;
|
||||
using webrtc::audioproc::Stream;
|
||||
|
||||
namespace {
|
||||
// Returns true on success, false on error or end-of-file.
|
||||
bool ReadMessageFromFile(FILE* file,
|
||||
::google::protobuf::MessageLite* msg) {
|
||||
// The "wire format" for the size is little-endian.
|
||||
// Assume process_test is running on a little-endian machine.
|
||||
int32_t size = 0;
|
||||
if (fread(&size, sizeof(int32_t), 1, file) != 1) {
|
||||
return false;
|
||||
}
|
||||
if (size <= 0) {
|
||||
return false;
|
||||
}
|
||||
const size_t usize = static_cast<size_t>(size);
|
||||
|
||||
scoped_array<char> array(new char[usize]);
|
||||
if (fread(array.get(), sizeof(char), usize, file) != usize) {
|
||||
return false;
|
||||
}
|
||||
|
||||
msg->Clear();
|
||||
return msg->ParseFromArray(array.get(), usize);
|
||||
}
|
||||
|
||||
void PrintStat(const AudioProcessing::Statistic& stat) {
|
||||
printf("%d, %d, %d\n", stat.average,
|
||||
@ -87,11 +55,11 @@ void usage() {
|
||||
" [-ir REVERSE_FILE] [-i PRIMARY_FILE] [-o OUT_FILE]\n");
|
||||
printf(
|
||||
"process_test is a test application for AudioProcessing.\n\n"
|
||||
"When a protobuf debug file is available, specify it with -pb.\n"
|
||||
"Alternately, when -ir or -i is used, the specified files will be\n"
|
||||
"processed directly in a simulation mode. Otherwise the full set of\n"
|
||||
"legacy test files is expected to be present in the working directory.\n");
|
||||
printf("\n");
|
||||
"When a protobuf debug file is available, specify it with -pb. Alternately,\n"
|
||||
"when -ir or -i is used, the specified files will be processed directly in\n"
|
||||
"a simulation mode. Otherwise the full set of legacy test files is expected\n"
|
||||
"to be present in the working directory. OUT_FILE should be specified\n"
|
||||
"without extension to support both int and float output.\n\n");
|
||||
printf("Options\n");
|
||||
printf("General configuration (only used for the simulation mode):\n");
|
||||
printf(" -fs SAMPLE_RATE_HZ\n");
|
||||
@ -174,13 +142,15 @@ void void_main(int argc, char* argv[]) {
|
||||
printf("Try `process_test --help' for more information.\n\n");
|
||||
}
|
||||
|
||||
scoped_ptr<AudioProcessing> apm(AudioProcessing::Create(0));
|
||||
Config config;
|
||||
config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
|
||||
scoped_ptr<AudioProcessing> apm(AudioProcessing::Create(config));
|
||||
ASSERT_TRUE(apm.get() != NULL);
|
||||
|
||||
const char* pb_filename = NULL;
|
||||
const char* far_filename = NULL;
|
||||
const char* near_filename = NULL;
|
||||
const char* out_filename = NULL;
|
||||
std::string out_filename;
|
||||
const char* vad_out_filename = NULL;
|
||||
const char* ns_prob_filename = NULL;
|
||||
const char* aecm_echo_path_in_filename = NULL;
|
||||
@ -201,7 +171,6 @@ void void_main(int argc, char* argv[]) {
|
||||
bool progress = true;
|
||||
int extra_delay_ms = 0;
|
||||
int override_delay_ms = 0;
|
||||
//bool interleaved = true;
|
||||
|
||||
ASSERT_EQ(apm->kNoError, apm->level_estimator()->Enable(true));
|
||||
for (int i = 1; i < argc; i++) {
|
||||
@ -224,7 +193,7 @@ void void_main(int argc, char* argv[]) {
|
||||
|
||||
} else if (strcmp(argv[i], "-o") == 0) {
|
||||
i++;
|
||||
ASSERT_LT(i, argc) << "Specify filename after -o";
|
||||
ASSERT_LT(i, argc) << "Specify filename without extension after -o";
|
||||
out_filename = argv[i];
|
||||
|
||||
} else if (strcmp(argv[i], "-fs") == 0) {
|
||||
@ -476,7 +445,6 @@ void void_main(int argc, char* argv[]) {
|
||||
const std::string out_path = webrtc::test::OutputPath();
|
||||
const char far_file_default[] = "apm_far.pcm";
|
||||
const char near_file_default[] = "apm_near.pcm";
|
||||
const std::string out_file_default = out_path + "out.pcm";
|
||||
const char event_filename[] = "apm_event.dat";
|
||||
const char delay_filename[] = "apm_delay.dat";
|
||||
const char drift_filename[] = "apm_drift.dat";
|
||||
@ -488,9 +456,11 @@ void void_main(int argc, char* argv[]) {
|
||||
near_filename = near_file_default;
|
||||
}
|
||||
|
||||
if (!out_filename) {
|
||||
out_filename = out_file_default.c_str();
|
||||
if (out_filename.size() == 0) {
|
||||
out_filename = out_path + "out";
|
||||
}
|
||||
std::string out_float_filename = out_filename + ".float";
|
||||
out_filename += ".pcm";
|
||||
|
||||
if (!vad_out_filename) {
|
||||
vad_out_filename = vad_file_default.c_str();
|
||||
@ -503,7 +473,6 @@ void void_main(int argc, char* argv[]) {
|
||||
FILE* pb_file = NULL;
|
||||
FILE* far_file = NULL;
|
||||
FILE* near_file = NULL;
|
||||
FILE* out_file = NULL;
|
||||
FILE* event_file = NULL;
|
||||
FILE* delay_file = NULL;
|
||||
FILE* drift_file = NULL;
|
||||
@ -513,38 +482,20 @@ void void_main(int argc, char* argv[]) {
|
||||
FILE* aecm_echo_path_out_file = NULL;
|
||||
|
||||
if (pb_filename) {
|
||||
pb_file = fopen(pb_filename, "rb");
|
||||
ASSERT_TRUE(NULL != pb_file) << "Unable to open protobuf file "
|
||||
<< pb_filename;
|
||||
pb_file = OpenFile(pb_filename, "rb");
|
||||
} else {
|
||||
if (far_filename) {
|
||||
far_file = fopen(far_filename, "rb");
|
||||
ASSERT_TRUE(NULL != far_file) << "Unable to open far-end audio file "
|
||||
<< far_filename;
|
||||
far_file = OpenFile(far_filename, "rb");
|
||||
}
|
||||
|
||||
near_file = fopen(near_filename, "rb");
|
||||
ASSERT_TRUE(NULL != near_file) << "Unable to open near-end audio file "
|
||||
<< near_filename;
|
||||
near_file = OpenFile(near_filename, "rb");
|
||||
if (!simulating) {
|
||||
event_file = fopen(event_filename, "rb");
|
||||
ASSERT_TRUE(NULL != event_file) << "Unable to open event file "
|
||||
<< event_filename;
|
||||
|
||||
delay_file = fopen(delay_filename, "rb");
|
||||
ASSERT_TRUE(NULL != delay_file) << "Unable to open buffer file "
|
||||
<< delay_filename;
|
||||
|
||||
drift_file = fopen(drift_filename, "rb");
|
||||
ASSERT_TRUE(NULL != drift_file) << "Unable to open drift file "
|
||||
<< drift_filename;
|
||||
event_file = OpenFile(event_filename, "rb");
|
||||
delay_file = OpenFile(delay_filename, "rb");
|
||||
drift_file = OpenFile(drift_filename, "rb");
|
||||
}
|
||||
}
|
||||
|
||||
out_file = fopen(out_filename, "wb");
|
||||
ASSERT_TRUE(NULL != out_file) << "Unable to open output audio file "
|
||||
<< out_filename;
|
||||
|
||||
int near_size_bytes = 0;
|
||||
if (pb_file) {
|
||||
struct stat st;
|
||||
@ -558,21 +509,15 @@ void void_main(int argc, char* argv[]) {
|
||||
}
|
||||
|
||||
if (apm->voice_detection()->is_enabled()) {
|
||||
vad_out_file = fopen(vad_out_filename, "wb");
|
||||
ASSERT_TRUE(NULL != vad_out_file) << "Unable to open VAD output file "
|
||||
<< vad_out_file;
|
||||
vad_out_file = OpenFile(vad_out_filename, "wb");
|
||||
}
|
||||
|
||||
if (apm->noise_suppression()->is_enabled()) {
|
||||
ns_prob_file = fopen(ns_prob_filename, "wb");
|
||||
ASSERT_TRUE(NULL != ns_prob_file) << "Unable to open NS output file "
|
||||
<< ns_prob_file;
|
||||
ns_prob_file = OpenFile(ns_prob_filename, "wb");
|
||||
}
|
||||
|
||||
if (aecm_echo_path_in_filename != NULL) {
|
||||
aecm_echo_path_in_file = fopen(aecm_echo_path_in_filename, "rb");
|
||||
ASSERT_TRUE(NULL != aecm_echo_path_in_file) << "Unable to open file "
|
||||
<< aecm_echo_path_in_filename;
|
||||
aecm_echo_path_in_file = OpenFile(aecm_echo_path_in_filename, "rb");
|
||||
|
||||
const size_t path_size =
|
||||
apm->echo_control_mobile()->echo_path_size_bytes();
|
||||
@ -589,9 +534,7 @@ void void_main(int argc, char* argv[]) {
|
||||
}
|
||||
|
||||
if (aecm_echo_path_out_filename != NULL) {
|
||||
aecm_echo_path_out_file = fopen(aecm_echo_path_out_filename, "wb");
|
||||
ASSERT_TRUE(NULL != aecm_echo_path_out_file) << "Unable to open file "
|
||||
<< aecm_echo_path_out_filename;
|
||||
aecm_echo_path_out_file = OpenFile(aecm_echo_path_out_filename, "wb");
|
||||
}
|
||||
|
||||
size_t read_count = 0;
|
||||
@ -620,6 +563,8 @@ void void_main(int argc, char* argv[]) {
|
||||
// but for now we want to share the variables.
|
||||
if (pb_file) {
|
||||
Event event_msg;
|
||||
scoped_ptr<ChannelBuffer<float> > reverse_cb;
|
||||
scoped_ptr<ChannelBuffer<float> > primary_cb;
|
||||
while (ReadMessageFromFile(pb_file, &event_msg)) {
|
||||
std::ostringstream trace_stream;
|
||||
trace_stream << "Processed frames: " << reverse_count << " (reverse), "
|
||||
@ -631,17 +576,22 @@ void void_main(int argc, char* argv[]) {
|
||||
const Init msg = event_msg.init();
|
||||
|
||||
ASSERT_TRUE(msg.has_sample_rate());
|
||||
// TODO(bjornv): Replace set_sample_rate_hz() when we have a smarter
|
||||
// AnalyzeReverseStream().
|
||||
ASSERT_EQ(apm->kNoError, apm->set_sample_rate_hz(msg.sample_rate()));
|
||||
ASSERT_TRUE(msg.has_num_input_channels());
|
||||
ASSERT_TRUE(msg.has_num_output_channels());
|
||||
ASSERT_TRUE(msg.has_num_reverse_channels());
int reverse_sample_rate = msg.sample_rate();
if (msg.has_reverse_sample_rate())
reverse_sample_rate = msg.reverse_sample_rate();
ASSERT_EQ(apm->kNoError, apm->Initialize(msg.sample_rate(),
reverse_sample_rate,
msg.num_input_channels(),
msg.num_output_channels(),
msg.num_reverse_channels()));
ASSERT_TRUE(msg.has_device_sample_rate());
ASSERT_EQ(apm->kNoError,
apm->echo_cancellation()->set_device_sample_rate_hz(
msg.device_sample_rate()));

ASSERT_TRUE(msg.has_num_input_channels());
ASSERT_TRUE(msg.has_num_output_channels());
ASSERT_TRUE(msg.has_num_reverse_channels());

samples_per_channel = msg.sample_rate() / 100;
far_frame.sample_rate_hz_ = msg.sample_rate();
@ -650,6 +600,10 @@ void void_main(int argc, char* argv[]) {
near_frame.sample_rate_hz_ = msg.sample_rate();
near_frame.samples_per_channel_ = samples_per_channel;
near_frame.num_channels_ = msg.num_input_channels();
reverse_cb.reset(new ChannelBuffer<float>(samples_per_channel,
msg.num_reverse_channels()));
primary_cb.reset(new ChannelBuffer<float>(samples_per_channel,
msg.num_input_channels()));

if (verbose) {
printf("Init at frame: %d (primary), %d (reverse)\n",
@ -663,20 +617,35 @@ void void_main(int argc, char* argv[]) {

} else if (event_msg.type() == Event::REVERSE_STREAM) {
ASSERT_TRUE(event_msg.has_reverse_stream());
const ReverseStream msg = event_msg.reverse_stream();
ReverseStream msg = event_msg.reverse_stream();
reverse_count++;

ASSERT_TRUE(msg.has_data());
ASSERT_EQ(sizeof(int16_t) * samples_per_channel *
far_frame.num_channels_, msg.data().size());
memcpy(far_frame.data_, msg.data().data(), msg.data().size());
ASSERT_TRUE(msg.has_data() ^ (msg.channel_size() > 0));
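// Exactly one payload format is present per frame: interleaved int16
// data or deinterleaved float channels.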
if (msg.has_data()) {
ASSERT_EQ(sizeof(int16_t) * samples_per_channel *
far_frame.num_channels_, msg.data().size());
memcpy(far_frame.data_, msg.data().data(), msg.data().size());
} else {
for (int i = 0; i < msg.channel_size(); ++i) {
reverse_cb->CopyFrom(msg.channel(i).data(), i);
}
}

if (perf_testing) {
t0 = TickTime::Now();
}

ASSERT_EQ(apm->kNoError,
apm->AnalyzeReverseStream(&far_frame));
if (msg.has_data()) {
ASSERT_EQ(apm->kNoError,
apm->AnalyzeReverseStream(&far_frame));
} else {
ASSERT_EQ(apm->kNoError,
apm->AnalyzeReverseStream(
reverse_cb->channels(),
far_frame.samples_per_channel_,
far_frame.sample_rate_hz_,
LayoutFromChannels(far_frame.num_channels_)));
}

if (perf_testing) {
t1 = TickTime::Now();
@ -698,12 +667,18 @@ void void_main(int argc, char* argv[]) {
// ProcessStream could have changed this for the output frame.
near_frame.num_channels_ = apm->num_input_channels();

ASSERT_TRUE(msg.has_input_data());
ASSERT_EQ(sizeof(int16_t) * samples_per_channel *
near_frame.num_channels_, msg.input_data().size());
memcpy(near_frame.data_,
msg.input_data().data(),
msg.input_data().size());
ASSERT_TRUE(msg.has_input_data() ^ (msg.input_channel_size() > 0));
if (msg.has_input_data()) {
ASSERT_EQ(sizeof(int16_t) * samples_per_channel *
near_frame.num_channels_, msg.input_data().size());
memcpy(near_frame.data_,
msg.input_data().data(),
msg.input_data().size());
} else {
for (int i = 0; i < msg.input_channel_size(); ++i) {
primary_cb->CopyFrom(msg.input_channel(i).data(), i);
}
}

near_read_bytes += msg.input_data().size();
if (progress && primary_count % 100 == 0) {
@ -732,13 +707,24 @@ void void_main(int argc, char* argv[]) {
apm->set_stream_key_pressed(true);
}

int err = apm->ProcessStream(&near_frame);
int err = apm->kNoError;
if (msg.has_input_data()) {
err = apm->ProcessStream(&near_frame);
ASSERT_TRUE(near_frame.num_channels_ == apm->num_output_channels());
} else {
err = apm->ProcessStream(
primary_cb->channels(),
near_frame.samples_per_channel_,
near_frame.sample_rate_hz_,
LayoutFromChannels(near_frame.num_channels_),
LayoutFromChannels(apm->num_output_channels()));
}

if (err == apm->kBadStreamParameterWarning) {
printf("Bad parameter warning. %s\n", trace_stream.str().c_str());
}
ASSERT_TRUE(err == apm->kNoError ||
err == apm->kBadStreamParameterWarning);
ASSERT_TRUE(near_frame.num_channels_ == apm->num_output_channels());

stream_has_voice =
static_cast<int8_t>(apm->voice_detection()->stream_has_voice());
@ -769,11 +755,20 @@ void void_main(int argc, char* argv[]) {
}
}

size_t size = samples_per_channel * near_frame.num_channels_;
ASSERT_EQ(size, fwrite(near_frame.data_,
sizeof(int16_t),
size,
out_file));
size_t num_samples = samples_per_channel * apm->num_output_channels();
if (msg.has_input_data()) {
static FILE* out_file = OpenFile(out_filename, "wb");
ASSERT_EQ(num_samples, fwrite(near_frame.data_,
sizeof(*near_frame.data_),
num_samples,
out_file));
} else {
static FILE* out_float_file = OpenFile(out_float_filename, "wb");
ASSERT_EQ(num_samples, fwrite(primary_cb->data(),
sizeof(*primary_cb->data()),
num_samples,
out_float_file));
}
}
}

@ -986,6 +981,7 @@ void void_main(int argc, char* argv[]) {
}

size = samples_per_channel * near_frame.num_channels_;
static FILE* out_file = OpenFile(out_filename, "wb");
ASSERT_EQ(size, fwrite(near_frame.data_,
sizeof(int16_t),
size,
@ -1079,11 +1075,13 @@ void void_main(int argc, char* argv[]) {
}
}
}

} // namespace
} // namespace webrtc

int main(int argc, char* argv[])
{
void_main(argc, argv);
webrtc::void_main(argc, argv);

// Optional, but removes memory leak noise from Valgrind.
google::protobuf::ShutdownProtobufLibrary();

@ -8,14 +8,112 @@
* be found in the AUTHORS file in the root of the source tree.
*/

#include "webrtc/audio_processing/debug.pb.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"

namespace webrtc {

static const AudioProcessing::Error kNoErr = AudioProcessing::kNoError;
#define EXPECT_NOERR(expr) EXPECT_EQ(kNoErr, (expr))

static const int kChunkSizeMs = 10;
static const webrtc::AudioProcessing::Error kNoErr =
webrtc::AudioProcessing::kNoError;

static void SetFrameSampleRate(webrtc::AudioFrame* frame, int sample_rate_hz) {
// Helper to encapsulate a contiguous data buffer with access to a pointer
// array of the deinterleaved channels.
template <typename T>
class ChannelBuffer {
public:
ChannelBuffer(int samples_per_channel, int num_channels)
: data_(new T[samples_per_channel * num_channels]),
channels_(new T*[num_channels]),
samples_per_channel_(samples_per_channel) {
memset(data_.get(), 0, sizeof(T) * samples_per_channel * num_channels);
for (int i = 0; i < num_channels; ++i)
channels_[i] = &data_[i * samples_per_channel];
}
~ChannelBuffer() {}

void CopyFrom(const void* channel_ptr, int index) {
memcpy(channels_[index], channel_ptr, samples_per_channel_ * sizeof(T));
}

T* data() { return data_.get(); }
T* channel(int index) { return channels_[index]; }
T** channels() { return channels_.get(); }

private:
scoped_ptr<T[]> data_;
scoped_ptr<T*[]> channels_;
int samples_per_channel_;
};
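
// A minimal usage sketch, assuming one 10 ms chunk at 16 kHz (160 samples
// per channel) and two hypothetical source arrays, left[] and right[]:
//   ChannelBuffer<float> cb(160, 2);
//   cb.CopyFrom(left, 0);   // Deinterleaved: left samples fill channel 0.
//   cb.CopyFrom(right, 1);  // Right samples fill channel 1.
//   float** deinterleaved = cb.channels();  // One pointer per channel.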

// Exits on failure; do not use in unit tests.
static inline FILE* OpenFile(const std::string& filename, const char* mode) {
FILE* file = fopen(filename.c_str(), mode);
if (!file) {
printf("Unable to open file %s\n", filename.c_str());
exit(1);
}
return file;
}

static inline void SetFrameSampleRate(AudioFrame* frame,
int sample_rate_hz) {
frame->sample_rate_hz_ = sample_rate_hz;
frame->samples_per_channel_ = kChunkSizeMs * sample_rate_hz / 1000;
}

template <typename T>
void SetContainerFormat(int sample_rate_hz,
int num_channels,
AudioFrame* frame,
scoped_ptr<ChannelBuffer<T> >* cb) {
SetFrameSampleRate(frame, sample_rate_hz);
frame->num_channels_ = num_channels;
cb->reset(new ChannelBuffer<T>(frame->samples_per_channel_, num_channels));
}
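
// For example, switching a test to 32 kHz stereo (a sketch, assuming
// AudioFrame frame; and scoped_ptr<ChannelBuffer<float> > cb; as above):
//   SetContainerFormat(32000, 2, &frame, &cb);
//   // Now frame.samples_per_channel_ == 320 and cb holds 2 * 320 floats.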

static inline AudioProcessing::ChannelLayout LayoutFromChannels(
int num_channels) {
switch (num_channels) {
case 1:
return AudioProcessing::kMono;
case 2:
return AudioProcessing::kStereo;
default:
assert(false);
return AudioProcessing::kMono;
}
}

// Allocates new memory in the scoped_ptr to fit the raw message and returns the
// number of bytes read.
static inline size_t ReadMessageBytesFromFile(FILE* file,
scoped_ptr<uint8_t[]>* bytes) {
// The "wire format" for the size is little-endian. Assume we're running on
// a little-endian machine.
int32_t size = 0;
if (fread(&size, sizeof(size), 1, file) != 1)
return 0;
if (size <= 0)
return 0;

bytes->reset(new uint8_t[size]);
return fread(bytes->get(), sizeof((*bytes)[0]), size, file);
}

// Returns true on success, false on error or end-of-file.
static inline bool ReadMessageFromFile(FILE* file,
::google::protobuf::MessageLite* msg) {
scoped_ptr<uint8_t[]> bytes;
size_t size = ReadMessageBytesFromFile(file, &bytes);
if (!size)
return false;

msg->Clear();
return msg->ParseFromArray(bytes.get(), size);
}
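
// Typical traversal of a dump with these helpers (a sketch, assuming the
// dump was opened elsewhere as FILE* file):
//   audioproc::Event event_msg;
//   while (ReadMessageFromFile(file, &event_msg)) {
//     if (event_msg.type() == audioproc::Event::STREAM) {
//       // Handle one captured frame; other types: INIT, REVERSE_STREAM.
//     }
//   }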

} // namespace webrtc

@ -17,22 +17,22 @@

#include "gflags/gflags.h"
#include "webrtc/audio_processing/debug.pb.h"
#include "webrtc/modules/audio_processing/test/test_utils.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/typedefs.h"

using webrtc::scoped_array;

using webrtc::audioproc::Event;
using webrtc::audioproc::ReverseStream;
using webrtc::audioproc::Stream;
using webrtc::audioproc::Init;

// TODO(andrew): unpack more of the data.
DEFINE_string(input_file, "input.pcm", "The name of the input stream file.");
DEFINE_string(float_input_file, "input.float",
"The name of the float input stream file.");
DEFINE_string(output_file, "ref_out.pcm",
"The name of the reference output stream file.");
DEFINE_string(float_output_file, "ref_out.float",
"The name of the float reference output stream file.");
DEFINE_string(reverse_file, "reverse.pcm",
"The name of the reverse input stream file.");
DEFINE_string(float_reverse_file, "reverse.float",
"The name of the float reverse input stream file.");
DEFINE_string(delay_file, "delay.int32", "The name of the delay file.");
DEFINE_string(drift_file, "drift.int32", "The name of the drift file.");
DEFINE_string(level_file, "level.int32", "The name of the level file.");
@ -41,31 +41,22 @@ DEFINE_string(settings_file, "settings.txt", "The name of the settings file.");
DEFINE_bool(full, false,
"Unpack the full set of files (normally not needed).");

// TODO(andrew): move this to a helper class to share with process_test.cc?
// Returns true on success, false on error or end-of-file.
bool ReadMessageFromFile(FILE* file,
::google::protobuf::MessageLite* msg) {
// The "wire format" for the size is little-endian.
// Assume process_test is running on a little-endian machine.
int32_t size = 0;
if (fread(&size, sizeof(int32_t), 1, file) != 1) {
return false;
}
if (size <= 0) {
return false;
}
const size_t usize = static_cast<size_t>(size);
namespace webrtc {

scoped_array<char> array(new char[usize]);
if (fread(array.get(), sizeof(char), usize, file) != usize) {
return false;
}
using audioproc::Event;
using audioproc::ReverseStream;
using audioproc::Stream;
using audioproc::Init;

msg->Clear();
return msg->ParseFromArray(array.get(), usize);
void WriteData(const void* data, size_t size, FILE* file,
const std::string& filename) {
if (fwrite(data, size, 1, file) != 1) {
printf("Error when writing to %s\n", filename.c_str());
exit(1);
}
}

int main(int argc, char* argv[]) {
int do_main(int argc, char* argv[]) {
std::string program_name = argv[0];
std::string usage = "Commandline tool to unpack audioproc debug files.\n"
"Example usage:\n" + program_name + " debug_dump.pb\n";
@ -77,139 +68,99 @@ int main(int argc, char* argv[]) {
return 1;
}

FILE* debug_file = fopen(argv[1], "rb");
if (debug_file == NULL) {
printf("Unable to open %s\n", argv[1]);
return 1;
}
FILE* input_file = fopen(FLAGS_input_file.c_str(), "wb");
if (input_file == NULL) {
printf("Unable to open %s\n", FLAGS_input_file.c_str());
return 1;
}
FILE* output_file = fopen(FLAGS_output_file.c_str(), "wb");
if (output_file == NULL) {
printf("Unable to open %s\n", FLAGS_output_file.c_str());
return 1;
}
FILE* reverse_file = fopen(FLAGS_reverse_file.c_str(), "wb");
if (reverse_file == NULL) {
printf("Unable to open %s\n", FLAGS_reverse_file.c_str());
return 1;
}
FILE* settings_file = fopen(FLAGS_settings_file.c_str(), "wb");
if (settings_file == NULL) {
printf("Unable to open %s\n", FLAGS_settings_file.c_str());
return 1;
}

FILE* delay_file = NULL;
FILE* drift_file = NULL;
FILE* level_file = NULL;
FILE* keypress_file = NULL;
if (FLAGS_full) {
delay_file = fopen(FLAGS_delay_file.c_str(), "wb");
if (delay_file == NULL) {
printf("Unable to open %s\n", FLAGS_delay_file.c_str());
return 1;
}
drift_file = fopen(FLAGS_drift_file.c_str(), "wb");
if (drift_file == NULL) {
printf("Unable to open %s\n", FLAGS_drift_file.c_str());
return 1;
}
level_file = fopen(FLAGS_level_file.c_str(), "wb");
if (level_file == NULL) {
printf("Unable to open %s\n", FLAGS_level_file.c_str());
return 1;
}
keypress_file = fopen(FLAGS_keypress_file.c_str(), "wb");
if (keypress_file == NULL) {
printf("Unable to open %s\n", FLAGS_keypress_file.c_str());
return 1;
}
}
FILE* debug_file = OpenFile(argv[1], "rb");

Event event_msg;
int frame_count = 0;
while (ReadMessageFromFile(debug_file, &event_msg)) {
if (event_msg.type() == Event::REVERSE_STREAM) {
if (!event_msg.has_reverse_stream()) {
printf("Corrupted input file: ReverseStream missing.\n");
printf("Corrupt input file: ReverseStream missing.\n");
return 1;
}

const ReverseStream msg = event_msg.reverse_stream();
if (msg.has_data()) {
if (fwrite(msg.data().data(), msg.data().size(), 1, reverse_file) !=
1) {
printf("Error when writing to %s\n", FLAGS_reverse_file.c_str());
return 1;
}
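// Output files below are opened lazily as function-local statics, so each
// file is only created if the dump actually contains that payload type.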
static FILE* reverse_file = OpenFile(FLAGS_reverse_file, "wb");
WriteData(msg.data().data(), msg.data().size(), reverse_file,
FLAGS_reverse_file);

} else if (msg.channel_size() > 0) {
static FILE* float_reverse_file = OpenFile(FLAGS_float_reverse_file,
"wb");
// TODO(ajm): Interleave multiple channels.
assert(msg.channel_size() == 1);
WriteData(msg.channel(0).data(), msg.channel(0).size(),
float_reverse_file, FLAGS_float_reverse_file);
}
} else if (event_msg.type() == Event::STREAM) {
frame_count++;
if (!event_msg.has_stream()) {
printf("Corrupted input file: Stream missing.\n");
printf("Corrupt input file: Stream missing.\n");
return 1;
}

const Stream msg = event_msg.stream();
if (msg.has_input_data()) {
if (fwrite(msg.input_data().data(), msg.input_data().size(), 1,
input_file) != 1) {
printf("Error when writing to %s\n", FLAGS_input_file.c_str());
return 1;
}
static FILE* input_file = OpenFile(FLAGS_input_file, "wb");
WriteData(msg.input_data().data(), msg.input_data().size(),
input_file, FLAGS_input_file);

} else if (msg.input_channel_size() > 0) {
static FILE* float_input_file = OpenFile(FLAGS_float_input_file, "wb");
// TODO(ajm): Interleave multiple channels.
assert(msg.input_channel_size() == 1);
WriteData(msg.input_channel(0).data(), msg.input_channel(0).size(),
float_input_file, FLAGS_float_input_file);
}

if (msg.has_output_data()) {
if (fwrite(msg.output_data().data(), msg.output_data().size(), 1,
output_file) != 1) {
printf("Error when writing to %s\n", FLAGS_output_file.c_str());
return 1;
}
static FILE* output_file = OpenFile(FLAGS_output_file, "wb");
WriteData(msg.output_data().data(), msg.output_data().size(),
output_file, FLAGS_output_file);

} else if (msg.output_channel_size() > 0) {
static FILE* float_output_file = OpenFile(FLAGS_float_output_file,
"wb");
// TODO(ajm): Interleave multiple channels.
assert(msg.output_channel_size() == 1);
WriteData(msg.output_channel(0).data(), msg.output_channel(0).size(),
float_output_file, FLAGS_float_output_file);
}

if (FLAGS_full) {
if (msg.has_delay()) {
static FILE* delay_file = OpenFile(FLAGS_delay_file, "wb");
int32_t delay = msg.delay();
if (fwrite(&delay, sizeof(int32_t), 1, delay_file) != 1) {
printf("Error when writing to %s\n", FLAGS_delay_file.c_str());
return 1;
}
WriteData(&delay, sizeof(delay), delay_file, FLAGS_delay_file);
}

if (msg.has_drift()) {
static FILE* drift_file = OpenFile(FLAGS_drift_file, "wb");
int32_t drift = msg.drift();
if (fwrite(&drift, sizeof(int32_t), 1, drift_file) != 1) {
printf("Error when writing to %s\n", FLAGS_drift_file.c_str());
return 1;
}
WriteData(&drift, sizeof(drift), drift_file, FLAGS_drift_file);
}

if (msg.has_level()) {
static FILE* level_file = OpenFile(FLAGS_level_file, "wb");
int32_t level = msg.level();
if (fwrite(&level, sizeof(int32_t), 1, level_file) != 1) {
printf("Error when writing to %s\n", FLAGS_level_file.c_str());
return 1;
}
WriteData(&level, sizeof(level), level_file, FLAGS_level_file);
}

if (msg.has_keypress()) {
static FILE* keypress_file = OpenFile(FLAGS_keypress_file, "wb");
bool keypress = msg.keypress();
if (fwrite(&keypress, sizeof(bool), 1, keypress_file) != 1) {
printf("Error when writing to %s\n", FLAGS_keypress_file.c_str());
return 1;
}
WriteData(&keypress, sizeof(keypress), keypress_file,
FLAGS_keypress_file);
}
}
} else if (event_msg.type() == Event::INIT) {
if (!event_msg.has_init()) {
printf("Corrupted input file: Init missing.\n");
printf("Corrupt input file: Init missing.\n");
return 1;
}

static FILE* settings_file = OpenFile(FLAGS_settings_file, "wb");
const Init msg = event_msg.init();
// These should print out zeros if they're missing.
fprintf(settings_file, "Init at frame: %d\n", frame_count);
@ -229,3 +180,9 @@ int main(int argc, char* argv[]) {

return 0;
}

} // namespace webrtc

int main(int argc, char* argv[]) {
return webrtc::do_main(argc, argv);
}

@ -47,6 +47,7 @@
'../../resources/near16_stereo.pcm',
'../../resources/near32_stereo.pcm',
'../../resources/near8_stereo.pcm',
'../../resources/ref03.aecdump',
'../../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke1_0_AST.bin',
'../../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke1_0_TOF.bin',
'../../resources/remote_bitrate_estimator/VideoSendersTest_BweTest_IncreasingChoke1_1_AST.bin',