Add a simple AudioConverter class.
This will be used to refactor AudioProcessing/AudioBuffer. We can enable
alternate downmixing schemes in AudioProcessing by pulling the conversion
logic out of AudioBuffer. The unit test is largely stolen from
voice_engine/utility_unittest.cc. As commented there, the voice_engine
routines should be replaced with AudioConverter.

BUG=chromium:405270
R=aluebs@webrtc.org, mgraczyk@chromium.org
TBR=kwiberg

Review URL: https://webrtc-codereview.appspot.com/30779004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@7538 4adac7df-926f-26a2-2b94-8c16560cd09d
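For orientation before the diffs, a minimal usage sketch of the new class. It is illustrative only: the wrapper function is hypothetical, and the stereo 48 kHz to mono 16 kHz chunk sizes simply follow the rate/100 convention for 10 ms chunks used by the unit test below.

#include "webrtc/common_audio/audio_converter.h"

namespace {

// Convert one 10 ms chunk: stereo at 48 kHz -> mono at 16 kHz. The frame
// counts encode the sample rates (sample_rate_hz / 100 for a 10 ms chunk).
void ExampleConvert() {
  webrtc::ChannelBuffer<float> src(480, 2);  // 480 frames, 2 channels.
  webrtc::ChannelBuffer<float> dst(160, 1);  // 160 frames, 1 channel.

  webrtc::AudioConverter converter(2, 480, 1, 160);
  converter.Convert(src.channels(), 2, 480,
                    1, 160, dst.channels());
}

}  // namespace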
@@ -19,6 +19,8 @@ config("common_audio_config") {

 source_set("common_audio") {
   sources = [
+    "audio_converter.cc",
+    "audio_converter.h",
     "audio_util.cc",
     "blocker.cc",
     "blocker.h",
webrtc/common_audio/audio_converter.cc (new file)
@@ -0,0 +1,104 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/common_audio/audio_converter.h"

#include <string.h>  // memcpy

#include "webrtc/base/checks.h"
#include "webrtc/common_audio/resampler/push_sinc_resampler.h"

namespace webrtc {
namespace {

void DownmixToMono(const float* const* src,
                   int src_channels,
                   int frames,
                   float* dst) {
  DCHECK_GT(src_channels, 0);
  for (int i = 0; i < frames; ++i) {
    float sum = 0;
    for (int j = 0; j < src_channels; ++j)
      sum += src[j][i];
    dst[i] = sum / src_channels;
  }
}

void UpmixFromMono(const float* src,
                   int dst_channels,
                   int frames,
                   float* const* dst) {
  DCHECK_GT(dst_channels, 0);
  for (int i = 0; i < frames; ++i) {
    float value = src[i];
    for (int j = 0; j < dst_channels; ++j)
      dst[j][i] = value;
  }
}

}  // namespace

AudioConverter::AudioConverter(int src_channels, int src_frames,
                               int dst_channels, int dst_frames) {
  CHECK(dst_channels == src_channels || dst_channels == 1 || src_channels == 1);
  const int resample_channels = src_channels < dst_channels ? src_channels :
                                                              dst_channels;

  // Prepare buffers as needed for intermediate stages.
  if (dst_channels < src_channels)
    downmix_buffer_.reset(new ChannelBuffer<float>(src_frames,
                                                   resample_channels));

  if (src_frames != dst_frames) {
    resamplers_.reserve(resample_channels);
    for (int i = 0; i < resample_channels; ++i)
      resamplers_.push_back(new PushSincResampler(src_frames, dst_frames));
  }
}

void AudioConverter::Convert(const float* const* src,
                             int src_channels,
                             int src_frames,
                             int dst_channels,
                             int dst_frames,
                             float* const* dst) {
  DCHECK(dst_channels == src_channels || dst_channels == 1 ||
         src_channels == 1);
  if (src_channels == dst_channels && src_frames == dst_frames) {
    // Shortcut copy.
    if (src != dst) {
      for (int i = 0; i < src_channels; ++i)
        memcpy(dst[i], src[i], dst_frames * sizeof(*dst[i]));
    }
    return;
  }

  const float* const* src_ptr = src;
  if (dst_channels < src_channels) {
    float* const* dst_ptr = dst;
    if (src_frames != dst_frames) {
      // Downmix to a buffer for subsequent resampling.
      DCHECK_EQ(downmix_buffer_->num_channels(), dst_channels);
      DCHECK_EQ(downmix_buffer_->samples_per_channel(), src_frames);
      dst_ptr = downmix_buffer_->channels();
    }

    DownmixToMono(src, src_channels, src_frames, dst_ptr[0]);
    src_ptr = dst_ptr;
  }

  if (src_frames != dst_frames) {
    for (size_t i = 0; i < resamplers_.size(); ++i)
      resamplers_[i]->Resample(src_ptr[i], src_frames, dst[i], dst_frames);
    src_ptr = dst;
  }

  if (dst_channels > src_channels)
    UpmixFromMono(src_ptr[0], dst_channels, dst_frames, dst);
}

}  // namespace webrtc
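A sketch of how the constructor and Convert() cooperate, with illustrative parameter values (not taken from this CL):

// Mono 16 kHz -> stereo 48 kHz, 10 ms chunks.
webrtc::AudioConverter converter(1, 160, 2, 480);
// resample_channels = min(1, 2) = 1, so a single PushSincResampler(160, 480)
// is created and no downmix_buffer_ is allocated (dst_channels >
// src_channels). Convert() resamples the mono channel into dst[0], then
// UpmixFromMono() copies it into every destination channel. In the downmixing
// direction the order is reversed: mixing down before resampling means only
// min(src_channels, dst_channels) channels ever pass through a resampler.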
webrtc/common_audio/audio_converter.h (new file)
@@ -0,0 +1,51 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_COMMON_AUDIO_AUDIO_CONVERTER_H_
#define WEBRTC_COMMON_AUDIO_AUDIO_CONVERTER_H_

// TODO(ajm): Move channel buffer to common_audio.
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/audio_processing/common.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/system_wrappers/interface/scoped_vector.h"

namespace webrtc {

class PushSincResampler;

// Format conversion (remixing and resampling) for audio. Only simple remixing
// conversions are supported: downmix to mono (i.e. |dst_channels| == 1) or
// upmix from mono (i.e. |src_channels| == 1).
//
// The source and destination chunks have the same duration in time; specifying
// the number of frames is equivalent to specifying the sample rates.
class AudioConverter {
 public:
  AudioConverter(int src_channels, int src_frames,
                 int dst_channels, int dst_frames);

  void Convert(const float* const* src,
               int src_channels,
               int src_frames,
               int dst_channels,
               int dst_frames,
               float* const* dst);

 private:
  scoped_ptr<ChannelBuffer<float>> downmix_buffer_;
  ScopedVector<PushSincResampler> resamplers_;

  DISALLOW_COPY_AND_ASSIGN(AudioConverter);
};

}  // namespace webrtc

#endif  // WEBRTC_COMMON_AUDIO_AUDIO_CONVERTER_H_
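The last paragraph of the class comment can be made concrete. A small sketch, assuming the 10 ms chunking the unit test below uses; FramesForChunk is a hypothetical helper, not part of this API:

// Frame count of a chunk of |duration_ms| milliseconds at |sample_rate_hz|.
inline int FramesForChunk(int sample_rate_hz, int duration_ms) {
  return sample_rate_hz * duration_ms / 1000;
}
// FramesForChunk(48000, 10) == 480 and FramesForChunk(16000, 10) == 160, so
// AudioConverter(ch, 480, ch, 160) implies a 48 kHz -> 16 kHz resample.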
webrtc/common_audio/audio_converter_unittest.cc (new file)
@@ -0,0 +1,155 @@
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <math.h>
#include <stdio.h>  // printf

#include <algorithm>
#include <vector>

#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_audio/audio_converter.h"
#include "webrtc/common_audio/resampler/push_sinc_resampler.h"
#include "webrtc/modules/audio_processing/common.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"

namespace webrtc {

typedef scoped_ptr<ChannelBuffer<float>> ScopedBuffer;

// Sets each channel i of the signal to a ramp that increases by |data[i]|
// with every sample.
ScopedBuffer CreateBuffer(const std::vector<float>& data, int frames) {
  const int num_channels = static_cast<int>(data.size());
  ScopedBuffer sb(new ChannelBuffer<float>(frames, num_channels));
  for (int i = 0; i < num_channels; ++i)
    for (int j = 0; j < frames; ++j)
      sb->channel(i)[j] = data[i] * j;
  return sb;
}

void VerifyParams(const ChannelBuffer<float>& ref,
                  const ChannelBuffer<float>& test) {
  EXPECT_EQ(ref.num_channels(), test.num_channels());
  EXPECT_EQ(ref.samples_per_channel(), test.samples_per_channel());
}

// Computes the best SNR based on the error between |ref| and |test|. It
// searches around |expected_delay| in samples between the signals to
// compensate for the resampling delay.
float ComputeSNR(const ChannelBuffer<float>& ref,
                 const ChannelBuffer<float>& test,
                 int expected_delay) {
  VerifyParams(ref, test);
  float best_snr = 0;
  int best_delay = 0;

  // Search within one sample of the expected delay.
  for (int delay = std::max(expected_delay - 1, 0);
       delay <= std::min(expected_delay + 1, ref.samples_per_channel());
       ++delay) {
    float mse = 0;
    float variance = 0;
    float mean = 0;
    for (int i = 0; i < ref.num_channels(); ++i) {
      for (int j = 0; j < ref.samples_per_channel() - delay; ++j) {
        float error = ref.channel(i)[j] - test.channel(i)[j + delay];
        mse += error * error;
        variance += ref.channel(i)[j] * ref.channel(i)[j];
        mean += ref.channel(i)[j];
      }
    }
    const int length = ref.num_channels() * (ref.samples_per_channel() - delay);
    mse /= length;
    variance /= length;
    mean /= length;
    variance -= mean * mean;
    float snr = 100;  // We assign 100 dB to the zero-error case.
    if (mse > 0)
      snr = 10 * log10(variance / mse);
    if (snr > best_snr) {
      best_snr = snr;
      best_delay = delay;
    }
  }
  printf("SNR=%.1f dB at delay=%d\n", best_snr, best_delay);
  return best_snr;
}
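In equation form, the figure computed for each candidate delay $d$ above, with $C$ channels, $N$ samples per channel, and $L = C(N - d)$, is

$$\mathrm{MSE}(d) = \frac{1}{L}\sum_{i=0}^{C-1}\sum_{j=0}^{N-d-1}\bigl(\mathrm{ref}_i[j] - \mathrm{test}_i[j+d]\bigr)^2, \qquad \sigma^2 = \frac{1}{L}\sum_{i,j}\mathrm{ref}_i[j]^2 - \Bigl(\frac{1}{L}\sum_{i,j}\mathrm{ref}_i[j]\Bigr)^2,$$

$$\mathrm{SNR}(d) = 10\log_{10}\frac{\sigma^2}{\mathrm{MSE}(d)}\ \text{dB},$$

and the best value over $d \in \{d_e - 1,\, d_e,\, d_e + 1\}$ (clamped to valid delays), where $d_e$ is |expected_delay|, is returned.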

// Sets the source to a linearly increasing signal for which we can easily
// generate a reference. Runs the AudioConverter and ensures the output has
// sufficiently high SNR relative to the reference.
void RunAudioConverterTest(int src_channels,
                           int src_sample_rate_hz,
                           int dst_channels,
                           int dst_sample_rate_hz) {
  const float kSrcLeft = 0.0002f;
  const float kSrcRight = 0.0001f;
  const float resampling_factor = (1.f * src_sample_rate_hz) /
      dst_sample_rate_hz;
  const float dst_left = resampling_factor * kSrcLeft;
  const float dst_right = resampling_factor * kSrcRight;
  const float dst_mono = (dst_left + dst_right) / 2;
  const int src_frames = src_sample_rate_hz / 100;
  const int dst_frames = dst_sample_rate_hz / 100;

  std::vector<float> src_data(1, kSrcLeft);
  if (src_channels == 2)
    src_data.push_back(kSrcRight);
  ScopedBuffer src_buffer = CreateBuffer(src_data, src_frames);

  std::vector<float> dst_data(1, 0);
  std::vector<float> ref_data;
  if (dst_channels == 1) {
    if (src_channels == 1)
      ref_data.push_back(dst_left);
    else
      ref_data.push_back(dst_mono);
  } else {
    dst_data.push_back(0);
    ref_data.push_back(dst_left);
    if (src_channels == 1)
      ref_data.push_back(dst_left);
    else
      ref_data.push_back(dst_right);
  }
  ScopedBuffer dst_buffer = CreateBuffer(dst_data, dst_frames);
  ScopedBuffer ref_buffer = CreateBuffer(ref_data, dst_frames);

  // The sinc resampler has a known delay, which we compute here.
  const int delay_frames = src_sample_rate_hz == dst_sample_rate_hz ? 0 :
      PushSincResampler::AlgorithmicDelaySeconds(src_sample_rate_hz) *
          dst_sample_rate_hz;
  printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
         src_channels, src_sample_rate_hz, dst_channels, dst_sample_rate_hz);

  AudioConverter converter(src_channels, src_frames, dst_channels, dst_frames);
  converter.Convert(src_buffer->channels(), src_channels, src_frames,
                    dst_channels, dst_frames, dst_buffer->channels());

  EXPECT_LT(43.f,
            ComputeSNR(*ref_buffer.get(), *dst_buffer.get(), delay_frames));
}

TEST(AudioConverterTest, ConversionsPassSNRThreshold) {
  const int kSampleRates[] = {8000, 16000, 32000, 44100, 48000};
  const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
  const int kChannels[] = {1, 2};
  const int kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);
  for (int src_rate = 0; src_rate < kSampleRatesSize; ++src_rate) {
    for (int dst_rate = 0; dst_rate < kSampleRatesSize; ++dst_rate) {
      for (int src_channel = 0; src_channel < kChannelsSize; ++src_channel) {
        for (int dst_channel = 0; dst_channel < kChannelsSize; ++dst_channel) {
          RunAudioConverterTest(kChannels[src_channel], kSampleRates[src_rate],
                                kChannels[dst_channel], kSampleRates[dst_rate]);
        }
      }
    }
  }
}

}  // namespace webrtc
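A note on why RunAudioConverterTest scales the reference slopes by resampling_factor: a source channel that ramps by $k$ per sample at rate $f_{\mathrm{src}}$ represents the continuous-time signal $x(t) = k f_{\mathrm{src}} t$. Sampling that signal at $f_{\mathrm{dst}}$ puts sample $j$ at $t = j / f_{\mathrm{dst}}$, so

$$y[j] = k\,\frac{f_{\mathrm{src}}}{f_{\mathrm{dst}}}\,j,$$

i.e. the destination ramp increases by resampling_factor * k per sample. That is exactly how dst_left and dst_right are formed; dst_mono additionally averages the two slopes, matching DownmixToMono.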
@@ -29,6 +29,8 @@
     ],
   },
   'sources': [
+    'audio_converter.cc',
+    'audio_converter.h',
     'audio_util.cc',
     'blocker.cc',
     'blocker.h',
@@ -222,6 +224,7 @@
         '<(DEPTH)/testing/gtest.gyp:gtest',
       ],
       'sources': [
+        'audio_converter_unittest.cc',
         'audio_util_unittest.cc',
         'blocker_unittest.cc',
         'fir_filter_unittest.cc',
@@ -22,8 +22,7 @@ namespace webrtc {
 namespace voe {

 // TODO(ajm): There is significant overlap between RemixAndResample and
-// ConvertToCodecFormat, but if we're to consolidate we should probably make a
-// real converter class.
+// ConvertToCodecFormat. Consolidate using AudioConverter.
 void RemixAndResample(const AudioFrame& src_frame,
                       PushResampler<int16_t>* resampler,
                       AudioFrame* dst_frame) {