Revert of Allow intelligibility to compile in apm (patchset #1 id:1 of https://codereview.webrtc.org/1182323005/)

Reason for revert:
Breaking the build bots: http://build.chromium.org/p/client.webrtc/builders/Mac32%20Release%20%5Blarge%20tests%5D/builds/4544

Fails to compile with this error:

../../webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc:218:25: error: no member named 'accumulate' in namespace 'std'
    power_target = std::accumulate(clear_variance_.variance(),
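For reference, std::accumulate is declared in the <numeric> header, which intelligibility_enhancer.cc apparently picked up only transitively on some toolchains but not on the Mac bots. A minimal sketch of the kind of follow-up fix this error points at (not part of this revert; SumVariance and its parameters are hypothetical stand-ins for the failing call):

    #include <numeric>  // Declares std::accumulate; the missing include is the likely cause of the error above.

    // Stand-in for summing clear_variance_.variance(), a float array of length |freqs|.
    float SumVariance(const float* variance, int freqs) {
      // Same call shape as the failing line in intelligibility_enhancer.cc.
      return std::accumulate(variance, variance + freqs, 0.0f);
    }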

Original issue's description:
> Allow intelligibility to compile in apm
>
> - Added files to gyp and BUILD
> - Made minor fixes to get everything to compile
>     and intelligibility_proc to run
> - Added comments
> - Auto-reformatting
>
> Original cl is at: https://webrtc-codereview.appspot.com/57579004/
>
> TBR=aluebs@webrtc.org
>
> Committed: b7553dfdbb

TBR=ekmeyerson@webrtc.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review URL: https://codereview.webrtc.org/1187033005

Cr-Commit-Position: refs/heads/master@{#9455}
Author: aluebs
Date: 2015-06-16 20:26:16 -07:00 (committed by Commit bot)
Commit: c555b99c13 (parent: b7553dfdbb)
8 changed files with 205 additions and 336 deletions

View File

@@ -89,10 +89,6 @@ source_set("audio_processing") {
     "high_pass_filter_impl.cc",
     "high_pass_filter_impl.h",
     "include/audio_processing.h",
-    "intelligibility/intelligibility_enhancer.cc",
-    "intelligibility/intelligibility_enhancer.h",
-    "intelligibility/intelligibility_utils.cc",
-    "intelligibility/intelligibility_utils.h",
     "level_estimator_impl.cc",
     "level_estimator_impl.h",
     "noise_suppression_impl.cc",

View File

@@ -99,10 +99,6 @@
         'high_pass_filter_impl.cc',
         'high_pass_filter_impl.h',
         'include/audio_processing.h',
-        'intelligibility/intelligibility_enhancer.cc',
-        'intelligibility/intelligibility_enhancer.h',
-        'intelligibility/intelligibility_utils.cc',
-        'intelligibility/intelligibility_utils.h',
         'level_estimator_impl.cc',
         'level_estimator_impl.h',
         'noise_suppression_impl.cc',

View File

@@ -59,19 +59,6 @@
         'beamformer/nonlinear_beamformer_test.cc',
       ],
     }, # nonlinear_beamformer_test
-    {
-      'target_name': 'intelligibility_proc',
-      'type': 'executable',
-      'dependencies': [
-        'audioproc_test_utils',
-        '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
-        '<(DEPTH)/testing/gtest.gyp:gtest',
-        '<(webrtc_root)/modules/modules.gyp:audio_processing',
-      ],
-      'sources': [
-        'intelligibility/intelligibility_proc.cc',
-      ],
-    }, # intelligibility_proc
   ],
   'conditions': [
     ['enable_protobuf==1', {

View File

@ -8,13 +8,6 @@
* be found in the AUTHORS file in the root of the source tree. * be found in the AUTHORS file in the root of the source tree.
*/ */
//
// Implements core class for intelligibility enhancer.
//
// Details of the model and algorithm can be found in the original paper:
// http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6882788
//
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h" #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h"
#include <cmath> #include <cmath>
@ -34,16 +27,13 @@ namespace webrtc {
const int IntelligibilityEnhancer::kErbResolution = 2; const int IntelligibilityEnhancer::kErbResolution = 2;
const int IntelligibilityEnhancer::kWindowSizeMs = 2; const int IntelligibilityEnhancer::kWindowSizeMs = 2;
const int IntelligibilityEnhancer::kChunkSizeMs = 10; // Size provided by APM. // The size of the chunk provided by APM, in milliseconds.
const int IntelligibilityEnhancer::kChunkSizeMs = 10;
const int IntelligibilityEnhancer::kAnalyzeRate = 800; const int IntelligibilityEnhancer::kAnalyzeRate = 800;
const int IntelligibilityEnhancer::kVarianceRate = 2; const int IntelligibilityEnhancer::kVarianceRate = 2;
const float IntelligibilityEnhancer::kClipFreq = 200.0f; const float IntelligibilityEnhancer::kClipFreq = 200.0f;
const float IntelligibilityEnhancer::kConfigRho = 0.02f; const float IntelligibilityEnhancer::kConfigRho = 0.02f;
const float IntelligibilityEnhancer::kKbdAlpha = 1.5f; const float IntelligibilityEnhancer::kKbdAlpha = 1.5f;
// To disable gain update smoothing, set gain limit to be VERY high.
// TODO(ekmeyerson): Add option to disable gain smoothing altogether
// to avoid the extra computation.
const float IntelligibilityEnhancer::kGainChangeLimit = 0.0125f; const float IntelligibilityEnhancer::kGainChangeLimit = 0.0125f;
using VarianceType = intelligibility::VarianceArray::StepType; using VarianceType = intelligibility::VarianceArray::StepType;
@ -51,14 +41,12 @@ using VarianceType = intelligibility::VarianceArray::StepType;
IntelligibilityEnhancer::TransformCallback::TransformCallback( IntelligibilityEnhancer::TransformCallback::TransformCallback(
IntelligibilityEnhancer* parent, IntelligibilityEnhancer* parent,
IntelligibilityEnhancer::AudioSource source) IntelligibilityEnhancer::AudioSource source)
: parent_(parent), source_(source) { : parent_(parent),
} source_(source) {}
void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock( void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock(
const complex<float>* const* in_block, const complex<float>* const* in_block,
int in_channels, int in_channels, int frames, int /* out_channels */,
int frames,
int /* out_channels */,
complex<float>* const* out_block) { complex<float>* const* out_block) {
DCHECK_EQ(parent_->freqs_, frames); DCHECK_EQ(parent_->freqs_, frames);
for (int i = 0; i < in_channels; ++i) { for (int i = 0; i < in_channels; ++i) {
@ -69,14 +57,13 @@ void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock(
IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution, IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution,
int sample_rate_hz, int sample_rate_hz,
int channels, int channels,
int cv_type, int cv_type, float cv_alpha,
float cv_alpha,
int cv_win, int cv_win,
int analysis_rate, int analysis_rate,
int variance_rate, int variance_rate,
float gain_limit) float gain_limit)
: freqs_(RealFourier::ComplexLength( : freqs_(RealFourier::ComplexLength(RealFourier::FftOrder(
RealFourier::FftOrder(sample_rate_hz * kWindowSizeMs / 1000))), sample_rate_hz * kWindowSizeMs / 1000))),
window_size_(1 << RealFourier::FftOrder(freqs_)), window_size_(1 << RealFourier::FftOrder(freqs_)),
chunk_length_(sample_rate_hz * kChunkSizeMs / 1000), chunk_length_(sample_rate_hz * kChunkSizeMs / 1000),
bank_size_(GetBankSize(sample_rate_hz, erb_resolution)), bank_size_(GetBankSize(sample_rate_hz, erb_resolution)),
@ -85,9 +72,7 @@ IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution,
channels_(channels), channels_(channels),
analysis_rate_(analysis_rate), analysis_rate_(analysis_rate),
variance_rate_(variance_rate), variance_rate_(variance_rate),
clear_variance_(freqs_, clear_variance_(freqs_, static_cast<VarianceType>(cv_type), cv_win,
static_cast<VarianceType>(cv_type),
cv_win,
cv_alpha), cv_alpha),
noise_variance_(freqs_, VarianceType::kStepInfinite, 475, 0.01f), noise_variance_(freqs_, VarianceType::kStepInfinite, 475, 0.01f),
filtered_clear_var_(new float[bank_size_]), filtered_clear_var_(new float[bank_size_]),
@ -98,51 +83,58 @@ IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution,
gains_eq_(new float[bank_size_]), gains_eq_(new float[bank_size_]),
gain_applier_(freqs_, gain_limit), gain_applier_(freqs_, gain_limit),
temp_out_buffer_(nullptr), temp_out_buffer_(nullptr),
input_audio_(new float* [channels]), input_audio_(new float*[channels]),
kbd_window_(new float[window_size_]), kbd_window_(new float[window_size_]),
render_callback_(this, AudioSource::kRenderStream), render_callback_(this, AudioSource::kRenderStream),
capture_callback_(this, AudioSource::kCaptureStream), capture_callback_(this, AudioSource::kCaptureStream),
block_count_(0), block_count_(0),
analysis_step_(0), analysis_step_(0),
vad_high_(WebRtcVad_Create()), vad_high_(nullptr),
vad_low_(WebRtcVad_Create()), vad_low_(nullptr),
vad_tmp_buffer_(new int16_t[chunk_length_]) { vad_tmp_buffer_(new int16_t[chunk_length_]) {
DCHECK_LE(kConfigRho, 1.0f); DCHECK_LE(kConfigRho, 1.0f);
CreateErbBank(); CreateErbBank();
WebRtcVad_Create(&vad_high_);
WebRtcVad_Init(vad_high_); WebRtcVad_Init(vad_high_);
WebRtcVad_set_mode(vad_high_, 0); // High likelihood of speech. WebRtcVad_set_mode(vad_high_, 0); // high likelihood of speech
WebRtcVad_Create(&vad_low_);
WebRtcVad_Init(vad_low_); WebRtcVad_Init(vad_low_);
WebRtcVad_set_mode(vad_low_, 3); // Low likelihood of speech. WebRtcVad_set_mode(vad_low_, 3); // low likelihood of speech
temp_out_buffer_ = static_cast<float**>( temp_out_buffer_ = static_cast<float**>(malloc(
malloc(sizeof(*temp_out_buffer_) * channels_ + sizeof(*temp_out_buffer_) * channels_ +
sizeof(**temp_out_buffer_) * chunk_length_ * channels_)); sizeof(**temp_out_buffer_) * chunk_length_ * channels_));
for (int i = 0; i < channels_; ++i) { for (int i = 0; i < channels_; ++i) {
temp_out_buffer_[i] = temp_out_buffer_[i] = reinterpret_cast<float*>(temp_out_buffer_ + channels_)
reinterpret_cast<float*>(temp_out_buffer_ + channels_) + + chunk_length_ * i;
chunk_length_ * i;
} }
// Assumes all rho equal.
for (int i = 0; i < bank_size_; ++i) { for (int i = 0; i < bank_size_; ++i) {
rho_[i] = kConfigRho * kConfigRho; rho_[i] = kConfigRho * kConfigRho;
} }
float freqs_khz = kClipFreq / 1000.0f; float freqs_khz = kClipFreq / 1000.0f;
int erb_index = static_cast<int>(ceilf( int erb_index = static_cast<int>(ceilf(11.17f * logf((freqs_khz + 0.312f) /
11.17f * logf((freqs_khz + 0.312f) / (freqs_khz + 14.6575f)) + 43.0f)); (freqs_khz + 14.6575f))
+ 43.0f));
start_freq_ = max(1, erb_index * kErbResolution); start_freq_ = max(1, erb_index * kErbResolution);
WindowGenerator::KaiserBesselDerived(kKbdAlpha, window_size_, WindowGenerator::KaiserBesselDerived(kKbdAlpha, window_size_,
kbd_window_.get()); kbd_window_.get());
render_mangler_.reset(new LappedTransform( render_mangler_.reset(new LappedTransform(channels_, channels_,
channels_, channels_, chunk_length_, kbd_window_.get(), window_size_, chunk_length_,
window_size_ / 2, &render_callback_)); kbd_window_.get(),
capture_mangler_.reset(new LappedTransform( window_size_,
channels_, channels_, chunk_length_, kbd_window_.get(), window_size_, window_size_ / 2,
window_size_ / 2, &capture_callback_)); &render_callback_));
capture_mangler_.reset(new LappedTransform(channels_, channels_,
chunk_length_,
kbd_window_.get(),
window_size_,
window_size_ / 2,
&capture_callback_));
} }
IntelligibilityEnhancer::~IntelligibilityEnhancer() { IntelligibilityEnhancer::~IntelligibilityEnhancer() {
@ -158,9 +150,7 @@ void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio) {
has_voice_low_ = WebRtcVad_Process(vad_low_, sample_rate_hz_, has_voice_low_ = WebRtcVad_Process(vad_low_, sample_rate_hz_,
vad_tmp_buffer_.get(), chunk_length_) == 1; vad_tmp_buffer_.get(), chunk_length_) == 1;
// Process and enhance chunk of |audio|
render_mangler_->ProcessChunk(audio, temp_out_buffer_); render_mangler_->ProcessChunk(audio, temp_out_buffer_);
for (int i = 0; i < channels_; ++i) { for (int i = 0; i < channels_; ++i) {
memcpy(audio[i], temp_out_buffer_[i], memcpy(audio[i], temp_out_buffer_[i],
chunk_length_ * sizeof(**temp_out_buffer_)); chunk_length_ * sizeof(**temp_out_buffer_));
@ -171,25 +161,21 @@ void IntelligibilityEnhancer::ProcessCaptureAudio(float* const* audio) {
for (int i = 0; i < chunk_length_; ++i) { for (int i = 0; i < chunk_length_; ++i) {
vad_tmp_buffer_[i] = (int16_t)audio[0][i]; vad_tmp_buffer_[i] = (int16_t)audio[0][i];
} }
// TODO(bercic): The VAD was always detecting voice in the noise stream, // TODO(bercic): the VAD was always detecting voice in the noise stream,
// no matter what the aggressiveness, so it was temporarily disabled here. // no matter what the aggressiveness, so it was temporarily disabled here
#if 0
if (WebRtcVad_Process(vad_high_, sample_rate_hz_, vad_tmp_buffer_.get(),
chunk_length_) == 1) {
printf("capture HAS speech\n");
return;
}
printf("capture NO speech\n");
#endif
//if (WebRtcVad_Process(vad_high_, sample_rate_hz_, vad_tmp_buffer_.get(),
// chunk_length_) == 1) {
// printf("capture HAS speech\n");
// return;
//}
//printf("capture NO speech\n");
capture_mangler_->ProcessChunk(audio, temp_out_buffer_); capture_mangler_->ProcessChunk(audio, temp_out_buffer_);
} }
void IntelligibilityEnhancer::DispatchAudio( void IntelligibilityEnhancer::DispatchAudio(
IntelligibilityEnhancer::AudioSource source, IntelligibilityEnhancer::AudioSource source,
const complex<float>* in_block, const complex<float>* in_block, complex<float>* out_block) {
complex<float>* out_block) {
switch (source) { switch (source) {
case kRenderStream: case kRenderStream:
ProcessClearBlock(in_block, out_block); ProcessClearBlock(in_block, out_block);
@ -210,9 +196,6 @@ void IntelligibilityEnhancer::ProcessClearBlock(const complex<float>* in_block,
return; return;
} }
// For now, always assumes enhancement is necessary.
// TODO(ekmeyerson): Change to only enhance if necessary,
// based on experiments with different cutoffs.
if (has_voice_low_ || true) { if (has_voice_low_ || true) {
clear_variance_.Step(in_block, false); clear_variance_.Step(in_block, false);
power_target = std::accumulate(clear_variance_.variance(), power_target = std::accumulate(clear_variance_.variance(),
@ -238,25 +221,23 @@ void IntelligibilityEnhancer::AnalyzeClearBlock(float power_target) {
FilterVariance(clear_variance_.variance(), filtered_clear_var_.get()); FilterVariance(clear_variance_.variance(), filtered_clear_var_.get());
FilterVariance(noise_variance_.variance(), filtered_noise_var_.get()); FilterVariance(noise_variance_.variance(), filtered_noise_var_.get());
// Bisection search for optimal |lambda| /* lambda binary search */
float lambda_bot = -1.0f, lambda_top = -10e-18f, lambda; float lambda_bot = -1.0f, lambda_top = -10e-18f, lambda;
float power_bot, power_top, power; float power_bot, power_top, power;
SolveForGainsGivenLambda(lambda_top, start_freq_, gains_eq_.get()); SolveEquation14(lambda_top, start_freq_, gains_eq_.get());
power_top = power_top = DotProduct(gains_eq_.get(), filtered_clear_var_.get(),
DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_); bank_size_);
SolveForGainsGivenLambda(lambda_bot, start_freq_, gains_eq_.get()); SolveEquation14(lambda_bot, start_freq_, gains_eq_.get());
power_bot = power_bot = DotProduct(gains_eq_.get(), filtered_clear_var_.get(),
DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_); bank_size_);
DCHECK(power_target >= power_bot && power_target <= power_top); DCHECK(power_target >= power_bot && power_target <= power_top);
float power_ratio = 2.0f; // Ratio of achieved power to target power. float power_ratio = 2.0f;
const float kConvergeThresh = 0.001f; // TODO(ekmeyerson): Find best values
const int kMaxIters = 100; // for these, based on experiments.
int iters = 0; int iters = 0;
while (fabs(power_ratio - 1.0f) > kConvergeThresh && iters <= kMaxIters) { while (fabs(power_ratio - 1.0f) > 0.001f && iters <= 100) {
lambda = lambda_bot + (lambda_top - lambda_bot) / 2.0f; lambda = lambda_bot + (lambda_top - lambda_bot) / 2.0f;
SolveForGainsGivenLambda(lambda, start_freq_, gains_eq_.get()); SolveEquation14(lambda, start_freq_, gains_eq_.get());
power = DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_); power = DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_);
if (power < power_target) { if (power < power_target) {
lambda_bot = lambda; lambda_bot = lambda;
@ -267,7 +248,7 @@ void IntelligibilityEnhancer::AnalyzeClearBlock(float power_target) {
++iters; ++iters;
} }
// (ERB gain) = filterbank' * (freq gain) /* b = filterbank' * b */
float* gains = gain_applier_.target(); float* gains = gain_applier_.target();
for (int i = 0; i < freqs_; ++i) { for (int i = 0; i < freqs_; ++i) {
gains[i] = 0.0f; gains[i] = 0.0f;
@ -284,8 +265,8 @@ void IntelligibilityEnhancer::ProcessNoiseBlock(const complex<float>* in_block,
int IntelligibilityEnhancer::GetBankSize(int sample_rate, int erb_resolution) { int IntelligibilityEnhancer::GetBankSize(int sample_rate, int erb_resolution) {
float freq_limit = sample_rate / 2000.0f; float freq_limit = sample_rate / 2000.0f;
int erb_scale = ceilf( int erb_scale = ceilf(11.17f * logf((freq_limit + 0.312f) /
11.17f * logf((freq_limit + 0.312f) / (freq_limit + 14.6575f)) + 43.0f); (freq_limit + 14.6575f)) + 43.0f);
return erb_scale * erb_resolution; return erb_scale * erb_resolution;
} }
@ -302,29 +283,29 @@ void IntelligibilityEnhancer::CreateErbBank() {
center_freqs_[i] *= 0.5f * sample_rate_hz_ / last_center_freq; center_freqs_[i] *= 0.5f * sample_rate_hz_ / last_center_freq;
} }
filter_bank_ = static_cast<float**>( filter_bank_ = static_cast<float**>(malloc(
malloc(sizeof(*filter_bank_) * bank_size_ + sizeof(*filter_bank_) * bank_size_ +
sizeof(**filter_bank_) * freqs_ * bank_size_)); sizeof(**filter_bank_) * freqs_ * bank_size_));
for (int i = 0; i < bank_size_; ++i) { for (int i = 0; i < bank_size_; ++i) {
filter_bank_[i] = filter_bank_[i] = reinterpret_cast<float*>(filter_bank_ + bank_size_) +
reinterpret_cast<float*>(filter_bank_ + bank_size_) + freqs_ * i; freqs_ * i;
} }
for (int i = 1; i <= bank_size_; ++i) { for (int i = 1; i <= bank_size_; ++i) {
int lll, ll, rr, rrr; int lll, ll, rr, rrr;
lll = round(center_freqs_[max(1, i - lf) - 1] * freqs_ / lll = round(center_freqs_[max(1, i - lf) - 1] * freqs_ /
(0.5f * sample_rate_hz_)); (0.5f * sample_rate_hz_));
ll = ll = round(center_freqs_[max(1, i ) - 1] * freqs_ /
round(center_freqs_[max(1, i) - 1] * freqs_ / (0.5f * sample_rate_hz_)); (0.5f * sample_rate_hz_));
lll = min(freqs_, max(lll, 1)) - 1; lll = min(freqs_, max(lll, 1)) - 1;
ll = min(freqs_, max(ll, 1)) - 1; ll = min(freqs_, max(ll, 1)) - 1;
rrr = round(center_freqs_[min(bank_size_, i + rf) - 1] * freqs_ / rrr = round(center_freqs_[min(bank_size_, i + rf) - 1] * freqs_ /
(0.5f * sample_rate_hz_)); (0.5f * sample_rate_hz_));
rr = round(center_freqs_[min(bank_size_, i + 1) - 1] * freqs_ / rr = round(center_freqs_[min(bank_size_, i + 1) - 1] * freqs_ /
(0.5f * sample_rate_hz_)); (0.5f * sample_rate_hz_));
rrr = min(freqs_, max(rrr, 1)) - 1; rrr = min(freqs_, max(rrr, 1)) - 1;
rr = min(freqs_, max(rr, 1)) - 1; rr = min(freqs_, max(rr, 1)) - 1;
float step, element; float step, element;
@ -357,9 +338,8 @@ void IntelligibilityEnhancer::CreateErbBank() {
} }
} }
void IntelligibilityEnhancer::SolveForGainsGivenLambda(float lambda, void IntelligibilityEnhancer::SolveEquation14(float lambda, int start_freq,
int start_freq, float* sols) {
float* sols) {
bool quadratic = (kConfigRho < 1.0f); bool quadratic = (kConfigRho < 1.0f);
const float* var_x0 = filtered_clear_var_.get(); const float* var_x0 = filtered_clear_var_.get();
const float* var_n0 = filtered_noise_var_.get(); const float* var_n0 = filtered_noise_var_.get();
@ -367,17 +347,15 @@ void IntelligibilityEnhancer::SolveForGainsGivenLambda(float lambda,
for (int n = 0; n < start_freq; ++n) { for (int n = 0; n < start_freq; ++n) {
sols[n] = 1.0f; sols[n] = 1.0f;
} }
// Analytic solution for optimal gains. See paper for derivation.
for (int n = start_freq - 1; n < bank_size_; ++n) { for (int n = start_freq - 1; n < bank_size_; ++n) {
float alpha0, beta0, gamma0; float alpha0, beta0, gamma0;
gamma0 = 0.5f * rho_[n] * var_x0[n] * var_n0[n] + gamma0 = 0.5f * rho_[n] * var_x0[n] * var_n0[n] +
lambda * var_x0[n] * var_n0[n] * var_n0[n]; lambda * var_x0[n] * var_n0[n] * var_n0[n];
beta0 = lambda * var_x0[n] * (2 - rho_[n]) * var_x0[n] * var_n0[n]; beta0 = lambda * var_x0[n] * (2 - rho_[n]) * var_x0[n] * var_n0[n];
if (quadratic) { if (quadratic) {
alpha0 = lambda * var_x0[n] * (1 - rho_[n]) * var_x0[n] * var_x0[n]; alpha0 = lambda * var_x0[n] * (1 - rho_[n]) * var_x0[n] * var_x0[n];
sols[n] = sols[n] = (-beta0 - sqrtf(beta0 * beta0 - 4 * alpha0 * gamma0))
(-beta0 - sqrtf(beta0 * beta0 - 4 * alpha0 * gamma0)) / (2 * alpha0); / (2 * alpha0);
} else { } else {
sols[n] = -gamma0 / beta0; sols[n] = -gamma0 / beta0;
} }
@ -391,9 +369,8 @@ void IntelligibilityEnhancer::FilterVariance(const float* var, float* result) {
} }
} }
float IntelligibilityEnhancer::DotProduct(const float* a, float IntelligibilityEnhancer::DotProduct(const float* a, const float* b,
const float* b, int length) {
int length) {
float ret = 0.0f; float ret = 0.0f;
for (int i = 0; i < length; ++i) { for (int i = 0; i < length; ++i) {
@ -403,3 +380,4 @@ float IntelligibilityEnhancer::DotProduct(const float* a,
} }
} // namespace webrtc } // namespace webrtc

View File

@ -8,18 +8,14 @@
* be found in the AUTHORS file in the root of the source tree. * be found in the AUTHORS file in the root of the source tree.
*/ */
//
// Specifies core class for intelligbility enhancement.
//
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_ #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_ #define WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_
#include <complex> #include <complex>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/lapped_transform.h" #include "webrtc/common_audio/lapped_transform.h"
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h" #include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
struct WebRtcVadInst; struct WebRtcVadInst;
typedef struct WebRtcVadInst VadInst; typedef struct WebRtcVadInst VadInst;
@ -29,7 +25,6 @@ namespace webrtc {
// Speech intelligibility enhancement module. Reads render and capture // Speech intelligibility enhancement module. Reads render and capture
// audio streams and modifies the render stream with a set of gains per // audio streams and modifies the render stream with a set of gains per
// frequency bin to enhance speech against the noise background. // frequency bin to enhance speech against the noise background.
// Note: assumes speech and noise streams are already separated.
class IntelligibilityEnhancer { class IntelligibilityEnhancer {
public: public:
// Construct a new instance with the given filter bank resolution, // Construct a new instance with the given filter bank resolution,
@ -38,43 +33,30 @@ class IntelligibilityEnhancer {
// to elapse before a new gain computation is made. |variance_rate| specifies // to elapse before a new gain computation is made. |variance_rate| specifies
// the number of gain recomputations after which the variances are reset. // the number of gain recomputations after which the variances are reset.
// |cv_*| are parameters for the VarianceArray constructor for the // |cv_*| are parameters for the VarianceArray constructor for the
// clear speech stream. // lear speech stream.
// TODO(bercic): the |cv_*|, |*_rate| and |gain_limit| parameters should // TODO(bercic): the |cv_*|, |*_rate| and |gain_limit| parameters should
// probably go away once fine tuning is done. They override the internal // probably go away once fine tuning is done. They override the internal
// constants in the class (kGainChangeLimit, kAnalyzeRate, kVarianceRate). // constants in the class (kGainChangeLimit, kAnalyzeRate, kVarianceRate).
IntelligibilityEnhancer(int erb_resolution, IntelligibilityEnhancer(int erb_resolution, int sample_rate_hz, int channels,
int sample_rate_hz, int cv_type, float cv_alpha, int cv_win,
int channels, int analysis_rate, int variance_rate,
int cv_type,
float cv_alpha,
int cv_win,
int analysis_rate,
int variance_rate,
float gain_limit); float gain_limit);
~IntelligibilityEnhancer(); ~IntelligibilityEnhancer();
// Reads and processes chunk of noise stream in time domain.
void ProcessCaptureAudio(float* const* audio);
// Reads chunk of speech in time domain and updates with modified signal.
void ProcessRenderAudio(float* const* audio); void ProcessRenderAudio(float* const* audio);
void ProcessCaptureAudio(float* const* audio);
private: private:
enum AudioSource { enum AudioSource {
kRenderStream = 0, // Clear speech stream. kRenderStream = 0,
kCaptureStream, // Noise stream. kCaptureStream,
}; };
// Provides access point to the frequency domain.
class TransformCallback : public LappedTransform::Callback { class TransformCallback : public LappedTransform::Callback {
public: public:
TransformCallback(IntelligibilityEnhancer* parent, AudioSource source); TransformCallback(IntelligibilityEnhancer* parent, AudioSource source);
// All in frequency domain, receives input |in_block|, applies
// intelligibility enhancement, and writes result to |out_block|.
virtual void ProcessAudioBlock(const std::complex<float>* const* in_block, virtual void ProcessAudioBlock(const std::complex<float>* const* in_block,
int in_channels, int in_channels, int frames,
int frames,
int out_channels, int out_channels,
std::complex<float>* const* out_block); std::complex<float>* const* out_block);
@ -84,95 +66,72 @@ class IntelligibilityEnhancer {
}; };
friend class TransformCallback; friend class TransformCallback;
// Sends streams to ProcessClearBlock or ProcessNoiseBlock based on source. void DispatchAudio(AudioSource source, const std::complex<float>* in_block,
void DispatchAudio(AudioSource source,
const std::complex<float>* in_block,
std::complex<float>* out_block); std::complex<float>* out_block);
// Updates variance computation and analysis with |in_block_|,
// and writes modified speech to |out_block|.
void ProcessClearBlock(const std::complex<float>* in_block, void ProcessClearBlock(const std::complex<float>* in_block,
std::complex<float>* out_block); std::complex<float>* out_block);
// Computes and sets modified gains.
void AnalyzeClearBlock(float power_target); void AnalyzeClearBlock(float power_target);
// Updates variance calculation for noise input with |in_block|.
void ProcessNoiseBlock(const std::complex<float>* in_block, void ProcessNoiseBlock(const std::complex<float>* in_block,
std::complex<float>* out_block); std::complex<float>* out_block);
// Returns number of ERB filters.
static int GetBankSize(int sample_rate, int erb_resolution); static int GetBankSize(int sample_rate, int erb_resolution);
// Initializes ERB filterbank.
void CreateErbBank(); void CreateErbBank();
void SolveEquation14(float lambda, int start_freq, float* sols);
// Analytically solves quadratic for optimal gains given |lambda|.
// Negative gains are set to 0. Stores the results in |sols|.
void SolveForGainsGivenLambda(float lambda, int start_freq, float* sols);
// Computes variance across ERB filters from freq variance |var|.
// Stores in |result|.
void FilterVariance(const float* var, float* result); void FilterVariance(const float* var, float* result);
// Returns dot product of vectors specified by size |length| arrays |a|,|b|.
static float DotProduct(const float* a, const float* b, int length); static float DotProduct(const float* a, const float* b, int length);
static const int kErbResolution; static const int kErbResolution;
static const int kWindowSizeMs; static const int kWindowSizeMs;
static const int kChunkSizeMs; static const int kChunkSizeMs;
static const int kAnalyzeRate; // Default for |analysis_rate_|. static const int kAnalyzeRate;
static const int kVarianceRate; // Default for |variance_rate_|. static const int kVarianceRate;
static const float kClipFreq; static const float kClipFreq;
static const float kConfigRho; // Default production and interpretation SNR. static const float kConfigRho;
static const float kKbdAlpha; static const float kKbdAlpha;
static const float kGainChangeLimit; static const float kGainChangeLimit;
const int freqs_; // Num frequencies in frequency domain. const int freqs_;
const int window_size_; // Window size in samples; also the block size. const int window_size_; // window size in samples; also the block size
const int chunk_length_; // Chunk size in samples. const int chunk_length_; // chunk size in samples
const int bank_size_; // Num ERB filters. const int bank_size_;
const int sample_rate_hz_; const int sample_rate_hz_;
const int erb_resolution_; const int erb_resolution_;
const int channels_; // Num channels. const int channels_;
const int analysis_rate_; // Num blocks before gains recalculated. const int analysis_rate_;
const int variance_rate_; // Num recalculations before history is cleared. const int variance_rate_;
intelligibility::VarianceArray clear_variance_; intelligibility::VarianceArray clear_variance_;
intelligibility::VarianceArray noise_variance_; intelligibility::VarianceArray noise_variance_;
rtc::scoped_ptr<float[]> filtered_clear_var_; scoped_ptr<float[]> filtered_clear_var_;
rtc::scoped_ptr<float[]> filtered_noise_var_; scoped_ptr<float[]> filtered_noise_var_;
float** filter_bank_; // TODO(ekmeyerson): Switch to using ChannelBuffer. float** filter_bank_;
rtc::scoped_ptr<float[]> center_freqs_; scoped_ptr<float[]> center_freqs_;
int start_freq_; int start_freq_;
rtc::scoped_ptr<float[]> rho_; // Production and interpretation SNR. scoped_ptr<float[]> rho_;
// for each ERB band. scoped_ptr<float[]> gains_eq_;
rtc::scoped_ptr<float[]> gains_eq_; // Pre-filter modified gains.
intelligibility::GainApplier gain_applier_; intelligibility::GainApplier gain_applier_;
// Destination buffer used to reassemble blocked chunks before overwriting // Destination buffer used to reassemble blocked chunks before overwriting
// the original input array with modifications. // the original input array with modifications.
// TODO(ekmeyerson): Switch to using ChannelBuffer.
float** temp_out_buffer_; float** temp_out_buffer_;
scoped_ptr<float*[]> input_audio_;
rtc::scoped_ptr<float* []> input_audio_; scoped_ptr<float[]> kbd_window_;
rtc::scoped_ptr<float[]> kbd_window_;
TransformCallback render_callback_; TransformCallback render_callback_;
TransformCallback capture_callback_; TransformCallback capture_callback_;
rtc::scoped_ptr<LappedTransform> render_mangler_; scoped_ptr<LappedTransform> render_mangler_;
rtc::scoped_ptr<LappedTransform> capture_mangler_; scoped_ptr<LappedTransform> capture_mangler_;
int block_count_; int block_count_;
int analysis_step_; int analysis_step_;
// TODO(bercic): Quick stopgap measure for voice detection in the clear // TODO(bercic): Quick stopgap measure for voice detection in the clear
// and noise streams. // and noise streams.
// Note: VAD currently does not affect anything in IntelligibilityEnhancer.
VadInst* vad_high_; VadInst* vad_high_;
VadInst* vad_low_; VadInst* vad_low_;
rtc::scoped_ptr<int16_t[]> vad_tmp_buffer_; scoped_ptr<int16_t[]> vad_tmp_buffer_;
bool has_voice_low_; // Whether voice detected in speech stream. bool has_voice_low_;
}; };
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_ #endif // WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_

View File

@ -8,12 +8,6 @@
* be found in the AUTHORS file in the root of the source tree. * be found in the AUTHORS file in the root of the source tree.
*/ */
//
// Command line tool for speech intelligibility enhancement. Provides for
// running and testing intelligibility_enhancer as an independent process.
// Use --help for options.
//
#include <arpa/inet.h> #include <arpa/inet.h>
#include <fcntl.h> #include <fcntl.h>
#include <stdint.h> #include <stdint.h>
@ -30,71 +24,53 @@
#include <complex> #include <complex>
#include "gflags/gflags.h" #include "gflags/gflags.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/checks.h" #include "webrtc/base/checks.h"
#include "webrtc/common_audio/real_fourier.h" #include "webrtc/common_audio/real_fourier.h"
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h" #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h"
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h" #include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h" #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
// PCM data simulating streams
const int16_t* in_ipcm; const int16_t* in_ipcm;
int16_t* out_ipcm; int16_t* out_ipcm;
const int16_t* noise_ipcm; const int16_t* noise_ipcm;
float* in_fpcm; float* in_fpcm;
float* out_fpcm; float* out_fpcm;
float* noise_fpcm; float* noise_fpcm;
// Current locations in streams
float* noise_cursor; float* noise_cursor;
float* clear_cursor; float* clear_cursor;
int samples; // Number of samples in input PCM file int samples;
int fragment_size; // Number of samples to process at a time int fragment_size;
// to simulate APM stream processing
using std::complex; using std::complex;
namespace webrtc {
using webrtc::RealFourier; using webrtc::RealFourier;
using webrtc::IntelligibilityEnhancer; using webrtc::IntelligibilityEnhancer;
DEFINE_int32(clear_type, DEFINE_int32(clear_type, webrtc::intelligibility::VarianceArray::kStepInfinite,
webrtc::intelligibility::VarianceArray::kStepInfinite,
"Variance algorithm for clear data."); "Variance algorithm for clear data.");
DEFINE_double(clear_alpha, 0.9, "Variance decay factor for clear data."); DEFINE_double(clear_alpha, 0.9,
DEFINE_int32(clear_window, "Variance decay factor for clear data.");
475, DEFINE_int32(clear_window, 475,
"Window size for windowed variance for clear data."); "Window size for windowed variance for clear data.");
DEFINE_int32(sample_rate, DEFINE_int32(sample_rate, 16000,
16000,
"Audio sample rate used in the input and output files."); "Audio sample rate used in the input and output files.");
DEFINE_int32(ana_rate, DEFINE_int32(ana_rate, 800,
800,
"Analysis rate; gains recalculated every N blocks."); "Analysis rate; gains recalculated every N blocks.");
DEFINE_int32( DEFINE_int32(var_rate, 2,
var_rate, "Variance clear rate; history is forgotten every N gain recalculations.");
2,
"Variance clear rate; history is forgotten every N gain recalculations.");
DEFINE_double(gain_limit, 1000.0, "Maximum gain change in one block."); DEFINE_double(gain_limit, 1000.0, "Maximum gain change in one block.");
DEFINE_bool(repeat, false, "Repeat input file ad nauseam."); DEFINE_bool(repeat, false, "Repeat input file ad nauseam.");
DEFINE_string(clear_file, "speech.pcm", "Input file with clear speech."); DEFINE_string(clear_file, "speech.pcm", "Input file with clear speech.");
DEFINE_string(noise_file, "noise.pcm", "Input file with noise data."); DEFINE_string(noise_file, "noise.pcm", "Input file with noise data.");
DEFINE_string(out_file, DEFINE_string(out_file, "proc_enhanced.pcm", "Enhanced output. Use '-' to "
"proc_enhanced.pcm",
"Enhanced output. Use '-' to "
"pipe through aplay internally."); "pipe through aplay internally.");
// Constant IntelligibilityEnhancer constructor parameters. // Write an Sun AU-formatted audio chunk into file descriptor |fd|. Can be used
const int kErbResolution = 2; // to pipe the audio stream directly into aplay.
const int kNumChannels = 1;
// Converts output stream to Sun AU format and writes to file descriptor |fd|.
// Can be used to pipe output directly into aplay.
// TODO(ekmeyerson): Modify to write WAV instead.
void writeau(int fd) { void writeau(int fd) {
uint32_t thing; uint32_t thing;
@ -116,14 +92,12 @@ void writeau(int fd) {
write(fd, out_ipcm, sizeof(*out_ipcm) * samples); write(fd, out_ipcm, sizeof(*out_ipcm) * samples);
} }
// void function for gtest int main(int argc, char* argv[]) {
void void_main(int argc, char* argv[]) { google::SetUsageMessage("\n\nVariance algorithm types are:\n"
google::SetUsageMessage( " 0 - infinite/normal,\n"
"\n\nVariance algorithm types are:\n" " 1 - exponentially decaying,\n"
" 0 - infinite/normal,\n" " 2 - rolling window.\n"
" 1 - exponentially decaying,\n" "\nInput files must be little-endian 16-bit signed raw PCM.\n");
" 2 - rolling window.\n"
"\nInput files must be little-endian 16-bit signed raw PCM.\n");
google::ParseCommandLineFlags(&argc, &argv, true); google::ParseCommandLineFlags(&argc, &argv, true);
const char* in_name = FLAGS_clear_file.c_str(); const char* in_name = FLAGS_clear_file.c_str();
@ -133,15 +107,10 @@ void void_main(int argc, char* argv[]) {
int in_fd, out_fd, noise_fd; int in_fd, out_fd, noise_fd;
FILE* aplay_file = nullptr; FILE* aplay_file = nullptr;
// Load settings and set up PCMs. fragment_size = FLAGS_sample_rate / 100;
fragment_size = FLAGS_sample_rate / 100; // Mirror real time APM chunk size.
// Duplicates chunk_length_ in
// IntelligibilityEnhancer.
ASSERT_EQ(stat(in_name, &in_stat), 0) << "Empty speech input.";
ASSERT_EQ(stat(noise_name, &noise_stat), 0) << "Empty noise input.";
stat(in_name, &in_stat);
stat(noise_name, &noise_stat);
samples = in_stat.st_size / sizeof(*in_ipcm); samples = in_stat.st_size / sizeof(*in_ipcm);
in_fd = open(in_name, O_RDONLY); in_fd = open(in_name, O_RDONLY);
@ -154,10 +123,10 @@ void void_main(int argc, char* argv[]) {
} }
noise_fd = open(noise_name, O_RDONLY); noise_fd = open(noise_name, O_RDONLY);
in_ipcm = static_cast<int16_t*>( in_ipcm = static_cast<int16_t*>(mmap(nullptr, in_stat.st_size, PROT_READ,
mmap(nullptr, in_stat.st_size, PROT_READ, MAP_PRIVATE, in_fd, 0)); MAP_PRIVATE, in_fd, 0));
noise_ipcm = static_cast<int16_t*>( noise_ipcm = static_cast<int16_t*>(mmap(nullptr, noise_stat.st_size,
mmap(nullptr, noise_stat.st_size, PROT_READ, MAP_PRIVATE, noise_fd, 0)); PROT_READ, MAP_PRIVATE, noise_fd, 0));
out_ipcm = new int16_t[samples]; out_ipcm = new int16_t[samples];
out_fpcm = new float[samples]; out_fpcm = new float[samples];
in_fpcm = new float[samples]; in_fpcm = new float[samples];
@ -167,17 +136,18 @@ void void_main(int argc, char* argv[]) {
noise_fpcm[i] = noise_ipcm[i % (noise_stat.st_size / sizeof(*noise_ipcm))]; noise_fpcm[i] = noise_ipcm[i % (noise_stat.st_size / sizeof(*noise_ipcm))];
} }
// Run intelligibility enhancement. //feenableexcept(FE_INVALID | FE_OVERFLOW);
IntelligibilityEnhancer enh(2,
IntelligibilityEnhancer enh( FLAGS_sample_rate, 1,
kErbResolution, FLAGS_clear_type,
FLAGS_sample_rate, static_cast<float>(FLAGS_clear_alpha),
kNumChannels, FLAGS_clear_window,
FLAGS_clear_type, static_cast<float>(FLAGS_clear_alpha), FLAGS_ana_rate,
FLAGS_clear_window, FLAGS_ana_rate, FLAGS_var_rate, FLAGS_gain_limit); FLAGS_var_rate,
FLAGS_gain_limit);
// Slice the input into smaller chunks, as the APM would do, and feed them // Slice the input into smaller chunks, as the APM would do, and feed them
// through the enhancer. Repeat indefinitely if FLAGS_repeat is set. // into the enhancer. Repeat indefinitely if FLAGS_repeat is set.
do { do {
noise_cursor = noise_fpcm; noise_cursor = noise_fpcm;
clear_cursor = in_fpcm; clear_cursor = in_fpcm;
@ -211,11 +181,7 @@ void void_main(int argc, char* argv[]) {
close(out_fd); close(out_fd);
} }
close(in_fd); close(in_fd);
}
} // namespace webrtc
int main(int argc, char* argv[]) {
webrtc::void_main(argc, argv);
return 0; return 0;
} }

View File

@ -8,10 +8,6 @@
* be found in the AUTHORS file in the root of the source tree. * be found in the AUTHORS file in the root of the source tree.
*/ */
//
// Implements helper functions and classes for intelligibility enhancement.
//
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h" #include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h"
#include <algorithm> #include <algorithm>
@ -44,13 +40,10 @@ inline bool cplxnormal(complex<float> c) {
// were chosen randomly, so that even a series of all zeroes has some small // were chosen randomly, so that even a series of all zeroes has some small
// variability. // variability.
inline complex<float> zerofudge(complex<float> c) { inline complex<float> zerofudge(complex<float> c) {
const static complex<float> fudge[7] = {{0.001f, 0.002f}, const static complex<float> fudge[7] = {
{0.008f, 0.001f}, {0.001f, 0.002f}, {0.008f, 0.001f}, {0.003f, 0.008f}, {0.0006f, 0.0009f},
{0.003f, 0.008f}, {0.001f, 0.004f}, {0.003f, 0.004f}, {0.002f, 0.009f}
{0.0006f, 0.0009f}, };
{0.001f, 0.004f},
{0.003f, 0.004f},
{0.002f, 0.009f}};
static int fudge_index = 0; static int fudge_index = 0;
if (cplxfinite(c) && !cplxnormal(c)) { if (cplxfinite(c) && !cplxnormal(c)) {
fudge_index = (fudge_index + 1) % 7; fudge_index = (fudge_index + 1) % 7;
@ -61,9 +54,8 @@ inline complex<float> zerofudge(complex<float> c) {
// Incremental mean computation. Return the mean of the series with the // Incremental mean computation. Return the mean of the series with the
// mean |mean| with added |data|. // mean |mean| with added |data|.
inline complex<float> NewMean(complex<float> mean, inline complex<float> NewMean(complex<float> mean, complex<float> data,
complex<float> data, int count) {
int count) {
return mean + (data - mean) / static_cast<float>(count); return mean + (data - mean) / static_cast<float>(count);
} }
@ -81,9 +73,7 @@ namespace intelligibility {
static const int kWindowBlockSize = 10; static const int kWindowBlockSize = 10;
VarianceArray::VarianceArray(int freqs, VarianceArray::VarianceArray(int freqs, StepType type, int window_size,
StepType type,
int window_size,
float decay) float decay)
: running_mean_(new complex<float>[freqs]()), : running_mean_(new complex<float>[freqs]()),
running_mean_sq_(new complex<float>[freqs]()), running_mean_sq_(new complex<float>[freqs]()),
@ -97,15 +87,15 @@ VarianceArray::VarianceArray(int freqs,
history_cursor_(0), history_cursor_(0),
count_(0), count_(0),
array_mean_(0.0f) { array_mean_(0.0f) {
history_.reset(new rtc::scoped_ptr<complex<float>[]>[freqs_]()); history_.reset(new scoped_ptr<complex<float>[]>[freqs_]());
for (int i = 0; i < freqs_; ++i) { for (int i = 0; i < freqs_; ++i) {
history_[i].reset(new complex<float>[window_size_]()); history_[i].reset(new complex<float>[window_size_]());
} }
subhistory_.reset(new rtc::scoped_ptr<complex<float>[]>[freqs_]()); subhistory_.reset(new scoped_ptr<complex<float>[]>[freqs_]());
for (int i = 0; i < freqs_; ++i) { for (int i = 0; i < freqs_; ++i) {
subhistory_[i].reset(new complex<float>[window_size_]()); subhistory_[i].reset(new complex<float>[window_size_]());
} }
subhistory_sq_.reset(new rtc::scoped_ptr<complex<float>[]>[freqs_]()); subhistory_sq_.reset(new scoped_ptr<complex<float>[]>[freqs_]());
for (int i = 0; i < freqs_; ++i) { for (int i = 0; i < freqs_; ++i) {
subhistory_sq_[i].reset(new complex<float>[window_size_]()); subhistory_sq_[i].reset(new complex<float>[window_size_]());
} }
@ -141,15 +131,13 @@ void VarianceArray::InfiniteStep(const complex<float>* data, bool skip_fudge) {
} else { } else {
float old_sum = conj_sum_[i]; float old_sum = conj_sum_[i];
complex<float> old_mean = running_mean_[i]; complex<float> old_mean = running_mean_[i];
running_mean_[i] = running_mean_[i] = old_mean + (sample - old_mean) /
old_mean + (sample - old_mean) / static_cast<float>(count_); static_cast<float>(count_);
conj_sum_[i] = conj_sum_[i] = (old_sum + std::conj(sample - old_mean) *
(old_sum + std::conj(sample - old_mean) * (sample - running_mean_[i])) (sample - running_mean_[i])).real();
.real(); variance_[i] = conj_sum_[i] / (count_ - 1); // + fudge[fudge_index].real();
variance_[i] =
conj_sum_[i] / (count_ - 1); // + fudge[fudge_index].real();
if (skip_fudge && false) { if (skip_fudge && false) {
// variance_[i] -= fudge[fudge_index].real(); //variance_[i] -= fudge[fudge_index].real();
} }
} }
array_mean_ += (variance_[i] - array_mean_) / (i + 1); array_mean_ += (variance_[i] - array_mean_) / (i + 1);
@ -173,13 +161,11 @@ void VarianceArray::DecayStep(const complex<float>* data, bool /*dummy*/) {
complex<float> prev = running_mean_[i]; complex<float> prev = running_mean_[i];
complex<float> prev2 = running_mean_sq_[i]; complex<float> prev2 = running_mean_sq_[i];
running_mean_[i] = decay_ * prev + (1.0f - decay_) * sample; running_mean_[i] = decay_ * prev + (1.0f - decay_) * sample;
running_mean_sq_[i] = running_mean_sq_[i] = decay_ * prev2 +
decay_ * prev2 + (1.0f - decay_) * sample * std::conj(sample); (1.0f - decay_) * sample * std::conj(sample);
// variance_[i] = decay_ * variance_[i] + (1.0f - decay_) * ( //variance_[i] = decay_ * variance_[i] + (1.0f - decay_) * (
// (sample - running_mean_[i]) * std::conj(sample - // (sample - running_mean_[i]) * std::conj(sample - running_mean_[i])).real();
// running_mean_[i])).real(); variance_[i] = (running_mean_sq_[i] - running_mean_[i] * std::conj(running_mean_[i])).real();
variance_[i] = (running_mean_sq_[i] -
running_mean_[i] * std::conj(running_mean_[i])).real();
} }
array_mean_ += (variance_[i] - array_mean_) / (i + 1); array_mean_ += (variance_[i] - array_mean_) / (i + 1);
@ -200,15 +186,15 @@ void VarianceArray::WindowedStep(const complex<float>* data, bool /*dummy*/) {
mean = history_[i][history_cursor_]; mean = history_[i][history_cursor_];
variance_[i] = 0.0f; variance_[i] = 0.0f;
for (int j = 1; j < num; ++j) { for (int j = 1; j < num; ++j) {
complex<float> sample = complex<float> sample = zerofudge(
zerofudge(history_[i][(history_cursor_ + j) % window_size_]); history_[i][(history_cursor_ + j) % window_size_]);
sample = history_[i][(history_cursor_ + j) % window_size_]; sample = history_[i][(history_cursor_ + j) % window_size_];
float old_sum = conj_sum; float old_sum = conj_sum;
complex<float> old_mean = mean; complex<float> old_mean = mean;
mean = old_mean + (sample - old_mean) / static_cast<float>(j + 1); mean = old_mean + (sample - old_mean) / static_cast<float>(j + 1);
conj_sum = conj_sum = (old_sum + std::conj(sample - old_mean) *
(old_sum + std::conj(sample - old_mean) * (sample - mean)).real(); (sample - mean)).real();
variance_[i] = conj_sum / (j); variance_[i] = conj_sum / (j);
} }
array_mean_ += (variance_[i] - array_mean_) / (i + 1); array_mean_ += (variance_[i] - array_mean_) / (i + 1);
@ -231,11 +217,11 @@ void VarianceArray::BlockedStep(const complex<float>* data, bool /*dummy*/) {
subhistory_[i][history_cursor_ % window_size_] = sub_running_mean_[i]; subhistory_[i][history_cursor_ % window_size_] = sub_running_mean_[i];
subhistory_sq_[i][history_cursor_ % window_size_] = sub_running_mean_sq_[i]; subhistory_sq_[i][history_cursor_ % window_size_] = sub_running_mean_sq_[i];
variance_[i] = variance_[i] = (NewMean(running_mean_sq_[i], sub_running_mean_sq_[i],
(NewMean(running_mean_sq_[i], sub_running_mean_sq_[i], blocks) - blocks) -
NewMean(running_mean_[i], sub_running_mean_[i], blocks) * NewMean(running_mean_[i], sub_running_mean_[i], blocks) *
std::conj(NewMean(running_mean_[i], sub_running_mean_[i], blocks))) std::conj(NewMean(running_mean_[i], sub_running_mean_[i],
.real(); blocks))).real();
if (count_ == kWindowBlockSize - 1) { if (count_ == kWindowBlockSize - 1) {
sub_running_mean_[i] = complex<float>(0.0f, 0.0f); sub_running_mean_[i] = complex<float>(0.0f, 0.0f);
sub_running_mean_sq_[i] = complex<float>(0.0f, 0.0f); sub_running_mean_sq_[i] = complex<float>(0.0f, 0.0f);
@ -298,3 +284,4 @@ void GainApplier::Apply(const complex<float>* in_block,
} // namespace intelligibility } // namespace intelligibility
} // namespace webrtc } // namespace webrtc

View File

@ -8,16 +8,12 @@
* be found in the AUTHORS file in the root of the source tree. * be found in the AUTHORS file in the root of the source tree.
*/ */
//
// Specifies helper classes for intelligibility enhancement.
//
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_ #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_ #define WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_
#include <complex> #include <complex>
#include "webrtc/base/scoped_ptr.h" #include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc { namespace webrtc {
@ -67,10 +63,14 @@ class VarianceArray {
void ApplyScale(float scale); void ApplyScale(float scale);
// The current set of variances. // The current set of variances.
const float* variance() const { return variance_.get(); } const float* variance() const {
return variance_.get();
}
// The mean value of the current set of variances. // The mean value of the current set of variances.
float array_mean() const { return array_mean_; } float array_mean() const {
return array_mean_;
}
private: private:
void InfiniteStep(const std::complex<float>* data, bool dummy); void InfiniteStep(const std::complex<float>* data, bool dummy);
@ -78,26 +78,23 @@ class VarianceArray {
void WindowedStep(const std::complex<float>* data, bool dummy); void WindowedStep(const std::complex<float>* data, bool dummy);
void BlockedStep(const std::complex<float>* data, bool dummy); void BlockedStep(const std::complex<float>* data, bool dummy);
// TODO(ekmeyerson): Switch the following running means
// and histories from rtc::scoped_ptr to std::vector.
// The current average X and X^2. // The current average X and X^2.
rtc::scoped_ptr<std::complex<float>[]> running_mean_; scoped_ptr<std::complex<float>[]> running_mean_;
rtc::scoped_ptr<std::complex<float>[]> running_mean_sq_; scoped_ptr<std::complex<float>[]> running_mean_sq_;
// Average X and X^2 for the current block in kStepBlocked. // Average X and X^2 for the current block in kStepBlocked.
rtc::scoped_ptr<std::complex<float>[]> sub_running_mean_; scoped_ptr<std::complex<float>[]> sub_running_mean_;
rtc::scoped_ptr<std::complex<float>[]> sub_running_mean_sq_; scoped_ptr<std::complex<float>[]> sub_running_mean_sq_;
// Sample history for the rolling window in kStepWindowed and block-wise // Sample history for the rolling window in kStepWindowed and block-wise
// histories for kStepBlocked. // histories for kStepBlocked.
rtc::scoped_ptr<rtc::scoped_ptr<std::complex<float>[]>[]> history_; scoped_ptr<scoped_ptr<std::complex<float>[]>[]> history_;
rtc::scoped_ptr<rtc::scoped_ptr<std::complex<float>[]>[]> subhistory_; scoped_ptr<scoped_ptr<std::complex<float>[]>[]> subhistory_;
rtc::scoped_ptr<rtc::scoped_ptr<std::complex<float>[]>[]> subhistory_sq_; scoped_ptr<scoped_ptr<std::complex<float>[]>[]> subhistory_sq_;
// The current set of variances and sums for Welford's algorithm. // The current set of variances and sums for Welford's algorithm.
rtc::scoped_ptr<float[]> variance_; scoped_ptr<float[]> variance_;
rtc::scoped_ptr<float[]> conj_sum_; scoped_ptr<float[]> conj_sum_;
const int freqs_; const int freqs_;
const int window_size_; const int window_size_;
@ -121,13 +118,15 @@ class GainApplier {
std::complex<float>* out_block); std::complex<float>* out_block);
// Return the current target gain set. Modify this array to set the targets. // Return the current target gain set. Modify this array to set the targets.
float* target() const { return target_.get(); } float* target() const {
return target_.get();
}
private: private:
const int freqs_; const int freqs_;
const float change_limit_; const float change_limit_;
rtc::scoped_ptr<float[]> target_; scoped_ptr<float[]> target_;
rtc::scoped_ptr<float[]> current_; scoped_ptr<float[]> current_;
}; };
} // namespace intelligibility } // namespace intelligibility
@ -135,3 +134,4 @@ class GainApplier {
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_ #endif // WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_