From c555b99c13aa25cf891dd34602f1b620b3f33bfb Mon Sep 17 00:00:00 2001 From: aluebs Date: Tue, 16 Jun 2015 20:26:16 -0700 Subject: [PATCH] Revert of Allow intelligibility to compile in apm (patchset #1 id:1 of https://codereview.webrtc.org/1182323005/) Reason for revert: Breaking the build bots: http://build.chromium.org/p/client.webrtc/builders/Mac32%20Release%20%5Blarge%20tests%5D/builds/4544 Fails to compile with this error: ../../webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc:218:25: error: no member named 'accumulate' in namespace 'std' power_target = std::accumulate(clear_variance_.variance(), Original issue's description: > Allow intelligibility to compile in apm > > - Added files to gyp and BUILD > - Made minor fixes to get everything to compile > and intelligibility_proc to run > - Added comments > - Auto-reformatting > > Original cl is at: https://webrtc-codereview.appspot.com/57579004/ > > TBR=aluebs@webrtc.org > > Committed: https://chromium.googlesource.com/external/webrtc/+/b7553dfdbb1ca7779eb0d80b5f509523c9b00086 TBR=ekmeyerson@webrtc.org NOPRESUBMIT=true NOTREECHECKS=true NOTRY=true Review URL: https://codereview.webrtc.org/1187033005 Cr-Commit-Position: refs/heads/master@{#9455} --- webrtc/modules/audio_processing/BUILD.gn | 4 - .../audio_processing/audio_processing.gypi | 4 - .../audio_processing_tests.gypi | 13 -- .../intelligibility_enhancer.cc | 180 ++++++++---------- .../intelligibility_enhancer.h | 109 ++++------- .../intelligibility/intelligibility_proc.cc | 112 ++++------- .../intelligibility/intelligibility_utils.cc | 75 +++----- .../intelligibility/intelligibility_utils.h | 44 ++--- 8 files changed, 205 insertions(+), 336 deletions(-) diff --git a/webrtc/modules/audio_processing/BUILD.gn b/webrtc/modules/audio_processing/BUILD.gn index ce750b636..092be1ef1 100644 --- a/webrtc/modules/audio_processing/BUILD.gn +++ b/webrtc/modules/audio_processing/BUILD.gn @@ -89,10 +89,6 @@ source_set("audio_processing") { "high_pass_filter_impl.cc", "high_pass_filter_impl.h", "include/audio_processing.h", - "intelligibility/intelligibility_enhancer.cc", - "intelligibility/intelligibility_enhancer.h", - "intelligibility/intelligibility_utils.cc", - "intelligibility/intelligibility_utils.h", "level_estimator_impl.cc", "level_estimator_impl.h", "noise_suppression_impl.cc", diff --git a/webrtc/modules/audio_processing/audio_processing.gypi b/webrtc/modules/audio_processing/audio_processing.gypi index a4f9b39a9..8eb277503 100644 --- a/webrtc/modules/audio_processing/audio_processing.gypi +++ b/webrtc/modules/audio_processing/audio_processing.gypi @@ -99,10 +99,6 @@ 'high_pass_filter_impl.cc', 'high_pass_filter_impl.h', 'include/audio_processing.h', - 'intelligibility/intelligibility_enhancer.cc', - 'intelligibility/intelligibility_enhancer.h', - 'intelligibility/intelligibility_utils.cc', - 'intelligibility/intelligibility_utils.h', 'level_estimator_impl.cc', 'level_estimator_impl.h', 'noise_suppression_impl.cc', diff --git a/webrtc/modules/audio_processing/audio_processing_tests.gypi b/webrtc/modules/audio_processing/audio_processing_tests.gypi index d2ef78489..7658e1096 100644 --- a/webrtc/modules/audio_processing/audio_processing_tests.gypi +++ b/webrtc/modules/audio_processing/audio_processing_tests.gypi @@ -59,19 +59,6 @@ 'beamformer/nonlinear_beamformer_test.cc', ], }, # nonlinear_beamformer_test - { - 'target_name': 'intelligibility_proc', - 'type': 'executable', - 'dependencies': [ - 'audioproc_test_utils', - 
'<(DEPTH)/third_party/gflags/gflags.gyp:gflags', - '<(DEPTH)/testing/gtest.gyp:gtest', - '<(webrtc_root)/modules/modules.gyp:audio_processing', - ], - 'sources': [ - 'intelligibility/intelligibility_proc.cc', - ], - }, # intelligibility_proc ], 'conditions': [ ['enable_protobuf==1', { diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc index 6a25104bd..932eff109 100644 --- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc +++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc @@ -8,13 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ -// -// Implements core class for intelligibility enhancer. -// -// Details of the model and algorithm can be found in the original paper: -// http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6882788 -// - #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h" #include @@ -34,16 +27,13 @@ namespace webrtc { const int IntelligibilityEnhancer::kErbResolution = 2; const int IntelligibilityEnhancer::kWindowSizeMs = 2; -const int IntelligibilityEnhancer::kChunkSizeMs = 10; // Size provided by APM. +// The size of the chunk provided by APM, in milliseconds. +const int IntelligibilityEnhancer::kChunkSizeMs = 10; const int IntelligibilityEnhancer::kAnalyzeRate = 800; const int IntelligibilityEnhancer::kVarianceRate = 2; const float IntelligibilityEnhancer::kClipFreq = 200.0f; const float IntelligibilityEnhancer::kConfigRho = 0.02f; const float IntelligibilityEnhancer::kKbdAlpha = 1.5f; - -// To disable gain update smoothing, set gain limit to be VERY high. -// TODO(ekmeyerson): Add option to disable gain smoothing altogether -// to avoid the extra computation. 
const float IntelligibilityEnhancer::kGainChangeLimit = 0.0125f; using VarianceType = intelligibility::VarianceArray::StepType; @@ -51,14 +41,12 @@ using VarianceType = intelligibility::VarianceArray::StepType; IntelligibilityEnhancer::TransformCallback::TransformCallback( IntelligibilityEnhancer* parent, IntelligibilityEnhancer::AudioSource source) - : parent_(parent), source_(source) { -} + : parent_(parent), + source_(source) {} void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock( const complex* const* in_block, - int in_channels, - int frames, - int /* out_channels */, + int in_channels, int frames, int /* out_channels */, complex* const* out_block) { DCHECK_EQ(parent_->freqs_, frames); for (int i = 0; i < in_channels; ++i) { @@ -69,14 +57,13 @@ void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock( IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution, int sample_rate_hz, int channels, - int cv_type, - float cv_alpha, + int cv_type, float cv_alpha, int cv_win, int analysis_rate, int variance_rate, float gain_limit) - : freqs_(RealFourier::ComplexLength( - RealFourier::FftOrder(sample_rate_hz * kWindowSizeMs / 1000))), + : freqs_(RealFourier::ComplexLength(RealFourier::FftOrder( + sample_rate_hz * kWindowSizeMs / 1000))), window_size_(1 << RealFourier::FftOrder(freqs_)), chunk_length_(sample_rate_hz * kChunkSizeMs / 1000), bank_size_(GetBankSize(sample_rate_hz, erb_resolution)), @@ -85,9 +72,7 @@ IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution, channels_(channels), analysis_rate_(analysis_rate), variance_rate_(variance_rate), - clear_variance_(freqs_, - static_cast(cv_type), - cv_win, + clear_variance_(freqs_, static_cast(cv_type), cv_win, cv_alpha), noise_variance_(freqs_, VarianceType::kStepInfinite, 475, 0.01f), filtered_clear_var_(new float[bank_size_]), @@ -98,51 +83,58 @@ IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution, gains_eq_(new float[bank_size_]), gain_applier_(freqs_, gain_limit), temp_out_buffer_(nullptr), - input_audio_(new float* [channels]), + input_audio_(new float*[channels]), kbd_window_(new float[window_size_]), render_callback_(this, AudioSource::kRenderStream), capture_callback_(this, AudioSource::kCaptureStream), block_count_(0), analysis_step_(0), - vad_high_(WebRtcVad_Create()), - vad_low_(WebRtcVad_Create()), + vad_high_(nullptr), + vad_low_(nullptr), vad_tmp_buffer_(new int16_t[chunk_length_]) { DCHECK_LE(kConfigRho, 1.0f); CreateErbBank(); + WebRtcVad_Create(&vad_high_); WebRtcVad_Init(vad_high_); - WebRtcVad_set_mode(vad_high_, 0); // High likelihood of speech. + WebRtcVad_set_mode(vad_high_, 0); // high likelihood of speech + WebRtcVad_Create(&vad_low_); WebRtcVad_Init(vad_low_); - WebRtcVad_set_mode(vad_low_, 3); // Low likelihood of speech. + WebRtcVad_set_mode(vad_low_, 3); // low likelihood of speech - temp_out_buffer_ = static_cast( - malloc(sizeof(*temp_out_buffer_) * channels_ + - sizeof(**temp_out_buffer_) * chunk_length_ * channels_)); + temp_out_buffer_ = static_cast(malloc( + sizeof(*temp_out_buffer_) * channels_ + + sizeof(**temp_out_buffer_) * chunk_length_ * channels_)); for (int i = 0; i < channels_; ++i) { - temp_out_buffer_[i] = - reinterpret_cast(temp_out_buffer_ + channels_) + - chunk_length_ * i; + temp_out_buffer_[i] = reinterpret_cast(temp_out_buffer_ + channels_) + + chunk_length_ * i; } - // Assumes all rho equal. 
for (int i = 0; i < bank_size_; ++i) { rho_[i] = kConfigRho * kConfigRho; } float freqs_khz = kClipFreq / 1000.0f; - int erb_index = static_cast(ceilf( - 11.17f * logf((freqs_khz + 0.312f) / (freqs_khz + 14.6575f)) + 43.0f)); + int erb_index = static_cast(ceilf(11.17f * logf((freqs_khz + 0.312f) / + (freqs_khz + 14.6575f)) + + 43.0f)); start_freq_ = max(1, erb_index * kErbResolution); WindowGenerator::KaiserBesselDerived(kKbdAlpha, window_size_, kbd_window_.get()); - render_mangler_.reset(new LappedTransform( - channels_, channels_, chunk_length_, kbd_window_.get(), window_size_, - window_size_ / 2, &render_callback_)); - capture_mangler_.reset(new LappedTransform( - channels_, channels_, chunk_length_, kbd_window_.get(), window_size_, - window_size_ / 2, &capture_callback_)); + render_mangler_.reset(new LappedTransform(channels_, channels_, + chunk_length_, + kbd_window_.get(), + window_size_, + window_size_ / 2, + &render_callback_)); + capture_mangler_.reset(new LappedTransform(channels_, channels_, + chunk_length_, + kbd_window_.get(), + window_size_, + window_size_ / 2, + &capture_callback_)); } IntelligibilityEnhancer::~IntelligibilityEnhancer() { @@ -158,9 +150,7 @@ void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio) { has_voice_low_ = WebRtcVad_Process(vad_low_, sample_rate_hz_, vad_tmp_buffer_.get(), chunk_length_) == 1; - // Process and enhance chunk of |audio| render_mangler_->ProcessChunk(audio, temp_out_buffer_); - for (int i = 0; i < channels_; ++i) { memcpy(audio[i], temp_out_buffer_[i], chunk_length_ * sizeof(**temp_out_buffer_)); @@ -171,25 +161,21 @@ void IntelligibilityEnhancer::ProcessCaptureAudio(float* const* audio) { for (int i = 0; i < chunk_length_; ++i) { vad_tmp_buffer_[i] = (int16_t)audio[0][i]; } - // TODO(bercic): The VAD was always detecting voice in the noise stream, - // no matter what the aggressiveness, so it was temporarily disabled here. - - #if 0 - if (WebRtcVad_Process(vad_high_, sample_rate_hz_, vad_tmp_buffer_.get(), - chunk_length_) == 1) { - printf("capture HAS speech\n"); - return; - } - printf("capture NO speech\n"); - #endif + // TODO(bercic): the VAD was always detecting voice in the noise stream, + // no matter what the aggressiveness, so it was temporarily disabled here + //if (WebRtcVad_Process(vad_high_, sample_rate_hz_, vad_tmp_buffer_.get(), + // chunk_length_) == 1) { + // printf("capture HAS speech\n"); + // return; + //} + //printf("capture NO speech\n"); capture_mangler_->ProcessChunk(audio, temp_out_buffer_); } void IntelligibilityEnhancer::DispatchAudio( IntelligibilityEnhancer::AudioSource source, - const complex* in_block, - complex* out_block) { + const complex* in_block, complex* out_block) { switch (source) { case kRenderStream: ProcessClearBlock(in_block, out_block); @@ -210,9 +196,6 @@ void IntelligibilityEnhancer::ProcessClearBlock(const complex* in_block, return; } - // For now, always assumes enhancement is necessary. - // TODO(ekmeyerson): Change to only enhance if necessary, - // based on experiments with different cutoffs. 
if (has_voice_low_ || true) { clear_variance_.Step(in_block, false); power_target = std::accumulate(clear_variance_.variance(), @@ -238,25 +221,23 @@ void IntelligibilityEnhancer::AnalyzeClearBlock(float power_target) { FilterVariance(clear_variance_.variance(), filtered_clear_var_.get()); FilterVariance(noise_variance_.variance(), filtered_noise_var_.get()); - // Bisection search for optimal |lambda| + /* lambda binary search */ float lambda_bot = -1.0f, lambda_top = -10e-18f, lambda; float power_bot, power_top, power; - SolveForGainsGivenLambda(lambda_top, start_freq_, gains_eq_.get()); - power_top = - DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_); - SolveForGainsGivenLambda(lambda_bot, start_freq_, gains_eq_.get()); - power_bot = - DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_); + SolveEquation14(lambda_top, start_freq_, gains_eq_.get()); + power_top = DotProduct(gains_eq_.get(), filtered_clear_var_.get(), + bank_size_); + SolveEquation14(lambda_bot, start_freq_, gains_eq_.get()); + power_bot = DotProduct(gains_eq_.get(), filtered_clear_var_.get(), + bank_size_); DCHECK(power_target >= power_bot && power_target <= power_top); - float power_ratio = 2.0f; // Ratio of achieved power to target power. - const float kConvergeThresh = 0.001f; // TODO(ekmeyerson): Find best values - const int kMaxIters = 100; // for these, based on experiments. + float power_ratio = 2.0f; int iters = 0; - while (fabs(power_ratio - 1.0f) > kConvergeThresh && iters <= kMaxIters) { + while (fabs(power_ratio - 1.0f) > 0.001f && iters <= 100) { lambda = lambda_bot + (lambda_top - lambda_bot) / 2.0f; - SolveForGainsGivenLambda(lambda, start_freq_, gains_eq_.get()); + SolveEquation14(lambda, start_freq_, gains_eq_.get()); power = DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_); if (power < power_target) { lambda_bot = lambda; @@ -267,7 +248,7 @@ void IntelligibilityEnhancer::AnalyzeClearBlock(float power_target) { ++iters; } - // (ERB gain) = filterbank' * (freq gain) + /* b = filterbank' * b */ float* gains = gain_applier_.target(); for (int i = 0; i < freqs_; ++i) { gains[i] = 0.0f; @@ -284,8 +265,8 @@ void IntelligibilityEnhancer::ProcessNoiseBlock(const complex* in_block, int IntelligibilityEnhancer::GetBankSize(int sample_rate, int erb_resolution) { float freq_limit = sample_rate / 2000.0f; - int erb_scale = ceilf( - 11.17f * logf((freq_limit + 0.312f) / (freq_limit + 14.6575f)) + 43.0f); + int erb_scale = ceilf(11.17f * logf((freq_limit + 0.312f) / + (freq_limit + 14.6575f)) + 43.0f); return erb_scale * erb_resolution; } @@ -302,29 +283,29 @@ void IntelligibilityEnhancer::CreateErbBank() { center_freqs_[i] *= 0.5f * sample_rate_hz_ / last_center_freq; } - filter_bank_ = static_cast( - malloc(sizeof(*filter_bank_) * bank_size_ + - sizeof(**filter_bank_) * freqs_ * bank_size_)); + filter_bank_ = static_cast(malloc( + sizeof(*filter_bank_) * bank_size_ + + sizeof(**filter_bank_) * freqs_ * bank_size_)); for (int i = 0; i < bank_size_; ++i) { - filter_bank_[i] = - reinterpret_cast(filter_bank_ + bank_size_) + freqs_ * i; + filter_bank_[i] = reinterpret_cast(filter_bank_ + bank_size_) + + freqs_ * i; } for (int i = 1; i <= bank_size_; ++i) { int lll, ll, rr, rrr; lll = round(center_freqs_[max(1, i - lf) - 1] * freqs_ / - (0.5f * sample_rate_hz_)); - ll = - round(center_freqs_[max(1, i) - 1] * freqs_ / (0.5f * sample_rate_hz_)); + (0.5f * sample_rate_hz_)); + ll = round(center_freqs_[max(1, i ) - 1] * freqs_ / + (0.5f * sample_rate_hz_)); lll = min(freqs_, 
max(lll, 1)) - 1; - ll = min(freqs_, max(ll, 1)) - 1; + ll = min(freqs_, max(ll, 1)) - 1; rrr = round(center_freqs_[min(bank_size_, i + rf) - 1] * freqs_ / - (0.5f * sample_rate_hz_)); - rr = round(center_freqs_[min(bank_size_, i + 1) - 1] * freqs_ / - (0.5f * sample_rate_hz_)); + (0.5f * sample_rate_hz_)); + rr = round(center_freqs_[min(bank_size_, i + 1) - 1] * freqs_ / + (0.5f * sample_rate_hz_)); rrr = min(freqs_, max(rrr, 1)) - 1; - rr = min(freqs_, max(rr, 1)) - 1; + rr = min(freqs_, max(rr, 1)) - 1; float step, element; @@ -357,9 +338,8 @@ void IntelligibilityEnhancer::CreateErbBank() { } } -void IntelligibilityEnhancer::SolveForGainsGivenLambda(float lambda, - int start_freq, - float* sols) { +void IntelligibilityEnhancer::SolveEquation14(float lambda, int start_freq, + float* sols) { bool quadratic = (kConfigRho < 1.0f); const float* var_x0 = filtered_clear_var_.get(); const float* var_n0 = filtered_noise_var_.get(); @@ -367,17 +347,15 @@ void IntelligibilityEnhancer::SolveForGainsGivenLambda(float lambda, for (int n = 0; n < start_freq; ++n) { sols[n] = 1.0f; } - - // Analytic solution for optimal gains. See paper for derivation. for (int n = start_freq - 1; n < bank_size_; ++n) { float alpha0, beta0, gamma0; gamma0 = 0.5f * rho_[n] * var_x0[n] * var_n0[n] + - lambda * var_x0[n] * var_n0[n] * var_n0[n]; + lambda * var_x0[n] * var_n0[n] * var_n0[n]; beta0 = lambda * var_x0[n] * (2 - rho_[n]) * var_x0[n] * var_n0[n]; if (quadratic) { alpha0 = lambda * var_x0[n] * (1 - rho_[n]) * var_x0[n] * var_x0[n]; - sols[n] = - (-beta0 - sqrtf(beta0 * beta0 - 4 * alpha0 * gamma0)) / (2 * alpha0); + sols[n] = (-beta0 - sqrtf(beta0 * beta0 - 4 * alpha0 * gamma0)) + / (2 * alpha0); } else { sols[n] = -gamma0 / beta0; } @@ -391,9 +369,8 @@ void IntelligibilityEnhancer::FilterVariance(const float* var, float* result) { } } -float IntelligibilityEnhancer::DotProduct(const float* a, - const float* b, - int length) { +float IntelligibilityEnhancer::DotProduct(const float* a, const float* b, + int length) { float ret = 0.0f; for (int i = 0; i < length; ++i) { @@ -403,3 +380,4 @@ float IntelligibilityEnhancer::DotProduct(const float* a, } } // namespace webrtc + diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h index 8125707f1..d0818f688 100644 --- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h +++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h @@ -8,18 +8,14 @@ * be found in the AUTHORS file in the root of the source tree. */ -// -// Specifies core class for intelligbility enhancement. -// - #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_ #define WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_ #include -#include "webrtc/base/scoped_ptr.h" #include "webrtc/common_audio/lapped_transform.h" #include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h" +#include "webrtc/system_wrappers/interface/scoped_ptr.h" struct WebRtcVadInst; typedef struct WebRtcVadInst VadInst; @@ -29,7 +25,6 @@ namespace webrtc { // Speech intelligibility enhancement module. Reads render and capture // audio streams and modifies the render stream with a set of gains per // frequency bin to enhance speech against the noise background. -// Note: assumes speech and noise streams are already separated. 
 class IntelligibilityEnhancer {
  public:
   // Construct a new instance with the given filter bank resolution,
@@ -38,43 +33,30 @@ class IntelligibilityEnhancer {
   // sampling rate, number of channels and analysis rates.
   // |analysis_rate| sets the number of input blocks (containing speech!)
   // to elapse before a new gain computation is made. |variance_rate| specifies
   // the number of gain recomputations after which the variances are reset.
   // |cv_*| are parameters for the VarianceArray constructor for the
-  // clear speech stream.
+  // lear speech stream.
   // TODO(bercic): the |cv_*|, |*_rate| and |gain_limit| parameters should
   // probably go away once fine tuning is done. They override the internal
   // constants in the class (kGainChangeLimit, kAnalyzeRate, kVarianceRate).
-  IntelligibilityEnhancer(int erb_resolution,
-                          int sample_rate_hz,
-                          int channels,
-                          int cv_type,
-                          float cv_alpha,
-                          int cv_win,
-                          int analysis_rate,
-                          int variance_rate,
+  IntelligibilityEnhancer(int erb_resolution, int sample_rate_hz, int channels,
+                          int cv_type, float cv_alpha, int cv_win,
+                          int analysis_rate, int variance_rate,
                           float gain_limit);
   ~IntelligibilityEnhancer();
-  // Reads and processes chunk of noise stream in time domain.
-  void ProcessCaptureAudio(float* const* audio);
-
-  // Reads chunk of speech in time domain and updates with modified signal.
   void ProcessRenderAudio(float* const* audio);
+  void ProcessCaptureAudio(float* const* audio);
  private:
   enum AudioSource {
-    kRenderStream = 0,  // Clear speech stream.
-    kCaptureStream,     // Noise stream.
+    kRenderStream = 0,
+    kCaptureStream,
   };
-  // Provides access point to the frequency domain.
   class TransformCallback : public LappedTransform::Callback {
    public:
    TransformCallback(IntelligibilityEnhancer* parent, AudioSource source);
-
-    // All in frequency domain, receives input |in_block|, applies
-    // intelligibility enhancement, and writes result to |out_block|.
    virtual void ProcessAudioBlock(const std::complex<float>* const* in_block,
-                                  int in_channels,
-                                  int frames,
+                                  int in_channels, int frames,
                                   int out_channels,
                                   std::complex<float>* const* out_block);
@@ -84,95 +66,72 @@ class IntelligibilityEnhancer {
   };
   friend class TransformCallback;
-  // Sends streams to ProcessClearBlock or ProcessNoiseBlock based on source.
-  void DispatchAudio(AudioSource source,
-                     const std::complex<float>* in_block,
+  void DispatchAudio(AudioSource source, const std::complex<float>* in_block,
                      std::complex<float>* out_block);
-
-  // Updates variance computation and analysis with |in_block_|,
-  // and writes modified speech to |out_block|.
   void ProcessClearBlock(const std::complex<float>* in_block,
                          std::complex<float>* out_block);
-
-  // Computes and sets modified gains.
   void AnalyzeClearBlock(float power_target);
-
-  // Updates variance calculation for noise input with |in_block|.
   void ProcessNoiseBlock(const std::complex<float>* in_block,
                          std::complex<float>* out_block);
-  // Returns number of ERB filters.
   static int GetBankSize(int sample_rate, int erb_resolution);
-
-  // Initializes ERB filterbank.
   void CreateErbBank();
-
-  // Analytically solves quadratic for optimal gains given |lambda|.
-  // Negative gains are set to 0. Stores the results in |sols|.
-  void SolveForGainsGivenLambda(float lambda, int start_freq, float* sols);
-
-  // Computes variance across ERB filters from freq variance |var|.
-  // Stores in |result|.
+  void SolveEquation14(float lambda, int start_freq, float* sols);
   void FilterVariance(const float* var, float* result);
-
-  // Returns dot product of vectors specified by size |length| arrays |a|,|b|.
   static float DotProduct(const float* a, const float* b, int length);
   static const int kErbResolution;
   static const int kWindowSizeMs;
   static const int kChunkSizeMs;
-  static const int kAnalyzeRate;   // Default for |analysis_rate_|.
-  static const int kVarianceRate;  // Default for |variance_rate_|.
+  static const int kAnalyzeRate;
+  static const int kVarianceRate;
   static const float kClipFreq;
-  static const float kConfigRho;  // Default production and interpretation SNR.
+  static const float kConfigRho;
   static const float kKbdAlpha;
   static const float kGainChangeLimit;
-  const int freqs_;         // Num frequencies in frequency domain.
-  const int window_size_;   // Window size in samples; also the block size.
-  const int chunk_length_;  // Chunk size in samples.
-  const int bank_size_;     // Num ERB filters.
+  const int freqs_;
+  const int window_size_;  // window size in samples; also the block size
+  const int chunk_length_;  // chunk size in samples
+  const int bank_size_;
   const int sample_rate_hz_;
   const int erb_resolution_;
-  const int channels_;       // Num channels.
-  const int analysis_rate_;  // Num blocks before gains recalculated.
-  const int variance_rate_;  // Num recalculations before history is cleared.
+  const int channels_;
+  const int analysis_rate_;
+  const int variance_rate_;
   intelligibility::VarianceArray clear_variance_;
   intelligibility::VarianceArray noise_variance_;
-  rtc::scoped_ptr<float[]> filtered_clear_var_;
-  rtc::scoped_ptr<float[]> filtered_noise_var_;
-  float** filter_bank_;  // TODO(ekmeyerson): Switch to using ChannelBuffer.
-  rtc::scoped_ptr<float[]> center_freqs_;
+  scoped_ptr<float[]> filtered_clear_var_;
+  scoped_ptr<float[]> filtered_noise_var_;
+  float** filter_bank_;
+  scoped_ptr<float[]> center_freqs_;
   int start_freq_;
-  rtc::scoped_ptr<float[]> rho_;  // Production and interpretation SNR.
-  // for each ERB band.
-  rtc::scoped_ptr<float[]> gains_eq_;  // Pre-filter modified gains.
+  scoped_ptr<float[]> rho_;
+  scoped_ptr<float[]> gains_eq_;
   intelligibility::GainApplier gain_applier_;
   // Destination buffer used to reassemble blocked chunks before overwriting
   // the original input array with modifications.
-  // TODO(ekmeyerson): Switch to using ChannelBuffer.
   float** temp_out_buffer_;
-
-  rtc::scoped_ptr<float* []> input_audio_;
-  rtc::scoped_ptr<float[]> kbd_window_;
+  scoped_ptr<float* []> input_audio_;
+  scoped_ptr<float[]> kbd_window_;
   TransformCallback render_callback_;
   TransformCallback capture_callback_;
-  rtc::scoped_ptr<LappedTransform> render_mangler_;
-  rtc::scoped_ptr<LappedTransform> capture_mangler_;
+  scoped_ptr<LappedTransform> render_mangler_;
+  scoped_ptr<LappedTransform> capture_mangler_;
   int block_count_;
   int analysis_step_;
   // TODO(bercic): Quick stopgap measure for voice detection in the clear
   // and noise streams.
-  // Note: VAD currently does not affect anything in IntelligibilityEnhancer.
   VadInst* vad_high_;
   VadInst* vad_low_;
-  rtc::scoped_ptr<int16_t[]> vad_tmp_buffer_;
-  bool has_voice_low_;  // Whether voice detected in speech stream.
+  scoped_ptr<int16_t[]> vad_tmp_buffer_;
+  bool has_voice_low_;
 };
 }  // namespace webrtc
 #endif  // WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_
+
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_proc.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_proc.cc
index dbe3d35e0..b0ea2dfee 100644
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_proc.cc
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_proc.cc
@@ -8,12 +8,6 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
-//
-// Command line tool for speech intelligibility enhancement.
Provides for -// running and testing intelligibility_enhancer as an independent process. -// Use --help for options. -// - #include #include #include @@ -30,71 +24,53 @@ #include #include "gflags/gflags.h" -#include "testing/gtest/include/gtest/gtest.h" #include "webrtc/base/checks.h" #include "webrtc/common_audio/real_fourier.h" #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h" #include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h" #include "webrtc/system_wrappers/interface/critical_section_wrapper.h" +#include "webrtc/system_wrappers/interface/scoped_ptr.h" -// PCM data simulating streams const int16_t* in_ipcm; int16_t* out_ipcm; const int16_t* noise_ipcm; + float* in_fpcm; float* out_fpcm; float* noise_fpcm; - -// Current locations in streams float* noise_cursor; float* clear_cursor; -int samples; // Number of samples in input PCM file -int fragment_size; // Number of samples to process at a time - // to simulate APM stream processing +int samples; +int fragment_size; using std::complex; - -namespace webrtc { - using webrtc::RealFourier; using webrtc::IntelligibilityEnhancer; -DEFINE_int32(clear_type, - webrtc::intelligibility::VarianceArray::kStepInfinite, +DEFINE_int32(clear_type, webrtc::intelligibility::VarianceArray::kStepInfinite, "Variance algorithm for clear data."); -DEFINE_double(clear_alpha, 0.9, "Variance decay factor for clear data."); -DEFINE_int32(clear_window, - 475, +DEFINE_double(clear_alpha, 0.9, + "Variance decay factor for clear data."); +DEFINE_int32(clear_window, 475, "Window size for windowed variance for clear data."); -DEFINE_int32(sample_rate, - 16000, +DEFINE_int32(sample_rate, 16000, "Audio sample rate used in the input and output files."); -DEFINE_int32(ana_rate, - 800, +DEFINE_int32(ana_rate, 800, "Analysis rate; gains recalculated every N blocks."); -DEFINE_int32( - var_rate, - 2, - "Variance clear rate; history is forgotten every N gain recalculations."); +DEFINE_int32(var_rate, 2, + "Variance clear rate; history is forgotten every N gain recalculations."); DEFINE_double(gain_limit, 1000.0, "Maximum gain change in one block."); DEFINE_bool(repeat, false, "Repeat input file ad nauseam."); DEFINE_string(clear_file, "speech.pcm", "Input file with clear speech."); DEFINE_string(noise_file, "noise.pcm", "Input file with noise data."); -DEFINE_string(out_file, - "proc_enhanced.pcm", - "Enhanced output. Use '-' to " +DEFINE_string(out_file, "proc_enhanced.pcm", "Enhanced output. Use '-' to " "pipe through aplay internally."); -// Constant IntelligibilityEnhancer constructor parameters. -const int kErbResolution = 2; -const int kNumChannels = 1; - -// Converts output stream to Sun AU format and writes to file descriptor |fd|. -// Can be used to pipe output directly into aplay. -// TODO(ekmeyerson): Modify to write WAV instead. +// Write an Sun AU-formatted audio chunk into file descriptor |fd|. Can be used +// to pipe the audio stream directly into aplay. 
void writeau(int fd) { uint32_t thing; @@ -116,14 +92,12 @@ void writeau(int fd) { write(fd, out_ipcm, sizeof(*out_ipcm) * samples); } -// void function for gtest -void void_main(int argc, char* argv[]) { - google::SetUsageMessage( - "\n\nVariance algorithm types are:\n" - " 0 - infinite/normal,\n" - " 1 - exponentially decaying,\n" - " 2 - rolling window.\n" - "\nInput files must be little-endian 16-bit signed raw PCM.\n"); +int main(int argc, char* argv[]) { + google::SetUsageMessage("\n\nVariance algorithm types are:\n" + " 0 - infinite/normal,\n" + " 1 - exponentially decaying,\n" + " 2 - rolling window.\n" + "\nInput files must be little-endian 16-bit signed raw PCM.\n"); google::ParseCommandLineFlags(&argc, &argv, true); const char* in_name = FLAGS_clear_file.c_str(); @@ -133,15 +107,10 @@ void void_main(int argc, char* argv[]) { int in_fd, out_fd, noise_fd; FILE* aplay_file = nullptr; - // Load settings and set up PCMs. - - fragment_size = FLAGS_sample_rate / 100; // Mirror real time APM chunk size. - // Duplicates chunk_length_ in - // IntelligibilityEnhancer. - - ASSERT_EQ(stat(in_name, &in_stat), 0) << "Empty speech input."; - ASSERT_EQ(stat(noise_name, &noise_stat), 0) << "Empty noise input."; + fragment_size = FLAGS_sample_rate / 100; + stat(in_name, &in_stat); + stat(noise_name, &noise_stat); samples = in_stat.st_size / sizeof(*in_ipcm); in_fd = open(in_name, O_RDONLY); @@ -154,10 +123,10 @@ void void_main(int argc, char* argv[]) { } noise_fd = open(noise_name, O_RDONLY); - in_ipcm = static_cast( - mmap(nullptr, in_stat.st_size, PROT_READ, MAP_PRIVATE, in_fd, 0)); - noise_ipcm = static_cast( - mmap(nullptr, noise_stat.st_size, PROT_READ, MAP_PRIVATE, noise_fd, 0)); + in_ipcm = static_cast(mmap(nullptr, in_stat.st_size, PROT_READ, + MAP_PRIVATE, in_fd, 0)); + noise_ipcm = static_cast(mmap(nullptr, noise_stat.st_size, + PROT_READ, MAP_PRIVATE, noise_fd, 0)); out_ipcm = new int16_t[samples]; out_fpcm = new float[samples]; in_fpcm = new float[samples]; @@ -167,17 +136,18 @@ void void_main(int argc, char* argv[]) { noise_fpcm[i] = noise_ipcm[i % (noise_stat.st_size / sizeof(*noise_ipcm))]; } - // Run intelligibility enhancement. - - IntelligibilityEnhancer enh( - kErbResolution, - FLAGS_sample_rate, - kNumChannels, - FLAGS_clear_type, static_cast(FLAGS_clear_alpha), - FLAGS_clear_window, FLAGS_ana_rate, FLAGS_var_rate, FLAGS_gain_limit); + //feenableexcept(FE_INVALID | FE_OVERFLOW); + IntelligibilityEnhancer enh(2, + FLAGS_sample_rate, 1, + FLAGS_clear_type, + static_cast(FLAGS_clear_alpha), + FLAGS_clear_window, + FLAGS_ana_rate, + FLAGS_var_rate, + FLAGS_gain_limit); // Slice the input into smaller chunks, as the APM would do, and feed them - // through the enhancer. Repeat indefinitely if FLAGS_repeat is set. + // into the enhancer. Repeat indefinitely if FLAGS_repeat is set. do { noise_cursor = noise_fpcm; clear_cursor = in_fpcm; @@ -211,11 +181,7 @@ void void_main(int argc, char* argv[]) { close(out_fd); } close(in_fd); -} -} // namespace webrtc - -int main(int argc, char* argv[]) { - webrtc::void_main(argc, argv); return 0; } + diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc index 145cc0872..e6fc3fa6a 100644 --- a/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc +++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc @@ -8,10 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -// -// Implements helper functions and classes for intelligibility enhancement. -// - #include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h" #include @@ -44,13 +40,10 @@ inline bool cplxnormal(complex c) { // were chosen randomly, so that even a series of all zeroes has some small // variability. inline complex zerofudge(complex c) { - const static complex fudge[7] = {{0.001f, 0.002f}, - {0.008f, 0.001f}, - {0.003f, 0.008f}, - {0.0006f, 0.0009f}, - {0.001f, 0.004f}, - {0.003f, 0.004f}, - {0.002f, 0.009f}}; + const static complex fudge[7] = { + {0.001f, 0.002f}, {0.008f, 0.001f}, {0.003f, 0.008f}, {0.0006f, 0.0009f}, + {0.001f, 0.004f}, {0.003f, 0.004f}, {0.002f, 0.009f} + }; static int fudge_index = 0; if (cplxfinite(c) && !cplxnormal(c)) { fudge_index = (fudge_index + 1) % 7; @@ -61,9 +54,8 @@ inline complex zerofudge(complex c) { // Incremental mean computation. Return the mean of the series with the // mean |mean| with added |data|. -inline complex NewMean(complex mean, - complex data, - int count) { +inline complex NewMean(complex mean, complex data, + int count) { return mean + (data - mean) / static_cast(count); } @@ -81,9 +73,7 @@ namespace intelligibility { static const int kWindowBlockSize = 10; -VarianceArray::VarianceArray(int freqs, - StepType type, - int window_size, +VarianceArray::VarianceArray(int freqs, StepType type, int window_size, float decay) : running_mean_(new complex[freqs]()), running_mean_sq_(new complex[freqs]()), @@ -97,15 +87,15 @@ VarianceArray::VarianceArray(int freqs, history_cursor_(0), count_(0), array_mean_(0.0f) { - history_.reset(new rtc::scoped_ptr[]>[freqs_]()); + history_.reset(new scoped_ptr[]>[freqs_]()); for (int i = 0; i < freqs_; ++i) { history_[i].reset(new complex[window_size_]()); } - subhistory_.reset(new rtc::scoped_ptr[]>[freqs_]()); + subhistory_.reset(new scoped_ptr[]>[freqs_]()); for (int i = 0; i < freqs_; ++i) { subhistory_[i].reset(new complex[window_size_]()); } - subhistory_sq_.reset(new rtc::scoped_ptr[]>[freqs_]()); + subhistory_sq_.reset(new scoped_ptr[]>[freqs_]()); for (int i = 0; i < freqs_; ++i) { subhistory_sq_[i].reset(new complex[window_size_]()); } @@ -141,15 +131,13 @@ void VarianceArray::InfiniteStep(const complex* data, bool skip_fudge) { } else { float old_sum = conj_sum_[i]; complex old_mean = running_mean_[i]; - running_mean_[i] = - old_mean + (sample - old_mean) / static_cast(count_); - conj_sum_[i] = - (old_sum + std::conj(sample - old_mean) * (sample - running_mean_[i])) - .real(); - variance_[i] = - conj_sum_[i] / (count_ - 1); // + fudge[fudge_index].real(); + running_mean_[i] = old_mean + (sample - old_mean) / + static_cast(count_); + conj_sum_[i] = (old_sum + std::conj(sample - old_mean) * + (sample - running_mean_[i])).real(); + variance_[i] = conj_sum_[i] / (count_ - 1); // + fudge[fudge_index].real(); if (skip_fudge && false) { - // variance_[i] -= fudge[fudge_index].real(); + //variance_[i] -= fudge[fudge_index].real(); } } array_mean_ += (variance_[i] - array_mean_) / (i + 1); @@ -173,13 +161,11 @@ void VarianceArray::DecayStep(const complex* data, bool /*dummy*/) { complex prev = running_mean_[i]; complex prev2 = running_mean_sq_[i]; running_mean_[i] = decay_ * prev + (1.0f - decay_) * sample; - running_mean_sq_[i] = - decay_ * prev2 + (1.0f - decay_) * sample * std::conj(sample); - // variance_[i] = decay_ * variance_[i] + (1.0f - decay_) * ( - // (sample - running_mean_[i]) * std::conj(sample - - // running_mean_[i])).real(); - variance_[i] = (running_mean_sq_[i] - - 
running_mean_[i] * std::conj(running_mean_[i])).real(); + running_mean_sq_[i] = decay_ * prev2 + + (1.0f - decay_) * sample * std::conj(sample); + //variance_[i] = decay_ * variance_[i] + (1.0f - decay_) * ( + // (sample - running_mean_[i]) * std::conj(sample - running_mean_[i])).real(); + variance_[i] = (running_mean_sq_[i] - running_mean_[i] * std::conj(running_mean_[i])).real(); } array_mean_ += (variance_[i] - array_mean_) / (i + 1); @@ -200,15 +186,15 @@ void VarianceArray::WindowedStep(const complex* data, bool /*dummy*/) { mean = history_[i][history_cursor_]; variance_[i] = 0.0f; for (int j = 1; j < num; ++j) { - complex sample = - zerofudge(history_[i][(history_cursor_ + j) % window_size_]); + complex sample = zerofudge( + history_[i][(history_cursor_ + j) % window_size_]); sample = history_[i][(history_cursor_ + j) % window_size_]; float old_sum = conj_sum; complex old_mean = mean; mean = old_mean + (sample - old_mean) / static_cast(j + 1); - conj_sum = - (old_sum + std::conj(sample - old_mean) * (sample - mean)).real(); + conj_sum = (old_sum + std::conj(sample - old_mean) * + (sample - mean)).real(); variance_[i] = conj_sum / (j); } array_mean_ += (variance_[i] - array_mean_) / (i + 1); @@ -231,11 +217,11 @@ void VarianceArray::BlockedStep(const complex* data, bool /*dummy*/) { subhistory_[i][history_cursor_ % window_size_] = sub_running_mean_[i]; subhistory_sq_[i][history_cursor_ % window_size_] = sub_running_mean_sq_[i]; - variance_[i] = - (NewMean(running_mean_sq_[i], sub_running_mean_sq_[i], blocks) - - NewMean(running_mean_[i], sub_running_mean_[i], blocks) * - std::conj(NewMean(running_mean_[i], sub_running_mean_[i], blocks))) - .real(); + variance_[i] = (NewMean(running_mean_sq_[i], sub_running_mean_sq_[i], + blocks) - + NewMean(running_mean_[i], sub_running_mean_[i], blocks) * + std::conj(NewMean(running_mean_[i], sub_running_mean_[i], + blocks))).real(); if (count_ == kWindowBlockSize - 1) { sub_running_mean_[i] = complex(0.0f, 0.0f); sub_running_mean_sq_[i] = complex(0.0f, 0.0f); @@ -298,3 +284,4 @@ void GainApplier::Apply(const complex* in_block, } // namespace intelligibility } // namespace webrtc + diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h b/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h index 075b8ad46..550f293a7 100644 --- a/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h +++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h @@ -8,16 +8,12 @@ * be found in the AUTHORS file in the root of the source tree. */ -// -// Specifies helper classes for intelligibility enhancement. -// - #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_ #define WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_ #include -#include "webrtc/base/scoped_ptr.h" +#include "webrtc/system_wrappers/interface/scoped_ptr.h" namespace webrtc { @@ -67,10 +63,14 @@ class VarianceArray { void ApplyScale(float scale); // The current set of variances. - const float* variance() const { return variance_.get(); } + const float* variance() const { + return variance_.get(); + } // The mean value of the current set of variances. 
-  float array_mean() const { return array_mean_; }
+  float array_mean() const {
+    return array_mean_;
+  }
  private:
   void InfiniteStep(const std::complex<float>* data, bool dummy);
@@ -78,26 +78,23 @@
   void WindowedStep(const std::complex<float>* data, bool dummy);
   void BlockedStep(const std::complex<float>* data, bool dummy);
-  // TODO(ekmeyerson): Switch the following running means
-  // and histories from rtc::scoped_ptr to std::vector.
-  // The current average X and X^2.
-  rtc::scoped_ptr<std::complex<float>[]> running_mean_;
-  rtc::scoped_ptr<std::complex<float>[]> running_mean_sq_;
+  scoped_ptr<std::complex<float>[]> running_mean_;
+  scoped_ptr<std::complex<float>[]> running_mean_sq_;
   // Average X and X^2 for the current block in kStepBlocked.
-  rtc::scoped_ptr<std::complex<float>[]> sub_running_mean_;
-  rtc::scoped_ptr<std::complex<float>[]> sub_running_mean_sq_;
+  scoped_ptr<std::complex<float>[]> sub_running_mean_;
+  scoped_ptr<std::complex<float>[]> sub_running_mean_sq_;
   // Sample history for the rolling window in kStepWindowed and block-wise
   // histories for kStepBlocked.
-  rtc::scoped_ptr<rtc::scoped_ptr<std::complex<float>[]>[]> history_;
-  rtc::scoped_ptr<rtc::scoped_ptr<std::complex<float>[]>[]> subhistory_;
-  rtc::scoped_ptr<rtc::scoped_ptr<std::complex<float>[]>[]> subhistory_sq_;
+  scoped_ptr<scoped_ptr<std::complex<float>[]>[]> history_;
+  scoped_ptr<scoped_ptr<std::complex<float>[]>[]> subhistory_;
+  scoped_ptr<scoped_ptr<std::complex<float>[]>[]> subhistory_sq_;
   // The current set of variances and sums for Welford's algorithm.
-  rtc::scoped_ptr<float[]> variance_;
-  rtc::scoped_ptr<float[]> conj_sum_;
+  scoped_ptr<float[]> variance_;
+  scoped_ptr<float[]> conj_sum_;
   const int freqs_;
   const int window_size_;
@@ -121,13 +118,15 @@ class GainApplier {
              std::complex<float>* out_block);
   // Return the current target gain set. Modify this array to set the targets.
-  float* target() const { return target_.get(); }
+  float* target() const {
+    return target_.get();
+  }
  private:
   const int freqs_;
   const float change_limit_;
-  rtc::scoped_ptr<float[]> target_;
-  rtc::scoped_ptr<float[]> current_;
+  scoped_ptr<float[]> target_;
+  scoped_ptr<float[]> current_;
 };
 }  // namespace intelligibility
@@ -135,3 +134,4 @@ class GainApplier {
 }  // namespace webrtc
 #endif  // WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_UTILS_H_
+
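Note on the compile failure that triggered this revert: std::accumulate is declared in the <numeric> header, which intelligibility_enhancer.cc did not include, hence the "no member named 'accumulate' in namespace 'std'" error on the Mac bot quoted in the commit message. The sketch below shows the usual fix (adding the missing include) under that assumption; SumVariance is a hypothetical stand-in for the power_target computation in IntelligibilityEnhancer::ProcessClearBlock(), not code from this patch.

    // Sketch only: the missing header is the likely cause of the bot failure.
    #include <numeric>  // declares std::accumulate

    #include <cstddef>

    // Hypothetical stand-in for the call site in ProcessClearBlock(), which
    // sums the per-frequency-bin variances from clear_variance_.variance()
    // to obtain the target power.
    float SumVariance(const float* variance, size_t num_bins) {
      return std::accumulate(variance, variance + num_bins, 0.0f);
    }

A reland of the original CL (https://codereview.webrtc.org/1182323005/) would need this or an equivalent include before the std::accumulate call compiles on all bots.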