Remove the use of AudioFrame::energy_ from AudioProcessing and VoE.
We want to remove energy_ entirely, as we've seen that carrying around this potentially invalid value is dangerous. Results in the removal of AudioBuffer::is_muted(). This wasn't used in practice any longer, after the level calculation moved directly to channel.cc. Instead, now use ProcessMuted() in channel.cc to shortcut the level computation when the signal is muted. BUG=3315 TESTED=Muting the channel in voe_cmd_test results in rms=127. R=bjornv@webrtc.org, kwiberg@webrtc.org Review URL: https://webrtc-codereview.appspot.com/12529004 git-svn-id: http://webrtc.googlecode.com/svn/trunk@6159 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
parent
688ed699e0
commit
21299d4e00
@ -99,7 +99,6 @@ AudioBuffer::AudioBuffer(int input_samples_per_channel,
|
|||||||
num_mixed_low_pass_channels_(0),
|
num_mixed_low_pass_channels_(0),
|
||||||
reference_copied_(false),
|
reference_copied_(false),
|
||||||
activity_(AudioFrame::kVadUnknown),
|
activity_(AudioFrame::kVadUnknown),
|
||||||
is_muted_(false),
|
|
||||||
data_(NULL),
|
data_(NULL),
|
||||||
keyboard_data_(NULL),
|
keyboard_data_(NULL),
|
||||||
channels_(new ChannelBuffer<int16_t>(proc_samples_per_channel_,
|
channels_(new ChannelBuffer<int16_t>(proc_samples_per_channel_,
|
||||||
@ -223,7 +222,6 @@ void AudioBuffer::InitForNewData() {
|
|||||||
num_mixed_low_pass_channels_ = 0;
|
num_mixed_low_pass_channels_ = 0;
|
||||||
reference_copied_ = false;
|
reference_copied_ = false;
|
||||||
activity_ = AudioFrame::kVadUnknown;
|
activity_ = AudioFrame::kVadUnknown;
|
||||||
is_muted_ = false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const int16_t* AudioBuffer::data(int channel) const {
|
const int16_t* AudioBuffer::data(int channel) const {
|
||||||
@ -307,10 +305,6 @@ AudioFrame::VADActivity AudioBuffer::activity() const {
|
|||||||
return activity_;
|
return activity_;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool AudioBuffer::is_muted() const {
|
|
||||||
return is_muted_;
|
|
||||||
}
|
|
||||||
|
|
||||||
int AudioBuffer::num_channels() const {
|
int AudioBuffer::num_channels() const {
|
||||||
return num_proc_channels_;
|
return num_proc_channels_;
|
||||||
}
|
}
|
||||||
@ -336,9 +330,6 @@ void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
|
|||||||
assert(frame->samples_per_channel_ == proc_samples_per_channel_);
|
assert(frame->samples_per_channel_ == proc_samples_per_channel_);
|
||||||
InitForNewData();
|
InitForNewData();
|
||||||
activity_ = frame->vad_activity_;
|
activity_ = frame->vad_activity_;
|
||||||
if (frame->energy_ == 0) {
|
|
||||||
is_muted_ = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (num_proc_channels_ == 1) {
|
if (num_proc_channels_ == 1) {
|
||||||
// We can get away with a pointer assignment in this case.
|
// We can get away with a pointer assignment in this case.
|
||||||
|
@ -71,8 +71,6 @@ class AudioBuffer {
|
|||||||
void set_activity(AudioFrame::VADActivity activity);
|
void set_activity(AudioFrame::VADActivity activity);
|
||||||
AudioFrame::VADActivity activity() const;
|
AudioFrame::VADActivity activity() const;
|
||||||
|
|
||||||
bool is_muted() const;
|
|
||||||
|
|
||||||
// Use for int16 interleaved data.
|
// Use for int16 interleaved data.
|
||||||
void DeinterleaveFrom(AudioFrame* audioFrame);
|
void DeinterleaveFrom(AudioFrame* audioFrame);
|
||||||
void InterleaveTo(AudioFrame* audioFrame) const;
|
void InterleaveTo(AudioFrame* audioFrame) const;
|
||||||
@ -106,7 +104,6 @@ class AudioBuffer {
|
|||||||
int num_mixed_low_pass_channels_;
|
int num_mixed_low_pass_channels_;
|
||||||
bool reference_copied_;
|
bool reference_copied_;
|
||||||
AudioFrame::VADActivity activity_;
|
AudioFrame::VADActivity activity_;
|
||||||
bool is_muted_;
|
|
||||||
|
|
||||||
// If non-null, use this instead of channels_->channel(0). This is an
|
// If non-null, use this instead of channels_->channel(0). This is an
|
||||||
// optimization for the case num_proc_channels_ == 1 that allows us to point
|
// optimization for the case num_proc_channels_ == 1 that allows us to point
|
||||||
|
@ -12,6 +12,7 @@
|
|||||||
|
|
||||||
#include "webrtc/modules/audio_processing/audio_buffer.h"
|
#include "webrtc/modules/audio_processing/audio_buffer.h"
|
||||||
#include "webrtc/modules/audio_processing/include/audio_processing.h"
|
#include "webrtc/modules/audio_processing/include/audio_processing.h"
|
||||||
|
#include "webrtc/modules/audio_processing/rms_level.h"
|
||||||
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
|
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
|
||||||
|
|
||||||
namespace webrtc {
|
namespace webrtc {
|
||||||
@ -29,13 +30,8 @@ int LevelEstimatorImpl::ProcessStream(AudioBuffer* audio) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
RMSLevel* rms_level = static_cast<RMSLevel*>(handle(0));
|
RMSLevel* rms_level = static_cast<RMSLevel*>(handle(0));
|
||||||
if (audio->is_muted()) {
|
for (int i = 0; i < audio->num_channels(); ++i) {
|
||||||
rms_level->ProcessMuted(audio->samples_per_channel() *
|
rms_level->Process(audio->data(i), audio->samples_per_channel());
|
||||||
audio->num_channels());
|
|
||||||
} else {
|
|
||||||
for (int i = 0; i < audio->num_channels(); ++i) {
|
|
||||||
rms_level->Process(audio->data(i), audio->samples_per_channel());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return AudioProcessing::kNoError;
|
return AudioProcessing::kNoError;
|
||||||
|
@ -15,16 +15,16 @@
|
|||||||
|
|
||||||
namespace webrtc {
|
namespace webrtc {
|
||||||
|
|
||||||
static const float kMaxSquaredLevel = 32768.0 * 32768.0;
|
static const float kMaxSquaredLevel = 32768 * 32768;
|
||||||
|
|
||||||
RMSLevel::RMSLevel()
|
RMSLevel::RMSLevel()
|
||||||
: sum_square_(0.0),
|
: sum_square_(0),
|
||||||
sample_count_(0) {}
|
sample_count_(0) {}
|
||||||
|
|
||||||
RMSLevel::~RMSLevel() {}
|
RMSLevel::~RMSLevel() {}
|
||||||
|
|
||||||
void RMSLevel::Reset() {
|
void RMSLevel::Reset() {
|
||||||
sum_square_ = 0.0;
|
sum_square_ = 0;
|
||||||
sample_count_ = 0;
|
sample_count_ = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -40,7 +40,7 @@ void RMSLevel::ProcessMuted(int length) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
int RMSLevel::RMS() {
|
int RMSLevel::RMS() {
|
||||||
if (sample_count_ == 0 || sum_square_ == 0.0) {
|
if (sample_count_ == 0 || sum_square_ == 0) {
|
||||||
Reset();
|
Reset();
|
||||||
return kMinLevel;
|
return kMinLevel;
|
||||||
}
|
}
|
||||||
|
@ -8,6 +8,9 @@
|
|||||||
* be found in the AUTHORS file in the root of the source tree.
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
|
||||||
|
#define WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
|
||||||
|
|
||||||
#include "webrtc/typedefs.h"
|
#include "webrtc/typedefs.h"
|
||||||
|
|
||||||
namespace webrtc {
|
namespace webrtc {
|
||||||
@ -49,3 +52,6 @@ class RMSLevel {
|
|||||||
};
|
};
|
||||||
|
|
||||||
} // namespace webrtc
|
} // namespace webrtc
|
||||||
|
|
||||||
|
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
|
||||||
|
|
||||||
|
@ -1229,15 +1229,6 @@ TEST_F(ApmTest, LevelEstimator) {
|
|||||||
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
||||||
EXPECT_EQ(70, apm_->level_estimator()->RMS());
|
EXPECT_EQ(70, apm_->level_estimator()->RMS());
|
||||||
|
|
||||||
// Min value if energy_ == 0.
|
|
||||||
SetFrameTo(frame_, 10000);
|
|
||||||
uint32_t energy = frame_->energy_; // Save default to restore below.
|
|
||||||
frame_->energy_ = 0;
|
|
||||||
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
|
||||||
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
|
||||||
EXPECT_EQ(127, apm_->level_estimator()->RMS());
|
|
||||||
frame_->energy_ = energy;
|
|
||||||
|
|
||||||
// Verify reset after enable/disable.
|
// Verify reset after enable/disable.
|
||||||
SetFrameTo(frame_, 32767);
|
SetFrameTo(frame_, 32767);
|
||||||
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
|
||||||
|
@ -72,7 +72,6 @@ void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
|
|||||||
void AudioFrameOperations::Mute(AudioFrame& frame) {
|
void AudioFrameOperations::Mute(AudioFrame& frame) {
|
||||||
memset(frame.data_, 0, sizeof(int16_t) *
|
memset(frame.data_, 0, sizeof(int16_t) *
|
||||||
frame.samples_per_channel_ * frame.num_channels_);
|
frame.samples_per_channel_ * frame.num_channels_);
|
||||||
frame.energy_ = 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
|
int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
|
||||||
|
@ -142,17 +142,13 @@ TEST_F(AudioFrameOperationsTest, SwapStereoChannelsFailsOnMono) {
|
|||||||
|
|
||||||
TEST_F(AudioFrameOperationsTest, MuteSucceeds) {
|
TEST_F(AudioFrameOperationsTest, MuteSucceeds) {
|
||||||
SetFrameData(&frame_, 1000, 1000);
|
SetFrameData(&frame_, 1000, 1000);
|
||||||
frame_.energy_ = 1000 * 1000 * frame_.samples_per_channel_ *
|
|
||||||
frame_.num_channels_;
|
|
||||||
AudioFrameOperations::Mute(frame_);
|
AudioFrameOperations::Mute(frame_);
|
||||||
|
|
||||||
AudioFrame muted_frame;
|
AudioFrame muted_frame;
|
||||||
muted_frame.samples_per_channel_ = 320;
|
muted_frame.samples_per_channel_ = 320;
|
||||||
muted_frame.num_channels_ = 2;
|
muted_frame.num_channels_ = 2;
|
||||||
SetFrameData(&muted_frame, 0, 0);
|
SetFrameData(&muted_frame, 0, 0);
|
||||||
muted_frame.energy_ = 0;
|
|
||||||
VerifyFramesAreEqual(muted_frame, frame_);
|
VerifyFramesAreEqual(muted_frame, frame_);
|
||||||
EXPECT_EQ(muted_frame.energy_, frame_.energy_);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO(andrew): should not allow negative scales.
|
// TODO(andrew): should not allow negative scales.
|
||||||
|
@ -3689,9 +3689,9 @@ Channel::PrepareEncodeAndSend(int mixingFrequency)
|
|||||||
MixOrReplaceAudioWithFile(mixingFrequency);
|
MixOrReplaceAudioWithFile(mixingFrequency);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (Mute())
|
bool is_muted = Mute(); // Cache locally as Mute() takes a lock.
|
||||||
{
|
if (is_muted) {
|
||||||
AudioFrameOperations::Mute(_audioFrame);
|
AudioFrameOperations::Mute(_audioFrame);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (channel_state_.Get().input_external_media)
|
if (channel_state_.Get().input_external_media)
|
||||||
@ -3714,7 +3714,11 @@ Channel::PrepareEncodeAndSend(int mixingFrequency)
|
|||||||
|
|
||||||
if (_includeAudioLevelIndication) {
|
if (_includeAudioLevelIndication) {
|
||||||
int length = _audioFrame.samples_per_channel_ * _audioFrame.num_channels_;
|
int length = _audioFrame.samples_per_channel_ * _audioFrame.num_channels_;
|
||||||
rms_level_.Process(_audioFrame.data_, length);
|
if (is_muted) {
|
||||||
|
rms_level_.ProcessMuted(length);
|
||||||
|
} else {
|
||||||
|
rms_level_.Process(_audioFrame.data_, length);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
Loading…
x
Reference in New Issue
Block a user