Properly error check calls to AudioProcessing.

Checks must be made with "!= 0", not "== -1".

Additionally:
* Clean up the function calling into AudioProcessing.
* Remove the unused _noiseWarning.
* Make the other warnings bool.

BUG=chromium:178040
Review URL: https://webrtc-codereview.appspot.com/1147004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@3590 4adac7df-926f-26a2-2b94-8c16560cd09d
commit 6be1e934ad (parent 9ee5a4ccd8)
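For context, a minimal sketch of the calling convention this change standardizes on. The surrounding names (apm, frame, delay_ms) are illustrative only, not taken from the diff; the point is that AudioProcessing calls return an int error code, so failure is any non-zero value, and comparing against -1 silently ignores every other error code.

    // Hypothetical caller, assuming an AudioProcessing* apm and an AudioFrame frame.
    if (apm->set_stream_delay_ms(delay_ms) != 0) {
      // Any non-zero return is a failure; "== -1" would miss most of them.
      LOG_FERR1(LS_WARNING, set_stream_delay_ms, delay_ms);
    }
    int err = apm->ProcessStream(&frame);
    if (err != 0) {
      LOG(LS_ERROR) << "ProcessStream() error: " << err;
    }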
@@ -212,10 +212,9 @@ int EchoCancellationImpl::device_sample_rate_hz() const {
   return device_sample_rate_hz_;
 }
 
-int EchoCancellationImpl::set_stream_drift_samples(int drift) {
+void EchoCancellationImpl::set_stream_drift_samples(int drift) {
   was_stream_drift_set_ = true;
   stream_drift_samples_ = drift;
-  return apm_->kNoError;
 }
 
 int EchoCancellationImpl::stream_drift_samples() const {
@@ -41,7 +41,7 @@ class EchoCancellationImpl : public EchoCancellation,
   virtual int enable_drift_compensation(bool enable);
   virtual bool is_drift_compensation_enabled() const;
   virtual int set_device_sample_rate_hz(int rate);
-  virtual int set_stream_drift_samples(int drift);
+  virtual void set_stream_drift_samples(int drift);
   virtual int set_suppression_level(SuppressionLevel level);
   virtual SuppressionLevel suppression_level() const;
   virtual int enable_metrics(bool enable);
@@ -285,8 +285,8 @@ class EchoCancellation {
 
   // Sets the difference between the number of samples rendered and captured by
   // the audio devices since the last call to |ProcessStream()|. Must be called
-  // if and only if drift compensation is enabled, prior to |ProcessStream()|.
-  virtual int set_stream_drift_samples(int drift) = 0;
+  // if drift compensation is enabled, prior to |ProcessStream()|.
+  virtual void set_stream_drift_samples(int drift) = 0;
   virtual int stream_drift_samples() const = 0;
 
   enum SuppressionLevel {
@@ -689,8 +689,7 @@ void void_main(int argc, char* argv[]) {
                 apm->gain_control()->set_stream_analog_level(msg.level()));
       ASSERT_EQ(apm->kNoError,
                 apm->set_stream_delay_ms(msg.delay() + extra_delay_ms));
-      ASSERT_EQ(apm->kNoError,
-          apm->echo_cancellation()->set_stream_drift_samples(msg.drift()));
+      apm->echo_cancellation()->set_stream_drift_samples(msg.drift());
 
       int err = apm->ProcessStream(&near_frame);
       if (err == apm->kBadStreamParameterWarning) {
@@ -893,8 +892,7 @@ void void_main(int argc, char* argv[]) {
                 apm->gain_control()->set_stream_analog_level(capture_level));
       ASSERT_EQ(apm->kNoError,
                 apm->set_stream_delay_ms(delay_ms + extra_delay_ms));
-      ASSERT_EQ(apm->kNoError,
-          apm->echo_cancellation()->set_stream_drift_samples(drift_samples));
+      apm->echo_cancellation()->set_stream_drift_samples(drift_samples);
 
       int err = apm->ProcessStream(&near_frame);
       if (err == apm->kBadStreamParameterWarning) {
@@ -444,8 +444,7 @@ bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) {
 
 void ApmTest::ProcessWithDefaultStreamParameters(AudioFrame* frame) {
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_stream_analog_level(127));
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame));
@@ -603,8 +602,7 @@ TEST_F(ApmTest, StreamParameters) {
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_cancellation()->enable_drift_compensation(true));
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kStreamParameterNotSetError,
             apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
@@ -625,8 +623,7 @@ TEST_F(ApmTest, StreamParameters) {
   EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
   EXPECT_EQ(apm_->kNoError,
             apm_->echo_cancellation()->enable_drift_compensation(true));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_stream_analog_level(127));
   EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
@@ -638,8 +635,7 @@ TEST_F(ApmTest, StreamParameters) {
 
   // Resets after successful ProcessStream().
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
 
@@ -660,8 +656,7 @@ TEST_F(ApmTest, StreamParameters) {
   // -- All there --
   EXPECT_EQ(apm_->kNoError, apm_->Initialize());
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kNoError,
             apm_->gain_control()->set_stream_analog_level(127));
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
@@ -1275,8 +1270,7 @@ TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
     frame_->vad_activity_ = AudioFrame::kVadUnknown;
 
     EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
-    EXPECT_EQ(apm_->kNoError,
-              apm_->echo_cancellation()->set_stream_drift_samples(0));
+    apm_->echo_cancellation()->set_stream_drift_samples(0);
     EXPECT_EQ(apm_->kNoError,
               apm_->gain_control()->set_stream_analog_level(analog_level));
     EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
@@ -1341,12 +1335,10 @@ TEST_F(ApmTest, SplittingFilter) {
   SetFrameTo(frame_, 1000);
   frame_copy.CopyFrom(*frame_);
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
   EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
 
@@ -1359,8 +1351,7 @@ TEST_F(ApmTest, SplittingFilter) {
   SetFrameTo(frame_, 1000);
   frame_copy.CopyFrom(*frame_);
   EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
-  EXPECT_EQ(apm_->kNoError,
-            apm_->echo_cancellation()->set_stream_drift_samples(0));
+  apm_->echo_cancellation()->set_stream_drift_samples(0);
   EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
   EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
 }
@@ -1452,8 +1443,7 @@ TEST_F(ApmTest, Process) {
     frame_->vad_activity_ = AudioFrame::kVadUnknown;
 
     EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
-    EXPECT_EQ(apm_->kNoError,
-              apm_->echo_cancellation()->set_stream_drift_samples(0));
+    apm_->echo_cancellation()->set_stream_drift_samples(0);
     EXPECT_EQ(apm_->kNoError,
               apm_->gain_control()->set_stream_analog_level(analog_level));
 
@@ -20,6 +20,7 @@
 #include "utility.h"
 #include "voe_base_impl.h"
 #include "voe_external_media.h"
+#include "webrtc/system_wrappers/interface/logging.h"
 
 #define WEBRTC_ABS(a) (((a) < 0) ? -(a) : (a))
 
@@ -28,10 +29,11 @@ namespace webrtc {
 namespace voe {
 
 // Used for downmixing before resampling.
-// TODO(andrew): audio_device should advertise the maximum sample rate it can
-// provide.
+// TODO(ajm): audio_device should advertise the maximum sample rate it can
+// provide.
 static const int kMaxMonoDeviceDataSizeSamples = 960;  // 10 ms, 96 kHz, mono.
 
+// TODO(ajm): The thread safety of this is dubious...
 void
 TransmitMixer::OnPeriodicProcess()
 {
@@ -39,7 +41,7 @@ TransmitMixer::OnPeriodicProcess()
                  "TransmitMixer::OnPeriodicProcess()");
 
 #if defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION)
-    if (_typingNoiseWarning > 0)
+    if (_typingNoiseWarning)
     {
         CriticalSectionScoped cs(&_callbackCritSect);
         if (_voiceEngineObserverPtr)
@@ -50,11 +52,11 @@ TransmitMixer::OnPeriodicProcess()
             _voiceEngineObserverPtr->CallbackOnError(-1,
                                                      VE_TYPING_NOISE_WARNING);
         }
-        _typingNoiseWarning = 0;
+        _typingNoiseWarning = false;
     }
 #endif
 
-    if (_saturationWarning > 0)
+    if (_saturationWarning)
     {
         CriticalSectionScoped cs(&_callbackCritSect);
         if (_voiceEngineObserverPtr)
@@ -63,21 +65,8 @@ TransmitMixer::OnPeriodicProcess()
                          "TransmitMixer::OnPeriodicProcess() =>"
                          " CallbackOnError(VE_SATURATION_WARNING)");
             _voiceEngineObserverPtr->CallbackOnError(-1, VE_SATURATION_WARNING);
         }
-        _saturationWarning = 0;
-    }
-
-    if (_noiseWarning > 0)
-    {
-        CriticalSectionScoped cs(&_callbackCritSect);
-        if (_voiceEngineObserverPtr)
-        {
-            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
-                         "TransmitMixer::OnPeriodicProcess() =>"
-                         "CallbackOnError(VE_NOISE_WARNING)");
-            _voiceEngineObserverPtr->CallbackOnError(-1, VE_NOISE_WARNING);
-        }
-        _noiseWarning = 0;
+        _saturationWarning = false;
     }
 }
 
@@ -169,7 +158,7 @@ TransmitMixer::Destroy(TransmitMixer*& mixer)
 TransmitMixer::TransmitMixer(const WebRtc_UWord32 instanceId) :
     _engineStatisticsPtr(NULL),
     _channelManagerPtr(NULL),
-    _audioProcessingModulePtr(NULL),
+    audioproc_(NULL),
     _voiceEngineObserverPtr(NULL),
     _processThreadPtr(NULL),
     _filePlayerPtr(NULL),
@@ -190,15 +179,14 @@ TransmitMixer::TransmitMixer(const WebRtc_UWord32 instanceId) :
     _timeActive(0),
     _timeSinceLastTyping(0),
     _penaltyCounter(0),
-    _typingNoiseWarning(0),
+    _typingNoiseWarning(false),
     _timeWindow(10), // 10ms slots accepted to count as a hit
     _costPerTyping(100), // Penalty added for a typing + activity coincide
     _reportingThreshold(300), // Threshold for _penaltyCounter
     _penaltyDecay(1), // how much we reduce _penaltyCounter every 10 ms.
     _typeEventDelay(2), // how "old" event we check for
 #endif
-    _saturationWarning(0),
-    _noiseWarning(0),
+    _saturationWarning(false),
     _instanceId(instanceId),
     _mixFileWithMicrophone(false),
     _captureLevel(0),
@@ -303,7 +291,7 @@ TransmitMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
                  "TransmitMixer::SetAudioProcessingModule("
                  "audioProcessingModule=0x%x)",
                  audioProcessingModule);
-    _audioProcessingModulePtr = audioProcessingModule;
+    audioproc_ = audioProcessingModule;
     return 0;
 }
 
@@ -370,8 +358,8 @@ TransmitMixer::PrepareDemux(const void* audioSamples,
         }
     }
 
-    // --- Near-end Voice Quality Enhancement (APM) processing
-    APMProcessStream(totalDelayMS, clockDrift, currentMicLevel);
+    // --- Near-end audio processing.
+    ProcessAudio(totalDelayMS, clockDrift, currentMicLevel);
 
     if (swap_stereo_channels_ && stereo_codec_)
         // Only bother swapping if we're using a stereo codec.
@@ -1293,94 +1281,43 @@ WebRtc_Word32 TransmitMixer::MixOrReplaceAudioWithFile(
     return 0;
 }
 
-WebRtc_Word32 TransmitMixer::APMProcessStream(
-    const WebRtc_UWord16 totalDelayMS,
-    const WebRtc_Word32 clockDrift,
-    const WebRtc_UWord16 currentMicLevel)
-{
-    WebRtc_UWord16 captureLevel(currentMicLevel);
+void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift,
+                                 int current_mic_level) {
+  if (audioproc_->set_num_channels(_audioFrame.num_channels_,
+                                   _audioFrame.num_channels_) != 0) {
+    LOG_FERR2(LS_ERROR, set_num_channels, _audioFrame.num_channels_,
+              _audioFrame.num_channels_);
+  }
 
-    // Check if the number of incoming channels has changed. This has taken
-    // both the capture device and send codecs into account.
-    if (_audioFrame.num_channels_ !=
-        _audioProcessingModulePtr->num_input_channels())
-    {
-        if (_audioProcessingModulePtr->set_num_channels(
-                _audioFrame.num_channels_,
-                _audioFrame.num_channels_))
-        {
-            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                         "AudioProcessing::set_num_channels(%d, %d) => error",
-                         _audioFrame.num_channels_,
-                         _audioProcessingModulePtr->num_output_channels());
-        }
-    }
+  if (audioproc_->set_sample_rate_hz(_audioFrame.sample_rate_hz_) != 0) {
+    LOG_FERR1(LS_ERROR, set_sample_rate_hz, _audioFrame.sample_rate_hz_);
+  }
 
-    // If the frequency has changed we need to change APM settings
-    // Sending side is "master"
-    if (_audioProcessingModulePtr->sample_rate_hz() !=
-        _audioFrame.sample_rate_hz_)
-    {
-        if (_audioProcessingModulePtr->set_sample_rate_hz(
-                _audioFrame.sample_rate_hz_))
-        {
-            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                         "AudioProcessing::set_sample_rate_hz(%u) => error",
-                         _audioFrame.sample_rate_hz_);
-        }
-    }
+  if (audioproc_->set_stream_delay_ms(delay_ms) != 0) {
+    // Report as a warning; we can occasionally run into very large delays.
+    LOG_FERR1(LS_WARNING, set_stream_delay_ms, delay_ms);
+  }
 
-    if (_audioProcessingModulePtr->set_stream_delay_ms(totalDelayMS) == -1)
-    {
-        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                     "AudioProcessing::set_stream_delay_ms(%u) => error",
-                     totalDelayMS);
-    }
-    if (_audioProcessingModulePtr->gain_control()->set_stream_analog_level(
-            captureLevel) == -1)
-    {
-        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                     "AudioProcessing::set_stream_analog_level(%u) => error",
-                     captureLevel);
-    }
-    if (_audioProcessingModulePtr->echo_cancellation()->
-        is_drift_compensation_enabled())
-    {
-        if (_audioProcessingModulePtr->echo_cancellation()->
-            set_stream_drift_samples(clockDrift) == -1)
-        {
-            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                         "AudioProcessing::set_stream_drift_samples(%u) => error",
-                         clockDrift);
-        }
-    }
-    if (_audioProcessingModulePtr->ProcessStream(&_audioFrame) == -1)
-    {
-        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                     "AudioProcessing::ProcessStream() => error");
-    }
-    captureLevel =
-        _audioProcessingModulePtr->gain_control()->stream_analog_level();
+  GainControl* agc = audioproc_->gain_control();
+  if (agc->set_stream_analog_level(current_mic_level) != 0) {
+    LOG_FERR1(LS_ERROR, set_stream_analog_level, current_mic_level);
+  }
 
-    // Store new capture level (only updated when analog AGC is enabled)
-    _captureLevel = captureLevel;
+  EchoCancellation* aec = audioproc_->echo_cancellation();
+  if (aec->is_drift_compensation_enabled()) {
+    aec->set_stream_drift_samples(clock_drift);
+  }
 
-    // Log notifications
-    if (_audioProcessingModulePtr->gain_control()->stream_is_saturated())
-    {
-        if (_saturationWarning == 1)
-        {
-            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                         "TransmitMixer::APMProcessStream() pending "
-                         "saturation warning exists");
-        }
-        _saturationWarning = 1; // triggers callback from moduleprocess thread
-        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                     "TransmitMixer::APMProcessStream() VE_SATURATION_WARNING "
-                     "message has been posted for callback");
-    }
+  int err = audioproc_->ProcessStream(&_audioFrame);
+  if (err != 0) {
+    LOG(LS_ERROR) << "ProcessStream() error: " << err;
+  }
 
-    return 0;
+  // Store new capture level. Only updated when analog AGC is enabled.
+  _captureLevel = agc->stream_analog_level();
+
+  // Triggers a callback in OnPeriodicProcess().
+  _saturationWarning |= agc->stream_is_saturated();
 }
 
 #ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
@@ -1422,19 +1359,8 @@ int TransmitMixer::TypingDetection()
         _penaltyCounter += _costPerTyping;
         if (_penaltyCounter > _reportingThreshold)
        {
-            if (_typingNoiseWarning == 1)
-            {
-                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
-                             VoEId(_instanceId, -1),
-                             "TransmitMixer::TypingDetection() pending "
-                             "noise-saturation warning exists");
-            }
-            // triggers callback from the module process thread
-            _typingNoiseWarning = 1;
-            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                         "TransmitMixer::TypingDetection() "
-                         "VE_TYPING_NOISE_WARNING message has been posted for"
-                         "callback");
+            // Triggers a callback in OnPeriodicProcess().
+            _typingNoiseWarning = true;
         }
     }
 
@@ -175,9 +175,7 @@ private:
     WebRtc_Word32 MixOrReplaceAudioWithFile(
         const int mixingFrequency);
 
-    WebRtc_Word32 APMProcessStream(const WebRtc_UWord16 totalDelayMS,
-                                   const WebRtc_Word32 clockDrift,
-                                   const WebRtc_UWord16 currentMicLevel);
+    void ProcessAudio(int delay_ms, int clock_drift, int current_mic_level);
 
 #ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
     int TypingDetection();
@@ -186,7 +184,7 @@ private:
     // uses
     Statistics* _engineStatisticsPtr;
    ChannelManager* _channelManagerPtr;
-    AudioProcessing* _audioProcessingModulePtr;
+    AudioProcessing* audioproc_;
    VoiceEngineObserver* _voiceEngineObserverPtr;
    ProcessThread* _processThreadPtr;
 
@@ -212,7 +210,7 @@ private:
     WebRtc_Word32 _timeActive;
     WebRtc_Word32 _timeSinceLastTyping;
     WebRtc_Word32 _penaltyCounter;
-    WebRtc_UWord32 _typingNoiseWarning;
+    bool _typingNoiseWarning;
 
     // Tunable treshold values
     int _timeWindow; // nr of10ms slots accepted to count as a hit.
@@ -222,8 +220,7 @@ private:
     int _typeEventDelay; // How old typing events we allow
 
 #endif
-    WebRtc_UWord32 _saturationWarning;
-    WebRtc_UWord32 _noiseWarning;
+    bool _saturationWarning;
 
     int _instanceId;
     bool _mixFileWithMicrophone;