Add an API to offset system delay.

Plumb it through VoiceEngine.

BUG=
TEST=voe_auto_test,audioproc_unittest

Review URL: https://webrtc-codereview.appspot.com/428010

git-svn-id: http://webrtc.googlecode.com/svn/trunk@1846 4adac7df-926f-26a2-2b94-8c16560cd09d
andrew@webrtc.org 2012-03-06 19:03:39 +00:00
parent 9b3ab115ad
commit 6f9f817e06
8 changed files with 88 additions and 19 deletions
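The change in a nutshell: AudioProcessing gains a delay offset that is folded into every subsequent set_stream_delay_ms() call, and VoiceEngine exposes it as SetDelayOffsetMs()/DelayOffsetMs(). A rough usage sketch on the AudioProcessing side, not part of this change; it assumes the pre-existing webrtc::AudioProcessing and webrtc::AudioFrame capture-side API, and the 30 ms figure is made up:

// Rough sketch, not part of the patch: ProcessStream() and AudioFrame are
// the pre-existing capture-side API; the 30 ms offset is for illustration.
void SetupDelayOffset(webrtc::AudioProcessing* apm) {
  // Delay known to the application but not reported by the OS.
  apm->set_delay_offset_ms(30);
}

void ProcessCaptureFrame(webrtc::AudioProcessing* apm,
                         webrtc::AudioFrame* frame,
                         int os_reported_delay_ms) {
  // The AEC ends up seeing os_reported_delay_ms + 30.
  apm->set_stream_delay_ms(os_reported_delay_ms);
  apm->ProcessStream(frame);
}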

View File

@@ -71,6 +71,7 @@ AudioProcessingImpl::AudioProcessingImpl(int id)
split_sample_rate_hz_(kSampleRate16kHz),
samples_per_channel_(sample_rate_hz_ / 100),
stream_delay_ms_(0),
delay_offset_ms_(0),
was_stream_delay_set_(false),
num_reverse_channels_(1),
num_input_channels_(1),
@@ -450,6 +451,8 @@ int AudioProcessingImpl::set_stream_delay_ms(int delay) {
int AudioProcessingImpl::set_stream_delay_ms(int delay) {
was_stream_delay_set_ = true;
delay += delay_offset_ms_;
if (delay < 0) {
return kBadParameterError;
}
@@ -472,6 +475,15 @@ bool AudioProcessingImpl::was_stream_delay_set() const {
return was_stream_delay_set_;
}
void AudioProcessingImpl::set_delay_offset_ms(int offset) {
CriticalSectionScoped crit_scoped(crit_);
delay_offset_ms_ = offset;
}
int AudioProcessingImpl::delay_offset_ms() const {
return delay_offset_ms_;
}
int AudioProcessingImpl::StartDebugRecording(
const char filename[AudioProcessing::kMaxFilenameSize]) {
CriticalSectionScoped crit_scoped(crit_);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@@ -69,6 +69,8 @@ class AudioProcessingImpl : public AudioProcessing {
virtual int AnalyzeReverseStream(AudioFrame* frame);
virtual int set_stream_delay_ms(int delay);
virtual int stream_delay_ms() const;
virtual void set_delay_offset_ms(int offset);
virtual int delay_offset_ms() const;
virtual int StartDebugRecording(const char filename[kMaxFilenameSize]);
virtual int StopDebugRecording();
virtual EchoCancellation* echo_cancellation() const;
@@ -115,6 +117,7 @@ class AudioProcessingImpl : public AudioProcessing {
int split_sample_rate_hz_;
int samples_per_channel_;
int stream_delay_ms_;
int delay_offset_ms_;
bool was_stream_delay_set_;
int num_reverse_channels_;

View File

@@ -187,6 +187,14 @@ class AudioProcessing : public Module {
virtual int set_stream_delay_ms(int delay) = 0;
virtual int stream_delay_ms() const = 0;
// Sets a delay |offset| in ms to add to the values passed in through
// set_stream_delay_ms(). May be positive or negative.
//
// Note that this could cause an otherwise valid value passed to
// set_stream_delay_ms() to return an error.
virtual void set_delay_offset_ms(int offset) = 0;
virtual int delay_offset_ms() const = 0;
// Starts recording debugging information to a file specified by |filename|,
// a NULL-terminated string. If there is an ongoing recording, the old file
// will be closed, and recording will continue in the newly specified file.
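
The interplay between the offset and set_stream_delay_ms() described in the comment above can be made concrete with a short sketch. This is illustrative only and not part of the patch; the include path is assumed, |apm| is assumed to be an already created instance, and the warning/error behaviour is taken from the DelayOffset unit test further down:

#include "modules/audio_processing/interface/audio_processing.h"  // path assumed

// Sketch only: |apm| is an existing AudioProcessing instance created elsewhere.
void DelayOffsetReturnCodes(webrtc::AudioProcessing* apm) {
  // 100 ms of extra delay known to the application but not reported by the OS.
  apm->set_delay_offset_ms(100);

  // 450 + 100 = 550 ms: the DelayOffset test below shows this returning
  // kBadStreamParameterWarning rather than kNoError.
  int err = apm->set_stream_delay_ms(450);

  // 100 + 100 = 200 ms: kNoError, and stream_delay_ms() now reports 200.
  err = apm->set_stream_delay_ms(100);

  // A negative offset can push an otherwise valid value below zero:
  // 20 - 50 = -30 ms is rejected with kBadParameterError.
  apm->set_delay_offset_ms(-50);
  err = apm->set_stream_delay_ms(20);
  (void)err;
}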

View File

@@ -174,7 +174,6 @@ std::string ApmTest::OutputFilePath(std::string name,
return output_path_ + ss.str();
}
void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
int num_input_channels, int num_output_channels,
bool open_output_file) {
@@ -516,6 +515,20 @@ TEST_F(ApmTest, StreamParameters) {
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
}
TEST_F(ApmTest, DelayOffset) {
apm_->set_delay_offset_ms(100);
EXPECT_EQ(100, apm_->delay_offset_ms());
EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(450));
EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
EXPECT_EQ(200, apm_->stream_delay_ms());
apm_->set_delay_offset_ms(-50);
EXPECT_EQ(-50, apm_->delay_offset_ms());
EXPECT_EQ(apm_->kBadParameterError, apm_->set_stream_delay_ms(20));
EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
EXPECT_EQ(50, apm_->stream_delay_ms());
}
TEST_F(ApmTest, Channels) {
// Testing number of invalid channels
EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(0, 1));

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@@ -97,6 +97,14 @@ public:
// Gets the EC status and mode.
virtual int GetEcStatus(bool& enabled, EcModes& mode) = 0;
// Sets a delay |offset| in ms to add to the system delay reported by the
// OS, which is used by the AEC to synchronize far- and near-end streams.
// In some cases a system may introduce a delay which goes unreported by the
// OS, but which is known to the user. This method can be used to compensate
// for the unreported delay.
virtual void SetDelayOffsetMs(int offset) = 0;
virtual int DelayOffsetMs() = 0;
// Modifies settings for the AEC designed for mobile devices (AECM).
virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
bool enableCNG = true) = 0;
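
At the VoiceEngine layer the new calls simply forward to the AudioProcessing module (see the implementation further down). A minimal usage sketch, assuming the usual VoEAudioProcessing::GetInterface()/Release() sub-API pattern and a hypothetical 40 ms of device-side delay that the OS does not report:

// Sketch only: compensate for delay the OS does not report, e.g. an external
// capture device with a known extra buffer (the 40 ms value is made up).
void CompensateUnreportedDelay(webrtc::VoiceEngine* voe) {
  webrtc::VoEAudioProcessing* apm =
      webrtc::VoEAudioProcessing::GetInterface(voe);
  if (apm == NULL)
    return;

  apm->SetDelayOffsetMs(40);          // added to every reported system delay
  int offset = apm->DelayOffsetMs();  // reads back 40
  (void)offset;

  apm->Release();  // drop the sub-API reference obtained via GetInterface()
}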

View File

@@ -174,7 +174,7 @@ int VoEAudioProcessingImpl::GetNsStatus(bool& enabled, NsModes& mode)
VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetNsStatus() Ns is not supported");
return -1;
#endif
#endif
}
int VoEAudioProcessingImpl::SetAgcStatus(bool enable, AgcModes mode)
@@ -204,7 +204,7 @@ int VoEAudioProcessingImpl::SetAgcStatus(bool enable, AgcModes mode)
{
case kAgcDefault:
agcMode = (GainControl::Mode)WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE;
break;
break;
case kAgcUnchanged:
agcMode = _audioProcessingModulePtr->gain_control()->mode();;
break;
@@ -369,9 +369,9 @@ int VoEAudioProcessingImpl::GetAgcConfig(AgcConfig &config)
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetAgcConfig() => targetLeveldBOv=%u, "
"digitalCompressionGaindB=%u, limiterEnable=%d",
config.targetLeveldBOv,
config.digitalCompressionGaindB,
config.limiterEnable);
config.targetLeveldBOv,
config.digitalCompressionGaindB,
config.limiterEnable);
return 0;
#else
@@ -724,19 +724,33 @@ int VoEAudioProcessingImpl::GetEcStatus(bool& enabled, EcModes& mode)
enabled = _audioProcessingModulePtr->echo_control_mobile()->
is_enabled();
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetEcStatus() => enabled=%i, mode=%i",
enabled, (int)mode);
return 0;
return 0;
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetEcStatus() EC is not supported");
return -1;
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetEcStatus() EC is not supported");
return -1;
#endif
}
void VoEAudioProcessingImpl::SetDelayOffsetMs(int offset)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetDelayOffsetMs(offset = %d)", offset);
_audioProcessingModulePtr->set_delay_offset_ms(offset);
}
int VoEAudioProcessingImpl::DelayOffsetMs()
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"DelayOffsetMs()");
return _audioProcessingModulePtr->delay_offset_ms();
}
int VoEAudioProcessingImpl::SetAecmMode(AecmModes mode, bool enableCNG)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
@@ -747,7 +761,7 @@ int VoEAudioProcessingImpl::SetAecmMode(AecmModes mode, bool enableCNG)
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
EchoControlMobile::RoutingMode aecmMode(
EchoControlMobile::kQuietEarpieceOrHeadset);
@@ -807,7 +821,7 @@ int VoEAudioProcessingImpl::GetAecmMode(AecmModes& mode, bool& enabledCNG)
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
enabledCNG = false;
EchoControlMobile::RoutingMode aecmMode =
@@ -832,7 +846,7 @@ int VoEAudioProcessingImpl::GetAecmMode(AecmModes& mode, bool& enabledCNG)
case EchoControlMobile::kLoudSpeakerphone:
mode = kAecmLoudSpeakerphone;
break;
}
}
return 0;
#else
@@ -1157,7 +1171,7 @@ int VoEAudioProcessingImpl::GetTypingDetectionStatus(bool& enabled)
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
// Just use the VAD state to determine if we should enable typing
// Just use the VAD state to determine if we should enable typing
// detection or not
enabled = _audioProcessingModulePtr->voice_detection()->is_enabled();

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@@ -58,6 +58,10 @@ public:
virtual int GetEcStatus(bool& enabled, EcModes& mode);
virtual void SetDelayOffsetMs(int offset);
virtual int DelayOffsetMs();
virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
bool enableCNG = true);

View File

@@ -326,6 +326,13 @@ TEST_F(AudioProcessingTest, VoiceActivityIndicatorReturns1WithSpeechOn) {
EXPECT_EQ(1, voe_apm_->VoiceActivityIndicator(channel_));
}
TEST_F(AudioProcessingTest, CanSetDelayOffset) {
voe_apm_->SetDelayOffsetMs(50);
EXPECT_EQ(50, voe_apm_->DelayOffsetMs());
voe_apm_->SetDelayOffsetMs(-50);
EXPECT_EQ(-50, voe_apm_->DelayOffsetMs());
}
#if defined(MAC_IPHONE) || defined(WEBRTC_ANDROID)
TEST_F(AudioProcessingTest, AgcIsOffByDefaultAndDigital) {