Removed level estimator calls, since it is not supported. There is still one place left; it is used within SetRTPAudioLevelIndicationStatus(). The error return value of level_estimator() has no effect there.

The VoE auto tests have been updated as well.
Review URL: http://webrtc-codereview.appspot.com/178003

git-svn-id: http://webrtc.googlecode.com/svn/trunk@658 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
bjornv@google.com 2011-09-28 14:08:19 +00:00
parent 2f56ff48a4
commit 0beae6798d
11 changed files with 104 additions and 463 deletions

View File

@ -150,19 +150,11 @@ public:
virtual int VoiceActivityIndicator(int channel) = 0;
// Enables or disables the possibility to retrieve instantaneous
// speech, noise and echo metrics during an active call.
virtual int SetMetricsStatus(bool enable) = 0;
// echo metrics during an active call.
virtual int SetEchoMetricsStatus(bool enable) = 0;
// Gets the current speech, noise and echo metric status.
virtual int GetMetricsStatus(bool& enabled) = 0;
// Gets the instantaneous speech level metrics for the transmitted
// and received signals.
virtual int GetSpeechMetrics(int& levelTx, int& levelRx) = 0;
// Gets the instantaneous noise level metrics for the transmitted
// and received signals.
virtual int GetNoiseMetrics(int& levelTx, int& levelRx) = 0;
// Gets the current echo metric status.
virtual int GetEchoMetricsStatus(bool& enabled) = 0;
// Gets the instantaneous echo level metrics for the near-end and
// far-end signals.

View File

@ -59,10 +59,6 @@ public:
// the call report for a specified |channel|.
virtual int ResetCallReportStatistics(int channel) = 0;
// Gets minimum, maximum and average levels for long-term speech and
// noise metrics.
virtual int GetSpeechAndNoiseSummary(LevelStatistics& stats) = 0;
// Gets minimum, maximum and average levels for long-term echo metrics.
virtual int GetEchoMetricSummary(EchoStatistics& stats) = 0;

View File

@ -946,148 +946,56 @@ int VoEAudioProcessingImpl::VoiceActivityIndicator(int channel)
return activity;
}
int VoEAudioProcessingImpl::SetMetricsStatus(bool enable)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"SetMetricsStatus(enable=%d)", enable);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
int VoEAudioProcessingImpl::SetEchoMetricsStatus(bool enable) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"SetEchoMetricsStatus(enable=%d)", enable);
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if ((_audioProcessingModulePtr->level_estimator()->Enable(enable)!= 0) ||
(_audioProcessingModulePtr->echo_cancellation()->enable_metrics(enable)
!= 0))
{
_engineStatistics.SetLastError(
VE_APM_ERROR, kTraceError,
"SetMetricsStatus() unable to set metrics mode");
return -1;
}
return 0;
#ifdef WEBRTC_VOICE_ENGINE_ECHO
if (!_engineStatistics.Initialized()) {
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
if (_audioProcessingModulePtr->echo_cancellation()->enable_metrics(enable) !=
0) {
_engineStatistics.SetLastError(
VE_APM_ERROR, kTraceError,
"SetEchoMetricsStatus() unable to set echo metrics mode");
return -1;
}
return 0;
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceError, "SetEcStatus() EC is not supported");
return -1;
#endif
}
int VoEAudioProcessingImpl::GetMetricsStatus(bool& enabled)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetMetricsStatus(enabled=?)");
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
int VoEAudioProcessingImpl::GetEchoMetricsStatus(bool& enabled) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetEchoMetricsStatus(enabled=?)");
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
#ifdef WEBRTC_VOICE_ENGINE_ECHO
if (!_engineStatistics.Initialized()) {
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
bool levelMode =
_audioProcessingModulePtr->level_estimator()->is_enabled();
bool echoMode =
_audioProcessingModulePtr->echo_cancellation()->are_metrics_enabled();
enabled =
_audioProcessingModulePtr->echo_cancellation()->are_metrics_enabled();
if (levelMode != echoMode)
{
_engineStatistics.SetLastError(
VE_APM_ERROR, kTraceError,
"GetMetricsStatus() level mode and echo mode are not the same");
return -1;
}
enabled = levelMode;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetMetricsStatus() => enabled=%d", enabled);
return 0;
}
int VoEAudioProcessingImpl::GetSpeechMetrics(int& levelTx, int& levelRx)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeechMetrics(levelTx=?, levelRx=?)");
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
LevelEstimator::Metrics levelMetrics;
LevelEstimator::Metrics reverseLevelMetrics;
bool levelMode = _audioProcessingModulePtr->level_estimator()->is_enabled();
if (levelMode == false)
{
_engineStatistics.SetLastError(
VE_APM_ERROR, kTraceWarning,
"GetSpeechMetrics() AudioProcessingModule level metrics is "
"not enabled");
return -1;
}
if (_audioProcessingModulePtr->level_estimator()->GetMetrics(
&levelMetrics, &reverseLevelMetrics))
{
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeechMetrics(), AudioProcessingModule level metrics"
" error");
return -1;
}
levelTx = levelMetrics.speech.instant;
levelRx = reverseLevelMetrics.speech.instant;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetSpeechMetrics() => levelTx=%d, levelRx=%d",
levelTx, levelRx);
return 0;
}
int VoEAudioProcessingImpl::GetNoiseMetrics(int& levelTx, int& levelRx)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
"GetNoiseMetrics(levelTx=?, levelRx=?)");
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
bool levelMode =
_audioProcessingModulePtr->level_estimator()->is_enabled();
LevelEstimator::Metrics levelMetrics;
LevelEstimator::Metrics reverseLevelMetrics;
if (levelMode == false)
{
_engineStatistics.SetLastError(
VE_APM_ERROR, kTraceWarning,
"GetNoiseMetrics() AudioProcessingModule level metrics is not"
"enabled");
return -1;
}
if (_audioProcessingModulePtr->level_estimator()->GetMetrics(
&levelMetrics, &reverseLevelMetrics))
{
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
"GetNoiseMetrics(), AudioProcessingModule level metrics"
" error");
return -1;
}
levelTx = levelMetrics.noise.instant;
levelRx = reverseLevelMetrics.noise.instant;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
"GetNoiseMetrics() => levelTx=%d, levelRx=%d", levelTx, levelRx);
return 0;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
"GetEchoMetricsStatus() => enabled=%d", enabled);
return 0;
#else
_engineStatistics.SetLastError(
VE_FUNC_NOT_SUPPORTED, kTraceError, "SetEcStatus() EC is not supported");
return -1;
#endif
}
int VoEAudioProcessingImpl::GetEchoMetrics(int& ERL,
@ -1095,7 +1003,7 @@ int VoEAudioProcessingImpl::GetEchoMetrics(int& ERL,
int& RERL,
int& A_NLP)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetEchoMetrics(ERL=?, ERLE=?, RERL=?, A_NLP=?)");
ANDROID_NOT_SUPPORTED();
IPHONE_NOT_SUPPORTED();

View File

@ -70,13 +70,9 @@ public:
virtual int VoiceActivityIndicator(int channel);
virtual int SetMetricsStatus(bool enable);
virtual int SetEchoMetricsStatus(bool enable);
virtual int GetMetricsStatus(bool& enabled);
virtual int GetSpeechMetrics(int& levelTx, int& levelRx);
virtual int GetNoiseMetrics(int& levelTx, int& levelRx);
virtual int GetEchoMetricsStatus(bool& enabled);
virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP);

View File

@ -497,16 +497,6 @@ int VoEBaseImpl::Init(AudioDeviceModule* external_adm)
"Init() failed to set AGC state for "
"AP module");
}
// Level Metrics
if (_audioProcessingModulePtr->level_estimator()->Enable(
WEBRTC_VOICE_ENGINE_LEVEL_ESTIMATOR_DEFAULT_STATE)
!= 0)
{
_engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
"Init() failed to set Level "
"Estimator state for AP"
"module");
}
// VAD
if (_audioProcessingModulePtr->voice_detection()->Enable(
WEBRTC_VOICE_ENGINE_VAD_DEFAULT_STATE)

View File

@ -85,35 +85,23 @@ int VoECallReportImpl::ResetCallReportStatistics(int channel)
}
assert(_audioProcessingModulePtr != NULL);
bool levelMode =
_audioProcessingModulePtr->level_estimator()->is_enabled();
bool echoMode =
_audioProcessingModulePtr->echo_cancellation()->are_metrics_enabled();
// We always set the same mode for the level and echo
if (levelMode != echoMode)
{
_engineStatistics.SetLastError(VE_APM_ERROR, kTraceError,
"ResetCallReportStatistics() level mode "
"and echo mode are not the same");
return -1;
}
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
" current AudioProcessingModule metric currentState %d",
levelMode);
" current AudioProcessingModule echo metric currentState %d",
echoMode);
// Reset the APM statistics
if ((_audioProcessingModulePtr->level_estimator()->Enable(true) != 0)
|| (_audioProcessingModulePtr->echo_cancellation()->enable_metrics(true)
!= 0))
if (_audioProcessingModulePtr->echo_cancellation()->enable_metrics(true)
!= 0)
{
_engineStatistics.SetLastError(VE_APM_ERROR, kTraceError,
"ResetCallReportStatistics() unable to "
"set the AudioProcessingModule metrics "
"state");
"set the AudioProcessingModule echo "
"metrics state");
return -1;
}
// Restore metric states
_audioProcessingModulePtr->level_estimator()->Enable(levelMode);
_audioProcessingModulePtr->echo_cancellation()->enable_metrics(echoMode);
// Reset channel dependent statistics
@ -156,102 +144,6 @@ int VoECallReportImpl::ResetCallReportStatistics(int channel)
return 0;
}
int VoECallReportImpl::GetSpeechAndNoiseSummary(LevelStatistics& stats)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
"GetSpeechAndNoiseSummary()");
ANDROID_NOT_SUPPORTED();IPHONE_NOT_SUPPORTED();
if (!_engineStatistics.Initialized())
{
_engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
assert(_audioProcessingModulePtr != NULL);
return (GetSpeechAndNoiseSummaryInternal(stats));
}
int VoECallReportImpl::GetSpeechAndNoiseSummaryInternal(LevelStatistics& stats)
{
int ret(0);
bool mode(false);
LevelEstimator::Metrics metrics;
LevelEstimator::Metrics reverseMetrics;
// Ensure that level metrics is enabled
mode = _audioProcessingModulePtr->level_estimator()->is_enabled();
if (mode != false)
{
ret = _audioProcessingModulePtr->level_estimator()->GetMetrics(
&metrics, &reverseMetrics);
if (ret != 0)
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
" GetSpeechAndNoiseSummary(), AudioProcessingModule "
"level metrics error");
}
}
else
{
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
" GetSpeechAndNoiseSummary(), AudioProcessingModule level "
"metrics is not enabled");
}
if ((ret != 0) || (mode == false))
{
// Mark complete struct as invalid (-100 dBm0)
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
" unable to retrieve level metrics from the "
"AudioProcessingModule");
stats.noise_rx.min = -100;
stats.noise_rx.max = -100;
stats.noise_rx.average = -100;
stats.speech_rx.min = -100;
stats.speech_rx.max = -100;
stats.speech_rx.average = -100;
stats.noise_tx.min = -100;
stats.noise_tx.max = -100;
stats.noise_tx.average = -100;
stats.speech_tx.min = -100;
stats.speech_tx.max = -100;
stats.speech_tx.average = -100;
}
else
{
// Deliver output results to user
stats.noise_rx.min = reverseMetrics.noise.minimum;
stats.noise_rx.max = reverseMetrics.noise.maximum;
stats.noise_rx.average = reverseMetrics.noise.average;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" noise_rx: min=%d, max=%d, avg=%d", stats.noise_rx.min,
stats.noise_rx.max, stats.noise_rx.average);
stats.noise_tx.min = metrics.noise.minimum;
stats.noise_tx.max = metrics.noise.maximum;
stats.noise_tx.average = metrics.noise.average;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" noise_tx: min=%d, max=%d, avg=%d", stats.noise_tx.min,
stats.noise_tx.max, stats.noise_tx.average);
stats.speech_rx.min = reverseMetrics.speech.minimum;
stats.speech_rx.max = reverseMetrics.speech.maximum;
stats.speech_rx.average = reverseMetrics.speech.average;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" speech_rx: min=%d, max=%d, avg=%d", stats.speech_rx.min,
stats.speech_rx.max, stats.speech_rx.average);
stats.speech_tx.min = metrics.speech.minimum;
stats.speech_tx.max = metrics.speech.maximum;
stats.speech_tx.average = metrics.speech.average;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
" speech_tx: min=%d, max=%d, avg=%d", stats.speech_tx.min,
stats.speech_tx.max, stats.speech_tx.average);
}
return 0;
}
int VoECallReportImpl::GetEchoMetricSummary(EchoStatistics& stats)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
@ -499,33 +391,6 @@ int VoECallReportImpl::WriteReportToFile(const char* fileNameUTF8)
delete[] channelsArray;
LevelStatistics stats;
GetSpeechAndNoiseSummary(stats);
_file.WriteText("\nLong-term Speech Levels\n");
_file.WriteText("-----------------------\n\n");
_file.WriteText("Transmitting side:\n");
_file.WriteText(" min:%5d [dBm0]\n", stats.speech_tx.min);
_file.WriteText(" max:%5d [dBm0]\n", stats.speech_tx.max);
_file.WriteText(" avg:%5d [dBm0]\n", stats.speech_tx.average);
_file.WriteText("\nReceiving side:\n");
_file.WriteText(" min:%5d [dBm0]\n", stats.speech_rx.min);
_file.WriteText(" max:%5d [dBm0]\n", stats.speech_rx.max);
_file.WriteText(" avg:%5d [dBm0]\n", stats.speech_rx.average);
_file.WriteText("\nLong-term Noise Levels\n");
_file.WriteText("----------------------\n\n");
_file.WriteText("Transmitting side:\n");
_file.WriteText(" min:%5d [dBm0]\n", stats.noise_tx.min);
_file.WriteText(" max:%5d [dBm0]\n", stats.noise_tx.max);
_file.WriteText(" avg:%5d [dBm0]\n", stats.noise_tx.average);
_file.WriteText("\nReceiving side:\n");
_file.WriteText(" min:%5d [dBm0]\n", stats.noise_rx.min);
_file.WriteText(" max:%5d [dBm0]\n", stats.noise_rx.max);
_file.WriteText(" avg:%5d [dBm0]\n", stats.noise_rx.average);
EchoStatistics echo;
GetEchoMetricSummary(echo);

View File

@ -30,8 +30,6 @@ public:
virtual int ResetCallReportStatistics(int channel);
virtual int GetSpeechAndNoiseSummary(LevelStatistics& stats);
virtual int GetEchoMetricSummary(EchoStatistics& stats);
virtual int GetRoundTripTimeSummary(int channel,

View File

@ -263,8 +263,7 @@ int VoERTP_RTCPImpl::SetRTPAudioLevelIndicationStatus(int channel,
}
// Set AudioProcessingModule level-metric mode based on user input.
// Note that this setting may conflict with the
// AudioProcessing::SetMetricsStatus API.
// Note that the Level Estimator component is currently not supported
if (_audioProcessingModulePtr->level_estimator()->Enable(enable) != 0)
{
_engineStatistics.SetLastError(

View File

@ -139,9 +139,6 @@ enum { kVoiceEngineMaxRtpExtensionId = 14 };
// AudioProcessing AGC on
#define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
// AudioProcessing EC off
#define WEBRTC_VOICE_ENGINE_LEVEL_ESTIMATOR_DEFAULT_STATE \
WEBRTC_AUDIO_PROCESSING_OFF
// AudioProcessing Estimator off
#define WEBRTC_VOICE_ENGINE_VAD_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
// AudioProcessing off
#define WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF

View File

@ -1646,6 +1646,8 @@ int VoEExtendedTest::TestCallReport()
VoEBase* base = _mgr.BasePtr();
VoECallReport* report = _mgr.CallReportPtr();
VoEFile* file = _mgr.FilePtr();
VoEAudioProcessing* apm = _mgr.APMPtr();
VoENetwork* netw = _mgr.NetworkPtr();
PrepareTest("CallReport");
@ -1680,8 +1682,6 @@ int VoEExtendedTest::TestCallReport()
///////////////////////////
// Actual test starts here
// TODO(xians), enable the tests when APM is ready
/*
TEST(ResetCallReportStatistics);
ANL();
TEST_MUSTPASS(!report->ResetCallReportStatistics(-2));
@ -1695,51 +1695,19 @@ int VoEExtendedTest::TestCallReport()
AOK();
ANL();
LevelStatistics stats;
bool enabled;
TEST(GetSpeechAndNoiseSummary);
ANL();
TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
TEST_MUSTPASS(enabled != false);
// All values should be -100 dBm0 when metrics are disabled
TEST_MUSTPASS(report->GetSpeechAndNoiseSummary(stats));
MARK();
TEST_MUSTPASS(stats.noise_rx.min != -100);
TEST_MUSTPASS(stats.noise_rx.max != -100);
TEST_MUSTPASS(stats.noise_rx.average != -100);
TEST_MUSTPASS(stats.noise_tx.min != -100);
TEST_MUSTPASS(stats.noise_tx.max != -100);
TEST_MUSTPASS(stats.noise_tx.average != -100);
TEST_MUSTPASS(stats.speech_rx.min != -100);
TEST_MUSTPASS(stats.speech_rx.max != -100);
TEST_MUSTPASS(stats.speech_rx.average != -100);
TEST_MUSTPASS(stats.speech_tx.min != -100);
TEST_MUSTPASS(stats.speech_tx.max != -100);
TEST_MUSTPASS(stats.speech_tx.average != -100);
//
TEST_MUSTPASS(apm->SetMetricsStatus(true));
SLEEP(7000);
// All values should *not* be -100 dBm0 when metrics are enabled (check
// Rx side only since user might be silent)
TEST_MUSTPASS(report->GetSpeechAndNoiseSummary(stats));
MARK();
TEST_MUSTPASS(stats.noise_rx.min == -100);
TEST_MUSTPASS(stats.noise_rx.max == -100);
TEST_MUSTPASS(stats.noise_rx.average == -100);
TEST_MUSTPASS(stats.speech_rx.min == -100);
TEST_MUSTPASS(stats.speech_rx.max == -100);
TEST_MUSTPASS(stats.speech_rx.average == -100);
AOK();
ANL();
bool enabled = false;
EchoStatistics echo;
TEST(GetEchoMetricSummary);
ANL();
TEST_MUSTPASS(apm->GetEchoMetricsStatus(enabled));
TEST_MUSTPASS(enabled != false);
TEST_MUSTPASS(apm->SetEchoMetricsStatus(true));
TEST_MUSTPASS(report->GetEchoMetricSummary(echo)); // all outputs will be
// -100 in loopback (skip further tests)
AOK();
ANL();
// TODO(xians): investigate the cause of test failure before enabling.
/*
StatVal delays;
TEST(GetRoundTripTimeSummary);
ANL();
@ -1748,20 +1716,21 @@ int VoEExtendedTest::TestCallReport()
MARK();
TEST_MUSTPASS(delays.min == -1);
TEST_MUSTPASS(delays.max == -1);
TEST_MUSTPASS(delays.max == -1);
TEST_MUSTPASS(delays.average == -1);
rtp_rtcp->SetRTCPStatus(0, false);
// All values should be -1 since RTCP is off
TEST_MUSTPASS(report->GetRoundTripTimeSummary(0, delays));
MARK();
TEST_MUSTPASS(delays.min != -1);
TEST_MUSTPASS(delays.max != -1);
TEST_MUSTPASS(delays.max != -1);
TEST_MUSTPASS(delays.average != -1);
rtp_rtcp->SetRTCPStatus(0, true);
AOK();
ANL();
*/
int nDead(0);
int nAlive(0);
int nDead = 0;
int nAlive = 0;
TEST(GetDeadOrAliveSummary);
ANL();
// All results should be -1 since dead-or-alive is not active
@ -1806,7 +1775,7 @@ int VoEExtendedTest::TestCallReport()
MARK(); // should work with UTF-8 as well (κλνξ.txt)
AOK();
ANL();
*/
TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
TEST_MUSTPASS(base->StopSend(0));
TEST_MUSTPASS(base->StopPlayout(0));
@ -6952,6 +6921,8 @@ int VoEExtendedTest::TestRTP_RTCP()
TEST_ERROR(VE_INVALID_ARGUMENT);
TEST_MUSTPASS(-1 != rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, false, 15));
MARK();
// TODO(bjornv): Activate tests below when APM supports level estimation.
/*
TEST_MUSTPASS(-1 != rtp_rtcp->SetRTPAudioLevelIndicationStatus(1, true, 5));
MARK();
TEST_ERROR(VE_CHANNEL_NOT_VALID);
@ -6978,6 +6949,7 @@ int VoEExtendedTest::TestRTP_RTCP()
// disable audio-level-rtp-header-extension
TEST_MUSTPASS(rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, false));
*/
MARK();
ANL();
@ -8145,67 +8117,32 @@ digitalCompressionGaindBDefault);
SLEEP(NSSleep);
//////////////////////////////////
// Speech, Noise and Echo Metrics
// Echo Metrics
#if (!defined(MAC_IPHONE) && !defined(WEBRTC_ANDROID))
// TODO(xians), enable the tests when APM is ready
/*
TEST(GetMetricsStatus);
TEST(GetEchoMetricsStatus);
ANL();
TEST(SetMetricsStatus);
TEST(SetEchoMetricsStatus);
ANL();
TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
TEST_MUSTPASS(apm->GetEchoMetricsStatus(enabled));
MARK();
TEST_MUSTPASS(enabled != false);
MARK(); // should be OFF by default
TEST_MUSTPASS(apm->SetMetricsStatus(true));
TEST_MUSTPASS(apm->SetEchoMetricsStatus(true));
MARK();
TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
TEST_MUSTPASS(apm->GetEchoMetricsStatus(enabled));
MARK();
TEST_MUSTPASS(enabled != true);
MARK();
TEST_MUSTPASS(apm->SetMetricsStatus(false));
TEST_MUSTPASS(apm->SetEchoMetricsStatus(false));
MARK();
TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
TEST_MUSTPASS(apm->GetEchoMetricsStatus(enabled));
MARK();
TEST_MUSTPASS(enabled != false);
MARK();
AOK();
ANL();
TEST(GetSpeechMetrics);
ANL();
int levelTx, levelRx;
TEST_MUSTPASS(-1 != apm->GetSpeechMetrics(levelTx, levelRx));
MARK(); // should fail since not activated
err = base->LastError();
TEST_MUSTPASS(err != VE_APM_ERROR);
TEST_MUSTPASS(apm->SetMetricsStatus(true));
TEST_MUSTPASS(apm->GetSpeechMetrics(levelTx, levelRx));
MARK();
TEST_LOG("\nSpeech: levelTx=%d, levelRx=%d [dBm0]\n",
levelTx, levelTx);
TEST_MUSTPASS(apm->SetMetricsStatus(false));
AOK();
ANL();
TEST(GetNoiseMetrics);
ANL();
TEST_MUSTPASS(-1 != apm->GetNoiseMetrics(levelTx, levelRx));
MARK(); // should fail since not activated
err = base->LastError();
TEST_MUSTPASS(err != VE_APM_ERROR);
TEST_MUSTPASS(apm->SetMetricsStatus(true));
TEST_MUSTPASS(apm->GetNoiseMetrics(levelTx, levelRx));
MARK();
TEST_LOG("\nNoise: levelTx=%d, levelRx=%d [dBm0]\n",
levelTx, levelTx);
TEST_MUSTPASS(apm->SetMetricsStatus(false));
AOK( );
ANL();
TEST(GetEchoMetrics);
ANL();
@ -8214,7 +8151,7 @@ digitalCompressionGaindBDefault);
MARK(); // should fail since not activated
err = base->LastError();
TEST_MUSTPASS(err != VE_APM_ERROR);
TEST_MUSTPASS(apm->SetMetricsStatus(true));
TEST_MUSTPASS(apm->SetEchoMetricsStatus(true));
TEST_MUSTPASS(-1 != apm->GetEchoMetrics(ERL, ERLE, RERL, A_NLP));
MARK(); // should fail since AEC is off
err = base->LastError();
@ -8225,11 +8162,11 @@ digitalCompressionGaindBDefault);
TEST_LOG(
"\nEcho: ERL=%d, ERLE=%d, RERL=%d, A_NLP=%d [dB]\n",
ERL, ERLE, RERL, A_NLP);
TEST_MUSTPASS(apm->SetMetricsStatus(false));
TEST_MUSTPASS(apm->SetEchoMetricsStatus(false));
TEST_MUSTPASS(apm->SetEcStatus(false));
AOK();
ANL();
*/
#endif // #if (!defined(MAC_IPHONE) && !defined(WEBRTC_ANDROID))
// far-end AudioProcessing
///////

View File

@ -2789,44 +2789,36 @@ int VoETestManager::DoStandardTest()
TEST_LOG("Skipping NS tests - WEBRTC_VOICE_ENGINE_NR not defined \n");
#endif // #ifdef WEBRTC_VOICE_ENGINE_NR
// TODO(xians), enable the metrics test when APM is ready
/*
#if (!defined(MAC_IPHONE) && !defined(WEBRTC_ANDROID) && defined(WEBRTC_VOICE_ENGINE_NR))
TEST_LOG("Speech, Noise and Echo Metric calls\n");
TEST_MUSTPASS(apm->GetMetricsStatus(enabled)); // check default
TEST_MUSTPASS(enabled != false);
TEST_MUSTPASS(apm->SetMetricsStatus(true)); // enable metrics
#ifdef WEBRTC_VOICE_ENGINE_ECHO
bool enabled = false;
TEST_LOG("Echo Metric calls\n");
TEST_MUSTPASS(apm->GetEchoMetricsStatus(enabled)); // check default
TEST_MUSTPASS(enabled != false);
TEST_MUSTPASS(apm->SetEchoMetricsStatus(true)); // enable echo metrics
// must enable AEC to get valid echo metrics
TEST_MUSTPASS(apm->SetEcStatus(true, kEcAec));
#endif
TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
TEST_MUSTPASS(apm->GetEchoMetricsStatus(enabled));
TEST_MUSTPASS(enabled != true);
TEST_LOG("Speak into microphone and check metrics for 10 seconds...\n");
int speech_tx, speech_rx;
int noise_tx, noise_rx;
#ifdef WEBRTC_VOICE_ENGINE_ECHO
int ERLE, ERL, RERL, A_NLP;
#endif
for (int t = 0; t < 5; t++)
{
SLEEP(2000);
TEST_MUSTPASS(apm->GetSpeechMetrics(speech_tx, speech_rx));
TEST_LOG(" Speech: Tx=%5d, Rx=%5d [dBm0]\n", speech_tx, speech_rx);
TEST_MUSTPASS(apm->GetNoiseMetrics(noise_tx, noise_rx));
TEST_LOG(" Noise : Tx=%5d, Rx=%5d [dBm0]\n", noise_tx, noise_rx);
#ifdef WEBRTC_VOICE_ENGINE_ECHO
TEST_MUSTPASS(apm->GetEchoMetrics(ERL, ERLE, RERL, A_NLP));
TEST_LOG(" Echo : ERL=%5d, ERLE=%5d, RERL=%5d, A_NLP=%5d [dB]\n",
ERL, ERLE, RERL, A_NLP);
#endif
}
TEST_MUSTPASS(apm->SetMetricsStatus(false)); // disable metrics
TEST_MUSTPASS(apm->SetEchoMetricsStatus(false)); // disable echo metrics
#else
TEST_LOG("Skipping echo metrics tests -"
" WEBRTC_VOICE_ENGINE_ECHO not defined \n");
#endif // #ifdef WEBRTC_VOICE_ENGINE_ECHO
#else
TEST_LOG("Skipping apm metrics tests - MAC_IPHONE/WEBRTC_ANDROID defined \n");
#endif // #if (!defined(MAC_IPHONE) && !d...
*/
// VAD/DTX indication
TEST_LOG("Get voice activity indication \n");
if (codec)
@ -3256,8 +3248,6 @@ int VoETestManager::DoStandardTest()
#ifdef _TEST_CALL_REPORT_
TEST_LOG("\n\n+++ CallReport tests +++\n\n");
#if (defined(WEBRTC_VOICE_ENGINE_ECHO) && defined(WEBRTC_VOICE_ENGINE_NR))
// TODO(xians), enale the tests when APM is ready
/*
TEST(ResetCallReportStatistics);ANL();
TEST_MUSTPASS(!report->ResetCallReportStatistics(-2));
TEST_MUSTPASS(!report->ResetCallReportStatistics(1));
@ -3265,36 +3255,10 @@ int VoETestManager::DoStandardTest()
TEST_MUSTPASS(report->ResetCallReportStatistics(-1));
bool onOff;
LevelStatistics stats;
TEST_MUSTPASS(apm->GetMetricsStatus(onOff));
TEST_MUSTPASS(apm->GetEchoMetricsStatus(onOff));
TEST_MUSTPASS(onOff != false);
// All values should be -100 dBm0 when metrics are disabled
TEST(GetSpeechAndNoiseSummary);ANL();
TEST_MUSTPASS(report->GetSpeechAndNoiseSummary(stats));
TEST_MUSTPASS(stats.noise_rx.min != -100);
TEST_MUSTPASS(stats.noise_rx.max != -100);
TEST_MUSTPASS(stats.noise_rx.average != -100);
TEST_MUSTPASS(stats.noise_tx.min != -100);
TEST_MUSTPASS(stats.noise_tx.max != -100);
TEST_MUSTPASS(stats.noise_tx.average != -100);
TEST_MUSTPASS(stats.speech_rx.min != -100);
TEST_MUSTPASS(stats.speech_rx.max != -100);
TEST_MUSTPASS(stats.speech_rx.average != -100);
TEST_MUSTPASS(stats.speech_tx.min != -100);
TEST_MUSTPASS(stats.speech_tx.max != -100);
TEST_MUSTPASS(stats.speech_tx.average != -100);
TEST_MUSTPASS(apm->SetMetricsStatus(true));
TEST_MUSTPASS(apm->SetEchoMetricsStatus(true));
SLEEP(3000);
// All values should *not* be -100 dBm0 when metrics are enabled
// (check Rx side only since user might be silent)
TEST_MUSTPASS(report->GetSpeechAndNoiseSummary(stats));
TEST_MUSTPASS(stats.noise_rx.min == -100);
TEST_MUSTPASS(stats.noise_rx.max == -100);
TEST_MUSTPASS(stats.noise_rx.average == -100);
TEST_MUSTPASS(stats.speech_rx.min == -100);
TEST_MUSTPASS(stats.speech_rx.max == -100);
TEST_MUSTPASS(stats.speech_rx.average == -100);
EchoStatistics echo;
TEST(GetEchoMetricSummary);ANL();
// all outputs will be -100 in loopback (skip further tests)
@ -3307,17 +3271,17 @@ int VoETestManager::DoStandardTest()
TEST_MUSTPASS(report->GetRoundTripTimeSummary(0, delays));
TEST_MUSTPASS(delays.min != -1);
TEST_MUSTPASS(delays.max != -1);
TEST_MUSTPASS(delays.max != -1);
TEST_MUSTPASS(delays.average != -1);
rtp_rtcp->SetRTCPStatus(0, true);
SLEEP(5000); // gives time for RTCP
TEST_MUSTPASS(report->GetRoundTripTimeSummary(0, delays));
TEST_MUSTPASS(delays.min == -1);
TEST_MUSTPASS(delays.max == -1);
TEST_MUSTPASS(delays.max == -1);
TEST_MUSTPASS(delays.average == -1);
rtp_rtcp->SetRTCPStatus(0, false);
int nDead;
int nAlive;
int nDead = 0;
int nAlive = 0;
// -1 will be returned since dead-or-alive is not active
TEST(GetDeadOrAliveSummary);ANL();
TEST_MUSTPASS(report->GetDeadOrAliveSummary(0, nDead, nAlive) != -1);
@ -3333,7 +3297,6 @@ int VoETestManager::DoStandardTest()
TEST(WriteReportToFile);ANL();
TEST_MUSTPASS(!report->WriteReportToFile(NULL));
TEST_MUSTPASS(report->WriteReportToFile("call_report.txt"));
*/
#else
TEST_LOG("Skipping CallReport tests since both EC and NS are required\n");
#endif