Removed level estimator calls, since the level estimator is not supported. There is still one place left, used within SetRTPAudioLevelIndicationStatus(); the error return value of level_estimator() has no effect there.

The VoE auto tests have been updated as well.
Review URL: http://webrtc-codereview.appspot.com/178003

git-svn-id: http://webrtc.googlecode.com/svn/trunk@658 4adac7df-926f-26a2-2b94-8c16560cd09d
bjornv@google.com 2011-09-28 14:08:19 +00:00
parent 2f56ff48a4
commit 0beae6798d
11 changed files with 104 additions and 463 deletions

View File

@@ -150,19 +150,11 @@ public:
     virtual int VoiceActivityIndicator(int channel) = 0;

     // Enables or disables the possibility to retrieve instantaneous
-    // speech, noise and echo metrics during an active call.
-    virtual int SetMetricsStatus(bool enable) = 0;
+    // echo metrics during an active call.
+    virtual int SetEchoMetricsStatus(bool enable) = 0;

-    // Gets the current speech, noise and echo metric status.
-    virtual int GetMetricsStatus(bool& enabled) = 0;
+    // Gets the current echo metric status.
+    virtual int GetEchoMetricsStatus(bool& enabled) = 0;

-    // Gets the instantaneous speech level metrics for the transmitted
-    // and received signals.
-    virtual int GetSpeechMetrics(int& levelTx, int& levelRx) = 0;
-
-    // Gets the instantaneous noise level metrics for the transmitted
-    // and received signals.
-    virtual int GetNoiseMetrics(int& levelTx, int& levelRx) = 0;
-
     // Gets the instantaneous echo level metrics for the near-end and
     // far-end signals.
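For reference, a minimal caller-side sketch of the renamed interface (not part of this change; it assumes the usual VoiceEngine::Create()/GetInterface()/Release() pattern and the header name of the file edited above):

    #include "voe_audio_processing.h"
    #include "voe_base.h"

    // Hedged sketch: toggle and query echo metrics through the renamed API
    // (SetMetricsStatus/GetMetricsStatus no longer exist on this interface).
    void EchoMetricsStatusSketch(webrtc::VoiceEngine* voe) {
      webrtc::VoEAudioProcessing* apm =
          webrtc::VoEAudioProcessing::GetInterface(voe);
      bool enabled = false;
      apm->GetEchoMetricsStatus(enabled);  // off by default per the VoE tests
      apm->SetEchoMetricsStatus(true);     // replaces SetMetricsStatus(true)
      apm->SetEchoMetricsStatus(false);
      apm->Release();
    }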

View File

@@ -59,10 +59,6 @@ public:
     // the call report for a specified |channel|.
     virtual int ResetCallReportStatistics(int channel) = 0;

-    // Gets minimum, maximum and average levels for long-term speech and
-    // noise metrics.
-    virtual int GetSpeechAndNoiseSummary(LevelStatistics& stats) = 0;
-
     // Gets minimum, maximum and average levels for long-term echo metrics.
     virtual int GetEchoMetricSummary(EchoStatistics& stats) = 0;
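With the speech/noise summary gone, the long-term echo summary is the remaining call-report metric. A minimal, hedged usage sketch (interface-fetch pattern and namespace assumed, as in the sketch above):

    #include "voe_call_report.h"

    // Hedged sketch: fetch the long-term echo metric summary. Echo metrics
    // must have been enabled via VoEAudioProcessing::SetEchoMetricsStatus(true)
    // for the values to be meaningful; in loopback they stay at -100 per the
    // VoE tests further down.
    void EchoSummarySketch(webrtc::VoiceEngine* voe) {
      webrtc::VoECallReport* report = webrtc::VoECallReport::GetInterface(voe);
      webrtc::EchoStatistics echo;  // min/max/average per echo metric
      report->GetEchoMetricSummary(echo);
      report->Release();
    }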

View File

@@ -946,148 +946,56 @@ int VoEAudioProcessingImpl::VoiceActivityIndicator(int channel)
     return activity;
 }

-int VoEAudioProcessingImpl::SetMetricsStatus(bool enable)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
-                 "SetMetricsStatus(enable=%d)", enable);
-    ANDROID_NOT_SUPPORTED();
-    IPHONE_NOT_SUPPORTED();
-
-    if (!_engineStatistics.Initialized())
-    {
-        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    if ((_audioProcessingModulePtr->level_estimator()->Enable(enable)!= 0) ||
-        (_audioProcessingModulePtr->echo_cancellation()->enable_metrics(enable)
-        != 0))
-    {
-        _engineStatistics.SetLastError(
-            VE_APM_ERROR, kTraceError,
-            "SetMetricsStatus() unable to set metrics mode");
-        return -1;
-    }
-    return 0;
-}
+int VoEAudioProcessingImpl::SetEchoMetricsStatus(bool enable) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+               "SetEchoMetricsStatus(enable=%d)", enable);
+  ANDROID_NOT_SUPPORTED();
+  IPHONE_NOT_SUPPORTED();
+
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+  if (!_engineStatistics.Initialized()) {
+    _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  if (_audioProcessingModulePtr->echo_cancellation()->enable_metrics(enable) !=
+      0) {
+    _engineStatistics.SetLastError(
+        VE_APM_ERROR, kTraceError,
+        "SetEchoMetricsStatus() unable to set echo metrics mode");
+    return -1;
+  }
+  return 0;
+#else
+  _engineStatistics.SetLastError(
+      VE_FUNC_NOT_SUPPORTED, kTraceError, "SetEcStatus() EC is not supported");
+  return -1;
+#endif
+}

-int VoEAudioProcessingImpl::GetMetricsStatus(bool& enabled)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
-                 "GetMetricsStatus(enabled=?)");
-    ANDROID_NOT_SUPPORTED();
-    IPHONE_NOT_SUPPORTED();
-
-    if (!_engineStatistics.Initialized())
-    {
-        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    bool levelMode =
-        _audioProcessingModulePtr->level_estimator()->is_enabled();
-    bool echoMode =
-        _audioProcessingModulePtr->echo_cancellation()->are_metrics_enabled();
-    if (levelMode != echoMode)
-    {
-        _engineStatistics.SetLastError(
-            VE_APM_ERROR, kTraceError,
-            "GetMetricsStatus() level mode and echo mode are not the same");
-        return -1;
-    }
-    enabled = levelMode;
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
-                 "GetMetricsStatus() => enabled=%d", enabled);
-    return 0;
-}
+int VoEAudioProcessingImpl::GetEchoMetricsStatus(bool& enabled) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+               "GetEchoMetricsStatus(enabled=?)");
+  ANDROID_NOT_SUPPORTED();
+  IPHONE_NOT_SUPPORTED();
+
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+  if (!_engineStatistics.Initialized()) {
+    _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  enabled =
+      _audioProcessingModulePtr->echo_cancellation()->are_metrics_enabled();
+
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "GetEchoMetricsStatus() => enabled=%d", enabled);
+  return 0;
+#else
+  _engineStatistics.SetLastError(
+      VE_FUNC_NOT_SUPPORTED, kTraceError, "SetEcStatus() EC is not supported");
+  return -1;
+#endif
+}

-int VoEAudioProcessingImpl::GetSpeechMetrics(int& levelTx, int& levelRx)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
-                 "GetSpeechMetrics(levelTx=?, levelRx=?)");
-    ANDROID_NOT_SUPPORTED();
-    IPHONE_NOT_SUPPORTED();
-
-    if (!_engineStatistics.Initialized())
-    {
-        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    LevelEstimator::Metrics levelMetrics;
-    LevelEstimator::Metrics reverseLevelMetrics;
-    bool levelMode = _audioProcessingModulePtr->level_estimator()->is_enabled();
-    if (levelMode == false)
-    {
-        _engineStatistics.SetLastError(
-            VE_APM_ERROR, kTraceWarning,
-            "GetSpeechMetrics() AudioProcessingModule level metrics is "
-            "not enabled");
-        return -1;
-    }
-    if (_audioProcessingModulePtr->level_estimator()->GetMetrics(
-        &levelMetrics, &reverseLevelMetrics))
-    {
-        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
-                     "GetSpeechMetrics(), AudioProcessingModule level metrics"
-                     " error");
-        return -1;
-    }
-    levelTx = levelMetrics.speech.instant;
-    levelRx = reverseLevelMetrics.speech.instant;
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
-                 "GetSpeechMetrics() => levelTx=%d, levelRx=%d",
-                 levelTx, levelRx);
-    return 0;
-}
-
-int VoEAudioProcessingImpl::GetNoiseMetrics(int& levelTx, int& levelRx)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
-                 "GetNoiseMetrics(levelTx=?, levelRx=?)");
-    ANDROID_NOT_SUPPORTED();
-    IPHONE_NOT_SUPPORTED();
-
-    if (!_engineStatistics.Initialized())
-    {
-        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    bool levelMode =
-        _audioProcessingModulePtr->level_estimator()->is_enabled();
-    LevelEstimator::Metrics levelMetrics;
-    LevelEstimator::Metrics reverseLevelMetrics;
-    if (levelMode == false)
-    {
-        _engineStatistics.SetLastError(
-            VE_APM_ERROR, kTraceWarning,
-            "GetNoiseMetrics() AudioProcessingModule level metrics is not"
-            "enabled");
-        return -1;
-    }
-    if (_audioProcessingModulePtr->level_estimator()->GetMetrics(
-        &levelMetrics, &reverseLevelMetrics))
-    {
-        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
-                     "GetNoiseMetrics(), AudioProcessingModule level metrics"
-                     " error");
-        return -1;
-    }
-    levelTx = levelMetrics.noise.instant;
-    levelRx = reverseLevelMetrics.noise.instant;
-    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
-                 "GetNoiseMetrics() => levelTx=%d, levelRx=%d", levelTx, levelRx);
-    return 0;
-}

 int VoEAudioProcessingImpl::GetEchoMetrics(int& ERL,
@@ -1095,7 +1003,7 @@ int VoEAudioProcessingImpl::GetEchoMetrics(int& ERL,
                                            int& RERL,
                                            int& A_NLP)
 {
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
                  "GetEchoMetrics(ERL=?, ERLE=?, RERL=?, A_NLP=?)");
     ANDROID_NOT_SUPPORTED();
     IPHONE_NOT_SUPPORTED();

View File

@@ -70,13 +70,9 @@ public:
     virtual int VoiceActivityIndicator(int channel);

-    virtual int SetMetricsStatus(bool enable);
-    virtual int GetMetricsStatus(bool& enabled);
+    virtual int SetEchoMetricsStatus(bool enable);
+    virtual int GetEchoMetricsStatus(bool& enabled);

-    virtual int GetSpeechMetrics(int& levelTx, int& levelRx);
-
-    virtual int GetNoiseMetrics(int& levelTx, int& levelRx);
-
     virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP);

View File

@@ -497,16 +497,6 @@ int VoEBaseImpl::Init(AudioDeviceModule* external_adm)
                                        "Init() failed to set AGC state for "
                                        "AP module");
     }
-    // Level Metrics
-    if (_audioProcessingModulePtr->level_estimator()->Enable(
-        WEBRTC_VOICE_ENGINE_LEVEL_ESTIMATOR_DEFAULT_STATE)
-        != 0)
-    {
-        _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
-                                       "Init() failed to set Level "
-                                       "Estimator state for AP"
-                                       "module");
-    }
     // VAD
     if (_audioProcessingModulePtr->voice_detection()->Enable(
         WEBRTC_VOICE_ENGINE_VAD_DEFAULT_STATE)

View File

@@ -85,35 +85,23 @@ int VoECallReportImpl::ResetCallReportStatistics(int channel)
     }
     assert(_audioProcessingModulePtr != NULL);

-    bool levelMode =
-        _audioProcessingModulePtr->level_estimator()->is_enabled();
     bool echoMode =
         _audioProcessingModulePtr->echo_cancellation()->are_metrics_enabled();
-    // We always set the same mode for the level and echo
-    if (levelMode != echoMode)
-    {
-        _engineStatistics.SetLastError(VE_APM_ERROR, kTraceError,
-                                       "ResetCallReportStatistics() level mode "
-                                       "and echo mode are not the same");
-        return -1;
-    }

     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
-                 " current AudioProcessingModule metric currentState %d",
-                 levelMode);
+                 " current AudioProcessingModule echo metric currentState %d",
+                 echoMode);
     // Reset the APM statistics
-    if ((_audioProcessingModulePtr->level_estimator()->Enable(true) != 0)
-        || (_audioProcessingModulePtr->echo_cancellation()->enable_metrics(true)
-        != 0))
+    if (_audioProcessingModulePtr->echo_cancellation()->enable_metrics(true)
+        != 0)
     {
         _engineStatistics.SetLastError(VE_APM_ERROR, kTraceError,
                                        "ResetCallReportStatistics() unable to "
-                                       "set the AudioProcessingModule metrics "
-                                       "state");
+                                       "set the AudioProcessingModule echo "
+                                       "metrics state");
         return -1;
     }
     // Restore metric states
-    _audioProcessingModulePtr->level_estimator()->Enable(levelMode);
     _audioProcessingModulePtr->echo_cancellation()->enable_metrics(echoMode);

     // Reset channel dependent statistics
@@ -156,102 +144,6 @@ int VoECallReportImpl::ResetCallReportStatistics(int channel)
     return 0;
 }

-int VoECallReportImpl::GetSpeechAndNoiseSummary(LevelStatistics& stats)
-{
-    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
-                 "GetSpeechAndNoiseSummary()");
-    ANDROID_NOT_SUPPORTED();IPHONE_NOT_SUPPORTED();
-
-    if (!_engineStatistics.Initialized())
-    {
-        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    assert(_audioProcessingModulePtr != NULL);
-
-    return (GetSpeechAndNoiseSummaryInternal(stats));
-}
-
-int VoECallReportImpl::GetSpeechAndNoiseSummaryInternal(LevelStatistics& stats)
-{
-    int ret(0);
-    bool mode(false);
-    LevelEstimator::Metrics metrics;
-    LevelEstimator::Metrics reverseMetrics;
-
-    // Ensure that level metrics is enabled
-    mode = _audioProcessingModulePtr->level_estimator()->is_enabled();
-    if (mode != false)
-    {
-        ret = _audioProcessingModulePtr->level_estimator()->GetMetrics(
-            &metrics, &reverseMetrics);
-        if (ret != 0)
-        {
-            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                         " GetSpeechAndNoiseSummary(), AudioProcessingModule "
-                         "level metrics error");
-        }
-    }
-    else
-    {
-        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                     " GetSpeechAndNoiseSummary(), AudioProcessingModule level "
-                     "metrics is not enabled");
-    }
-
-    if ((ret != 0) || (mode == false))
-    {
-        // Mark complete struct as invalid (-100 dBm0)
-        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                     " unable to retrieve level metrics from the "
-                     "AudioProcessingModule");
-        stats.noise_rx.min = -100;
-        stats.noise_rx.max = -100;
-        stats.noise_rx.average = -100;
-        stats.speech_rx.min = -100;
-        stats.speech_rx.max = -100;
-        stats.speech_rx.average = -100;
-        stats.noise_tx.min = -100;
-        stats.noise_tx.max = -100;
-        stats.noise_tx.average = -100;
-        stats.speech_tx.min = -100;
-        stats.speech_tx.max = -100;
-        stats.speech_tx.average = -100;
-    }
-    else
-    {
-        // Deliver output results to user
-        stats.noise_rx.min = reverseMetrics.noise.minimum;
-        stats.noise_rx.max = reverseMetrics.noise.maximum;
-        stats.noise_rx.average = reverseMetrics.noise.average;
-        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
-                     " noise_rx: min=%d, max=%d, avg=%d", stats.noise_rx.min,
-                     stats.noise_rx.max, stats.noise_rx.average);
-        stats.noise_tx.min = metrics.noise.minimum;
-        stats.noise_tx.max = metrics.noise.maximum;
-        stats.noise_tx.average = metrics.noise.average;
-        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
-                     " noise_tx: min=%d, max=%d, avg=%d", stats.noise_tx.min,
-                     stats.noise_tx.max, stats.noise_tx.average);
-        stats.speech_rx.min = reverseMetrics.speech.minimum;
-        stats.speech_rx.max = reverseMetrics.speech.maximum;
-        stats.speech_rx.average = reverseMetrics.speech.average;
-        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
-                     " speech_rx: min=%d, max=%d, avg=%d", stats.speech_rx.min,
-                     stats.speech_rx.max, stats.speech_rx.average);
-        stats.speech_tx.min = metrics.speech.minimum;
-        stats.speech_tx.max = metrics.speech.maximum;
-        stats.speech_tx.average = metrics.speech.average;
-        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
-                     " speech_tx: min=%d, max=%d, avg=%d", stats.speech_tx.min,
-                     stats.speech_tx.max, stats.speech_tx.average);
-    }
-
-    return 0;
-}

 int VoECallReportImpl::GetEchoMetricSummary(EchoStatistics& stats)
 {
     WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
@@ -499,33 +391,6 @@ int VoECallReportImpl::WriteReportToFile(const char* fileNameUTF8)
     delete[] channelsArray;

-    LevelStatistics stats;
-    GetSpeechAndNoiseSummary(stats);
-    _file.WriteText("\nLong-term Speech Levels\n");
-    _file.WriteText("-----------------------\n\n");
-    _file.WriteText("Transmitting side:\n");
-    _file.WriteText(" min:%5d [dBm0]\n", stats.speech_tx.min);
-    _file.WriteText(" max:%5d [dBm0]\n", stats.speech_tx.max);
-    _file.WriteText(" avg:%5d [dBm0]\n", stats.speech_tx.average);
-    _file.WriteText("\nReceiving side:\n");
-    _file.WriteText(" min:%5d [dBm0]\n", stats.speech_rx.min);
-    _file.WriteText(" max:%5d [dBm0]\n", stats.speech_rx.max);
-    _file.WriteText(" avg:%5d [dBm0]\n", stats.speech_rx.average);
-    _file.WriteText("\nLong-term Noise Levels\n");
-    _file.WriteText("----------------------\n\n");
-    _file.WriteText("Transmitting side:\n");
-    _file.WriteText(" min:%5d [dBm0]\n", stats.noise_tx.min);
-    _file.WriteText(" max:%5d [dBm0]\n", stats.noise_tx.max);
-    _file.WriteText(" avg:%5d [dBm0]\n", stats.noise_tx.average);
-    _file.WriteText("\nReceiving side:\n");
-    _file.WriteText(" min:%5d [dBm0]\n", stats.noise_rx.min);
-    _file.WriteText(" max:%5d [dBm0]\n", stats.noise_rx.max);
-    _file.WriteText(" avg:%5d [dBm0]\n", stats.noise_rx.average);
-
     EchoStatistics echo;
     GetEchoMetricSummary(echo);

View File

@@ -30,8 +30,6 @@ public:
     virtual int ResetCallReportStatistics(int channel);

-    virtual int GetSpeechAndNoiseSummary(LevelStatistics& stats);
-
     virtual int GetEchoMetricSummary(EchoStatistics& stats);

     virtual int GetRoundTripTimeSummary(int channel,

View File

@@ -263,8 +263,7 @@ int VoERTP_RTCPImpl::SetRTPAudioLevelIndicationStatus(int channel,
     }

     // Set AudioProcessingModule level-metric mode based on user input.
-    // Note that this setting may conflict with the
-    // AudioProcessing::SetMetricsStatus API.
+    // Note that the Level Estimator component is currently not supported
     if (_audioProcessingModulePtr->level_estimator()->Enable(enable) != 0)
     {
         _engineStatistics.SetLastError(
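This is the one remaining level_estimator() call mentioned in the commit message. A hedged sketch of the application-side path that reaches it (interface-fetch pattern assumed; per the RTP/RTCP tests further down, extension IDs above kVoiceEngineMaxRtpExtensionId are rejected):

    #include "voe_rtp_rtcp.h"

    // Hedged sketch: toggling the audio-level RTP header extension on
    // channel 0. Internally this still calls level_estimator()->Enable();
    // with the level estimator unsupported, enabling may report an APM error.
    void AudioLevelIndicationSketch(webrtc::VoiceEngine* voe) {
      webrtc::VoERTP_RTCP* rtp_rtcp = webrtc::VoERTP_RTCP::GetInterface(voe);
      rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, true, 1);  // IDs above 14 are rejected
      rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, false);    // turn it back off
      rtp_rtcp->Release();
    }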

View File

@@ -139,9 +139,6 @@ enum { kVoiceEngineMaxRtpExtensionId = 14 };
 // AudioProcessing AGC on
 #define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
 // AudioProcessing EC off
-#define WEBRTC_VOICE_ENGINE_LEVEL_ESTIMATOR_DEFAULT_STATE \
-    WEBRTC_AUDIO_PROCESSING_OFF
-// AudioProcessing Estimator off
 #define WEBRTC_VOICE_ENGINE_VAD_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
 // AudioProcessing off
 #define WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF

View File

@@ -1646,6 +1646,8 @@ int VoEExtendedTest::TestCallReport()
   VoEBase* base = _mgr.BasePtr();
   VoECallReport* report = _mgr.CallReportPtr();
   VoEFile* file = _mgr.FilePtr();
+  VoEAudioProcessing* apm = _mgr.APMPtr();
+  VoENetwork* netw = _mgr.NetworkPtr();

   PrepareTest("CallReport");
@@ -1680,8 +1682,6 @@ int VoEExtendedTest::TestCallReport()
   ///////////////////////////
   // Actual test starts here

-  // TODO(xians), enable the tests when APM is ready
-  /*
   TEST(ResetCallReportStatistics);
   ANL();
   TEST_MUSTPASS(!report->ResetCallReportStatistics(-2));
@@ -1695,51 +1695,19 @@ int VoEExtendedTest::TestCallReport()
   AOK();
   ANL();

-  LevelStatistics stats;
-  bool enabled;
+  bool enabled = false;

-  TEST(GetSpeechAndNoiseSummary);
-  ANL();
-  TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
-  TEST_MUSTPASS(enabled != false);
-  // All values should be -100 dBm0 when metrics are disabled
-  TEST_MUSTPASS(report->GetSpeechAndNoiseSummary(stats));
-  MARK();
-  TEST_MUSTPASS(stats.noise_rx.min != -100);
-  TEST_MUSTPASS(stats.noise_rx.max != -100);
-  TEST_MUSTPASS(stats.noise_rx.average != -100);
-  TEST_MUSTPASS(stats.noise_tx.min != -100);
-  TEST_MUSTPASS(stats.noise_tx.max != -100);
-  TEST_MUSTPASS(stats.noise_tx.average != -100);
-  TEST_MUSTPASS(stats.speech_rx.min != -100);
-  TEST_MUSTPASS(stats.speech_rx.max != -100);
-  TEST_MUSTPASS(stats.speech_rx.average != -100);
-  TEST_MUSTPASS(stats.speech_tx.min != -100);
-  TEST_MUSTPASS(stats.speech_tx.max != -100);
-  TEST_MUSTPASS(stats.speech_tx.average != -100);
-  //
-  TEST_MUSTPASS(apm->SetMetricsStatus(true));
-  SLEEP(7000);
-  // All values should *not* be -100 dBm0 when metrics are enabled (check
-  // Rx side only since user might be silent)
-  TEST_MUSTPASS(report->GetSpeechAndNoiseSummary(stats));
-  MARK();
-  TEST_MUSTPASS(stats.noise_rx.min == -100);
-  TEST_MUSTPASS(stats.noise_rx.max == -100);
-  TEST_MUSTPASS(stats.noise_rx.average == -100);
-  TEST_MUSTPASS(stats.speech_rx.min == -100);
-  TEST_MUSTPASS(stats.speech_rx.max == -100);
-  TEST_MUSTPASS(stats.speech_rx.average == -100);
-  AOK();
-  ANL();
-
   EchoStatistics echo;
   TEST(GetEchoMetricSummary);
   ANL();
+  TEST_MUSTPASS(apm->GetEchoMetricsStatus(enabled));
+  TEST_MUSTPASS(enabled != false);
+  TEST_MUSTPASS(apm->SetEchoMetricsStatus(true));
   TEST_MUSTPASS(report->GetEchoMetricSummary(echo));  // all outputs will be
                                                       // -100 in loopback (skip further tests)
   AOK();
   ANL();

+  // TODO(xians): investigate the cause of test failure before enabling.
+  /*
   StatVal delays;
   TEST(GetRoundTripTimeSummary);
   ANL();
@@ -1748,20 +1716,21 @@ int VoEExtendedTest::TestCallReport()
   MARK();
   TEST_MUSTPASS(delays.min == -1);
   TEST_MUSTPASS(delays.max == -1);
-  TEST_MUSTPASS(delays.max == -1);
+  TEST_MUSTPASS(delays.average == -1);
   rtp_rtcp->SetRTCPStatus(0, false);
   // All values should be -1 since RTCP is off
   TEST_MUSTPASS(report->GetRoundTripTimeSummary(0, delays));
   MARK();
   TEST_MUSTPASS(delays.min != -1);
   TEST_MUSTPASS(delays.max != -1);
-  TEST_MUSTPASS(delays.max != -1);
+  TEST_MUSTPASS(delays.average != -1);
   rtp_rtcp->SetRTCPStatus(0, true);
   AOK();
   ANL();
+  */

-  int nDead(0);
-  int nAlive(0);
+  int nDead = 0;
+  int nAlive = 0;
   TEST(GetDeadOrAliveSummary);
   ANL();
   // All results should be -1 since dead-or-alive is not active
@@ -1806,7 +1775,7 @@ int VoEExtendedTest::TestCallReport()
   MARK();  // should work with UTF-8 as well (κλνξ.txt)
   AOK();
   ANL();
-  */

   TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
   TEST_MUSTPASS(base->StopSend(0));
   TEST_MUSTPASS(base->StopPlayout(0));
@@ -6952,6 +6921,8 @@ int VoEExtendedTest::TestRTP_RTCP()
   TEST_ERROR(VE_INVALID_ARGUMENT);
   TEST_MUSTPASS(-1 != rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, false, 15));
   MARK();
+  // TODO(bjornv): Activate tests below when APM supports level estimation.
+  /*
   TEST_MUSTPASS(-1 != rtp_rtcp->SetRTPAudioLevelIndicationStatus(1, true, 5));
   MARK();
   TEST_ERROR(VE_CHANNEL_NOT_VALID);
@@ -6978,6 +6949,7 @@ int VoEExtendedTest::TestRTP_RTCP()
   // disable audio-level-rtp-header-extension
   TEST_MUSTPASS(rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, false));
+  */
   MARK();
   ANL();
@@ -8145,67 +8117,32 @@ digitalCompressionGaindBDefault);
   SLEEP(NSSleep);

   //////////////////////////////////
-  // Speech, Noise and Echo Metrics
+  // Echo Metrics

 #if (!defined(MAC_IPHONE) && !defined(WEBRTC_ANDROID))
-  // TODO(xians), enable the tests when APM is ready
-  /*
-  TEST(GetMetricsStatus);
+  TEST(GetEchoMetricsStatus);
   ANL();
-  TEST(SetMetricsStatus);
+  TEST(SetEchoMetricsStatus);
   ANL();
-  TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
+  TEST_MUSTPASS(apm->GetEchoMetricsStatus(enabled));
   MARK();
   TEST_MUSTPASS(enabled != false);
   MARK();  // should be OFF by default
-  TEST_MUSTPASS(apm->SetMetricsStatus(true));
+  TEST_MUSTPASS(apm->SetEchoMetricsStatus(true));
   MARK();
-  TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
+  TEST_MUSTPASS(apm->GetEchoMetricsStatus(enabled));
   MARK();
   TEST_MUSTPASS(enabled != true);
   MARK();
-  TEST_MUSTPASS(apm->SetMetricsStatus(false));
+  TEST_MUSTPASS(apm->SetEchoMetricsStatus(false));
   MARK();
-  TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
+  TEST_MUSTPASS(apm->GetEchoMetricsStatus(enabled));
   MARK();
   TEST_MUSTPASS(enabled != false);
   MARK();
   AOK();
   ANL();

-  TEST(GetSpeechMetrics);
-  ANL();
-  int levelTx, levelRx;
-  TEST_MUSTPASS(-1 != apm->GetSpeechMetrics(levelTx, levelRx));
-  MARK();  // should fail since not activated
-  err = base->LastError();
-  TEST_MUSTPASS(err != VE_APM_ERROR);
-  TEST_MUSTPASS(apm->SetMetricsStatus(true));
-  TEST_MUSTPASS(apm->GetSpeechMetrics(levelTx, levelRx));
-  MARK();
-  TEST_LOG("\nSpeech: levelTx=%d, levelRx=%d [dBm0]\n",
-           levelTx, levelTx);
-  TEST_MUSTPASS(apm->SetMetricsStatus(false));
-  AOK();
-  ANL();
-
-  TEST(GetNoiseMetrics);
-  ANL();
-  TEST_MUSTPASS(-1 != apm->GetNoiseMetrics(levelTx, levelRx));
-  MARK();  // should fail since not activated
-  err = base->LastError();
-  TEST_MUSTPASS(err != VE_APM_ERROR);
-  TEST_MUSTPASS(apm->SetMetricsStatus(true));
-  TEST_MUSTPASS(apm->GetNoiseMetrics(levelTx, levelRx));
-  MARK();
-  TEST_LOG("\nNoise: levelTx=%d, levelRx=%d [dBm0]\n",
-           levelTx, levelTx);
-  TEST_MUSTPASS(apm->SetMetricsStatus(false));
-  AOK( );
-  ANL();

   TEST(GetEchoMetrics);
   ANL();
@@ -8214,7 +8151,7 @@ digitalCompressionGaindBDefault);
   MARK();  // should fail since not activated
   err = base->LastError();
   TEST_MUSTPASS(err != VE_APM_ERROR);
-  TEST_MUSTPASS(apm->SetMetricsStatus(true));
+  TEST_MUSTPASS(apm->SetEchoMetricsStatus(true));
   TEST_MUSTPASS(-1 != apm->GetEchoMetrics(ERL, ERLE, RERL, A_NLP));
   MARK();  // should fail since AEC is off
   err = base->LastError();
@@ -8225,11 +8162,11 @@ digitalCompressionGaindBDefault);
   TEST_LOG(
       "\nEcho: ERL=%d, ERLE=%d, RERL=%d, A_NLP=%d [dB]\n",
       ERL, ERLE, RERL, A_NLP);
-  TEST_MUSTPASS(apm->SetMetricsStatus(false));
+  TEST_MUSTPASS(apm->SetEchoMetricsStatus(false));
   TEST_MUSTPASS(apm->SetEcStatus(false));
   AOK();
   ANL();
-  */

 #endif  // #if (!defined(MAC_IPHONE) && !defined(WEBRTC_ANDROID))

   // far-end AudioProcessing
   ///////
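The re-enabled test above encodes an ordering requirement: echo metrics must be switched on and the AEC must be running before GetEchoMetrics() returns useful values (it fails with VE_APM_ERROR otherwise). A hedged sketch of that ordering, reusing the names from the test (kEcAec as used there; not part of the diff):

    // Hedged sketch of the ordering the updated test expects.
    void EchoMetricsOrderingSketch(webrtc::VoEAudioProcessing* apm) {
      apm->SetEchoMetricsStatus(true);         // 1. enable echo metric collection
      apm->SetEcStatus(true, webrtc::kEcAec);  // 2. AEC must be active for valid metrics
      int ERL = 0, ERLE = 0, RERL = 0, A_NLP = 0;
      apm->GetEchoMetrics(ERL, ERLE, RERL, A_NLP);  // 3. instantaneous values in dB
      apm->SetEchoMetricsStatus(false);
      apm->SetEcStatus(false);
    }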

View File

@@ -2789,44 +2789,36 @@ int VoETestManager::DoStandardTest()
   TEST_LOG("Skipping NS tests - WEBRTC_VOICE_ENGINE_NR not defined \n");
 #endif  // #ifdef WEBRTC_VOICE_ENGINE_NR

-  // TODO(xians), enable the metrics test when APM is ready
-  /*
 #if (!defined(MAC_IPHONE) && !defined(WEBRTC_ANDROID) && defined(WEBRTC_VOICE_ENGINE_NR))
-  TEST_LOG("Speech, Noise and Echo Metric calls\n");
-  TEST_MUSTPASS(apm->GetMetricsStatus(enabled));  // check default
-  TEST_MUSTPASS(enabled != false);
-  TEST_MUSTPASS(apm->SetMetricsStatus(true));  // enable metrics
 #ifdef WEBRTC_VOICE_ENGINE_ECHO
+  bool enabled = false;
+  TEST_LOG("Echo Metric calls\n");
+  TEST_MUSTPASS(apm->GetEchoMetricsStatus(enabled));  // check default
+  TEST_MUSTPASS(enabled != false);
+  TEST_MUSTPASS(apm->SetEchoMetricsStatus(true));  // enable echo metrics
   // must enable AEC to get valid echo metrics
   TEST_MUSTPASS(apm->SetEcStatus(true, kEcAec));
-#endif
-  TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
+  TEST_MUSTPASS(apm->GetEchoMetricsStatus(enabled));
   TEST_MUSTPASS(enabled != true);
   TEST_LOG("Speak into microphone and check metrics for 10 seconds...\n");
-  int speech_tx, speech_rx;
-  int noise_tx, noise_rx;
-#ifdef WEBRTC_VOICE_ENGINE_ECHO
   int ERLE, ERL, RERL, A_NLP;
-#endif
   for (int t = 0; t < 5; t++)
   {
     SLEEP(2000);
-    TEST_MUSTPASS(apm->GetSpeechMetrics(speech_tx, speech_rx));
-    TEST_LOG(" Speech: Tx=%5d, Rx=%5d [dBm0]\n", speech_tx, speech_rx);
-    TEST_MUSTPASS(apm->GetNoiseMetrics(noise_tx, noise_rx));
-    TEST_LOG(" Noise : Tx=%5d, Rx=%5d [dBm0]\n", noise_tx, noise_rx);
-#ifdef WEBRTC_VOICE_ENGINE_ECHO
     TEST_MUSTPASS(apm->GetEchoMetrics(ERL, ERLE, RERL, A_NLP));
     TEST_LOG(" Echo : ERL=%5d, ERLE=%5d, RERL=%5d, A_NLP=%5d [dB]\n",
              ERL, ERLE, RERL, A_NLP);
-#endif
   }
-  TEST_MUSTPASS(apm->SetMetricsStatus(false));  // disable metrics
+  TEST_MUSTPASS(apm->SetEchoMetricsStatus(false));  // disable echo metrics
+#else
+  TEST_LOG("Skipping echo metrics tests -"
+           " WEBRTC_VOICE_ENGINE_ECHO not defined \n");
+#endif  // #ifdef WEBRTC_VOICE_ENGINE_ECHO
 #else
   TEST_LOG("Skipping apm metrics tests - MAC_IPHONE/WEBRTC_ANDROID defined \n");
 #endif  // #if (!defined(MAC_IPHONE) && !d...
-  */

   // VAD/DTX indication
   TEST_LOG("Get voice activity indication \n");
   if (codec)
@@ -3256,8 +3248,6 @@ int VoETestManager::DoStandardTest()
 #ifdef _TEST_CALL_REPORT_
   TEST_LOG("\n\n+++ CallReport tests +++\n\n");
 #if (defined(WEBRTC_VOICE_ENGINE_ECHO) && defined(WEBRTC_VOICE_ENGINE_NR))
-  // TODO(xians), enale the tests when APM is ready
-  /*
   TEST(ResetCallReportStatistics);ANL();
   TEST_MUSTPASS(!report->ResetCallReportStatistics(-2));
   TEST_MUSTPASS(!report->ResetCallReportStatistics(1));
@@ -3265,36 +3255,10 @@ int VoETestManager::DoStandardTest()
   TEST_MUSTPASS(report->ResetCallReportStatistics(-1));

   bool onOff;
-  LevelStatistics stats;
-  TEST_MUSTPASS(apm->GetMetricsStatus(onOff));
+  TEST_MUSTPASS(apm->GetEchoMetricsStatus(onOff));
   TEST_MUSTPASS(onOff != false);
-  // All values should be -100 dBm0 when metrics are disabled
-  TEST(GetSpeechAndNoiseSummary);ANL();
-  TEST_MUSTPASS(report->GetSpeechAndNoiseSummary(stats));
-  TEST_MUSTPASS(stats.noise_rx.min != -100);
-  TEST_MUSTPASS(stats.noise_rx.max != -100);
-  TEST_MUSTPASS(stats.noise_rx.average != -100);
-  TEST_MUSTPASS(stats.noise_tx.min != -100);
-  TEST_MUSTPASS(stats.noise_tx.max != -100);
-  TEST_MUSTPASS(stats.noise_tx.average != -100);
-  TEST_MUSTPASS(stats.speech_rx.min != -100);
-  TEST_MUSTPASS(stats.speech_rx.max != -100);
-  TEST_MUSTPASS(stats.speech_rx.average != -100);
-  TEST_MUSTPASS(stats.speech_tx.min != -100);
-  TEST_MUSTPASS(stats.speech_tx.max != -100);
-  TEST_MUSTPASS(stats.speech_tx.average != -100);
-  TEST_MUSTPASS(apm->SetMetricsStatus(true));
+  TEST_MUSTPASS(apm->SetEchoMetricsStatus(true));
   SLEEP(3000);
-  // All values should *not* be -100 dBm0 when metrics are enabled
-  // (check Rx side only since user might be silent)
-  TEST_MUSTPASS(report->GetSpeechAndNoiseSummary(stats));
-  TEST_MUSTPASS(stats.noise_rx.min == -100);
-  TEST_MUSTPASS(stats.noise_rx.max == -100);
-  TEST_MUSTPASS(stats.noise_rx.average == -100);
-  TEST_MUSTPASS(stats.speech_rx.min == -100);
-  TEST_MUSTPASS(stats.speech_rx.max == -100);
-  TEST_MUSTPASS(stats.speech_rx.average == -100);
   EchoStatistics echo;
   TEST(GetEchoMetricSummary);ANL();
   // all outputs will be -100 in loopback (skip further tests)
@@ -3307,17 +3271,17 @@ int VoETestManager::DoStandardTest()
   TEST_MUSTPASS(report->GetRoundTripTimeSummary(0, delays));
   TEST_MUSTPASS(delays.min != -1);
   TEST_MUSTPASS(delays.max != -1);
-  TEST_MUSTPASS(delays.max != -1);
+  TEST_MUSTPASS(delays.average != -1);
   rtp_rtcp->SetRTCPStatus(0, true);
   SLEEP(5000);  // gives time for RTCP
   TEST_MUSTPASS(report->GetRoundTripTimeSummary(0, delays));
   TEST_MUSTPASS(delays.min == -1);
   TEST_MUSTPASS(delays.max == -1);
-  TEST_MUSTPASS(delays.max == -1);
+  TEST_MUSTPASS(delays.average == -1);
   rtp_rtcp->SetRTCPStatus(0, false);

-  int nDead;
-  int nAlive;
+  int nDead = 0;
+  int nAlive = 0;
   // -1 will be returned since dead-or-alive is not active
   TEST(GetDeadOrAliveSummary);ANL();
   TEST_MUSTPASS(report->GetDeadOrAliveSummary(0, nDead, nAlive) != -1);
@@ -3333,7 +3297,6 @@ int VoETestManager::DoStandardTest()
   TEST(WriteReportToFile);ANL();
   TEST_MUSTPASS(!report->WriteReportToFile(NULL));
   TEST_MUSTPASS(report->WriteReportToFile("call_report.txt"));
-  */
 #else
   TEST_LOG("Skipping CallReport tests since both EC and NS are required\n");
 #endif