From de66b91274440b1dda0545eb2024fb261824eac8 Mon Sep 17 00:00:00 2001 From: "kma@webrtc.org" Date: Wed, 1 Feb 2012 18:39:44 +0000 Subject: [PATCH] In voice engine, added member audioFrame to classes AudioCodingModuleImpl and VoEBaseImpl, and touched VoEBaseImpl::NeedMorePlayData and AudioCodingModuleImpl::PlayoutData10Ms(), for performance reasons in Android platforms. The two functions used about 6% of VoE originally. After the change, the percentage reduced to about 0.2%. Review URL: https://webrtc-codereview.appspot.com/379001 git-svn-id: http://webrtc.googlecode.com/svn/trunk@1589 4adac7df-926f-26a2-2b94-8c16560cd09d --- .../main/source/audio_coding_module_impl.cc | 35 +++++++++---------- .../main/source/audio_coding_module_impl.h | 4 ++- src/voice_engine/main/source/voe_base_impl.cc | 16 ++++----- src/voice_engine/main/source/voe_base_impl.h | 5 ++- 4 files changed, 31 insertions(+), 29 deletions(-) diff --git a/src/modules/audio_coding/main/source/audio_coding_module_impl.cc b/src/modules/audio_coding/main/source/audio_coding_module_impl.cc index 2abe46b7e..38c973db6 100644 --- a/src/modules/audio_coding/main/source/audio_coding_module_impl.cc +++ b/src/modules/audio_coding/main/source/audio_coding_module_impl.cc @@ -2002,25 +2002,24 @@ AudioCodingModuleImpl::PlayoutData10Ms( AudioFrame& audioFrame) { bool stereoMode; - AudioFrame audioFrameTmp; // recOut always returns 10 ms - if (_netEq.RecOut(audioFrameTmp) != 0) + if (_netEq.RecOut(_audioFrame) != 0) { WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id, "PlayoutData failed, RecOut Failed"); return -1; } - audioFrame._audioChannel = audioFrameTmp._audioChannel; - audioFrame._vadActivity = audioFrameTmp._vadActivity; - audioFrame._speechType = audioFrameTmp._speechType; + audioFrame._audioChannel = _audioFrame._audioChannel; + audioFrame._vadActivity = _audioFrame._vadActivity; + audioFrame._speechType = _audioFrame._speechType; - stereoMode = (audioFrameTmp._audioChannel > 1); + stereoMode = 
(_audioFrame._audioChannel > 1); //For stereo playout: // Master and Slave samples are interleaved starting with Master - const WebRtc_UWord16 recvFreq = static_cast<WebRtc_UWord16>(audioFrameTmp._frequencyInHz); + const WebRtc_UWord16 recvFreq = static_cast<WebRtc_UWord16>(_audioFrame._frequencyInHz); bool toneDetected = false; WebRtc_Word16 lastDetectedTone; WebRtc_Word16 tone; @@ -2036,8 +2035,8 @@ AudioCodingModuleImpl::PlayoutData10Ms( { // resample payloadData WebRtc_Word16 tmpLen = _outputResampler.Resample10Msec( - audioFrameTmp._payloadData, recvFreq, audioFrame._payloadData, desiredFreqHz, - audioFrameTmp._audioChannel); + _audioFrame._payloadData, recvFreq, audioFrame._payloadData, desiredFreqHz, + _audioFrame._audioChannel); if(tmpLen < 0) { @@ -2053,11 +2052,11 @@ AudioCodingModuleImpl::PlayoutData10Ms( } else { - memcpy(audioFrame._payloadData, audioFrameTmp._payloadData, - audioFrameTmp._payloadDataLengthInSamples * audioFrame._audioChannel + memcpy(audioFrame._payloadData, _audioFrame._payloadData, + _audioFrame._payloadDataLengthInSamples * audioFrame._audioChannel * sizeof(WebRtc_Word16)); // set the payload length - audioFrame._payloadDataLengthInSamples = audioFrameTmp._payloadDataLengthInSamples; + audioFrame._payloadDataLengthInSamples = _audioFrame._payloadDataLengthInSamples; // set the sampling frequency audioFrame._frequencyInHz = recvFreq; } @@ -2091,22 +2090,22 @@ AudioCodingModuleImpl::PlayoutData10Ms( } else { - // Do the detection on the audio that we got from NetEQ (audioFrameTmp). + // Do the detection on the audio that we got from NetEQ (_audioFrame). 
if(!stereoMode) { - _dtmfDetector->Detect(audioFrameTmp._payloadData, - audioFrameTmp._payloadDataLengthInSamples, recvFreq, + _dtmfDetector->Detect(_audioFrame._payloadData, + _audioFrame._payloadDataLengthInSamples, recvFreq, toneDetected, tone); } else { WebRtc_Word16 masterChannel[WEBRTC_10MS_PCM_AUDIO]; - for(int n = 0; n < audioFrameTmp._payloadDataLengthInSamples; n++) + for(int n = 0; n < _audioFrame._payloadDataLengthInSamples; n++) { - masterChannel[n] = audioFrameTmp._payloadData[n<<1]; + masterChannel[n] = _audioFrame._payloadData[n<<1]; } _dtmfDetector->Detect(masterChannel, - audioFrameTmp._payloadDataLengthInSamples, recvFreq, + _audioFrame._payloadDataLengthInSamples, recvFreq, toneDetected, tone); } } diff --git a/src/modules/audio_coding/main/source/audio_coding_module_impl.h b/src/modules/audio_coding/main/source/audio_coding_module_impl.h index d81d7fc82..365472865 100644 --- a/src/modules/audio_coding/main/source/audio_coding_module_impl.h +++ b/src/modules/audio_coding/main/source/audio_coding_module_impl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 
* * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -370,6 +370,8 @@ private: TimedTrace _trace; #endif + AudioFrame _audioFrame; + #ifdef ACM_QA_TEST FILE* _outgoingPL; FILE* _incomingPL; diff --git a/src/voice_engine/main/source/voe_base_impl.cc b/src/voice_engine/main/source/voe_base_impl.cc index 6fe6ba9af..e1cd08377 100644 --- a/src/voice_engine/main/source/voe_base_impl.cc +++ b/src/voice_engine/main/source/voe_base_impl.cc @@ -259,8 +259,6 @@ WebRtc_Word32 VoEBaseImpl::NeedMorePlayData( assert(_outputMixerPtr != NULL); - AudioFrame audioFrame; - // Perform mixing of all active participants (channel-based mixing) _outputMixerPtr->MixActiveChannels(); @@ -268,20 +266,20 @@ WebRtc_Word32 VoEBaseImpl::NeedMorePlayData( _outputMixerPtr->DoOperationsOnCombinedSignal(); // Retrieve the final output mix (resampled to match the ADM) - _outputMixerPtr->GetMixedAudio(samplesPerSec, nChannels, audioFrame); + _outputMixerPtr->GetMixedAudio(samplesPerSec, nChannels, _audioFrame); - assert(nSamples == audioFrame._payloadDataLengthInSamples); + assert(nSamples == _audioFrame._payloadDataLengthInSamples); assert(samplesPerSec == - static_cast<WebRtc_UWord32>(audioFrame._frequencyInHz)); + static_cast<WebRtc_UWord32>(_audioFrame._frequencyInHz)); // Deliver audio (PCM) samples to the ADM memcpy( (WebRtc_Word16*) audioSamples, - (const WebRtc_Word16*) audioFrame._payloadData, - sizeof(WebRtc_Word16) * (audioFrame._payloadDataLengthInSamples - * audioFrame._audioChannel)); + (const WebRtc_Word16*) _audioFrame._payloadData, + sizeof(WebRtc_Word16) * (_audioFrame._payloadDataLengthInSamples + * _audioFrame._audioChannel)); - nSamplesOut = audioFrame._payloadDataLengthInSamples; + nSamplesOut = _audioFrame._payloadDataLengthInSamples; return 0; } diff --git a/src/voice_engine/main/source/voe_base_impl.h b/src/voice_engine/main/source/voe_base_impl.h index 922187db2..6d470a0ff 100644 --- a/src/voice_engine/main/source/voe_base_impl.h 
+++ b/src/voice_engine/main/source/voe_base_impl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -13,6 +13,7 @@ #include "voe_base.h" +#include "module_common_types.h" #include "ref_count.h" #include "shared_data.h" @@ -142,6 +143,8 @@ private: bool _voiceEngineObserver; WebRtc_UWord32 _oldVoEMicLevel; WebRtc_UWord32 _oldMicLevel; + AudioFrame _audioFrame; + }; } // namespace webrtc