In voice engine, added member _audioFrame to classes AudioCodingModuleImpl and VoEBaseImpl,

and modified VoEBaseImpl::NeedMorePlayData and AudioCodingModuleImpl::PlayoutData10Ms(), for
performance reasons on Android platforms.
The two functions originally accounted for about 6% of VoE's execution time; after the change,
that share dropped to about 0.2%.
Review URL: https://webrtc-codereview.appspot.com/379001

git-svn-id: http://webrtc.googlecode.com/svn/trunk@1589 4adac7df-926f-26a2-2b94-8c16560cd09d
This commit is contained in:
kma@webrtc.org 2012-02-01 18:39:44 +00:00
parent 2b87891901
commit de66b91274
4 changed files with 31 additions and 29 deletions

View File

@ -2002,25 +2002,24 @@ AudioCodingModuleImpl::PlayoutData10Ms(
AudioFrame& audioFrame)
{
bool stereoMode;
AudioFrame audioFrameTmp;
// recOut always returns 10 ms
if (_netEq.RecOut(audioFrameTmp) != 0)
if (_netEq.RecOut(_audioFrame) != 0)
{
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
"PlayoutData failed, RecOut Failed");
return -1;
}
audioFrame._audioChannel = audioFrameTmp._audioChannel;
audioFrame._vadActivity = audioFrameTmp._vadActivity;
audioFrame._speechType = audioFrameTmp._speechType;
audioFrame._audioChannel = _audioFrame._audioChannel;
audioFrame._vadActivity = _audioFrame._vadActivity;
audioFrame._speechType = _audioFrame._speechType;
stereoMode = (audioFrameTmp._audioChannel > 1);
stereoMode = (_audioFrame._audioChannel > 1);
//For stereo playout:
// Master and Slave samples are interleaved starting with Master
const WebRtc_UWord16 recvFreq = static_cast<WebRtc_UWord16>(audioFrameTmp._frequencyInHz);
const WebRtc_UWord16 recvFreq = static_cast<WebRtc_UWord16>(_audioFrame._frequencyInHz);
bool toneDetected = false;
WebRtc_Word16 lastDetectedTone;
WebRtc_Word16 tone;
@ -2036,8 +2035,8 @@ AudioCodingModuleImpl::PlayoutData10Ms(
{
// resample payloadData
WebRtc_Word16 tmpLen = _outputResampler.Resample10Msec(
audioFrameTmp._payloadData, recvFreq, audioFrame._payloadData, desiredFreqHz,
audioFrameTmp._audioChannel);
_audioFrame._payloadData, recvFreq, audioFrame._payloadData, desiredFreqHz,
_audioFrame._audioChannel);
if(tmpLen < 0)
{
@ -2053,11 +2052,11 @@ AudioCodingModuleImpl::PlayoutData10Ms(
}
else
{
memcpy(audioFrame._payloadData, audioFrameTmp._payloadData,
audioFrameTmp._payloadDataLengthInSamples * audioFrame._audioChannel
memcpy(audioFrame._payloadData, _audioFrame._payloadData,
_audioFrame._payloadDataLengthInSamples * audioFrame._audioChannel
* sizeof(WebRtc_Word16));
// set the payload length
audioFrame._payloadDataLengthInSamples = audioFrameTmp._payloadDataLengthInSamples;
audioFrame._payloadDataLengthInSamples = _audioFrame._payloadDataLengthInSamples;
// set the sampling frequency
audioFrame._frequencyInHz = recvFreq;
}
@ -2091,22 +2090,22 @@ AudioCodingModuleImpl::PlayoutData10Ms(
}
else
{
// Do the detection on the audio that we got from NetEQ (audioFrameTmp).
// Do the detection on the audio that we got from NetEQ (_audioFrame).
if(!stereoMode)
{
_dtmfDetector->Detect(audioFrameTmp._payloadData,
audioFrameTmp._payloadDataLengthInSamples, recvFreq,
_dtmfDetector->Detect(_audioFrame._payloadData,
_audioFrame._payloadDataLengthInSamples, recvFreq,
toneDetected, tone);
}
else
{
WebRtc_Word16 masterChannel[WEBRTC_10MS_PCM_AUDIO];
for(int n = 0; n < audioFrameTmp._payloadDataLengthInSamples; n++)
for(int n = 0; n < _audioFrame._payloadDataLengthInSamples; n++)
{
masterChannel[n] = audioFrameTmp._payloadData[n<<1];
masterChannel[n] = _audioFrame._payloadData[n<<1];
}
_dtmfDetector->Detect(masterChannel,
audioFrameTmp._payloadDataLengthInSamples, recvFreq,
_audioFrame._payloadDataLengthInSamples, recvFreq,
toneDetected, tone);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@ -370,6 +370,8 @@ private:
TimedTrace _trace;
#endif
AudioFrame _audioFrame;
#ifdef ACM_QA_TEST
FILE* _outgoingPL;
FILE* _incomingPL;

View File

@ -259,8 +259,6 @@ WebRtc_Word32 VoEBaseImpl::NeedMorePlayData(
assert(_outputMixerPtr != NULL);
AudioFrame audioFrame;
// Perform mixing of all active participants (channel-based mixing)
_outputMixerPtr->MixActiveChannels();
@ -268,20 +266,20 @@ WebRtc_Word32 VoEBaseImpl::NeedMorePlayData(
_outputMixerPtr->DoOperationsOnCombinedSignal();
// Retrieve the final output mix (resampled to match the ADM)
_outputMixerPtr->GetMixedAudio(samplesPerSec, nChannels, audioFrame);
_outputMixerPtr->GetMixedAudio(samplesPerSec, nChannels, _audioFrame);
assert(nSamples == audioFrame._payloadDataLengthInSamples);
assert(nSamples == _audioFrame._payloadDataLengthInSamples);
assert(samplesPerSec ==
static_cast<WebRtc_UWord32>(audioFrame._frequencyInHz));
static_cast<WebRtc_UWord32>(_audioFrame._frequencyInHz));
// Deliver audio (PCM) samples to the ADM
memcpy(
(WebRtc_Word16*) audioSamples,
(const WebRtc_Word16*) audioFrame._payloadData,
sizeof(WebRtc_Word16) * (audioFrame._payloadDataLengthInSamples
* audioFrame._audioChannel));
(const WebRtc_Word16*) _audioFrame._payloadData,
sizeof(WebRtc_Word16) * (_audioFrame._payloadDataLengthInSamples
* _audioFrame._audioChannel));
nSamplesOut = audioFrame._payloadDataLengthInSamples;
nSamplesOut = _audioFrame._payloadDataLengthInSamples;
return 0;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@ -13,6 +13,7 @@
#include "voe_base.h"
#include "module_common_types.h"
#include "ref_count.h"
#include "shared_data.h"
@ -142,6 +143,8 @@ private:
bool _voiceEngineObserver;
WebRtc_UWord32 _oldVoEMicLevel;
WebRtc_UWord32 _oldMicLevel;
AudioFrame _audioFrame;
};
} // namespace webrtc