In the voice engine, added a member _audioFrame to the classes AudioCodingModuleImpl and VoEBaseImpl, and updated VoEBaseImpl::NeedMorePlayData() and AudioCodingModuleImpl::PlayoutData10Ms(), for performance reasons on Android. The two functions originally accounted for about 6% of VoE CPU time; after the change that share dropped to about 0.2%.

Review URL: https://webrtc-codereview.appspot.com/379001

git-svn-id: http://webrtc.googlecode.com/svn/trunk@1589 4adac7df-926f-26a2-2b94-8c16560cd09d
parent 2b87891901
commit de66b91274
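For context, a minimal sketch of the optimization this commit applies: an AudioFrame-like object holding a large sample buffer is expensive to construct on every 10 ms callback, so the change hoists it from a stack local into a class member that is reused across calls. The types and function names below (Frame, Playout, PullPlayoutData*) are hypothetical stand-ins, not WebRTC code.

#include <cstdint>
#include <cstring>

// Hypothetical stand-in for webrtc::AudioFrame: a large buffer whose
// constructor zero-fills every sample.
struct Frame {
    static const int kMaxSamples = 480 * 2;  // 10 ms of stereo at 48 kHz
    int16_t data[kMaxSamples];
    int samples;
    Frame() : samples(0) { std::memset(data, 0, sizeof(data)); }
};

// Before: a stack-local frame is constructed (and zeroed) on every 10 ms
// callback, which shows up in profiles on slow Android CPUs.
int PullPlayoutDataBefore(int16_t* out) {
    Frame tmp;                                       // built on each call
    // ... decoder/mixer fills tmp.data and tmp.samples ...
    std::memcpy(out, tmp.data, tmp.samples * sizeof(int16_t));
    return tmp.samples;
}

// After: the frame is a class member, constructed once and merely
// overwritten on each callback, as this commit does with _audioFrame.
class Playout {
public:
    int PullPlayoutDataAfter(int16_t* out) {
        // ... decoder/mixer fills _frame.data and _frame.samples ...
        std::memcpy(out, _frame.data, _frame.samples * sizeof(int16_t));
        return _frame.samples;
    }
private:
    Frame _frame;                                    // reused across calls
};

The trade-off is a larger object and the assumption that the 10 ms pull callback is not re-entrant, so the member buffer is never used by two calls at once.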
@@ -2002,25 +2002,24 @@ AudioCodingModuleImpl::PlayoutData10Ms(
     AudioFrame& audioFrame)
 {
     bool stereoMode;
-    AudioFrame audioFrameTmp;
 
     // recOut always returns 10 ms
-    if (_netEq.RecOut(audioFrameTmp) != 0)
+    if (_netEq.RecOut(_audioFrame) != 0)
     {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
                      "PlayoutData failed, RecOut Failed");
         return -1;
     }
 
-    audioFrame._audioChannel = audioFrameTmp._audioChannel;
-    audioFrame._vadActivity = audioFrameTmp._vadActivity;
-    audioFrame._speechType = audioFrameTmp._speechType;
+    audioFrame._audioChannel = _audioFrame._audioChannel;
+    audioFrame._vadActivity = _audioFrame._vadActivity;
+    audioFrame._speechType = _audioFrame._speechType;
 
-    stereoMode = (audioFrameTmp._audioChannel > 1);
+    stereoMode = (_audioFrame._audioChannel > 1);
     //For stereo playout:
     // Master and Slave samples are interleaved starting with Master
 
-    const WebRtc_UWord16 recvFreq = static_cast<WebRtc_UWord16>(audioFrameTmp._frequencyInHz);
+    const WebRtc_UWord16 recvFreq = static_cast<WebRtc_UWord16>(_audioFrame._frequencyInHz);
     bool toneDetected = false;
     WebRtc_Word16 lastDetectedTone;
     WebRtc_Word16 tone;
@@ -2036,8 +2035,8 @@ AudioCodingModuleImpl::PlayoutData10Ms(
     {
         // resample payloadData
         WebRtc_Word16 tmpLen = _outputResampler.Resample10Msec(
-            audioFrameTmp._payloadData, recvFreq, audioFrame._payloadData, desiredFreqHz,
-            audioFrameTmp._audioChannel);
+            _audioFrame._payloadData, recvFreq, audioFrame._payloadData, desiredFreqHz,
+            _audioFrame._audioChannel);
 
         if(tmpLen < 0)
         {
@@ -2053,11 +2052,11 @@ AudioCodingModuleImpl::PlayoutData10Ms(
     }
     else
     {
-        memcpy(audioFrame._payloadData, audioFrameTmp._payloadData,
-            audioFrameTmp._payloadDataLengthInSamples * audioFrame._audioChannel
+        memcpy(audioFrame._payloadData, _audioFrame._payloadData,
+            _audioFrame._payloadDataLengthInSamples * audioFrame._audioChannel
             * sizeof(WebRtc_Word16));
         // set the payload length
-        audioFrame._payloadDataLengthInSamples = audioFrameTmp._payloadDataLengthInSamples;
+        audioFrame._payloadDataLengthInSamples = _audioFrame._payloadDataLengthInSamples;
         // set the sampling frequency
         audioFrame._frequencyInHz = recvFreq;
     }
@@ -2091,22 +2090,22 @@ AudioCodingModuleImpl::PlayoutData10Ms(
     }
     else
     {
-        // Do the detection on the audio that we got from NetEQ (audioFrameTmp).
+        // Do the detection on the audio that we got from NetEQ (_audioFrame).
         if(!stereoMode)
        {
-            _dtmfDetector->Detect(audioFrameTmp._payloadData,
-                audioFrameTmp._payloadDataLengthInSamples, recvFreq,
+            _dtmfDetector->Detect(_audioFrame._payloadData,
+                _audioFrame._payloadDataLengthInSamples, recvFreq,
                 toneDetected, tone);
         }
         else
         {
             WebRtc_Word16 masterChannel[WEBRTC_10MS_PCM_AUDIO];
-            for(int n = 0; n < audioFrameTmp._payloadDataLengthInSamples; n++)
+            for(int n = 0; n < _audioFrame._payloadDataLengthInSamples; n++)
             {
-                masterChannel[n] = audioFrameTmp._payloadData[n<<1];
+                masterChannel[n] = _audioFrame._payloadData[n<<1];
             }
             _dtmfDetector->Detect(masterChannel,
-                audioFrameTmp._payloadDataLengthInSamples, recvFreq,
+                _audioFrame._payloadDataLengthInSamples, recvFreq,
                 toneDetected, tone);
         }
     }
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
@@ -370,6 +370,8 @@ private:
     TimedTrace _trace;
 #endif
 
+    AudioFrame _audioFrame;
+
 #ifdef ACM_QA_TEST
     FILE* _outgoingPL;
     FILE* _incomingPL;
@@ -259,8 +259,6 @@ WebRtc_Word32 VoEBaseImpl::NeedMorePlayData(
 
     assert(_outputMixerPtr != NULL);
 
-    AudioFrame audioFrame;
-
     // Perform mixing of all active participants (channel-based mixing)
     _outputMixerPtr->MixActiveChannels();
 
@@ -268,20 +266,20 @@ WebRtc_Word32 VoEBaseImpl::NeedMorePlayData(
     _outputMixerPtr->DoOperationsOnCombinedSignal();
 
     // Retrieve the final output mix (resampled to match the ADM)
-    _outputMixerPtr->GetMixedAudio(samplesPerSec, nChannels, audioFrame);
+    _outputMixerPtr->GetMixedAudio(samplesPerSec, nChannels, _audioFrame);
 
-    assert(nSamples == audioFrame._payloadDataLengthInSamples);
+    assert(nSamples == _audioFrame._payloadDataLengthInSamples);
     assert(samplesPerSec ==
-        static_cast<WebRtc_UWord32>(audioFrame._frequencyInHz));
+        static_cast<WebRtc_UWord32>(_audioFrame._frequencyInHz));
 
     // Deliver audio (PCM) samples to the ADM
     memcpy(
         (WebRtc_Word16*) audioSamples,
-        (const WebRtc_Word16*) audioFrame._payloadData,
-        sizeof(WebRtc_Word16) * (audioFrame._payloadDataLengthInSamples
-        * audioFrame._audioChannel));
+        (const WebRtc_Word16*) _audioFrame._payloadData,
+        sizeof(WebRtc_Word16) * (_audioFrame._payloadDataLengthInSamples
+        * _audioFrame._audioChannel));
 
-    nSamplesOut = audioFrame._payloadDataLengthInSamples;
+    nSamplesOut = _audioFrame._payloadDataLengthInSamples;
 
     return 0;
 }
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
@@ -13,6 +13,7 @@
 
 #include "voe_base.h"
 
+#include "module_common_types.h"
 #include "ref_count.h"
 #include "shared_data.h"
 
@@ -142,6 +143,8 @@ private:
     bool _voiceEngineObserver;
     WebRtc_UWord32 _oldVoEMicLevel;
     WebRtc_UWord32 _oldMicLevel;
+    AudioFrame _audioFrame;
+
 };
 
 } // namespace webrtc