Added new PullRenderData() and PushCaptureData() interfaces to AudioTransport; data pulled through PullRenderData() is not fed to the APM.

BUG=3147
R=andrew@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/11059005

git-svn-id: http://webrtc.googlecode.com/svn/trunk@5895 4adac7df-926f-26a2-2b94-8c16560cd09d
xians@webrtc.org 2014-04-14 10:50:37 +00:00
parent a956ec2019
commit 5692531f18
9 changed files with 118 additions and 54 deletions
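For orientation, a minimal render-side sketch of how the new pull interface might be driven (the callback function, sample rate, channel count, and include path are assumptions for illustration, not part of this change). Data obtained this way is not run through APMAnalyzeReverseStream():

#include <cstdint>

#include "webrtc/modules/audio_device/include/audio_device_defines.h"

// Hypothetical render-thread callback of an external audio device (sketch).
// PullRenderData() asserts 16-bit samples and exactly one 10 ms block per
// call, and it does NOT feed the mixed playout data to the APM.
void RenderTenMilliseconds(webrtc::AudioTransport* transport) {
  const int sample_rate = 48000;                  // assumed device rate
  const int channels = 2;                         // assumed stereo output
  const int frames_per_10ms = sample_rate / 100;  // 480 frames
  int16_t buffer[480 * 2];                        // interleaved int16 samples
  transport->PullRenderData(16, sample_rate, channels, frames_per_10ms,
                            buffer);
  // ... hand |buffer| to the OS audio sink ...
}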

View File

@@ -101,13 +101,32 @@ public:
   // Method to pass the captured audio data to the specific VoE channel.
   // |voe_channel| is the id of the VoE channel which is the sink to the
   // capture data.
-  // TODO(xians): Make the interface pure virtual after libjingle
-  // has its implementation.
+  // TODO(xians): Remove this interface after Libjingle switches to
+  // PushCaptureData().
   virtual void OnData(int voe_channel, const void* audio_data,
                       int bits_per_sample, int sample_rate,
                       int number_of_channels,
                       int number_of_frames) {}

+  // Method to push the captured audio data to the specific VoE channel.
+  // The data will not undergo audio processing.
+  // |voe_channel| is the id of the VoE channel which is the sink to the
+  // capture data.
+  // TODO(xians): Make the interface pure virtual after Libjingle
+  // has its implementation.
+  virtual void PushCaptureData(int voe_channel, const void* audio_data,
+                               int bits_per_sample, int sample_rate,
+                               int number_of_channels,
+                               int number_of_frames) {}
+
+  // Method to pull mixed render audio data from all active VoE channels.
+  // The data will not be passed as reference for audio processing internally.
+  // TODO(xians): Support getting the unmixed render data from specific VoE
+  // channel.
+  virtual void PullRenderData(int bits_per_sample, int sample_rate,
+                              int number_of_channels, int number_of_frames,
+                              void* audio_data) {}
+
 protected:
   virtual ~AudioTransport() {}
 };
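A matching capture-side sketch (the callback function, channel id, and rates are hypothetical; only PushCaptureData() itself comes from this change). Because the data bypasses VoE's APM, it is assumed to have already been processed, e.g. echo-cancelled, by the caller:

#include <cstdint>

#include "webrtc/modules/audio_device/include/audio_device_defines.h"

// Hypothetical capture callback (sketch). |transport| is the registered
// webrtc::AudioTransport and |voe_channel| the id of the sending channel.
// The 10 ms block is delivered without further audio processing in VoE.
void OnCapturedTenMilliseconds(webrtc::AudioTransport* transport,
                               int voe_channel, const int16_t* audio_data,
                               int sample_rate, int channels) {
  const int frames_per_10ms = sample_rate / 100;
  transport->PushCaptureData(voe_channel, audio_data, 16, sample_rate,
                             channels, frames_per_10ms);
}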

View File

@@ -142,10 +142,14 @@ class AudioTransportAPI: public AudioTransport {
     return 0;
   }

-  virtual void OnData(int voe_channel, const void* audio_data,
-                      int bits_per_sample, int sample_rate,
-                      int number_of_channels,
-                      int number_of_frames) {}
+  virtual void PushCaptureData(int voe_channel, const void* audio_data,
+                               int bits_per_sample, int sample_rate,
+                               int number_of_channels,
+                               int number_of_frames) {}
+
+  virtual void PullRenderData(int bits_per_sample, int sample_rate,
+                              int number_of_channels, int number_of_frames,
+                              void* audio_data) {}

 private:
  uint32_t rec_count_;
  uint32_t play_count_;

View File

@@ -542,11 +542,16 @@ int AudioTransportImpl::OnDataAvailable(const int voe_channels[],
   return 0;
 }

-void AudioTransportImpl::OnData(int voe_channel,
-                                const void* audio_data,
-                                int bits_per_sample, int sample_rate,
-                                int number_of_channels,
-                                int number_of_frames) {}
+void AudioTransportImpl::PushCaptureData(int voe_channel,
+                                         const void* audio_data,
+                                         int bits_per_sample, int sample_rate,
+                                         int number_of_channels,
+                                         int number_of_frames) {}
+
+void AudioTransportImpl::PullRenderData(int bits_per_sample, int sample_rate,
+                                        int number_of_channels,
+                                        int number_of_frames,
+                                        void* audio_data) {}

 FuncTestManager::FuncTestManager() :
     _processThread(NULL),

View File

@@ -131,10 +131,14 @@ public:
                                   bool key_pressed,
                                   bool need_audio_processing);

-  virtual void OnData(int voe_channel, const void* audio_data,
-                      int bits_per_sample, int sample_rate,
-                      int number_of_channels,
-                      int number_of_frames);
+  virtual void PushCaptureData(int voe_channel, const void* audio_data,
+                               int bits_per_sample, int sample_rate,
+                               int number_of_channels,
+                               int number_of_frames);
+
+  virtual void PullRenderData(int bits_per_sample, int sample_rate,
+                              int number_of_channels, int number_of_frames,
+                              void* audio_data);

   AudioTransportImpl(AudioDeviceModule* audioDevice);
   ~AudioTransportImpl();

View File

@@ -532,7 +532,7 @@ int OutputMixer::GetMixedAudio(int sample_rate_hz,
 }

 int32_t
-OutputMixer::DoOperationsOnCombinedSignal()
+OutputMixer::DoOperationsOnCombinedSignal(bool feed_data_to_apm)
 {
     if (_audioFrame.sample_rate_hz_ != _mixingFrequencyHz)
     {

@@ -565,10 +565,8 @@ OutputMixer::DoOperationsOnCombinedSignal()
     }

     // --- Far-end Voice Quality Enhancement (AudioProcessing Module)
-    // TODO(ajm): Check with VoEBase if |need_audio_processing| is false.
-    // If so, we don't need to call this method and can avoid the subsequent
-    // resampling. See: https://code.google.com/p/webrtc/issues/detail?id=3147
-    APMAnalyzeReverseStream();
+    if (feed_data_to_apm)
+        APMAnalyzeReverseStream();

     // --- External media processing
     {

View File

@@ -60,7 +60,7 @@ public:
     int32_t MixActiveChannels();

-    int32_t DoOperationsOnCombinedSignal();
+    int32_t DoOperationsOnCombinedSignal(bool feed_data_to_apm);

     int32_t SetMixabilityStatus(MixerParticipant& participant,
                                 bool mixable);
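Reduced to its essentials, the new flag simply gates the reverse-stream analysis. A self-contained sketch with stand-in names (nothing below is WebRTC API):

#include <cstdio>

// Stand-in for OutputMixer::APMAnalyzeReverseStream().
static void AnalyzeReverseStream() { std::puts("playout data fed to APM"); }

// Sketch of DoOperationsOnCombinedSignal() after this change: mixing and
// resampling still happen unconditionally, but the APM only sees the render
// data when the caller asks for it.
static int DoOperationsOnCombinedSignal(bool feed_data_to_apm) {
  // ... resampling, external media processing, file recording ...
  if (feed_data_to_apm)
    AnalyzeReverseStream();
  return 0;
}

int main() {
  DoOperationsOnCombinedSignal(true);   // NeedMorePlayData()-style path
  DoOperationsOnCombinedSignal(false);  // PullRenderData()-style path
  return 0;
}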

View File

@@ -150,39 +150,18 @@ int32_t VoEBaseImpl::NeedMorePlayData(
         void* audioSamples,
         uint32_t& nSamplesOut)
 {
     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
                  "VoEBaseImpl::NeedMorePlayData(nSamples=%u, "
                  "nBytesPerSample=%d, nChannels=%d, samplesPerSec=%u)",
                  nSamples, nBytesPerSample, nChannels, samplesPerSec);

-    assert(_shared->output_mixer() != NULL);
-
-    // TODO(andrew): if the device is running in mono, we should tell the mixer
-    // here so that it will only request mono from AudioCodingModule.
-
-    // Perform mixing of all active participants (channel-based mixing)
-    _shared->output_mixer()->MixActiveChannels();
-
-    // Additional operations on the combined signal
-    _shared->output_mixer()->DoOperationsOnCombinedSignal();
-
-    // Retrieve the final output mix (resampled to match the ADM)
-    _shared->output_mixer()->GetMixedAudio(samplesPerSec, nChannels,
-                                           &_audioFrame);
-
-    assert(static_cast<int>(nSamples) == _audioFrame.samples_per_channel_);
-    assert(samplesPerSec ==
-           static_cast<uint32_t>(_audioFrame.sample_rate_hz_));
-
-    // Deliver audio (PCM) samples to the ADM
-    memcpy(
-        (int16_t*) audioSamples,
-        (const int16_t*) _audioFrame.data_,
-        sizeof(int16_t) * (_audioFrame.samples_per_channel_
-                           * _audioFrame.num_channels_));
-
-    nSamplesOut = _audioFrame.samples_per_channel_;
-
-    return 0;
+    GetPlayoutData(static_cast<int>(samplesPerSec),
+                   static_cast<int>(nChannels),
+                   static_cast<int>(nSamples), true, audioSamples);
+
+    nSamplesOut = _audioFrame.samples_per_channel_;
+
+    return 0;
 }

 int VoEBaseImpl::OnDataAvailable(const int voe_channels[],

@@ -219,8 +198,8 @@ int VoEBaseImpl::OnDataAvailable(const int voe_channels[],
     // TODO(ajm): In the case where multiple channels are using the same codec
     // rate, this path needlessly does extra conversions. We should convert once
     // and share between channels.
-    OnData(voe_channels[i], audio_data, 16, sample_rate,
-           number_of_channels, number_of_frames);
+    PushCaptureData(voe_channels[i], audio_data, 16, sample_rate,
+                    number_of_channels, number_of_frames);
   }

   // Return 0 to indicate no need to change the volume.

@@ -231,6 +210,14 @@ void VoEBaseImpl::OnData(int voe_channel, const void* audio_data,
                          int bits_per_sample, int sample_rate,
                          int number_of_channels,
                          int number_of_frames) {
+  PushCaptureData(voe_channel, audio_data, bits_per_sample, sample_rate,
+                  number_of_channels, number_of_frames);
+}
+
+void VoEBaseImpl::PushCaptureData(int voe_channel, const void* audio_data,
+                                  int bits_per_sample, int sample_rate,
+                                  int number_of_channels,
+                                  int number_of_frames) {
   voe::ChannelOwner ch = _shared->channel_manager().GetChannel(voe_channel);
   voe::Channel* channel_ptr = ch.channel();
   if (!channel_ptr)

@@ -246,6 +233,16 @@ void VoEBaseImpl::OnData(int voe_channel, const void* audio_data,
   }
 }

+void VoEBaseImpl::PullRenderData(int bits_per_sample, int sample_rate,
+                                 int number_of_channels, int number_of_frames,
+                                 void* audio_data) {
+  assert(bits_per_sample == 16);
+  assert(number_of_frames == static_cast<int>(sample_rate / 100));
+
+  GetPlayoutData(sample_rate, number_of_channels, number_of_frames, false,
+                 audio_data);
+}
+
 int VoEBaseImpl::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
 {
     WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),

@@ -1188,4 +1185,29 @@ int VoEBaseImpl::ProcessRecordedDataWithAPM(
   return 0;
 }

+void VoEBaseImpl::GetPlayoutData(int sample_rate, int number_of_channels,
+                                 int number_of_frames, bool feed_data_to_apm,
+                                 void* audio_data) {
+  assert(_shared->output_mixer() != NULL);
+
+  // TODO(andrew): if the device is running in mono, we should tell the mixer
+  // here so that it will only request mono from AudioCodingModule.
+
+  // Perform mixing of all active participants (channel-based mixing)
+  _shared->output_mixer()->MixActiveChannels();
+
+  // Additional operations on the combined signal
+  _shared->output_mixer()->DoOperationsOnCombinedSignal(feed_data_to_apm);
+
+  // Retrieve the final output mix (resampled to match the ADM)
+  _shared->output_mixer()->GetMixedAudio(sample_rate, number_of_channels,
+                                         &_audioFrame);
+
+  assert(number_of_frames == _audioFrame.samples_per_channel_);
+  assert(sample_rate == _audioFrame.sample_rate_hz_);
+
+  // Deliver audio (PCM) samples to the ADM
+  memcpy(audio_data, _audioFrame.data_,
+         sizeof(int16_t) * number_of_frames * number_of_channels);
+}
+
 }  // namespace webrtc
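Per the asserts and memcpy above, the caller of PullRenderData()/GetPlayoutData() must supply a buffer holding one 10 ms block of interleaved 16-bit samples. A quick sizing sketch, with the rate and channel count chosen only as an example:

#include <cstddef>
#include <cstdint>

// Example sizing for the |audio_data| buffer handed to PullRenderData().
constexpr int kSampleRate = 48000;                 // example device rate
constexpr int kChannels = 2;                       // example: stereo output
constexpr int kFramesPer10Ms = kSampleRate / 100;  // 480, matches the assert
constexpr size_t kBufferBytes =
    sizeof(int16_t) * kFramesPer10Ms * kChannels;  // 1920 bytes per 10 ms

int16_t render_buffer[kFramesPer10Ms * kChannels];  // caller-owned storage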

View File

@@ -106,6 +106,14 @@ public:
                       int bits_per_sample, int sample_rate,
                       int number_of_channels, int number_of_frames);

+  virtual void PushCaptureData(int voe_channel, const void* audio_data,
+                               int bits_per_sample, int sample_rate,
+                               int number_of_channels, int number_of_frames);
+
+  virtual void PullRenderData(int bits_per_sample, int sample_rate,
+                              int number_of_channels, int number_of_frames,
+                              void* audio_data);
+
   // AudioDeviceObserver
   virtual void OnErrorIsReported(ErrorCode error);
   virtual void OnWarningIsReported(WarningCode warning);

@@ -138,6 +146,10 @@ private:
                                  uint32_t volume,
                                  bool key_pressed);

+  void GetPlayoutData(int sample_rate, int number_of_channels,
+                      int number_of_frames, bool feed_data_to_apm,
+                      void* audio_data);
+
   int32_t AddBuildInfo(char* str) const;
   int32_t AddVoEVersion(char* str) const;

View File

@@ -320,7 +320,7 @@ int VoEExternalMediaImpl::ExternalPlayoutGetData(
     // Retrieve mixed output at the specified rate
     shared_->output_mixer()->MixActiveChannels();
-    shared_->output_mixer()->DoOperationsOnCombinedSignal();
+    shared_->output_mixer()->DoOperationsOnCombinedSignal(true);
     shared_->output_mixer()->GetMixedAudio(samplingFreqHz, 1, &audioFrame);

     // Deliver audio (PCM) samples to the external sink