diff --git a/src/voice_engine/main/source/transmit_mixer.cc b/src/voice_engine/main/source/transmit_mixer.cc
index 294ccb699..25f70d37c 100644
--- a/src/voice_engine/main/source/transmit_mixer.cc
+++ b/src/voice_engine/main/source/transmit_mixer.cc
@@ -1273,17 +1273,32 @@ WebRtc_Word32 TransmitMixer::APMProcessStream(
 {
     WebRtc_UWord16 captureLevel(currentMicLevel);
 
-    // If the frequency has changed we need to change APM settings
-    // Sending side is "master"
-    if (_audioProcessingModulePtr->sample_rate_hz()
-        != _audioFrame._frequencyInHz)
+    // Check if the number of input channels has changed. Retain the number
+    // of output channels.
+    if (_audioFrame._audioChannel !=
+        _audioProcessingModulePtr->num_input_channels())
     {
-        if (_audioProcessingModulePtr->set_sample_rate_hz(
-            _audioFrame._frequencyInHz))
+        if (_audioProcessingModulePtr->set_num_channels(
+            _audioFrame._audioChannel,
+            _audioProcessingModulePtr->num_output_channels()))
         {
             WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                         "AudioProcessingModule::set_sample_rate_hz("
-                         "_frequencyInHz=%u) => error",
+                         "AudioProcessing::set_num_channels(%d, %d) => error",
+                         _audioFrame._audioChannel,
+                         _audioProcessingModulePtr->num_output_channels());
+        }
+    }
+
+    // If the frequency has changed we need to change APM settings
+    // Sending side is "master"
+    if (_audioProcessingModulePtr->sample_rate_hz() !=
+        _audioFrame._frequencyInHz)
+    {
+        if (_audioProcessingModulePtr->set_sample_rate_hz(
+            _audioFrame._frequencyInHz))
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                         "AudioProcessing::set_sample_rate_hz(%u) => error",
                          _audioFrame._frequencyInHz);
         }
     }
@@ -1291,37 +1306,34 @@ WebRtc_Word32 TransmitMixer::APMProcessStream(
     if (_audioProcessingModulePtr->set_stream_delay_ms(totalDelayMS) == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                     "AudioProcessingModule::set_stream_delay_ms("
-                     "totalDelayMS=%u) => error",
+                     "AudioProcessing::set_stream_delay_ms(%u) => error",
                      totalDelayMS);
     }
     if (_audioProcessingModulePtr->gain_control()->set_stream_analog_level(
-        captureLevel) == -1)
+            captureLevel) == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                     "AudioProcessingModule::set_stream_analog_level "
-                     "(captureLevel=%u,) => error",
-                     captureLevel);
+                     "AudioProcessing::set_stream_analog_level(%u) => error",
+                     captureLevel);
     }
     if (_audioProcessingModulePtr->echo_cancellation()->
-        is_drift_compensation_enabled())
+            is_drift_compensation_enabled())
     {
         if (_audioProcessingModulePtr->echo_cancellation()->
-            set_stream_drift_samples(clockDrift) == -1)
+                set_stream_drift_samples(clockDrift) == -1)
         {
             WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                         "AudioProcessingModule::set_stream_drift_samples("
-                         "clockDrift=%u,) => error",
-                         clockDrift);
+                         "AudioProcessing::set_stream_drift_samples(%u) => error",
+                         clockDrift);
         }
     }
     if (_audioProcessingModulePtr->ProcessStream(&_audioFrame) == -1)
     {
         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
-                     "AudioProcessingModule::ProcessStream() => error");
+                     "AudioProcessing::ProcessStream() => error");
     }
-    captureLevel
-        = _audioProcessingModulePtr->gain_control()->stream_analog_level();
+    captureLevel =
+        _audioProcessingModulePtr->gain_control()->stream_analog_level();
 
     // Store new capture level (only updated when analog AGC is enabled)
     _captureLevel = captureLevel;
diff --git a/src/voice_engine/main/source/voe_base_impl.cc b/src/voice_engine/main/source/voe_base_impl.cc
index 2b91db906..723c92290 100644
--- a/src/voice_engine/main/source/voe_base_impl.cc
+++ b/src/voice_engine/main/source/voe_base_impl.cc
@@ -365,7 +365,7 @@ int VoEBaseImpl::Init(AudioDeviceModule* external_adm)
         }
     }
 
-    // Create and internal ADM if the user has not added and external
+    // Create an internal ADM if the user has not added an external
    // ADM implementation as input to Init().
    if (external_adm == NULL)
    {
@@ -478,13 +478,20 @@ int VoEBaseImpl::Init(AudioDeviceModule* external_adm)
         _engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
             "Init() failed to set mono/stereo playout mode");
     }
+
+    // TODO(andrew): These functions don't tell us whether stereo recording
+    // is truly available. We simply set the AudioProcessing input to stereo
+    // here, because we have to wait until receiving the first frame to
+    // determine the actual number of channels anyway.
+    //
+    // These functions may be changed; tracked here:
+    // http://code.google.com/p/webrtc/issues/detail?id=204
     _audioDevicePtr->StereoRecordingIsAvailable(&available);
     if (_audioDevicePtr->SetStereoRecording(available) != 0)
     {
         _engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
             "Init() failed to set mono/stereo recording mode");
     }
-    int recordingChannels = available ? 2 : 1;
 
     // APM initialization done after sound card since we need
     // to know if we support stereo recording or not.
@@ -525,8 +532,11 @@ int VoEBaseImpl::Init(AudioDeviceModule* external_adm)
         return -1;
     }
 
-    // Assume mono sending until a send codec is set.
-    if (_audioProcessingModulePtr->set_num_channels(recordingChannels, 1) != 0)
+    // Assume mono output until a send codec is set, and stereo input until
+    // we receive the first captured frame. We set stereo input here to
+    // avoid triggering a possible error in SetSendCodec when a stereo
+    // codec is selected.
+    if (_audioProcessingModulePtr->set_num_channels(2, 1) != 0)
     {
         _engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceError,
             "Init() failed to set channels for the primary audio stream");
diff --git a/src/voice_engine/main/source/voe_codec_impl.cc b/src/voice_engine/main/source/voe_codec_impl.cc
index b4f7cdce2..1f3df5841 100644
--- a/src/voice_engine/main/source/voe_codec_impl.cc
+++ b/src/voice_engine/main/source/voe_codec_impl.cc
@@ -176,27 +176,32 @@ int VoECodecImpl::SetSendCodec(int channel, const CodecInst& codec)
     channelPtr = sc2.GetFirstChannel(iterator);
     int maxNumChannels = 1;
     while (channelPtr != NULL)
-    { 
+    {
        CodecInst tmpCdc;
        channelPtr->GetSendCodec(tmpCdc);
        if (tmpCdc.channels > maxNumChannels)
-            maxNumChannels = tmpCdc.channels; 
-        
+            maxNumChannels = tmpCdc.channels;
+
        channelPtr = sc2.GetNextChannel(iterator);
    }
 
-    bool available(false);
-    _audioDevicePtr->StereoRecordingIsAvailable(&available);
-    int recordingChannels = available ? 2 : 1;
-
-    if (_audioProcessingModulePtr->set_num_channels(recordingChannels, maxNumChannels) != 0)
+    // Reuse the currently set number of capture channels. We need to wait
+    // until receiving a frame to determine the true number.
+    //
+    // TODO(andrew): AudioProcessing will return an error if there are more
+    // output than input channels (it doesn't want to produce fake channels).
+    // This will happen with a stereo codec and a device which doesn't support
+    // stereo. AudioCoding should probably do the faking; look into how to
+    // handle this case properly.
+    if (_audioProcessingModulePtr->set_num_channels(
+            _audioProcessingModulePtr->num_input_channels(),
+            maxNumChannels) != 0)
     {
         _engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceError,
             "Init() failed to set APM channels for the send audio stream");
         return -1;
     }
-
     return 0;
 }
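For illustration, below is a minimal sketch of the flow the three changes above establish, written as a standalone C++ program rather than against the real VoiceEngine classes. Frame and AudioProcessor are invented stand-ins for webrtc::AudioFrame and webrtc::AudioProcessing; they mirror only the calls and the 0/-1 return convention visible in the diff, including the constraint noted in the voe_codec_impl.cc TODO that more output than input channels is rejected.

// A minimal, self-contained sketch (not the VoiceEngine code) of the channel
// and sample-rate handling this patch introduces. Frame and AudioProcessor
// are invented stand-ins for webrtc::AudioFrame and webrtc::AudioProcessing.
#include <cstdio>

struct Frame {
    int num_channels;
    int sample_rate_hz;
};

class AudioProcessor {
 public:
    int num_input_channels() const { return input_channels_; }
    int num_output_channels() const { return output_channels_; }
    int sample_rate_hz() const { return sample_rate_hz_; }

    // Returns 0 on success, -1 on error. Rejects more output than input
    // channels, as described in the voe_codec_impl.cc TODO.
    int set_num_channels(int input, int output) {
        if (input < 1 || output < 1 || output > input) return -1;
        input_channels_ = input;
        output_channels_ = output;
        return 0;
    }

    int set_sample_rate_hz(int rate) {
        if (rate <= 0) return -1;
        sample_rate_hz_ = rate;
        return 0;
    }

    int ProcessStream(Frame* frame) {
        // Real processing omitted; just verify the configuration matches.
        return (frame->num_channels == input_channels_ &&
                frame->sample_rate_hz == sample_rate_hz_) ? 0 : -1;
    }

 private:
    // Mirrors Init(): stereo input assumed until the first captured frame
    // arrives, mono output until a send codec is set.
    int input_channels_ = 2;
    int output_channels_ = 1;
    int sample_rate_hz_ = 16000;
};

// Mirrors VoECodecImpl::SetSendCodec(): reuse the current capture channel
// count and adjust only the output channels required by the codec.
int SetCodecChannels(AudioProcessor* apm, int codec_channels) {
    return apm->set_num_channels(apm->num_input_channels(), codec_channels);
}

// Mirrors TransmitMixer::APMProcessStream(): adapt the processor to the
// captured frame before processing, retaining the output channel count.
int ProcessCapturedFrame(AudioProcessor* apm, Frame* frame) {
    if (frame->num_channels != apm->num_input_channels()) {
        if (apm->set_num_channels(frame->num_channels,
                                  apm->num_output_channels()) != 0) {
            std::fprintf(stderr, "set_num_channels(%d, %d) => error\n",
                         frame->num_channels, apm->num_output_channels());
        }
    }
    if (frame->sample_rate_hz != apm->sample_rate_hz()) {
        if (apm->set_sample_rate_hz(frame->sample_rate_hz) != 0) {
            std::fprintf(stderr, "set_sample_rate_hz(%d) => error\n",
                         frame->sample_rate_hz);
        }
    }
    return apm->ProcessStream(frame);
}

int main() {
    AudioProcessor apm;                   // Init(): stereo in, mono out.
    if (SetCodecChannels(&apm, 1) != 0)   // Mono send codec selected.
        return 1;
    Frame frame = {1, 48000};             // First capture: mono, 48 kHz.
    return ProcessCapturedFrame(&apm, &frame) == 0 ? 0 : 1;
}

Deferring the input-channel decision to the first captured frame is what lets Init() stop relying on StereoRecordingIsAvailable(), which, per the TODO above, does not reliably report whether stereo capture is truly available.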