diff --git a/src/modules/audio_device/main/source/Android.mk b/src/modules/audio_device/main/source/Android.mk index 42ae10d00..762ef4ee5 100644 --- a/src/modules/audio_device/main/source/Android.mk +++ b/src/modules/audio_device/main/source/Android.mk @@ -24,8 +24,8 @@ LOCAL_SRC_FILES := \ audio_device_impl.cc \ Android/audio_device_android_opensles.cc \ Android/audio_device_utility_android.cc \ - Dummy/audio_device_utility_dummy.cc \ - Dummy/audio_device_dummy.cc + dummy/audio_device_utility_dummy.cc \ + dummy/audio_device_dummy.cc # Flags passed to both C and C++ files. LOCAL_CFLAGS := \ @@ -35,8 +35,8 @@ LOCAL_CFLAGS := \ LOCAL_C_INCLUDES := \ $(LOCAL_PATH) \ $(LOCAL_PATH)/Android \ - $(LOCAL_PATH)/Dummy \ - $(LOCAL_PATH)/Linux \ + $(LOCAL_PATH)/dummy \ + $(LOCAL_PATH)/linux \ $(LOCAL_PATH)/../interface \ $(LOCAL_PATH)/../../../.. \ $(LOCAL_PATH)/../../../interface \ diff --git a/src/modules/audio_device/main/source/audio_device.gyp b/src/modules/audio_device/main/source/audio_device.gyp index a97e963c6..a5b0f8c4c 100644 --- a/src/modules/audio_device/main/source/audio_device.gyp +++ b/src/modules/audio_device/main/source/audio_device.gyp @@ -23,7 +23,7 @@ '.', '../../../interface', '../interface', - 'Dummy', # Dummy audio device + 'dummy', # dummy audio device ], 'direct_dependent_settings': { 'include_dirs': [ @@ -45,84 +45,45 @@ 'audio_device_impl.cc', 'audio_device_impl.h', 'audio_device_config.h', - 'Dummy/audio_device_dummy.cc', - 'Dummy/audio_device_dummy.h', - 'Dummy/audio_device_utility_dummy.cc', - 'Dummy/audio_device_utility_dummy.h', - 'Linux/alsasymboltable.cc', - 'Linux/alsasymboltable.h', - 'Linux/audio_device_linux_alsa.cc', - 'Linux/audio_device_linux_alsa.h', - 'Linux/audio_device_utility_linux.cc', - 'Linux/audio_device_utility_linux.h', - 'Linux/audio_mixer_manager_linux_alsa.cc', - 'Linux/audio_mixer_manager_linux_alsa.h', - 'Linux/latebindingsymboltable.cc', - 'Linux/latebindingsymboltable.h', - 'Mac/audio_device_mac.cc', - 'Mac/audio_device_mac.h', - 'Mac/audio_device_utility_mac.cc', - 'Mac/audio_device_utility_mac.h', - 'Mac/audio_mixer_manager_mac.cc', - 'Mac/audio_mixer_manager_mac.h', - 'Mac/portaudio/pa_memorybarrier.h', - 'Mac/portaudio/pa_ringbuffer.c', - 'Mac/portaudio/pa_ringbuffer.h', - 'Windows/audio_device_utility_windows.cc', - 'Windows/audio_device_utility_windows.h', - 'Windows/audio_device_windows_core.cc', - 'Windows/audio_device_windows_core.h', - 'Windows/audio_device_windows_wave.cc', - 'Windows/audio_device_windows_wave.h', - 'Windows/audio_mixer_manager.cc', - 'Windows/audio_mixer_manager.h', + 'dummy/audio_device_dummy.cc', + 'dummy/audio_device_dummy.h', + 'dummy/audio_device_utility_dummy.cc', + 'dummy/audio_device_utility_dummy.h', + 'linux/alsasymboltable_linux.cc', + 'linux/alsasymboltable_linux.h', + 'linux/audio_device_alsa_linux.cc', + 'linux/audio_device_alsa_linux.h', + 'linux/audio_device_utility_linux.cc', + 'linux/audio_device_utility_linux.h', + 'linux/audio_mixer_manager_alsa_linux.cc', + 'linux/audio_mixer_manager_alsa_linux.h', + 'linux/latebindingsymboltable_linux.cc', + 'linux/latebindingsymboltable_linux.h', + 'mac/audio_device_mac.cc', + 'mac/audio_device_mac.h', + 'mac/audio_device_utility_mac.cc', + 'mac/audio_device_utility_mac.h', + 'mac/audio_mixer_manager_mac.cc', + 'mac/audio_mixer_manager_mac.h', + 'mac/portaudio/pa_memorybarrier.h', + 'mac/portaudio/pa_ringbuffer.c', + 'mac/portaudio/pa_ringbuffer.h', + 'win/audio_device_utility_win.cc', + 'win/audio_device_utility_win.h', + 'win/audio_device_core_win.cc', + 
'win/audio_device_core_win.h', + 'win/audio_device_wave_win.cc', + 'win/audio_device_wave_win.h', + 'win/audio_mixer_manager_win.cc', + 'win/audio_mixer_manager_win.h', ], 'conditions': [ - ['OS!="linux"', { - 'sources!': [ - 'Linux/alsasymboltable.cc', - 'Linux/alsasymboltable.h', - 'Linux/audio_device_linux_alsa.cc', - 'Linux/audio_device_linux_alsa.h', - 'Linux/audio_mixer_manager_linux_alsa.cc', - 'Linux/audio_mixer_manager_linux_alsa.h', - 'Linux/latebindingsymboltable.cc', - 'Linux/latebindingsymboltable.h', - 'Linux/audio_device_utility_linux.cc', - 'Linux/audio_device_utility_linux.h', - ], - }], - ['OS!="mac"', { - 'sources!': [ - 'Mac/audio_device_mac.cc', - 'Mac/audio_device_mac.h', - 'Mac/audio_device_utility_mac.cc', - 'Mac/audio_device_utility_mac.h', - 'Mac/audio_mixer_manager_mac.cc', - 'Mac/audio_mixer_manager_mac.h', - 'Mac/portaudio/pa_memorybarrier.h', - 'Mac/portaudio/pa_ringbuffer.c', - 'Mac/portaudio/pa_ringbuffer.h', - ], - }], - ['OS!="win"', { - 'sources!': [ - 'Windows/audio_device_utility_windows.cc', - 'Windows/audio_device_utility_windows.h', - 'Windows/audio_device_windows_core.cc', - 'Windows/audio_device_windows_core.h', - 'Windows/audio_device_windows_wave.cc', - 'Windows/audio_device_windows_wave.h', - 'Windows/audio_mixer_manager.cc', - 'Windows/audio_mixer_manager.h', - ], - }], ['OS=="linux"', { 'defines': [ 'LINUX_ALSA', ], 'include_dirs': [ - 'Linux', + 'linux', ], 'link_settings': { 'libraries': [ @@ -136,12 +97,12 @@ 'LINUX_PULSE', ], 'sources': [ - 'Linux/audio_device_linux_pulse.cc', - 'Linux/audio_device_linux_pulse.h', - 'Linux/audio_mixer_manager_linux_pulse.cc', - 'Linux/audio_mixer_manager_linux_pulse.h', - 'Linux/pulseaudiosymboltable.cc', - 'Linux/pulseaudiosymboltable.h', + 'linux/audio_device_pulse_linux.cc', + 'linux/audio_device_pulse_linux.h', + 'linux/audio_mixer_manager_pulse_linux.cc', + 'linux/audio_mixer_manager_pulse_linux.h', + 'linux/pulseaudiosymboltable_linux.cc', + 'linux/pulseaudiosymboltable_linux.h', ], 'link_settings': { 'libraries': [ @@ -153,7 +114,7 @@ }], ['OS=="mac"', { 'include_dirs': [ - 'Mac', + 'mac', ], 'link_settings': { 'libraries': [ @@ -164,7 +125,7 @@ }], ['OS=="win"', { 'include_dirs': [ - 'Windows', + 'win', '../../../../../..', ], }], diff --git a/src/modules/audio_device/main/source/audio_device_buffer.h b/src/modules/audio_device/main/source/audio_device_buffer.h index c7d7e5d38..b61ba1e3a 100644 --- a/src/modules/audio_device/main/source/audio_device_buffer.h +++ b/src/modules/audio_device/main/source/audio_device_buffer.h @@ -12,7 +12,7 @@ #define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_BUFFER_H #include "typedefs.h" -#include "resampler.h" +#include "../../../../common_audio/resampler/main/interface/resampler.h" #include "file_wrapper.h" #include "audio_device.h" #include "list_wrapper.h" diff --git a/src/modules/audio_device/main/source/audio_device_impl.cc b/src/modules/audio_device/main/source/audio_device_impl.cc index a76e7fe8f..932f0c28f 100644 --- a/src/modules/audio_device/main/source/audio_device_impl.cc +++ b/src/modules/audio_device/main/source/audio_device_impl.cc @@ -15,10 +15,10 @@ #include "trace.h" #if defined(_WIN32) - #include "audio_device_utility_windows.h" - #include "audio_device_windows_wave.h" + #include "audio_device_utility_win.h" + #include "audio_device_wave_win.h" #if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD) - #include "audio_device_windows_core.h" + #include "audio_device_core_win.h" #endif #elif defined(WEBRTC_ANDROID_OPENSLES) #include @@ -31,10 +31,10 @@ #elif 
defined(WEBRTC_LINUX) #include "audio_device_utility_linux.h" #if defined(LINUX_ALSA) - #include "audio_device_linux_alsa.h" + #include "audio_device_alsa_linux.h" #endif #if defined(LINUX_PULSE) - #include "audio_device_linux_pulse.h" + #include "audio_device_pulse_linux.h" #endif #elif defined(MAC_IPHONE) #include "audio_device_utility_iphone.h" diff --git a/src/modules/audio_device/main/source/dummy/audio_device_dummy.cc b/src/modules/audio_device/main/source/dummy/audio_device_dummy.cc new file mode 100644 index 000000000..92598ce40 --- /dev/null +++ b/src/modules/audio_device/main/source/dummy/audio_device_dummy.cc @@ -0,0 +1,1395 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_device_dummy.h" + +#include <stdio.h> + +#include "trace.h" +#include "thread_wrapper.h" +#include "event_wrapper.h" + +// Enable to record playout data +//#define RECORD_PLAYOUT 1 + +namespace webrtc { + +const WebRtc_UWord32 REC_TIMER_PERIOD_MS = 10; +const WebRtc_UWord32 PLAY_TIMER_PERIOD_MS = 10; + +// ============================================================================ +// Construction & Destruction +// ============================================================================ + +// ---------------------------------------------------------------------------- +// AudioDeviceDummy() - ctor +// ---------------------------------------------------------------------------- + +AudioDeviceDummy::AudioDeviceDummy(const WebRtc_Word32 id) : + _ptrAudioBuffer(NULL), + _critSect(*CriticalSectionWrapper::CreateCriticalSection()), + _id(id), + _timeEventRec(*EventWrapper::Create()), + _timeEventPlay(*EventWrapper::Create()), + _recStartEvent(*EventWrapper::Create()), + _playStartEvent(*EventWrapper::Create()), + _ptrThreadRec(NULL), + _ptrThreadPlay(NULL), + _recThreadID(0), + _playThreadID(0), + _initialized(false), + _recording(false), + _playing(false), + _recIsInitialized(false), + _playIsInitialized(false), + _speakerIsInitialized(false), + _microphoneIsInitialized(false), + _playDataFile(NULL) +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__); + + memset(_recBuffer, 0, sizeof(_recBuffer)); + WebRtc_Word16* tmp = (WebRtc_Word16*)_recBuffer; + + // Sawtooth -16000 to 16000, 100 Hz @ fs = 16 kHz +// for(int i=0; i<160; ++i) +// { +// tmp[i] = i*200-16000; +// } + + // Rough sine, 2 kHz @ fs = 16 kHz + for(int i=0; i<20; ++i) + { + tmp[i*8] = 0; + tmp[i*8+1] = -5000; + tmp[i*8+2] = -16000; + tmp[i*8+3] = -5000; + tmp[i*8+4] = 0; + tmp[i*8+5] = 5000; + tmp[i*8+6] = 16000; + tmp[i*8+7] = 5000; + } + +#ifdef RECORD_PLAYOUT + _playDataFile = fopen("webrtc_VoiceEngine_playout.pcm", "wb"); + if (!_playDataFile) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Could not open file for writing playout data"); + } +#endif +} + +// ---------------------------------------------------------------------------- +// AudioDeviceDummy() - dtor +// ---------------------------------------------------------------------------- + +AudioDeviceDummy::~AudioDeviceDummy() +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", __FUNCTION__); + + Terminate(); + + _ptrAudioBuffer = NULL; + + delete
&_recStartEvent; + delete &_playStartEvent; + delete &_timeEventRec; + delete &_timeEventPlay; + delete &_critSect; + + if (_playDataFile) + { + fclose(_playDataFile); + } +} + +// ============================================================================ +// API +// ============================================================================ + +// ---------------------------------------------------------------------------- +// AttachAudioBuffer +// ---------------------------------------------------------------------------- + +void AudioDeviceDummy::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + _ptrAudioBuffer = audioBuffer; + + // Inform the AudioBuffer about default settings for this implementation. + _ptrAudioBuffer->SetRecordingSampleRate(16000); + _ptrAudioBuffer->SetPlayoutSampleRate(16000); + _ptrAudioBuffer->SetRecordingChannels(1); + _ptrAudioBuffer->SetPlayoutChannels(1); +} + +// ---------------------------------------------------------------------------- +// ActiveAudioLayer +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + audioLayer = AudioDeviceModule::kDummyAudio; + return 0; +} + +// ---------------------------------------------------------------------------- +// Init +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::Init() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_initialized) + { + return 0; + } + + const bool periodic(true); + unsigned int threadID(0); + char threadName[64] = {0}; + + // RECORDING + strncpy(threadName, "webrtc_audio_module_rec_thread", 63); + _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc, this, kRealtimePriority, threadName); + if (_ptrThreadRec == NULL) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, " failed to create the rec audio thread"); + return -1; + } + + if (!_ptrThreadRec->Start(threadID)) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, " failed to start the rec audio thread"); + delete _ptrThreadRec; + _ptrThreadRec = NULL; + return -1; + } + _recThreadID = threadID; + + if (!_timeEventRec.StartTimer(periodic, REC_TIMER_PERIOD_MS)) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, " failed to start the rec timer event"); + if (_ptrThreadRec->Stop()) + { + delete _ptrThreadRec; + _ptrThreadRec = NULL; + } + else + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " unable to stop the activated rec thread"); + } + return -1; + } + + // PLAYOUT + strncpy(threadName, "webrtc_audio_module_play_thread", 63); + _ptrThreadPlay = ThreadWrapper::CreateThread(PlayThreadFunc, this, kRealtimePriority, threadName); + if (_ptrThreadPlay == NULL) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, " failed to create the play audio thread"); + return -1; + } + + threadID = 0; + if (!_ptrThreadPlay->Start(threadID)) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, " failed to start the play audio thread"); + delete _ptrThreadPlay; + _ptrThreadPlay = NULL; + return -1; + } + _playThreadID = threadID; + + if (!_timeEventPlay.StartTimer(periodic, PLAY_TIMER_PERIOD_MS)) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, " failed to 
start the play timer event"); + if (_ptrThreadPlay->Stop()) + { + delete _ptrThreadPlay; + _ptrThreadPlay = NULL; + } + else + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " unable to stop the activated play thread"); + } + return -1; + } + + _initialized = true; + + return 0; +} + +// ---------------------------------------------------------------------------- +// Terminate +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::Terminate() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (!_initialized) + { + return 0; + } + + // RECORDING + if (_ptrThreadRec) + { + ThreadWrapper* tmpThread = _ptrThreadRec; + _ptrThreadRec = NULL; + _critSect.Leave(); + + tmpThread->SetNotAlive(); + _timeEventRec.Set(); + + if (tmpThread->Stop()) + { + delete tmpThread; + } + else + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " failed to close down the rec audio thread"); + } + + _critSect.Enter(); + } + + _timeEventRec.StopTimer(); + + // PLAYOUT + if (_ptrThreadPlay) + { + ThreadWrapper* tmpThread = _ptrThreadPlay; + _ptrThreadPlay = NULL; + _critSect.Leave(); + + tmpThread->SetNotAlive(); + _timeEventPlay.Set(); + + if (tmpThread->Stop()) + { + delete tmpThread; + } + else + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " failed to close down the play audio thread"); + } + + _critSect.Enter(); + } + + _timeEventPlay.StopTimer(); + + _initialized = false; + + return 0; +} + +// ---------------------------------------------------------------------------- +// Initialized +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::Initialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + return (_initialized); +} + +// ---------------------------------------------------------------------------- +// SpeakerIsAvailable +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SpeakerIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + available = true; + + return 0; +} + +// ---------------------------------------------------------------------------- +// InitSpeaker +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::InitSpeaker() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_playing) + { + return -1; + } + + _speakerIsInitialized = true; + + return 0; +} + +// ---------------------------------------------------------------------------- +// MicrophoneIsAvailable +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::MicrophoneIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + available = true; + + return 0; +} + +// ---------------------------------------------------------------------------- +// InitMicrophone +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::InitMicrophone() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if 
(_recording) + { + return -1; + } + + _microphoneIsInitialized = true; + + return 0; +} + +// ---------------------------------------------------------------------------- +// SpeakerIsInitialized +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::SpeakerIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return (_speakerIsInitialized); +} + +// ---------------------------------------------------------------------------- +// MicrophoneIsInitialized +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::MicrophoneIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return (_microphoneIsInitialized); +} + +// ---------------------------------------------------------------------------- +// SpeakerVolumeIsAvailable +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SpeakerVolumeIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + available = false; + + return 0; +} + +// ---------------------------------------------------------------------------- +// SetSpeakerVolume +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetSpeakerVolume(WebRtc_UWord32 volume) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::SetSpeakerVolume(volume=%u)", volume); + + return -1; +} + +// ---------------------------------------------------------------------------- +// SpeakerVolume +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SpeakerVolume(WebRtc_UWord32& volume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return -1; +} + +// ---------------------------------------------------------------------------- +// SetWaveOutVolume +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetWaveOutVolume(WebRtc_UWord16 volumeLeft, WebRtc_UWord16 volumeRight) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)", + volumeLeft, volumeRight); + + return -1; +} + +// ---------------------------------------------------------------------------- +// WaveOutVolume +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::WaveOutVolume(WebRtc_UWord16& volumeLeft, WebRtc_UWord16& volumeRight) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return -1; +} + +// ---------------------------------------------------------------------------- +// MaxSpeakerVolume +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::MaxSpeakerVolume(WebRtc_UWord32& maxVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return -1; +} + +// ---------------------------------------------------------------------------- +// MinSpeakerVolume +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::MinSpeakerVolume(WebRtc_UWord32& minVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, 
kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return -1; +} + +// ---------------------------------------------------------------------------- +// SpeakerVolumeStepSize +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return -1; +} + +// ---------------------------------------------------------------------------- +// SpeakerMuteIsAvailable +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SpeakerMuteIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + available = false; + + return 0; +} + +// ---------------------------------------------------------------------------- +// SetSpeakerMute +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetSpeakerMute(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::SetSpeakerMute(enable=%u)", enable); + + return -1; +} + +// ---------------------------------------------------------------------------- +// SpeakerMute +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SpeakerMute(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return -1; +} + +// ---------------------------------------------------------------------------- +// MicrophoneMuteIsAvailable +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::MicrophoneMuteIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + available = false; + + return 0; +} + +// ---------------------------------------------------------------------------- +// SetMicrophoneMute +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetMicrophoneMute(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::SetMicrophoneMute(enable=%u)", enable); + + return -1; +} + +// ---------------------------------------------------------------------------- +// MicrophoneMute +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::MicrophoneMute(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return -1; +} + +// ---------------------------------------------------------------------------- +// MicrophoneBoostIsAvailable +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::MicrophoneBoostIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + available = false; + return 0; +} + +// ---------------------------------------------------------------------------- +// SetMicrophoneBoost +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetMicrophoneBoost(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::SetMicrophoneBoost(enable=%u)", enable); + + return 
-1; +} + +// ---------------------------------------------------------------------------- +// MicrophoneBoost +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::MicrophoneBoost(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return -1; +} + +// ---------------------------------------------------------------------------- +// StereoRecordingIsAvailable +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::StereoRecordingIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + available = false; + return 0; +} + +// ---------------------------------------------------------------------------- +// SetStereoRecording +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetStereoRecording(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::SetStereoRecording(enable=%u)", enable); + + CriticalSectionScoped lock(_critSect); + + if (enable) + { + return -1; + } + + return 0; +} + +// ---------------------------------------------------------------------------- +// StereoRecording +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::StereoRecording(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + enabled = false; + + return 0; +} + +// ---------------------------------------------------------------------------- +// StereoPlayoutIsAvailable +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::StereoPlayoutIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + available = false; + return 0; +} + +// ---------------------------------------------------------------------------- +// SetStereoPlayout +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetStereoPlayout(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::SetStereoPlayout(enable=%u)", enable); + + CriticalSectionScoped lock(_critSect); + + if (enable) + { + return -1; + } + + return 0; +} + +// ---------------------------------------------------------------------------- +// StereoPlayout +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::StereoPlayout(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + enabled = false; + + return 0; +} + +// ---------------------------------------------------------------------------- +// SetAGC +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetAGC(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::SetAGC(enable=%d)", enable); + + return -1; +} + +// ---------------------------------------------------------------------------- +// AGC +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::AGC() const +{ + // WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "%s", __FUNCTION__); + return false; +} + +// 
---------------------------------------------------------------------------- +// MicrophoneVolumeIsAvailable +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::MicrophoneVolumeIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + available = false; + + return 0; +} + +// ---------------------------------------------------------------------------- +// SetMicrophoneVolume +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetMicrophoneVolume(WebRtc_UWord32 volume) +{ + WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "AudioDeviceDummy::SetMicrophoneVolume(volume=%u)", volume); + + CriticalSectionScoped lock(_critSect); + + return -1; +} + +// ---------------------------------------------------------------------------- +// MicrophoneVolume +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::MicrophoneVolume(WebRtc_UWord32& volume) const +{ + // WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + return -1; +} + +// ---------------------------------------------------------------------------- +// MaxMicrophoneVolume +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const +{ + WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return -1; +} + +// ---------------------------------------------------------------------------- +// MinMicrophoneVolume +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::MinMicrophoneVolume(WebRtc_UWord32& minVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return -1; +} + +// ---------------------------------------------------------------------------- +// MicrophoneVolumeStepSize +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::MicrophoneVolumeStepSize(WebRtc_UWord16& stepSize) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return -1; +} + +// ---------------------------------------------------------------------------- +// PlayoutDevices +// ---------------------------------------------------------------------------- + +WebRtc_Word16 AudioDeviceDummy::PlayoutDevices() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + return 1; +} + +// ---------------------------------------------------------------------------- +// SetPlayoutDevice I (II) +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetPlayoutDevice(WebRtc_UWord16 index) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::SetPlayoutDevice(index=%u)", index); + + if (_playIsInitialized) + { + return -1; + } + + if (index != 0) + { + return -1; + } + + return 0; +} + +// ---------------------------------------------------------------------------- +// SetPlayoutDevice II (II) +// ---------------------------------------------------------------------------- + +WebRtc_Word32 
AudioDeviceDummy::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device) +{ + return -1; +} + +// ---------------------------------------------------------------------------- +// PlayoutDeviceName +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::PlayoutDeviceName(WebRtc_UWord16 index, WebRtc_Word8 name[kAdmMaxDeviceNameSize], WebRtc_Word8 guid[kAdmMaxGuidSize]) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::PlayoutDeviceName(index=%u)", index); + + if (index != 0) + { + return -1; + } + + memset(name, 0, kAdmMaxDeviceNameSize); + + if (guid != NULL) + { + memset(guid, 0, kAdmMaxGuidSize); + } + + return 0; +} + +// ---------------------------------------------------------------------------- +// RecordingDeviceName +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::RecordingDeviceName(WebRtc_UWord16 index, WebRtc_Word8 name[kAdmMaxDeviceNameSize], WebRtc_Word8 guid[kAdmMaxGuidSize]) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::RecordingDeviceName(index=%u)", index); + + if (index != 0) + { + return -1; + } + + memset(name, 0, kAdmMaxDeviceNameSize); + + if (guid != NULL) + { + memset(guid, 0, kAdmMaxGuidSize); + } + + return 0; +} + +// ---------------------------------------------------------------------------- +// RecordingDevices +// ---------------------------------------------------------------------------- + +WebRtc_Word16 AudioDeviceDummy::RecordingDevices() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + return 1; +} + +// ---------------------------------------------------------------------------- +// SetRecordingDevice I (II) +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetRecordingDevice(WebRtc_UWord16 index) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::SetRecordingDevice(index=%u)", index); + + if (_recIsInitialized) + { + return -1; + } + + if (index != 0 ) + { + return -1; + } + + return 0; +} + +// ---------------------------------------------------------------------------- +// SetRecordingDevice II (II) +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device) +{ + return -1; +} + +// ---------------------------------------------------------------------------- +// PlayoutIsAvailable +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::PlayoutIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + available = true; + + return 0; +} + +// ---------------------------------------------------------------------------- +// RecordingIsAvailable +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::RecordingIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + available = true; + + return 0; +} + +// ---------------------------------------------------------------------------- +// InitPlayout +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::InitPlayout() +{ + 
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_playing) + { + return -1; + } + + if (_playIsInitialized) + { + return 0; + } + + // Initialize the speaker (devices might have been added or removed) + if (InitSpeaker() == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " InitSpeaker() failed"); + } + + _playIsInitialized = true; + + return 0; +} + +// ---------------------------------------------------------------------------- +// InitRecording +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::InitRecording() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_recording) + { + return -1; + } + + if (_recIsInitialized) + { + return 0; + } + + // Initialize the microphone (devices might have been added or removed) + if (InitMicrophone() == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " InitMicrophone() failed"); + } + + _recIsInitialized = true; + + return 0; + +} + +// ---------------------------------------------------------------------------- +// StartRecording +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::StartRecording() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (!_recIsInitialized) + { + return -1; + } + + if (_recording) + { + return 0; + } + + _recording = true; + + return 0; +} + +// ---------------------------------------------------------------------------- +// StopRecording +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::StopRecording() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (!_recIsInitialized) + { + return 0; + } + + _recIsInitialized = false; + _recording = false; + + return 0; +} + +// ---------------------------------------------------------------------------- +// RecordingIsInitialized +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::RecordingIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + return (_recIsInitialized); +} + +// ---------------------------------------------------------------------------- +// Recording +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::Recording() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + return (_recording); +} + +// ---------------------------------------------------------------------------- +// PlayoutIsInitialized +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::PlayoutIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return (_playIsInitialized); +} + +// ---------------------------------------------------------------------------- +// StartPlayout +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::StartPlayout() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (!_playIsInitialized) + { + return -1; + } + 
+ if (_playing) + { + return 0; + } + + _playing = true; + + return 0; +} + +// ---------------------------------------------------------------------------- +// StopPlayout +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::StopPlayout() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + if (!_playIsInitialized) + { + return 0; + } + + _playIsInitialized = false; + _playing = false; + + return 0; +} + +// ---------------------------------------------------------------------------- +// PlayoutDelay +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::PlayoutDelay(WebRtc_UWord16& delayMS) const +{ + CriticalSectionScoped lock(_critSect); + delayMS = 0; + return 0; +} + +// ---------------------------------------------------------------------------- +// RecordingDelay +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::RecordingDelay(WebRtc_UWord16& delayMS) const +{ + CriticalSectionScoped lock(_critSect); + delayMS = 0; + return 0; +} + +// ---------------------------------------------------------------------------- +// Playing +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::Playing() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + return (_playing); +} +// ---------------------------------------------------------------------------- +// SetPlayoutBuffer +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::SetPlayoutBuffer(const AudioDeviceModule::BufferType type, WebRtc_UWord16 sizeMS) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "AudioDeviceDummy::SetPlayoutBuffer(type=%u, sizeMS=%u)", type, sizeMS); + + CriticalSectionScoped lock(_critSect); + + // Just ignore + + return 0; +} + +// ---------------------------------------------------------------------------- +// PlayoutBuffer +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::PlayoutBuffer(AudioDeviceModule::BufferType& type, WebRtc_UWord16& sizeMS) const +{ + CriticalSectionScoped lock(_critSect); + + type = AudioDeviceModule::kAdaptiveBufferSize; + sizeMS = 0; + + return 0; +} + +// ---------------------------------------------------------------------------- +// CPULoad +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceDummy::CPULoad(WebRtc_UWord16& load) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + load = 0; + + return 0; +} + +// ---------------------------------------------------------------------------- +// PlayoutWarning +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::PlayoutWarning() const +{ + return false; +} + +// ---------------------------------------------------------------------------- +// PlayoutError +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::PlayoutError() const +{ + return false; +} + +// ---------------------------------------------------------------------------- +// RecordingWarning +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::RecordingWarning() const +{ + return false; +} 
+ +// ---------------------------------------------------------------------------- +// RecordingError +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::RecordingError() const +{ + return false; +} + +// ---------------------------------------------------------------------------- +// ClearPlayoutWarning +// ---------------------------------------------------------------------------- + +void AudioDeviceDummy::ClearPlayoutWarning() +{ +} + +// ---------------------------------------------------------------------------- +// ClearPlayoutError +// ---------------------------------------------------------------------------- + +void AudioDeviceDummy::ClearPlayoutError() +{ +} + +// ---------------------------------------------------------------------------- +// ClearRecordingWarning +// ---------------------------------------------------------------------------- + +void AudioDeviceDummy::ClearRecordingWarning() +{ +} + +// ---------------------------------------------------------------------------- +// ClearRecordingError +// ---------------------------------------------------------------------------- + +void AudioDeviceDummy::ClearRecordingError() +{ +} + +// ============================================================================ +// Thread Methods +// ============================================================================ + +// ---------------------------------------------------------------------------- +// PlayThreadFunc +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::PlayThreadFunc(void* pThis) +{ + return (static_cast<AudioDeviceDummy*>(pThis)->PlayThreadProcess()); +} + +// ---------------------------------------------------------------------------- +// RecThreadFunc +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::RecThreadFunc(void* pThis) +{ + return (static_cast<AudioDeviceDummy*>(pThis)->RecThreadProcess()); +} + +// ---------------------------------------------------------------------------- +// PlayThreadProcess +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::PlayThreadProcess() +{ + switch (_timeEventPlay.Wait(1000)) + { + case kEventSignaled: + break; + case kEventError: + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + "EventWrapper::Wait() failed => restarting timer"); + _timeEventPlay.StopTimer(); + _timeEventPlay.StartTimer(true, PLAY_TIMER_PERIOD_MS); + return true; + case kEventTimeout: + return true; + } + + Lock(); + + if (_playing) + { + WebRtc_Word8 playBuffer[2*160]; + + UnLock(); + WebRtc_Word32 nSamples = (WebRtc_Word32)_ptrAudioBuffer->RequestPlayoutData(160); + Lock(); + + if (!_playing) + { + UnLock(); + return true; + } + + nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer); + if (nSamples != 160) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " invalid number of output samples (%d)", nSamples); + } + + if (_playDataFile) + { + int wr = fwrite(playBuffer, 2, 160, _playDataFile); + if (wr != 160) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Could not write playout data to file (%d) ferror = %d", + wr, ferror(_playDataFile)); + } + } + } + + UnLock(); + return true; +} + +// ---------------------------------------------------------------------------- +// RecThreadProcess +// ---------------------------------------------------------------------------- + +bool AudioDeviceDummy::RecThreadProcess() +{ + switch
(_timeEventRec.Wait(1000)) + { + case kEventSignaled: + break; + case kEventError: + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + "EventWrapper::Wait() failed => restarting timer"); + _timeEventRec.StopTimer(); + _timeEventRec.StartTimer(true, REC_TIMER_PERIOD_MS); + return true; + case kEventTimeout: + return true; + } + + Lock(); + + if (_recording) + { + // Store the recorded buffer + _ptrAudioBuffer->SetRecordedBuffer(_recBuffer, 160); + + // Store VQE delay values + _ptrAudioBuffer->SetVQEData(0, 0, 0); + + // Deliver recorded samples (at the specified sample rate, mic level etc.) to the observer using a callback + UnLock(); + _ptrAudioBuffer->DeliverRecordedData(); + } + else + { + UnLock(); + } + + return true; +} + +} // namespace webrtc diff --git a/src/modules/audio_device/main/source/dummy/audio_device_dummy.h b/src/modules/audio_device/main/source/dummy/audio_device_dummy.h new file mode 100644 index 000000000..5a979d2af --- /dev/null +++ b/src/modules/audio_device/main/source/dummy/audio_device_dummy.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H +#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H + +#include <stdio.h> + +#include "audio_device_generic.h" +#include "critical_section_wrapper.h" + +namespace webrtc { +class EventWrapper; +class ThreadWrapper; + +class AudioDeviceDummy : public AudioDeviceGeneric +{ +public: + AudioDeviceDummy(const WebRtc_Word32 id); + ~AudioDeviceDummy(); + + // Retrieve the currently utilized audio layer + virtual WebRtc_Word32 ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const; + + // Main initialization and termination + virtual WebRtc_Word32 Init(); + virtual WebRtc_Word32 Terminate(); + virtual bool Initialized() const; + + // Device enumeration + virtual WebRtc_Word16 PlayoutDevices(); + virtual WebRtc_Word16 RecordingDevices(); + virtual WebRtc_Word32 PlayoutDeviceName(WebRtc_UWord16 index, WebRtc_Word8 name[kAdmMaxDeviceNameSize], WebRtc_Word8 guid[kAdmMaxGuidSize]); + virtual WebRtc_Word32 RecordingDeviceName(WebRtc_UWord16 index, WebRtc_Word8 name[kAdmMaxDeviceNameSize], WebRtc_Word8 guid[kAdmMaxGuidSize]); + + // Device selection + virtual WebRtc_Word32 SetPlayoutDevice(WebRtc_UWord16 index); + virtual WebRtc_Word32 SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device); + virtual WebRtc_Word32 SetRecordingDevice(WebRtc_UWord16 index); + virtual WebRtc_Word32 SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device); + + // Audio transport initialization + virtual WebRtc_Word32 PlayoutIsAvailable(bool& available); + virtual WebRtc_Word32 InitPlayout(); + virtual bool PlayoutIsInitialized() const; + virtual WebRtc_Word32 RecordingIsAvailable(bool& available); + virtual WebRtc_Word32 InitRecording(); + virtual bool RecordingIsInitialized() const; + + // Audio transport control + virtual WebRtc_Word32 StartPlayout(); + virtual WebRtc_Word32 StopPlayout(); + virtual bool Playing() const; + virtual WebRtc_Word32 StartRecording(); + virtual WebRtc_Word32 StopRecording(); + virtual bool Recording() const; + + // Microphone Automatic Gain Control (AGC) + virtual WebRtc_Word32 SetAGC(bool
enable); + virtual bool AGC() const; + + // Volume control based on the Windows Wave API (Windows only) + virtual WebRtc_Word32 SetWaveOutVolume(WebRtc_UWord16 volumeLeft, WebRtc_UWord16 volumeRight); + virtual WebRtc_Word32 WaveOutVolume(WebRtc_UWord16& volumeLeft, WebRtc_UWord16& volumeRight) const; + + // Audio mixer initialization + virtual WebRtc_Word32 SpeakerIsAvailable(bool& available); + virtual WebRtc_Word32 InitSpeaker(); + virtual bool SpeakerIsInitialized() const; + virtual WebRtc_Word32 MicrophoneIsAvailable(bool& available); + virtual WebRtc_Word32 InitMicrophone(); + virtual bool MicrophoneIsInitialized() const; + + // Speaker volume controls + virtual WebRtc_Word32 SpeakerVolumeIsAvailable(bool& available); + virtual WebRtc_Word32 SetSpeakerVolume(WebRtc_UWord32 volume); + virtual WebRtc_Word32 SpeakerVolume(WebRtc_UWord32& volume) const; + virtual WebRtc_Word32 MaxSpeakerVolume(WebRtc_UWord32& maxVolume) const; + virtual WebRtc_Word32 MinSpeakerVolume(WebRtc_UWord32& minVolume) const; + virtual WebRtc_Word32 SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const; + + // Microphone volume controls + virtual WebRtc_Word32 MicrophoneVolumeIsAvailable(bool& available); + virtual WebRtc_Word32 SetMicrophoneVolume(WebRtc_UWord32 volume); + virtual WebRtc_Word32 MicrophoneVolume(WebRtc_UWord32& volume) const; + virtual WebRtc_Word32 MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const; + virtual WebRtc_Word32 MinMicrophoneVolume(WebRtc_UWord32& minVolume) const; + virtual WebRtc_Word32 MicrophoneVolumeStepSize(WebRtc_UWord16& stepSize) const; + + // Speaker mute control + virtual WebRtc_Word32 SpeakerMuteIsAvailable(bool& available); + virtual WebRtc_Word32 SetSpeakerMute(bool enable); + virtual WebRtc_Word32 SpeakerMute(bool& enabled) const; + + // Microphone mute control + virtual WebRtc_Word32 MicrophoneMuteIsAvailable(bool& available); + virtual WebRtc_Word32 SetMicrophoneMute(bool enable); + virtual WebRtc_Word32 MicrophoneMute(bool& enabled) const; + + // Microphone boost control + virtual WebRtc_Word32 MicrophoneBoostIsAvailable(bool& available); + virtual WebRtc_Word32 SetMicrophoneBoost(bool enable); + virtual WebRtc_Word32 MicrophoneBoost(bool& enabled) const; + + // Stereo support + virtual WebRtc_Word32 StereoPlayoutIsAvailable(bool& available); + virtual WebRtc_Word32 SetStereoPlayout(bool enable); + virtual WebRtc_Word32 StereoPlayout(bool& enabled) const; + virtual WebRtc_Word32 StereoRecordingIsAvailable(bool& available); + virtual WebRtc_Word32 SetStereoRecording(bool enable); + virtual WebRtc_Word32 StereoRecording(bool& enabled) const; + + // Delay information and control + virtual WebRtc_Word32 SetPlayoutBuffer(const AudioDeviceModule::BufferType type, WebRtc_UWord16 sizeMS); + virtual WebRtc_Word32 PlayoutBuffer(AudioDeviceModule::BufferType& type, WebRtc_UWord16& sizeMS) const; + virtual WebRtc_Word32 PlayoutDelay(WebRtc_UWord16& delayMS) const; + virtual WebRtc_Word32 RecordingDelay(WebRtc_UWord16& delayMS) const; + + // CPU load + virtual WebRtc_Word32 CPULoad(WebRtc_UWord16& load) const; + + virtual bool PlayoutWarning() const; + virtual bool PlayoutError() const; + virtual bool RecordingWarning() const; + virtual bool RecordingError() const; + virtual void ClearPlayoutWarning(); + virtual void ClearPlayoutError(); + virtual void ClearRecordingWarning(); + virtual void ClearRecordingError(); + + virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer); + +private: + void Lock() { _critSect.Enter(); }; + void UnLock() { _critSect.Leave(); }; + + 
static bool RecThreadFunc(void*); + static bool PlayThreadFunc(void*); + bool RecThreadProcess(); + bool PlayThreadProcess(); + + AudioDeviceBuffer* _ptrAudioBuffer; + CriticalSectionWrapper& _critSect; + WebRtc_Word32 _id; + + EventWrapper& _timeEventRec; + EventWrapper& _timeEventPlay; + EventWrapper& _recStartEvent; + EventWrapper& _playStartEvent; + + ThreadWrapper* _ptrThreadRec; + ThreadWrapper* _ptrThreadPlay; + WebRtc_UWord32 _recThreadID; + WebRtc_UWord32 _playThreadID; + + bool _initialized; + bool _recording; + bool _playing; + bool _recIsInitialized; + bool _playIsInitialized; + bool _speakerIsInitialized; + bool _microphoneIsInitialized; + + WebRtc_Word8 _recBuffer[2*160]; + + FILE* _playDataFile; +}; + +} // namespace webrtc + +#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_DUMMY_H diff --git a/src/modules/audio_device/main/source/dummy/audio_device_utility_dummy.cc b/src/modules/audio_device/main/source/dummy/audio_device_utility_dummy.cc new file mode 100644 index 000000000..8c1945964 --- /dev/null +++ b/src/modules/audio_device/main/source/dummy/audio_device_utility_dummy.cc @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_device_utility_dummy.h" +#include "audio_device_config.h" // DEBUG_PRINT() +#include "critical_section_wrapper.h" +#include "trace.h" + +namespace webrtc +{ + +AudioDeviceUtilityDummy::AudioDeviceUtilityDummy(const WebRtc_Word32 id) : + _critSect(*CriticalSectionWrapper::CreateCriticalSection()), + _id(id), + _lastError(AudioDeviceModule::kAdmErrNone) +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, + "%s created", __FUNCTION__); +} + +AudioDeviceUtilityDummy::~AudioDeviceUtilityDummy() +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, + "%s destroyed", __FUNCTION__); + { + CriticalSectionScoped lock(_critSect); + + // free stuff here... + } + + delete &_critSect; +} + +// ============================================================================ +// API +// ============================================================================ + + +WebRtc_Word32 AudioDeviceUtilityDummy::Init() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id, + " OS info: %s", "Dummy"); + + return 0; +} + + +} // namespace webrtc diff --git a/src/modules/audio_device/main/source/dummy/audio_device_utility_dummy.h b/src/modules/audio_device/main/source/dummy/audio_device_utility_dummy.h new file mode 100644 index 000000000..601c4489e --- /dev/null +++ b/src/modules/audio_device/main/source/dummy/audio_device_utility_dummy.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_DUMMY_H +#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_DUMMY_H + +#include "audio_device_utility.h" +#include "audio_device.h" + +namespace webrtc +{ +class CriticalSectionWrapper; + +class AudioDeviceUtilityDummy: public AudioDeviceUtility +{ +public: + AudioDeviceUtilityDummy(const WebRtc_Word32 id); + ~AudioDeviceUtilityDummy(); + + virtual WebRtc_Word32 Init(); + +private: + CriticalSectionWrapper& _critSect; + WebRtc_Word32 _id; + AudioDeviceModule::ErrorCode _lastError; +}; + +} // namespace webrtc + +#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_DUMMY_H diff --git a/src/modules/audio_device/main/source/linux/alsasymboltable_linux.cc b/src/modules/audio_device/main/source/linux/alsasymboltable_linux.cc new file mode 100644 index 000000000..1b1707cb4 --- /dev/null +++ b/src/modules/audio_device/main/source/linux/alsasymboltable_linux.cc @@ -0,0 +1,39 @@ +/* + * libjingle + * Copyright 2004--2010, Google Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "alsasymboltable_linux.h" + +namespace webrtc_adm_linux_alsa { + +LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(AlsaSymbolTable, "libasound.so.2") +#define X(sym) \ + LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(AlsaSymbolTable, sym) +ALSA_SYMBOLS_LIST +#undef X +LATE_BINDING_SYMBOL_TABLE_DEFINE_END(AlsaSymbolTable) + +} // namespace webrtc_adm_linux_alsa diff --git a/src/modules/audio_device/main/source/linux/alsasymboltable_linux.h b/src/modules/audio_device/main/source/linux/alsasymboltable_linux.h new file mode 100644 index 000000000..1915a6889 --- /dev/null +++ b/src/modules/audio_device/main/source/linux/alsasymboltable_linux.h @@ -0,0 +1,144 @@ +/* + * libjingle + * Copyright 2004--2010, Google Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2.
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WEBRTC_AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H +#define WEBRTC_AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H + +#include "latebindingsymboltable_linux.h" + +namespace webrtc_adm_linux_alsa { + +// The ALSA symbols we need, as an X-Macro list. +// This list must contain precisely every libasound function that is used in +// alsasoundsystem.cc. +#define ALSA_SYMBOLS_LIST \ + X(snd_device_name_free_hint) \ + X(snd_device_name_get_hint) \ + X(snd_device_name_hint) \ + X(snd_pcm_avail_update) \ + X(snd_pcm_close) \ + X(snd_pcm_delay) \ + X(snd_pcm_drop) \ + X(snd_pcm_open) \ + X(snd_pcm_prepare) \ + X(snd_pcm_readi) \ + X(snd_pcm_recover) \ + X(snd_pcm_resume) \ + X(snd_pcm_reset) \ + X(snd_pcm_state) \ + X(snd_pcm_set_params) \ + X(snd_pcm_start) \ + X(snd_pcm_stream) \ + X(snd_pcm_wait) \ + X(snd_pcm_writei) \ + X(snd_pcm_info_get_class) \ + X(snd_pcm_info_get_subdevices_avail) \ + X(snd_pcm_info_get_subdevice_name) \ + X(snd_pcm_info_set_subdevice) \ + X(snd_pcm_info_get_id) \ + X(snd_pcm_info_set_device) \ + X(snd_pcm_info_set_stream) \ + X(snd_pcm_info_get_name) \ + X(snd_pcm_info_get_subdevices_count) \ + X(snd_pcm_info_sizeof) \ + X(snd_pcm_hw_params) \ + X(snd_pcm_hw_params_malloc) \ + X(snd_pcm_hw_params_free) \ + X(snd_pcm_hw_params_any) \ + X(snd_pcm_hw_params_set_access) \ + X(snd_pcm_hw_params_set_format) \ + X(snd_pcm_hw_params_set_channels) \ + X(snd_pcm_hw_params_set_rate_near) \ + X(snd_pcm_hw_params_set_buffer_size_near) \ + X(snd_card_next) \ + X(snd_card_get_name) \ + X(snd_config_update) \ + X(snd_config_copy) \ + X(snd_config_get_id) \ + X(snd_ctl_open) \ + X(snd_ctl_close) \ + X(snd_ctl_card_info) \ + X(snd_ctl_card_info_sizeof) \ + X(snd_ctl_card_info_get_id) \ + X(snd_ctl_card_info_get_name) \ + X(snd_ctl_pcm_next_device) \ + X(snd_ctl_pcm_info) \ + X(snd_mixer_load) \ + X(snd_mixer_free) \ + X(snd_mixer_detach) \ + X(snd_mixer_close) \ + X(snd_mixer_open) \ + X(snd_mixer_attach) \ + X(snd_mixer_first_elem) \ + X(snd_mixer_elem_next) \ + X(snd_mixer_selem_get_name) \ + X(snd_mixer_selem_is_active) \ + X(snd_mixer_selem_register) \ + X(snd_mixer_selem_set_playback_volume_all) \ + X(snd_mixer_selem_get_playback_volume) \ + X(snd_mixer_selem_has_playback_volume) \ + X(snd_mixer_selem_get_playback_volume_range) \ + X(snd_mixer_selem_has_playback_switch) \ + X(snd_mixer_selem_get_playback_switch) \ + X(snd_mixer_selem_set_playback_switch_all) \ + 
X(snd_mixer_selem_has_capture_switch) \
+ X(snd_mixer_selem_get_capture_switch) \
+ X(snd_mixer_selem_set_capture_switch_all) \
+ X(snd_mixer_selem_has_capture_volume) \
+ X(snd_mixer_selem_set_capture_volume_all) \
+ X(snd_mixer_selem_get_capture_volume) \
+ X(snd_mixer_selem_get_capture_volume_range) \
+ X(snd_dlopen) \
+ X(snd_dlclose) \
+ X(snd_config) \
+ X(snd_config_search) \
+ X(snd_config_get_string) \
+ X(snd_config_search_definition) \
+ X(snd_config_get_type) \
+ X(snd_config_delete) \
+ X(snd_config_iterator_entry) \
+ X(snd_config_iterator_first) \
+ X(snd_config_iterator_next) \
+ X(snd_config_iterator_end) \
+ X(snd_config_delete_compound_members) \
+ X(snd_config_get_integer) \
+ X(snd_config_get_bool) \
+ X(snd_dlsym) \
+ X(snd_strerror) \
+ X(snd_lib_error) \
+ X(snd_lib_error_set_handler)
+
+LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(AlsaSymbolTable)
+#define X(sym) \
+    LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(AlsaSymbolTable, sym)
+ALSA_SYMBOLS_LIST
+#undef X
+LATE_BINDING_SYMBOL_TABLE_DECLARE_END(AlsaSymbolTable)
+
+} // namespace webrtc_adm_linux_alsa
+
+#endif // WEBRTC_AUDIO_DEVICE_ALSASYMBOLTABLE_LINUX_H
diff --git a/src/modules/audio_device/main/source/linux/audio_device_alsa_linux.cc b/src/modules/audio_device/main/source/linux/audio_device_alsa_linux.cc
new file mode 100644
index 000000000..7d36ea9b4
--- /dev/null
+++ b/src/modules/audio_device/main/source/linux/audio_device_alsa_linux.cc
@@ -0,0 +1,3690 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "audio_device_utility.h"
+#include "audio_device_alsa_linux.h"
+#include "audio_device_config.h"
+
+#include "event_wrapper.h"
+#include "trace.h"
+#include "thread_wrapper.h"
+
+
+webrtc_adm_linux_alsa::AlsaSymbolTable AlsaSymbolTable;
+
+// Accesses ALSA functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libasound, which means our binary
+// will work on systems that don't have it.
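+//
+// Illustrative sketch of the mechanism (an assumed equivalent, not the actual
+// macro expansion): the DECLARE/DEFINE macros in latebindingsymboltable_linux.h
+// expand the X-macro ALSA_SYMBOLS_LIST into a table of function pointers that
+// is resolved at runtime, roughly as if one wrote by hand:
+//
+//   #include <dlfcn.h>
+//   typedef const char* (*snd_strerror_fn)(int errnum);
+//   struct MiniAlsaTable {
+//       void* lib;                     // handle from dlopen()
+//       snd_strerror_fn snd_strerror_; // one entry per X(...) in the list
+//       MiniAlsaTable() : lib(NULL), snd_strerror_(NULL) {}
+//       bool Load() {
+//           lib = dlopen("libasound.so.2", RTLD_NOW); // fails cleanly if
+//           if (lib == NULL) return false;            // libasound is absent
+//           snd_strerror_ = reinterpret_cast<snd_strerror_fn>(
+//               dlsym(lib, "snd_strerror"));
+//           return snd_strerror_ != NULL;
+//       }
+//   };
+//
+// LATE(snd_strerror)(err) below then plays the role of table.snd_strerror_(err).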
+#define LATE(sym) \
+    LATESYM_GET(webrtc_adm_linux_alsa::AlsaSymbolTable, &AlsaSymbolTable, sym)
+
+// Redefine these here to be able to do late-binding
+#undef snd_ctl_card_info_alloca
+#define snd_ctl_card_info_alloca(ptr) \
+    do { *ptr = (snd_ctl_card_info_t *) \
+        __builtin_alloca (LATE(snd_ctl_card_info_sizeof)()); \
+        memset(*ptr, 0, LATE(snd_ctl_card_info_sizeof)()); } while (0)
+
+#undef snd_pcm_info_alloca
+#define snd_pcm_info_alloca(pInfo) \
+    do { *pInfo = (snd_pcm_info_t *) \
+        __builtin_alloca (LATE(snd_pcm_info_sizeof)()); \
+        memset(*pInfo, 0, LATE(snd_pcm_info_sizeof)()); } while (0)
+
+// snd_lib_error_handler_t
+void WebrtcAlsaErrorHandler(const char *file,
+                            int line,
+                            const char *function,
+                            int err,
+                            const char *fmt,...){}
+
+namespace webrtc
+{
+AudioDeviceLinuxALSA::AudioDeviceLinuxALSA(const WebRtc_Word32 id) :
+    _ptrAudioBuffer(NULL),
+    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _timeEventRec(*EventWrapper::Create()),
+    _timeEventPlay(*EventWrapper::Create()),
+    _recStartEvent(*EventWrapper::Create()),
+    _playStartEvent(*EventWrapper::Create()),
+    _ptrThreadRec(NULL),
+    _ptrThreadPlay(NULL),
+    _recThreadID(0),
+    _playThreadID(0),
+    _id(id),
+    _mixerManager(id),
+    _inputDeviceIndex(0),
+    _outputDeviceIndex(0),
+    _inputDeviceIsSpecified(false),
+    _outputDeviceIsSpecified(false),
+    _handleRecord(NULL),
+    _handlePlayout(NULL),
+    _recSndcardBuffsize(ALSA_SNDCARD_BUFF_SIZE_REC),
+    _playSndcardBuffsize(ALSA_SNDCARD_BUFF_SIZE_PLAY),
+    _samplingFreqRec(REC_SAMPLES_PER_MS),
+    _samplingFreqPlay(PLAY_SAMPLES_PER_MS),
+    _recChannels(1),
+    _playChannels(1),
+    _playbackBufferSize(0),
+    _recordBufferSize(0),
+    _recBuffer(NULL),
+    _playBufType(AudioDeviceModule::kAdaptiveBufferSize),
+    _initialized(false),
+    _recording(false),
+    _playing(false),
+    _recIsInitialized(false),
+    _playIsInitialized(false),
+    _startRec(false),
+    _stopRec(false),
+    _startPlay(false),
+    _stopPlay(false),
+    _AGC(false),
+    _buffersizeFromZeroAvail(true),
+    _buffersizeFromZeroDelay(true),
+    _sndCardPlayDelay(0),
+    _previousSndCardPlayDelay(0),
+    _delayMonitorStatePlay(0),
+    _largeDelayCountPlay(0),
+    _sndCardRecDelay(0),
+    _numReadyRecSamples(0),
+    _bufferCheckMethodPlay(0),
+    _bufferCheckMethodRec(0),
+    _bufferCheckErrorsPlay(0),
+    _bufferCheckErrorsRec(0),
+    _lastBufferCheckValuePlay(0),
+    _writeErrors(0),
+    _playWarning(0),
+    _playError(0),
+    _recWarning(0),
+    _recError(0),
+    _playBufDelay(80),
+    _playBufDelayFixed(80)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
+                 "%s created", __FUNCTION__);
+}
+
+// ----------------------------------------------------------------------------
+//  AudioDeviceLinuxALSA - dtor
+// ----------------------------------------------------------------------------
+
+AudioDeviceLinuxALSA::~AudioDeviceLinuxALSA()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
+                 "%s destroyed", __FUNCTION__);
+
+    Terminate();
+
+    if (_recBuffer)
+    {
+        delete [] _recBuffer;  // allocated with new[] in InitRecording
+    }
+    delete &_recStartEvent;
+    delete &_playStartEvent;
+    delete &_timeEventRec;
+    delete &_timeEventPlay;
+    delete &_critSect;
+}
+
+void AudioDeviceLinuxALSA::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    CriticalSectionScoped lock(_critSect);
+
+    _ptrAudioBuffer = audioBuffer;
+
+    // Inform the AudioBuffer about default settings for this implementation.
+    // Set all values to zero here since the actual settings will be done by
+    // InitPlayout and InitRecording later.
+ _ptrAudioBuffer->SetRecordingSampleRate(0); + _ptrAudioBuffer->SetPlayoutSampleRate(0); + _ptrAudioBuffer->SetRecordingChannels(0); + _ptrAudioBuffer->SetPlayoutChannels(0); +} + +WebRtc_Word32 AudioDeviceLinuxALSA::ActiveAudioLayer( + AudioDeviceModule::AudioLayer& audioLayer) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + audioLayer = AudioDeviceModule::kLinuxAlsaAudio; + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::Init() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + // Load libasound + if (!AlsaSymbolTable.Load()) + { + // Alsa is not installed on + // this system + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to load symbol table"); + return -1; + } + + if (_initialized) + { + return 0; + } + + _playWarning = 0; + _playError = 0; + _recWarning = 0; + _recError = 0; + + // RECORDING + const char* threadName = "webrtc_audio_module_rec_thread"; + _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc, + this, + kRealtimePriority, + threadName); + if (_ptrThreadRec == NULL) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, + " failed to create the rec audio thread"); + return -1; + } + + unsigned int threadID(0); + if (!_ptrThreadRec->Start(threadID)) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, + " failed to start the rec audio thread"); + delete _ptrThreadRec; + _ptrThreadRec = NULL; + return -1; + } + _recThreadID = threadID; + + const bool periodic(true); + if (!_timeEventRec.StartTimer(periodic, REC_TIMER_PERIOD_MS)) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, + " failed to start the rec timer event"); + if (_ptrThreadRec->Stop()) + { + delete _ptrThreadRec; + _ptrThreadRec = NULL; + } + else + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " unable to stop the activated rec thread"); + } + return -1; + } + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " periodic rec timer (dT=%d) is now active", + REC_TIMER_PERIOD_MS); + + // PLAYOUT + threadName = "webrtc_audio_module_play_thread"; + _ptrThreadPlay = ThreadWrapper::CreateThread(PlayThreadFunc, + this, + kRealtimePriority, + threadName); + if (_ptrThreadPlay == NULL) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, + " failed to create the play audio thread"); + return -1; + } + + threadID = 0; + if (!_ptrThreadPlay->Start(threadID)) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, + " failed to start the play audio thread"); + delete _ptrThreadPlay; + _ptrThreadPlay = NULL; + return -1; + } + _playThreadID = threadID; + + if (!_timeEventPlay.StartTimer(periodic, PLAY_TIMER_PERIOD_MS)) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, + " failed to start the play timer event"); + if (_ptrThreadPlay->Stop()) + { + delete _ptrThreadPlay; + _ptrThreadPlay = NULL; + } + else + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " unable to stop the activated play thread"); + } + return -1; + } + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " periodic play timer (dT=%d) is now active", PLAY_TIMER_PERIOD_MS); + + _initialized = true; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::Terminate() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (!_initialized) + { + return 0; + } + + CriticalSectionScoped lock(_critSect); + + _mixerManager.Close(); + + // RECORDING + if (_ptrThreadRec) + { + ThreadWrapper* tmpThread = _ptrThreadRec; + 
_ptrThreadRec = NULL;
+        _critSect.Leave();
+
+        tmpThread->SetNotAlive();
+        _timeEventRec.Set();
+
+        if (tmpThread->Stop())
+        {
+            delete tmpThread;
+        }
+        else
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                         " failed to close down the rec audio thread");
+        }
+
+        _critSect.Enter();
+    }
+
+    _timeEventRec.StopTimer();
+
+    // PLAYOUT
+    if (_ptrThreadPlay)
+    {
+        ThreadWrapper* tmpThread = _ptrThreadPlay;
+        _ptrThreadPlay = NULL;
+        _critSect.Leave();
+
+        tmpThread->SetNotAlive();
+        _timeEventPlay.Set();
+
+        if (tmpThread->Stop())
+        {
+            delete tmpThread;
+        }
+        else
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                         " failed to close down the play audio thread");
+        }
+
+        _critSect.Enter();
+    }
+
+    _timeEventPlay.StopTimer();
+
+    _initialized = false;
+    _outputDeviceIsSpecified = false;
+    _inputDeviceIsSpecified = false;
+
+    return 0;
+}
+
+bool AudioDeviceLinuxALSA::Initialized() const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+    return (_initialized);
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::SpeakerIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+    // Make an attempt to open up the
+    // output mixer corresponding to the currently selected output device.
+    //
+    if (!wasInitialized && InitSpeaker() == -1)
+    {
+        available = false;
+        return 0;
+    }
+
+    // Given that InitSpeaker was successful, we know that a valid speaker
+    // exists
+    available = true;
+
+    // Close the initialized output mixer
+    //
+    if (!wasInitialized)
+    {
+        _mixerManager.CloseSpeaker();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::InitSpeaker()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    CriticalSectionScoped lock(_critSect);
+
+    if (_playing)
+    {
+        return -1;
+    }
+
+    char devName[kAdmMaxDeviceNameSize] = {0};
+    GetDevicesInfo(2, true, _outputDeviceIndex, devName, kAdmMaxDeviceNameSize);
+    return _mixerManager.OpenSpeaker(devName);
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::MicrophoneIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+    // Make an attempt to open up the
+    // input mixer corresponding to the currently selected input device.
+ // + if (!wasInitialized && InitMicrophone() == -1) + { + available = false; + return 0; + } + + // Given that InitMicrophone was successful, we know that a valid + // microphone exists + available = true; + + // Close the initialized input mixer + // + if (!wasInitialized) + { + _mixerManager.CloseMicrophone(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::InitMicrophone() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_recording) + { + return -1; + } + + char devName[kAdmMaxDeviceNameSize] = {0}; + GetDevicesInfo(2, false, _inputDeviceIndex, devName, kAdmMaxDeviceNameSize); + return _mixerManager.OpenMicrophone(devName); +} + +bool AudioDeviceLinuxALSA::SpeakerIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_mixerManager.SpeakerIsInitialized()); +} + +bool AudioDeviceLinuxALSA::MicrophoneIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_mixerManager.MicrophoneIsInitialized()); +} + +WebRtc_Word32 AudioDeviceLinuxALSA::SpeakerVolumeIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device. + if (!wasInitialized && InitSpeaker() == -1) + { + // If we end up here it means that the selected speaker has no volume + // control. + available = false; + return 0; + } + + // Given that InitSpeaker was successful, we know that a volume control + // exists + available = true; + + // Close the initialized output mixer + if (!wasInitialized) + { + _mixerManager.CloseSpeaker(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::SetSpeakerVolume(WebRtc_UWord32 volume) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "SetSpeakerVolume(volume=%u)", volume); + + return (_mixerManager.SetSpeakerVolume(volume)); +} + +WebRtc_Word32 AudioDeviceLinuxALSA::SpeakerVolume(WebRtc_UWord32& volume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord32 level(0); + + if (_mixerManager.SpeakerVolume(level) == -1) + { + return -1; + } + + volume = level; + + return 0; +} + + +WebRtc_Word32 AudioDeviceLinuxALSA::SetWaveOutVolume(WebRtc_UWord16 volumeLeft, + WebRtc_UWord16 volumeRight) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)", + volumeLeft, volumeRight); + + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " API call not supported on this platform"); + return -1; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::WaveOutVolume( + WebRtc_UWord16& /*volumeLeft*/, + WebRtc_UWord16& /*volumeRight*/) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " API call not supported on this platform"); + return -1; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::MaxSpeakerVolume( + WebRtc_UWord32& maxVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord32 maxVol(0); + + if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) + { + return -1; + } + + maxVolume = maxVol; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::MinSpeakerVolume( + WebRtc_UWord32& minVolume) const +{ + 
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord32 minVol(0); + + if (_mixerManager.MinSpeakerVolume(minVol) == -1) + { + return -1; + } + + minVolume = minVol; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::SpeakerVolumeStepSize( + WebRtc_UWord16& stepSize) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord16 delta(0); + + if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) + { + return -1; + } + + stepSize = delta; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::SpeakerMuteIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool isAvailable(false); + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device. + // + if (!wasInitialized && InitSpeaker() == -1) + { + // If we end up here it means that the selected speaker has no volume + // control, hence it is safe to state that there is no mute control + // already at this stage. + available = false; + return 0; + } + + // Check if the selected speaker has a mute control + _mixerManager.SpeakerMuteIsAvailable(isAvailable); + + available = isAvailable; + + // Close the initialized output mixer + if (!wasInitialized) + { + _mixerManager.CloseSpeaker(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::SetSpeakerMute(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "SetSpeakerMute(enable=%u)", enable); + return (_mixerManager.SetSpeakerMute(enable)); +} + +WebRtc_Word32 AudioDeviceLinuxALSA::SpeakerMute(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool muted(0); + + if (_mixerManager.SpeakerMute(muted) == -1) + { + return -1; + } + + enabled = muted; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::MicrophoneMuteIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool isAvailable(false); + bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); + + // Make an attempt to open up the + // input mixer corresponding to the currently selected input device. + // + if (!wasInitialized && InitMicrophone() == -1) + { + // If we end up here it means that the selected microphone has no volume + // control, hence it is safe to state that there is no mute control + // already at this stage. 
+        available = false;
+        return 0;
+    }
+
+    // Check if the selected microphone has a mute control
+    //
+    _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+    available = isAvailable;
+
+    // Close the initialized input mixer
+    //
+    if (!wasInitialized)
+    {
+        _mixerManager.CloseMicrophone();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::SetMicrophoneMute(bool enable)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "SetMicrophoneMute(enable=%u)", enable);
+    return (_mixerManager.SetMicrophoneMute(enable));
+}
+
+// ----------------------------------------------------------------------------
+//  MicrophoneMute
+// ----------------------------------------------------------------------------
+
+WebRtc_Word32 AudioDeviceLinuxALSA::MicrophoneMute(bool& enabled) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool muted(0);
+
+    if (_mixerManager.MicrophoneMute(muted) == -1)
+    {
+        return -1;
+    }
+
+    enabled = muted;
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::MicrophoneBoostIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool isAvailable(false);
+    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+    // Enumerate all available microphones and make an attempt to open up the
+    // input mixer corresponding to the currently selected input device.
+    //
+    if (!wasInitialized && InitMicrophone() == -1)
+    {
+        // If we end up here it means that the selected microphone has no volume
+        // control, hence it is safe to state that there is no boost control
+        // already at this stage.
+        available = false;
+        return 0;
+    }
+
+    // Check if the selected microphone has a boost control
+    _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
+    available = isAvailable;
+
+    // Close the initialized input mixer
+    if (!wasInitialized)
+    {
+        _mixerManager.CloseMicrophone();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::SetMicrophoneBoost(bool enable)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "SetMicrophoneBoost(enable=%u)", enable);
+
+    return (_mixerManager.SetMicrophoneBoost(enable));
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::MicrophoneBoost(bool& enabled) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool onOff(0);
+
+    if (_mixerManager.MicrophoneBoost(onOff) == -1)
+    {
+        return -1;
+    }
+
+    enabled = onOff;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::StereoRecordingIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    CriticalSectionScoped lock(_critSect);
+
+    // If we already have initialized in stereo it's obviously available
+    if (_recIsInitialized && (2 == _recChannels))
+    {
+        available = true;
+        return 0;
+    }
+
+    // Save rec states and the number of rec channels
+    bool recIsInitialized = _recIsInitialized;
+    bool recording = _recording;
+    int recChannels = _recChannels;
+
+    available = false;
+
+    // Stop/uninitialize recording if initialized (and possibly started)
+    if (_recIsInitialized)
+    {
+        StopRecording();
+    }
+
+    // Try to initialize in stereo
+    _recChannels = 2;
+    if (InitRecording() == 0)
+    {
+        available = true;
+    }
+
+    // Stop/uninitialize recording
+    StopRecording();
+
+    // Recover previous states
+    _recChannels = recChannels;
+    if (recIsInitialized)
+    {
+        InitRecording();
+    }
+    if (recording)
+    {
+        StartRecording();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::SetStereoRecording(bool enable)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "SetStereoRecording(enable=%u)", enable);
+
+    if (enable)
+        _recChannels = 2;
+    else
+        _recChannels = 1;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::StereoRecording(bool& enabled) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    if (_recChannels == 2)
+        enabled = true;
+    else
+        enabled = false;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::StereoPlayoutIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    CriticalSectionScoped lock(_critSect);
+
+    // If we already have initialized in stereo it's obviously available
+    if (_playIsInitialized && (2 == _playChannels))
+    {
+        available = true;
+        return 0;
+    }
+
+    // Save play states and the number of play channels
+    bool playIsInitialized = _playIsInitialized;
+    bool playing = _playing;
+    int playChannels = _playChannels;
+
+    available = false;
+
+    // Stop/uninitialize playout if initialized (and possibly started)
+    if (_playIsInitialized)
+    {
+        StopPlayout();
+    }
+
+    // Try to initialize in stereo
+    _playChannels = 2;
+    if (InitPlayout() == 0)
+    {
+        available = true;
+    }
+
+    // Stop/uninitialize playout
+    StopPlayout();
+
+    // Recover previous states
+    _playChannels = playChannels;
+    if (playIsInitialized)
+    {
+        InitPlayout();
+    }
+    if (playing)
+    {
+        StartPlayout();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::SetStereoPlayout(bool enable)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "SetStereoPlayout(enable=%u)", enable);
+
+    if (enable)
+        _playChannels = 2;
+    else
+        _playChannels = 1;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::StereoPlayout(bool& enabled) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    if (_playChannels == 2)
+        enabled = true;
+    else
+        enabled = false;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::SetAGC(bool enable)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "SetAGC(enable=%d)", enable);
+
+    _AGC = enable;
+
+    return 0;
+}
+
+bool AudioDeviceLinuxALSA::AGC() const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    return _AGC;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::MicrophoneVolumeIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+    // Make an attempt to open up the
+    // input mixer corresponding to the currently selected input device.
+    if (!wasInitialized && InitMicrophone() == -1)
+    {
+        // If we end up here it means that the selected microphone has no volume
+        // control.
+        available = false;
+        return 0;
+    }
+
+    // Given that InitMicrophone was successful, we know that a volume control
+    // exists
+    available = true;
+
+    // Close the initialized input mixer
+    if (!wasInitialized)
+    {
+        _mixerManager.CloseMicrophone();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::SetMicrophoneVolume(WebRtc_UWord32 volume)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "SetMicrophoneVolume(volume=%u)", volume);
+
+    return (_mixerManager.SetMicrophoneVolume(volume));
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::MicrophoneVolume(WebRtc_UWord32& volume) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    WebRtc_UWord32 level(0);
+
+    if (_mixerManager.MicrophoneVolume(level) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " failed to retrieve current microphone level");
+        return -1;
+    }
+
+    volume = level;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::MaxMicrophoneVolume(
+    WebRtc_UWord32& maxVolume) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    WebRtc_UWord32 maxVol(0);
+
+    if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
+    {
+        return -1;
+    }
+
+    maxVolume = maxVol;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::MinMicrophoneVolume(
+    WebRtc_UWord32& minVolume) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    WebRtc_UWord32 minVol(0);
+
+    if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
+    {
+        return -1;
+    }
+
+    minVolume = minVol;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::MicrophoneVolumeStepSize(
+    WebRtc_UWord16& stepSize) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    WebRtc_UWord16 delta(0);
+
+    if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
+    {
+        return -1;
+    }
+
+    stepSize = delta;
+
+    return 0;
+}
+
+WebRtc_Word16 AudioDeviceLinuxALSA::PlayoutDevices()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    return (WebRtc_Word16)GetDevicesInfo(0, true);
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::SetPlayoutDevice(WebRtc_UWord16 index)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "SetPlayoutDevice(index=%u)", index);
+
+    if (_playIsInitialized)
+    {
+        return -1;
+    }
+
+    WebRtc_UWord32 nDevices = GetDevicesInfo(0, true);
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 " number of available audio output devices is %u", nDevices);
+
+    if (index > (nDevices-1))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " device index is out of range [0,%u]", (nDevices-1));
+        return -1;
+    }
+
+    _outputDeviceIndex = index;
+    _outputDeviceIsSpecified = true;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::SetPlayoutDevice(
+    AudioDeviceModule::WindowsDeviceType /*device*/)
+{
+    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                 "WindowsDeviceType not supported");
+    return -1;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::PlayoutDeviceName(
+    WebRtc_UWord16 index,
+    WebRtc_Word8 name[kAdmMaxDeviceNameSize],
+    WebRtc_Word8 guid[kAdmMaxGuidSize])
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "PlayoutDeviceName(index=%u)", index);
+
+    const WebRtc_UWord16 nDevices(PlayoutDevices());
+
+    if ((index > (nDevices-1)) || (name == NULL))
+    {
+        return -1;
+    }
+
+    memset(name, 0, kAdmMaxDeviceNameSize);
+
+    if (guid != NULL)
+    {
+        memset(guid, 0, kAdmMaxGuidSize);
+    }
+
+    return GetDevicesInfo(1, true, index, name, kAdmMaxDeviceNameSize);
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::RecordingDeviceName(
+    WebRtc_UWord16 index,
+    WebRtc_Word8 name[kAdmMaxDeviceNameSize],
+    WebRtc_Word8 guid[kAdmMaxGuidSize])
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "RecordingDeviceName(index=%u)", index);
+
+    const WebRtc_UWord16 nDevices(RecordingDevices());
+
+    if ((index > (nDevices-1)) || (name == NULL))
+    {
+        return -1;
+    }
+
+    memset(name, 0, kAdmMaxDeviceNameSize);
+
+    if (guid != NULL)
+    {
+        memset(guid, 0, kAdmMaxGuidSize);
+    }
+
+    return GetDevicesInfo(1, false, index, name, kAdmMaxDeviceNameSize);
+}
+
+WebRtc_Word16 AudioDeviceLinuxALSA::RecordingDevices()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    return (WebRtc_Word16)GetDevicesInfo(0, false);
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::SetRecordingDevice(WebRtc_UWord16 index)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "SetRecordingDevice(index=%u)", index);
+
+    if (_recIsInitialized)
+    {
+        return -1;
+    }
+
+    WebRtc_UWord32 nDevices = GetDevicesInfo(0, false);
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 " number of available audio input devices is %u", nDevices);
+
+    if (index > (nDevices-1))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " device index is out of range [0,%u]", (nDevices-1));
+        return -1;
+    }
+
+    _inputDeviceIndex = index;
+    _inputDeviceIsSpecified = true;
+
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  SetRecordingDevice II (II)
+// ----------------------------------------------------------------------------
+
+WebRtc_Word32 AudioDeviceLinuxALSA::SetRecordingDevice(
+    AudioDeviceModule::WindowsDeviceType /*device*/)
+{
+    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                 "WindowsDeviceType not supported");
+    return -1;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::PlayoutIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    available = false;
+
+    // Try to initialize the playout side with mono
+    // Assumes that the user sets num channels after calling this function
+    _playChannels = 1;
+    WebRtc_Word32 res = InitPlayout();
+
+    // Cancel effect of initialization
+    StopPlayout();
+
+    if (res != -1)
+    {
+        available = true;
+    }
+    else
+    {
+        // It may be possible to play out in stereo
+        res = StereoPlayoutIsAvailable(available);
+        if (available)
+        {
+            // Then set channels to 2 so InitPlayout doesn't fail
+            _playChannels = 2;
+        }
+    }
+
+    return res;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::RecordingIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    available = false;
+
+    // Try to initialize the recording side with mono
+    // Assumes that the user sets num channels after calling this function
+    _recChannels = 1;
+    WebRtc_Word32 res = InitRecording();
+
+    // Cancel effect of initialization
+    StopRecording();
+
+    if (res != -1)
+    {
+        available = true;
+    }
+    else
+    {
+        // It may be possible to record in stereo
+        res = StereoRecordingIsAvailable(available);
+        if (available)
+        {
+            // Then set channels to 2 so InitRecording doesn't fail
+            _recChannels = 2;
+        }
+    }
+
+    return res;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::InitPlayout()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    int errVal = 0;
+
+    snd_pcm_uframes_t numFrames = 0;
+    snd_pcm_hw_params_t *paramsPlayout;
+
+    CriticalSectionScoped lock(_critSect);
+    if (_playing)
+    {
+        return -1;
+    }
+
+    if (!_outputDeviceIsSpecified)
+    {
+        return -1;
+    }
+
+    if (_playIsInitialized)
+    {
+        return 0;
+    }
+
+    // Initialize the speaker (devices might have been added or removed)
+    if (InitSpeaker() == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " InitSpeaker() failed");
+    }
+
+    // Start by closing any existing wave-output devices
+    //
+    if (_handlePlayout != NULL)
+    {
+        errVal = LATE(snd_pcm_close)(_handlePlayout);
+        _handlePlayout = NULL;
+        _playIsInitialized = false;
+        if (errVal < 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                         " Error closing current playout sound device, error:"
+                         " %s", LATE(snd_strerror)(errVal));
+        }
+    }
+
+    // Open PCM device for playout
+    char deviceName[kAdmMaxDeviceNameSize] = {0};
+    GetDevicesInfo(2, true, _outputDeviceIndex, deviceName,
+                   kAdmMaxDeviceNameSize);
+
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 " InitPlayout open (%s)", deviceName);
+
+    errVal = LATE(snd_pcm_open)
+                 (&_handlePlayout,
+                  deviceName,
+                  SND_PCM_STREAM_PLAYBACK,
+                  SND_PCM_NONBLOCK);
+
+    if (errVal == -EBUSY) // Device busy - try some more!
+    {
+        for (int i=0; i < 5; i++)
+        {
+            sleep(1);
+            errVal = LATE(snd_pcm_open)
+                         (&_handlePlayout,
+                          deviceName,
+                          SND_PCM_STREAM_PLAYBACK,
+                          SND_PCM_NONBLOCK);
+            if (errVal == 0)
+            {
+                break;
+            }
+        }
+    }
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " unable to open playback device: %s (%d)",
+                     LATE(snd_strerror)(errVal),
+                     errVal);
+        _handlePlayout = NULL;
+        return -1;
+    }
+
+    // Allocate hardware parameters
+    errVal = LATE(snd_pcm_hw_params_malloc)(&paramsPlayout);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params malloc, error: %s",
+                     LATE(snd_strerror)(errVal));
+        if (_handlePlayout)
+        {
+            errVal = LATE(snd_pcm_close)(_handlePlayout);
+            _handlePlayout = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing playout sound device, error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+
+    errVal = LATE(snd_pcm_hw_params_any)(_handlePlayout, paramsPlayout);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params_any, error: %s",
+                     LATE(snd_strerror)(errVal));
+        if (_handlePlayout)
+        {
+            errVal = LATE(snd_pcm_close)(_handlePlayout);
+            _handlePlayout = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing playout sound device, error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+
+    // Set stereo sample order
+    errVal = LATE(snd_pcm_hw_params_set_access)
+                 (_handlePlayout,
+                  paramsPlayout,
+                  SND_PCM_ACCESS_RW_INTERLEAVED);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params set access, error: %s",
+                     LATE(snd_strerror)(errVal));
+        if (_handlePlayout)
+        {
+            errVal = LATE(snd_pcm_close)(_handlePlayout);
+            _handlePlayout = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing playout sound device, error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+
+    // Set sample format
+#if defined(WEBRTC_BIG_ENDIAN)
+    errVal = LATE(snd_pcm_hw_params_set_format)
+                 (_handlePlayout,
+                  paramsPlayout,
+                  SND_PCM_FORMAT_S16_BE);
+#else
+    errVal = LATE(snd_pcm_hw_params_set_format)
+                 (_handlePlayout,
+                  paramsPlayout,
+                  SND_PCM_FORMAT_S16_LE);
+#endif
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params set format, error: %s",
+                     LATE(snd_strerror)(errVal));
+        if (_handlePlayout)
+        {
+            errVal = LATE(snd_pcm_close)(_handlePlayout);
+            _handlePlayout = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing playout sound device, error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+
+    // Set stereo/mono
+    errVal = LATE(snd_pcm_hw_params_set_channels)
+                 (_handlePlayout,
+                  paramsPlayout,
+                  _playChannels);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params set channels(%d), error: %s",
+                     _playChannels,
+                     LATE(snd_strerror)(errVal));
+
+        if (_handlePlayout)
+        {
+            errVal = LATE(snd_pcm_close)(_handlePlayout);
+            _handlePlayout = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing playout sound device, error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+
+    // Set sampling rate to use
+    _samplingFreqPlay = PLAY_SAMPLES_PER_MS;
+    WebRtc_UWord32 samplingRate = _samplingFreqPlay*1000;
+
+    // Set sample rate
+    unsigned int exactRate = samplingRate;
+    errVal = LATE(snd_pcm_hw_params_set_rate_near)
+                 (_handlePlayout,
+                  paramsPlayout,
+                  &exactRate,
+                  0);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params set rate near(%d), error: %s",
+                     samplingRate,
+                     LATE(snd_strerror)(errVal));
+        if (_handlePlayout)
+        {
+            errVal = LATE(snd_pcm_close)(_handlePlayout);
+            _handlePlayout = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing playout sound device, error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+    if (exactRate != samplingRate)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " Soundcard does not support sample rate %d Hz, %d Hz"
+                     " used instead.",
+                     samplingRate,
+                     exactRate);
+
+        // We use this rate instead
+        _samplingFreqPlay = (WebRtc_UWord32)(exactRate / 1000);
+    }
+
+    // Set buffer size, in frames
+    numFrames = ALSA_SNDCARD_BUFF_SIZE_PLAY;
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 " set playout, numFrames: %d, buffer size: %d",
+                 numFrames,
+                 _playSndcardBuffsize);
+    errVal = LATE(snd_pcm_hw_params_set_buffer_size_near)
+                 (_handlePlayout,
+                  paramsPlayout,
+                  &_playSndcardBuffsize);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params set buffer size near(%d), error: %s",
+                     (int) numFrames,
+                     LATE(snd_strerror)(errVal));
+        if (_handlePlayout)
+        {
+            errVal = LATE(snd_pcm_close)(_handlePlayout);
+            _handlePlayout = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing playout sound device, error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+    if (numFrames != _playSndcardBuffsize)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " Allocated playout buffersize: %d frames",
+                     (int)_playSndcardBuffsize);
+    }
+
+    // Write settings to the devices
+    errVal = LATE(snd_pcm_hw_params)(_handlePlayout, paramsPlayout);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params(_handlePlayout, paramsPlayout),"
+                     " error: %s",
+                     LATE(snd_strerror)(errVal));
+        if (_handlePlayout)
+        {
+            errVal = LATE(snd_pcm_close)(_handlePlayout);
+            _handlePlayout = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing playout sound device, error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+
+    // Free parameter struct memory
+    LATE(snd_pcm_hw_params_free)(paramsPlayout);
+    paramsPlayout = NULL;
+
+    if (_ptrAudioBuffer)
+    {
+        // Update audio buffer with the selected parameters
+        _ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreqPlay*1000);
+        _ptrAudioBuffer->SetPlayoutChannels((WebRtc_UWord8)_playChannels);
+    }
+
+    // Set play buffer size
+    _playbackBufferSize = _samplingFreqPlay * 10 * _playChannels * 2;
+
+    // Init variables used for play
+    _previousSndCardPlayDelay = 0;
+    _largeDelayCountPlay = 0;
+    _delayMonitorStatePlay = 0;
+    _bufferCheckMethodPlay = 0;
+    _bufferCheckErrorsPlay = 0;
+    _lastBufferCheckValuePlay = 0;
+    _playWarning = 0;
+    _playError = 0;
+
+    if (_handlePlayout != NULL)
+    {
+        _playIsInitialized = true;
+        return 0;
+    }
+    else
+    {
+        return -1;
+    }
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::InitRecording()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    int errVal = 0;
+    snd_pcm_uframes_t numFrames = 0;
+    snd_pcm_hw_params_t *paramsRecord;
+
+    CriticalSectionScoped lock(_critSect);
+
+    if (_recording)
+    {
+        return -1;
+    }
+
+    if (!_inputDeviceIsSpecified)
+    {
+        return -1;
+    }
+
+    if (_recIsInitialized)
+    {
+        return 0;
+    }
+
+    // Initialize the microphone (devices might have been added or removed)
+    if (InitMicrophone() == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " InitMicrophone() failed");
+    }
+
+    // Start by closing any existing pcm-input devices
+    //
+    if (_handleRecord != NULL)
+    {
+        int errVal = LATE(snd_pcm_close)(_handleRecord);
+        _handleRecord = NULL;
+        _recIsInitialized = false;
+        if (errVal < 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                         " Error closing current recording sound device,"
+                         " error: %s",
+                         LATE(snd_strerror)(errVal));
+        }
+    }
+
+    // Open PCM device for recording
+    // The corresponding settings for playout are made after the record settings
+    char deviceName[kAdmMaxDeviceNameSize] = {0};
+    GetDevicesInfo(2, false, _inputDeviceIndex, deviceName,
+                   kAdmMaxDeviceNameSize);
+
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 "InitRecording open (%s)", deviceName);
+    errVal = LATE(snd_pcm_open)
+                 (&_handleRecord,
+                  deviceName,
+                  SND_PCM_STREAM_CAPTURE,
+                  SND_PCM_NONBLOCK);
+
+    // Available modes: 0 = blocking, SND_PCM_NONBLOCK, SND_PCM_ASYNC
+    if (errVal == -EBUSY) // Device busy - try some more!
+    {
+        for (int i=0; i < 5; i++)
+        {
+            sleep(1);
+            errVal = LATE(snd_pcm_open)
+                         (&_handleRecord,
+                          deviceName,
+                          SND_PCM_STREAM_CAPTURE,
+                          SND_PCM_NONBLOCK);
+            if (errVal == 0)
+            {
+                break;
+            }
+        }
+    }
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " unable to open record device: %s",
+                     LATE(snd_strerror)(errVal));
+        _handleRecord = NULL;
+        return -1;
+    }
+
+    // Allocate hardware parameters
+    errVal = LATE(snd_pcm_hw_params_malloc)(&paramsRecord);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params malloc, error: %s",
+                     LATE(snd_strerror)(errVal));
+        if (_handleRecord)
+        {
+            errVal = LATE(snd_pcm_close)(_handleRecord);
+            _handleRecord = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing recording sound device, error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+
+    errVal = LATE(snd_pcm_hw_params_any)(_handleRecord, paramsRecord);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params any, error: %s",
+                     LATE(snd_strerror)(errVal));
+        if (_handleRecord)
+        {
+            errVal = LATE(snd_pcm_close)(_handleRecord);
+            _handleRecord = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing recording sound device, error:"
+                             " %s", LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+
+    // Set stereo sample order
+    errVal = LATE(snd_pcm_hw_params_set_access)
+                 (_handleRecord,
+                  paramsRecord,
+                  SND_PCM_ACCESS_RW_INTERLEAVED);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params set access, error: %s",
+                     LATE(snd_strerror)(errVal));
+        if (_handleRecord)
+        {
+            errVal = LATE(snd_pcm_close)(_handleRecord);
+            _handleRecord = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing recording sound device, error:"
+                             " %s", LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+
+    // Set sample format
+#if defined(WEBRTC_BIG_ENDIAN)
+    errVal = LATE(snd_pcm_hw_params_set_format)
+                 (_handleRecord,
+                  paramsRecord,
+                  SND_PCM_FORMAT_S16_BE);
+#else
+    errVal = LATE(snd_pcm_hw_params_set_format)
+                 (_handleRecord,
+                  paramsRecord,
+                  SND_PCM_FORMAT_S16_LE);
+#endif
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params set format, error: %s",
+                     LATE(snd_strerror)(errVal));
+        if (_handleRecord)
+        {
+            errVal = LATE(snd_pcm_close)(_handleRecord);
+            _handleRecord = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing recording sound device,"
+                             " error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+
+    // Set stereo/mono
+    errVal = LATE(snd_pcm_hw_params_set_channels)(
+        _handleRecord, paramsRecord, _recChannels);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params set channels (%d), error: %s",
+                     _recChannels,
+                     LATE(snd_strerror)(errVal));
+        if (_handleRecord)
+        {
+            errVal = LATE(snd_pcm_close)(_handleRecord);
+            _handleRecord = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing recording sound device, "
+                             "error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+
+    // Set sampling rate to use
+    _samplingFreqRec = REC_SAMPLES_PER_MS;
+    WebRtc_UWord32 samplingRate = _samplingFreqRec*1000;
+
+    // Set sample rate
+    unsigned int exactRate = samplingRate;
+    errVal = LATE(snd_pcm_hw_params_set_rate_near)
+                 (_handleRecord,
+                  paramsRecord,
+                  &exactRate,
+                  0);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params set rate near(%d), error: %s",
+                     samplingRate,
+                     LATE(snd_strerror)(errVal));
+        if (_handleRecord)
+        {
+            errVal = LATE(snd_pcm_close)(_handleRecord);
+            _handleRecord = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing recording sound device,"
+                             " error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+    if (exactRate != samplingRate)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " Sound device does not support sample rate %d Hz, %d Hz"
+                     " used instead.",
+                     samplingRate,
+                     exactRate);
+
+        // We use this rate instead
+        _samplingFreqRec = (WebRtc_UWord32)(exactRate / 1000);
+    }
+
+    // Set buffer size, in frames.
+    numFrames = ALSA_SNDCARD_BUFF_SIZE_REC;
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 " set record, numFrames: %d, buffer size: %d",
+                 numFrames,
+                 _recSndcardBuffsize);
+
+    errVal = LATE(snd_pcm_hw_params_set_buffer_size_near)
+                 (_handleRecord,
+                  paramsRecord,
+                  &_recSndcardBuffsize);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params set buffer size near(%d), error: %s",
+                     (int) numFrames,
+                     LATE(snd_strerror)(errVal));
+
+        if (_handleRecord)
+        {
+            errVal = LATE(snd_pcm_close)(_handleRecord);
+            _handleRecord = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing recording sound device, "
+                             "error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+    if (numFrames != _recSndcardBuffsize)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " Allocated record buffersize: %d frames",
+                     (int)_recSndcardBuffsize);
+    }
+
+    // Write settings to the devices
+    errVal = LATE(snd_pcm_hw_params)(_handleRecord, paramsRecord);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " hardware params, error: %s",
+                     LATE(snd_strerror)(errVal));
+        if (_handleRecord)
+        {
+            errVal = LATE(snd_pcm_close)(_handleRecord);
+            _handleRecord = NULL;
+            if (errVal < 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Error closing recording sound device, error: %s",
+                             LATE(snd_strerror)(errVal));
+            }
+        }
+        return -1;
+    }
+
+    // Free parameter struct memory
+    LATE(snd_pcm_hw_params_free)(paramsRecord);
+    paramsRecord = NULL;
+
+    if (_ptrAudioBuffer)
+    {
+        // Update audio buffer with the selected parameters
+        _ptrAudioBuffer->SetRecordingSampleRate(_samplingFreqRec*1000);
+        _ptrAudioBuffer->SetRecordingChannels((WebRtc_UWord8)_recChannels);
+    }
+
+    // Set rec buffer size and create buffer
+    _recordBufferSize = _samplingFreqRec * 10 * _recChannels * 2;
+    _recBuffer = new WebRtc_Word16[_recordBufferSize / 2];
+
+    // Init rec variables
+    _bufferCheckMethodRec = 0;
+    _bufferCheckErrorsRec = 0;
+
+    if (_handleRecord != NULL)
+    {
+        // Mark recording side as initialized
+        _recIsInitialized = true;
+        return 0;
+    }
+    else
+    {
+        return -1;
+    }
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::StartRecording()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    if (!_recIsInitialized)
+    {
+        return -1;
+    }
+
+    if (_recording)
+    {
+        return 0;
+    }
+
+    // prepare and start the recording
+    int errVal = 0;
+    errVal = LATE(snd_pcm_prepare)(_handleRecord);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " cannot prepare audio record interface for use (%s)\n",
+                     LATE(snd_strerror)(errVal));
+        return -1;
+    }
+
+    errVal = LATE(snd_pcm_start)(_handleRecord);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " Error starting record interface: %s",
+                     LATE(snd_strerror)(errVal));
+        return -1;
+    }
+
+/*
+    // DEBUG: Write info about PCM
+    snd_output_t *output = NULL;
+    errVal = LATE(snd_output_stdio_attach)(&output, stdout, 0);
+    if (errVal < 0) {
+        printf("Output failed: %s\n", snd_strerror(errVal));
+        return 0;
+    }
+    LATE(snd_pcm_dump)(_handleRecord, output);
+*/
+
+    // set state to ensure that the recording starts from the audio thread
+    _startRec = true;
+
+    // the audio thread will signal when recording has started
+    if (kEventTimeout == _recStartEvent.Wait(10000))
+    {
+        _startRec = false;
+        StopRecording();
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " failed to activate recording");
+        return -1;
+    }
+
+    if (_recording)
+    {
+        // the recording state is set by the audio thread after recording has
+        // started
+        WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                     " recording is now active");
+    }
+    else
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " failed to activate recording");
+        return -1;
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::StopRecording()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    CriticalSectionScoped lock(_critSect);
+
+    if (!_recIsInitialized)
+    {
+        return 0;
+    }
+
+    if (_handleRecord == NULL)
+    {
+        return -1;
+    }
+
+    // make sure we don't start recording (it's asynchronous), assuming that
+    // we are under lock
+    _startRec = false;
+
+    // stop and close pcm recording device
+    int errVal = LATE(snd_pcm_drop)(_handleRecord);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " Error stop recording: %s",
+                     LATE(snd_strerror)(errVal));
+    }
+
+    errVal = LATE(snd_pcm_close)(_handleRecord);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " Error closing record sound device, error: %s",
+                     LATE(snd_strerror)(errVal));
+    }
+
+    // check if we have muted and unmute if so
+    bool muteEnabled = false;
+    MicrophoneMute(muteEnabled);
+    if (muteEnabled)
+    {
+        SetMicrophoneMute(false);
+    }
+
+    _recIsInitialized = false;
+    _recording = false;
+
+    // set the pcm input handle to NULL
+    _handleRecord = NULL;
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 " _handleRecord is now set to NULL");
+
+    // delete the rec buffer
+    if (_recBuffer)
+    {
+        delete [] _recBuffer;  // allocated with new[] in InitRecording
+        _recBuffer = NULL;
+    }
+
+    return 0;
+}
+
+bool AudioDeviceLinuxALSA::RecordingIsInitialized() const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+    return (_recIsInitialized);
+}
+
+bool AudioDeviceLinuxALSA::Recording() const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+    return (_recording);
+}
+
+bool AudioDeviceLinuxALSA::PlayoutIsInitialized() const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+    return (_playIsInitialized);
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::StartPlayout()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+    if (!_playIsInitialized)
+    {
+        return -1;
+    }
+
+    if (_playing)
+    {
+        return 0;
+    }
+
+    // prepare playout
+    int errVal = 0;
+    errVal = LATE(snd_pcm_prepare)(_handlePlayout);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " cannot prepare audio playout interface for use: %s",
+                     LATE(snd_strerror)(errVal));
+        return -1;
+    }
+
+    // Don't call snd_pcm_start here, it will start implicitly at first write.
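+    // Illustrative note (assumption about standard ALSA semantics, not part
+    // of the original change): the implicit start works because a playback
+    // stream in the PREPARED state switches to RUNNING on its own once writes
+    // cross the stream's start threshold, whereas an explicit snd_pcm_start()
+    // on a still-empty playback buffer would underrun immediately. If one
+    // wanted the threshold to be explicit, the standard alsa-lib sw-params
+    // calls (not wrapped by AlsaSymbolTable above) would look roughly like:
+    //
+    //   snd_pcm_sw_params_t* swParams;
+    //   snd_pcm_sw_params_malloc(&swParams);
+    //   snd_pcm_sw_params_current(_handlePlayout, swParams);
+    //   // e.g. start once one 10 ms block is queued (illustrative value)
+    //   snd_pcm_sw_params_set_start_threshold(
+    //       _handlePlayout, swParams, _playbackBufferSize / (2 * _playChannels));
+    //   snd_pcm_sw_params(_handlePlayout, swParams);
+    //   snd_pcm_sw_params_free(swParams);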
+/*
+    // DEBUG: Write info about PCM
+    snd_output_t *output = NULL;
+    errVal = LATE(snd_output_stdio_attach)(&output, stdout, 0);
+    if (errVal < 0) {
+        printf("Output failed: %s\n", snd_strerror(errVal));
+        return 0;
+    }
+    LATE(snd_pcm_dump)(_handlePlayout, output);
+*/
+
+    // set state to ensure that playout starts from the audio thread
+    _startPlay = true;
+
+    // the audio thread will signal when playout has started
+    if (kEventTimeout == _playStartEvent.Wait(10000))
+    {
+        _startPlay = false;
+        StopPlayout();
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " failed to activate playout");
+        return -1;
+    }
+
+    if (_playing)
+    {
+        // the playing state is set by the audio thread after playout has started
+        WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                     " playing is now active");
+    }
+    else
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " failed to activate playing");
+        return -1;
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::StopPlayout()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    CriticalSectionScoped lock(_critSect);
+
+    if (!_playIsInitialized)
+    {
+        return 0;
+    }
+
+    if (_handlePlayout == NULL)
+    {
+        return -1;
+    }
+
+    _playIsInitialized = false;
+    _playing = false;
+
+    // stop and close pcm playout device
+    int errVal = LATE(snd_pcm_drop)(_handlePlayout);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " Error stop playing: %s",
+                     LATE(snd_strerror)(errVal));
+    }
+
+    errVal = LATE(snd_pcm_close)(_handlePlayout);
+    if (errVal < 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " Error closing playout sound device, error: %s",
+                     LATE(snd_strerror)(errVal));
+    }
+
+    // set the pcm output handle to NULL
+    _handlePlayout = NULL;
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 " _handlePlayout is now set to NULL");
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::PlayoutDelay(WebRtc_UWord16& delayMS) const
+{
+    delayMS = (WebRtc_UWord16)_sndCardPlayDelay;
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::RecordingDelay(WebRtc_UWord16& delayMS) const
+{
+    delayMS = (WebRtc_UWord16)_sndCardRecDelay;
+    return 0;
+}
+
+bool AudioDeviceLinuxALSA::Playing() const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+    return (_playing);
+}
+
+// ----------------------------------------------------------------------------
+//  SetPlayoutBuffer
+// ----------------------------------------------------------------------------
+
+WebRtc_Word32 AudioDeviceLinuxALSA::SetPlayoutBuffer(
+    const AudioDeviceModule::BufferType type,
+    WebRtc_UWord16 sizeMS)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "SetPlayoutBuffer(type=%u, sizeMS=%u)", type, sizeMS);
+    _playBufType = type;
+    if (type == AudioDeviceModule::kFixedBufferSize)
+    {
+        _playBufDelayFixed = sizeMS;
+    }
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::PlayoutBuffer(
+    AudioDeviceModule::BufferType& type,
+    WebRtc_UWord16& sizeMS) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+    type = _playBufType;
+    if (type == AudioDeviceModule::kFixedBufferSize)
+    {
+        sizeMS = _playBufDelayFixed;
+    }
+    else
+    {
+        sizeMS = _playBufDelay;
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::CPULoad(WebRtc_UWord16& load) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                 " API call not supported on this platform");
+    return -1;
+}
+
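+// Illustrative sketch (an assumption, using C++11 primitives this 2011 code
+// predates): the StartRecording()/StartPlayout() handshake above -- set
+// _startRec or _startPlay, then block on an EventWrapper until the audio
+// thread observes the flag, flips _recording/_playing and signals -- is the
+// usual "confirm the worker really entered its streaming loop" pattern. With
+// standard primitives it could be written roughly as:
+//
+//   #include <chrono>
+//   #include <condition_variable>
+//   #include <mutex>
+//
+//   struct StartHandshake {
+//       std::mutex m;
+//       std::condition_variable cv;
+//       bool startRequested;  // plays the role of _startPlay
+//       bool running;         // plays the role of _playing
+//       StartHandshake() : startRequested(false), running(false) {}
+//
+//       // API thread: request start, wait up to 10 s for the worker's ack
+//       // (mirrors _playStartEvent.Wait(10000)).
+//       bool RequestStart() {
+//           std::unique_lock<std::mutex> lock(m);
+//           startRequested = true;
+//           return cv.wait_for(lock, std::chrono::seconds(10),
+//                              [this] { return running; });
+//       }
+//
+//       // Audio thread: called from its periodic loop (PlayThreadProcess).
+//       void WorkerTick() {
+//           std::lock_guard<std::mutex> lock(m);
+//           if (startRequested && !running) {
+//               running = true;   // corresponds to "_playing = true"
+//               cv.notify_all();  // corresponds to _playStartEvent.Set()
+//           }
+//       }
+//   };
+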
+bool AudioDeviceLinuxALSA::PlayoutWarning() const +{ + return (_playWarning > 0); +} + +bool AudioDeviceLinuxALSA::PlayoutError() const +{ + return (_playError > 0); +} + +bool AudioDeviceLinuxALSA::RecordingWarning() const +{ + return (_recWarning > 0); +} + +bool AudioDeviceLinuxALSA::RecordingError() const +{ + return (_recError > 0); +} + +void AudioDeviceLinuxALSA::ClearPlayoutWarning() +{ + _playWarning = 0; +} + +void AudioDeviceLinuxALSA::ClearPlayoutError() +{ + _playError = 0; +} + +void AudioDeviceLinuxALSA::ClearRecordingWarning() +{ + _recWarning = 0; +} + +void AudioDeviceLinuxALSA::ClearRecordingError() +{ + _recError = 0; +} + +// ============================================================================ +// Private Methods +// ============================================================================ + +WebRtc_Word32 AudioDeviceLinuxALSA::GetDevicesInfo( + const WebRtc_Word32 function, + const bool playback, + const WebRtc_Word32 enumDeviceNo, + char* enumDeviceName, + const WebRtc_Word32 ednLen) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + // Device enumeration based on libjingle implementation + // by Tristan Schmelcher at Google Inc. + + const char *type = playback ? "Output" : "Input"; + // dmix and dsnoop are only for playback and capture, respectively, but ALSA + // stupidly includes them in both lists. + const char *ignorePrefix = playback ? "dsnoop:" : "dmix:" ; + // (ALSA lists many more "devices" of questionable interest, but we show them + // just in case the weird devices may actually be desirable for some + // users/systems.) + + int err; + int enumCount(0); + bool keepSearching(true); + + void **hints; + err = LATE(snd_device_name_hint)(-1, // All cards + "pcm", // Only PCM devices + &hints); + if (err != 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "GetDevicesInfo - device name hint error: %s", + LATE(snd_strerror)(err)); + return -1; + } + + for (void **list = hints; *list != NULL; ++list) + { + char *actualType = LATE(snd_device_name_get_hint)(*list, "IOID"); + if (actualType) + { // NULL means it's both. + bool wrongType = (strcmp(actualType, type) != 0); + free(actualType); + if (wrongType) + { + // Wrong type of device (i.e., input vs. output). + continue; + } + } + + char *name = LATE(snd_device_name_get_hint)(*list, "NAME"); + if (!name) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Device has no name"); + // Skip it. + continue; + } + + // Now check if we actually want to show this device. + if (strcmp(name, "default") != 0 && + strcmp(name, "null") != 0 && + strcmp(name, "pulse") != 0 && + strncmp(name, ignorePrefix, strlen(ignorePrefix)) != 0) + { + // Yes, we do. + char *desc = LATE(snd_device_name_get_hint)(*list, "DESC"); + if (!desc) + { + // Virtual devices don't necessarily have descriptions. 
+ // Use their names instead + desc = name; + } + + if (0 == function) + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Enum device %d - %s", enumCount, name); + + } + if ((1 == function) && (enumDeviceNo == enumCount)) + { + + // We have found the enum device, copy the name to buffer + strncpy(enumDeviceName, desc, ednLen); + enumDeviceName[ednLen-1] = '\0'; + keepSearching = false; + } + if ((2 == function) && (enumDeviceNo == enumCount)) + { + // We have found the enum device, copy the name to buffer + strncpy(enumDeviceName, name, ednLen); + enumDeviceName[ednLen-1] = '\0'; + keepSearching = false; + } + if (keepSearching) + { + ++enumCount; + } + + if (desc != name) + { + free(desc); + } + } + + free(name); + + if (!keepSearching) + { + break; + } + } + + err = LATE(snd_device_name_free_hint)(hints); + if (err != 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "GetDevicesInfo - device name free hint error: %s", + LATE(snd_strerror)(err)); + // Continue and return true anyways, since we did get the whole list. + } + + if (0 == function) + { + return enumCount; // Normal return point for function 0 + } + + if (keepSearching) + { + // If we get here for function 1 and 2, we didn't find the specified + // enum device + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "GetDevicesInfo - Could not find device name or numbers"); + return -1; + } + + return 0; +} + +void AudioDeviceLinuxALSA::FillPlayoutBuffer() +{ + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + "Filling playout buffer"); + + WebRtc_Word32 sizeBytes = _playbackBufferSize; + WebRtc_Word32 blockFrames = sizeBytes / (2 * _playChannels); + WebRtc_Word16 sendoutOnCard[sizeBytes / 2]; + WebRtc_Word32 samplingFreq = _samplingFreqPlay * 1000; + + if (samplingFreq == 44000) + { + // Convert to sndcard samplerate + samplingFreq = 44100; + } + + memset(sendoutOnCard, 0, sizeBytes); + + int maxWrites = 3; + int avail = blockFrames+1; + if (0 == _bufferCheckMethodPlay) + { + // Normal case + maxWrites = (_playSndcardBuffsize / samplingFreq) / 10 + 3; + avail = LATE(snd_pcm_avail_update)(_handlePlayout); + } + + while ((avail >= blockFrames) && (maxWrites > 0)) + { + int written = LATE(snd_pcm_writei) + (_handlePlayout, + sendoutOnCard, + blockFrames); + + if (written != blockFrames) + { + if (written < 0) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Error writing to sound device (1), error: %s", + LATE(snd_strerror)(written)); + } + else + { + int remainingFrames = (blockFrames-written); + WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, + "Written %d playout frames to soundcard, trying to " + "write the remaining %d frames", + written, remainingFrames); + + written = LATE(snd_pcm_writei) + (_handlePlayout, + &sendoutOnCard[written*2], + remainingFrames); + + if( written == remainingFrames ) + { + WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, + _id, " %d frames were written", + written); + written = blockFrames; + } + else + { + WEBRTC_TRACE(kTraceWarning, + kTraceAudioDevice, _id, + " Error writing to sound device (2)," + " error: %s", + LATE(snd_strerror)(written)); + + // Try to recover + ErrorRecovery(written, _handlePlayout); + } + } + } + + --maxWrites; + if (0 == _bufferCheckMethodPlay) + { + avail = LATE(snd_pcm_avail_update)(_handlePlayout); + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " snd_pcm_avail_update returned %d", avail); + } + } + + // Write one extra so that we push the buffer full + LATE(snd_pcm_writei)(_handlePlayout, sendoutOnCard, blockFrames); + avail = 
LATE(snd_pcm_avail_update)(_handlePlayout); + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " snd_pcm_avail_update returned %d", avail); +} + +WebRtc_Word32 AudioDeviceLinuxALSA::InputSanityCheckAfterUnlockedPeriod() const +{ + if (_handleRecord == NULL) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " input state has been modified during unlocked period"); + return -1; + } + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::OutputSanityCheckAfterUnlockedPeriod() const +{ + if (_handlePlayout == NULL) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " output state has been modified during unlocked period"); + return -1; + } + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::PrepareStartRecording() +{ + WebRtc_Word32 res(0); + snd_pcm_sframes_t delayInFrames(0); + + // Check if mic is muted + bool muteEnabled = false; + MicrophoneMute(muteEnabled); + if (muteEnabled) + { + SetMicrophoneMute(false); + } + + // Check delay and available frames before reset + delayInFrames = -1; + res = LATE(snd_pcm_delay)(_handleRecord, &delayInFrames); + res = LATE(snd_pcm_avail_update)(_handleRecord); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Before reset: delayInFrames = %d, available frames = %d", + delayInFrames, res); + + // Reset pcm + res = LATE(snd_pcm_reset)(_handleRecord); + if (res < 0 ) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + "Error resetting pcm: %s (%d)", + LATE(snd_strerror)(res), res); + } + + // Check delay and available frames after reset + delayInFrames = -1; + res = LATE(snd_pcm_delay)(_handleRecord, &delayInFrames); + res = LATE(snd_pcm_avail_update)(_handleRecord); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "After reset: delayInFrames = %d, available frames = %d " + "(rec buf size = %u)", + delayInFrames, res, _recSndcardBuffsize); + + if (res < 0) + { + res = 0; + } + + if (delayInFrames < 0) + { + delayInFrames = 0; + } + + // True if the driver gives the actual number of frames in the buffer (normal case). + // Cast is safe after check above. + _buffersizeFromZeroAvail = (unsigned int)res < (_recSndcardBuffsize/2); + _buffersizeFromZeroDelay = (unsigned int)delayInFrames < (_recSndcardBuffsize/2); + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxALSA::GetPlayoutBufferDelay() +{ + WebRtc_Word32 msPlay(0); + WebRtc_Word32 res(0); + WebRtc_UWord32 samplesPerMs = _samplingFreqPlay; + + snd_pcm_sframes_t delayInFrames(0); + + // Check how much is in playout buffer and check delay + if (0 == _bufferCheckMethodPlay) + { + // Using snd_pcm_avail_update for checking buffer is the method that + // shall be used according to documentation. If we however detect that + // returned available buffer is larger than the buffer size, we switch + // to using snd_pcm_delay. See -391. + + // Get delay - distance between current application frame position and + // sound frame position. + // This is only used for giving delay measurement to VE. 
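+        // (For playback, snd_pcm_delay() reports the distance in frames
+        // between the application pointer and the frame currently being
+        // played, while snd_pcm_avail_update() reports how many frames can
+        // be written without blocking.)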
+ bool calcDelayFromAvail = false; + res = LATE(snd_pcm_delay)(_handlePlayout, &delayInFrames); + if (res < 0) + { + _writeErrors++; + if ( _writeErrors > 50 ) + { + if (_playError == 1) + { + WEBRTC_TRACE(kTraceWarning, + kTraceAudioDevice, _id, + " pending playout error exists"); + } + _playError = 1; // triggers callback from module process thread + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " kPlayoutError message posted: _writeErrors=%u", + _writeErrors); + } + + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + "LinuxALSASndCardStream::playThreadProcess(), " + "snd_pcm_delay error (1): %s (%d)", + LATE(snd_strerror)(res), res); + calcDelayFromAvail = true; + ErrorRecovery(res, _handlePlayout); + _delayMonitorStatePlay = 1; // Go to delay monitor state + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Going to delay monitor state"); + } + else + { + _writeErrors=0; + _sndCardPlayDelay = delayInFrames / samplesPerMs; + } + + // Check if we should write more data to the soundcard. Updates + // the r/w pointer. + int avail = LATE(snd_pcm_avail_update)(_handlePlayout); + if (avail < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "LinuxALSASndCardStream::playThreadProcess()," + " snd_pcm_avail_update error: %s (%d)", + LATE(snd_strerror)(avail), avail); + res = ErrorRecovery(avail, _handlePlayout); + if (avail == -EPIPE) + { + res = LATE(snd_pcm_prepare)(_handlePlayout); + if (res < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, + _id, "ErrorRecovery failed: %s", + LATE(snd_strerror)(res)); + } + FillPlayoutBuffer(); + msPlay = 0; + } + else + { + msPlay = 25; + } + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Guessed ms in playout buffer = %d", msPlay); + _delayMonitorStatePlay = 1; // Go to delay monitor state + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Going to delay monitor state"); + } + else + { + // Calculate filled part of playout buffer size in ms + // Safe since _playSndcardBuffsize is a small number + int pb = (int)_playSndcardBuffsize; + assert(pb >= 0); + // If avail_update returns a value larger than playout buffer and it + // doesn't keep decreasing we switch method of checking the buffer. + if ((avail > pb) && (avail >= _lastBufferCheckValuePlay)) + { + msPlay = 0; // Continue to write to buffer + ++_bufferCheckErrorsPlay; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " _bufferCheckErrorsPlay = %d", + _bufferCheckErrorsPlay); + if (_bufferCheckErrorsPlay > 50) + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, + _id, + " Switching to delay buffer check method " + "for playout"); + _bufferCheckMethodPlay = 1; // Switch to using snd_pcm_delay + _bufferCheckErrorsPlay = 0; + } + } + else + { + msPlay = pb > avail ? (pb - avail) / samplesPerMs : 0; + _bufferCheckErrorsPlay = 0; + } + _lastBufferCheckValuePlay = avail; + } + + if (calcDelayFromAvail) + { + _sndCardPlayDelay = msPlay; + } + // Here we monitor the delay value if we had an error + if (0 == _delayMonitorStatePlay) + { + // Normal state, just store delay value + _previousSndCardPlayDelay = _sndCardPlayDelay; + } + else if (1 == _delayMonitorStatePlay) + { + // We had an error, check if we get stuck in a long delay in playout. + // If so, restart device completely. Workaround for PulseAudio. 
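+            // Heuristic used below: the delay is treated as "stuck" when it
+            // exceeds 200 ms and has either doubled, or grown by more than
+            // 200 ms, compared with the last delay stored in the normal state.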
+            if ((_sndCardPlayDelay > 200) &&
+                ((_sndCardPlayDelay > _previousSndCardPlayDelay * 2) ||
+                 (_sndCardPlayDelay > _previousSndCardPlayDelay + 200)))
+            {
+                if (_largeDelayCountPlay < 0) _largeDelayCountPlay = 0;
+                ++_largeDelayCountPlay;
+                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                             " _largeDelayCountPlay = %d",
+                             _largeDelayCountPlay);
+                if (_largeDelayCountPlay > 50)
+                {
+                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                                 " Detected stuck in long delay after error "
+                                 "- restarting playout device");
+                    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
+                                 " _previousSndCardPlayDelay = %d,"
+                                 " _sndCardPlayDelay = %d",
+                                 _previousSndCardPlayDelay, _sndCardPlayDelay);
+                    StopPlayout();
+                    InitPlayout();
+                    res = LATE(snd_pcm_prepare)(_handlePlayout);
+                    if (res < 0)
+                    {
+                        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                                     " Cannot prepare audio playout "
+                                     "interface for use: %s (%d)",
+                                     LATE(snd_strerror)(res), res);
+                    }
+                    FillPlayoutBuffer();
+                    _startPlay = true;
+                    _delayMonitorStatePlay = 0;
+                    _largeDelayCountPlay = 0;
+                    // Make sure we only restart the device once. We could have
+                    // had an error due to e.g. a changed sink route in
+                    // PulseAudio, which would correctly lead to a larger delay.
+                    // In this case we shouldn't get stuck restarting.
+                    _previousSndCardPlayDelay = _sndCardPlayDelay;
+                    return -1;
+                }
+            }
+            else
+            {
+                // No error, keep count of OK tests
+                if (_largeDelayCountPlay > 0) _largeDelayCountPlay = 0;
+                --_largeDelayCountPlay;
+                if (_largeDelayCountPlay < -50)
+                {
+                    // After a couple of OK monitor tests, go back to normal state
+                    _delayMonitorStatePlay = 0;
+                    _largeDelayCountPlay = 0;
+                    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                                 " Leaving delay monitor state");
+                    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
+                                 " _previousSndCardPlayDelay = %d,"
+                                 " _sndCardPlayDelay = %d",
+                                 _previousSndCardPlayDelay, _sndCardPlayDelay);
+                }
+            }
+        }
+        else
+        {
+            // Should never happen
+            assert(false);
+        }
+    }
+    else if (1 == _bufferCheckMethodPlay)
+    {
+        // Check if we should write more data to the soundcard.
+        // Alternative method to get the delay (snd_pcm_avail_update() seems to
+        // give unreliable values in some cases, e.g. with dmix):
+        // distance between current application frame position and sound frame
+        // position
+        res = LATE(snd_pcm_delay)(_handlePlayout, &delayInFrames);
+        if (res < 0 || res > (int)_playSndcardBuffsize)
+        {
+            int recoveryRes = ErrorRecovery(res, _handlePlayout);
+            if (res == -EPIPE)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             "LinuxALSASndCardStream::playThreadProcess(), "
+                             "outbuffer underrun");
+                if (recoveryRes < 0)
+                {
+                    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                                 "ErrorRecovery failed: %s",
+                                 LATE(snd_strerror)(res));
+                }
+                msPlay = 0;
+            }
+            else
+            {
+                _writeErrors++;
+                if (_writeErrors > 50)
+                {
+                    if (_playError == 1)
+                    {
+                        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                                     " pending playout error exists");
+                    }
+                    _playError = 1;  // triggers callback from module process thread
+                    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                                 " kPlayoutError message posted:"
+                                 " _writeErrors=%u", _writeErrors);
+                }
+
+                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                             "LinuxALSASndCardStream::playThreadProcess(),"
+                             " snd_pcm_delay error (2): %s (%d)",
+                             LATE(snd_strerror)(res), res);
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             "Playout buffer size=%d", _playSndcardBuffsize);
+                msPlay = 25;
+                WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
+                             " Guessed ms in playout buffer = %d", msPlay);
+            }
+        }
+        else
+        {
+            _writeErrors = 0;
+            msPlay = delayInFrames / samplesPerMs;  // playout buffer delay in ms
+            _sndCardPlayDelay = msPlay;
+        }
+    }
+    else
+    {
+        // Unknown _bufferCheckMethodPlay value, should never happen
+        assert(false);
+    }
+
+/*
+    delayInFrames = -1;
+    snd_pcm_delay(_handlePlayout, &delayInFrames);
+    // DEBUG END
+*/
+
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 "msplay = %d", msPlay);
+    return msPlay;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::GetRecordingBufferDelay(bool preRead)
+{
+    WebRtc_Word32 msRec(0);
+    WebRtc_Word32 res(0);
+    WebRtc_UWord32 samplesPerMs = _samplingFreqRec;
+
+    snd_pcm_sframes_t delayInFrames(0);
+
+    if ((0 == _bufferCheckMethodRec) || (1 == _bufferCheckMethodRec))
+    {
+        // Get delay, only used for input to VE
+        bool calcDelayFromAvail = false;
+        res = LATE(snd_pcm_delay)(_handleRecord, &delayInFrames);
+        if (res < 0)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                         "LinuxALSASndCardStream::recThreadfun(),"
+                         " snd_pcm_delay (3) error: %s (%d)",
+                         LATE(snd_strerror)(res), res);
+            ErrorRecovery(res, _handleRecord);
+            calcDelayFromAvail = true;  // Must get estimate below instead
+        }
+        else if (0 == _bufferCheckMethodRec)
+        {
+            if (_buffersizeFromZeroDelay)
+            {
+                // Normal case
+                _sndCardRecDelay = delayInFrames / samplesPerMs;
+            }
+            else
+            {
+                // Safe since _recSndcardBuffsize is a small number
+                int rb = (int)_recSndcardBuffsize;
+                assert(rb >= 0);
+                _sndCardRecDelay = (rb >= delayInFrames ?
+                    rb - delayInFrames : rb) / samplesPerMs;
+            }
+        }
+        // if method == 1 we calculate delay below to keep algorithm same as
+        // when we didn't have method 0.
+
+        // Check if we have data in rec buffer. Updates the r/w pointer.
+        int avail = -1;
+        if (0 == _bufferCheckMethodRec)
+        {
+            avail = res = LATE(snd_pcm_avail_update)(_handleRecord);
+        }
+        if (res >= 0)
+        {
+            // We must check that state == RUNNING, otherwise we might have a
+            // false buffer value.
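+            // (In states other than RUNNING, e.g. PREPARED or after an xrun,
+            // the avail/delay figures do not describe actual captured data.)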
+ // Normal case + if (LATE(snd_pcm_state)(_handleRecord) == SND_PCM_STATE_RUNNING) + { + if (0 == _bufferCheckMethodRec) + { // Safe since _recSndcardBuffsize is a small number + int rb = (int)_recSndcardBuffsize; + if (_buffersizeFromZeroAvail) + { + // Normal case + msRec = avail / samplesPerMs; + } + else + { + assert(rb >= 0); + msRec = (rb >= avail ? rb - avail : rb) / samplesPerMs; + } + + if (calcDelayFromAvail) + { + _sndCardRecDelay = msRec; + } + + if ((msRec == 0) || (avail > rb)) + { + ++_bufferCheckErrorsRec; + WEBRTC_TRACE(kTraceInfo, + kTraceAudioDevice, _id, + " _bufferCheckErrorsRec: %d (avail=%d)", + _bufferCheckErrorsRec, avail); + if (_bufferCheckErrorsRec >= THR_OLD_BUFFER_CHECK_METHOD) + { + WEBRTC_TRACE(kTraceInfo, + kTraceAudioDevice, _id, + " Switching to delay buffer check" + " method for recording"); + _bufferCheckMethodRec = 1; + _bufferCheckErrorsRec = 0; + } + } + else + { + _bufferCheckErrorsRec = 0; + } + } + else // 1 == _bufferCheckMethodRec + { + if (_buffersizeFromZeroDelay) + { + msRec = delayInFrames / samplesPerMs; + } + else + { + msRec = + (_recSndcardBuffsize - delayInFrames) / samplesPerMs; + } + _sndCardRecDelay = msRec; + + if (msRec == 0) + { + ++_bufferCheckErrorsRec; + WEBRTC_TRACE(kTraceInfo, + kTraceAudioDevice, _id, + " _bufferCheckErrorsRec: %d", + _bufferCheckErrorsRec); + if (_bufferCheckErrorsRec >= THR_IGNORE_BUFFER_CHECK) + { + // The delay has been zero too many times, ignore + // the delay value! + WEBRTC_TRACE(kTraceInfo, + kTraceAudioDevice, _id, + " Switching to Ignore Delay Mode"); + _bufferCheckMethodRec = 2; + _bufferCheckErrorsRec = 0; + } + } + } + } + else if (LATE(snd_pcm_state)(_handleRecord) == SND_PCM_STATE_XRUN) + { + // We've probably had a buffer overrun + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Record buffer overrun, trying to recover"); + // Handle pipe error (overrun) + res = ErrorRecovery(-EPIPE, _handleRecord); + if (res < 0) + { + // We were not able to recover from the error. + // CRITICAL? + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, + _id, + "Can't recover from buffer overrun, " + "error: %s (%d)", + LATE(snd_strerror)(res), res); + return -1; + } + msRec = _recSndcardBuffsize / samplesPerMs; + } + } + else + { + // Something went wrong asking for the delay / buffer. Try to + // recover and make a guess. + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "LinuxALSASndCardStream::recThreadfun(), " + "snd_pcm_avail_update: %s (%d)", + LATE(snd_strerror)(res), res); + res = ErrorRecovery(avail, _handleRecord); + if (preRead) + { + if (res == 1) + { + // Recovered from buffer overrun, continue and read data. + msRec = _recSndcardBuffsize / samplesPerMs; + } + else + { + return -1; + } + } + else // We have a previous msRec value and have read maximum 10 ms since then. + { + if (res < 0) + { + return -1; + } + + msRec = _sndCardRecDelay - 10; + + if (calcDelayFromAvail) + { + _sndCardRecDelay = msRec; + } + } + } + } + else if (2 == _bufferCheckMethodRec) + { + // We've stopped asking for the number of samples on soundcard. 
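+        // (Check method 2 is entered after THR_IGNORE_BUFFER_CHECK consecutive
+        // zero-delay readings; the value reported by the driver is then
+        // considered meaningless, so the delay is simply reported as zero.)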
+        msRec = 0;
+    }
+    else
+    {
+        // Should never happen
+        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
+                     "Unknown buffer check method (%d)", _bufferCheckMethodRec);
+        assert(false);
+    }
+
+    return msRec;
+}
+
+WebRtc_Word32 AudioDeviceLinuxALSA::ErrorRecovery(WebRtc_Word32 error,
+                                                  snd_pcm_t* deviceHandle)
+{
+    int st = LATE(snd_pcm_state)(deviceHandle);
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 "Trying to recover from error: %s (%d) (state %d)",
+                 LATE(snd_strerror)(error), error, st);
+
+    // It is recommended to use snd_pcm_recover for all errors. If that function
+    // cannot handle the error, the input error code will be returned, otherwise
+    // 0 is returned. From the snd_pcm_recover API doc: "This functions handles
+    // -EINTR (interrupted system call), -EPIPE (overrun or underrun) and
+    // -ESTRPIPE (stream is suspended) error codes trying to prepare given
+    // stream for next I/O."
+
+    // snd_pcm_recover isn't available in older alsa, e.g. on the FC4 machine
+    // in Sthlm lab.
+
+    int res = LATE(snd_pcm_recover)(deviceHandle, error, 1);
+    if (0 == res)
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                     " Recovery - snd_pcm_recover OK");
+
+        if (error == -EPIPE &&  // Buffer underrun/overrun.
+            LATE(snd_pcm_stream)(deviceHandle) == SND_PCM_STREAM_CAPTURE)
+        {
+            // For capture streams we also have to repeat the explicit start()
+            // to get data flowing again.
+            int err = LATE(snd_pcm_start)(deviceHandle);
+            if (err != 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                             " Recovery - snd_pcm_start error: %u", err);
+                return -1;
+            }
+        }
+
+        return -EPIPE == error ? 1 : 0;
+    }
+
+    return res;
+}
+
+// ============================================================================
+// Thread Methods
+// ============================================================================
+
+bool AudioDeviceLinuxALSA::PlayThreadFunc(void* pThis)
+{
+    return (static_cast<AudioDeviceLinuxALSA*>(pThis)->PlayThreadProcess());
+}
+
+bool AudioDeviceLinuxALSA::RecThreadFunc(void* pThis)
+{
+    return (static_cast<AudioDeviceLinuxALSA*>(pThis)->RecThreadProcess());
+}
+
+bool AudioDeviceLinuxALSA::PlayThreadProcess()
+{
+    WebRtc_Word32 written(0);
+    WebRtc_Word32 msPlay(0);
+
+    // Number of (stereo) samples
+    WebRtc_Word32 numPlaySamples = _playbackBufferSize / (2 * _playChannels);
+    WebRtc_Word8 playBuffer[_playbackBufferSize];
+
+    switch (_timeEventPlay.Wait(1000))
+    {
+    case kEventSignaled:
+        break;
+    case kEventError:
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     "EventWrapper::Wait() failed => restarting timer");
+        _timeEventPlay.StopTimer();
+        _timeEventPlay.StartTimer(true, PLAY_TIMER_PERIOD_MS);
+        return true;
+    case kEventTimeout:
+        return true;
+    }
+
+    Lock();
+
+    if (_startPlay)
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                     "_startPlay true, performing initial actions");
+
+        _startPlay = false;
+
+        // Fill playout buffer with zeroes
+        FillPlayoutBuffer();
+
+        _bufferCheckErrorsPlay = 0;
+        _playing = true;
+        _playStartEvent.Set();
+    }
+
+    if (_playing)
+    {
+        // get number of ms of sound that remains in the sound card buffer for
+        // playback
+        msPlay = GetPlayoutBufferDelay();
+        if (msPlay == -1)
+        {
+            UnLock();
+            return true;
+        }
+
+        // write more data if below threshold
+        if (msPlay < PLAYBACK_THRESHOLD)
+        {
+            // ask for new PCM data to be played out using the AudioDeviceBuffer
+            // ensure that this callback is executed without taking the
+            // audio-thread lock
+            //
+            UnLock();
+            WebRtc_Word32 nSamples =
+                (WebRtc_Word32)_ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
+            Lock();
+
+            if
(OutputSanityCheckAfterUnlockedPeriod() == -1) + { + UnLock(); + return true; + } + + nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer); + if (nSamples != numPlaySamples) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " invalid number of output samples(%d)", nSamples); + } + + written = LATE(snd_pcm_writei)(_handlePlayout, playBuffer, numPlaySamples); + if (written != numPlaySamples) + { + if (written < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, + _id, + "Error writing to sound device (7), error: %d/%s", + written, + LATE(snd_strerror)(written)); + + // Try to recover + ErrorRecovery(written, _handlePlayout); + _delayMonitorStatePlay = 1; // Go to delay monitor state + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, + _id, + " Going to delay monitor state"); + } + else + { + _writeErrors = 0; + int remainingFrames = (numPlaySamples - written); + written = LATE(snd_pcm_writei) + (_handlePlayout, + &playBuffer[written*2], + remainingFrames); + if( written == remainingFrames ) + { + written = numPlaySamples; + } + else + { + if (written < 0) + { + WEBRTC_TRACE(kTraceError, + kTraceAudioDevice, _id, + "Error writing to sound device (8), " + "error: %d/%s, numPlaySamples=%d, " + "remainingFrames=%d", + written, LATE(snd_strerror)(written), + numPlaySamples, remainingFrames); + + // Try to recover + ErrorRecovery(written, _handlePlayout); + } + else + { + WEBRTC_TRACE(kTraceWarning, + kTraceAudioDevice, _id, + "Could not write all playout data (1)," + " numPlaySamples=%d, remainingFrames=%d," + " written=%d", + numPlaySamples, remainingFrames, written); + } + } + } + } + else + { + _writeErrors = 0; + } + + // Write more data if we are more than 10 ms under the threshold. + if (msPlay < PLAYBACK_THRESHOLD - 10) + { + // ask for new PCM data to be played out using the + // AudioDeviceBuffer ensure that this callback is executed + // without taking the audio-thread lock + // + UnLock(); + WebRtc_Word32 nSamples = (WebRtc_Word32) + _ptrAudioBuffer->RequestPlayoutData(numPlaySamples); + Lock(); + + if (OutputSanityCheckAfterUnlockedPeriod() == -1) + { + UnLock(); + return true; + } + + nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer); + if (nSamples != numPlaySamples) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, + _id, " invalid number of output samples(%d)", + nSamples); + } + + written = LATE(snd_pcm_writei)( + _handlePlayout, playBuffer, numPlaySamples); + if (written != numPlaySamples) + { + if (written < 0) + { + WEBRTC_TRACE(kTraceError, + kTraceAudioDevice, _id, + "Error writing to sound device (9), " + "error: %s", LATE(snd_strerror)(written)); + + // Try to recover + ErrorRecovery(written, _handlePlayout); + _delayMonitorStatePlay = 1; // Go to delay monitor state + WEBRTC_TRACE(kTraceInfo, + kTraceAudioDevice, _id, + " Going to delay monitor state"); + } + else + { + int remainingFrames = (numPlaySamples - written); + written = LATE(snd_pcm_writei) + (_handlePlayout, + &playBuffer[written*2], + remainingFrames); + if (written == remainingFrames) + { + written = numPlaySamples; + } + else + { + if (written < 0) + { + WEBRTC_TRACE(kTraceError, + kTraceAudioDevice, _id, + "Error writing to sound device (10)," + " error: %d/%s, numPlaySamples=%d," + " remainingFrames=%d", + written, LATE(snd_strerror)(written), + numPlaySamples, remainingFrames); + + // Try to recover + ErrorRecovery(written, _handlePlayout); + } + else + { + WEBRTC_TRACE(kTraceWarning, + kTraceAudioDevice, _id, + "Could not write all playout data" + " (2), numPlaySamples=%d, " + 
"remainingFrames=%d, written=%d", + numPlaySamples, remainingFrames, + written); + } + } + } + + } + } // msPlay < PLAYBACK_THRESHOLD - 10 + + } // msPlay < PLAYBACK_THRESHOLD + + } // _playing + + UnLock(); + return true; +} + +bool AudioDeviceLinuxALSA::RecThreadProcess() +{ + WebRtc_Word32 msRec(0); + WebRtc_Word32 framesInRecData(0); + + // Number of (stereo) samples to record + WebRtc_Word32 recBufSizeInSamples = _recordBufferSize / (2 * _recChannels); + WebRtc_Word16 tmpBuffer[_recordBufferSize / 2]; + WebRtc_UWord32 samplesPerMs = _samplingFreqRec; + + switch (_timeEventRec.Wait(1000)) + { + case kEventSignaled: + break; + case kEventError: + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + "EventWrapper::Wait() failed => restarting timer"); + _timeEventRec.StopTimer(); + _timeEventRec.StartTimer(true, REC_TIMER_PERIOD_MS); + return true; + case kEventTimeout: + return true; + } + + Lock(); + + if (_startRec) + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "_startRec true, performing initial actions"); + + if (PrepareStartRecording() == 0) + { + _bufferCheckErrorsRec = 0; + _startRec = false; + _recording = true; + _recStartEvent.Set(); + } + } + + if (_recording) + { + // get number of ms of sound that remains in the sound card buffer for + // playback + msRec = GetRecordingBufferDelay(true); + if (msRec == -1) + { + UnLock(); + return true; + } + + // read data if a whole frame has been captured + // or if we are in ignore delay mode (check method 2) + if ((msRec > 10) || (2 == _bufferCheckMethodRec)) + { + // Read 10 ms of data from soundcard + framesInRecData = LATE(snd_pcm_readi) + (_handleRecord, + tmpBuffer, + recBufSizeInSamples); + + if (framesInRecData < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "pcm read error (1)"); + ErrorRecovery(framesInRecData, _handleRecord); + UnLock(); + return true; + } + else if (framesInRecData + (WebRtc_Word32)_numReadyRecSamples < + recBufSizeInSamples) + { + for (int idx = 0; idx < framesInRecData*_recChannels; idx++) + { + _recBuffer[_numReadyRecSamples*_recChannels + idx] = + tmpBuffer[idx]; + } + _numReadyRecSamples += framesInRecData; + + framesInRecData = LATE(snd_pcm_readi) + (_handleRecord, + tmpBuffer, + recBufSizeInSamples - _numReadyRecSamples); + + if (framesInRecData < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, + _id, "pcm read error (2)"); + ErrorRecovery(framesInRecData, _handleRecord); + UnLock(); + return true; + } + else if (framesInRecData + (WebRtc_Word32)_numReadyRecSamples == + recBufSizeInSamples) + { + // We got all the data we need, go on as normal. + } + else + { + // We still don't have enough data, copy what we have and leave. + for (int idx = 0; idx < framesInRecData*_recChannels; idx++) + { + _recBuffer[_numReadyRecSamples*_recChannels + idx] = + tmpBuffer[idx]; + } + _numReadyRecSamples += framesInRecData; + WEBRTC_TRACE(kTraceStream, + kTraceAudioDevice, _id, + " %d samples copied. 
Not enough, return and" + " wait for more.", + framesInRecData); + UnLock(); + return true; + } + } + + // get recording buffer delay after reading + // to have a value to use for the AEC + msRec = GetRecordingBufferDelay(false); + if (msRec == -1) + { + UnLock(); + return true; + } + + // calculate the number of samples to copy + // to have a full buffer + int copySamples = 0; + if ((WebRtc_Word32)_numReadyRecSamples + framesInRecData >= + recBufSizeInSamples) + { + copySamples = recBufSizeInSamples - _numReadyRecSamples; + } + else + { + copySamples = framesInRecData; + } + + // fill up buffer + for (int idx = 0; idx < copySamples*_recChannels; idx++) + { + _recBuffer[_numReadyRecSamples*_recChannels + idx] = + tmpBuffer[idx]; + } + + _numReadyRecSamples += copySamples; + framesInRecData -= copySamples; + + // Send data, if we have 10ms data... + if ((WebRtc_Word32)_numReadyRecSamples == recBufSizeInSamples) + { + WebRtc_UWord32 currentMicLevel(0); + WebRtc_UWord32 newMicLevel(0); + WebRtc_Word32 msRecDelay = 0 == _bufferCheckMethodRec ? + _sndCardRecDelay : msRec; + WebRtc_Word32 msReady = _numReadyRecSamples / samplesPerMs; + WebRtc_Word32 msStored = framesInRecData / samplesPerMs; + WebRtc_Word32 blockSize = recBufSizeInSamples / samplesPerMs; + + // TODO(xians): The blockSize - 25 term brings the delay measurement + // into line with the Windows interpretation. Investigate if this + // works properly with different block sizes. + // TODO(xians): Should only the rec delay from snd_pcm_delay be taken + // into account? See ALSA API doc. + // Probably we want to add the remaining data in the buffer as + // well or is that already in any of the variables? + WebRtc_Word32 msTotalRecDelay = msRecDelay + msReady + + msStored + blockSize - 25; + if (msTotalRecDelay < 0) + { + msTotalRecDelay = 0; + } + // store the recorded buffer (no action will be taken if the + // #recorded samples is not a full buffer) + _ptrAudioBuffer->SetRecordedBuffer( + (WebRtc_Word8 *)&_recBuffer[0], _numReadyRecSamples); + + if (AGC()) + { + // store current mic level in the audio buffer if AGC is enabled + if (MicrophoneVolume(currentMicLevel) == 0) + { + if (currentMicLevel == 0xffffffff) + { + currentMicLevel = 100; + } + // this call does not affect the actual microphone volume + _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel); + } + } + + // store vqe delay values + _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, + msTotalRecDelay, + 0); + + // deliver recorded samples at specified sample rate, mic level + // etc. to the observer using callback + UnLock(); + _ptrAudioBuffer->DeliverRecordedData(); + Lock(); + + if (InputSanityCheckAfterUnlockedPeriod() == -1) + { + UnLock(); + return true; + } + + if (AGC()) + { + newMicLevel = _ptrAudioBuffer->NewMicLevel(); + if (newMicLevel != 0) + { + // The VQE will only deliver non-zero microphone levels + //when a change is needed. + // Set this new mic level (received from the observer + // as return value in the callback). 
+                        WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
+                                     " AGC change of volume: old=%u => new=%u",
+                                     currentMicLevel, newMicLevel);
+                        if (SetMicrophoneVolume(newMicLevel) == -1)
+                        {
+                            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                                         " the required modification of the"
+                                         " microphone volume failed");
+                        }
+                    }
+                }
+
+                _numReadyRecSamples = 0;
+
+                // if there are remaining samples in tmpBuffer
+                // copy those to _recBuffer
+                if (framesInRecData > 0)
+                {
+                    WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
+                                 " Got rest samples, copy %d samples to rec"
+                                 " buffer", framesInRecData);
+                    for (int idx = 0; idx < framesInRecData; idx++)
+                    {
+                        _recBuffer[idx] = tmpBuffer[copySamples+idx];
+                    }
+
+                    _numReadyRecSamples = framesInRecData;
+                }
+
+            } // if (_numReadyRecSamples == recBufSizeInSamples)
+
+        } // (msRec > 10) || (2 == _bufferCheckMethodRec)
+
+    } // _recording
+
+    UnLock();
+    return true;
+}
+
+}
diff --git a/src/modules/audio_device/main/source/linux/audio_device_alsa_linux.h b/src/modules/audio_device/main/source/linux/audio_device_alsa_linux.h
new file mode 100644
index 000000000..0c14f8bef
--- /dev/null
+++ b/src/modules/audio_device/main/source/linux/audio_device_alsa_linux.h
@@ -0,0 +1,284 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H
+#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H
+
+#include "audio_device_generic.h"
+#include "critical_section_wrapper.h"
+#include "audio_mixer_manager_alsa_linux.h"
+
+#include <alsa/asoundlib.h>
+
+namespace webrtc
+{
+class EventWrapper;
+class ThreadWrapper;
+
+// Number of continuous buffer check errors before going 0->1
+const WebRtc_UWord16 THR_OLD_BUFFER_CHECK_METHOD = 30;
+// Number of buffer check errors before going 1->2
+const WebRtc_UWord16 THR_IGNORE_BUFFER_CHECK = 30;
+// 2.7 seconds (decimal 131071)
+const WebRtc_UWord32 ALSA_SNDCARD_BUFF_SIZE_REC = 0x1ffff;
+// ~170 ms (decimal 8191) - enough since we only write to buffer if it contains
+// less than 50 ms
+const WebRtc_UWord32 ALSA_SNDCARD_BUFF_SIZE_PLAY = 0x1fff;
+
+const WebRtc_UWord32 REC_TIMER_PERIOD_MS = 2;
+const WebRtc_UWord32 PLAY_TIMER_PERIOD_MS = 5;
+const WebRtc_UWord16 PLAYBACK_THRESHOLD = 50;
+
+const WebRtc_UWord32 REC_SAMPLES_PER_MS = 48;
+const WebRtc_UWord32 PLAY_SAMPLES_PER_MS = 48;
+
+class AudioDeviceLinuxALSA : public AudioDeviceGeneric
+{
+public:
+    AudioDeviceLinuxALSA(const WebRtc_Word32 id);
+    ~AudioDeviceLinuxALSA();
+
+    // Retrieve the currently utilized audio layer
+    virtual WebRtc_Word32 ActiveAudioLayer(
+        AudioDeviceModule::AudioLayer& audioLayer) const;
+
+    // Main initialization and termination
+    virtual WebRtc_Word32 Init();
+    virtual WebRtc_Word32 Terminate();
+    virtual bool Initialized() const;
+
+    // Device enumeration
+    virtual WebRtc_Word16 PlayoutDevices();
+    virtual WebRtc_Word16 RecordingDevices();
+    virtual WebRtc_Word32 PlayoutDeviceName(
+        WebRtc_UWord16 index,
+        WebRtc_Word8 name[kAdmMaxDeviceNameSize],
+        WebRtc_Word8 guid[kAdmMaxGuidSize]);
+    virtual WebRtc_Word32 RecordingDeviceName(
+        WebRtc_UWord16 index,
+        WebRtc_Word8 name[kAdmMaxDeviceNameSize],
+        WebRtc_Word8 guid[kAdmMaxGuidSize]);
+
+    // 
Device selection + virtual WebRtc_Word32 SetPlayoutDevice(WebRtc_UWord16 index); + virtual WebRtc_Word32 SetPlayoutDevice( + AudioDeviceModule::WindowsDeviceType device); + virtual WebRtc_Word32 SetRecordingDevice(WebRtc_UWord16 index); + virtual WebRtc_Word32 SetRecordingDevice( + AudioDeviceModule::WindowsDeviceType device); + + // Audio transport initialization + virtual WebRtc_Word32 PlayoutIsAvailable(bool& available); + virtual WebRtc_Word32 InitPlayout(); + virtual bool PlayoutIsInitialized() const; + virtual WebRtc_Word32 RecordingIsAvailable(bool& available); + virtual WebRtc_Word32 InitRecording(); + virtual bool RecordingIsInitialized() const; + + // Audio transport control + virtual WebRtc_Word32 StartPlayout(); + virtual WebRtc_Word32 StopPlayout(); + virtual bool Playing() const; + virtual WebRtc_Word32 StartRecording(); + virtual WebRtc_Word32 StopRecording(); + virtual bool Recording() const; + + // Microphone Automatic Gain Control (AGC) + virtual WebRtc_Word32 SetAGC(bool enable); + virtual bool AGC() const; + + // Volume control based on the Windows Wave API (Windows only) + virtual WebRtc_Word32 SetWaveOutVolume(WebRtc_UWord16 volumeLeft, + WebRtc_UWord16 volumeRight); + virtual WebRtc_Word32 WaveOutVolume(WebRtc_UWord16& volumeLeft, + WebRtc_UWord16& volumeRight) const; + + // Audio mixer initialization + virtual WebRtc_Word32 SpeakerIsAvailable(bool& available); + virtual WebRtc_Word32 InitSpeaker(); + virtual bool SpeakerIsInitialized() const; + virtual WebRtc_Word32 MicrophoneIsAvailable(bool& available); + virtual WebRtc_Word32 InitMicrophone(); + virtual bool MicrophoneIsInitialized() const; + + // Speaker volume controls + virtual WebRtc_Word32 SpeakerVolumeIsAvailable(bool& available); + virtual WebRtc_Word32 SetSpeakerVolume(WebRtc_UWord32 volume); + virtual WebRtc_Word32 SpeakerVolume(WebRtc_UWord32& volume) const; + virtual WebRtc_Word32 MaxSpeakerVolume(WebRtc_UWord32& maxVolume) const; + virtual WebRtc_Word32 MinSpeakerVolume(WebRtc_UWord32& minVolume) const; + virtual WebRtc_Word32 SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const; + + // Microphone volume controls + virtual WebRtc_Word32 MicrophoneVolumeIsAvailable(bool& available); + virtual WebRtc_Word32 SetMicrophoneVolume(WebRtc_UWord32 volume); + virtual WebRtc_Word32 MicrophoneVolume(WebRtc_UWord32& volume) const; + virtual WebRtc_Word32 MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const; + virtual WebRtc_Word32 MinMicrophoneVolume(WebRtc_UWord32& minVolume) const; + virtual WebRtc_Word32 MicrophoneVolumeStepSize( + WebRtc_UWord16& stepSize) const; + + // Speaker mute control + virtual WebRtc_Word32 SpeakerMuteIsAvailable(bool& available); + virtual WebRtc_Word32 SetSpeakerMute(bool enable); + virtual WebRtc_Word32 SpeakerMute(bool& enabled) const; + + // Microphone mute control + virtual WebRtc_Word32 MicrophoneMuteIsAvailable(bool& available); + virtual WebRtc_Word32 SetMicrophoneMute(bool enable); + virtual WebRtc_Word32 MicrophoneMute(bool& enabled) const; + + // Microphone boost control + virtual WebRtc_Word32 MicrophoneBoostIsAvailable(bool& available); + virtual WebRtc_Word32 SetMicrophoneBoost(bool enable); + virtual WebRtc_Word32 MicrophoneBoost(bool& enabled) const; + + // Stereo support + virtual WebRtc_Word32 StereoPlayoutIsAvailable(bool& available); + virtual WebRtc_Word32 SetStereoPlayout(bool enable); + virtual WebRtc_Word32 StereoPlayout(bool& enabled) const; + virtual WebRtc_Word32 StereoRecordingIsAvailable(bool& available); + virtual WebRtc_Word32 SetStereoRecording(bool 
enable); + virtual WebRtc_Word32 StereoRecording(bool& enabled) const; + + // Delay information and control + virtual WebRtc_Word32 SetPlayoutBuffer( + const AudioDeviceModule::BufferType type, + WebRtc_UWord16 sizeMS); + virtual WebRtc_Word32 PlayoutBuffer( + AudioDeviceModule::BufferType& type, + WebRtc_UWord16& sizeMS) const; + virtual WebRtc_Word32 PlayoutDelay(WebRtc_UWord16& delayMS) const; + virtual WebRtc_Word32 RecordingDelay(WebRtc_UWord16& delayMS) const; + + // CPU load + virtual WebRtc_Word32 CPULoad(WebRtc_UWord16& load) const; + +public: + virtual bool PlayoutWarning() const; + virtual bool PlayoutError() const; + virtual bool RecordingWarning() const; + virtual bool RecordingError() const; + virtual void ClearPlayoutWarning(); + virtual void ClearPlayoutError(); + virtual void ClearRecordingWarning(); + virtual void ClearRecordingError(); + +public: + virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer); + +private: + WebRtc_Word32 GetDevicesInfo(const WebRtc_Word32 function, + const bool playback, + const WebRtc_Word32 enumDeviceNo = 0, + char* enumDeviceName = NULL, + const WebRtc_Word32 ednLen = 0) const; + WebRtc_Word32 ErrorRecovery(WebRtc_Word32 error, snd_pcm_t* deviceHandle); + void FillPlayoutBuffer(); + +private: + void Lock() { _critSect.Enter(); }; + void UnLock() { _critSect.Leave(); }; +private: + inline WebRtc_Word32 InputSanityCheckAfterUnlockedPeriod() const; + inline WebRtc_Word32 OutputSanityCheckAfterUnlockedPeriod() const; + + WebRtc_Word32 PrepareStartRecording(); + WebRtc_Word32 GetPlayoutBufferDelay(); + WebRtc_Word32 GetRecordingBufferDelay(bool preRead); + +private: + static bool RecThreadFunc(void*); + static bool PlayThreadFunc(void*); + bool RecThreadProcess(); + bool PlayThreadProcess(); + +private: + AudioDeviceBuffer* _ptrAudioBuffer; + + CriticalSectionWrapper& _critSect; + EventWrapper& _timeEventRec; + EventWrapper& _timeEventPlay; + EventWrapper& _recStartEvent; + EventWrapper& _playStartEvent; + + ThreadWrapper* _ptrThreadRec; + ThreadWrapper* _ptrThreadPlay; + WebRtc_UWord32 _recThreadID; + WebRtc_UWord32 _playThreadID; + + WebRtc_Word32 _id; + + AudioMixerManagerLinuxALSA _mixerManager; + + WebRtc_UWord16 _inputDeviceIndex; + WebRtc_UWord16 _outputDeviceIndex; + bool _inputDeviceIsSpecified; + bool _outputDeviceIsSpecified; + + snd_pcm_t* _handleRecord; + snd_pcm_t* _handlePlayout; + + snd_pcm_uframes_t _recSndcardBuffsize; + snd_pcm_uframes_t _playSndcardBuffsize; + + WebRtc_UWord32 _samplingFreqRec; + WebRtc_UWord32 _samplingFreqPlay; + WebRtc_UWord8 _recChannels; + WebRtc_UWord8 _playChannels; + + WebRtc_UWord32 _playbackBufferSize; + WebRtc_UWord32 _recordBufferSize; + WebRtc_Word16* _recBuffer; + AudioDeviceModule::BufferType _playBufType; + +private: + bool _initialized; + bool _recording; + bool _playing; + bool _recIsInitialized; + bool _playIsInitialized; + bool _startRec; + bool _stopRec; + bool _startPlay; + bool _stopPlay; + bool _AGC; + bool _buffersizeFromZeroAvail; + bool _buffersizeFromZeroDelay; + + WebRtc_UWord32 _sndCardPlayDelay; // Just to store last value + WebRtc_UWord32 _previousSndCardPlayDelay; // Stores previous _sndCardPlayDelay value + WebRtc_UWord8 _delayMonitorStatePlay; // 0 normal, 1 monitor delay change (after error) + WebRtc_Word16 _largeDelayCountPlay; // Used when monitoring delay change + WebRtc_UWord32 _sndCardRecDelay; + WebRtc_UWord32 _numReadyRecSamples; + + WebRtc_UWord8 _bufferCheckMethodPlay; + WebRtc_UWord8 _bufferCheckMethodRec; + WebRtc_UWord32 _bufferCheckErrorsPlay; + 
    WebRtc_UWord32 _bufferCheckErrorsRec;
+    WebRtc_Word32 _lastBufferCheckValuePlay;
+    WebRtc_Word32 _writeErrors;
+
+    WebRtc_UWord16 _playWarning;
+    WebRtc_UWord16 _playError;
+    WebRtc_UWord16 _recWarning;
+    WebRtc_UWord16 _recError;
+
+    WebRtc_UWord16 _playBufDelay;      // playback delay
+    WebRtc_UWord16 _playBufDelayFixed; // fixed playback delay
+};
+
+}
+
+#endif  // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H
diff --git a/src/modules/audio_device/main/source/linux/audio_device_pulse_linux.cc b/src/modules/audio_device/main/source/linux/audio_device_pulse_linux.cc
new file mode 100644
index 000000000..231b186a9
--- /dev/null
+++ b/src/modules/audio_device/main/source/linux/audio_device_pulse_linux.cc
@@ -0,0 +1,3248 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "audio_device_utility.h"
+#include "audio_device_pulse_linux.h"
+#include "audio_device_config.h"
+
+#include "event_wrapper.h"
+#include "trace.h"
+#include "thread_wrapper.h"
+
+webrtc_adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable;
+
+// Accesses Pulse functions through our late-binding symbol table instead of
+// directly. This way we don't have to link to libpulse, which means our binary
+// will work on systems that don't have it.
+#define LATE(sym) \
+    LATESYM_GET(webrtc_adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, sym)
+
+namespace webrtc
+{
+
+// ============================================================================
+// Static Methods
+// ============================================================================
+
+bool AudioDeviceLinuxPulse::PulseAudioIsSupported()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1, "%s",
+                 __FUNCTION__);
+
+    bool pulseAudioIsSupported(true);
+
+    // Check that we can initialize
+    AudioDeviceLinuxPulse* admPulse = new AudioDeviceLinuxPulse(-1);
+    if (admPulse->InitPulseAudio() == -1)
+    {
+        pulseAudioIsSupported = false;
+    }
+    admPulse->TerminatePulseAudio();
+    delete admPulse;
+
+    if (pulseAudioIsSupported)
+    {
+        WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
+                     "*** Linux Pulse Audio is supported ***");
+    } else
+    {
+        WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
+                     "*** Linux Pulse Audio is NOT supported => will revert to the ALSA API ***");
+    }
+
+    return (pulseAudioIsSupported);
+}
+
+AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const WebRtc_Word32 id) :
+    _ptrAudioBuffer(NULL),
+    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _timeEventRec(*EventWrapper::Create()),
+    _timeEventPlay(*EventWrapper::Create()),
+    _recStartEvent(*EventWrapper::Create()),
+    _playStartEvent(*EventWrapper::Create()),
+    _ptrThreadPlay(NULL),
+    _ptrThreadRec(NULL),
+    _recThreadID(0),
+    _playThreadID(0),
+    _id(id),
+    _mixerManager(id),
+    _inputDeviceIndex(0),
+    _outputDeviceIndex(0),
+    _inputDeviceIsSpecified(false),
+    _outputDeviceIsSpecified(false),
+    _samplingFreq(0),
+    _recChannels(1),
+    _playChannels(1),
+    _playBufType(AudioDeviceModule::kFixedBufferSize),
+    _initialized(false),
+    _recording(false),
+    _playing(false),
+    _recIsInitialized(false),
+    _playIsInitialized(false),
+    _startRec(false),
+    _stopRec(false),
+    _startPlay(false),
+
    _stopPlay(false),
+    _AGC(false),
+    _playBufDelayFixed(20),
+    _sndCardPlayDelay(0),
+    _sndCardRecDelay(0),
+    _writeErrors(0),
+    _playWarning(0),
+    _playError(0),
+    _recWarning(0),
+    _recError(0),
+    _deviceIndex(-1),
+    _numPlayDevices(0),
+    _numRecDevices(0),
+    _playDeviceName(NULL),
+    _recDeviceName(NULL),
+    _playDisplayDeviceName(NULL),
+    _recDisplayDeviceName(NULL),
+    _playBuffer(NULL),
+    _playbackBufferSize(0),
+    _playbackBufferUnused(0),
+    _tempBufferSpace(0),
+    _recBuffer(NULL),
+    _recordBufferSize(0),
+    _recordBufferUsed(0),
+    _tempSampleData(NULL),
+    _tempSampleDataSize(0),
+    _configuredLatencyPlay(0),
+    _configuredLatencyRec(0),
+    _paDeviceIndex(-1),
+    _paStateChanged(false),
+    _paMainloop(NULL),
+    _paMainloopApi(NULL),
+    _paContext(NULL),
+    _recStream(NULL),
+    _playStream(NULL),
+    _recStreamFlags(0),
+    _playStreamFlags(0)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
+                 "%s created", __FUNCTION__);
+
+    memset(_paServerVersion, 0, sizeof(_paServerVersion));
+}
+
+AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
+                 "%s destroyed", __FUNCTION__);
+
+    Terminate();
+
+    if (_recBuffer)
+    {
+        delete [] _recBuffer;
+    }
+    if (_playBuffer)
+    {
+        delete [] _playBuffer;
+    }
+    if (_playDeviceName)
+    {
+        delete [] _playDeviceName;
+    }
+    if (_recDeviceName)
+    {
+        delete [] _recDeviceName;
+    }
+    if (_playDisplayDeviceName)
+    {
+        delete [] _playDisplayDeviceName;
+    }
+    if (_recDisplayDeviceName)
+    {
+        delete [] _recDisplayDeviceName;
+    }
+
+    delete &_recStartEvent;
+    delete &_playStartEvent;
+    delete &_timeEventRec;
+    delete &_timeEventPlay;
+    delete &_critSect;
+}
+
+void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    CriticalSectionScoped lock(_critSect);
+
+    _ptrAudioBuffer = audioBuffer;
+
+    // Inform the AudioBuffer about default settings for this implementation.
+    // Set all values to zero here since the actual settings will be done by
+    // InitPlayout and InitRecording later.
+ _ptrAudioBuffer->SetRecordingSampleRate(0); + _ptrAudioBuffer->SetPlayoutSampleRate(0); + _ptrAudioBuffer->SetRecordingChannels(0); + _ptrAudioBuffer->SetPlayoutChannels(0); +} + +// ---------------------------------------------------------------------------- +// ActiveAudioLayer +// ---------------------------------------------------------------------------- + +WebRtc_Word32 AudioDeviceLinuxPulse::ActiveAudioLayer( + AudioDeviceModule::AudioLayer& audioLayer) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + audioLayer = AudioDeviceModule::kLinuxPulseAudio; + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::Init() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_initialized) + { + return 0; + } + + // Initialize PulseAudio + if (InitPulseAudio() < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to initialize PulseAudio"); + + if (TerminatePulseAudio() < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to terminate PulseAudio"); + } + + return -1; + } + + _playWarning = 0; + _playError = 0; + _recWarning = 0; + _recError = 0; + + // RECORDING + const char* threadName = "webrtc_audio_module_rec_thread"; + _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc, this, + kRealtimePriority, threadName); + if (_ptrThreadRec == NULL) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, + " failed to create the rec audio thread"); + return -1; + } + + unsigned int threadID(0); + if (!_ptrThreadRec->Start(threadID)) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, + " failed to start the rec audio thread"); + + delete _ptrThreadRec; + _ptrThreadRec = NULL; + return -1; + } + _recThreadID = threadID; + + // PLAYOUT + threadName = "webrtc_audio_module_play_thread"; + _ptrThreadPlay = ThreadWrapper::CreateThread(PlayThreadFunc, this, + kRealtimePriority, threadName); + if (_ptrThreadPlay == NULL) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, + " failed to create the play audio thread"); + return -1; + } + + threadID = 0; + if (!_ptrThreadPlay->Start(threadID)) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, + " failed to start the play audio thread"); + + delete _ptrThreadPlay; + _ptrThreadPlay = NULL; + return -1; + } + _playThreadID = threadID; + + _initialized = true; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::Terminate() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (!_initialized) + { + return 0; + } + + _critSect.Enter(); + + _mixerManager.Close(); + + // RECORDING + if (_ptrThreadRec) + { + ThreadWrapper* tmpThread = _ptrThreadRec; + _ptrThreadRec = NULL; + _critSect.Leave(); + + tmpThread->SetNotAlive(); + _timeEventRec.Set(); + + if (tmpThread->Stop()) + { + delete tmpThread; + } else + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " failed to close down the rec audio thread"); + } + } + + // PLAYOUT + if (_ptrThreadPlay) + { + ThreadWrapper* tmpThread = _ptrThreadPlay; + _ptrThreadPlay = NULL; + _critSect.Leave(); + + tmpThread->SetNotAlive(); + _timeEventPlay.Set(); + + if (tmpThread->Stop()) + { + delete tmpThread; + } else + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " failed to close down the play audio thread"); + } + } + + // Terminate PulseAudio + if (TerminatePulseAudio() < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to terminate PulseAudio"); + return -1; + } 
+
+    _initialized = false;
+    _outputDeviceIsSpecified = false;
+    _inputDeviceIsSpecified = false;
+
+    return 0;
+}
+
+bool AudioDeviceLinuxPulse::Initialized() const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+    return (_initialized);
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SpeakerIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+    // Make an attempt to open up the
+    // output mixer corresponding to the currently selected output device.
+    //
+    if (!wasInitialized && InitSpeaker() == -1)
+    {
+        available = false;
+        return 0;
+    }
+
+    // Given that InitSpeaker was successful, we know that a valid speaker exists
+    //
+    available = true;
+
+    // Close the initialized output mixer
+    //
+    if (!wasInitialized)
+    {
+        _mixerManager.CloseSpeaker();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::InitSpeaker()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    CriticalSectionScoped lock(_critSect);
+
+    if (_playing)
+    {
+        return -1;
+    }
+
+    if (!_outputDeviceIsSpecified)
+    {
+        return -1;
+    }
+
+    // check if default device
+    if (_outputDeviceIndex == 0)
+    {
+        WebRtc_UWord16 deviceIndex = 0;
+        GetDefaultDeviceInfo(false, NULL, deviceIndex);
+        _paDeviceIndex = deviceIndex;
+    } else
+    {
+        // get the PA device index from
+        // the callback
+        _deviceIndex = _outputDeviceIndex;
+
+        // get playout devices
+        PlayoutDevices();
+    }
+
+    // the callback has now set the _paDeviceIndex to
+    // the PulseAudio index of the device
+    if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1)
+    {
+        return -1;
+    }
+
+    // clear _deviceIndex
+    _deviceIndex = -1;
+    _paDeviceIndex = -1;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::MicrophoneIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+    // Make an attempt to open up the
+    // input mixer corresponding to the currently selected input device.
+ // + if (!wasInitialized && InitMicrophone() == -1) + { + available = false; + return 0; + } + + // Given that InitMicrophone was successful, we know that a valid microphone + // exists + available = true; + + // Close the initialized input mixer + // + if (!wasInitialized) + { + _mixerManager.CloseMicrophone(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::InitMicrophone() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_recording) + { + return -1; + } + + if (!_inputDeviceIsSpecified) + { + return -1; + } + + // Check if default device + if (_inputDeviceIndex == 0) + { + WebRtc_UWord16 deviceIndex = 0; + GetDefaultDeviceInfo(true, NULL, deviceIndex); + _paDeviceIndex = deviceIndex; + } else + { + // Get the PA device index from + // the callback + _deviceIndex = _inputDeviceIndex; + + // get recording devices + RecordingDevices(); + } + + // The callback has now set the _paDeviceIndex to + // the PulseAudio index of the device + if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1) + { + return -1; + } + + // Clear _deviceIndex + _deviceIndex = -1; + _paDeviceIndex = -1; + + return 0; +} + +bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_mixerManager.SpeakerIsInitialized()); +} + +bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_mixerManager.MicrophoneIsInitialized()); +} + +WebRtc_Word32 AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device. + if (!wasInitialized && InitSpeaker() == -1) + { + // If we end up here it means that the selected speaker has no volume + // control. 
+ available = false; + return 0; + } + + // Given that InitSpeaker was successful, we know that a volume control exists + available = true; + + // Close the initialized output mixer + if (!wasInitialized) + { + _mixerManager.CloseSpeaker(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::SetSpeakerVolume(WebRtc_UWord32 volume) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceLinuxPulse::SetSpeakerVolume(volume=%u)", volume); + + return (_mixerManager.SetSpeakerVolume(volume)); +} + +WebRtc_Word32 AudioDeviceLinuxPulse::SpeakerVolume(WebRtc_UWord32& volume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord32 level(0); + + if (_mixerManager.SpeakerVolume(level) == -1) + { + return -1; + } + + volume = level; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::SetWaveOutVolume( + WebRtc_UWord16 volumeLeft, + WebRtc_UWord16 volumeRight) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)", + volumeLeft, + volumeRight); + + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " API call not supported on this platform"); + return -1; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::WaveOutVolume( + WebRtc_UWord16& /*volumeLeft*/, + WebRtc_UWord16& /*volumeRight*/) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " API call not supported on this platform"); + return -1; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::MaxSpeakerVolume( + WebRtc_UWord32& maxVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord32 maxVol(0); + + if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) + { + return -1; + } + + maxVolume = maxVol; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::MinSpeakerVolume( + WebRtc_UWord32& minVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord32 minVol(0); + + if (_mixerManager.MinSpeakerVolume(minVol) == -1) + { + return -1; + } + + minVolume = minVol; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::SpeakerVolumeStepSize( + WebRtc_UWord16& stepSize) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord16 delta(0); + + if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) + { + return -1; + } + + stepSize = delta; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool isAvailable(false); + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device. + // + if (!wasInitialized && InitSpeaker() == -1) + { + // If we end up here it means that the selected speaker has no volume + // control, hence it is safe to state that there is no mute control + // already at this stage. 
+        available = false;
+        return 0;
+    }
+
+    // Check if the selected speaker has a mute control
+    _mixerManager.SpeakerMuteIsAvailable(isAvailable);
+
+    available = isAvailable;
+
+    // Close the initialized output mixer
+    if (!wasInitialized)
+    {
+        _mixerManager.CloseSpeaker();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SetSpeakerMute(bool enable)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "AudioDeviceLinuxPulse::SetSpeakerMute(enable=%u)", enable);
+
+    return (_mixerManager.SetSpeakerMute(enable));
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool muted(false);
+    if (_mixerManager.SpeakerMute(muted) == -1)
+    {
+        return -1;
+    }
+
+    enabled = muted;
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool isAvailable(false);
+    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+    // Make an attempt to open up the
+    // input mixer corresponding to the currently selected input device.
+    //
+    if (!wasInitialized && InitMicrophone() == -1)
+    {
+        // If we end up here it means that the selected microphone has no
+        // volume control, hence it is safe to state that there is no mute
+        // control already at this stage.
+        available = false;
+        return 0;
+    }
+
+    // Check if the selected microphone has a mute control
+    //
+    _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+    available = isAvailable;
+
+    // Close the initialized input mixer
+    //
+    if (!wasInitialized)
+    {
+        _mixerManager.CloseMicrophone();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "SetMicrophoneMute(enable=%u)", enable);
+
+    return (_mixerManager.SetMicrophoneMute(enable));
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool muted(false);
+    if (_mixerManager.MicrophoneMute(muted) == -1)
+    {
+        return -1;
+    }
+
+    enabled = muted;
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool isAvailable(false);
+    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+    // Enumerate all available microphones and make an attempt to open up the
+    // input mixer corresponding to the currently selected input device.
+    //
+    if (!wasInitialized && InitMicrophone() == -1)
+    {
+        // If we end up here it means that the selected microphone has no
+        // volume control, hence it is safe to state that there is no boost
+        // control already at this stage.
+        available = false;
+        return 0;
+    }
+
+    // Check if the selected microphone has a boost control
+    _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
+    available = isAvailable;
+
+    // Close the initialized input mixer
+    if (!wasInitialized)
+    {
+        _mixerManager.CloseMicrophone();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "AudioDeviceLinuxPulse::SetMicrophoneBoost(enable=%u)", enable);
+
+    return (_mixerManager.SetMicrophoneBoost(enable));
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool onOff(false);
+
+    if (_mixerManager.MicrophoneBoost(onOff) == -1)
+    {
+        return -1;
+    }
+
+    enabled = onOff;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+    if (!wasInitialized && InitMicrophone() == -1)
+    {
+        // Cannot open the specified device
+        available = false;
+        return 0;
+    }
+
+#ifndef WEBRTC_PA_GTALK
+    // Check if the selected microphone can record stereo
+    bool isAvailable(false);
+    _mixerManager.StereoRecordingIsAvailable(isAvailable);
+    available = isAvailable;
+#endif
+
+    // Close the initialized input mixer
+    if (!wasInitialized)
+    {
+        _mixerManager.CloseMicrophone();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SetStereoRecording(bool enable)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "AudioDeviceLinuxPulse::SetStereoRecording(enable=%u)", enable);
+
+#ifndef WEBRTC_PA_GTALK
+    if (enable)
+        _recChannels = 2;
+    else
+        _recChannels = 1;
+#endif
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    if (_recChannels == 2)
+        enabled = true;
+    else
+        enabled = false;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+
+    if (!wasInitialized && InitSpeaker() == -1)
+    {
+        // Cannot open the specified device
+        available = false;
+        return 0;
+    }
+
+#ifndef WEBRTC_PA_GTALK
+    // Check if the selected speaker can play stereo
+    bool isAvailable(false);
+    _mixerManager.StereoPlayoutIsAvailable(isAvailable);
+    available = isAvailable;
+#endif
+
+    // Close the initialized output mixer
+    if (!wasInitialized)
+    {
+        _mixerManager.CloseSpeaker();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SetStereoPlayout(bool enable)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "AudioDeviceLinuxPulse::SetStereoPlayout(enable=%u)", enable);
+
+#ifndef WEBRTC_PA_GTALK
+    if (enable)
+        _playChannels = 2;
+    else
+        _playChannels = 1;
+#endif
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    if (_playChannels == 2)
+        enabled = true;
+    else
+        enabled = false;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SetAGC(bool enable)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "AudioDeviceLinuxPulse::SetAGC(enable=%d)", enable);
+
+    _AGC = enable;
+
+    return 0;
+}
+
+bool AudioDeviceLinuxPulse::AGC() const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    return _AGC;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(
+    bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+
+    // Make an attempt to open up the
+    // input mixer corresponding to the currently selected input device.
+    if (!wasInitialized && InitMicrophone() == -1)
+    {
+        // If we end up here it means that the selected microphone has no
+        // volume control.
+        available = false;
+        return 0;
+    }
+
+    // Given that InitMicrophone was successful, we know that a volume control
+    // exists.
+    available = true;
+
+    // Close the initialized input mixer
+    if (!wasInitialized)
+    {
+        _mixerManager.CloseMicrophone();
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SetMicrophoneVolume(WebRtc_UWord32 volume)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "AudioDeviceLinuxPulse::SetMicrophoneVolume(volume=%u)",
+                 volume);
+
+    return (_mixerManager.SetMicrophoneVolume(volume));
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::MicrophoneVolume(
+    WebRtc_UWord32& volume) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    WebRtc_UWord32 level(0);
+
+    if (_mixerManager.MicrophoneVolume(level) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " failed to retrieve current microphone level");
+        return -1;
+    }
+
+    volume = level;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::MaxMicrophoneVolume(
+    WebRtc_UWord32& maxVolume) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    WebRtc_UWord32 maxVol(0);
+
+    if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
+    {
+        return -1;
+    }
+
+    maxVolume = maxVol;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::MinMicrophoneVolume(
+    WebRtc_UWord32& minVolume) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    WebRtc_UWord32 minVol(0);
+
+    if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
+    {
+        return -1;
+    }
+
+    minVolume = minVol;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::MicrophoneVolumeStepSize(
+    WebRtc_UWord16& stepSize) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    WebRtc_UWord16 delta(0);
+
+    if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
+    {
+        return -1;
+    }
+
+    stepSize = delta;
+
+    return 0;
+}
+
+WebRtc_Word16 AudioDeviceLinuxPulse::PlayoutDevices()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    PaLock();
+
+    pa_operation* paOperation = NULL;
+    _numPlayDevices = 1; // Init to 1 to account for "default"
+
+    // Get the whole list of devices and update _numPlayDevices
+    paOperation = LATE(pa_context_get_sink_info_list)(_paContext,
+                                                      PaSinkInfoCallback,
+                                                      this);
+
+    WaitForOperationCompletion(paOperation);
+
+    PaUnLock();
+
+    return _numPlayDevices;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SetPlayoutDevice(WebRtc_UWord16 index)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "AudioDeviceLinuxPulse::SetPlayoutDevice(index=%u)", index);
+
+    if (_playIsInitialized)
+    {
+        return -1;
+    }
+
+    const WebRtc_UWord16 nDevices(PlayoutDevices());
+
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 " number of available output devices is %u", nDevices);
+
+    if (index > (nDevices - 1))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " device index is out of range [0,%u]", (nDevices - 1));
+        return -1;
+    }
+
+    _outputDeviceIndex = index;
+    _outputDeviceIsSpecified = true;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SetPlayoutDevice(
+    AudioDeviceModule::WindowsDeviceType /*device*/)
+{
+    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                 "WindowsDeviceType not supported");
+    return -1;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::PlayoutDeviceName(
+    WebRtc_UWord16 index,
+    WebRtc_Word8 name[kAdmMaxDeviceNameSize],
+    WebRtc_Word8 guid[kAdmMaxGuidSize])
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "AudioDeviceLinuxPulse::PlayoutDeviceName(index=%u)", index);
+
+    const WebRtc_UWord16 nDevices(PlayoutDevices());
+
+    if ((index > (nDevices - 1)) || (name == NULL))
+    {
+        return -1;
+    }
+
+    memset(name, 0, kAdmMaxDeviceNameSize);
+
+    if (guid != NULL)
+    {
+        memset(guid, 0, kAdmMaxGuidSize);
+    }
+
+    // Check if default device
+    if (index == 0)
+    {
+        WebRtc_UWord16 deviceIndex = 0;
+        return GetDefaultDeviceInfo(false, name, deviceIndex);
+    }
+
+    // Tell the callback that we want
+    // the name for this device
+    _playDisplayDeviceName = name;
+    _deviceIndex = index;
+
+    // Get playout devices
+    PlayoutDevices();
+
+    // Clear device name and index
+    _playDisplayDeviceName = NULL;
+    _deviceIndex = -1;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::RecordingDeviceName(
+    WebRtc_UWord16 index,
+    WebRtc_Word8 name[kAdmMaxDeviceNameSize],
+    WebRtc_Word8 guid[kAdmMaxGuidSize])
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "AudioDeviceLinuxPulse::RecordingDeviceName(index=%u)", index);
+
+    const WebRtc_UWord16 nDevices(RecordingDevices());
+
+    if ((index > (nDevices - 1)) || (name == NULL))
+    {
+        return -1;
+    }
+
+    memset(name, 0, kAdmMaxDeviceNameSize);
+
+    if (guid != NULL)
+    {
+        memset(guid, 0, kAdmMaxGuidSize);
+    }
+
+    // Check if default device
+    if (index == 0)
+    {
+        WebRtc_UWord16 deviceIndex = 0;
+        return GetDefaultDeviceInfo(true, name, deviceIndex);
+    }
+
+    // Tell the callback that we want
+    // the name for this device
+    _recDisplayDeviceName = name;
+    _deviceIndex = index;
+
+    // Get recording devices
+    RecordingDevices();
+
+    // Clear device name and index
+    _recDisplayDeviceName = NULL;
+    _deviceIndex = -1;
+
+    return 0;
+}
+
+WebRtc_Word16 AudioDeviceLinuxPulse::RecordingDevices()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    PaLock();
+
+    pa_operation* paOperation = NULL;
+    _numRecDevices = 1; // Init to 1 to account for "default"
+
+    // Get the whole list of devices and update _numRecDevices
+    paOperation = LATE(pa_context_get_source_info_list)(_paContext,
+                                                        PaSourceInfoCallback,
+                                                        this);
+
+    WaitForOperationCompletion(paOperation);
+
+    PaUnLock();
+
+    return _numRecDevices;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SetRecordingDevice(WebRtc_UWord16 index)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "AudioDeviceLinuxPulse::SetRecordingDevice(index=%u)", index);
+
+    if (_recIsInitialized)
+    {
+        return -1;
+    }
+
+    const WebRtc_UWord16 nDevices(RecordingDevices());
+
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 " number of available input devices is %u", nDevices);
+
+    if (index > (nDevices - 1))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " device index is out of range [0,%u]", (nDevices - 1));
+        return -1;
+    }
+
+    _inputDeviceIndex = index;
+    _inputDeviceIsSpecified = true;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::SetRecordingDevice(
+    AudioDeviceModule::WindowsDeviceType /*device*/)
+{
+    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                 "WindowsDeviceType not supported");
+    return -1;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    available = false;
+
+    // Try to initialize the playout side
+    WebRtc_Word32 res = InitPlayout();
+
+    // Cancel effect of initialization
+    StopPlayout();
+
+    if (res != -1)
+    {
+        available = true;
+    }
+
+    return res;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    available = false;
+
+    // Try to initialize the recording side
+    WebRtc_Word32 res = InitRecording();
+
+    // Cancel effect of initialization
+    StopRecording();
+
+    if (res != -1)
+    {
+        available = true;
+    }
+
+    return res;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::InitPlayout()
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    CriticalSectionScoped lock(_critSect);
+
+    if (_playing)
+    {
+        return -1;
+    }
+
+    if (!_outputDeviceIsSpecified)
+    {
+        return -1;
+    }
+
+    if (_playIsInitialized)
+    {
+        return 0;
+    }
+
+    // Initialize the speaker (devices might have been added or removed)
+    if (InitSpeaker() == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " InitSpeaker() failed");
+    }
+
+    // Set sampling rate to use
+    WebRtc_UWord32 samplingRate = _samplingFreq * 1000;
+    if (samplingRate == 44000)
+    {
+        samplingRate = 44100;
+    }
+
+    // Set the play sample specification
+    pa_sample_spec playSampleSpec;
+    playSampleSpec.channels = _playChannels;
+    playSampleSpec.format = PA_SAMPLE_S16LE;
+    playSampleSpec.rate = samplingRate;
+
+    // Create a new play stream
+    _playStream = LATE(pa_stream_new)(_paContext, "playStream",
+                                      &playSampleSpec, NULL);
+
+    if (!_playStream)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " failed to create play stream, err=%d",
+                     LATE(pa_context_errno)(_paContext));
+        return -1;
+    }
+
+    // Provide the playStream to the mixer
+    _mixerManager.SetPlayStream(_playStream);
+
+    if (_ptrAudioBuffer)
+    {
+        // Update audio buffer with the selected parameters
+        _ptrAudioBuffer->SetPlayoutSampleRate(_samplingFreq * 1000);
+        _ptrAudioBuffer->SetPlayoutChannels((WebRtc_UWord8) _playChannels);
+    }
+
+    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
+                 " stream state %d\n", LATE(pa_stream_get_state)(_playStream));
+
+    // Set stream flags
+    _playStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
+        | PA_STREAM_INTERPOLATE_TIMING);
+
+    if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
+    {
+        // If configuring a specific latency then we want to specify
+        // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
+        // automatically to reach that target latency. However, that flag
+        // doesn't exist in Ubuntu 8.04 and many people still use that, so we
+        // have to check the protocol version of libpulse.
+ if (LATE(pa_context_get_protocol_version)(_paContext) + >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) + { + _playStreamFlags |= PA_STREAM_ADJUST_LATENCY; + } + + const pa_sample_spec *spec = + LATE(pa_stream_get_sample_spec)(_playStream); + if (!spec) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " pa_stream_get_sample_spec()"); + return -1; + } + + size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); + WebRtc_UWord32 latency = bytesPerSec + * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / WEBRTC_PA_MSECS_PER_SEC; + + // Set the play buffer attributes + _playBufferAttr.maxlength = latency; // num bytes stored in the buffer + _playBufferAttr.tlength = latency; // target fill level of play buffer + // minimum free num bytes before server request more data + _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; + _playBufferAttr.prebuf = _playBufferAttr.tlength + - _playBufferAttr.minreq; // prebuffer tlength before starting playout + + _configuredLatencyPlay = latency; + } + + // num samples in bytes * num channels + _playbackBufferSize = _samplingFreq * 10 * 2 * _playChannels; + _playbackBufferUnused = _playbackBufferSize; + _playBuffer = new WebRtc_Word8[_playbackBufferSize]; + + // Enable underflow callback + LATE(pa_stream_set_underflow_callback)(_playStream, + PaStreamUnderflowCallback, this); + + // Set the state callback function for the stream + LATE(pa_stream_set_state_callback)(_playStream, PaStreamStateCallback, this); + + // Mark playout side as initialized + _playIsInitialized = true; + _sndCardPlayDelay = 0; + _sndCardRecDelay = 0; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::InitRecording() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_recording) + { + return -1; + } + + if (!_inputDeviceIsSpecified) + { + return -1; + } + + if (_recIsInitialized) + { + return 0; + } + + // Initialize the microphone (devices might have been added or removed) + if (InitMicrophone() == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " InitMicrophone() failed"); + } + + // Set sampling rate to use + WebRtc_UWord32 samplingRate = _samplingFreq * 1000; + if (samplingRate == 44000) + { + samplingRate = 44100; + } + + // Set the rec sample specification + pa_sample_spec recSampleSpec; + recSampleSpec.channels = _recChannels; + recSampleSpec.format = PA_SAMPLE_S16LE; + recSampleSpec.rate = samplingRate; + + // Create a new rec stream + _recStream = LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec, + NULL); + if (!_recStream) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to create rec stream, err=%d", + LATE(pa_context_errno)(_paContext)); + return -1; + } + + // Provide the recStream to the mixer + _mixerManager.SetRecStream(_recStream); + + if (_ptrAudioBuffer) + { + // Update audio buffer with the selected parameters + _ptrAudioBuffer->SetRecordingSampleRate(_samplingFreq * 1000); + _ptrAudioBuffer->SetRecordingChannels((WebRtc_UWord8) _recChannels); + } + + if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) + { + _recStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE + | PA_STREAM_INTERPOLATE_TIMING); + + // If configuring a specific latency then we want to specify + // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters + // automatically to reach that target latency. 
However, that flag doesn't + // exist in Ubuntu 8.04 and many people still use that, so we have to check + // the protocol version of libpulse. + if (LATE(pa_context_get_protocol_version)(_paContext) + >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) + { + _recStreamFlags |= PA_STREAM_ADJUST_LATENCY; + } + + const pa_sample_spec *spec = + LATE(pa_stream_get_sample_spec)(_recStream); + if (!spec) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " pa_stream_get_sample_spec(rec)"); + return -1; + } + + size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); + WebRtc_UWord32 latency = bytesPerSec + * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / WEBRTC_PA_MSECS_PER_SEC; + + // Set the rec buffer attributes + // Note: fragsize specifies a maximum transfer size, not a minimum, so + // it is not possible to force a high latency setting, only a low one. + _recBufferAttr.fragsize = latency; // size of fragment + _recBufferAttr.maxlength = latency + bytesPerSec + * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS / WEBRTC_PA_MSECS_PER_SEC; + + _configuredLatencyRec = latency; + } + + _recordBufferSize = _samplingFreq * 10 * 2 * _recChannels; + _recordBufferUsed = 0; + _recBuffer = new WebRtc_Word8[_recordBufferSize]; + + // Enable overflow callback + LATE(pa_stream_set_overflow_callback)(_recStream, PaStreamOverflowCallback, + this); + + // Set the state callback function for the stream + LATE(pa_stream_set_state_callback)(_recStream, PaStreamStateCallback, this); + + // Mark recording side as initialized + _recIsInitialized = true; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::StartRecording() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (!_recIsInitialized) + { + return -1; + } + + if (_recording) + { + return 0; + } + + // set state to ensure that the recording starts from the audio thread + _startRec = true; + + // the audio thread will signal when recording has started + _timeEventRec.Set(); + if (kEventTimeout == _recStartEvent.Wait(10000)) + { + _startRec = false; + StopRecording(); + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to activate recording"); + return -1; + } + + if (_recording) + { + // the recording state is set by the audio thread after recording has started + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + " recording is now active"); + } else + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to activate recording"); + return -1; + } + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::StopRecording() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (!_recIsInitialized) + { + return 0; + } + + if (_recStream == NULL) + { + return -1; + } + + _recIsInitialized = false; + _recording = false; + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " stopping recording"); + + // Stop Recording + PaLock(); + + DisableReadCallback(); + LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL); + + // Unset this here so that we don't get a TERMINATED callback + LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL); + + if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED) + { + // Disconnect the stream + if (LATE(pa_stream_disconnect)(_recStream) != PA_OK) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to disconnect rec stream, err=%d\n", + LATE(pa_context_errno)(_paContext)); + PaUnLock(); + return -1; + } + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " 
disconnected recording"); + } + + LATE(pa_stream_unref)(_recStream); + _recStream = NULL; + + PaUnLock(); + + // Provide the recStream to the mixer + _mixerManager.SetRecStream(_recStream); + + if (_recBuffer) + { + delete _recBuffer; + _recBuffer = NULL; + } + + return 0; +} + +bool AudioDeviceLinuxPulse::RecordingIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_recIsInitialized); +} + +bool AudioDeviceLinuxPulse::Recording() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_recording); +} + +bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_playIsInitialized); +} + +WebRtc_Word32 AudioDeviceLinuxPulse::StartPlayout() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (!_playIsInitialized) + { + return -1; + } + + if (_playing) + { + return 0; + } + + // set state to ensure that playout starts from the audio thread + _startPlay = true; + + // the audio thread will signal when playout has started + _timeEventPlay.Set(); + if (kEventTimeout == _playStartEvent.Wait(10000)) + { + _startPlay = false; + StopPlayout(); + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to activate playout"); + return -1; + } + + if (_playing) + { + // the playing state is set by the audio thread after playout has started + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + " playing is now active"); + } else + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to activate playing"); + return -1; + } + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::StopPlayout() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (!_playIsInitialized) + { + return 0; + } + + if (_playStream == NULL) + { + return -1; + } + + _playIsInitialized = false; + _playing = false; + _sndCardPlayDelay = 0; + _sndCardRecDelay = 0; + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " stopping playback"); + + // Stop Playout + PaLock(); + + DisableWriteCallback(); + LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL); + + // Unset this here so that we don't get a TERMINATED callback + LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL); + + if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED) + { + // Disconnect the stream + if (LATE(pa_stream_disconnect)(_playStream) != PA_OK) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to disconnect play stream, err=%d", + LATE(pa_context_errno)(_paContext)); + PaUnLock(); + return -1; + } + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " disconnected playback"); + } + + LATE(pa_stream_unref)(_playStream); + _playStream = NULL; + + PaUnLock(); + + // Provide the playStream to the mixer + _mixerManager.SetPlayStream(_playStream); + + if (_playBuffer) + { + delete _playBuffer; + _playBuffer = NULL; + } + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::PlayoutDelay(WebRtc_UWord16& delayMS) const +{ + delayMS = (WebRtc_UWord16) _sndCardPlayDelay; + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::RecordingDelay(WebRtc_UWord16& delayMS) const +{ + delayMS = (WebRtc_UWord16) _sndCardRecDelay; + return 0; +} + +bool AudioDeviceLinuxPulse::Playing() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_playing); +} + 
+WebRtc_Word32 AudioDeviceLinuxPulse::SetPlayoutBuffer(
+    const AudioDeviceModule::BufferType type,
+    WebRtc_UWord16 sizeMS)
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "AudioDeviceLinuxPulse::SetPlayoutBuffer(type=%u, sizeMS=%u)",
+                 type, sizeMS);
+
+    if (type != AudioDeviceModule::kFixedBufferSize)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " Adaptive buffer size not supported on this platform");
+        return -1;
+    }
+
+    _playBufType = type;
+    _playBufDelayFixed = sizeMS;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::PlayoutBuffer(
+    AudioDeviceModule::BufferType& type,
+    WebRtc_UWord16& sizeMS) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    type = _playBufType;
+    sizeMS = _playBufDelayFixed;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::CPULoad(WebRtc_UWord16& /*load*/) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                 " API call not supported on this platform");
+    return -1;
+}
+
+bool AudioDeviceLinuxPulse::PlayoutWarning() const
+{
+    return (_playWarning > 0);
+}
+
+bool AudioDeviceLinuxPulse::PlayoutError() const
+{
+    return (_playError > 0);
+}
+
+bool AudioDeviceLinuxPulse::RecordingWarning() const
+{
+    return (_recWarning > 0);
+}
+
+bool AudioDeviceLinuxPulse::RecordingError() const
+{
+    return (_recError > 0);
+}
+
+void AudioDeviceLinuxPulse::ClearPlayoutWarning()
+{
+    _playWarning = 0;
+}
+
+void AudioDeviceLinuxPulse::ClearPlayoutError()
+{
+    _playError = 0;
+}
+
+void AudioDeviceLinuxPulse::ClearRecordingWarning()
+{
+    _recWarning = 0;
+}
+
+void AudioDeviceLinuxPulse::ClearRecordingError()
+{
+    _recError = 0;
+}
+
+// ============================================================================
+//                                 Private Methods
+// ============================================================================
+
+void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context *c, void *pThis)
+{
+    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaContextStateCallbackHandler(
+        c);
+}
+
+// ----------------------------------------------------------------------------
+//  PaSinkInfoCallback
+// ----------------------------------------------------------------------------
+
+void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context */*c*/,
+                                               const pa_sink_info *i, int eol,
+                                               void *pThis)
+{
+    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSinkInfoCallbackHandler(
+        i, eol);
+}
+
+void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context */*c*/,
+                                                 const pa_source_info *i,
+                                                 int eol, void *pThis)
+{
+    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSourceInfoCallbackHandler(
+        i, eol);
+}
+
+void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context */*c*/,
+                                                 const pa_server_info *i,
+                                                 void *pThis)
+{
+    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaServerInfoCallbackHandler(i);
+}
+
+void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream *p, void *pThis)
+{
+    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamStateCallbackHandler(p);
+}
+
+void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context *c)
+{
+    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
+                 " context state cb");
+
+    pa_context_state_t state = LATE(pa_context_get_state)(c);
+    switch (state)
+    {
+        case PA_CONTEXT_UNCONNECTED:
+            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
+                         " unconnected");
+            break;
+        case PA_CONTEXT_CONNECTING:
+        case PA_CONTEXT_AUTHORIZING:
+        case PA_CONTEXT_SETTING_NAME:
+        default:
+            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
+                         " no state");
+            break;
+        case PA_CONTEXT_FAILED:
+        case PA_CONTEXT_TERMINATED:
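+            // Both terminal states are handled identically below: remember
+            // that the state changed and wake any thread blocked in
+            // pa_threaded_mainloop_wait() (see InitPulseAudio()).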
WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " failed"); + _paStateChanged = true; + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); + break; + case PA_CONTEXT_READY: + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " ready"); + _paStateChanged = true; + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); + break; + } +} + +void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info *i, + int eol) +{ + if (eol) + { + // Signal that we are done + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); + return; + } + + if (_numPlayDevices == _deviceIndex) + { + // Convert the device index to the one of the sink + _paDeviceIndex = i->index; + + if (_playDeviceName) + { + // Copy the sink name + strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize); + _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; + } + if (_playDisplayDeviceName) + { + // Copy the sink display name + strncpy(_playDisplayDeviceName, i->description, + kAdmMaxDeviceNameSize); + _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; + } + } + + _numPlayDevices++; +} + +void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler( + const pa_source_info *i, + int eol) +{ + if (eol) + { + // Signal that we are done + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); + return; + } + + // We don't want to list output devices + if (i->monitor_of_sink == PA_INVALID_INDEX) + { + if (_numRecDevices == _deviceIndex) + { + // Convert the device index to the one of the source + _paDeviceIndex = i->index; + + if (_recDeviceName) + { + // copy the source name + strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize); + _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; + } + if (_recDisplayDeviceName) + { + // Copy the source display name + strncpy(_recDisplayDeviceName, i->description, + kAdmMaxDeviceNameSize); + _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; + } + } + + _numRecDevices++; + } +} + +void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler(const pa_server_info *i) +{ + // Use PA native sampling rate + WebRtc_UWord32 paSampleRate = i->sample_spec.rate; + if (paSampleRate == 44100) + { +#ifdef WEBRTC_PA_GTALK + paSampleRate = 48000; +#else + paSampleRate = 44000; +#endif + } + + _samplingFreq = paSampleRate / 1000; + + // Copy the PA server version + if (_paServerVersion) + { + strncpy(_paServerVersion, i->server_version, 31); + _paServerVersion[31] = '\0'; + } + + if (_recDisplayDeviceName) + { + // Copy the source name + strncpy(_recDisplayDeviceName, i->default_source_name, + kAdmMaxDeviceNameSize); + _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; + } + + if (_playDisplayDeviceName) + { + // Copy the sink name + strncpy(_playDisplayDeviceName, i->default_sink_name, + kAdmMaxDeviceNameSize); + _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; + } + + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); +} + +void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream *p) +{ + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " stream state cb"); + + pa_stream_state_t state = LATE(pa_stream_get_state)(p); + switch (state) + { + case PA_STREAM_UNCONNECTED: + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " unconnected"); + break; + case PA_STREAM_CREATING: + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " creating"); + break; + default: + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " no state"); + break; + case PA_STREAM_FAILED: + case PA_STREAM_TERMINATED: + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " failed"); + break; + case 
PA_STREAM_READY: + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " ready"); + break; + } + + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); +} + +WebRtc_Word32 AudioDeviceLinuxPulse::CheckPulseAudioVersion() +{ + /*WebRtc_Word32 index = 0; + WebRtc_Word32 partIndex = 0; + WebRtc_Word32 partNum = 1; + WebRtc_Word32 minVersion[3] = {0, 9, 15}; + bool versionOk = false; + char str[8] = {0};*/ + + PaLock(); + + pa_operation* paOperation = NULL; + + // get the server info and update deviceName + paOperation = LATE(pa_context_get_server_info)(_paContext, + PaServerInfoCallback, this); + + WaitForOperationCompletion(paOperation); + + PaUnLock(); + + WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, + " checking PulseAudio version: %s", _paServerVersion); + + /* Saved because it may turn out that we need to check the version in the future + while (true) + { + if (_paServerVersion[index] == '.') + { + index++; + str[partIndex] = '\0'; + partIndex = 0; + + if(partNum == 2) + { + if (atoi(str) < minVersion[1]) + { + break; + } + partNum = 3; + } + else + { + if (atoi(str) > minVersion[0]) + { + versionOk = true; + break; + } + partNum = 2; + } + } + else if (_paServerVersion[index] == '\0' || _paServerVersion[index] == '-') + { + str[partIndex] = '\0'; + if (atoi(str) >= minVersion[2]) + { + versionOk = true; + } + break; + } + + str[partIndex] = _paServerVersion[index]; + index++; + partIndex++; + } + + if (!versionOk) + { + return -1; + } + */ + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::InitSamplingFrequency() +{ + PaLock(); + + pa_operation* paOperation = NULL; + + // Get the server info and update _samplingFreq + paOperation = LATE(pa_context_get_server_info)(_paContext, + PaServerInfoCallback, this); + + WaitForOperationCompletion(paOperation); + + PaUnLock(); + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice, + WebRtc_Word8* name, + WebRtc_UWord16& index) +{ + WebRtc_Word8 tmpName[kAdmMaxDeviceNameSize]; + // subtract length of "default: " + WebRtc_UWord16 nameLen = kAdmMaxDeviceNameSize - 9; + WebRtc_Word8* pName = NULL; + + if (name) + { + // Add "default: " + strcpy(name, "default: "); + pName = &name[9]; + } + + // Tell the callback that we want + // the name for this device + if (recDevice) + { + _recDisplayDeviceName = tmpName; + } else + { + _playDisplayDeviceName = tmpName; + } + + // Set members + _paDeviceIndex = -1; + _deviceIndex = 0; + _numPlayDevices = 0; + _numRecDevices = 0; + + PaLock(); + + pa_operation* paOperation = NULL; + + // Get the server info and update deviceName + paOperation = LATE(pa_context_get_server_info)(_paContext, + PaServerInfoCallback, this); + + WaitForOperationCompletion(paOperation); + + // Get the device index + if (recDevice) + { + paOperation + = LATE(pa_context_get_source_info_by_name)(_paContext, + (char *) tmpName, + PaSourceInfoCallback, + this); + } else + { + paOperation + = LATE(pa_context_get_sink_info_by_name)(_paContext, + (char *) tmpName, + PaSinkInfoCallback, this); + } + + WaitForOperationCompletion(paOperation); + + PaUnLock(); + + // Set the index + index = _paDeviceIndex; + + if (name) + { + // Copy to name string + strncpy(pName, tmpName, nameLen); + } + + // Clear members + _playDisplayDeviceName = NULL; + _recDisplayDeviceName = NULL; + _paDeviceIndex = -1; + _deviceIndex = -1; + _numPlayDevices = 0; + _numRecDevices = 0; + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::InitPulseAudio() +{ + int retVal = 0; + + // Load libpulse + if (!PaSymbolTable.Load()) 
+ { + // Most likely the Pulse library and sound server are not installed on + // this system + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to load symbol table"); + return -1; + } + + // Create a mainloop API and connection to the default server + // the mainloop is the internal asynchronous API event loop + _paMainloop = LATE(pa_threaded_mainloop_new)(); + if (!_paMainloop) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " could not create mainloop"); + return -1; + } + + // Start the threaded main loop + retVal = LATE(pa_threaded_mainloop_start)(_paMainloop); + if (retVal != PA_OK) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to start main loop, error=%d", retVal); + return -1; + } + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " mainloop running!"); + + PaLock(); + + _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop); + if (!_paMainloopApi) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " could not create mainloop API"); + PaUnLock(); + return -1; + } + + // Create a new PulseAudio context + _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine"); + + if (!_paContext) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " could not create context"); + PaUnLock(); + return -1; + } + + // Set state callback function + LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback, + this); + + // Connect the context to a server (default) + _paStateChanged = false; + retVal = LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN, + NULL); + + if (retVal != PA_OK) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to connect context, error=%d", retVal); + PaUnLock(); + return -1; + } + + // Wait for state change + while (!_paStateChanged) + { + LATE(pa_threaded_mainloop_wait)(_paMainloop); + } + + // Now check to see what final state we reached. 
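+    // Note: PaContextStateCallbackHandler() signals the mainloop only for
+    // PA_CONTEXT_FAILED, PA_CONTEXT_TERMINATED and PA_CONTEXT_READY, so the
+    // state observed below is always one of those three.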
+ pa_context_state_t state = LATE(pa_context_get_state)(_paContext); + + if (state != PA_CONTEXT_READY) + { + if (state == PA_CONTEXT_FAILED) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to connect to PulseAudio sound server"); + } else if (state == PA_CONTEXT_TERMINATED) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " PulseAudio connection terminated early"); + } else + { + // Shouldn't happen, because we only signal on one of those three + // states + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " unknown problem connecting to PulseAudio"); + } + PaUnLock(); + return -1; + } + + PaUnLock(); + + // Give the objects to the mixer manager + _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext); + + // Check the version + if (CheckPulseAudioVersion() < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " PulseAudio version %s not supported", _paServerVersion); + return -1; + } + + // Initialize sampling frequency + if (InitSamplingFrequency() < 0 || _samplingFreq == 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to initialize sampling frequency, set to %d", + _samplingFreq); + return -1; + } + + return 0; +} + +WebRtc_Word32 AudioDeviceLinuxPulse::TerminatePulseAudio() +{ + // Do nothing if the instance doesn't exist + // likely PaSymbolTable.Load() fails + if (!_paMainloop) { + return 0; + } + + PaLock(); + + // Disconnect the context + if (_paContext) + { + LATE(pa_context_disconnect)(_paContext); + } + + // Unreference the context + if (_paContext) + { + LATE(pa_context_unref)(_paContext); + } + + PaUnLock(); + _paContext = NULL; + + // Stop the threaded main loop + if (_paMainloop) + { + LATE(pa_threaded_mainloop_stop)(_paMainloop); + } + + // Free the mainloop + if (_paMainloop) + { + LATE(pa_threaded_mainloop_free)(_paMainloop); + } + + _paMainloop = NULL; + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " PulseAudio terminated"); + + return 0; +} + +void AudioDeviceLinuxPulse::PaLock() +{ + LATE(pa_threaded_mainloop_lock)(_paMainloop); +} + +void AudioDeviceLinuxPulse::PaUnLock() +{ + LATE(pa_threaded_mainloop_unlock)(_paMainloop); +} + +void AudioDeviceLinuxPulse::WaitForOperationCompletion( + pa_operation* paOperation) const +{ + while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) + { + LATE(pa_threaded_mainloop_wait)(_paMainloop); + } + + LATE(pa_operation_unref)(paOperation); +} + +// ============================================================================ +// Thread Methods +// ============================================================================ + +void AudioDeviceLinuxPulse::EnableWriteCallback() +{ + if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY) + { + // May already have available space. Must check. + _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream); + if (_tempBufferSpace > 0) + { + // Yup, there is already space available, so if we register a write + // callback then it will not receive any event. 
So dispatch one ourselves
+            // instead.
+            _timeEventPlay.Set();
+            return;
+        }
+    }
+
+    LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback,
+                                       this);
+}
+
+void AudioDeviceLinuxPulse::DisableWriteCallback()
+{
+    LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL);
+}
+
+void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream */*unused*/,
+                                                  size_t buffer_space,
+                                                  void *pThis)
+{
+    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamWriteCallbackHandler(
+        buffer_space);
+}
+
+void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace)
+{
+    _tempBufferSpace = bufferSpace;
+
+    // Since we write the data asynchronously on a different thread, we have
+    // to temporarily disable the write callback or else Pulse will call it
+    // continuously until we write the data. We re-enable it below.
+    DisableWriteCallback();
+    _timeEventPlay.Set();
+}
+
+void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream */*unused*/,
+                                                      void *pThis)
+{
+    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamUnderflowCallbackHandler();
+}
+
+void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler()
+{
+    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                 " Playout underflow");
+
+    if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
+    {
+        // We didn't configure a pa_buffer_attr before, so switching to one now
+        // would be questionable.
+        return;
+    }
+
+    // Otherwise reconfigure the stream with a higher target latency.
+
+    const pa_sample_spec *spec = LATE(pa_stream_get_sample_spec)(_playStream);
+    if (!spec)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " pa_stream_get_sample_spec()");
+        return;
+    }
+
+    size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
+    WebRtc_UWord32 newLatency = _configuredLatencyPlay + bytesPerSec
+        * WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS / WEBRTC_PA_MSECS_PER_SEC;
+
+    // Set the play buffer attributes
+    _playBufferAttr.maxlength = newLatency;
+    _playBufferAttr.tlength = newLatency;
+    _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
+    _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;
+
+    pa_operation *op = LATE(pa_stream_set_buffer_attr)(_playStream,
+                                                       &_playBufferAttr, NULL,
+                                                       NULL);
+    if (!op)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " pa_stream_set_buffer_attr()");
+        return;
+    }
+
+    // Don't need to wait for this to complete.
+    LATE(pa_operation_unref)(op);
+
+    // Save the new latency in case we underflow again.
+    _configuredLatencyPlay = newLatency;
+}
+
+void AudioDeviceLinuxPulse::EnableReadCallback()
+{
+    LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this);
+}
+
+void AudioDeviceLinuxPulse::DisableReadCallback()
+{
+    LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL);
+}
+
+void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream */*unused1*/,
+                                                 size_t /*unused2*/,
+                                                 void *pThis)
+{
+    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamReadCallbackHandler();
+}
+
+void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler()
+{
+    // We get the data pointer and size now in order to save one Lock/Unlock
+    // in the worker thread
+    if (LATE(pa_stream_peek)(_recStream, &_tempSampleData, &_tempSampleDataSize)
+        != 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " Can't read data!");
+        return;
+    }
+
+    // Since we consume the data asynchronously on a different thread, we have
+    // to temporarily disable the read callback or else Pulse will call it
+    // continuously until we consume the data. We re-enable it below.
+    DisableReadCallback();
+    _timeEventRec.Set();
+}
+
+void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream */*unused*/,
+                                                     void *pThis)
+{
+    static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamOverflowCallbackHandler();
+}
+
+void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler()
+{
+    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                 " Recording overflow");
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::LatencyUsecs(pa_stream *stream)
+{
+    if (!WEBRTC_PA_REPORT_LATENCY)
+    {
+        return 0;
+    }
+
+    if (!stream)
+    {
+        return 0;
+    }
+
+    pa_usec_t latency;
+    int negative;
+    if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " Can't query latency");
+        // We'd rather continue playout/capture with an incorrect delay than
+        // stop it altogether, so return a valid value.
+        return 0;
+    }
+
+    if (negative)
+    {
+        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
+                     " warning: pa_stream_get_latency reported negative delay");
+
+        // The delay can be negative for monitoring streams if the captured
+        // samples haven't been played yet. In such a case, "latency" contains
+        // the magnitude, so we must negate it to get the real value.
+        WebRtc_Word32 tmpLatency = (WebRtc_Word32) -latency;
+        if (tmpLatency < 0)
+        {
+            // Make sure that we don't use a negative delay
+            tmpLatency = 0;
+        }
+
+        return tmpLatency;
+    } else
+    {
+        return (WebRtc_Word32) latency;
+    }
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::ReadRecordedData(const void* bufferData,
+                                                      size_t bufferSize)
+{
+    size_t size = bufferSize;
+    WebRtc_UWord32 numRecSamples = _recordBufferSize / (2 * _recChannels);
+
+    // Account for the peeked data and the used data
+    WebRtc_UWord32 recDelay = (WebRtc_UWord32) ((LatencyUsecs(_recStream)
+        / 1000) + 10 * ((size + _recordBufferUsed) / _recordBufferSize));
+
+    _sndCardRecDelay = recDelay;
+
+    if (_playStream)
+    {
+        // Get the playout delay
+        _sndCardPlayDelay = (WebRtc_UWord32) (LatencyUsecs(_playStream) / 1000);
+    }
+
+    if (_recordBufferUsed > 0)
+    {
+        // Have to copy to the buffer until it is full
+        size_t copy = _recordBufferSize - _recordBufferUsed;
+        if (size < copy)
+        {
+            copy = size;
+        }
+
+        memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy);
+        _recordBufferUsed += copy;
+        bufferData = static_cast<const WebRtc_Word8*> (bufferData) + copy;
+        size -= copy;
+
+        if (_recordBufferUsed != _recordBufferSize)
+        {
+            // Not enough data yet to pass to VoE
+            return 0;
+        }
+
+        // Provide data to VoiceEngine
+        if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1)
+        {
+            // We have stopped recording
+            return -1;
+        }
+
+        _recordBufferUsed = 0;
+    }
+
+    // Now process full 10ms sample sets directly from the input
+    while (size >= _recordBufferSize)
+    {
+        // Provide data to VoiceEngine
+        if (ProcessRecordedData(
+            static_cast<WebRtc_Word8*> (const_cast<void*> (bufferData)),
+            numRecSamples, recDelay) == -1)
+        {
+            // We have stopped recording
+            return -1;
+        }
+
+        bufferData = static_cast<const WebRtc_Word8*> (bufferData)
+            + _recordBufferSize;
+        size -= _recordBufferSize;
+
+        // We have consumed 10ms of data
+        recDelay -= 10;
+    }
+
+    // Now save any leftovers for later.
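+    // Worked example of the chunking above (illustrative figures): at 48 kHz
+    // mono with 16-bit samples, _recordBufferSize = 48 * 10 * 2 * 1 = 960
+    // bytes per 10 ms chunk. A peek of, say, 4000 bytes first tops up any
+    // partial chunk, then delivers full 960-byte chunks to VoiceEngine
+    // (recDelay dropping by 10 ms per delivered chunk), and the remainder
+    // (< 960 bytes) is saved below for the next pass.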
+    if (size > 0)
+    {
+        memcpy(_recBuffer, bufferData, size);
+        _recordBufferUsed = size;
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioDeviceLinuxPulse::ProcessRecordedData(
+    WebRtc_Word8 *bufferData,
+    WebRtc_UWord32 bufferSizeInSamples,
+    WebRtc_UWord32 recDelay)
+{
+    WebRtc_UWord32 currentMicLevel(0);
+    WebRtc_UWord32 newMicLevel(0);
+
+    _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples);
+
+    if (AGC())
+    {
+        // Store current mic level in the audio buffer if AGC is enabled
+        if (MicrophoneVolume(currentMicLevel) == 0)
+        {
+            // This call does not affect the actual microphone volume
+            _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
+        }
+    }
+
+    // Set VQE data
+    const WebRtc_UWord32 clockDrift(0);
+    _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay, clockDrift);
+
+    // Deliver recorded samples at specified sample rate,
+    // mic level etc. to the observer using callback
+    UnLock();
+    _ptrAudioBuffer->DeliverRecordedData();
+    Lock();
+
+    // We have been unlocked - check the flag again
+    if (!_recording)
+    {
+        return -1;
+    }
+
+    if (AGC())
+    {
+        newMicLevel = _ptrAudioBuffer->NewMicLevel();
+        if (newMicLevel != 0)
+        {
+            // The VQE will only deliver non-zero microphone levels when a
+            // change is needed.
+            // Set this new mic level (received from the observer as return
+            // value in the callback).
+            WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
+                         " AGC change of volume: old=%u => new=%u",
+                         currentMicLevel, newMicLevel);
+            if (SetMicrophoneVolume(newMicLevel) == -1)
+            {
+                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
+                             _id,
+                             " the required modification of the microphone "
+                             "volume failed");
+            }
+        }
+    }
+
+    return 0;
+}
+
+bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis)
+{
+    return (static_cast<AudioDeviceLinuxPulse*> (pThis)->PlayThreadProcess());
+}
+
+bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis)
+{
+    return (static_cast<AudioDeviceLinuxPulse*> (pThis)->RecThreadProcess());
+}
+
+bool AudioDeviceLinuxPulse::PlayThreadProcess()
+{
+    switch (_timeEventPlay.Wait(1000))
+    {
+        case kEventSignaled:
+            _timeEventPlay.Reset();
+            break;
+        case kEventError:
+            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                         "EventWrapper::Wait() failed");
+            return true;
+        case kEventTimeout:
+            return true;
+    }
+
+    Lock();
+
+    if (_startPlay)
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                     "_startPlay true, performing initial actions");
+
+        _startPlay = false;
+        _playDeviceName = NULL;
+
+        // Set if not default device
+        if (_outputDeviceIndex > 0)
+        {
+            // Get the playout device name
+            _playDeviceName = new WebRtc_Word8[kAdmMaxDeviceNameSize];
+            _deviceIndex = _outputDeviceIndex;
+            PlayoutDevices();
+        }
+
+        // Start muted only supported on 0.9.11 and up
+        if (LATE(pa_context_get_protocol_version)(_paContext)
+            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
+        {
+            // Get the currently saved speaker mute status
+            // and set the initial mute status accordingly
+            bool enabled(false);
+            _mixerManager.SpeakerMute(enabled);
+            if (enabled)
+            {
+                _playStreamFlags |= PA_STREAM_START_MUTED;
+            }
+        }
+
+        // Get the currently saved speaker volume
+        WebRtc_UWord32 volume = 0;
+        _mixerManager.SpeakerVolume(volume);
+
+        PaLock();
+
+        // Set the same volume for all channels
+        pa_cvolume cVolumes;
+        const pa_sample_spec *spec =
+            LATE(pa_stream_get_sample_spec)(_playStream);
+        LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
+
+        // Connect the stream to a sink
+        if (LATE(pa_stream_connect_playback)(
+            _playStream,
+            _playDeviceName,
+            &_playBufferAttr,
+            (pa_stream_flags_t) _playStreamFlags,
+            &cVolumes, NULL) !=
PA_OK) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to connect play stream, err=%d", + LATE(pa_context_errno)(_paContext)); + } + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " play stream connected"); + + // Wait for state change + while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY) + { + LATE(pa_threaded_mainloop_wait)(_paMainloop); + } + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " play stream ready"); + + // We can now handle write callbacks + EnableWriteCallback(); + + PaUnLock(); + + // Clear device name + if (_playDeviceName) + { + delete[] _playDeviceName; + _playDeviceName = NULL; + } + + _playing = true; + _playStartEvent.Set(); + + UnLock(); + return true; + } + + if (_playing) + { + if (!_recording) + { + // Update the playout delay + _sndCardPlayDelay = (WebRtc_UWord32) (LatencyUsecs(_playStream) + / 1000); + } + + if (_playbackBufferUnused < _playbackBufferSize) + { + + size_t write = _playbackBufferSize - _playbackBufferUnused; + if (_tempBufferSpace < write) + { + write = _tempBufferSpace; + } + + PaLock(); + if (LATE(pa_stream_write)( + _playStream, + (void *) &_playBuffer[_playbackBufferUnused], + write, NULL, (int64_t) 0, + PA_SEEK_RELATIVE) != PA_OK) + { + _writeErrors++; + if (_writeErrors > 10) + { + if (_playError == 1) + { + WEBRTC_TRACE(kTraceWarning, + kTraceUtility, _id, + " pending playout error exists"); + } + _playError = 1; // Triggers callback from module process thread + WEBRTC_TRACE( + kTraceError, + kTraceUtility, + _id, + " kPlayoutError message posted: " + "_writeErrors=%u, error=%d", + _writeErrors, + LATE(pa_context_errno)(_paContext)); + _writeErrors = 0; + } + } + PaUnLock(); + + _playbackBufferUnused += write; + _tempBufferSpace -= write; + } + + WebRtc_UWord32 numPlaySamples = _playbackBufferSize / (2 + * _playChannels); + if (_tempBufferSpace > 0) // Might have been reduced to zero by the above + { + // Ask for new PCM data to be played out using the AudioDeviceBuffer + // ensure that this callback is executed without taking the + // audio-thread lock + UnLock(); + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " requesting data"); + WebRtc_UWord32 nSamples = + _ptrAudioBuffer->RequestPlayoutData(numPlaySamples); + Lock(); + + // We have been unlocked - check the flag again + if (!_playing) + { + UnLock(); + return true; + } + + nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer); + if (nSamples != numPlaySamples) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, + _id, " invalid number of output samples(%d)", + nSamples); + } + + size_t write = _playbackBufferSize; + if (_tempBufferSpace < write) + { + write = _tempBufferSpace; + } + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " will write"); + PaLock(); + if (LATE(pa_stream_write)(_playStream, (void *) &_playBuffer[0], + write, NULL, (int64_t) 0, + PA_SEEK_RELATIVE) != PA_OK) + { + _writeErrors++; + if (_writeErrors > 10) + { + if (_playError == 1) + { + WEBRTC_TRACE(kTraceWarning, + kTraceUtility, _id, + " pending playout error exists"); + } + _playError = 1; // triggers callback from module process thread + WEBRTC_TRACE( + kTraceError, + kTraceUtility, + _id, + " kPlayoutError message posted: " + "_writeErrors=%u, error=%d", + _writeErrors, + LATE(pa_context_errno)(_paContext)); + _writeErrors = 0; + } + } + PaUnLock(); + + _playbackBufferUnused = write; + } + + _tempBufferSpace = 0; + PaLock(); + EnableWriteCallback(); + PaUnLock(); + + } // _playing + + UnLock(); + return true; +} + +bool 
AudioDeviceLinuxPulse::RecThreadProcess() +{ + switch (_timeEventRec.Wait(1000)) + { + case kEventSignaled: + _timeEventRec.Reset(); + break; + case kEventError: + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + "EventWrapper::Wait() failed"); + return true; + case kEventTimeout: + return true; + } + + Lock(); + + if (_startRec) + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "_startRec true, performing initial actions"); + + _recDeviceName = NULL; + + // Set if not default device + if (_inputDeviceIndex > 0) + { + // Get the recording device name + _recDeviceName = new WebRtc_Word8[kAdmMaxDeviceNameSize]; + _deviceIndex = _inputDeviceIndex; + RecordingDevices(); + } + + PaLock(); + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " connecting stream"); + + // Connect the stream to a source + if (LATE(pa_stream_connect_record)(_recStream, _recDeviceName, + &_recBufferAttr, + (pa_stream_flags_t) _recStreamFlags) + != PA_OK) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " failed to connect rec stream, err=%d", + LATE(pa_context_errno)(_paContext)); + } + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " connected"); + + // Wait for state change + while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY) + { + LATE(pa_threaded_mainloop_wait)(_paMainloop); + } + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " done"); + + // We can now handle read callbacks + EnableReadCallback(); + + PaUnLock(); + + // Clear device name + if (_recDeviceName) + { + delete[] _recDeviceName; + _recDeviceName = NULL; + } + + _startRec = false; + _recording = true; + _recStartEvent.Set(); + + UnLock(); + return true; + } + + if (_recording) + { + // Read data and provide it to VoiceEngine + if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1) + { + UnLock(); + return true; + } + + _tempSampleData = NULL; + _tempSampleDataSize = 0; + + PaLock(); + while (true) + { + // Ack the last thing we read + if (LATE(pa_stream_drop)(_recStream) != 0) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, + _id, " failed to drop, err=%d\n", + LATE(pa_context_errno)(_paContext)); + } + + if (LATE(pa_stream_readable_size)(_recStream) <= 0) + { + // Then that was all the data + break; + } + + // Else more data. + const void *sampleData; + size_t sampleDataSize; + + if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize) + != 0) + { + _recError = 1; // triggers callback from module process thread + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, + _id, " RECORD_ERROR message posted, error = %d", + LATE(pa_context_errno)(_paContext)); + break; + } + + _sndCardRecDelay = (WebRtc_UWord32) (LatencyUsecs(_recStream) + / 1000); + + // Drop lock for sigslot dispatch, which could take a while. + PaUnLock(); + // Read data and provide it to VoiceEngine + if (ReadRecordedData(sampleData, sampleDataSize) == -1) + { + UnLock(); + return true; + } + PaLock(); + + // Return to top of loop for the ack and the check for more data. + } + + EnableReadCallback(); + PaUnLock(); + + } // _recording + + UnLock(); + return true; +} + +} diff --git a/src/modules/audio_device/main/source/linux/audio_device_pulse_linux.h b/src/modules/audio_device/main/source/linux/audio_device_pulse_linux.h new file mode 100644 index 000000000..693a32bec --- /dev/null +++ b/src/modules/audio_device/main/source/linux/audio_device_pulse_linux.h @@ -0,0 +1,385 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H +#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H + +#include "audio_device_generic.h" +#include "audio_mixer_manager_pulse_linux.h" +#include "critical_section_wrapper.h" + +#include <pulse/pulseaudio.h> + +// Set this define to make the code behave like in GTalk/libjingle +//#define WEBRTC_PA_GTALK + +// We define this flag if it's missing from our headers, because we want to be +// able to compile against old headers but still use PA_STREAM_ADJUST_LATENCY +// if run against a recent version of the library. +#ifndef PA_STREAM_ADJUST_LATENCY +#define PA_STREAM_ADJUST_LATENCY 0x2000U +#endif +#ifndef PA_STREAM_START_MUTED +#define PA_STREAM_START_MUTED 0x1000U +#endif + +// Set this constant to 0 to disable latency reading +const WebRtc_UWord32 WEBRTC_PA_REPORT_LATENCY = 1; + +// Constants from implementation by Tristan Schmelcher [tschmelcher@google.com] + +// First PulseAudio protocol version that supports PA_STREAM_ADJUST_LATENCY. +const WebRtc_UWord32 WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION = 13; + +// Some timing constants for optimal operation. See +// https://tango.0pointer.de/pipermail/pulseaudio-discuss/2008-January/001170.html +// for a good explanation of some of the factors that go into this. + +// Playback. + +// For playback, there is a round-trip delay to fill the server-side playback +// buffer, so setting too low of a latency is a buffer underflow risk. We will +// automatically increase the latency if a buffer underflow does occur, but we +// also enforce a sane minimum at start-up time. Anything lower would be +// virtually guaranteed to underflow at least once, so there's no point in +// allowing lower latencies. +const WebRtc_UWord32 WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS = 20; + +// Every time a playback stream underflows, we will reconfigure it with target +// latency that is greater by this amount. +const WebRtc_UWord32 WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS = 20; + +// We also need to configure a suitable request size. Too small and we'd burn +// CPU from the overhead of transferring small amounts of data at once. Too large +// and the amount of data remaining in the buffer right before refilling it +// would be a buffer underflow risk. We set it to half of the buffer size. +const WebRtc_UWord32 WEBRTC_PA_PLAYBACK_REQUEST_FACTOR = 2; + +// Capture. + +// For capture, low latency is not a buffer overflow risk, but it makes us burn +// CPU from the overhead of transferring small amounts of data at once, so we set +// a recommended value that we use for the kLowLatency constant (but if the user +// explicitly requests something lower then we will honour it). +// 1ms takes about 6-7% CPU. 5ms takes about 5%. 10ms takes about 4.x%. +const WebRtc_UWord32 WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS = 10; + +// There is a round-trip delay to ack the data to the server, so the +// server-side buffer needs extra space to prevent buffer overflow. 20ms is +// sufficient, but there is no penalty to making it bigger, so we make it huge. +// (750ms is libpulse's default value for the _total_ buffer size in the +// kNoLatencyRequirements case.) 
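For context on how constants like these get consumed: the sketch below (not part of this patch; the helper name and its parameters are hypothetical, and the real buffer setup lives in audio_device_pulse_linux.cc) shows how the playback-latency policy above would typically be expressed as a pa_buffer_attr before connecting a stream.

```cpp
#include <pulse/pulseaudio.h>
#include <cstdint>

// Sketch only: translate the playback-latency policy into a pa_buffer_attr.
static pa_buffer_attr MakePlaybackBufferAttr(const pa_sample_spec* spec,
                                             uint32_t latencyMsecs) {
    // Enforce the 20 ms floor (WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS);
    // anything lower is virtually guaranteed to underflow at least once.
    if (latencyMsecs < 20)
        latencyMsecs = 20;
    const uint32_t bytesPerSec =
        static_cast<uint32_t>(pa_bytes_per_second(spec));
    pa_buffer_attr attr;
    attr.maxlength = static_cast<uint32_t>(-1);        // let the server choose
    attr.tlength = bytesPerSec * latencyMsecs / 1000;  // target buffer fill
    // Request size = buffer / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR, i.e. half:
    // small enough to refill in time, large enough to keep overhead low.
    attr.minreq = attr.tlength / 2;
    attr.prebuf = static_cast<uint32_t>(-1);
    attr.fragsize = static_cast<uint32_t>(-1);         // capture-only field
    return attr;
}
```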
+const WebRtc_UWord32 WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS = 750; + +const WebRtc_UWord32 WEBRTC_PA_MSECS_PER_SEC = 1000; + +// Init _configuredLatencyRec/Play to this value to disable latency requirements +const WebRtc_Word32 WEBRTC_PA_NO_LATENCY_REQUIREMENTS = -1; + +// Set this const to 1 to account for peeked and used data in latency calculation +const WebRtc_UWord32 WEBRTC_PA_CAPTURE_BUFFER_LATENCY_ADJUSTMENT = 0; + +namespace webrtc +{ +class EventWrapper; +class ThreadWrapper; + +class AudioDeviceLinuxPulse: public AudioDeviceGeneric +{ +public: + AudioDeviceLinuxPulse(const WebRtc_Word32 id); + ~AudioDeviceLinuxPulse(); + + static bool PulseAudioIsSupported(); + + // Retrieve the currently utilized audio layer + virtual WebRtc_Word32 + ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const; + + // Main initializaton and termination + virtual WebRtc_Word32 Init(); + virtual WebRtc_Word32 Terminate(); + virtual bool Initialized() const; + + // Device enumeration + virtual WebRtc_Word16 PlayoutDevices(); + virtual WebRtc_Word16 RecordingDevices(); + virtual WebRtc_Word32 PlayoutDeviceName( + WebRtc_UWord16 index, + WebRtc_Word8 name[kAdmMaxDeviceNameSize], + WebRtc_Word8 guid[kAdmMaxGuidSize]); + virtual WebRtc_Word32 RecordingDeviceName( + WebRtc_UWord16 index, + WebRtc_Word8 name[kAdmMaxDeviceNameSize], + WebRtc_Word8 guid[kAdmMaxGuidSize]); + + // Device selection + virtual WebRtc_Word32 SetPlayoutDevice(WebRtc_UWord16 index); + virtual WebRtc_Word32 SetPlayoutDevice( + AudioDeviceModule::WindowsDeviceType device); + virtual WebRtc_Word32 SetRecordingDevice(WebRtc_UWord16 index); + virtual WebRtc_Word32 SetRecordingDevice( + AudioDeviceModule::WindowsDeviceType device); + + // Audio transport initialization + virtual WebRtc_Word32 PlayoutIsAvailable(bool& available); + virtual WebRtc_Word32 InitPlayout(); + virtual bool PlayoutIsInitialized() const; + virtual WebRtc_Word32 RecordingIsAvailable(bool& available); + virtual WebRtc_Word32 InitRecording(); + virtual bool RecordingIsInitialized() const; + + // Audio transport control + virtual WebRtc_Word32 StartPlayout(); + virtual WebRtc_Word32 StopPlayout(); + virtual bool Playing() const; + virtual WebRtc_Word32 StartRecording(); + virtual WebRtc_Word32 StopRecording(); + virtual bool Recording() const; + + // Microphone Automatic Gain Control (AGC) + virtual WebRtc_Word32 SetAGC(bool enable); + virtual bool AGC() const; + + // Volume control based on the Windows Wave API (Windows only) + virtual WebRtc_Word32 SetWaveOutVolume(WebRtc_UWord16 volumeLeft, + WebRtc_UWord16 volumeRight); + virtual WebRtc_Word32 WaveOutVolume(WebRtc_UWord16& volumeLeft, + WebRtc_UWord16& volumeRight) const; + + // Audio mixer initialization + virtual WebRtc_Word32 SpeakerIsAvailable(bool& available); + virtual WebRtc_Word32 InitSpeaker(); + virtual bool SpeakerIsInitialized() const; + virtual WebRtc_Word32 MicrophoneIsAvailable(bool& available); + virtual WebRtc_Word32 InitMicrophone(); + virtual bool MicrophoneIsInitialized() const; + + // Speaker volume controls + virtual WebRtc_Word32 SpeakerVolumeIsAvailable(bool& available); + virtual WebRtc_Word32 SetSpeakerVolume(WebRtc_UWord32 volume); + virtual WebRtc_Word32 SpeakerVolume(WebRtc_UWord32& volume) const; + virtual WebRtc_Word32 MaxSpeakerVolume(WebRtc_UWord32& maxVolume) const; + virtual WebRtc_Word32 MinSpeakerVolume(WebRtc_UWord32& minVolume) const; + virtual WebRtc_Word32 SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const; + + // Microphone volume controls + virtual WebRtc_Word32 
MicrophoneVolumeIsAvailable(bool& available); + virtual WebRtc_Word32 SetMicrophoneVolume(WebRtc_UWord32 volume); + virtual WebRtc_Word32 MicrophoneVolume(WebRtc_UWord32& volume) const; + virtual WebRtc_Word32 MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const; + virtual WebRtc_Word32 MinMicrophoneVolume(WebRtc_UWord32& minVolume) const; + virtual WebRtc_Word32 MicrophoneVolumeStepSize( + WebRtc_UWord16& stepSize) const; + + // Speaker mute control + virtual WebRtc_Word32 SpeakerMuteIsAvailable(bool& available); + virtual WebRtc_Word32 SetSpeakerMute(bool enable); + virtual WebRtc_Word32 SpeakerMute(bool& enabled) const; + + // Microphone mute control + virtual WebRtc_Word32 MicrophoneMuteIsAvailable(bool& available); + virtual WebRtc_Word32 SetMicrophoneMute(bool enable); + virtual WebRtc_Word32 MicrophoneMute(bool& enabled) const; + + // Microphone boost control + virtual WebRtc_Word32 MicrophoneBoostIsAvailable(bool& available); + virtual WebRtc_Word32 SetMicrophoneBoost(bool enable); + virtual WebRtc_Word32 MicrophoneBoost(bool& enabled) const; + + // Stereo support + virtual WebRtc_Word32 StereoPlayoutIsAvailable(bool& available); + virtual WebRtc_Word32 SetStereoPlayout(bool enable); + virtual WebRtc_Word32 StereoPlayout(bool& enabled) const; + virtual WebRtc_Word32 StereoRecordingIsAvailable(bool& available); + virtual WebRtc_Word32 SetStereoRecording(bool enable); + virtual WebRtc_Word32 StereoRecording(bool& enabled) const; + + // Delay information and control + virtual WebRtc_Word32 + SetPlayoutBuffer(const AudioDeviceModule::BufferType type, + WebRtc_UWord16 sizeMS); + virtual WebRtc_Word32 PlayoutBuffer(AudioDeviceModule::BufferType& type, + WebRtc_UWord16& sizeMS) const; + virtual WebRtc_Word32 PlayoutDelay(WebRtc_UWord16& delayMS) const; + virtual WebRtc_Word32 RecordingDelay(WebRtc_UWord16& delayMS) const; + + // CPU load + virtual WebRtc_Word32 CPULoad(WebRtc_UWord16& load) const; + +public: + virtual bool PlayoutWarning() const; + virtual bool PlayoutError() const; + virtual bool RecordingWarning() const; + virtual bool RecordingError() const; + virtual void ClearPlayoutWarning(); + virtual void ClearPlayoutError(); + virtual void ClearRecordingWarning(); + virtual void ClearRecordingError(); + +public: + virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer); + +private: + void Lock() + { + _critSect.Enter(); + } + ; + void UnLock() + { + _critSect.Leave(); + } + ; + void WaitForOperationCompletion(pa_operation* paOperation) const; + void WaitForSuccess(pa_operation* paOperation) const; + +private: + static void PaContextStateCallback(pa_context *c, void *pThis); + static void PaSinkInfoCallback(pa_context *c, const pa_sink_info *i, + int eol, void *pThis); + static void PaSourceInfoCallback(pa_context *c, const pa_source_info *i, + int eol, void *pThis); + static void PaServerInfoCallback(pa_context *c, const pa_server_info *i, + void *pThis); + static void PaStreamStateCallback(pa_stream *p, void *pThis); + void PaContextStateCallbackHandler(pa_context *c); + void PaSinkInfoCallbackHandler(const pa_sink_info *i, int eol); + void PaSourceInfoCallbackHandler(const pa_source_info *i, int eol); + void PaServerInfoCallbackHandler(const pa_server_info *i); + void PaStreamStateCallbackHandler(pa_stream *p); + + void EnableWriteCallback(); + void DisableWriteCallback(); + static void PaStreamWriteCallback(pa_stream *unused, size_t buffer_space, + void *pThis); + void PaStreamWriteCallbackHandler(size_t buffer_space); + static void PaStreamUnderflowCallback(pa_stream 
*unused, void *pThis); + void PaStreamUnderflowCallbackHandler(); + void EnableReadCallback(); + void DisableReadCallback(); + static void PaStreamReadCallback(pa_stream *unused1, size_t unused2, + void *pThis); + void PaStreamReadCallbackHandler(); + static void PaStreamOverflowCallback(pa_stream *unused, void *pThis); + void PaStreamOverflowCallbackHandler(); + WebRtc_Word32 LatencyUsecs(pa_stream *stream); + WebRtc_Word32 ReadRecordedData(const void* bufferData, size_t bufferSize); + WebRtc_Word32 ProcessRecordedData(WebRtc_Word8 *bufferData, + WebRtc_UWord32 bufferSizeInSamples, + WebRtc_UWord32 recDelay); + + WebRtc_Word32 CheckPulseAudioVersion(); + WebRtc_Word32 InitSamplingFrequency(); + WebRtc_Word32 GetDefaultDeviceInfo(bool recDevice, WebRtc_Word8* name, + WebRtc_UWord16& index); + WebRtc_Word32 InitPulseAudio(); + WebRtc_Word32 TerminatePulseAudio(); + + void PaLock(); + void PaUnLock(); + + static bool RecThreadFunc(void*); + static bool PlayThreadFunc(void*); + bool RecThreadProcess(); + bool PlayThreadProcess(); + +private: + AudioDeviceBuffer* _ptrAudioBuffer; + + CriticalSectionWrapper& _critSect; + EventWrapper& _timeEventRec; + EventWrapper& _timeEventPlay; + EventWrapper& _recStartEvent; + EventWrapper& _playStartEvent; + + ThreadWrapper* _ptrThreadPlay; + ThreadWrapper* _ptrThreadRec; + WebRtc_UWord32 _recThreadID; + WebRtc_UWord32 _playThreadID; + WebRtc_Word32 _id; + + AudioMixerManagerLinuxPulse _mixerManager; + + WebRtc_UWord16 _inputDeviceIndex; + WebRtc_UWord16 _outputDeviceIndex; + bool _inputDeviceIsSpecified; + bool _outputDeviceIsSpecified; + + WebRtc_UWord32 _samplingFreq; + WebRtc_UWord8 _recChannels; + WebRtc_UWord8 _playChannels; + + AudioDeviceModule::BufferType _playBufType; + +private: + bool _initialized; + bool _recording; + bool _playing; + bool _recIsInitialized; + bool _playIsInitialized; + bool _startRec; + bool _stopRec; + bool _startPlay; + bool _stopPlay; + bool _AGC; + +private: + WebRtc_UWord16 _playBufDelayFixed; // fixed playback delay + + WebRtc_UWord32 _sndCardPlayDelay; + WebRtc_UWord32 _sndCardRecDelay; + + WebRtc_Word32 _writeErrors; + WebRtc_UWord16 _playWarning; + WebRtc_UWord16 _playError; + WebRtc_UWord16 _recWarning; + WebRtc_UWord16 _recError; + + WebRtc_UWord16 _deviceIndex; + WebRtc_Word16 _numPlayDevices; + WebRtc_Word16 _numRecDevices; + WebRtc_Word8* _playDeviceName; + WebRtc_Word8* _recDeviceName; + WebRtc_Word8* _playDisplayDeviceName; + WebRtc_Word8* _recDisplayDeviceName; + WebRtc_Word8 _paServerVersion[32]; + + WebRtc_Word8* _playBuffer; + size_t _playbackBufferSize; + size_t _playbackBufferUnused; + size_t _tempBufferSpace; + WebRtc_Word8* _recBuffer; + size_t _recordBufferSize; + size_t _recordBufferUsed; + const void* _tempSampleData; + size_t _tempSampleDataSize; + WebRtc_Word32 _configuredLatencyPlay; + WebRtc_Word32 _configuredLatencyRec; + + // PulseAudio + WebRtc_UWord16 _paDeviceIndex; + bool _paStateChanged; + + pa_threaded_mainloop* _paMainloop; + pa_mainloop_api* _paMainloopApi; + pa_context* _paContext; + + pa_stream* _recStream; + pa_stream* _playStream; + WebRtc_UWord32 _recStreamFlags; + WebRtc_UWord32 _playStreamFlags; + pa_buffer_attr _playBufferAttr; + pa_buffer_attr _recBufferAttr; +}; + +} + +#endif // MODULES_AUDIO_DEVICE_MAIN_SOURCE_LINUX_AUDIO_DEVICE_PULSE_LINUX_H_ diff --git a/src/modules/audio_device/main/source/linux/audio_device_utility_linux.cc b/src/modules/audio_device/main/source/linux/audio_device_utility_linux.cc new file mode 100644 index 000000000..9c0b5e170 --- /dev/null +++ 
b/src/modules/audio_device/main/source/linux/audio_device_utility_linux.cc @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "audio_device_utility_linux.h" +#include "audio_device_config.h" // DEBUG_PRINT() +#include "critical_section_wrapper.h" +#include "trace.h" + +namespace webrtc +{ + +AudioDeviceUtilityLinux::AudioDeviceUtilityLinux(const WebRtc_Word32 id) : + _critSect(*CriticalSectionWrapper::CreateCriticalSection()), _id(id), + _lastError(AudioDeviceModule::kAdmErrNone) +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, + "%s created", __FUNCTION__); +} + +AudioDeviceUtilityLinux::~AudioDeviceUtilityLinux() +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, + "%s destroyed", __FUNCTION__); + { + CriticalSectionScoped lock(_critSect); + + // free stuff here... + } + + delete &_critSect; +} + +// ============================================================================ +// API +// ============================================================================ + + +WebRtc_Word32 AudioDeviceUtilityLinux::Init() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id, + " OS info: %s", "Linux"); + + return 0; +} + + +} // namespace webrtc diff --git a/src/modules/audio_device/main/source/linux/audio_device_utility_linux.h b/src/modules/audio_device/main/source/linux/audio_device_utility_linux.h new file mode 100644 index 000000000..8df7acc29 --- /dev/null +++ b/src/modules/audio_device/main/source/linux/audio_device_utility_linux.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_LINUX_H +#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_LINUX_H + +#include "audio_device_utility.h" +#include "audio_device.h" + +namespace webrtc +{ +class CriticalSectionWrapper; + +class AudioDeviceUtilityLinux: public AudioDeviceUtility +{ +public: + AudioDeviceUtilityLinux(const WebRtc_Word32 id); + ~AudioDeviceUtilityLinux(); + + virtual WebRtc_Word32 Init(); + +private: + CriticalSectionWrapper& _critSect; + WebRtc_Word32 _id; + AudioDeviceModule::ErrorCode _lastError; +}; + +} // namespace webrtc + +#endif // MODULES_AUDIO_DEVICE_MAIN_SOURCE_LINUX_AUDIO_DEVICE_UTILITY_LINUX_H_ diff --git a/src/modules/audio_device/main/source/linux/audio_mixer_manager_alsa_linux.cc b/src/modules/audio_device/main/source/linux/audio_mixer_manager_alsa_linux.cc new file mode 100644 index 000000000..0c513542a --- /dev/null +++ b/src/modules/audio_device/main/source/linux/audio_mixer_manager_alsa_linux.cc @@ -0,0 +1,1342 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include <assert.h> + +#include "audio_mixer_manager_alsa_linux.h" +#include "trace.h" + +extern webrtc_adm_linux_alsa::AlsaSymbolTable AlsaSymbolTable; + +// Accesses ALSA functions through our late-binding symbol table instead of +// directly. This way we don't have to link to libalsa, which means our binary +// will work on systems that don't have it. +#define LATE(sym) \ + LATESYM_GET(webrtc_adm_linux_alsa::AlsaSymbolTable, &AlsaSymbolTable, sym) + +namespace webrtc +{ + +AudioMixerManagerLinuxALSA::AudioMixerManagerLinuxALSA(const WebRtc_Word32 id) : + _critSect(*CriticalSectionWrapper::CreateCriticalSection()), + _id(id), + _outputMixerHandle(NULL), + _inputMixerHandle(NULL), + _outputMixerElement(NULL), + _inputMixerElement(NULL) +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, + "%s constructed", __FUNCTION__); + + memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize); + memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize); +} + +AudioMixerManagerLinuxALSA::~AudioMixerManagerLinuxALSA() +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, + "%s destructed", __FUNCTION__); + + Close(); + + delete &_critSect; +} + +// ============================================================================ +// PUBLIC METHODS +// ============================================================================ + +WebRtc_Word32 AudioMixerManagerLinuxALSA::Close() +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", + __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + CloseSpeaker(); + CloseMicrophone(); + + return 0; + +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::CloseSpeaker() +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", + __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + int errVal = 0; + + if (_outputMixerHandle != NULL) + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Closing playout mixer"); + LATE(snd_mixer_free)(_outputMixerHandle); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error freeing playout mixer: %s", + LATE(snd_strerror)(errVal)); + } + errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error detaching playout mixer: %s", + LATE(snd_strerror)(errVal)); + } + errVal = LATE(snd_mixer_close)(_outputMixerHandle); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error snd_mixer_close(handleMixer) errVal=%d", + errVal); + } + _outputMixerHandle = NULL; + _outputMixerElement = NULL; + } + memset(_outputMixerStr, 0, kAdmMaxDeviceNameSize); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::CloseMicrophone() +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + int errVal = 0; + + if (_inputMixerHandle != NULL) + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Closing record mixer"); + + LATE(snd_mixer_free)(_inputMixerHandle); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error freeing record mixer: %s", + LATE(snd_strerror)(errVal)); + } + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Closing record mixer 2"); + + errVal = 
LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error detachinging record mixer: %s", + LATE(snd_strerror)(errVal)); + } + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Closing record mixer 3"); + + errVal = LATE(snd_mixer_close)(_inputMixerHandle); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error snd_mixer_close(handleMixer) errVal=%d", + errVal); + } + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Closing record mixer 4"); + _inputMixerHandle = NULL; + _inputMixerElement = NULL; + } + memset(_inputMixerStr, 0, kAdmMaxDeviceNameSize); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::OpenSpeaker(char* deviceName) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxALSA::OpenSpeaker(name=%s)", deviceName); + + CriticalSectionScoped lock(_critSect); + + int errVal = 0; + + // Close any existing output mixer handle + // + if (_outputMixerHandle != NULL) + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Closing playout mixer"); + + LATE(snd_mixer_free)(_outputMixerHandle); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error freeing playout mixer: %s", + LATE(snd_strerror)(errVal)); + } + errVal = LATE(snd_mixer_detach)(_outputMixerHandle, _outputMixerStr); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error detachinging playout mixer: %s", + LATE(snd_strerror)(errVal)); + } + errVal = LATE(snd_mixer_close)(_outputMixerHandle); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error snd_mixer_close(handleMixer) errVal=%d", + errVal); + } + } + _outputMixerHandle = NULL; + _outputMixerElement = NULL; + + errVal = LATE(snd_mixer_open)(&_outputMixerHandle, 0); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "snd_mixer_open(&_outputMixerHandle, 0) - error"); + return -1; + } + + char controlName[kAdmMaxDeviceNameSize] = { 0 }; + GetControlName(controlName, deviceName); + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " snd_mixer_attach(_outputMixerHandle, %s)", controlName); + + errVal = LATE(snd_mixer_attach)(_outputMixerHandle, controlName); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " snd_mixer_attach(_outputMixerHandle, %s) error: %s", + controlName, LATE(snd_strerror)(errVal)); + _outputMixerHandle = NULL; + return -1; + } + strcpy(_outputMixerStr, controlName); + + errVal = LATE(snd_mixer_selem_register)(_outputMixerHandle, NULL, NULL); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " snd_mixer_selem_register(_outputMixerHandle," + " NULL, NULL), error: %s", + LATE(snd_strerror)(errVal)); + _outputMixerHandle = NULL; + return -1; + } + + // Load and find the proper mixer element + if (LoadSpeakerMixerElement() < 0) + { + return -1; + } + + if (_outputMixerHandle != NULL) + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " the output mixer device is now open (0x%x)", + _outputMixerHandle); + } + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::OpenMicrophone(char *deviceName) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxALSA::OpenMicrophone(name=%s)", + deviceName); + + CriticalSectionScoped lock(_critSect); + + int errVal = 0; + + // Close any existing input mixer handle + // + if (_inputMixerHandle != NULL) + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Closing 
record mixer"); + + LATE(snd_mixer_free)(_inputMixerHandle); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error freeing record mixer: %s", + LATE(snd_strerror)(errVal)); + } + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Closing record mixer"); + + errVal = LATE(snd_mixer_detach)(_inputMixerHandle, _inputMixerStr); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error detachinging record mixer: %s", + LATE(snd_strerror)(errVal)); + } + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Closing record mixer"); + + errVal = LATE(snd_mixer_close)(_inputMixerHandle); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error snd_mixer_close(handleMixer) errVal=%d", + errVal); + } + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Closing record mixer"); + } + _inputMixerHandle = NULL; + _inputMixerElement = NULL; + + errVal = LATE(snd_mixer_open)(&_inputMixerHandle, 0); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " snd_mixer_open(&_inputMixerHandle, 0) - error"); + return -1; + } + + char controlName[kAdmMaxDeviceNameSize] = { 0 }; + GetControlName(controlName, deviceName); + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " snd_mixer_attach(_inputMixerHandle, %s)", controlName); + + errVal = LATE(snd_mixer_attach)(_inputMixerHandle, controlName); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " snd_mixer_attach(_inputMixerHandle, %s) error: %s", + controlName, LATE(snd_strerror)(errVal)); + + _inputMixerHandle = NULL; + return -1; + } + strcpy(_inputMixerStr, controlName); + + errVal = LATE(snd_mixer_selem_register)(_inputMixerHandle, NULL, NULL); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " snd_mixer_selem_register(_inputMixerHandle," + " NULL, NULL), error: %s", + LATE(snd_strerror)(errVal)); + + _inputMixerHandle = NULL; + return -1; + } + // Load and find the proper mixer element + if (LoadMicMixerElement() < 0) + { + return -1; + } + + if (_inputMixerHandle != NULL) + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " the input mixer device is now open (0x%x)", + _inputMixerHandle); + } + + return 0; +} + +bool AudioMixerManagerLinuxALSA::SpeakerIsInitialized() const +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + return (_outputMixerHandle != NULL); +} + +bool AudioMixerManagerLinuxALSA::MicrophoneIsInitialized() const +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", + __FUNCTION__); + + return (_inputMixerHandle != NULL); +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::SetSpeakerVolume( + WebRtc_UWord32 volume) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxALSA::SetSpeakerVolume(volume=%u)", + volume); + + CriticalSectionScoped lock(_critSect); + + if (_outputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable output mixer element exists"); + return -1; + } + + int errVal = + LATE(snd_mixer_selem_set_playback_volume_all)(_outputMixerElement, + volume); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error changing master volume: %s", + LATE(snd_strerror)(errVal)); + return -1; + } + + return (0); +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::SpeakerVolume( + WebRtc_UWord32& volume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_outputMixerElement == NULL) + { + 
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no available output mixer element exists"); + return -1; + } + + long int vol(0); + + int + errVal = LATE(snd_mixer_selem_get_playback_volume)( + _outputMixerElement, + (snd_mixer_selem_channel_id_t) 0, + &vol); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Error getting output volume: %s", + LATE(snd_strerror)(errVal)); + return -1; + } + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerLinuxALSA::SpeakerVolume() => vol=%i", + vol); + + volume = static_cast<WebRtc_UWord32> (vol); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::MaxSpeakerVolume( + WebRtc_UWord32& maxVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_outputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no available output mixer element exists"); + return -1; + } + + long int minVol(0); + long int maxVol(0); + + int errVal = + LATE(snd_mixer_selem_get_playback_volume_range)(_outputMixerElement, + &minVol, &maxVol); + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Playout hardware volume range, min: %d, max: %d", + minVol, maxVol); + + if (maxVol <= minVol) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error getting get_playback_volume_range: %s", + LATE(snd_strerror)(errVal)); + } + + maxVolume = static_cast<WebRtc_UWord32> (maxVol); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::MinSpeakerVolume( + WebRtc_UWord32& minVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_outputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no available output mixer element exists"); + return -1; + } + + long int minVol(0); + long int maxVol(0); + + int errVal = + LATE(snd_mixer_selem_get_playback_volume_range)(_outputMixerElement, + &minVol, &maxVol); + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Playout hardware volume range, min: %d, max: %d", + minVol, maxVol); + + if (maxVol <= minVol) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error getting get_playback_volume_range: %s", + LATE(snd_strerror)(errVal)); + } + + minVolume = static_cast<WebRtc_UWord32> (minVol); + + return 0; +} + +// TL: Have done testing with these but they don't seem reliable and +// they were therefore not added +/* + // ---------------------------------------------------------------------------- + // SetMaxSpeakerVolume + // ---------------------------------------------------------------------------- + + WebRtc_Word32 AudioMixerManagerLinuxALSA::SetMaxSpeakerVolume( + WebRtc_UWord32 maxVolume) + { + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + if (_outputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no available output mixer element exists"); + return -1; + } + + long int minVol(0); + long int maxVol(0); + + int errVal = snd_mixer_selem_get_playback_volume_range( + _outputMixerElement, &minVol, &maxVol); + if ((maxVol <= minVol) || (errVal != 0)) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Error getting playback volume range: %s", snd_strerror(errVal)); + } + + maxVol = maxVolume; + errVal = snd_mixer_selem_set_playback_volume_range( + _outputMixerElement, minVol, maxVol); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Playout hardware volume range, min: %d, max: %d", minVol, maxVol); + if (errVal != 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 
+ " Error setting playback volume range: %s", snd_strerror(errVal)); + return -1; + } + + return 0; + } + + // ---------------------------------------------------------------------------- + // SetMinSpeakerVolume + // ---------------------------------------------------------------------------- + + WebRtc_Word32 AudioMixerManagerLinuxALSA::SetMinSpeakerVolume( + WebRtc_UWord32 minVolume) + { + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + if (_outputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable output mixer element exists"); + return -1; + } + + long int minVol(0); + long int maxVol(0); + + int errVal = snd_mixer_selem_get_playback_volume_range( + _outputMixerElement, &minVol, &maxVol); + if ((maxVol <= minVol) || (errVal != 0)) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Error getting playback volume range: %s", snd_strerror(errVal)); + } + + minVol = minVolume; + errVal = snd_mixer_selem_set_playback_volume_range( + _outputMixerElement, minVol, maxVol); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Playout hardware volume range, min: %d, max: %d", minVol, maxVol); + if (errVal != 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error setting playback volume range: %s", snd_strerror(errVal)); + return -1; + } + + return 0; + } + */ + +WebRtc_Word32 AudioMixerManagerLinuxALSA::SpeakerVolumeStepSize( + WebRtc_UWord16& stepSize) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_outputMixerHandle == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable output mixer exists"); + return -1; + } + + // The step size is always 1 for ALSA + stepSize = 1; + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::SpeakerVolumeIsAvailable( + bool& available) +{ + if (_outputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable output mixer element exists"); + return -1; + } + + available = LATE(snd_mixer_selem_has_playback_volume)(_outputMixerElement); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::SpeakerMuteIsAvailable( + bool& available) +{ + if (_outputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable output mixer element exists"); + return -1; + } + + available = LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::SetSpeakerMute(bool enable) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxALSA::SetSpeakerMute(enable=%u)", + enable); + + CriticalSectionScoped lock(_critSect); + + if (_outputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable output mixer element exists"); + return -1; + } + + // Ensure that the selected speaker destination has a valid mute control. 
+ bool available(false); + SpeakerMuteIsAvailable(available); + if (!available) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " it is not possible to mute the speaker"); + return -1; + } + + // Note value = 0 (off) means muted + int errVal = + LATE(snd_mixer_selem_set_playback_switch_all)(_outputMixerElement, + !enable); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error setting playback switch: %s", + LATE(snd_strerror)(errVal)); + return -1; + } + + return (0); +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::SpeakerMute(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_outputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no available output mixer exists"); + return -1; + } + + // Ensure that the selected speaker destination has a valid mute control. + bool available = + LATE(snd_mixer_selem_has_playback_switch)(_outputMixerElement); + if (!available) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " it is not possible to mute the speaker"); + return -1; + } + + int value(false); + + // Retrieve one boolean control value for a specified mute-control + // + int + errVal = LATE(snd_mixer_selem_get_playback_switch)( + _outputMixerElement, + (snd_mixer_selem_channel_id_t) 0, + &value); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error getting playback switch: %s", + LATE(snd_strerror)(errVal)); + return -1; + } + + // Note value = 0 (off) means muted + enabled = (bool) !value; + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::MicrophoneMuteIsAvailable( + bool& available) +{ + if (_inputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no available input mixer element exists"); + return -1; + } + + available = LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::SetMicrophoneMute(bool enable) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxALSA::SetMicrophoneMute(enable=%u)", + enable); + + CriticalSectionScoped lock(_critSect); + + if (_inputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no available input mixer element exists"); + return -1; + } + + // Ensure that the selected microphone destination has a valid mute control. + bool available(false); + MicrophoneMuteIsAvailable(available); + if (!available) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " it is not possible to mute the microphone"); + return -1; + } + + // Note value = 0 (off) means muted + int errVal = + LATE(snd_mixer_selem_set_capture_switch_all)(_inputMixerElement, + !enable); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error setting capture switch: %s", + LATE(snd_strerror)(errVal)); + return -1; + } + + return (0); +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::MicrophoneMute(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_inputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no available input mixer exists"); + return -1; + } + + // Ensure that the selected microphone destination has a valid mute control. 
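The mute handling above leans on the convention called out in the inline comments: ALSA's simple-mixer "switch" is an is-active flag, so value 0 means muted, which is why `enable` is negated before the `_switch_all` calls. A minimal standalone sketch of the same convention (hypothetical helper names, not part of this patch; assumes `elem` is a loaded simple-mixer element):

```cpp
#include <alsa/asoundlib.h>

// Sketch only: 0 = switch off = muted, 1 = switch on = audible,
// hence the negation of the "mute" intent.
int SetElementMuted(snd_mixer_elem_t* elem, bool mute) {
    if (!snd_mixer_selem_has_playback_switch(elem))
        return -1;  // this element has no mute control at all
    return snd_mixer_selem_set_playback_switch_all(elem, mute ? 0 : 1);
}

bool ElementIsMuted(snd_mixer_elem_t* elem) {
    int value = 1;
    snd_mixer_selem_get_playback_switch(elem, SND_MIXER_SCHN_FRONT_LEFT,
                                        &value);
    return value == 0;  // switch off means muted
}
```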
+ bool available = + LATE(snd_mixer_selem_has_capture_switch)(_inputMixerElement); + if (!available) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " it is not possible to mute the microphone"); + return -1; + } + + int value(false); + + // Retrieve one boolean control value for a specified mute-control + // + int + errVal = LATE(snd_mixer_selem_get_capture_switch)( + _inputMixerElement, + (snd_mixer_selem_channel_id_t) 0, + &value); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error getting capture switch: %s", + LATE(snd_strerror)(errVal)); + return -1; + } + + // Note value = 0 (off) means muted + enabled = (bool) !value; + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::MicrophoneBoostIsAvailable( + bool& available) +{ + if (_inputMixerHandle == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable input mixer exists"); + return -1; + } + + // Microphone boost cannot be enabled through ALSA Simple Mixer Interface + available = false; + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::SetMicrophoneBoost(bool enable) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxALSA::SetMicrophoneBoost(enable=%u)", + enable); + + CriticalSectionScoped lock(_critSect); + + if (_inputMixerHandle == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable input mixer exists"); + return -1; + } + + // Ensure that the selected microphone destination has a valid mute control. + bool available(false); + MicrophoneMuteIsAvailable(available); + if (!available) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " it is not possible to enable microphone boost"); + return -1; + } + + // It is assumed that the call above fails! + + return (0); +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::MicrophoneBoost(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_inputMixerHandle == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable input mixer exists"); + return -1; + } + + // Microphone boost cannot be enabled on this platform! 
+ enabled = false; + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::MicrophoneVolumeIsAvailable( + bool& available) +{ + if (_inputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable input mixer element exists"); + return -1; + } + + available = LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::SetMicrophoneVolume( + WebRtc_UWord32 volume) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxALSA::SetMicrophoneVolume(volume=%u)", + volume); + + CriticalSectionScoped lock(_critSect); + + if (_inputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable input mixer element exists"); + return -1; + } + + int + errVal = + LATE(snd_mixer_selem_set_capture_volume_all)(_inputMixerElement, + volume); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error changing microphone volume: %s", + LATE(snd_strerror)(errVal)); + return -1; + } + + return (0); +} + +// TL: Have done testnig with these but they don't seem reliable and +// they were therefore not added +/* + // ---------------------------------------------------------------------------- + // SetMaxMicrophoneVolume + // ---------------------------------------------------------------------------- + + WebRtc_Word32 AudioMixerManagerLinuxALSA::SetMaxMicrophoneVolume( + WebRtc_UWord32 maxVolume) + { + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + if (_inputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable output mixer element exists"); + return -1; + } + + long int minVol(0); + long int maxVol(0); + + int errVal = snd_mixer_selem_get_capture_volume_range(_inputMixerElement, + &minVol, &maxVol); + if ((maxVol <= minVol) || (errVal != 0)) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Error getting capture volume range: %s", snd_strerror(errVal)); + } + + maxVol = (long int)maxVolume; + printf("min %d max %d", minVol, maxVol); + errVal = snd_mixer_selem_set_capture_volume_range(_inputMixerElement, minVol, maxVol); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Capture hardware volume range, min: %d, max: %d", minVol, maxVol); + if (errVal != 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error setting capture volume range: %s", snd_strerror(errVal)); + return -1; + } + + return 0; + } + + // ---------------------------------------------------------------------------- + // SetMinMicrophoneVolume + // ---------------------------------------------------------------------------- + + WebRtc_Word32 AudioMixerManagerLinuxALSA::SetMinMicrophoneVolume( + WebRtc_UWord32 minVolume) + { + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); + + if (_inputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no avaliable output mixer element exists"); + return -1; + } + + long int minVol(0); + long int maxVol(0); + + int errVal = snd_mixer_selem_get_capture_volume_range( + _inputMixerElement, &minVol, &maxVol); + if (maxVol <= minVol) + { + //maxVol = 255; + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Error getting capture volume range: %s", snd_strerror(errVal)); + } + + printf("min %d max %d", minVol, maxVol); + minVol = (long int)minVolume; + errVal = snd_mixer_selem_set_capture_volume_range( + _inputMixerElement, minVol, maxVol); + 
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Capture hardware volume range, min: %d, max: %d", minVol, maxVol); + if (errVal != 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error setting capture volume range: %s", snd_strerror(errVal)); + return -1; + } + + return 0; + } + */ + +WebRtc_Word32 AudioMixerManagerLinuxALSA::MicrophoneVolume( + WebRtc_UWord32& volume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_inputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no available input mixer element exists"); + return -1; + } + + long int vol(0); + + int + errVal = + LATE(snd_mixer_selem_get_capture_volume)( + _inputMixerElement, + (snd_mixer_selem_channel_id_t) 0, + &vol); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Error getting input volume: %s", + LATE(snd_strerror)(errVal)); + return -1; + } + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerLinuxALSA::MicrophoneVolume() => vol=%i", + vol); + + volume = static_cast<WebRtc_UWord32> (vol); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::MaxMicrophoneVolume( + WebRtc_UWord32& maxVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_inputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no available input mixer element exists"); + return -1; + } + + long int minVol(0); + long int maxVol(0); + + // check if we have mic volume at all + if (!LATE(snd_mixer_selem_has_capture_volume)(_inputMixerElement)) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " No microphone volume available"); + return -1; + } + + int errVal = + LATE(snd_mixer_selem_get_capture_volume_range)(_inputMixerElement, + &minVol, &maxVol); + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Microphone hardware volume range, min: %d, max: %d", + minVol, maxVol); + if (maxVol <= minVol) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error getting microphone volume range: %s", + LATE(snd_strerror)(errVal)); + } + + maxVolume = static_cast<WebRtc_UWord32> (maxVol); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::MinMicrophoneVolume( + WebRtc_UWord32& minVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_inputMixerElement == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no available input mixer element exists"); + return -1; + } + + long int minVol(0); + long int maxVol(0); + + int errVal = + LATE(snd_mixer_selem_get_capture_volume_range)(_inputMixerElement, + &minVol, &maxVol); + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Microphone hardware volume range, min: %d, max: %d", + minVol, maxVol); + if (maxVol <= minVol) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error getting microphone volume range: %s", + LATE(snd_strerror)(errVal)); + } + + minVolume = static_cast<WebRtc_UWord32> (minVol); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::MicrophoneVolumeStepSize( + WebRtc_UWord16& stepSize) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_inputMixerHandle == NULL) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " no available input mixer exists"); + return -1; + } + + // The step size is always 1 for ALSA + stepSize = 1; + + return 0; +} + +// ============================================================================ +// Private Methods +// 
============================================================================ + +WebRtc_Word32 AudioMixerManagerLinuxALSA::LoadMicMixerElement() const +{ + int errVal = LATE(snd_mixer_load)(_inputMixerHandle); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "snd_mixer_load(_inputMixerHandle), error: %s", + LATE(snd_strerror)(errVal)); + _inputMixerHandle = NULL; + return -1; + } + + snd_mixer_elem_t *elem = NULL; + snd_mixer_elem_t *micElem = NULL; + unsigned mixerIdx = 0; + const char *selemName = NULL; + + // Find and store handles to the right mixer elements + for (elem = LATE(snd_mixer_first_elem)(_inputMixerHandle); elem; elem + = LATE(snd_mixer_elem_next)(elem), mixerIdx++) + { + if (LATE(snd_mixer_selem_is_active)(elem)) + { + selemName = LATE(snd_mixer_selem_get_name)(elem); + if (strcmp(selemName, "Capture") == 0) // "Capture", "Mic" + { + _inputMixerElement = elem; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, + _id, " Capture element set"); + } else if (strcmp(selemName, "Mic") == 0) + { + micElem = elem; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, + _id, " Mic element found"); + } + } + + if (_inputMixerElement) + { + // Use the first Capture element that is found + // The second one may not work + break; + } + } + + if (_inputMixerElement == NULL) + { + // We didn't find a Capture handle, use Mic. + if (micElem != NULL) + { + _inputMixerElement = micElem; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Using Mic as capture volume."); + } else + { + _inputMixerElement = NULL; + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Could not find capture volume on the mixer."); + + return -1; + } + } + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxALSA::LoadSpeakerMixerElement() const +{ + int errVal = LATE(snd_mixer_load)(_outputMixerHandle); + if (errVal < 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " snd_mixer_load(_outputMixerHandle), error: %s", + LATE(snd_strerror)(errVal)); + _outputMixerHandle = NULL; + return -1; + } + + snd_mixer_elem_t *elem = NULL; + snd_mixer_elem_t *masterElem = NULL; + snd_mixer_elem_t *speakerElem = NULL; + unsigned mixerIdx = 0; + const char *selemName = NULL; + + // Find and store handles to the right mixer elements + for (elem = LATE(snd_mixer_first_elem)(_outputMixerHandle); elem; elem + = LATE(snd_mixer_elem_next)(elem), mixerIdx++) + { + if (LATE(snd_mixer_selem_is_active)(elem)) + { + selemName = LATE(snd_mixer_selem_get_name)(elem); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "snd_mixer_selem_get_name %d: %s =%x", mixerIdx, + selemName, elem); + + // "Master", "PCM", "Wave", "Master Mono", "PC Speaker", "PCM", "Wave" + if (strcmp(selemName, "PCM") == 0) + { + _outputMixerElement = elem; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, + _id, " PCM element set"); + } else if (strcmp(selemName, "Master") == 0) + { + masterElem = elem; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, + _id, " Master element found"); + } else if (strcmp(selemName, "Speaker") == 0) + { + speakerElem = elem; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, + _id, " Speaker element found"); + } + } + + if (_outputMixerElement) + { + // We have found the element we want + break; + } + } + + // If we didn't find a PCM Handle, use Master or Speaker + if (_outputMixerElement == NULL) + { + if (masterElem != NULL) + { + _outputMixerElement = masterElem; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Using Master as output volume."); + } else if (speakerElem != NULL) + { + _outputMixerElement = 
speakerElem; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Using Speaker as output volume."); + } else + { + _outputMixerElement = NULL; + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Could not find output volume in the mixer."); + return -1; + } + } + + return 0; +} + +void AudioMixerManagerLinuxALSA::GetControlName(char* controlName, + char* deviceName) const +{ + // Example + // deviceName: "front:CARD=Intel,DEV=0" + // controlName: "hw:CARD=Intel" + char* pos1 = strchr(deviceName, ':'); + char* pos2 = strchr(deviceName, ','); + if (!pos2) + { + // Can also be default:CARD=Intel + pos2 = &deviceName[strlen(deviceName)]; + } + if (pos1 && pos2) + { + strcpy(controlName, "hw"); + int nChar = (int) (pos2 - pos1); + strncpy(&controlName[2], pos1, nChar); + controlName[2 + nChar] = '\0'; + } else + { + strcpy(controlName, deviceName); + } + +} + +} diff --git a/src/modules/audio_device/main/source/linux/audio_mixer_manager_alsa_linux.h b/src/modules/audio_device/main/source/linux/audio_mixer_manager_alsa_linux.h new file mode 100644 index 000000000..94ea9820f --- /dev/null +++ b/src/modules/audio_device/main/source/linux/audio_mixer_manager_alsa_linux.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H +#define WEBRTC_AUDIO_DEVICE_AUDIO_MIXER_MANAGER_ALSA_LINUX_H + +#include "typedefs.h" +#include "audio_device.h" +#include "critical_section_wrapper.h" +#include "alsasymboltable_linux.h" + +#include <alsa/asoundlib.h> + +namespace webrtc +{ + +class AudioMixerManagerLinuxALSA +{ +public: + WebRtc_Word32 OpenSpeaker(char* deviceName); + WebRtc_Word32 OpenMicrophone(char* deviceName); + WebRtc_Word32 SetSpeakerVolume(WebRtc_UWord32 volume); + WebRtc_Word32 SpeakerVolume(WebRtc_UWord32& volume) const; + WebRtc_Word32 MaxSpeakerVolume(WebRtc_UWord32& maxVolume) const; + WebRtc_Word32 MinSpeakerVolume(WebRtc_UWord32& minVolume) const; + WebRtc_Word32 SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const; + WebRtc_Word32 SpeakerVolumeIsAvailable(bool& available); + WebRtc_Word32 SpeakerMuteIsAvailable(bool& available); + WebRtc_Word32 SetSpeakerMute(bool enable); + WebRtc_Word32 SpeakerMute(bool& enabled) const; + WebRtc_Word32 MicrophoneMuteIsAvailable(bool& available); + WebRtc_Word32 SetMicrophoneMute(bool enable); + WebRtc_Word32 MicrophoneMute(bool& enabled) const; + WebRtc_Word32 MicrophoneBoostIsAvailable(bool& available); + WebRtc_Word32 SetMicrophoneBoost(bool enable); + WebRtc_Word32 MicrophoneBoost(bool& enabled) const; + WebRtc_Word32 MicrophoneVolumeIsAvailable(bool& available); + WebRtc_Word32 SetMicrophoneVolume(WebRtc_UWord32 volume); + WebRtc_Word32 MicrophoneVolume(WebRtc_UWord32& volume) const; + WebRtc_Word32 MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const; + WebRtc_Word32 MinMicrophoneVolume(WebRtc_UWord32& minVolume) const; + WebRtc_Word32 MicrophoneVolumeStepSize(WebRtc_UWord16& stepSize) const; + WebRtc_Word32 Close(); + WebRtc_Word32 CloseSpeaker(); + WebRtc_Word32 CloseMicrophone(); + bool SpeakerIsInitialized() const; + bool MicrophoneIsInitialized() const; + +public: + AudioMixerManagerLinuxALSA(const WebRtc_Word32 id); + 
~AudioMixerManagerLinuxALSA(); + +private: + WebRtc_Word32 LoadMicMixerElement() const; + WebRtc_Word32 LoadSpeakerMixerElement() const; + void GetControlName(char *controlName, char* deviceName) const; + +private: + CriticalSectionWrapper& _critSect; + WebRtc_Word32 _id; + mutable snd_mixer_t* _outputMixerHandle; + char _outputMixerStr[kAdmMaxDeviceNameSize]; + mutable snd_mixer_t* _inputMixerHandle; + char _inputMixerStr[kAdmMaxDeviceNameSize]; + mutable snd_mixer_elem_t* _outputMixerElement; + mutable snd_mixer_elem_t* _inputMixerElement; +}; + +} + +#endif // MODULES_AUDIO_DEVICE_MAIN_SOURCE_LINUX_AUDIO_MIXER_MANAGER_ALSA_LINUX_H_ diff --git a/src/modules/audio_device/main/source/linux/audio_mixer_manager_pulse_linux.cc b/src/modules/audio_device/main/source/linux/audio_mixer_manager_pulse_linux.cc new file mode 100644 index 000000000..ad7e90980 --- /dev/null +++ b/src/modules/audio_device/main/source/linux/audio_mixer_manager_pulse_linux.cc @@ -0,0 +1,1301 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include + +#include "audio_mixer_manager_pulse_linux.h" +#include "trace.h" + +extern webrtc_adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable; + +// Accesses Pulse functions through our late-binding symbol table instead of +// directly. This way we don't have to link to libpulse, which means our binary +// will work on systems that don't have it. +#define LATE(sym) \ + LATESYM_GET(webrtc_adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, sym) + +namespace webrtc +{ + +AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse(const WebRtc_Word32 id) : + _critSect(*CriticalSectionWrapper::CreateCriticalSection()), + _id(id), + _paOutputDeviceIndex(-1), + _paInputDeviceIndex(-1), + _paPlayStream(NULL), + _paRecStream(NULL), + _paMainloop(NULL), + _paContext(NULL), + _paVolume(0), + _paMute(0), + _paVolSteps(0), + _paSpeakerMute(false), + _paSpeakerVolume(0), + _paChannels(0), + _paObjectsSet(false), + _callbackValues(false) +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, + "%s constructed", __FUNCTION__); +} + +AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse() +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, + "%s destructed", __FUNCTION__); + + Close(); + + delete &_critSect; +} + +// ============================================================================ +// PUBLIC METHODS +// ============================================================================ + +WebRtc_Word32 AudioMixerManagerLinuxPulse::SetPulseAudioObjects( + pa_threaded_mainloop* mainloop, + pa_context* context) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", + __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (!mainloop || !context) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " could not set PulseAudio objects for mixer"); + return -1; + } + + _paMainloop = mainloop; + _paContext = context; + _paObjectsSet = true; + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " the PulseAudio objects for the mixer has been set"); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::Close() +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", + __FUNCTION__); + + 
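A note on the LATE() macro defined near the top of this file: combined with the LATESYM_* macros from latebindingsymboltable_linux.h (later in this patch), a call such as LATE(pa_operation_unref)(paOperation) expands to roughly the following, i.e. a typed function pointer fetched by index from the lazily loaded symbol table (a sketch for illustration only):

    (*reinterpret_cast<typeof(&pa_operation_unref)*>(
        (&PaSymbolTable)->GetSymbol(
            LATESYM_INDEXOF(PulseAudioSymbolTable, pa_operation_unref))))(paOperation);

No load-time dependency on libpulse.so.0 is created; a LATE() call site only works once the table's Load() has dlopen'ed the library.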
CriticalSectionScoped lock(_critSect); + + CloseSpeaker(); + CloseMicrophone(); + + _paMainloop = NULL; + _paContext = NULL; + _paObjectsSet = false; + + return 0; + +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::CloseSpeaker() +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", + __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + // Reset the index to -1 + _paOutputDeviceIndex = -1; + _paPlayStream = NULL; + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::CloseMicrophone() +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", + __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + // Reset the index to -1 + _paInputDeviceIndex = -1; + _paRecStream = NULL; + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxPulse::SetPlayStream(playStream)"); + + CriticalSectionScoped lock(_critSect); + _paPlayStream = playStream; + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxPulse::SetRecStream(recStream)"); + + CriticalSectionScoped lock(_critSect); + _paRecStream = recStream; + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::OpenSpeaker( + WebRtc_UWord16 deviceIndex) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex=%d)", + deviceIndex); + + CriticalSectionScoped lock(_critSect); + + // No point in opening the speaker + // if PA objects have not been set + if (!_paObjectsSet) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " PulseAudio objects has not been set"); + return -1; + } + + // Set the index for the PulseAudio + // output device to control + _paOutputDeviceIndex = deviceIndex; + + // Init the speaker volume to the normal volume + _paSpeakerVolume = PA_VOLUME_NORM; + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " the output mixer device is now open"); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::OpenMicrophone( + WebRtc_UWord16 deviceIndex) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxPulse::OpenMicrophone(deviceIndex=%d)", + deviceIndex); + + CriticalSectionScoped lock(_critSect); + + // No point in opening the microphone + // if PA objects have not been set + if (!_paObjectsSet) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " PulseAudio objects have not been set"); + return -1; + } + + // Set the index for the PulseAudio + // input device to control + _paInputDeviceIndex = deviceIndex; + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " the input mixer device is now open"); + + return 0; +} + +bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", + __FUNCTION__); + + return (_paOutputDeviceIndex != -1); +} + +bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", + __FUNCTION__); + + return (_paInputDeviceIndex != -1); +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::SetSpeakerVolume( + WebRtc_UWord32 volume) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume=%u)", + volume); + + CriticalSectionScoped lock(_critSect); + + if (_paOutputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " output device index has not 
been set"); + return -1; + } + + bool setFailed(false); + + if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream) + != PA_STREAM_UNCONNECTED)) + { + // We can only really set the volume if we have a connected stream + PaLock(); + + // Get the number of channels from the sample specification + const pa_sample_spec *spec = + LATE(pa_stream_get_sample_spec)(_paPlayStream); + if (!spec) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " could not get sample specification"); + PaUnLock(); + return -1; + } + + // Set the same volume for all channels + pa_cvolume cVolumes; + LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume); + + pa_operation* paOperation = NULL; + paOperation = LATE(pa_context_set_sink_input_volume)( + _paContext, + LATE(pa_stream_get_index)(_paPlayStream), + &cVolumes, + PaSetVolumeCallback, NULL); + if (!paOperation) + { + setFailed = true; + } + + // Don't need to wait for the completion + LATE(pa_operation_unref)(paOperation); + + PaUnLock(); + } else + { + // We have not created a stream or it's not connected to the sink + // Save the volume to be set at connection + _paSpeakerVolume = volume; + } + + if (setFailed) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " could not set speaker volume, error%d", + LATE(pa_context_errno)(_paContext)); + + return -1; + } + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerLinuxPulse::SpeakerVolume(WebRtc_UWord32& volume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_paOutputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " output device index has not been set"); + return -1; + } + + if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream) + != PA_STREAM_UNCONNECTED)) + { + // We can only get the volume if we have a connected stream + pa_operation* paOperation = NULL; + ResetCallbackVariables(); + PaLock(); + + // Get info for this stream (sink input) + paOperation = LATE(pa_context_get_sink_input_info)( + _paContext, + LATE(pa_stream_get_index)(_paPlayStream), + PaSinkInputInfoCallback, + (void*) this); + + WaitForOperationCompletion(paOperation); + PaUnLock(); + + if (!_callbackValues) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Error getting output volume: %d", + LATE(pa_context_errno)(_paContext)); + return -1; + } + + volume = static_cast (_paVolume); + ResetCallbackVariables(); + } else + { + volume = _paSpeakerVolume; + } + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerLinuxPulse::SpeakerVolume() => vol=%i", + volume); + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerLinuxPulse::MaxSpeakerVolume(WebRtc_UWord32& maxVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_paOutputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " output device index has not been set"); + return -1; + } + + // PA_VOLUME_NORM corresponds to 100% (0db) + // but PA allows up to 150 db amplification + maxVolume = static_cast (PA_VOLUME_NORM); + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerLinuxPulse::MinSpeakerVolume(WebRtc_UWord32& minVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_paOutputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " output device index has not been set"); + return -1; + } + + minVolume = static_cast (PA_VOLUME_MUTED); + + return 0; +} + +WebRtc_Word32 
+AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_paOutputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " output device index has not been set"); + return -1; + } + + // The sink input (stream) will always have step size = 1 + // There are PA_VOLUME_NORM+1 steps + stepSize = 1; + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize() => " + "size=%i", stepSize); + + // Reset members modified by callback + ResetCallbackVariables(); + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available) +{ + if (_paOutputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " output device index has not been set"); + return -1; + } + + // Always available in Pulse Audio + available = true; + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available) +{ + if (_paOutputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " output device index has not been set"); + return -1; + } + + // Always available in Pulse Audio + available = true; + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxPulse::SetSpeakerMute(enable=%u)", + enable); + + CriticalSectionScoped lock(_critSect); + + if (_paOutputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " output device index has not been set"); + return -1; + } + + bool setFailed(false); + + if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream) + != PA_STREAM_UNCONNECTED)) + { + // We can only really mute if we have a connected stream + PaLock(); + + pa_operation* paOperation = NULL; + paOperation = LATE(pa_context_set_sink_input_mute)( + _paContext, + LATE(pa_stream_get_index)(_paPlayStream), + (int) enable, + PaSetVolumeCallback, + NULL); + if (!paOperation) + { + setFailed = true; + } + + // Don't need to wait for the completion + LATE(pa_operation_unref)(paOperation); + + PaUnLock(); + } else + { + // We have not created a stream or it's not connected to the sink + // Save the mute status to be set at connection + _paSpeakerMute = enable; + } + + if (setFailed) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " could not mute speaker, error %d", + LATE(pa_context_errno)(_paContext)); + return -1; + } + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_paOutputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " output device index has not been set"); + return -1; + } + + if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream) + != PA_STREAM_UNCONNECTED)) + { + // We can only get the mute status if we have a connected stream + pa_operation* paOperation = NULL; + ResetCallbackVariables(); + PaLock(); + + // Get info for this stream (sink input) + paOperation = LATE(pa_context_get_sink_input_info)( + _paContext, + LATE(pa_stream_get_index)(_paPlayStream), + PaSinkInputInfoCallback, + (void*) this); + + WaitForOperationCompletion(paOperation); + PaUnLock(); + + if (!_callbackValues) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Error getting output volume: %d", + 
LATE(pa_context_errno)(_paContext)); + return -1; + } + + enabled = static_cast<bool> (_paMute); + ResetCallbackVariables(); + } else + { + enabled = _paSpeakerMute; + } + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerLinuxPulse::SpeakerMute() => " + "enabled=%i", enabled); + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available) +{ + if (_paOutputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " output device index has not been set"); + return -1; + } + + uint32_t deviceIndex = (uint32_t) _paOutputDeviceIndex; + + PaLock(); + + // Get the actual stream device index if we have a connected stream + // The device used by the stream can be changed + // during the call + if (_paPlayStream && (LATE(pa_stream_get_state)(_paPlayStream) + != PA_STREAM_UNCONNECTED)) + { + deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream); + } + + pa_operation* paOperation = NULL; + ResetCallbackVariables(); + + // Get info for this sink + // We want to know if the actual device can play out in stereo + paOperation = LATE(pa_context_get_sink_info_by_index)(_paContext, + deviceIndex, + PaSinkInfoCallback, + (void*) this); + + WaitForOperationCompletion(paOperation); + PaUnLock(); + + if (!_callbackValues) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Error getting number of output channels: %d", + LATE(pa_context_errno)(_paContext)); + return -1; + } + + available = static_cast<bool> (_paChannels == 2); + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable() " + "=> available=%i", available); + + // Reset members modified by callback + ResetCallbackVariables(); + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available) +{ + if (_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; + + PaLock(); + + // Get the actual stream device index if we have a connected stream + // The device used by the stream can be changed + // during the call + if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream) + != PA_STREAM_UNCONNECTED)) + { + deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream); + } + + pa_operation* paOperation = NULL; + ResetCallbackVariables(); + + // Get info for this source + // We want to know if the actual device can record in stereo + paOperation = LATE(pa_context_get_source_info_by_index)( + _paContext, deviceIndex, + PaSourceInfoCallback, + (void*) this); + + WaitForOperationCompletion(paOperation); + PaUnLock(); + + if (!_callbackValues) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Error getting number of input channels: %d", + LATE(pa_context_errno)(_paContext)); + return -1; + } + + available = static_cast<bool> (_paChannels == 2); + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()" + " => available=%i", available); + + // Reset members modified by callback + ResetCallbackVariables(); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable( + bool& available) +{ + if (_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + // Always available in Pulse Audio + available = true; + + return 0; +} + +WebRtc_Word32 
AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable=%u)", + enable); + + CriticalSectionScoped lock(_critSect); + + if (_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + bool setFailed(false); + pa_operation* paOperation = NULL; + ResetCallbackVariables(); + + uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; + + PaLock(); + + // Get the actual stream device index if we have a connected stream + // The device used by the stream can be changed + // during the call + if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream) + != PA_STREAM_UNCONNECTED)) + { + deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream); + } + + // Set mute switch for the source + paOperation = LATE(pa_context_set_source_mute_by_index)( + _paContext, deviceIndex, + enable, + PaSetVolumeCallback, NULL); + + if (!paOperation) + { + setFailed = true; + } + + // Don't need to wait for this to complete. + LATE(pa_operation_unref)(paOperation); + + PaUnLock(); + + // Reset variables altered by callback + ResetCallbackVariables(); + + if (setFailed) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " could not mute microphone, error %d", + LATE(pa_context_errno)(_paContext)); + return -1; + } + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; + + PaLock(); + + // Get the actual stream device index if we have a connected stream + // The device used by the stream can be changed + // during the call + if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream) + != PA_STREAM_UNCONNECTED)) + { + deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream); + } + + pa_operation* paOperation = NULL; + ResetCallbackVariables(); + + // Get info for this source + paOperation + = LATE(pa_context_get_source_info_by_index)(_paContext, deviceIndex, + PaSourceInfoCallback, + (void*) this); + + WaitForOperationCompletion(paOperation); + + PaUnLock(); + + if (!_callbackValues) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Error getting input mute status: %d", + LATE(pa_context_errno)(_paContext)); + return -1; + } + + enabled = static_cast<bool> (_paMute); + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerLinuxPulse::MicrophoneMute() =>" + " enabled=%i", enabled); + + // Reset members modified by callback + ResetCallbackVariables(); + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerLinuxPulse::MicrophoneBoostIsAvailable(bool& available) +{ + if (_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + // Always unavailable in Pulse Audio + // Could make it possible to use PA_VOLUME_MAX + // but that gives bad audio with some sound cards + available = false; + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::SetMicrophoneBoost(bool enable) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxPulse::SetMicrophoneBoost(enable=%u)", + enable); + + CriticalSectionScoped lock(_critSect); + + if 
(_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + // Ensure that the selected microphone destination has a valid boost control + bool available(false); + MicrophoneBoostIsAvailable(available); + if (!available) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " it is not possible to enable microphone boost"); + return -1; + } + + // It is assumed that the call above fails! + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::MicrophoneBoost(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + // Microphone boost cannot be enabled on this platform! + enabled = false; + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::MicrophoneVolumeIsAvailable( + bool& available) +{ + if (_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + // Always available in Pulse Audio + available = true; + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerLinuxPulse::SetMicrophoneVolume(WebRtc_UWord32 volume) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerLinuxPulse::SetMicrophoneVolume(volume=%u)", + volume); + + CriticalSectionScoped lock(_critSect); + + if (_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + // Unlike output streams, input streams have no concept of a stream volume, + // only a device volume. So we have to change the volume of the device + // itself. + + // The device may have a different number of channels than the stream and + // their mapping may be different, so we don't want to use the channel count + // from our sample spec. We could use PA_CHANNELS_MAX to cover our bases, + // and the server allows that even if the device's channel count is lower, + // but some buggy PA clients don't like that (the pavucontrol on Hardy dies + // in an assert if the channel count is different). So instead we look up + // the actual number of channels that the device has. 
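In API terms the workaround described above is small: pa_cvolume_set() replicates one level across a given channel count, and the device write is then a single context call. A minimal sketch with hypothetical names (direct libpulse calls here, whereas the code below goes through the LATE() symbol-table wrappers):

    pa_cvolume cv;
    pa_cvolume_set(&cv, device_channels, new_volume);   // same level on every channel
    pa_operation* op = pa_context_set_source_volume_by_index(
        context, device_index, &cv, NULL, NULL);        // NULL: no completion callback
    if (op)
        pa_operation_unref(op);                         // fire-and-forget, as below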
+ + uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; + + PaLock(); + + // Get the actual stream device index if we have a connected stream + // The device used by the stream can be changed + // during the call + if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream) + != PA_STREAM_UNCONNECTED)) + { + deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream); + } + + bool setFailed(false); + pa_operation* paOperation = NULL; + ResetCallbackVariables(); + + // Get the number of channels for this source + paOperation + = LATE(pa_context_get_source_info_by_index)(_paContext, deviceIndex, + PaSourceInfoCallback, + (void*) this); + + WaitForOperationCompletion(paOperation); + + if (!_callbackValues) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Error getting input channels: %d", + LATE(pa_context_errno)(_paContext)); + PaUnLock(); + return -1; + } + + WebRtc_UWord8 channels = _paChannels; + ResetCallbackVariables(); + + pa_cvolume cVolumes; + LATE(pa_cvolume_set)(&cVolumes, channels, volume); + + // Set the volume for the source + paOperation + = LATE(pa_context_set_source_volume_by_index)(_paContext, deviceIndex, + &cVolumes, + PaSetVolumeCallback, NULL); + + if (!paOperation) + { + setFailed = true; + } + + // Don't need to wait for this to complete. + LATE(pa_operation_unref)(paOperation); + + PaUnLock(); + + // Reset variables altered by callback + ResetCallbackVariables(); + + if (setFailed) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " could not set microphone volume, error %d", + LATE(pa_context_errno)(_paContext)); + return -1; + } + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerLinuxPulse::MicrophoneVolume(WebRtc_UWord32& volume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; + + PaLock(); + + // Get the actual stream device index if we have a connected stream + // The device used by the stream can be changed + // during the call + if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream) + != PA_STREAM_UNCONNECTED)) + { + deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream); + } + + pa_operation* paOperation = NULL; + ResetCallbackVariables(); + + // Get info for this source + paOperation + = LATE(pa_context_get_source_info_by_index)(_paContext, deviceIndex, + PaSourceInfoCallback, + (void*) this); + + WaitForOperationCompletion(paOperation); + + PaUnLock(); + + if (!_callbackValues) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Error getting input volume: %d", + LATE(pa_context_errno)(_paContext)); + return -1; + } + + volume = static_cast<WebRtc_UWord32> (_paVolume); + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerLinuxPulse::MicrophoneVolume() => vol=%i", volume); + + // Reset members modified by callback + ResetCallbackVariables(); + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerLinuxPulse::MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + // PA_VOLUME_NORM corresponds to 100% (0db) + // PA allows up to 150 db amplification (PA_VOLUME_MAX) + // but that doesn't work well for all sound cards + maxVolume = 
static_cast<WebRtc_UWord32> (PA_VOLUME_NORM); + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerLinuxPulse::MinMicrophoneVolume(WebRtc_UWord32& minVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + minVolume = static_cast<WebRtc_UWord32> (PA_VOLUME_MUTED); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize( + WebRtc_UWord16& stepSize) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_paInputDeviceIndex == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " input device index has not been set"); + return -1; + } + + uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; + + PaLock(); + + // Get the actual stream device index if we have a connected stream + // The device used by the stream can be changed + // during the call + if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream) + != PA_STREAM_UNCONNECTED)) + { + deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream); + } + + pa_operation* paOperation = NULL; + ResetCallbackVariables(); + + // Get info for this source + paOperation + = LATE(pa_context_get_source_info_by_index)(_paContext, deviceIndex, + PaSourceInfoCallback, + (void*) this); + + WaitForOperationCompletion(paOperation); + + PaUnLock(); + + if (!_callbackValues) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Error getting step size: %d", + LATE(pa_context_errno)(_paContext)); + return -1; + } + + stepSize = static_cast<WebRtc_UWord16> ((PA_VOLUME_NORM + 1) / _paVolSteps); + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize()" + " => size=%i", stepSize); + + // Reset members modified by callback + ResetCallbackVariables(); + + return 0; +} + +// ============================================================================ +// Private Methods +// ============================================================================ + +void AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context */*c*/, + const pa_sink_info *i, + int eol, void *pThis) +{ + static_cast<AudioMixerManagerLinuxPulse*> (pThis)-> PaSinkInfoCallbackHandler( + i, eol); +} + +void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback( + pa_context */*c*/, + const pa_sink_input_info *i, + int eol, void *pThis) +{ + static_cast<AudioMixerManagerLinuxPulse*> (pThis)-> + PaSinkInputInfoCallbackHandler(i, eol); +} + + +void AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context */*c*/, + const pa_source_info *i, + int eol, void *pThis) +{ + static_cast<AudioMixerManagerLinuxPulse*> (pThis)-> + PaSourceInfoCallbackHandler(i, eol); +} + +void AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context * c, + int success, void */*pThis*/) +{ + if (!success) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1, + " failed to set volume"); + } +} + +void AudioMixerManagerLinuxPulse::PaSinkInfoCallbackHandler( + const pa_sink_info *i, + int eol) +{ + if (eol) + { + // Signal that we are done + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); + return; + } + + _callbackValues = true; + _paChannels = i->channel_map.channels; // Get number of channels + pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value. 
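The loop below (repeated in the other two info handlers) reduces the per-channel volumes to the loudest channel. libpulse ships the same reduction as pa_cvolume_max(); assuming the targeted libpulse version exports it, the loop could collapse to a single call, though the symbol would then also need an entry in PULSE_AUDIO_SYMBOLS_LIST because every libpulse call here is late-bound:

    pa_volume_t paVolume = LATE(pa_cvolume_max)(&i->volume);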
+ for (int j = 0; j < _paChannels; ++j) + { + if (paVolume < i->volume.values[j]) + { + paVolume = i->volume.values[j]; + } + } + _paVolume = paVolume; // get the max volume for any channel + _paMute = i->mute; // get mute status + + // supported since PA 0.9.15 + //_paVolSteps = i->n_volume_steps; // get the number of volume steps + // default value is PA_VOLUME_NORM+1 + _paVolSteps = PA_VOLUME_NORM + 1; +} + +void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallbackHandler( + const pa_sink_input_info *i, + int eol) +{ + if (eol) + { + // Signal that we are done + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); + return; + } + + _callbackValues = true; + _paChannels = i->channel_map.channels; // Get number of channels + pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value. + for (int j = 0; j < _paChannels; ++j) + { + if (paVolume < i->volume.values[j]) + { + paVolume = i->volume.values[j]; + } + } + _paVolume = paVolume; // Get the max volume for any channel + _paMute = i->mute; // Get mute status +} + +void AudioMixerManagerLinuxPulse::PaSourceInfoCallbackHandler( + const pa_source_info *i, + int eol) +{ + if (eol) + { + // Signal that we are done + LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); + return; + } + + _callbackValues = true; + _paChannels = i->channel_map.channels; // Get number of channels + pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value. + for (int j = 0; j < _paChannels; ++j) + { + if (paVolume < i->volume.values[j]) + { + paVolume = i->volume.values[j]; + } + } + _paVolume = paVolume; // Get the max volume for any channel + _paMute = i->mute; // Get mute status + + // supported since PA 0.9.15 + //_paVolSteps = i->n_volume_steps; // Get the number of volume steps + // default value is PA_VOLUME_NORM+1 + _paVolSteps = PA_VOLUME_NORM + 1; +} + +void AudioMixerManagerLinuxPulse::ResetCallbackVariables() const +{ + _paVolume = 0; + _paMute = 0; + _paVolSteps = 0; + _paChannels = 0; + _callbackValues = false; +} + +void AudioMixerManagerLinuxPulse::WaitForOperationCompletion( + pa_operation* paOperation) const +{ + while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) + { + LATE(pa_threaded_mainloop_wait)(_paMainloop); + } + + LATE(pa_operation_unref)(paOperation); +} + +void AudioMixerManagerLinuxPulse::PaLock() const +{ + LATE(pa_threaded_mainloop_lock)(_paMainloop); +} + +void AudioMixerManagerLinuxPulse::PaUnLock() const +{ + LATE(pa_threaded_mainloop_unlock)(_paMainloop); +} + +} diff --git a/src/modules/audio_device/main/source/linux/audio_mixer_manager_pulse_linux.h b/src/modules/audio_device/main/source/linux/audio_mixer_manager_pulse_linux.h new file mode 100644 index 000000000..a2f71f3ad --- /dev/null +++ b/src/modules/audio_device/main/source/linux/audio_mixer_manager_pulse_linux.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H +#define WEBRTC_AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H + +#include "typedefs.h" +#include "audio_device.h" +#include "critical_section_wrapper.h" +#include "pulseaudiosymboltable_linux.h" + +#include <pulse/pulseaudio.h> +#include <stdint.h> + +#ifndef UINT32_MAX +#define UINT32_MAX ((uint32_t)-1) +#endif + +namespace webrtc +{ + +class AudioMixerManagerLinuxPulse +{ +public: + WebRtc_Word32 SetPlayStream(pa_stream* playStream); + WebRtc_Word32 SetRecStream(pa_stream* recStream); + WebRtc_Word32 OpenSpeaker(WebRtc_UWord16 deviceIndex); + WebRtc_Word32 OpenMicrophone(WebRtc_UWord16 deviceIndex); + WebRtc_Word32 SetSpeakerVolume(WebRtc_UWord32 volume); + WebRtc_Word32 SpeakerVolume(WebRtc_UWord32& volume) const; + WebRtc_Word32 MaxSpeakerVolume(WebRtc_UWord32& maxVolume) const; + WebRtc_Word32 MinSpeakerVolume(WebRtc_UWord32& minVolume) const; + WebRtc_Word32 SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const; + WebRtc_Word32 SpeakerVolumeIsAvailable(bool& available); + WebRtc_Word32 SpeakerMuteIsAvailable(bool& available); + WebRtc_Word32 SetSpeakerMute(bool enable); + WebRtc_Word32 StereoPlayoutIsAvailable(bool& available); + WebRtc_Word32 StereoRecordingIsAvailable(bool& available); + WebRtc_Word32 SpeakerMute(bool& enabled) const; + WebRtc_Word32 MicrophoneMuteIsAvailable(bool& available); + WebRtc_Word32 SetMicrophoneMute(bool enable); + WebRtc_Word32 MicrophoneMute(bool& enabled) const; + WebRtc_Word32 MicrophoneBoostIsAvailable(bool& available); + WebRtc_Word32 SetMicrophoneBoost(bool enable); + WebRtc_Word32 MicrophoneBoost(bool& enabled) const; + WebRtc_Word32 MicrophoneVolumeIsAvailable(bool& available); + WebRtc_Word32 SetMicrophoneVolume(WebRtc_UWord32 volume); + WebRtc_Word32 MicrophoneVolume(WebRtc_UWord32& volume) const; + WebRtc_Word32 MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const; + WebRtc_Word32 MinMicrophoneVolume(WebRtc_UWord32& minVolume) const; + WebRtc_Word32 MicrophoneVolumeStepSize(WebRtc_UWord16& stepSize) const; + WebRtc_Word32 SetPulseAudioObjects(pa_threaded_mainloop* mainloop, + pa_context* context); + WebRtc_Word32 Close(); + WebRtc_Word32 CloseSpeaker(); + WebRtc_Word32 CloseMicrophone(); + bool SpeakerIsInitialized() const; + bool MicrophoneIsInitialized() const; + +public: + AudioMixerManagerLinuxPulse(const WebRtc_Word32 id); + ~AudioMixerManagerLinuxPulse(); + +private: + static void PaSinkInfoCallback(pa_context *c, const pa_sink_info *i, + int eol, void *pThis); + static void PaSinkInputInfoCallback(pa_context *c, + const pa_sink_input_info *i, int eol, + void *pThis); + static void PaSourceInfoCallback(pa_context *c, const pa_source_info *i, + int eol, void *pThis); + static void + PaSetVolumeCallback(pa_context * /*c*/, int success, void */*pThis*/); + void PaSinkInfoCallbackHandler(const pa_sink_info *i, int eol); + void PaSinkInputInfoCallbackHandler(const pa_sink_input_info *i, int eol); + void PaSourceInfoCallbackHandler(const pa_source_info *i, int eol); + + void ResetCallbackVariables() const; + void WaitForOperationCompletion(pa_operation* paOperation) const; + void PaLock() const; + void PaUnLock() const; + +private: + CriticalSectionWrapper& _critSect; + WebRtc_Word32 _id; + WebRtc_Word16 _paOutputDeviceIndex; + WebRtc_Word16 _paInputDeviceIndex; + + pa_stream* _paPlayStream; + pa_stream* _paRecStream; + + pa_threaded_mainloop* _paMainloop; + pa_context* _paContext; + + mutable WebRtc_UWord32 _paVolume; + mutable WebRtc_UWord32 _paMute; + mutable WebRtc_UWord32 _paVolSteps; + bool 
_paSpeakerMute; + mutable WebRtc_UWord32 _paSpeakerVolume; + mutable WebRtc_UWord8 _paChannels; + bool _paObjectsSet; + mutable bool _callbackValues; + + WebRtc_UWord8 _micVolChannels; + WebRtc_UWord8 _spkVolChannels; +}; + +} + +#endif // WEBRTC_AUDIO_DEVICE_AUDIO_MIXER_MANAGER_PULSE_LINUX_H diff --git a/src/modules/audio_device/main/source/linux/latebindingsymboltable_linux.cc b/src/modules/audio_device/main/source/linux/latebindingsymboltable_linux.cc new file mode 100644 index 000000000..8f3c7c8d4 --- /dev/null +++ b/src/modules/audio_device/main/source/linux/latebindingsymboltable_linux.cc @@ -0,0 +1,116 @@ +/* + * libjingle + * Copyright 2004--2010, Google Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "latebindingsymboltable_linux.h" + +#ifdef WEBRTC_LINUX +#include <dlfcn.h> +#endif + +// TODO(grunell): Either put inside webrtc namespace or use webrtc:: instead. 
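One lightweight way to resolve the TODO above: drop the using-directive and qualify the few webrtc names this file actually uses, which are the trace enums, e.g. (sketch):

    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioDevice, -1,
                 "Error loading symbol %s : %s", symbol_name, err);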
+using namespace webrtc; + +namespace webrtc_adm_linux { + +inline static const char *GetDllError() { +#ifdef WEBRTC_LINUX + char *err = dlerror(); + if (err) { + return err; + } else { + return "No error"; + } +#else +#error Not implemented +#endif +} + +DllHandle InternalLoadDll(const char dll_name[]) { +#ifdef WEBRTC_LINUX + DllHandle handle = dlopen(dll_name, RTLD_NOW); +#else +#error Not implemented +#endif + if (handle == kInvalidDllHandle) { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1, + "Can't load %s : %s", dll_name, GetDllError()); + } + return handle; +} + +void InternalUnloadDll(DllHandle handle) { +#ifdef WEBRTC_LINUX + if (dlclose(handle) != 0) { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1, + "%s", GetDllError()); + } +#else +#error Not implemented +#endif +} + +static bool LoadSymbol(DllHandle handle, + const char *symbol_name, + void **symbol) { +#ifdef WEBRTC_LINUX + *symbol = dlsym(handle, symbol_name); + char *err = dlerror(); + if (err) { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1, + "Error loading symbol %s : %s", symbol_name, err); + return false; + } else if (!*symbol) { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1, + "Symbol %s is NULL", symbol_name); + return false; + } + return true; +#else +#error Not implemented +#endif +} + +// This routine MUST assign SOME value for every symbol, even if that value is +// NULL, or else some symbols may be left with uninitialized data that the +// caller may later interpret as a valid address. +bool InternalLoadSymbols(DllHandle handle, + int num_symbols, + const char *const symbol_names[], + void *symbols[]) { +#ifdef WEBRTC_LINUX + // Clear any old errors. + dlerror(); +#endif + for (int i = 0; i < num_symbols; ++i) { + if (!LoadSymbol(handle, symbol_names[i], &symbols[i])) { + return false; + } + } + return true; +} + +} // namespace webrtc_adm_linux diff --git a/src/modules/audio_device/main/source/linux/latebindingsymboltable_linux.h b/src/modules/audio_device/main/source/linux/latebindingsymboltable_linux.h new file mode 100644 index 000000000..91d25aa2d --- /dev/null +++ b/src/modules/audio_device/main/source/linux/latebindingsymboltable_linux.h @@ -0,0 +1,195 @@ +/* + * libjingle + * Copyright 2004--2010, Google Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WEBRTC_AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H +#define WEBRTC_AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H + +#include <assert.h> +#include <stddef.h> // for NULL +#include <string.h> + +#include "constructor_magic.h" +#include "trace.h" + +// This file provides macros for creating "symbol table" classes to simplify the +// dynamic loading of symbols from DLLs. Currently the implementation only +// supports Linux and pure C symbols. +// See talk/sound/pulseaudiosymboltable.(h|cc) for an example. + +namespace webrtc_adm_linux { + +#ifdef WEBRTC_LINUX +typedef void *DllHandle; + +const DllHandle kInvalidDllHandle = NULL; +#else +#error Not implemented +#endif + +// These are helpers for use only by the class below. +DllHandle InternalLoadDll(const char dll_name[]); + +void InternalUnloadDll(DllHandle handle); + +bool InternalLoadSymbols(DllHandle handle, + int num_symbols, + const char *const symbol_names[], + void *symbols[]); + +template <int SYMBOL_TABLE_SIZE, + const char *kDllName, + const char *const kSymbolNames[]> +class LateBindingSymbolTable { + public: + LateBindingSymbolTable() + : handle_(kInvalidDllHandle), + undefined_symbols_(false) { + memset(symbols_, 0, sizeof(symbols_)); + } + + ~LateBindingSymbolTable() { + Unload(); + } + + static int NumSymbols() { + return SYMBOL_TABLE_SIZE; + } + + // We do not use this, but we offer it for theoretical convenience. + static const char *GetSymbolName(int index) { + assert(index < NumSymbols()); + return kSymbolNames[index]; + } + + bool IsLoaded() const { + return handle_ != kInvalidDllHandle; + } + + // Loads the DLL and the symbol table. Returns true iff the DLL and symbol + // table loaded successfully. + bool Load() { + if (IsLoaded()) { + return true; + } + if (undefined_symbols_) { + // We do not attempt to load again because repeated attempts are not + // likely to succeed and DLL loading is costly. + //WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1, + // "We know there are undefined symbols"); + return false; + } + handle_ = InternalLoadDll(kDllName); + if (!IsLoaded()) { + return false; + } + if (!InternalLoadSymbols(handle_, NumSymbols(), kSymbolNames, symbols_)) { + undefined_symbols_ = true; + Unload(); + return false; + } + return true; + } + + void Unload() { + if (!IsLoaded()) { + return; + } + InternalUnloadDll(handle_); + handle_ = kInvalidDllHandle; + memset(symbols_, 0, sizeof(symbols_)); + } + + // Retrieves the given symbol. NOTE: Recommended to use LATESYM_GET below + // instead of this. + void *GetSymbol(int index) const { + assert(IsLoaded()); + assert(index < NumSymbols()); + return symbols_[index]; + } + + private: + DllHandle handle_; + bool undefined_symbols_; + void *symbols_[SYMBOL_TABLE_SIZE]; + + DISALLOW_COPY_AND_ASSIGN(LateBindingSymbolTable); +}; + +// This macro must be invoked in a header to declare a symbol table class. +#define LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(ClassName) \ +enum { + +// This macro must be invoked in the header declaration once for each symbol +// (recommended to use an X-Macro to avoid duplication). 
+// This macro defines an enum with names built from the symbols, which +// essentially creates a hash table in the compiler from symbol names to their +// indices in the symbol table class. +#define LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(ClassName, sym) \ + ClassName##_SYMBOL_TABLE_INDEX_##sym, + +// This macro completes the header declaration. +#define LATE_BINDING_SYMBOL_TABLE_DECLARE_END(ClassName) \ + ClassName##_SYMBOL_TABLE_SIZE \ +}; \ +\ +extern const char ClassName##_kDllName[]; \ +extern const char *const \ + ClassName##_kSymbolNames[ClassName##_SYMBOL_TABLE_SIZE]; \ +\ +typedef ::webrtc_adm_linux::LateBindingSymbolTable<ClassName##_SYMBOL_TABLE_SIZE, \ + ClassName##_kDllName, ClassName##_kSymbolNames> \ + ClassName; + +// This macro must be invoked in a .cc file to define a previously-declared +// symbol table class. +#define LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(ClassName, dllName) \ +const char ClassName##_kDllName[] = dllName; \ +const char *const ClassName##_kSymbolNames[ClassName##_SYMBOL_TABLE_SIZE] = { + +// This macro must be invoked in the .cc definition once for each symbol +// (recommended to use an X-Macro to avoid duplication). +// This would have to use the mangled name if we were to ever support C++ +// symbols. +#define LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(ClassName, sym) \ + #sym, + +#define LATE_BINDING_SYMBOL_TABLE_DEFINE_END(ClassName) \ +}; + +// Index of a given symbol in the given symbol table class. +#define LATESYM_INDEXOF(ClassName, sym) \ + (ClassName##_SYMBOL_TABLE_INDEX_##sym) + +// Returns a reference to the given late-binded symbol, with the correct type. +#define LATESYM_GET(ClassName, inst, sym) \ + (*reinterpret_cast<typeof(&sym)*>( \ + (inst)->GetSymbol(LATESYM_INDEXOF(ClassName, sym)))) + +} // namespace webrtc_adm_linux + +#endif // WEBRTC_AUDIO_DEVICE_LATEBINDINGSYMBOLTABLE_LINUX_H diff --git a/src/modules/audio_device/main/source/linux/pulseaudiosymboltable_linux.cc b/src/modules/audio_device/main/source/linux/pulseaudiosymboltable_linux.cc new file mode 100644 index 000000000..ae663f700 --- /dev/null +++ b/src/modules/audio_device/main/source/linux/pulseaudiosymboltable_linux.cc @@ -0,0 +1,39 @@ +/* + * libjingle + * Copyright 2004--2010, Google Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "pulseaudiosymboltable_linux.h" + +namespace webrtc_adm_linux_pulse { + +LATE_BINDING_SYMBOL_TABLE_DEFINE_BEGIN(PulseAudioSymbolTable, "libpulse.so.0") +#define X(sym) \ + LATE_BINDING_SYMBOL_TABLE_DEFINE_ENTRY(PulseAudioSymbolTable, sym) +PULSE_AUDIO_SYMBOLS_LIST +#undef X +LATE_BINDING_SYMBOL_TABLE_DEFINE_END(PulseAudioSymbolTable) + +} // namespace webrtc_adm_linux_pulse diff --git a/src/modules/audio_device/main/source/linux/pulseaudiosymboltable_linux.h b/src/modules/audio_device/main/source/linux/pulseaudiosymboltable_linux.h new file mode 100644 index 000000000..049509ba8 --- /dev/null +++ b/src/modules/audio_device/main/source/linux/pulseaudiosymboltable_linux.h @@ -0,0 +1,104 @@ +/* + * libjingle + * Copyright 2004--2010, Google Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WEBRTC_AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H +#define WEBRTC_AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H + +#include "latebindingsymboltable_linux.h" + +namespace webrtc_adm_linux_pulse { + +// The PulseAudio symbols we need, as an X-Macro list. 
+// This list must contain precisely every libpulse function that is used in +// the ADM LINUX PULSE Device and Mixer classes +#define PULSE_AUDIO_SYMBOLS_LIST \ + X(pa_bytes_per_second) \ + X(pa_context_connect) \ + X(pa_context_disconnect) \ + X(pa_context_errno) \ + X(pa_context_get_protocol_version) \ + X(pa_context_get_server_info) \ + X(pa_context_get_sink_info_list) \ + X(pa_context_get_sink_info_by_index) \ + X(pa_context_get_sink_info_by_name) \ + X(pa_context_get_sink_input_info) \ + X(pa_context_get_source_info_by_index) \ + X(pa_context_get_source_info_by_name) \ + X(pa_context_get_source_info_list) \ + X(pa_context_get_state) \ + X(pa_context_new) \ + X(pa_context_set_sink_input_volume) \ + X(pa_context_set_sink_input_mute) \ + X(pa_context_set_source_volume_by_index) \ + X(pa_context_set_source_mute_by_index) \ + X(pa_context_set_state_callback) \ + X(pa_context_unref) \ + X(pa_cvolume_set) \ + X(pa_operation_get_state) \ + X(pa_operation_unref) \ + X(pa_stream_connect_playback) \ + X(pa_stream_connect_record) \ + X(pa_stream_disconnect) \ + X(pa_stream_drop) \ + X(pa_stream_get_device_index) \ + X(pa_stream_get_index) \ + X(pa_stream_get_latency) \ + X(pa_stream_get_sample_spec) \ + X(pa_stream_get_state) \ + X(pa_stream_new) \ + X(pa_stream_peek) \ + X(pa_stream_readable_size) \ + X(pa_stream_set_buffer_attr) \ + X(pa_stream_set_overflow_callback) \ + X(pa_stream_set_read_callback) \ + X(pa_stream_set_state_callback) \ + X(pa_stream_set_underflow_callback) \ + X(pa_stream_set_write_callback) \ + X(pa_stream_unref) \ + X(pa_stream_writable_size) \ + X(pa_stream_write) \ + X(pa_strerror) \ + X(pa_threaded_mainloop_free) \ + X(pa_threaded_mainloop_get_api) \ + X(pa_threaded_mainloop_lock) \ + X(pa_threaded_mainloop_new) \ + X(pa_threaded_mainloop_signal) \ + X(pa_threaded_mainloop_start) \ + X(pa_threaded_mainloop_stop) \ + X(pa_threaded_mainloop_unlock) \ + X(pa_threaded_mainloop_wait) + +LATE_BINDING_SYMBOL_TABLE_DECLARE_BEGIN(PulseAudioSymbolTable) +#define X(sym) \ + LATE_BINDING_SYMBOL_TABLE_DECLARE_ENTRY(PulseAudioSymbolTable, sym) +PULSE_AUDIO_SYMBOLS_LIST +#undef X +LATE_BINDING_SYMBOL_TABLE_DECLARE_END(PulseAudioSymbolTable) + +} // namespace webrtc_adm_linux_pulse + +#endif // WEBRTC_AUDIO_DEVICE_PULSEAUDIOSYMBOLTABLE_LINUX_H diff --git a/src/modules/audio_device/main/source/mac/audio_device_mac.cc b/src/modules/audio_device/main/source/mac/audio_device_mac.cc new file mode 100644 index 000000000..8a360cca3 --- /dev/null +++ b/src/modules/audio_device/main/source/mac/audio_device_mac.cc @@ -0,0 +1,3431 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "audio_device_utility.h" +#include "audio_device_mac.h" +#include "audio_device_config.h" + +#include "event_wrapper.h" +#include "trace.h" +#include "thread_wrapper.h" + +#include + +#include // sysctlbyname() +#include // mach_task_self() +#include // OSAtomicCompareAndSwap() +#include "portaudio/pa_ringbuffer.h" + +namespace webrtc +{ + +#define WEBRTC_CA_RETURN_ON_ERR(expr) \ + do { \ + err = expr; \ + if (err != noErr) { \ + logCAMsg(kTraceError, kTraceAudioDevice, _id, \ + "Error in " #expr, (const char *)&err); \ + return -1; \ + } \ + } while(0) + +#define WEBRTC_CA_LOG_ERR(expr) \ + do { \ + err = expr; \ + if (err != noErr) { \ + logCAMsg(kTraceError, kTraceAudioDevice, _id, \ + "Error in " #expr, (const char *)&err); \ + } \ + } while(0) + +#define WEBRTC_CA_LOG_WARN(expr) \ + do { \ + err = expr; \ + if (err != noErr) { \ + logCAMsg(kTraceWarning, kTraceAudioDevice, _id, \ + "Error in " #expr, (const char *)&err); \ + } \ + } while(0) + +enum +{ + MaxNumberDevices = 64 +}; + +void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) +{ + while (1) + { + int32_t oldValue = *theValue; + if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue) + == true) + { + return; + } + } +} + +int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue) +{ + while (1) + { + WebRtc_Word32 value = *theValue; + if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true) + { + return value; + } + } +} + +// CoreAudio errors are best interpreted as four character strings. +void AudioDeviceMac::logCAMsg(const TraceLevel level, + const TraceModule module, + const WebRtc_Word32 id, const char *msg, + const char *err) +{ + assert(msg != NULL); + assert(err != NULL); + +#ifdef WEBRTC_BIG_ENDIAN + WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err); +#else + // We need to flip the characters in this case. 
+ WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err + + 2, err + 1, err); +#endif +} + +AudioDeviceMac::AudioDeviceMac(const WebRtc_Word32 id) : + _ptrAudioBuffer(NULL), + _critSect(*CriticalSectionWrapper::CreateCriticalSection()), + _critSectCb(*CriticalSectionWrapper::CreateCriticalSection()), + _stopEventRec(*EventWrapper::Create()), + _stopEvent(*EventWrapper::Create()), + _captureWorkerThread(NULL), + _renderWorkerThread(NULL), + _captureWorkerThreadId(0), + _renderWorkerThreadId(0), + _id(id), + _mixerManager(id), + _inputDeviceIndex(0), + _outputDeviceIndex(0), + _inputDeviceID(kAudioObjectUnknown), + _outputDeviceID(kAudioObjectUnknown), + _inputDeviceIsSpecified(false), + _outputDeviceIsSpecified(false), + _recChannels(N_REC_CHANNELS), + _playChannels(N_PLAY_CHANNELS), + _captureBufData(NULL), + _renderBufData(NULL), + _playBufType(AudioDeviceModule::kFixedBufferSize), + _initialized(false), + _isShutDown(false), + _recording(false), + _playing(false), + _recIsInitialized(false), + _playIsInitialized(false), + _startRec(false), + _stopRec(false), + _stopPlay(false), + _AGC(false), + _renderDeviceIsAlive(1), + _captureDeviceIsAlive(1), + _twoDevices(true), + _doStop(false), + _doStopRec(false), + _macBookPro(false), + _macBookProPanRight(false), + _captureLatencyUs(0), + _renderLatencyUs(0), + _captureDelayUs(0), + _renderDelayUs(0), + _renderDelayOffsetSamples(0), + _playBufDelayFixed(20), + _playWarning(0), + _playError(0), + _recWarning(0), + _recError(0), + _paCaptureBuffer(NULL), + _paRenderBuffer(NULL), + _captureBufSizeSamples(0), + _renderBufSizeSamples(0) +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, + "%s created", __FUNCTION__); + + assert(&_stopEvent != NULL); + assert(&_stopEventRec != NULL); + + memset(_renderConvertData, 0, sizeof(_renderConvertData)); +} + + +AudioDeviceMac::~AudioDeviceMac() +{ + WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, + "%s destroyed", __FUNCTION__); + + if (!_isShutDown) + { + Terminate(); + } + + if (_captureWorkerThread) + { + delete _captureWorkerThread; + _captureWorkerThread = NULL; + } + + if (_renderWorkerThread) + { + delete _renderWorkerThread; + _renderWorkerThread = NULL; + } + + if (_paRenderBuffer) + { + delete _paRenderBuffer; + _paRenderBuffer = NULL; + } + + if (_paCaptureBuffer) + { + delete _paCaptureBuffer; + _paCaptureBuffer = NULL; + } + + if (_renderBufData) + { + delete[] _renderBufData; + _renderBufData = NULL; + } + + if (_captureBufData) + { + delete[] _captureBufData; + _captureBufData = NULL; + } + + kern_return_t kernErr = KERN_SUCCESS; + kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore); + if (kernErr != KERN_SUCCESS) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " semaphore_destroy() error: %d", kernErr); + } + + kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore); + if (kernErr != KERN_SUCCESS) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " semaphore_destroy() error: %d", kernErr); + } + + delete &_stopEvent; + delete &_stopEventRec; + delete &_critSect; + delete &_critSectCb; +} + +// ============================================================================ +// API +// ============================================================================ + +void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + _ptrAudioBuffer = audioBuffer; + + // inform the AudioBuffer about 
default settings for this implementation + _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); + _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); + _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS); + _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS); +} + +WebRtc_Word32 AudioDeviceMac::ActiveAudioLayer( + AudioDeviceModule::AudioLayer& audioLayer) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + audioLayer = AudioDeviceModule::kPlatformDefaultAudio; + return 0; +} + +WebRtc_Word32 AudioDeviceMac::Init() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_initialized) + { + return 0; + } + + OSStatus err = noErr; + + _isShutDown = false; + + // PortAudio ring buffers require an elementCount which is a power of two. + if (_renderBufData == NULL) + { + UInt32 powerOfTwo = 1; + while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES) + { + powerOfTwo <<= 1; + } + _renderBufSizeSamples = powerOfTwo; + _renderBufData = new SInt16[_renderBufSizeSamples]; + } + + if (_paRenderBuffer == NULL) + { + _paRenderBuffer = new PaUtilRingBuffer; + ring_buffer_size_t bufSize = -1; + bufSize = PaUtil_InitializeRingBuffer(_paRenderBuffer, sizeof(SInt16), + _renderBufSizeSamples, + _renderBufData); + if (bufSize == -1) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, + _id, " PaUtil_InitializeRingBuffer() error"); + return -1; + } + } + + if (_captureBufData == NULL) + { + UInt32 powerOfTwo = 1; + while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) + { + powerOfTwo <<= 1; + } + _captureBufSizeSamples = powerOfTwo; + _captureBufData = new Float32[_captureBufSizeSamples]; + } + + if (_paCaptureBuffer == NULL) + { + _paCaptureBuffer = new PaUtilRingBuffer; + ring_buffer_size_t bufSize = -1; + bufSize = PaUtil_InitializeRingBuffer(_paCaptureBuffer, + sizeof(Float32), + _captureBufSizeSamples, + _captureBufData); + if (bufSize == -1) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, + _id, " PaUtil_InitializeRingBuffer() error"); + return -1; + } + } + + if (_renderWorkerThread == NULL) + { + _renderWorkerThread + = ThreadWrapper::CreateThread(RunRender, this, kRealtimePriority, + "RenderWorkerThread"); + if (_renderWorkerThread == NULL) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, + _id, " Render CreateThread() error"); + return -1; + } + } + + if (_captureWorkerThread == NULL) + { + _captureWorkerThread + = ThreadWrapper::CreateThread(RunCapture, this, kRealtimePriority, + "CaptureWorkerThread"); + if (_captureWorkerThread == NULL) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, + _id, " Capture CreateThread() error"); + return -1; + } + } + + kern_return_t kernErr = KERN_SUCCESS; + kernErr = semaphore_create(mach_task_self(), &_renderSemaphore, + SYNC_POLICY_FIFO, 0); + if (kernErr != KERN_SUCCESS) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, + " semaphore_create() error: %d", kernErr); + return -1; + } + + kernErr = semaphore_create(mach_task_self(), &_captureSemaphore, + SYNC_POLICY_FIFO, 0); + if (kernErr != KERN_SUCCESS) + { + WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, + " semaphore_create() error: %d", kernErr); + return -1; + } + + // Setting RunLoop to NULL here instructs HAL to manage its own thread for + // notifications. This was the default behaviour on OS X 10.5 and earlier, but now + // must be explicitly specified. HAL would otherwise try to use the main thread to + // issue notifications. 
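Before the run-loop property is set below, a note on the two mach semaphores Init() creates above: their consumers only appear later in the file, so here is a minimal sketch of the intended producer/consumer handshake (a hedged illustration with made-up names, not the patch's code):

    #include <mach/mach.h>
    #include <mach/semaphore.h> // semaphore_create(), _signal(), _timedwait()

    static semaphore_t gCaptureSemaphore;

    bool CreateSignaling()
    {
        return semaphore_create(mach_task_self(), &gCaptureSemaphore,
                                SYNC_POLICY_FIFO, 0) == KERN_SUCCESS;
    }

    // Real-time IOProc side: wake the worker after pushing samples.
    void SignalCaptureData()
    {
        semaphore_signal(gCaptureSemaphore);
    }

    // Worker-thread side: block until data arrives, or give up after 1 s.
    bool WaitForCaptureData()
    {
        mach_timespec_t timeout = { 1, 0 }; // seconds, nanoseconds
        return semaphore_timedwait(gCaptureSemaphore, timeout) == KERN_SUCCESS;
    }

The bounded wait is presumably what lets the worker threads notice _captureDeviceIsAlive / _renderDeviceIsAlive dropping to zero during shutdown instead of blocking forever.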
+ AudioObjectPropertyAddress propertyAddress = { + kAudioHardwarePropertyRunLoop, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster }; + CFRunLoopRef runLoop = NULL; + UInt32 size = sizeof(CFRunLoopRef); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(kAudioObjectSystemObject, + &propertyAddress, 0, NULL, size, &runLoop)); + + // Listen for any device changes. + propertyAddress.mSelector = kAudioHardwarePropertyDevices; + WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(kAudioObjectSystemObject, + &propertyAddress, &objectListenerProc, this)); + + // Determine if this is a MacBook Pro + _macBookPro = false; + _macBookProPanRight = false; + char buf[128]; + size_t length = sizeof(buf); + memset(buf, 0, length); + + int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0); + if (intErr != 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Error in sysctlbyname(): %d", intErr); + } else + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Hardware model: %s", buf); + if (strncmp(buf, "MacBookPro", 10) == 0) + { + _macBookPro = true; + } + } + + _playWarning = 0; + _playError = 0; + _recWarning = 0; + _recError = 0; + + _initialized = true; + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::Terminate() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (!_initialized) + { + return 0; + } + + if (_recording) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Recording must be stopped"); + return -1; + } + + if (_playing) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Playback must be stopped"); + return -1; + } + + _critSect.Enter(); + + _mixerManager.Close(); + + OSStatus err = noErr; + int retVal = 0; + + AudioObjectPropertyAddress propertyAddress = { + kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster }; + WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(kAudioObjectSystemObject, + &propertyAddress, &objectListenerProc, this)); + + err = AudioHardwareUnload(); + if (err != noErr) + { + logCAMsg(kTraceError, kTraceAudioDevice, _id, + "Error in AudioHardwareUnload()", (const char*) &err); + retVal = -1; + } + + _critSect.Leave(); + + _isShutDown = true; + _initialized = false; + _outputDeviceIsSpecified = false; + _inputDeviceIsSpecified = false; + + return retVal; +} + +bool AudioDeviceMac::Initialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_initialized); +} + +WebRtc_Word32 AudioDeviceMac::SpeakerIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device.
+ // + if (!wasInitialized && InitSpeaker() == -1) + { + available = false; + return 0; + } + + // Given that InitSpeaker was successful, we know that a valid speaker exists + // + available = true; + + // Close the initialized output mixer + // + if (!wasInitialized) + { + _mixerManager.CloseSpeaker(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::InitSpeaker() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_playing) + { + return -1; + } + + if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1) + { + return -1; + } + + if (_inputDeviceID == _outputDeviceID) + { + _twoDevices = false; + } else + { + _twoDevices = true; + } + + if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1) + { + return -1; + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::MicrophoneIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); + + // Make an attempt to open up the + // input mixer corresponding to the currently selected input device. + // + if (!wasInitialized && InitMicrophone() == -1) + { + available = false; + return 0; + } + + // Given that InitMicrophone was successful, we know that a valid microphone exists + // + available = true; + + // Close the initialized input mixer + // + if (!wasInitialized) + { + _mixerManager.CloseMicrophone(); + } + + return 0; +} + + +WebRtc_Word32 AudioDeviceMac::InitMicrophone() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_recording) + { + return -1; + } + + if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1) + { + return -1; + } + + if (_inputDeviceID == _outputDeviceID) + { + _twoDevices = false; + } else + { + _twoDevices = true; + } + + if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1) + { + return -1; + } + + return 0; +} + +bool AudioDeviceMac::SpeakerIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_mixerManager.SpeakerIsInitialized()); +} + +bool AudioDeviceMac::MicrophoneIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_mixerManager.MicrophoneIsInitialized()); +} + +WebRtc_Word32 AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device. + // + if (!wasInitialized && InitSpeaker() == -1) + { + // If we end up here it means that the selected speaker has no volume + // control.
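SpeakerIsAvailable() above establishes the probe-and-restore shape that every *IsAvailable() method in this file repeats: open the unit if it was not already open, record the outcome, and put things back the way they were. Condensed into one hypothetical helper (illustrative only; the patch keeps the pattern inline):

    // Generic probe: `wasInitialized` comes from the mixer manager; `init`
    // and `close` stand in for InitSpeaker/CloseSpeaker and friends.
    template <typename InitFn, typename CloseFn>
    bool ProbeAvailability(bool wasInitialized, InitFn init, CloseFn close)
    {
        if (!wasInitialized && init() == -1)
        {
            return false; // could not open the unit -> not available
        }
        if (!wasInitialized)
        {
            close(); // leave the object in the state we found it
        }
        return true;
    }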
+ available = false; + return 0; + } + + // Given that InitSpeaker was successful, we know that a volume control exists + // + available = true; + + // Close the initialized output mixer + // + if (!wasInitialized) + { + _mixerManager.CloseSpeaker(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::SetSpeakerVolume(WebRtc_UWord32 volume) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::SetSpeakerVolume(volume=%u)", volume); + + return (_mixerManager.SetSpeakerVolume(volume)); +} + +WebRtc_Word32 AudioDeviceMac::SpeakerVolume(WebRtc_UWord32& volume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord32 level(0); + + if (_mixerManager.SpeakerVolume(level) == -1) + { + return -1; + } + + volume = level; + return 0; +} + +WebRtc_Word32 AudioDeviceMac::SetWaveOutVolume(WebRtc_UWord16 volumeLeft, + WebRtc_UWord16 volumeRight) +{ + WEBRTC_TRACE( + kTraceModuleCall, + kTraceAudioDevice, + _id, + "AudioDeviceMac::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)", + volumeLeft, volumeRight); + + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " API call not supported on this platform"); + return -1; +} + +WebRtc_Word32 +AudioDeviceMac::WaveOutVolume(WebRtc_UWord16& /*volumeLeft*/, + WebRtc_UWord16& /*volumeRight*/) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " API call not supported on this platform"); + return -1; +} + +WebRtc_Word32 AudioDeviceMac::MaxSpeakerVolume(WebRtc_UWord32& maxVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord32 maxVol(0); + + if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) + { + return -1; + } + + maxVolume = maxVol; + return 0; +} + +WebRtc_Word32 AudioDeviceMac::MinSpeakerVolume(WebRtc_UWord32& minVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord32 minVol(0); + + if (_mixerManager.MinSpeakerVolume(minVol) == -1) + { + return -1; + } + + minVolume = minVol; + return 0; +} + +WebRtc_Word32 +AudioDeviceMac::SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord16 delta(0); + + if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) + { + return -1; + } + + stepSize = delta; + return 0; +} + +WebRtc_Word32 AudioDeviceMac::SpeakerMuteIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool isAvailable(false); + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + // Make an attempt to open up the + // output mixer corresponding to the currently selected output device. + // + if (!wasInitialized && InitSpeaker() == -1) + { + // If we end up here it means that the selected speaker has no volume + // control, hence it is safe to state that there is no mute control + // already at this stage. 
+ available = false; + return 0; + } + + // Check if the selected speaker has a mute control + // + _mixerManager.SpeakerMuteIsAvailable(isAvailable); + + available = isAvailable; + + // Close the initialized output mixer + // + if (!wasInitialized) + { + _mixerManager.CloseSpeaker(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::SetSpeakerMute(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::SetSpeakerMute(enable=%u)", enable); + return (_mixerManager.SetSpeakerMute(enable)); +} + +WebRtc_Word32 AudioDeviceMac::SpeakerMute(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool muted(0); + + if (_mixerManager.SpeakerMute(muted) == -1) + { + return -1; + } + + enabled = muted; + return 0; +} + +WebRtc_Word32 AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool isAvailable(false); + bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); + + // Make an attempt to open up the + // input mixer corresponding to the currently selected input device. + // + if (!wasInitialized && InitMicrophone() == -1) + { + // If we end up here it means that the selected microphone has no volume + // control, hence it is safe to state that there is no mute control + // already at this stage. + available = false; + return 0; + } + + // Check if the selected microphone has a mute control + // + _mixerManager.MicrophoneMuteIsAvailable(isAvailable); + available = isAvailable; + + // Close the initialized input mixer + // + if (!wasInitialized) + { + _mixerManager.CloseMicrophone(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::SetMicrophoneMute(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::SetMicrophoneMute(enable=%u)", enable); + return (_mixerManager.SetMicrophoneMute(enable)); +} + +WebRtc_Word32 AudioDeviceMac::MicrophoneMute(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool muted(0); + + if (_mixerManager.MicrophoneMute(muted) == -1) + { + return -1; + } + + enabled = muted; + return 0; +} + +WebRtc_Word32 AudioDeviceMac::MicrophoneBoostIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool isAvailable(false); + bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); + + // Enumerate all available microphones and make an attempt to open up the + // input mixer corresponding to the currently selected input device. + // + if (!wasInitialized && InitMicrophone() == -1) + { + // If we end up here it means that the selected microphone has no volume + // control, hence it is safe to state that there is no boost control + // already at this stage.
+ available = false; + return 0; + } + + // Check if the selected microphone has a boost control + // + _mixerManager.MicrophoneBoostIsAvailable(isAvailable); + available = isAvailable; + + // Close the initialized input mixer + // + if (!wasInitialized) + { + _mixerManager.CloseMicrophone(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::SetMicrophoneBoost(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::SetMicrophoneBoost(enable=%u)", enable); + + return (_mixerManager.SetMicrophoneBoost(enable)); +} + +WebRtc_Word32 AudioDeviceMac::MicrophoneBoost(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool onOff(0); + + if (_mixerManager.MicrophoneBoost(onOff) == -1) + { + return -1; + } + + enabled = onOff; + return 0; +} + +WebRtc_Word32 AudioDeviceMac::StereoRecordingIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool isAvailable(false); + bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); + + if (!wasInitialized && InitMicrophone() == -1) + { + // Cannot open the specified device + available = false; + return 0; + } + + // Check if the selected microphone can record stereo + // + _mixerManager.StereoRecordingIsAvailable(isAvailable); + available = isAvailable; + + // Close the initialized input mixer + // + if (!wasInitialized) + { + _mixerManager.CloseMicrophone(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::SetStereoRecording(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::SetStereoRecording(enable=%u)", enable); + + if (enable) + _recChannels = 2; + else + _recChannels = 1; + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::StereoRecording(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_recChannels == 2) + enabled = true; + else + enabled = false; + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::StereoPlayoutIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool isAvailable(false); + bool wasInitialized = _mixerManager.SpeakerIsInitialized(); + + if (!wasInitialized && InitSpeaker() == -1) + { + // Cannot open the specified device + available = false; + return 0; + } + + // Check if the selected speaker can play out stereo + // + _mixerManager.StereoPlayoutIsAvailable(isAvailable); + available = isAvailable; + + // Close the initialized output mixer + // + if (!wasInitialized) + { + _mixerManager.CloseSpeaker(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::SetStereoPlayout(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::SetStereoPlayout(enable=%u)", enable); + + if (enable) + _playChannels = 2; + else + _playChannels = 1; + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::StereoPlayout(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_playChannels == 2) + enabled = true; + else + enabled = false; + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::SetAGC(bool enable) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::SetAGC(enable=%d)", enable); + + _AGC = enable; + + return 0; +} + +bool AudioDeviceMac::AGC() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + return _AGC; +} + +WebRtc_Word32
AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); + + // Make an attempt to open up the + // input mixer corresponding to the currently selected input device. + // + if (!wasInitialized && InitMicrophone() == -1) + { + // If we end up here it means that the selected microphone has no volume + // control. + available = false; + return 0; + } + + // Given that InitMicrophone was successful, we know that a volume control + // exists + // + available = true; + + // Close the initialized input mixer + // + if (!wasInitialized) + { + _mixerManager.CloseMicrophone(); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::SetMicrophoneVolume(WebRtc_UWord32 volume) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::SetMicrophoneVolume(volume=%u)", volume); + + return (_mixerManager.SetMicrophoneVolume(volume)); +} + +WebRtc_Word32 AudioDeviceMac::MicrophoneVolume(WebRtc_UWord32& volume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord32 level(0); + + if (_mixerManager.MicrophoneVolume(level) == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " failed to retrieve current microphone level"); + return -1; + } + + volume = level; + return 0; +} + +WebRtc_Word32 +AudioDeviceMac::MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord32 maxVol(0); + + if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) + { + return -1; + } + + maxVolume = maxVol; + return 0; +} + +WebRtc_Word32 +AudioDeviceMac::MinMicrophoneVolume(WebRtc_UWord32& minVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord32 minVol(0); + + if (_mixerManager.MinMicrophoneVolume(minVol) == -1) + { + return -1; + } + + minVolume = minVol; + return 0; +} + +WebRtc_Word32 +AudioDeviceMac::MicrophoneVolumeStepSize(WebRtc_UWord16& stepSize) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WebRtc_UWord16 delta(0); + + if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1) + { + return -1; + } + + stepSize = delta; + return 0; +} + +WebRtc_Word16 AudioDeviceMac::PlayoutDevices() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + AudioDeviceID playDevices[MaxNumberDevices]; + return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices, + MaxNumberDevices); +} + +WebRtc_Word32 AudioDeviceMac::SetPlayoutDevice(WebRtc_UWord16 index) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::SetPlayoutDevice(index=%u)", index); + + if (_playIsInitialized) + { + return -1; + } + + AudioDeviceID playDevices[MaxNumberDevices]; + WebRtc_UWord32 nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput, + playDevices, MaxNumberDevices); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " number of available waveform-audio output devices is %u", + nDevices); + + if (index > (nDevices - 1)) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " device index is out of range [0,%u]", (nDevices - 1)); + return -1; + } + + _outputDeviceIndex = index; + _outputDeviceIsSpecified = true; + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::SetPlayoutDevice( + AudioDeviceModule::WindowsDeviceType /*device*/) +{ + WEBRTC_TRACE(kTraceError,
kTraceAudioDevice, _id, + "WindowsDeviceType not supported"); + return -1; +} + +WebRtc_Word32 AudioDeviceMac::PlayoutDeviceName( + WebRtc_UWord16 index, + WebRtc_Word8 name[kAdmMaxDeviceNameSize], + WebRtc_Word8 guid[kAdmMaxGuidSize]) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::PlayoutDeviceName(index=%u)", index); + + const WebRtc_UWord16 nDevices(PlayoutDevices()); + + if ((index > (nDevices - 1)) || (name == NULL)) + { + return -1; + } + + memset(name, 0, kAdmMaxDeviceNameSize); + + if (guid != NULL) + { + memset(guid, 0, kAdmMaxGuidSize); + } + + return GetDeviceName(kAudioDevicePropertyScopeOutput, index, name); +} + +WebRtc_Word32 AudioDeviceMac::RecordingDeviceName( + WebRtc_UWord16 index, + WebRtc_Word8 name[kAdmMaxDeviceNameSize], + WebRtc_Word8 guid[kAdmMaxGuidSize]) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::RecordingDeviceName(index=%u)", index); + + const WebRtc_UWord16 nDevices(RecordingDevices()); + + if ((index > (nDevices - 1)) || (name == NULL)) + { + return -1; + } + + memset(name, 0, kAdmMaxDeviceNameSize); + + if (guid != NULL) + { + memset(guid, 0, kAdmMaxGuidSize); + } + + return GetDeviceName(kAudioDevicePropertyScopeInput, index, name); +} + +WebRtc_Word16 AudioDeviceMac::RecordingDevices() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + AudioDeviceID recDevices[MaxNumberDevices]; + return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices, + MaxNumberDevices); +} + +WebRtc_Word32 AudioDeviceMac::SetRecordingDevice(WebRtc_UWord16 index) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::SetRecordingDevice(index=%u)", index); + + if (_recIsInitialized) + { + return -1; + } + + AudioDeviceID recDevices[MaxNumberDevices]; + WebRtc_UWord32 nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput, + recDevices, MaxNumberDevices); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " number of available waveform-audio input devices is %u", + nDevices); + + if (index > (nDevices - 1)) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " device index is out of range [0,%u]", (nDevices - 1)); + return -1; + } + + _inputDeviceIndex = index; + _inputDeviceIsSpecified = true; + + return 0; +} + + +WebRtc_Word32 +AudioDeviceMac::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType /*device*/) +{ + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "WindowsDeviceType not supported"); + return -1; +} + +WebRtc_Word32 AudioDeviceMac::PlayoutIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + available = false; + + // Try to initialize the playout side + WebRtc_Word32 res = InitPlayout(); + + // Cancel effect of initialization + StopPlayout(); + + if (res != -1) + { + available = true; + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::RecordingIsAvailable(bool& available) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + available = false; + + // Try to initialize the recording side + WebRtc_Word32 res = InitRecording(); + + // Cancel effect of initialization + StopRecording(); + + if (res != -1) + { + available = true; + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::InitPlayout() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_playing) + { + return -1; + } + + if (!_outputDeviceIsSpecified) + { + return -1; + } + + if
(_playIsInitialized) + { + return 0; + } + + // Initialize the speaker (devices might have been added or removed) + if (InitSpeaker() == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " InitSpeaker() failed"); + } + + if (!MicrophoneIsInitialized()) + { + // Make this call to check if we are using + // one or two devices (_twoDevices) + bool available = false; + if (MicrophoneIsAvailable(available) == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " MicrophoneIsAvailable() failed"); + } + } + + PaUtil_FlushRingBuffer(_paRenderBuffer); + + OSStatus err = noErr; + UInt32 size = 0; + _renderDelayOffsetSamples = 0; + _renderDelayUs = 0; + _renderLatencyUs = 0; + _renderDeviceIsAlive = 1; + _doStop = false; + + // The internal microphone of a MacBook Pro is located under the left speaker + // grille. When the internal speakers are in use, we want to fully stereo + // pan to the right. + AudioObjectPropertyAddress + propertyAddress = { kAudioDevicePropertyDataSource, + kAudioDevicePropertyScopeOutput, 0 }; + if (_macBookPro) + { + _macBookProPanRight = false; + Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID, + &propertyAddress); + if (hasProperty) + { + UInt32 dataSource = 0; + size = sizeof(dataSource); + WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData(_outputDeviceID, + &propertyAddress, 0, NULL, &size, &dataSource)); + + if (dataSource == 'ispk') + { + _macBookProPanRight = true; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, + _id, + "MacBook Pro using internal speakers; stereo" + " panning right"); + } else + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, + _id, "MacBook Pro not using internal speakers"); + } + + // Add a listener to determine if the status changes. + WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID, + &propertyAddress, &objectListenerProc, this)); + } + } + + // Get current stream description + propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; + memset(&_outStreamFormat, 0, sizeof(_outStreamFormat)); + size = sizeof(_outStreamFormat); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID, + &propertyAddress, 0, NULL, &size, &_outStreamFormat)); + + if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) + { + logCAMsg(kTraceError, kTraceAudioDevice, _id, + "Unacceptable output stream format -> mFormatID", + (const char *) &_outStreamFormat.mFormatID); + return -1; + } + + if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Too many channels on device -> mChannelsPerFrame = %d", + _outStreamFormat.mChannelsPerFrame); + return -1; + } + + if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Non-interleaved audio data is not supported.", + "AudioHardware streams should not have this format."); + return -1; + } + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Output stream format:"); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "mSampleRate = %f, mChannelsPerFrame = %u", + _outStreamFormat.mSampleRate, + _outStreamFormat.mChannelsPerFrame); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "mBytesPerPacket = %u, mFramesPerPacket = %u", + _outStreamFormat.mBytesPerPacket, + _outStreamFormat.mFramesPerPacket); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "mBytesPerFrame = %u, mBitsPerChannel = %u", + _outStreamFormat.mBytesPerFrame, + _outStreamFormat.mBitsPerChannel); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"mFormatFlags = %u, mChannelsPerFrame = %u", + _outStreamFormat.mFormatFlags, + _outStreamFormat.mChannelsPerFrame); + logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID", + (const char *) &_outStreamFormat.mFormatID); + + // Our preferred format to work with + _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC; + if (_outStreamFormat.mChannelsPerFrame >= 2 && (_playChannels == 2)) + { + _outDesiredFormat.mChannelsPerFrame = 2; + } else + { + // Disable stereo playout when we only have one channel on the device. + _outDesiredFormat.mChannelsPerFrame = 1; + _playChannels = 1; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Stereo playout unavailable on this device"); + } + + if (_ptrAudioBuffer) + { + // Update audio buffer with the selected parameters + _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); + _ptrAudioBuffer->SetPlayoutChannels((WebRtc_UWord8) _playChannels); + } + + _renderDelayOffsetSamples = _renderBufSizeSamples - N_BUFFERS_OUT + * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame; + + _outDesiredFormat.mBytesPerPacket = _outDesiredFormat.mChannelsPerFrame + * sizeof(SInt16); + _outDesiredFormat.mFramesPerPacket = 1; // In uncompressed audio, + // a packet is one frame. + _outDesiredFormat.mBytesPerFrame = _outDesiredFormat.mChannelsPerFrame + * sizeof(SInt16); + _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8; + + _outDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger + | kLinearPCMFormatFlagIsPacked; +#ifdef WEBRTC_BIG_ENDIAN + _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian; +#endif + _outDesiredFormat.mFormatID = kAudioFormatLinearPCM; + + WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_outDesiredFormat, &_outStreamFormat, + &_renderConverter)); + + // First try to set buffer size to desired value (_playBufDelayFixed) + UInt32 bufByteCount = (UInt32)((_outStreamFormat.mSampleRate / 1000.0) + * _playBufDelayFixed * _outStreamFormat.mChannelsPerFrame + * sizeof(Float32)); + if (_outStreamFormat.mFramesPerPacket != 0) + { + if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0) + { + bufByteCount = ((UInt32)(bufByteCount + / _outStreamFormat.mFramesPerPacket) + 1) + * _outStreamFormat.mFramesPerPacket; + } + } + + // Ensure the buffer size is within the acceptable range provided by the device. 
+ propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange; + AudioValueRange range; + size = sizeof(range); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID, + &propertyAddress, 0, NULL, &size, &range)); + if (range.mMinimum > bufByteCount) + { + bufByteCount = range.mMinimum; + } else if (range.mMaximum < bufByteCount) + { + bufByteCount = range.mMaximum; + } + + propertyAddress.mSelector = kAudioDevicePropertyBufferSize; + size = sizeof(bufByteCount); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID, + &propertyAddress, 0, NULL, size, &bufByteCount)); + + // Get render device latency + propertyAddress.mSelector = kAudioDevicePropertyLatency; + UInt32 latency = 0; + size = sizeof(UInt32); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID, + &propertyAddress, 0, NULL, &size, &latency)); + _renderLatencyUs = (WebRtc_UWord32) ((1.0e6 * latency) + / _outStreamFormat.mSampleRate); + + // Get render stream latency + propertyAddress.mSelector = kAudioDevicePropertyStreams; + AudioStreamID stream = 0; + size = sizeof(AudioStreamID); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID, + &propertyAddress, 0, NULL, &size, &stream)); + propertyAddress.mSelector = kAudioStreamPropertyLatency; + size = sizeof(UInt32); + latency = 0; + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID, + &propertyAddress, 0, NULL, &size, &latency)); + _renderLatencyUs += (WebRtc_UWord32) ((1.0e6 * latency) + / _outStreamFormat.mSampleRate); + + // Listen for format changes + propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; + WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(_outputDeviceID, + &propertyAddress, &objectListenerProc, this)); + + // Listen for processor overloads + propertyAddress.mSelector = kAudioDeviceProcessorOverload; + WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID, + &propertyAddress, &objectListenerProc, this)); + + if (_twoDevices || !_recIsInitialized) + { +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + if (AudioDeviceCreateIOProcID != NULL) + { + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_outputDeviceID, + deviceIOProc, this, &_deviceIOProcID)); + } + else + { +#endif + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceAddIOProc(_outputDeviceID, deviceIOProc, this)); +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + } +#endif + } + + // Mark playout side as initialized + _playIsInitialized = true; + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " initial playout status: _renderDelayOffsetSamples=%d," + " _renderDelayUs=%d, _renderLatencyUs=%d", + _renderDelayOffsetSamples, _renderDelayUs, _renderLatencyUs); + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::InitRecording() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (_recording) + { + return -1; + } + + if (!_inputDeviceIsSpecified) + { + return -1; + } + + if (_recIsInitialized) + { + return 0; + } + + // Initialize the microphone (devices might have been added or removed) + if (InitMicrophone() == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " InitMicrophone() failed"); + } + + if (!SpeakerIsInitialized()) + { + // Make this call to check if we are using + // one or two devices (_twoDevices) + bool available = false; + if (SpeakerIsAvailable(available) == -1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " SpeakerIsAvailable() failed"); + } + } + + OSStatus err = noErr; + UInt32 size = 0; + + 
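Both directions hand audio across threads through the PortAudio ring buffer flushed just below: a lock-free single-producer/single-consumer queue, which is why Init() sized its storage to a power of two. A self-contained usage sketch (sizes illustrative; signatures per the bundled pa_ringbuffer.h, older copies of which spell ring_buffer_size_t as long):

    #include <CoreAudio/CoreAudio.h> // SInt16
    #include "portaudio/pa_ringbuffer.h"

    void RingBufferSketch()
    {
        static SInt16 storage[1 << 14]; // element count must be a power of two
        PaUtilRingBuffer rb;
        PaUtil_InitializeRingBuffer(&rb, sizeof(SInt16),
                                    sizeof(storage) / sizeof(storage[0]),
                                    storage);

        SInt16 block[480] = { 0 }; // 10 ms at 48 kHz, mono
        // Producer thread: write only when there is room.
        if (PaUtil_GetRingBufferWriteAvailable(&rb) >= 480)
        {
            PaUtil_WriteRingBuffer(&rb, block, 480);
        }
        // Consumer thread: drain whatever has arrived.
        PaUtil_ReadRingBuffer(&rb, block, 480);
    }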
PaUtil_FlushRingBuffer(_paCaptureBuffer); + + _captureDelayUs = 0; + _captureLatencyUs = 0; + _captureDeviceIsAlive = 1; + _doStopRec = false; + + // Get current stream description + AudioObjectPropertyAddress + propertyAddress = { kAudioDevicePropertyStreamFormat, + kAudioDevicePropertyScopeInput, 0 }; + memset(&_inStreamFormat, 0, sizeof(_inStreamFormat)); + size = sizeof(_inStreamFormat); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, &size, &_inStreamFormat)); + + if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) + { + logCAMsg(kTraceError, kTraceAudioDevice, _id, + "Unacceptable input stream format -> mFormatID", + (const char *) &_inStreamFormat.mFormatID); + return -1; + } + + if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + ", Too many channels on device (mChannelsPerFrame = %d)", + _inStreamFormat.mChannelsPerFrame); + return -1; + } + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Input stream format:"); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " mSampleRate = %f, mChannelsPerFrame = %u", + _inStreamFormat.mSampleRate, _inStreamFormat.mChannelsPerFrame); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " mBytesPerPacket = %u, mFramesPerPacket = %u", + _inStreamFormat.mBytesPerPacket, + _inStreamFormat.mFramesPerPacket); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " mBytesPerFrame = %u, mBitsPerChannel = %u", + _inStreamFormat.mBytesPerFrame, + _inStreamFormat.mBitsPerChannel); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " mFormatFlags = %u, mChannelsPerFrame = %u", + _inStreamFormat.mFormatFlags, + _inStreamFormat.mChannelsPerFrame); + logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID", + (const char *) &_inStreamFormat.mFormatID); + + // Our preferred format to work with + if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) + { + _inDesiredFormat.mChannelsPerFrame = 2; + } else + { + // Disable stereo recording when we only have one channel on the device. + _inDesiredFormat.mChannelsPerFrame = 1; + _recChannels = 1; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Stereo recording unavailable on this device"); + } + + if (_ptrAudioBuffer) + { + // Update audio buffer with the selected parameters + _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); + _ptrAudioBuffer->SetRecordingChannels((WebRtc_UWord8) _recChannels); + } + + _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC; + _inDesiredFormat.mBytesPerPacket = _inDesiredFormat.mChannelsPerFrame + * sizeof(SInt16); + _inDesiredFormat.mFramesPerPacket = 1; + _inDesiredFormat.mBytesPerFrame = _inDesiredFormat.mChannelsPerFrame + * sizeof(SInt16); + _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8; + + _inDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger + | kLinearPCMFormatFlagIsPacked; +#ifdef WEBRTC_BIG_ENDIAN + _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian; +#endif + _inDesiredFormat.mFormatID = kAudioFormatLinearPCM; + + WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat, + &_captureConverter)); + + // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO) + // TODO(xians): investigate this block. 
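Stepping back before the buffer-size computation below: the _inDesiredFormat fields filled in above pin down the 16-bit interleaved PCM layout the rest of WebRTC expects, and AudioConverterNew() then bridges it to whatever the device delivers. The same description built standalone (a sketch, not the patch's code):

    #include <string.h>
    #include <CoreAudio/CoreAudio.h>

    AudioStreamBasicDescription MakeSInt16PcmDesc(Float64 sampleRate,
                                                  UInt32 channels)
    {
        AudioStreamBasicDescription d;
        memset(&d, 0, sizeof(d));
        d.mSampleRate = sampleRate; // e.g. N_REC_SAMPLES_PER_SEC
        d.mFormatID = kAudioFormatLinearPCM;
        d.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
                         | kLinearPCMFormatFlagIsPacked;
        d.mChannelsPerFrame = channels;
        d.mFramesPerPacket = 1; // uncompressed audio: one frame per packet
        d.mBitsPerChannel = 8 * sizeof(SInt16);
        d.mBytesPerFrame = channels * sizeof(SInt16);
        d.mBytesPerPacket = d.mBytesPerFrame;
        return d;
    }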
+ UInt32 bufByteCount = (UInt32)((_inStreamFormat.mSampleRate / 1000.0) + * 10.0 * N_BLOCKS_IO * _inStreamFormat.mChannelsPerFrame + * sizeof(Float32)); + if (_inStreamFormat.mFramesPerPacket != 0) + { + if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0) + { + bufByteCount = ((UInt32)(bufByteCount + / _inStreamFormat.mFramesPerPacket) + 1) + * _inStreamFormat.mFramesPerPacket; + } + } + + // Ensure the buffer size is within the acceptable range provided by the device. + propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange; + AudioValueRange range; + size = sizeof(range); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, &size, &range)); + if (range.mMinimum > bufByteCount) + { + bufByteCount = range.mMinimum; + } else if (range.mMaximum < bufByteCount) + { + bufByteCount = range.mMaximum; + } + + propertyAddress.mSelector = kAudioDevicePropertyBufferSize; + size = sizeof(bufByteCount); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, size, &bufByteCount)); + + // Get capture device latency + propertyAddress.mSelector = kAudioDevicePropertyLatency; + UInt32 latency = 0; + size = sizeof(UInt32); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, &size, &latency)); + _captureLatencyUs = (UInt32)((1.0e6 * latency) + / _inStreamFormat.mSampleRate); + + // Get capture stream latency + propertyAddress.mSelector = kAudioDevicePropertyStreams; + AudioStreamID stream = 0; + size = sizeof(AudioStreamID); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, &size, &stream)); + propertyAddress.mSelector = kAudioStreamPropertyLatency; + size = sizeof(UInt32); + latency = 0; + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, &size, &latency)); + _captureLatencyUs += (UInt32)((1.0e6 * latency) + / _inStreamFormat.mSampleRate); + + // Listen for format changes + // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged? 
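One aside before the format-change listener is installed below: the capture latency accumulated above is the device latency plus the stream latency, each reported by CoreAudio in frames and converted to microseconds. With illustrative values at 48 kHz, a 128-frame device latency and a 64-frame stream latency give 1e6 * 128 / 48000 = 2667 us and 1e6 * 64 / 48000 = 1333 us, so _captureLatencyUs ends up near 4000 us. The conversion in isolation:

    #include <CoreAudio/CoreAudio.h> // UInt32, Float64

    // Frames-to-microseconds conversion used for both latency terms.
    UInt32 FramesToMicroseconds(UInt32 frames, Float64 sampleRate)
    {
        return (UInt32)((1.0e6 * frames) / sampleRate);
    }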
+ propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; + WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(_inputDeviceID, + &propertyAddress, &objectListenerProc, this)); + + // Listen for processor overloads + propertyAddress.mSelector = kAudioDeviceProcessorOverload; + WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_inputDeviceID, + &propertyAddress, &objectListenerProc, this)); + + if (_twoDevices) + { +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + if (AudioDeviceCreateIOProcID != NULL) + { + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_inputDeviceID, + inDeviceIOProc, this, &_inDeviceIOProcID)); + } + else + { +#endif + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceAddIOProc(_inputDeviceID, inDeviceIOProc, + this)); +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + } +#endif + } else if (!_playIsInitialized) + { +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + if (AudioDeviceCreateIOProcID != NULL) + { + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_inputDeviceID, deviceIOProc, + this, &_deviceIOProcID)); + } + else + { +#endif + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceAddIOProc(_inputDeviceID, deviceIOProc, + this)); +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + } +#endif + } + + // Mark recording side as initialized + _recIsInitialized = true; + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::StartRecording() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (!_recIsInitialized) + { + return -1; + } + + if (_recording) + { + return 0; + } + + if (!_initialized) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Recording worker thread has not been started"); + return -1; + } + + OSStatus err = noErr; + + unsigned int threadID(0); + if (_captureWorkerThread != NULL) + { + _captureWorkerThread->Start(threadID); + } + _captureWorkerThreadId = threadID; + + if (_twoDevices) + { +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + if (AudioDeviceCreateIOProcID != NULL) + { + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID)); + } + else + { +#endif + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, inDeviceIOProc)); +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + } +#endif + } else if (!_playing) + { +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + if (AudioDeviceCreateIOProcID != NULL) + { + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID)); + } + else + { +#endif + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, deviceIOProc)); +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + } +#endif + } + + _recording = true; + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::StopRecording() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (!_recIsInitialized) + { + return 0; + } + + OSStatus err = noErr; + + // Stop device + int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive); + if (_twoDevices) + { + if (_recording && captureDeviceIsAlive == 1) + { + _doStopRec = true; // Signal to io proc to stop audio device + _critSect.Leave(); // Cannot be under lock, risk of deadlock + if (kEventTimeout == _stopEventRec.Wait(2000)) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, + _id, " Timed out stopping the capture IOProc. 
" + "We likely failed to detect a device removal."); + } + _critSect.Enter(); + _doStopRec = false; + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " Recording stopped"); + } +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + if (AudioDeviceDestroyIOProcID != NULL) + { + WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_inputDeviceID, + _inDeviceIOProcID)); + } + else + { +#endif + WEBRTC_CA_LOG_WARN(AudioDeviceRemoveIOProc(_inputDeviceID, inDeviceIOProc)); +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + } +#endif + } else if (!_playing) + { + // Stop the shared device if playing has stopped as well. + if (_recording && captureDeviceIsAlive == 1) + { + _doStop = true; // Signal to io proc to stop audio device + _critSect.Leave(); // Cannot be under lock, risk of deadlock + if (kEventTimeout == _stopEvent.Wait(2000)) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, + _id, " Timed out stopping the shared IOProc. " + "We likely failed to detect a device removal."); + } + _critSect.Enter(); + _doStop = false; + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + " Recording stopped (shared)"); + } +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + if (AudioDeviceDestroyIOProcID != NULL) + { + WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_inputDeviceID, _deviceIOProcID)); + } + else + { +#endif + WEBRTC_CA_LOG_WARN(AudioDeviceRemoveIOProc(_inputDeviceID, deviceIOProc)); +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + } +#endif + } + + // Setting this signal will allow the worker thread to be stopped. + AtomicSet32(&_captureDeviceIsAlive, 0); + _critSect.Leave(); + if (_captureWorkerThread != NULL) + { + if (!_captureWorkerThread->Stop()) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Timed out waiting for the render worker thread to " + "stop."); + } + } + _critSect.Enter(); + + WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter)); + + // Remove listeners. 
+ AudioObjectPropertyAddress + propertyAddress = { kAudioDevicePropertyStreamFormat, + kAudioDevicePropertyScopeInput, 0 }; + WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID, + &propertyAddress, &objectListenerProc, this)); + + propertyAddress.mSelector = kAudioDeviceProcessorOverload; + WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID, + &propertyAddress, &objectListenerProc, this)); + + _recIsInitialized = false; + _recording = false; + + return 0; +} + +bool AudioDeviceMac::RecordingIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_recIsInitialized); +} + +bool AudioDeviceMac::Recording() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_recording); +} + +bool AudioDeviceMac::PlayoutIsInitialized() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_playIsInitialized); +} + +WebRtc_Word32 AudioDeviceMac::StartPlayout() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (!_playIsInitialized) + { + return -1; + } + + if (_playing) + { + return 0; + } + + OSStatus err = noErr; + + unsigned int threadID(0); + if (_renderWorkerThread != NULL) + { + _renderWorkerThread->Start(threadID); + } + _renderWorkerThreadId = threadID; + + if (_twoDevices || !_recording) + { +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + if (AudioDeviceCreateIOProcID != NULL) + { + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID)); + } + else + { +#endif + WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, deviceIOProc)); +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + } +#endif + } + _playing = true; + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::StopPlayout() +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + CriticalSectionScoped lock(_critSect); + + if (!_playIsInitialized) + { + return 0; + } + + OSStatus err = noErr; + + int32_t renderDeviceIsAlive = AtomicGet32(&_renderDeviceIsAlive); + if (_twoDevices || !_recording) + { + // Stop the shared device if recording has stopped as well. + if (_playing && renderDeviceIsAlive == 1) + { + _doStop = true; // Signal to io proc to stop audio device + _critSect.Leave(); // Cannot be under lock, risk of deadlock + if (kEventTimeout == _stopEvent.Wait(2000)) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, + _id, " Timed out stopping the render IOProc. " + "We likely failed to detect a device removal."); + } + _critSect.Enter(); + _doStop = false; + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + "Playout stopped"); + } +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + if (AudioDeviceDestroyIOProcID != NULL) + { + WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID, + _deviceIOProcID)); + } + else + { +#endif + WEBRTC_CA_LOG_WARN(AudioDeviceRemoveIOProc(_outputDeviceID, deviceIOProc)); +#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050 + } +#endif + } + + // Setting this signal will allow the worker thread to be stopped. + AtomicSet32(&_renderDeviceIsAlive, 0); + _critSect.Leave(); + if (_renderWorkerThread != NULL) + { + if (!_renderWorkerThread->Stop()) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Timed out waiting for the render worker thread to " + "stop."); + } + } + _critSect.Enter(); + + WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter)); + + // Remove listeners. 
+ AudioObjectPropertyAddress propertyAddress = { + kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, + 0 }; + WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID, + &propertyAddress, &objectListenerProc, this)); + + propertyAddress.mSelector = kAudioDeviceProcessorOverload; + WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID, + &propertyAddress, &objectListenerProc, this)); + + if (_macBookPro) + { + propertyAddress.mSelector = kAudioDevicePropertyDataSource; + Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID, + &propertyAddress); + if (hasProperty) + { + WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID, + &propertyAddress, &objectListenerProc, this)); + } + } + + _playIsInitialized = false; + _playing = false; + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::PlayoutDelay(WebRtc_UWord16& delayMS) const +{ + int32_t renderDelayUs = AtomicGet32(&_renderDelayUs); + delayMS = static_cast<WebRtc_UWord16> (1e-3 * (renderDelayUs + + _renderLatencyUs) + 0.5); + return 0; +} + +WebRtc_Word32 AudioDeviceMac::RecordingDelay(WebRtc_UWord16& delayMS) const +{ + int32_t captureDelayUs = AtomicGet32(&_captureDelayUs); + delayMS = static_cast<WebRtc_UWord16> (1e-3 * (captureDelayUs + + _captureLatencyUs) + 0.5); + return 0; +} + +bool AudioDeviceMac::Playing() const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + return (_playing); +} + +WebRtc_Word32 AudioDeviceMac::SetPlayoutBuffer( + const AudioDeviceModule::BufferType type, + WebRtc_UWord16 sizeMS) +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "AudioDeviceMac::SetPlayoutBuffer(type=%u, sizeMS=%u)", type, + sizeMS); + + if (type != AudioDeviceModule::kFixedBufferSize) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + " Adaptive buffer size not supported on this platform"); + return -1; + } + + _playBufType = type; + _playBufDelayFixed = sizeMS; + return 0; +} + +WebRtc_Word32 AudioDeviceMac::PlayoutBuffer( + AudioDeviceModule::BufferType& type, + WebRtc_UWord16& sizeMS) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + type = _playBufType; + sizeMS = _playBufDelayFixed; + + return 0; +} + +// Not implemented for Mac.
+WebRtc_Word32 AudioDeviceMac::CPULoad(WebRtc_UWord16& /*load*/) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " API call not supported on this platform"); + + return -1; +} + +bool AudioDeviceMac::PlayoutWarning() const +{ + return (_playWarning > 0); +} + +bool AudioDeviceMac::PlayoutError() const +{ + return (_playError > 0); +} + +bool AudioDeviceMac::RecordingWarning() const +{ + return (_recWarning > 0); +} + +bool AudioDeviceMac::RecordingError() const +{ + return (_recError > 0); +} + +void AudioDeviceMac::ClearPlayoutWarning() +{ + _playWarning = 0; +} + +void AudioDeviceMac::ClearPlayoutError() +{ + _playError = 0; +} + +void AudioDeviceMac::ClearRecordingWarning() +{ + _recWarning = 0; +} + +void AudioDeviceMac::ClearRecordingError() +{ + _recError = 0; +} + +// ============================================================================ +// Private Methods +// ============================================================================ + +WebRtc_Word32 +AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope, + AudioDeviceID scopedDeviceIds[], + const WebRtc_UWord32 deviceListLength) +{ + OSStatus err = noErr; + + AudioObjectPropertyAddress propertyAddress = { + kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster }; + UInt32 size = 0; + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, + &propertyAddress, 0, NULL, &size)); + if (size == 0) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + "No devices"); + return 0; + } + + AudioDeviceID* deviceIds = (AudioDeviceID*) malloc(size); + UInt32 numberDevices = size / sizeof(AudioDeviceID); + AudioBufferList* bufferList = NULL; + UInt32 numberScopedDevices = 0; + + // First check if there is a default device and list it + UInt32 hardwareProperty = 0; + if (scope == kAudioDevicePropertyScopeOutput) + { + hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice; + } else + { + hardwareProperty = kAudioHardwarePropertyDefaultInputDevice; + } + + AudioObjectPropertyAddress + propertyAddressDefault = { hardwareProperty, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster }; + + AudioDeviceID usedID; + UInt32 uintSize = sizeof(UInt32); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject, + &propertyAddressDefault, 0, NULL, &uintSize, &usedID)); + if (usedID != kAudioDeviceUnknown) + { + scopedDeviceIds[numberScopedDevices] = usedID; + numberScopedDevices++; + } else + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + "GetNumberDevices(): Default device unknown"); + } + + // Then list the rest of the devices + bool listOK = true; + + WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject, + &propertyAddress, 0, NULL, &size, deviceIds)); + if (err != noErr) + { + listOK = false; + } else + { + propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration; + propertyAddress.mScope = scope; + propertyAddress.mElement = 0; + for (UInt32 i = 0; i < numberDevices; i++) + { + // Check for input channels + WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(deviceIds[i], + &propertyAddress, 0, NULL, &size)); + if (err == kAudioHardwareBadDeviceError) + { + // This device doesn't actually exist; continue iterating. 
+ continue; + } else if (err != noErr) + { + listOK = false; + break; + } + + bufferList = (AudioBufferList*) malloc(size); + WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(deviceIds[i], + &propertyAddress, 0, NULL, &size, bufferList)); + if (err != noErr) + { + listOK = false; + break; + } + + if (bufferList->mNumberBuffers > 0) + { + if (numberScopedDevices >= deviceListLength) + { + WEBRTC_TRACE(kTraceError, + kTraceAudioDevice, _id, + "Device list is not long enough"); + listOK = false; + break; + } + + scopedDeviceIds[numberScopedDevices] = deviceIds[i]; + numberScopedDevices++; + } + + free(bufferList); + bufferList = NULL; + } // for + } + + if (!listOK) + { + if (deviceIds) + { + free(deviceIds); + deviceIds = NULL; + } + + if (bufferList) + { + free(bufferList); + bufferList = NULL; + } + + return -1; + } + + // Happy ending + if (deviceIds) + { + free(deviceIds); + deviceIds = NULL; + } + + return numberScopedDevices; +} + +WebRtc_Word32 +AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope, + const WebRtc_UWord16 index, + char* name) +{ + OSStatus err = noErr; + UInt32 len = kAdmMaxDeviceNameSize; + AudioDeviceID deviceIds[MaxNumberDevices]; + + int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices); + if (numberDevices < 0) + { + return -1; + } else if (numberDevices == 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "No devices"); + return -1; + } + + // If the number is below the number of devices, assume it's "WEBRTC ID" + // otherwise assume it's a CoreAudio ID + AudioDeviceID usedID; + + // Check if there is a default device + bool isDefaultDevice = false; + if (index == 0) + { + UInt32 hardwareProperty = 0; + if (scope == kAudioDevicePropertyScopeOutput) + { + hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice; + } else + { + hardwareProperty = kAudioHardwarePropertyDefaultInputDevice; + } + AudioObjectPropertyAddress propertyAddress = { hardwareProperty, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster }; + UInt32 size = sizeof(UInt32); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject, + &propertyAddress, 0, NULL, &size, &usedID)); + if (usedID == kAudioDeviceUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + "GetDeviceName(): Default device unknown"); + } else + { + isDefaultDevice = true; + } + } + + AudioObjectPropertyAddress propertyAddress = { + kAudioDevicePropertyDeviceName, scope, 0 }; + + if (isDefaultDevice) + { + char devName[len]; + + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, + &propertyAddress, 0, NULL, &len, devName)); + + sprintf(name, "default (%s)", devName); + } else + { + if (index < numberDevices) + { + usedID = deviceIds[index]; + } else + { + usedID = index; + } + + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, + &propertyAddress, 0, NULL, &len, name)); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::InitDevice(const WebRtc_UWord16 userDeviceIndex, + AudioDeviceID& deviceId, + const bool isInput) +{ + OSStatus err = noErr; + UInt32 size = 0; + AudioObjectPropertyScope deviceScope; + AudioObjectPropertySelector defaultDeviceSelector; + AudioDeviceID deviceIds[MaxNumberDevices]; + + if (isInput) + { + deviceScope = kAudioDevicePropertyScopeInput; + defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice; + } else + { + deviceScope = kAudioDevicePropertyScopeOutput; + defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice; + } + + AudioObjectPropertyAddress + propertyAddress = 
{ defaultDeviceSelector, + kAudioObjectPropertyScopeGlobal, + kAudioObjectPropertyElementMaster }; + + // Get the actual device IDs + int numberDevices = GetNumberDevices(deviceScope, deviceIds, + MaxNumberDevices); + if (numberDevices < 0) + { + return -1; + } else if (numberDevices == 0) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "InitDevice(): No devices"); + return -1; + } + + bool isDefaultDevice = false; + deviceId = kAudioDeviceUnknown; + if (userDeviceIndex == 0) + { + // Try to use default system device + size = sizeof(AudioDeviceID); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject, + &propertyAddress, 0, NULL, &size, &deviceId)); + if (deviceId == kAudioDeviceUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " No default device exists"); + } else + { + isDefaultDevice = true; + } + } + + if (!isDefaultDevice) + { + deviceId = deviceIds[userDeviceIndex]; + } + + // Obtain device name and manufacturer for logging. + // Also use this as a test to ensure a user-set device ID is valid. + char devName[128]; + char devManf[128]; + memset(devName, 0, sizeof(devName)); + memset(devManf, 0, sizeof(devManf)); + + propertyAddress.mSelector = kAudioDevicePropertyDeviceName; + propertyAddress.mScope = deviceScope; + propertyAddress.mElement = 0; + size = sizeof(devName); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, + &propertyAddress, 0, NULL, &size, devName)); + + propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer; + size = sizeof(devManf); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, + &propertyAddress, 0, NULL, &size, devManf)); + + if (isInput) + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Input device: %s %s", devManf, devName); + } else + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " Output device: %s %s", devManf, devName); + } + + return 0; +} + +OSStatus AudioDeviceMac::objectListenerProc( + AudioObjectID objectId, + UInt32 numberAddresses, + const AudioObjectPropertyAddress addresses[], + void* clientData) +{ + AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData; + assert(ptrThis != NULL); + + ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses); + + // AudioObjectPropertyListenerProc functions are supposed to return 0 + return 0; +} + +OSStatus AudioDeviceMac::implObjectListenerProc( + const AudioObjectID objectId, + const UInt32 numberAddresses, + const AudioObjectPropertyAddress addresses[]) +{ + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + "AudioDeviceMac::implObjectListenerProc()"); + + for (UInt32 i = 0; i < numberAddresses; i++) + { + if (addresses[i].mSelector == kAudioHardwarePropertyDevices) + { + HandleDeviceChange(); + } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) + { + HandleStreamFormatChange(objectId, addresses[i]); + } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) + { + HandleDataSourceChange(objectId, addresses[i]); + } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload) + { + HandleProcessorOverload(addresses[i]); + } + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::HandleDeviceChange() +{ + OSStatus err = noErr; + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + "kAudioHardwarePropertyDevices"); + + // A device has changed. Check if our registered devices have been removed. + // Ensure the devices have been initialized, meaning the IDs are valid. 
+ if (MicrophoneIsInitialized()) + { + AudioObjectPropertyAddress propertyAddress = { + kAudioDevicePropertyDeviceIsAlive, + kAudioDevicePropertyScopeInput, 0 }; + UInt32 deviceIsAlive = 1; + UInt32 size = sizeof(UInt32); + err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, + NULL, &size, &deviceIsAlive); + + if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + "Capture device is not alive (probably removed)"); + AtomicSet32(&_captureDeviceIsAlive, 0); + _mixerManager.CloseMicrophone(); + if (_recError == 1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, + _id, " pending recording error exists"); + } + _recError = 1; // triggers callback from module process thread + } else if (err != noErr) + { + logCAMsg(kTraceError, kTraceAudioDevice, _id, + "Error in AudioDeviceGetProperty()", (const char*) &err); + return -1; + } + } + + if (SpeakerIsInitialized()) + { + AudioObjectPropertyAddress propertyAddress = { + kAudioDevicePropertyDeviceIsAlive, + kAudioDevicePropertyScopeOutput, 0 }; + UInt32 deviceIsAlive = 1; + UInt32 size = sizeof(UInt32); + err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, + NULL, &size, &deviceIsAlive); + + if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + "Render device is not alive (probably removed)"); + AtomicSet32(&_renderDeviceIsAlive, 0); + _mixerManager.CloseSpeaker(); + if (_playError == 1) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, + _id, " pending playout error exists"); + } + _playError = 1; // triggers callback from module process thread + } else if (err != noErr) + { + logCAMsg(kTraceError, kTraceAudioDevice, _id, + "Error in AudioDeviceGetProperty()", (const char*) &err); + return -1; + } + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::HandleStreamFormatChange( + const AudioObjectID objectId, + const AudioObjectPropertyAddress propertyAddress) +{ + OSStatus err = noErr; + + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + "Stream format changed"); + + if (objectId != _inputDeviceID && objectId != _outputDeviceID) + { + return 0; + } + + // Get the new device format + AudioStreamBasicDescription streamFormat; + UInt32 size = sizeof(streamFormat); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(objectId, + &propertyAddress, 0, NULL, &size, &streamFormat)); + + if (streamFormat.mFormatID != kAudioFormatLinearPCM) + { + logCAMsg(kTraceError, kTraceAudioDevice, _id, + "Unacceptable input stream format -> mFormatID", + (const char *) &streamFormat.mFormatID); + return -1; + } + + if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) + { + WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, + "Too many channels on device (mChannelsPerFrame = %d)", + streamFormat.mChannelsPerFrame); + return -1; + } + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Stream format:"); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "mSampleRate = %f, mChannelsPerFrame = %u", + streamFormat.mSampleRate, streamFormat.mChannelsPerFrame); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "mBytesPerPacket = %u, mFramesPerPacket = %u", + streamFormat.mBytesPerPacket, streamFormat.mFramesPerPacket); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "mBytesPerFrame = %u, mBitsPerChannel = %u", + streamFormat.mBytesPerFrame, streamFormat.mBitsPerChannel); + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "mFormatFlags = %u, mChannelsPerFrame = %u", + 
streamFormat.mFormatFlags, streamFormat.mChannelsPerFrame); + logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID", + (const char *) &streamFormat.mFormatID); + + if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) + { + memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat)); + + if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) + { + _inDesiredFormat.mChannelsPerFrame = 2; + } else + { + // Disable stereo recording when we only have one channel on the device. + _inDesiredFormat.mChannelsPerFrame = 1; + _recChannels = 1; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Stereo recording unavailable on this device"); + } + + if (_ptrAudioBuffer) + { + // Update audio buffer with the selected parameters + _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); + _ptrAudioBuffer->SetRecordingChannels((WebRtc_UWord8) _recChannels); + } + + // Recreate the converter with the new format + // TODO(xians): make this thread safe + WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter)); + + WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat, + &_captureConverter)); + } else + { + memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat)); + + if (_outStreamFormat.mChannelsPerFrame >= 2 && (_playChannels == 2)) + { + _outDesiredFormat.mChannelsPerFrame = 2; + } else + { + // Disable stereo playout when we only have one channel on the device. + _outDesiredFormat.mChannelsPerFrame = 1; + _playChannels = 1; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "Stereo playout unavailable on this device"); + } + + if (_ptrAudioBuffer) + { + // Update audio buffer with the selected parameters + _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); + _ptrAudioBuffer->SetPlayoutChannels((WebRtc_UWord8) _playChannels); + } + + _renderDelayOffsetSamples = _renderBufSizeSamples - N_BUFFERS_OUT + * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES + * _outDesiredFormat.mChannelsPerFrame; + + // Recreate the converter with the new format + // TODO(xians): make this thread safe + WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_renderConverter)); + + WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_outDesiredFormat, &streamFormat, + &_renderConverter)); + } + + return 0; +} + +WebRtc_Word32 AudioDeviceMac::HandleDataSourceChange( + const AudioObjectID objectId, + const AudioObjectPropertyAddress propertyAddress) +{ + OSStatus err = noErr; + + if (_macBookPro && propertyAddress.mScope + == kAudioDevicePropertyScopeOutput) + { + WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, + "Data source changed"); + + _macBookProPanRight = false; + UInt32 dataSource = 0; + UInt32 size = sizeof(UInt32); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(objectId, + &propertyAddress, 0, NULL, &size, &dataSource)); + if (dataSource == 'ispk') + { + _macBookProPanRight = true; + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "MacBook Pro using internal speakers; stereo panning right"); + } else + { + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "MacBook Pro not using internal speakers"); + } + } + + return 0; +} +WebRtc_Word32 AudioDeviceMac::HandleProcessorOverload( + const AudioObjectPropertyAddress propertyAddress) +{ + // TODO(xians): we probably want to notify the user in some way of the + // overload. However, the Windows interpretations of these errors seem to + // be more severe than what ProcessorOverload is thrown for. + // + // We don't log the notification, as it's sent from the HAL's IO thread. 
We
+    // don't want to slow it down even further.
+    if (propertyAddress.mScope == kAudioDevicePropertyScopeInput)
+    {
+        //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "Capture processor
+        // overload");
+        //_callback->ProblemIsReported(
+        // SndCardStreamObserver::ERecordingProblem);
+    } else
+    {
+        //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+        //             "Render processor overload");
+        //_callback->ProblemIsReported(
+        // SndCardStreamObserver::EPlaybackProblem);
+    }
+
+    return 0;
+}
+
+// ============================================================================
+//                                Thread Methods
+// ============================================================================
+
+OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID, const AudioTimeStamp*,
+                                      const AudioBufferList* inputData,
+                                      const AudioTimeStamp* inputTime,
+                                      AudioBufferList* outputData,
+                                      const AudioTimeStamp* outputTime,
+                                      void *clientData)
+{
+    AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
+    assert(ptrThis != NULL);
+
+    ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);
+
+    // AudioDeviceIOProc functions are supposed to return 0
+    return 0;
+}
+
+OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
+                                          UInt32 *numberDataPackets,
+                                          AudioBufferList *data,
+                                          AudioStreamPacketDescription **,
+                                          void *userData)
+{
+    AudioDeviceMac *ptrThis = (AudioDeviceMac *) userData;
+    assert(ptrThis != NULL);
+
+    return ptrThis->implOutConverterProc(numberDataPackets, data);
+}
+
+OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID, const AudioTimeStamp*,
+                                        const AudioBufferList* inputData,
+                                        const AudioTimeStamp* inputTime,
+                                        AudioBufferList*,
+                                        const AudioTimeStamp*, void* clientData)
+{
+    AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
+    assert(ptrThis != NULL);
+
+    ptrThis->implInDeviceIOProc(inputData, inputTime);
+
+    // AudioDeviceIOProc functions are supposed to return 0
+    return 0;
+}
+
+OSStatus AudioDeviceMac::inConverterProc(
+    AudioConverterRef,
+    UInt32 *numberDataPackets,
+    AudioBufferList *data,
+    AudioStreamPacketDescription ** /*dataPacketDescription*/,
+    void *userData)
+{
+    AudioDeviceMac *ptrThis = static_cast<AudioDeviceMac*> (userData);
+    assert(ptrThis != NULL);
+
+    return ptrThis->implInConverterProc(numberDataPackets, data);
+}
+
+OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList *inputData,
+                                          const AudioTimeStamp *inputTime,
+                                          AudioBufferList *outputData,
+                                          const AudioTimeStamp *outputTime)
+{
+    OSStatus err = noErr;
+    UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
+    UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
+
+    if (!_twoDevices && _recording)
+    {
+        implInDeviceIOProc(inputData, inputTime);
+    }
+
+    // Check if we should close down audio device
+    // Double-checked locking optimization to remove locking overhead
+    if (_doStop)
+    {
+        _critSect.Enter();
+        if (_doStop)
+        {
+            // This case is for stop play only or stop play+rec
+            // (in that case out and in device ids are equal)
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050
+            if (AudioDeviceCreateIOProcID != NULL)
+            {
+                WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
+            }
+            else
+            {
+#endif
+                WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, deviceIOProc));
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050
+            }
+#endif
+            if (err == noErr)
+            {
+                WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
+                             _id, " Playout or shared device stopped");
+            }
+
+            _doStop = false;
+            _stopEvent.Set();
+            _critSect.Leave();
+            return 0;
+        }
+        _critSect.Leave();
+    }
+
+    if (!_playing)
+    {
+        // This can be the case when a shared device is capturing but not
+        // rendering. We allow the checks above before returning to avoid a
+        // timeout when capturing is stopped.
+        return 0;
+    }
+
+    assert(_outStreamFormat.mBytesPerFrame != 0);
+    UInt32 size = outputData->mBuffers->mDataByteSize
+        / _outStreamFormat.mBytesPerFrame;
+
+    // TODO(xians): signal an error somehow?
+    err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
+                                          this, &size, outputData, NULL);
+    if (err != noErr)
+    {
+        if (err == 1)
+        {
+            // This is our own error.
+            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                         " Error in AudioConverterFillComplexBuffer()");
+            return 1;
+        } else
+        {
+            logCAMsg(kTraceError, kTraceAudioDevice, _id,
+                     "Error in AudioConverterFillComplexBuffer()",
+                     (const char *) &err);
+            return 1;
+        }
+    }
+
+    ring_buffer_size_t bufSizeSamples =
+        PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);
+
+    int32_t renderDelayUs = static_cast<int32_t> (1e-3 * (outputTimeNs - nowNs)
+        + 0.5);
+    renderDelayUs += static_cast<int32_t> ((1.0e6 * bufSizeSamples)
+        / _outDesiredFormat.mChannelsPerFrame / _outDesiredFormat.mSampleRate
+        + 0.5);
+
+    AtomicSet32(&_renderDelayUs, renderDelayUs);
+
+    return 0;
+}
+
+OSStatus AudioDeviceMac::implOutConverterProc(UInt32 *numberDataPackets,
+                                              AudioBufferList *data)
+{
+    assert(data->mNumberBuffers == 1);
+    ring_buffer_size_t numSamples = *numberDataPackets
+        * _outDesiredFormat.mChannelsPerFrame;
+
+    data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
+    // Always give the converter as much as it wants, zero padding as required.
+    data->mBuffers->mDataByteSize = *numberDataPackets
+        * _outDesiredFormat.mBytesPerPacket;
+    data->mBuffers->mData = _renderConvertData;
+    memset(_renderConvertData, 0, sizeof(_renderConvertData));
+
+    PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);
+
+    kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
+    if (kernErr != KERN_SUCCESS)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " semaphore_signal_all() error: %d", kernErr);
+        return 1;
+    }
+
+    return 0;
+}
+
+OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList *inputData,
+                                            const AudioTimeStamp *inputTime)
+{
+    OSStatus err = noErr;
+    UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
+    UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
+
+    if (!_recording)
+    {
+        return 0;
+    }
+
+    // Check if we should close down audio device
+    // Double-checked locking optimization to remove locking overhead
+    if (_doStopRec)
+    {
+        _critSect.Enter();
+        if (_doStopRec)
+        {
+            // This case is for stop rec only
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050
+            if (AudioDeviceCreateIOProcID != NULL)
+            {
+                WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
+            }
+            else
+            {
+#endif
+                WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, inDeviceIOProc));
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050
+            }
+#endif
+            if (err == noErr)
+            {
+                WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
+                             _id, " Recording device stopped");
+            }
+
+            _doStopRec = false;
+            _stopEventRec.Set();
+            _critSect.Leave();
+            return 0;
+        }
+        _critSect.Leave();
+    }
+
+    ring_buffer_size_t bufSizeSamples =
+        PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer);
+
+    int32_t captureDelayUs = static_cast<int32_t> (1e-3 * (nowNs - inputTimeNs)
+        + 0.5);
+    captureDelayUs += static_cast<int32_t> ((1.0e6 * bufSizeSamples)
+        / _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mSampleRate
+        + 0.5);
+
+    AtomicSet32(&_captureDelayUs, captureDelayUs);
+
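+    // [Editor's note] The capture delay estimate adds two terms: the age of
+    // the input timestamp on the host clock (nowNs - inputTimeNs) and the
+    // audio already queued in the capture ring buffer, converted at the
+    // device rate. E.g. 960 buffered samples of stereo 48 kHz audio:
+    //     1.0e6 * 960 / 2 / 48000 = 10000 us of buffered audio.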
+    assert(inputData->mNumberBuffers == 1);
+    ring_buffer_size_t numSamples = inputData->mBuffers->mDataByteSize
+        * _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mBytesPerPacket;
+    PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
+                           numSamples);
+
+    kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
+    if (kernErr != KERN_SUCCESS)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " semaphore_signal_all() error: %d", kernErr);
+    }
+
+    return err;
+}
+
+OSStatus AudioDeviceMac::implInConverterProc(UInt32 *numberDataPackets,
+                                             AudioBufferList *data)
+{
+    assert(data->mNumberBuffers == 1);
+    ring_buffer_size_t numSamples = *numberDataPackets
+        * _inStreamFormat.mChannelsPerFrame;
+
+    while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples)
+    {
+        mach_timespec_t timeout;
+        timeout.tv_sec = 0;
+        timeout.tv_nsec = TIMER_PERIOD_MS;
+
+        kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout);
+        if (kernErr == KERN_OPERATION_TIMED_OUT)
+        {
+            int32_t signal = AtomicGet32(&_captureDeviceIsAlive);
+            if (signal == 0)
+            {
+                // The capture device is no longer alive; stop the worker thread.
+                *numberDataPackets = 0;
+                return 1;
+            }
+        } else if (kernErr != KERN_SUCCESS)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                         " semaphore_timedwait() error: %d", kernErr);
+        }
+    }
+
+    // Pass the read pointer directly to the converter to avoid a memcpy.
+    void* dummyPtr;
+    ring_buffer_size_t dummySize;
+    PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples,
+                                    &data->mBuffers->mData, &numSamples,
+                                    &dummyPtr, &dummySize);
+    PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples);
+
+    data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
+    *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
+    data->mBuffers->mDataByteSize = *numberDataPackets
+        * _inStreamFormat.mBytesPerPacket;
+
+    return 0;
+}
+
+bool AudioDeviceMac::RunRender(void* ptrThis)
+{
+    return static_cast<AudioDeviceMac*> (ptrThis)->RenderWorkerThread();
+}
+
+bool AudioDeviceMac::RenderWorkerThread()
+{
+    ring_buffer_size_t numSamples = ENGINE_PLAY_BUF_SIZE_IN_SAMPLES
+        * _outDesiredFormat.mChannelsPerFrame;
+    while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer)
+        - _renderDelayOffsetSamples < numSamples)
+    {
+        mach_timespec_t timeout;
+        timeout.tv_sec = 0;
+        timeout.tv_nsec = TIMER_PERIOD_MS;
+
+        kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout);
+        if (kernErr == KERN_OPERATION_TIMED_OUT)
+        {
+            int32_t signal = AtomicGet32(&_renderDeviceIsAlive);
+            if (signal == 0)
+            {
+                // The render device is no longer alive; stop the worker thread.
+                return false;
+            }
+        } else if (kernErr != KERN_SUCCESS)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                         " semaphore_timedwait() error: %d", kernErr);
+        }
+    }
+
+    WebRtc_Word8 playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
+
+    if (!_ptrAudioBuffer)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " capture AudioBuffer is invalid");
+        return false;
+    }
+
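+    // [Editor's note] RenderWorkerThread() is the producer half of the render
+    // path: it blocks in semaphore_timedwait() until implOutConverterProc()
+    // has drained enough of the PortAudio ring buffer to fit another 10 ms
+    // block. Note that TIMER_PERIOD_MS, despite its name, holds a nanosecond
+    // value (2 * 10 * N_BLOCKS_IO * 1000000), so tv_nsec above amounts to a
+    // 40 ms timeout; the timeout path only checks whether the device died.
+
+    // Ask for new PCM data to be played out using the AudioDeviceBuffer.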
+    WebRtc_UWord32 nSamples =
+        _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
+
+    nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
+    if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                     " invalid number of output samples(%d)", nSamples);
+    }
+
+    WebRtc_UWord32 nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;
+
+    SInt16 *pPlayBuffer = (SInt16 *) &playBuffer;
+    if (_macBookProPanRight && (_playChannels == 2))
+    {
+        // Mix entirely into the right channel and zero the left channel.
+        SInt32 sampleInt32 = 0;
+        for (WebRtc_UWord32 sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx
+            += 2)
+        {
+            sampleInt32 = pPlayBuffer[sampleIdx];
+            sampleInt32 += pPlayBuffer[sampleIdx + 1];
+            sampleInt32 /= 2;
+
+            if (sampleInt32 > 32767)
+            {
+                sampleInt32 = 32767;
+            } else if (sampleInt32 < -32768)
+            {
+                sampleInt32 = -32768;
+            }
+
+            pPlayBuffer[sampleIdx] = 0;
+            pPlayBuffer[sampleIdx + 1] = static_cast<SInt16> (sampleInt32);
+        }
+    }
+
+    PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);
+
+    return true;
+}
+
+bool AudioDeviceMac::RunCapture(void* ptrThis)
+{
+    return static_cast<AudioDeviceMac*> (ptrThis)->CaptureWorkerThread();
+}
+
+bool AudioDeviceMac::CaptureWorkerThread()
+{
+    OSStatus err = noErr;
+    UInt32 noRecSamples = ENGINE_REC_BUF_SIZE_IN_SAMPLES
+        * _inDesiredFormat.mChannelsPerFrame;
+    SInt16 recordBuffer[noRecSamples];
+    UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;
+
+    AudioBufferList engineBuffer;
+    engineBuffer.mNumberBuffers = 1; // Interleaved channels.
+    engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
+    engineBuffer.mBuffers->mDataByteSize = _inDesiredFormat.mBytesPerPacket
+        * noRecSamples;
+    engineBuffer.mBuffers->mData = recordBuffer;
+
+    err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
+                                          this, &size, &engineBuffer, NULL);
+    if (err != noErr)
+    {
+        if (err == 1)
+        {
+            // This is our own error.
+            return false;
+        } else
+        {
+            logCAMsg(kTraceError, kTraceAudioDevice, _id,
+                     "Error in AudioConverterFillComplexBuffer()",
+                     (const char *) &err);
+            return false;
+        }
+    }
+
+    // TODO(xians): what if the returned size is incorrect?
+    if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES)
+    {
+        WebRtc_UWord32 currentMicLevel(0);
+        WebRtc_UWord32 newMicLevel(0);
+        WebRtc_Word32 msecOnPlaySide;
+        WebRtc_Word32 msecOnRecordSide;
+
+        int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
+        int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
+
+        msecOnPlaySide = static_cast<WebRtc_Word32> (1e-3 * (renderDelayUs
+            + _renderLatencyUs) + 0.5);
+        msecOnRecordSide = static_cast<WebRtc_Word32> (1e-3 * (captureDelayUs
+            + _captureLatencyUs) + 0.5);
+
+        if (!_ptrAudioBuffer)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+                         " capture AudioBuffer is invalid");
+            return false;
+        }
+
+        // store the recorded buffer (no action will be taken if the
+        // #recorded samples is not a full buffer)
+        _ptrAudioBuffer->SetRecordedBuffer((WebRtc_Word8*) &recordBuffer,
+                                           (WebRtc_UWord32) size);
+
+        if (AGC())
+        {
+            // store current mic level in the audio buffer if AGC is enabled
+            if (MicrophoneVolume(currentMicLevel) == 0)
+            {
+                // this call does not affect the actual microphone volume
+                _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
+            }
+        }
+
+        _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide, 0);
+
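+        // [Editor's note] SetVQEData() hands the voice quality enhancement
+        // (notably the AEC) the current playout and recording delays in
+        // milliseconds; their sum approximates the echo path delay. The final
+        // argument appears to be a clock drift estimate, which this
+        // implementation does not compute and always passes as 0.
+
+        // deliver recorded samples at specified sample rate, mic level etc.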
+        // to the observer using callback
+        _ptrAudioBuffer->DeliverRecordedData();
+
+        if (AGC())
+        {
+            newMicLevel = _ptrAudioBuffer->NewMicLevel();
+            if (newMicLevel != 0)
+            {
+                // The VQE will only deliver non-zero microphone levels when
+                // a change is needed.
+                // Set this new mic level (received from the observer as return
+                // value in the callback).
+                WEBRTC_TRACE(kTraceStream, kTraceAudioDevice,
+                             _id, " AGC change of volume: old=%u => new=%u",
+                             currentMicLevel, newMicLevel);
+                if (SetMicrophoneVolume(newMicLevel) == -1)
+                {
+                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                                 " the required modification of the microphone "
+                                 "volume failed");
+                }
+            }
+        }
+    }
+
+    return true;
+}
+
+} // namespace webrtc
diff --git a/src/modules/audio_device/main/source/mac/audio_device_mac.h b/src/modules/audio_device/main/source/mac/audio_device_mac.h
new file mode 100644
index 000000000..8069feeb9
--- /dev/null
+++ b/src/modules/audio_device/main/source/mac/audio_device_mac.h
@@ -0,0 +1,400 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_MAC_H
+#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_MAC_H
+
+#include "audio_device_generic.h"
+#include "critical_section_wrapper.h"
+#include "audio_mixer_manager_mac.h"
+
+#include <CoreAudio/CoreAudio.h>
+#include <AudioToolbox/AudioConverter.h>
+#include <mach/semaphore.h>
+
+struct PaUtilRingBuffer;
+
+namespace webrtc
+{
+class EventWrapper;
+class ThreadWrapper;
+
+const WebRtc_UWord32 N_REC_SAMPLES_PER_SEC = 48000;
+const WebRtc_UWord32 N_PLAY_SAMPLES_PER_SEC = 48000;
+
+const WebRtc_UWord32 N_REC_CHANNELS = 1; // default is mono recording
+const WebRtc_UWord32 N_PLAY_CHANNELS = 2; // default is stereo playout
+const WebRtc_UWord32 N_DEVICE_CHANNELS = 8;
+
+const WebRtc_UWord32 ENGINE_REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC
+    / 100);
+const WebRtc_UWord32 ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC
+    / 100);
+
+enum
+{
+    N_BLOCKS_IO = 2
+};
+enum
+{
+    N_BUFFERS_IN = 10
+};
+enum
+{
+    N_BUFFERS_OUT = 3
+}; // Must be at least N_BLOCKS_IO
+
+const WebRtc_UWord32 TIMER_PERIOD_MS = (2 * 10 * N_BLOCKS_IO * 1000000);
+
+const WebRtc_UWord32 REC_BUF_SIZE_IN_SAMPLES = (ENGINE_REC_BUF_SIZE_IN_SAMPLES
+    * N_DEVICE_CHANNELS * N_BUFFERS_IN);
+const WebRtc_UWord32 PLAY_BUF_SIZE_IN_SAMPLES =
+    (ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * N_PLAY_CHANNELS * N_BUFFERS_OUT);
+
+class AudioDeviceMac: public AudioDeviceGeneric
+{
+public:
+    AudioDeviceMac(const WebRtc_Word32 id);
+    ~AudioDeviceMac();
+
+    // Retrieve the currently utilized audio layer
+    virtual WebRtc_Word32
+        ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const;
+
+    // Main initialization and termination
+    virtual WebRtc_Word32 Init();
+    virtual WebRtc_Word32 Terminate();
+    virtual bool Initialized() const;
+
+    // Device enumeration
+    virtual WebRtc_Word16 PlayoutDevices();
+    virtual WebRtc_Word16 RecordingDevices();
+    virtual WebRtc_Word32 PlayoutDeviceName(
+        WebRtc_UWord16 index,
+        WebRtc_Word8 name[kAdmMaxDeviceNameSize],
+        WebRtc_Word8 guid[kAdmMaxGuidSize]);
+    virtual WebRtc_Word32 RecordingDeviceName(
+        WebRtc_UWord16 index,
+        WebRtc_Word8 name[kAdmMaxDeviceNameSize],
+        WebRtc_Word8 guid[kAdmMaxGuidSize]);
+
+    // Device selection
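+    // [Editor's note] For both playout and recording, device index 0 selects
+    // the current system default device (see InitDevice() in the .cc file),
+    // while higher indices follow the enumeration order produced by
+    // GetNumberDevices().
+    virtual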
WebRtc_Word32 SetPlayoutDevice(WebRtc_UWord16 index); + virtual WebRtc_Word32 SetPlayoutDevice( + AudioDeviceModule::WindowsDeviceType device); + virtual WebRtc_Word32 SetRecordingDevice(WebRtc_UWord16 index); + virtual WebRtc_Word32 SetRecordingDevice( + AudioDeviceModule::WindowsDeviceType device); + + // Audio transport initialization + virtual WebRtc_Word32 PlayoutIsAvailable(bool& available); + virtual WebRtc_Word32 InitPlayout(); + virtual bool PlayoutIsInitialized() const; + virtual WebRtc_Word32 RecordingIsAvailable(bool& available); + virtual WebRtc_Word32 InitRecording(); + virtual bool RecordingIsInitialized() const; + + // Audio transport control + virtual WebRtc_Word32 StartPlayout(); + virtual WebRtc_Word32 StopPlayout(); + virtual bool Playing() const; + virtual WebRtc_Word32 StartRecording(); + virtual WebRtc_Word32 StopRecording(); + virtual bool Recording() const; + + // Microphone Automatic Gain Control (AGC) + virtual WebRtc_Word32 SetAGC(bool enable); + virtual bool AGC() const; + + // Volume control based on the Windows Wave API (Windows only) + virtual WebRtc_Word32 SetWaveOutVolume(WebRtc_UWord16 volumeLeft, + WebRtc_UWord16 volumeRight); + virtual WebRtc_Word32 WaveOutVolume(WebRtc_UWord16& volumeLeft, + WebRtc_UWord16& volumeRight) const; + + // Audio mixer initialization + virtual WebRtc_Word32 SpeakerIsAvailable(bool& available); + virtual WebRtc_Word32 InitSpeaker(); + virtual bool SpeakerIsInitialized() const; + virtual WebRtc_Word32 MicrophoneIsAvailable(bool& available); + virtual WebRtc_Word32 InitMicrophone(); + virtual bool MicrophoneIsInitialized() const; + + // Speaker volume controls + virtual WebRtc_Word32 SpeakerVolumeIsAvailable(bool& available); + virtual WebRtc_Word32 SetSpeakerVolume(WebRtc_UWord32 volume); + virtual WebRtc_Word32 SpeakerVolume(WebRtc_UWord32& volume) const; + virtual WebRtc_Word32 MaxSpeakerVolume(WebRtc_UWord32& maxVolume) const; + virtual WebRtc_Word32 MinSpeakerVolume(WebRtc_UWord32& minVolume) const; + virtual WebRtc_Word32 SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const; + + // Microphone volume controls + virtual WebRtc_Word32 MicrophoneVolumeIsAvailable(bool& available); + virtual WebRtc_Word32 SetMicrophoneVolume(WebRtc_UWord32 volume); + virtual WebRtc_Word32 MicrophoneVolume(WebRtc_UWord32& volume) const; + virtual WebRtc_Word32 MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const; + virtual WebRtc_Word32 MinMicrophoneVolume(WebRtc_UWord32& minVolume) const; + virtual WebRtc_Word32 + MicrophoneVolumeStepSize(WebRtc_UWord16& stepSize) const; + + // Microphone mute control + virtual WebRtc_Word32 MicrophoneMuteIsAvailable(bool& available); + virtual WebRtc_Word32 SetMicrophoneMute(bool enable); + virtual WebRtc_Word32 MicrophoneMute(bool& enabled) const; + + // Speaker mute control + virtual WebRtc_Word32 SpeakerMuteIsAvailable(bool& available); + virtual WebRtc_Word32 SetSpeakerMute(bool enable); + virtual WebRtc_Word32 SpeakerMute(bool& enabled) const; + + // Microphone boost control + virtual WebRtc_Word32 MicrophoneBoostIsAvailable(bool& available); + virtual WebRtc_Word32 SetMicrophoneBoost(bool enable); + virtual WebRtc_Word32 MicrophoneBoost(bool& enabled) const; + + // Stereo support + virtual WebRtc_Word32 StereoPlayoutIsAvailable(bool& available); + virtual WebRtc_Word32 SetStereoPlayout(bool enable); + virtual WebRtc_Word32 StereoPlayout(bool& enabled) const; + virtual WebRtc_Word32 StereoRecordingIsAvailable(bool& available); + virtual WebRtc_Word32 SetStereoRecording(bool enable); + virtual 
WebRtc_Word32 StereoRecording(bool& enabled) const;
+
+    // Delay information and control
+    virtual WebRtc_Word32
+        SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+                         WebRtc_UWord16 sizeMS);
+    virtual WebRtc_Word32 PlayoutBuffer(AudioDeviceModule::BufferType& type,
+                                        WebRtc_UWord16& sizeMS) const;
+    virtual WebRtc_Word32 PlayoutDelay(WebRtc_UWord16& delayMS) const;
+    virtual WebRtc_Word32 RecordingDelay(WebRtc_UWord16& delayMS) const;
+
+    // CPU load
+    virtual WebRtc_Word32 CPULoad(WebRtc_UWord16& load) const;
+
+public:
+    virtual bool PlayoutWarning() const;
+    virtual bool PlayoutError() const;
+    virtual bool RecordingWarning() const;
+    virtual bool RecordingError() const;
+    virtual void ClearPlayoutWarning();
+    virtual void ClearPlayoutError();
+    virtual void ClearRecordingWarning();
+    virtual void ClearRecordingError();
+
+public:
+    virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+private:
+    void Lock()
+    {
+        _critSect.Enter();
+    }
+    void UnLock()
+    {
+        _critSect.Leave();
+    }
+    WebRtc_Word32 Id()
+    {
+        return _id;
+    }
+
+    static void AtomicSet32(int32_t* theValue, int32_t newValue);
+    static int32_t AtomicGet32(int32_t* theValue);
+
+    static void logCAMsg(const TraceLevel level,
+                         const TraceModule module,
+                         const WebRtc_Word32 id, const char *msg,
+                         const char *err);
+
+    WebRtc_Word32 GetNumberDevices(const AudioObjectPropertyScope scope,
+                                   AudioDeviceID scopedDeviceIds[],
+                                   const WebRtc_UWord32 deviceListLength);
+
+    WebRtc_Word32 GetDeviceName(const AudioObjectPropertyScope scope,
+                                const WebRtc_UWord16 index, char* name);
+
+    WebRtc_Word32 InitDevice(WebRtc_UWord16 userDeviceIndex,
+                             AudioDeviceID& deviceId, bool isInput);
+
+    static OSStatus
+        objectListenerProc(AudioObjectID objectId, UInt32 numberAddresses,
+                           const AudioObjectPropertyAddress addresses[],
+                           void* clientData);
+
+    OSStatus
+        implObjectListenerProc(AudioObjectID objectId, UInt32 numberAddresses,
+                               const AudioObjectPropertyAddress addresses[]);
+
+    WebRtc_Word32 HandleDeviceChange();
+
+    WebRtc_Word32
+        HandleStreamFormatChange(AudioObjectID objectId,
+                                 AudioObjectPropertyAddress propertyAddress);
+
+    WebRtc_Word32
+        HandleDataSourceChange(AudioObjectID objectId,
+                               AudioObjectPropertyAddress propertyAddress);
+
+    WebRtc_Word32
+        HandleProcessorOverload(AudioObjectPropertyAddress propertyAddress);
+
+private:
+    static OSStatus deviceIOProc(AudioDeviceID device,
+                                 const AudioTimeStamp *now,
+                                 const AudioBufferList *inputData,
+                                 const AudioTimeStamp *inputTime,
+                                 AudioBufferList *outputData,
+                                 const AudioTimeStamp* outputTime,
+                                 void *clientData);
+
+    static OSStatus
+        outConverterProc(AudioConverterRef audioConverter,
+                         UInt32 *numberDataPackets, AudioBufferList *data,
+                         AudioStreamPacketDescription **dataPacketDescription,
+                         void *userData);
+
+    static OSStatus inDeviceIOProc(AudioDeviceID device,
+                                   const AudioTimeStamp *now,
+                                   const AudioBufferList *inputData,
+                                   const AudioTimeStamp *inputTime,
+                                   AudioBufferList *outputData,
+                                   const AudioTimeStamp *outputTime,
+                                   void *clientData);
+
+    static OSStatus
+        inConverterProc(AudioConverterRef audioConverter,
+                        UInt32 *numberDataPackets, AudioBufferList *data,
+                        AudioStreamPacketDescription **dataPacketDescription,
+                        void *inUserData);
+
+    OSStatus implDeviceIOProc(const AudioBufferList *inputData,
+                              const AudioTimeStamp *inputTime,
+                              AudioBufferList *outputData,
+                              const AudioTimeStamp *outputTime);
+
+    OSStatus implOutConverterProc(UInt32 *numberDataPackets,
+                                  AudioBufferList *data);
+
+    OSStatus implInDeviceIOProc(const AudioBufferList *inputData,
+                                const AudioTimeStamp *inputTime);
+
+    OSStatus implInConverterProc(UInt32 *numberDataPackets,
+                                 AudioBufferList *data);
+
+    static bool RunCapture(void*);
+    static bool RunRender(void*);
+    bool CaptureWorkerThread();
+    bool RenderWorkerThread();
+
+private:
+    AudioDeviceBuffer* _ptrAudioBuffer;
+
+    CriticalSectionWrapper& _critSect;
+    CriticalSectionWrapper& _critSectCb;
+
+    EventWrapper& _stopEventRec;
+    EventWrapper& _stopEvent;
+
+    ThreadWrapper* _captureWorkerThread;
+    ThreadWrapper* _renderWorkerThread;
+    WebRtc_UWord32 _captureWorkerThreadId;
+    WebRtc_UWord32 _renderWorkerThreadId;
+
+    WebRtc_Word32 _id;
+
+    AudioMixerManagerMac _mixerManager;
+
+    WebRtc_UWord16 _inputDeviceIndex;
+    WebRtc_UWord16 _outputDeviceIndex;
+    AudioDeviceID _inputDeviceID;
+    AudioDeviceID _outputDeviceID;
+#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050
+    AudioDeviceIOProcID _inDeviceIOProcID;
+    AudioDeviceIOProcID _deviceIOProcID;
+#endif
+    bool _inputDeviceIsSpecified;
+    bool _outputDeviceIsSpecified;
+
+    WebRtc_UWord8 _recChannels;
+    WebRtc_UWord8 _playChannels;
+
+    Float32* _captureBufData;
+    SInt16* _renderBufData;
+
+    SInt16 _renderConvertData[PLAY_BUF_SIZE_IN_SAMPLES];
+
+    AudioDeviceModule::BufferType _playBufType;
+
+private:
+    bool _initialized;
+    bool _isShutDown;
+    bool _recording;
+    bool _playing;
+    bool _recIsInitialized;
+    bool _playIsInitialized;
+    bool _startRec;
+    bool _stopRec;
+    bool _stopPlay;
+    bool _AGC;
+
+    // Atomically set variables
+    int32_t _renderDeviceIsAlive;
+    int32_t _captureDeviceIsAlive;
+
+    bool _twoDevices;
+    bool _doStop; // For play if not shared device or play+rec if shared device
+    bool _doStopRec; // For rec if not shared device
+    bool _macBookPro;
+    bool _macBookProPanRight;
+    bool _stereoRender;
+    bool _stereoRenderRequested;
+
+    AudioConverterRef _captureConverter;
+    AudioConverterRef _renderConverter;
+
+    AudioStreamBasicDescription _outStreamFormat;
+    AudioStreamBasicDescription _outDesiredFormat;
+    AudioStreamBasicDescription _inStreamFormat;
+    AudioStreamBasicDescription _inDesiredFormat;
+
+    WebRtc_UWord32 _captureLatencyUs;
+    WebRtc_UWord32 _renderLatencyUs;
+
+    // Atomically set variables
+    mutable int32_t _captureDelayUs;
+    mutable int32_t _renderDelayUs;
+
+    WebRtc_Word32 _renderDelayOffsetSamples;
+
+private:
+    WebRtc_UWord16 _playBufDelay; // playback delay
+    WebRtc_UWord16 _playBufDelayFixed; // fixed playback delay
+
+    WebRtc_UWord16 _playWarning;
+    WebRtc_UWord16 _playError;
+    WebRtc_UWord16 _recWarning;
+    WebRtc_UWord16 _recError;
+
+    PaUtilRingBuffer* _paCaptureBuffer;
+    PaUtilRingBuffer* _paRenderBuffer;
+
+    semaphore_t _renderSemaphore;
+    semaphore_t _captureSemaphore;
+
+    WebRtc_UWord32 _captureBufSizeSamples;
+    WebRtc_UWord32 _renderBufSizeSamples;
+};
+
+} // namespace webrtc
+
+#endif  // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_MAC_H
diff --git a/src/modules/audio_device/main/source/mac/audio_device_utility_mac.h b/src/modules/audio_device/main/source/mac/audio_device_utility_mac.h
new file mode 100644
index 000000000..ccb3d9986
--- /dev/null
+++ b/src/modules/audio_device/main/source/mac/audio_device_utility_mac.h
@@ -0,0 +1,37 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_MAC_H
+#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_MAC_H
+
+#include "audio_device_utility.h"
+#include "audio_device.h"
+
+namespace webrtc
+{
+class CriticalSectionWrapper;
+
+class AudioDeviceUtilityMac: public AudioDeviceUtility
+{
+public:
+    AudioDeviceUtilityMac(const WebRtc_Word32 id);
+    ~AudioDeviceUtilityMac();
+
+    virtual WebRtc_Word32 Init();
+
+private:
+    CriticalSectionWrapper& _critSect;
+    WebRtc_Word32 _id;
+    AudioDeviceModule::ErrorCode _lastError;
+};
+
+} // namespace webrtc
+
+#endif  // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_MAC_H
diff --git a/src/modules/audio_device/main/source/mac/audio_mixer_manager_mac.cc b/src/modules/audio_device/main/source/mac/audio_mixer_manager_mac.cc
new file mode 100644
index 000000000..5f70c4082
--- /dev/null
+++ b/src/modules/audio_device/main/source/mac/audio_mixer_manager_mac.cc
@@ -0,0 +1,1188 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_mixer_manager_mac.h"
+#include "trace.h"
+
+#include <unistd.h>  // getpid()
+
+namespace webrtc {
+
+#define WEBRTC_CA_RETURN_ON_ERR(expr) \
+    do { \
+        err = expr; \
+        if (err != noErr) { \
+            logCAMsg(kTraceError, kTraceAudioDevice, _id, \
+                     "Error in " #expr, (const char *)&err); \
+            return -1; \
+        } \
+    } while(0)
+
+#define WEBRTC_CA_LOG_ERR(expr) \
+    do { \
+        err = expr; \
+        if (err != noErr) { \
+            logCAMsg(kTraceError, kTraceAudioDevice, _id, \
+                     "Error in " #expr, (const char *)&err); \
+        } \
+    } while(0)
+
+#define WEBRTC_CA_LOG_WARN(expr) \
+    do { \
+        err = expr; \
+        if (err != noErr) { \
+            logCAMsg(kTraceWarning, kTraceAudioDevice, _id, \
+                     "Error in " #expr, (const char *)&err); \
+        } \
+    } while(0)
+
+AudioMixerManagerMac::AudioMixerManagerMac(const WebRtc_Word32 id) :
+    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _id(id),
+    _inputDeviceID(kAudioObjectUnknown),
+    _outputDeviceID(kAudioObjectUnknown),
+    _noInputChannels(0),
+    _noOutputChannels(0)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
+                 "%s constructed", __FUNCTION__);
+}
+
+AudioMixerManagerMac::~AudioMixerManagerMac()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
+                 "%s destructed", __FUNCTION__);
+
+    Close();
+
+    delete &_critSect;
+}
+
+// ============================================================================
+//                               PUBLIC METHODS
+// ============================================================================
+
+WebRtc_Word32 AudioMixerManagerMac::Close()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
+                 __FUNCTION__);
+
+    CriticalSectionScoped lock(_critSect);
+
+    CloseSpeaker();
+    CloseMicrophone();
+
+    return 0;
+}
+
+WebRtc_Word32 AudioMixerManagerMac::CloseSpeaker()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
+                 __FUNCTION__);
+
+    CriticalSectionScoped lock(_critSect);
+
+    _outputDeviceID = kAudioObjectUnknown;
+    _noOutputChannels = 0;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioMixerManagerMac::CloseMicrophone()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
+                 __FUNCTION__);
+
+    CriticalSectionScoped lock(_critSect);
+
+    _inputDeviceID = kAudioObjectUnknown;
+    _noInputChannels = 0;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioMixerManagerMac::OpenSpeaker(AudioDeviceID deviceID)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 "AudioMixerManagerMac::OpenSpeaker(id=%d)", deviceID);
+
+    CriticalSectionScoped lock(_critSect);
+
+    OSStatus err = noErr;
+    UInt32 size = 0;
+    pid_t hogPid = -1;
+
+    _outputDeviceID = deviceID;
+
+    // Check which process, if any, has hogged the device.
+    AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyHogMode,
+            kAudioDevicePropertyScopeOutput, 0 };
+
+    size = sizeof(hogPid);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
+            &propertyAddress, 0, NULL, &size, &hogPid));
+
+    if (hogPid == -1)
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                     " No process has hogged the output device");
+    }
+    // getpid() is apparently "always successful"
+    else if (hogPid == getpid())
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                     " Our process has hogged the output device");
+    } else
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " Another process (pid = %d) has hogged the output device",
+                     static_cast<int> (hogPid));
+
+        return -1;
+    }
+
+    // Get the stream format, to be able to read the number of channels.
+    propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+
+    AudioStreamBasicDescription streamFormat;
+    size = sizeof(AudioStreamBasicDescription);
+    memset(&streamFormat, 0, size);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
+            &propertyAddress, 0, NULL, &size, &streamFormat));
+
+    _noOutputChannels = streamFormat.mChannelsPerFrame;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioMixerManagerMac::OpenMicrophone(AudioDeviceID deviceID)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 "AudioMixerManagerMac::OpenMicrophone(id=%d)", deviceID);
+
+    CriticalSectionScoped lock(_critSect);
+
+    OSStatus err = noErr;
+    UInt32 size = 0;
+    pid_t hogPid = -1;
+
+    _inputDeviceID = deviceID;
+
+    // Check which process, if any, has hogged the device.
+    AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyHogMode,
+            kAudioDevicePropertyScopeInput, 0 };
+    size = sizeof(hogPid);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
+            &propertyAddress, 0, NULL, &size, &hogPid));
+    if (hogPid == -1)
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                     " No process has hogged the input device");
+    }
+    // getpid() is apparently "always successful"
+    else if (hogPid == getpid())
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                     " Our process has hogged the input device");
+    } else
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " Another process (pid = %d) has hogged the input device",
+                     static_cast<int> (hogPid));
+
+        return -1;
+    }
+
+    propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+
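+    // [Editor's note] kAudioDevicePropertyHogMode reports the pid of the
+    // process holding exclusive ("hog") access to the device, or -1 when no
+    // process does. A device hogged by a foreign process cannot be shared,
+    // which is why OpenSpeaker()/OpenMicrophone() give up with -1 above.
+
+    // Get the stream format, to be able to read the number of channels.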
+    AudioStreamBasicDescription streamFormat;
+    size = sizeof(AudioStreamBasicDescription);
+    memset(&streamFormat, 0, size);
+    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
+            &propertyAddress, 0, NULL, &size, &streamFormat));
+
+    _noInputChannels = streamFormat.mChannelsPerFrame;
+
+    return 0;
+}
+
+bool AudioMixerManagerMac::SpeakerIsInitialized() const
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s",
+                 __FUNCTION__);
+
+    return (_outputDeviceID != kAudioObjectUnknown);
+}
+
+bool AudioMixerManagerMac::MicrophoneIsInitialized() const
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s",
+                 __FUNCTION__);
+
+    return (_inputDeviceID != kAudioObjectUnknown);
+}
+
+WebRtc_Word32 AudioMixerManagerMac::SetSpeakerVolume(WebRtc_UWord32 volume)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 "AudioMixerManagerMac::SetSpeakerVolume(volume=%u)", volume);
+
+    CriticalSectionScoped lock(_critSect);
+
+    if (_outputDeviceID == kAudioObjectUnknown)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " device ID has not been set");
+        return -1;
+    }
+
+    OSStatus err = noErr;
+    UInt32 size = 0;
+    bool success = false;
+
+    // CoreAudio volume range is 0.0 - 1.0; convert from our 0 - 255 scale.
+    const Float32 vol = (Float32)(volume / 255.0);
+
+    assert(vol <= 1.0 && vol >= 0.0);
+
+    // Does the render device have a master volume control?
+    // If so, use it exclusively.
+    AudioObjectPropertyAddress propertyAddress = {
+            kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput,
+            0 };
+    Boolean isSettable = false;
+    err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+                                        &isSettable);
+    if (err == noErr && isSettable)
+    {
+        size = sizeof(vol);
+        WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID,
+                &propertyAddress, 0, NULL, size, &vol));
+
+        return 0;
+    }
+
+    // Otherwise try to set each channel.
+    for (UInt32 i = 1; i <= _noOutputChannels; i++)
+    {
+        propertyAddress.mElement = i;
+        isSettable = false;
+        err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+                                            &isSettable);
+        if (err == noErr && isSettable)
+        {
+            size = sizeof(vol);
+            WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID,
+                    &propertyAddress, 0, NULL, size, &vol));
+            success = true;
+        }
+    }
+
+    if (!success)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " Unable to set a volume on any output channel");
+        return -1;
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 AudioMixerManagerMac::SpeakerVolume(WebRtc_UWord32& volume) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    if (_outputDeviceID == kAudioObjectUnknown)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " device ID has not been set");
+        return -1;
+    }
+
+    OSStatus err = noErr;
+    UInt32 size = 0;
+    unsigned int channels = 0;
+    Float32 channelVol = 0;
+    Float32 vol = 0;
+
+    // Does the device have a master volume control?
+    // If so, use it exclusively.
+    AudioObjectPropertyAddress propertyAddress = {
+            kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput,
+            0 };
+    Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID,
+                                                 &propertyAddress);
+    if (hasProperty)
+    {
+        size = sizeof(vol);
+        WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
+                &propertyAddress, 0, NULL, &size, &vol));
+
+        // vol 0.0 to 1.0 -> convert to 0 - 255
+        volume = static_cast<WebRtc_UWord32> (vol * 255 + 0.5);
+    } else
+    {
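+        // [Editor's note] The module-facing volume scale is 0 - 255 while
+        // CoreAudio uses a Float32 in 0.0 - 1.0. E.g. setting 128 writes
+        // 128 / 255.0 = 0.502, and reading it back yields
+        // 0.502 * 255 + 0.5 = 128 again; the +0.5 rounds to nearest.
+
+        // Otherwise get the average volume across channels.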
+        vol = 0;
+        for (UInt32 i = 1; i <= _noOutputChannels; i++)
+        {
+            channelVol = 0;
+            propertyAddress.mElement = i;
+            hasProperty = AudioObjectHasProperty(_outputDeviceID,
+                                                 &propertyAddress);
+            if (hasProperty)
+            {
+                size = sizeof(channelVol);
+                WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
+                        &propertyAddress, 0, NULL, &size, &channelVol));
+
+                vol += channelVol;
+                channels++;
+            }
+        }
+
+        if (channels == 0)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                         " Unable to get a volume on any channel");
+            return -1;
+        }
+
+        assert(channels > 0);
+        // vol 0.0 to 1.0 -> convert to 0 - 255
+        volume = static_cast<WebRtc_UWord32> (255 * vol / channels + 0.5);
+    }
+
+    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 " AudioMixerManagerMac::SpeakerVolume() => vol=%u", volume);
+
+    return 0;
+}
+
+WebRtc_Word32
+AudioMixerManagerMac::MaxSpeakerVolume(WebRtc_UWord32& maxVolume) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    if (_outputDeviceID == kAudioObjectUnknown)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " device ID has not been set");
+        return -1;
+    }
+
+    // volume range is 0.0 to 1.0
+    // we convert that to 0 - 255
+    maxVolume = 255;
+
+    return 0;
+}
+
+WebRtc_Word32
+AudioMixerManagerMac::MinSpeakerVolume(WebRtc_UWord32& minVolume) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    if (_outputDeviceID == kAudioObjectUnknown)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " device ID has not been set");
+        return -1;
+    }
+
+    // volume range is 0.0 to 1.0
+    // we convert that to 0 - 255
+    minVolume = 0;
+
+    return 0;
+}
+
+WebRtc_Word32
+AudioMixerManagerMac::SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const
+{
+    WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
+                 "%s", __FUNCTION__);
+
+    if (_outputDeviceID == kAudioObjectUnknown)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " device ID has not been set");
+        return -1;
+    }
+
+    // volume range is 0.0 to 1.0
+    // we convert that to 0 - 255
+    stepSize = 1;
+
+    return 0;
+}
+
+WebRtc_Word32 AudioMixerManagerMac::SpeakerVolumeIsAvailable(bool& available)
+{
+    if (_outputDeviceID == kAudioObjectUnknown)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                     " device ID has not been set");
+        return -1;
+    }
+
+    OSStatus err = noErr;
+
+    // Does the render device have a master volume control?
+    // If so, use it exclusively.
+    AudioObjectPropertyAddress propertyAddress = {
+            kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput,
+            0 };
+    Boolean isSettable = false;
+    err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+                                        &isSettable);
+    if (err == noErr && isSettable)
+    {
+        available = true;
+        return 0;
+    }
+
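+    // [Editor's note] Availability is reported conservatively: when there is
+    // no master control, every individual output channel must be settable or
+    // the whole volume control is treated as unavailable.
+
+    // Otherwise try to set each channel.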
+ for (UInt32 i = 1; i <= _noOutputChannels; i++) + { + propertyAddress.mElement = i; + isSettable = false; + err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, + &isSettable); + if (err != noErr || !isSettable) + { + available = false; + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Volume cannot be set for output channel %d, err=%d", + i, err); + return -1; + } + } + + available = true; + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::SpeakerMuteIsAvailable(bool& available) +{ + if (_outputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + OSStatus err = noErr; + + // Does the capture device have a master mute control? + // If so, use it exclusively. + AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyMute, + kAudioDevicePropertyScopeOutput, 0 }; + Boolean isSettable = false; + err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, + &isSettable); + if (err == noErr && isSettable) + { + available = true; + return 0; + } + + // Otherwise try to set each channel. + for (UInt32 i = 1; i <= _noOutputChannels; i++) + { + propertyAddress.mElement = i; + isSettable = false; + err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, + &isSettable); + if (err != noErr || !isSettable) + { + available = false; + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Mute cannot be set for output channel %d, err=%d", + i, err); + return -1; + } + } + + available = true; + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::SetSpeakerMute(bool enable) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerMac::SetSpeakerMute(enable=%u)", enable); + + CriticalSectionScoped lock(_critSect); + + if (_outputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + OSStatus err = noErr; + UInt32 size = 0; + UInt32 mute = enable ? 1 : 0; + bool success = false; + + // Does the render device have a master mute control? + // If so, use it exclusively. + AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyMute, + kAudioDevicePropertyScopeOutput, 0 }; + Boolean isSettable = false; + err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, + &isSettable); + if (err == noErr && isSettable) + { + size = sizeof(mute); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID, + &propertyAddress, 0, NULL, size, &mute)); + + return 0; + } + + // Otherwise try to set each channel. 
+ for (UInt32 i = 1; i <= _noOutputChannels; i++) + { + propertyAddress.mElement = i; + isSettable = false; + err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, + &isSettable); + if (err == noErr && isSettable) + { + size = sizeof(mute); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID, + &propertyAddress, 0, NULL, size, &mute)); + success = true; + } + } + + if (!success) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Unable to set mute on any output channel"); + return -1; + } + + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::SpeakerMute(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_outputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + OSStatus err = noErr; + UInt32 size = 0; + unsigned int channels = 0; + UInt32 channelMuted = 0; + UInt32 muted = 0; + + // Does the device have a master mute control? + // If so, use it exclusively. + AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyMute, + kAudioDevicePropertyScopeOutput, 0 }; + Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID, + &propertyAddress); + if (hasProperty) + { + size = sizeof(muted); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID, + &propertyAddress, 0, NULL, &size, &muted)); + + // 1 means muted + enabled = static_cast<bool> (muted); + } else + { + // Otherwise check if all channels are muted. + muted = 1; + for (UInt32 i = 1; i <= _noOutputChannels; i++) + { + channelMuted = 0; + propertyAddress.mElement = i; + hasProperty = AudioObjectHasProperty(_outputDeviceID, + &propertyAddress); + if (hasProperty) + { + size = sizeof(channelMuted); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID, + &propertyAddress, 0, NULL, &size, &channelMuted)); + + muted = (muted && channelMuted); + channels++; + } + } + + if (channels == 0) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Unable to get mute for any channel"); + return -1; + } + + assert(channels > 0); + // 1 means muted + enabled = static_cast<bool> (muted); + } + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerMac::SpeakerMute() => enabled=%d", enabled); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::StereoPlayoutIsAvailable(bool& available) +{ + if (_outputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + available = (_noOutputChannels == 2); + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::StereoRecordingIsAvailable(bool& available) +{ + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + available = (_noInputChannels == 2); + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::MicrophoneMuteIsAvailable(bool& available) +{ + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + OSStatus err = noErr; + + // Does the capture device have a master mute control? + // If so, use it exclusively.
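Every availability and setter method in this file follows the same two-step pattern, which resumes below: first ask CoreAudio whether the property is usable on the master element (mElement == 0) and use that exclusively if so, otherwise fall back to the individual channel elements (1..N). A condensed sketch of the probe half of that pattern; the helper name is a hypothetical illustration, not something that exists in the WebRTC sources:

```cpp
#include <CoreAudio/CoreAudio.h>

// Hypothetical helper mirroring the master-then-per-channel probe used by
// the *IsAvailable() methods in this file.
static bool PropertyIsSettable(AudioObjectID device,
                               AudioObjectPropertySelector selector,
                               AudioObjectPropertyScope scope,
                               UInt32 numChannels)
{
    AudioObjectPropertyAddress addr = { selector, scope,
                                        kAudioObjectPropertyElementMaster };
    Boolean settable = false;
    if (AudioObjectIsPropertySettable(device, &addr, &settable) == noErr
        && settable)
    {
        return true;  // A master control exists; it is used exclusively.
    }
    // No master control: every channel (elements 1..N) must be settable.
    for (UInt32 i = 1; i <= numChannels; i++)
    {
        addr.mElement = i;
        settable = false;
        if (AudioObjectIsPropertySettable(device, &addr, &settable) != noErr
            || !settable)
        {
            return false;
        }
    }
    return true;
}
```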
+ AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyMute, + kAudioDevicePropertyScopeInput, 0 }; + Boolean isSettable = false; + err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, + &isSettable); + if (err == noErr && isSettable) + { + available = true; + return 0; + } + + // Otherwise try to set each channel. + for (UInt32 i = 1; i <= _noInputChannels; i++) + { + propertyAddress.mElement = i; + isSettable = false; + err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, + &isSettable); + if (err != noErr || !isSettable) + { + available = false; + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Mute cannot be set for input channel %d, err=%d", + i, err); + return -1; + } + } + + available = true; + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::SetMicrophoneMute(bool enable) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerMac::SetMicrophoneMute(enable=%u)", enable); + + CriticalSectionScoped lock(_critSect); + + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + OSStatus err = noErr; + UInt32 size = 0; + UInt32 mute = enable ? 1 : 0; + bool success = false; + + // Does the capture device have a master mute control? + // If so, use it exclusively. + AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyMute, + kAudioDevicePropertyScopeInput, 0 }; + Boolean isSettable = false; + err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, + &isSettable); + if (err == noErr && isSettable) + { + size = sizeof(mute); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, size, &mute)); + + return 0; + } + + // Otherwise try to set each channel. + for (UInt32 i = 1; i <= _noInputChannels; i++) + { + propertyAddress.mElement = i; + isSettable = false; + err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, + &isSettable); + if (err == noErr && isSettable) + { + size = sizeof(mute); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, size, &mute)); + success = true; + } + } + + if (!success) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Unable to set mute on any input channel"); + return -1; + } + + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::MicrophoneMute(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + OSStatus err = noErr; + UInt32 size = 0; + unsigned int channels = 0; + UInt32 channelMuted = 0; + UInt32 muted = 0; + + // Does the device have a master mute control? + // If so, use it exclusively. + AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyMute, + kAudioDevicePropertyScopeInput, 0 }; + Boolean hasProperty = AudioObjectHasProperty(_inputDeviceID, + &propertyAddress); + if (hasProperty) + { + size = sizeof(muted); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, &size, &muted)); + + // 1 means muted + enabled = static_cast<bool> (muted); + } else + { + // Otherwise check if all channels are muted.
+ muted = 1; + for (UInt32 i = 1; i <= _noInputChannels; i++) + { + channelMuted = 0; + propertyAddress.mElement = i; + hasProperty = AudioObjectHasProperty(_inputDeviceID, + &propertyAddress); + if (hasProperty) + { + size = sizeof(channelMuted); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, &size, &channelMuted)); + + muted = (muted && channelMuted); + channels++; + } + } + + if (channels == 0) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Unable to get mute for any channel"); + return -1; + } + + assert(channels > 0); + // 1 means muted + enabled = static_cast<bool> (muted); + } + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerMac::MicrophoneMute() => enabled=%d", + enabled); + + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::MicrophoneBoostIsAvailable(bool& available) +{ + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + available = false; // No AudioObjectPropertySelector value for Mic Boost + + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::SetMicrophoneBoost(bool enable) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerMac::SetMicrophoneBoost(enable=%u)", enable); + + CriticalSectionScoped lock(_critSect); + + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + // Ensure that the selected microphone has a valid boost control. + bool available(false); + MicrophoneBoostIsAvailable(available); + if (!available) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " it is not possible to enable microphone boost"); + return -1; + } + + // The availability check above always fails on this platform, + // so this point is never reached. + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::MicrophoneBoost(bool& enabled) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + // Microphone boost cannot be enabled on this platform! + enabled = false; + + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::MicrophoneVolumeIsAvailable(bool& available) +{ + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + OSStatus err = noErr; + + // Does the capture device have a master volume control? + // If so, use it exclusively. + AudioObjectPropertyAddress + propertyAddress = { kAudioDevicePropertyVolumeScalar, + kAudioDevicePropertyScopeInput, 0 }; + Boolean isSettable = false; + err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, + &isSettable); + if (err == noErr && isSettable) + { + available = true; + return 0; + } + + // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noInputChannels; i++) + { + propertyAddress.mElement = i; + isSettable = false; + err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, + &isSettable); + if (err != noErr || !isSettable) + { + available = false; + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Volume cannot be set for input channel %d, err=%d", + i, err); + return -1; + } + } + + available = true; + return 0; +} + +WebRtc_Word32 AudioMixerManagerMac::SetMicrophoneVolume(WebRtc_UWord32 volume) +{ + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + "AudioMixerManagerMac::SetMicrophoneVolume(volume=%u)", volume); + + CriticalSectionScoped lock(_critSect); + + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + OSStatus err = noErr; + UInt32 size = 0; + bool success = false; + + // volume range is 0.0 - 1.0, convert from 0 - 255 + const Float32 vol = (Float32)(volume / 255.0); + + assert(vol <= 1.0 && vol >= 0.0); + + // Does the capture device have a master volume control? + // If so, use it exclusively. + AudioObjectPropertyAddress + propertyAddress = { kAudioDevicePropertyVolumeScalar, + kAudioDevicePropertyScopeInput, 0 }; + Boolean isSettable = false; + err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, + &isSettable); + if (err == noErr && isSettable) + { + size = sizeof(vol); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, size, &vol)); + + return 0; + } + + // Otherwise try to set each channel. + for (UInt32 i = 1; i <= _noInputChannels; i++) + { + propertyAddress.mElement = i; + isSettable = false; + err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, + &isSettable); + if (err == noErr && isSettable) + { + size = sizeof(vol); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, size, &vol)); + success = true; + } + } + + if (!success) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Unable to set a level on any input channel"); + return -1; + } + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerMac::MicrophoneVolume(WebRtc_UWord32& volume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + OSStatus err = noErr; + UInt32 size = 0; + unsigned int channels = 0; + Float32 channelVol = 0; + Float32 vol = 0; + + // Does the device have a master volume control? + // If so, use it exclusively. + AudioObjectPropertyAddress + propertyAddress = { kAudioDevicePropertyVolumeScalar, + kAudioDevicePropertyScopeInput, 0 }; + Boolean hasProperty = AudioObjectHasProperty(_inputDeviceID, + &propertyAddress); + if (hasProperty) + { + size = sizeof(vol); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, &size, &vol)); + + // vol 0.0 to 1.0 -> convert to 0 - 255 + volume = static_cast<WebRtc_UWord32> (vol * 255 + 0.5); + } else + { + // Otherwise get the average volume across channels.
+ vol = 0; + for (UInt32 i = 1; i <= _noInputChannels; i++) + { + channelVol = 0; + propertyAddress.mElement = i; + hasProperty = AudioObjectHasProperty(_inputDeviceID, + &propertyAddress); + if (hasProperty) + { + size = sizeof(channelVol); + WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, + &propertyAddress, 0, NULL, &size, &channelVol)); + + vol += channelVol; + channels++; + } + } + + if (channels == 0) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " Unable to get a level on any channel"); + return -1; + } + + assert(channels > 0); + // vol 0.0 to 1.0 -> convert to 0 - 255 + volume = static_cast<WebRtc_UWord32> (255 * vol / channels + 0.5); + } + + WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, + " AudioMixerManagerMac::MicrophoneVolume() => vol=%u", + volume); + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerMac::MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + // volume range is 0.0 to 1.0 + // we convert that to 0 - 255 + maxVolume = 255; + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerMac::MinMicrophoneVolume(WebRtc_UWord32& minVolume) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + // volume range is 0.0 to 1.0 + // we convert that to 0 - 255 + minVolume = 0; + + return 0; +} + +WebRtc_Word32 +AudioMixerManagerMac::MicrophoneVolumeStepSize(WebRtc_UWord16& stepSize) const +{ + WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, + "%s", __FUNCTION__); + + if (_inputDeviceID == kAudioObjectUnknown) + { + WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, + " device ID has not been set"); + return -1; + } + + // volume range is 0.0 to 1.0 + // we convert that to 0 - 255 + stepSize = 1; + + return 0; +} + +// ============================================================================ +// Private Methods +// ============================================================================ + +// CoreAudio errors are best interpreted as four character strings. +void AudioMixerManagerMac::logCAMsg(const TraceLevel level, + const TraceModule module, + const WebRtc_Word32 id, const char *msg, + const char *err) +{ + assert(msg != NULL); + assert(err != NULL); + +#ifdef WEBRTC_BIG_ENDIAN + WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err); +#else + // We need to flip the characters in this case. + WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err + + 2, err + 1, err); +#endif +} + +} // namespace webrtc +// EOF diff --git a/src/modules/audio_device/main/source/mac/audio_mixer_manager_mac.h b/src/modules/audio_device/main/source/mac/audio_mixer_manager_mac.h new file mode 100644 index 000000000..7209f9122 --- /dev/null +++ b/src/modules/audio_device/main/source/mac/audio_mixer_manager_mac.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS.
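A brief aside on the logCAMsg() helper above: CoreAudio packs most OSStatus error codes as four ASCII characters in a 32-bit integer (kAudioHardwareUnspecifiedError, for example, is the code 'what'), and on little-endian machines the bytes come out reversed, which is why the trace flips them. A standalone sketch of the same decoding, assuming nothing beyond the C standard library and the WEBRTC_BIG_ENDIAN define used above:

```cpp
#include <cstdio>

// Illustrative only: print a CoreAudio OSStatus as its four-character code,
// e.g. kAudioHardwareUnspecifiedError prints as "what".
static void PrintFourCC(int err)
{
    const char* p = reinterpret_cast<const char*>(&err);
#ifdef WEBRTC_BIG_ENDIAN
    printf("%.4s\n", p);
#else
    printf("%c%c%c%c\n", p[3], p[2], p[1], p[0]);  // flip the byte order
#endif
}
```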
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_MIXER_MANAGER_MAC_H +#define WEBRTC_AUDIO_DEVICE_AUDIO_MIXER_MANAGER_MAC_H + +#include "typedefs.h" +#include "audio_device.h" +#include "critical_section_wrapper.h" + +#include <CoreAudio/CoreAudio.h> + +namespace webrtc { + +class AudioMixerManagerMac +{ +public: + WebRtc_Word32 OpenSpeaker(AudioDeviceID deviceID); + WebRtc_Word32 OpenMicrophone(AudioDeviceID deviceID); + WebRtc_Word32 SetSpeakerVolume(WebRtc_UWord32 volume); + WebRtc_Word32 SpeakerVolume(WebRtc_UWord32& volume) const; + WebRtc_Word32 MaxSpeakerVolume(WebRtc_UWord32& maxVolume) const; + WebRtc_Word32 MinSpeakerVolume(WebRtc_UWord32& minVolume) const; + WebRtc_Word32 SpeakerVolumeStepSize(WebRtc_UWord16& stepSize) const; + WebRtc_Word32 SpeakerVolumeIsAvailable(bool& available); + WebRtc_Word32 SpeakerMuteIsAvailable(bool& available); + WebRtc_Word32 SetSpeakerMute(bool enable); + WebRtc_Word32 SpeakerMute(bool& enabled) const; + WebRtc_Word32 StereoPlayoutIsAvailable(bool& available); + WebRtc_Word32 StereoRecordingIsAvailable(bool& available); + WebRtc_Word32 MicrophoneMuteIsAvailable(bool& available); + WebRtc_Word32 SetMicrophoneMute(bool enable); + WebRtc_Word32 MicrophoneMute(bool& enabled) const; + WebRtc_Word32 MicrophoneBoostIsAvailable(bool& available); + WebRtc_Word32 SetMicrophoneBoost(bool enable); + WebRtc_Word32 MicrophoneBoost(bool& enabled) const; + WebRtc_Word32 MicrophoneVolumeIsAvailable(bool& available); + WebRtc_Word32 SetMicrophoneVolume(WebRtc_UWord32 volume); + WebRtc_Word32 MicrophoneVolume(WebRtc_UWord32& volume) const; + WebRtc_Word32 MaxMicrophoneVolume(WebRtc_UWord32& maxVolume) const; + WebRtc_Word32 MinMicrophoneVolume(WebRtc_UWord32& minVolume) const; + WebRtc_Word32 MicrophoneVolumeStepSize(WebRtc_UWord16& stepSize) const; + WebRtc_Word32 Close(); + WebRtc_Word32 CloseSpeaker(); + WebRtc_Word32 CloseMicrophone(); + bool SpeakerIsInitialized() const; + bool MicrophoneIsInitialized() const; + +public: + AudioMixerManagerMac(const WebRtc_Word32 id); + ~AudioMixerManagerMac(); + +private: + static void logCAMsg(const TraceLevel level, + const TraceModule module, + const WebRtc_Word32 id, const char *msg, + const char *err); + +private: + CriticalSectionWrapper& _critSect; + WebRtc_Word32 _id; + + AudioDeviceID _inputDeviceID; + AudioDeviceID _outputDeviceID; + + WebRtc_UWord16 _noInputChannels; + WebRtc_UWord16 _noOutputChannels; + +}; + +} //namespace webrtc + +#endif // WEBRTC_AUDIO_DEVICE_AUDIO_MIXER_MANAGER_MAC_H diff --git a/src/modules/audio_device/main/source/mac/portaudio/pa_memorybarrier.h b/src/modules/audio_device/main/source/mac/portaudio/pa_memorybarrier.h new file mode 100644 index 000000000..f68962220 --- /dev/null +++ b/src/modules/audio_device/main/source/mac/portaudio/pa_memorybarrier.h @@ -0,0 +1,127 @@ +/* + * $Id: pa_memorybarrier.h 1240 2007-07-17 13:05:07Z bjornroche $ + * Portable Audio I/O Library + * Memory barrier utilities + * + * Author: Bjorn Roche, XO Audio, LLC + * + * This program uses the PortAudio Portable Audio Library.
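The header above shows the full mixer interface. A hypothetical call sequence against it, sketched only to show the open/probe/set/close flow; the include path and the way deviceID is obtained are assumptions, not taken from the WebRTC sources:

```cpp
#include <CoreAudio/CoreAudio.h>
#include "audio_mixer_manager_mac.h"  // include path is illustrative

// Set the output volume to mid-scale if the device exposes a volume control.
// A real caller obtains deviceID from CoreAudio device enumeration.
void ExampleSetOutputVolume(AudioDeviceID deviceID)
{
    webrtc::AudioMixerManagerMac mixer(/*id=*/0);
    if (mixer.OpenSpeaker(deviceID) != 0)
        return;

    bool available = false;
    if (mixer.SpeakerVolumeIsAvailable(available) == 0 && available)
        mixer.SetSpeakerVolume(128);  // mid-scale in the module's 0-255 range

    mixer.CloseSpeaker();
}
```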
+ * For more information see: http://www.portaudio.com + * Copyright (c) 1999-2000 Ross Bencina and Phil Burk + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF + * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * The text above constitutes the entire PortAudio license; however, + * the PortAudio community also makes the following non-binding requests: + * + * Any person wishing to distribute modifications to the Software is + * requested to send the modifications to the original developer so that + * they can be incorporated into the canonical version. It is also + * requested that these non-binding requests be included along with the + * license above. + */ + +/** + @file pa_memorybarrier.h + @ingroup common_src +*/ + +/**************** + * Some memory barrier primitives based on the system. + * right now only OS X, FreeBSD, and Linux are supported. In addition to providing + * memory barriers, these functions should ensure that data cached in registers + * is written out to cache where it can be snooped by other CPUs. (ie, the volatile + * keyword should not be required) + * + * the primitives that must be defined are: + * + * PaUtil_FullMemoryBarrier() + * PaUtil_ReadMemoryBarrier() + * PaUtil_WriteMemoryBarrier() + * + ****************/ + +#if defined(__APPLE__) +# include <libkern/OSAtomic.h> + /* Here are the memory barrier functions. Mac OS X only provides + full memory barriers, so the three types of barriers are the same, + however, these barriers are superior to compiler-based ones. */ +# define PaUtil_FullMemoryBarrier() OSMemoryBarrier() +# define PaUtil_ReadMemoryBarrier() OSMemoryBarrier() +# define PaUtil_WriteMemoryBarrier() OSMemoryBarrier() +#elif defined(__GNUC__) + /* GCC >= 4.1 has built-in intrinsics. We'll use those */ +# if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) +# define PaUtil_FullMemoryBarrier() __sync_synchronize() +# define PaUtil_ReadMemoryBarrier() __sync_synchronize() +# define PaUtil_WriteMemoryBarrier() __sync_synchronize() + /* as a fallback, GCC understands volatile asm and "memory" to mean it + * should not reorder memory read/writes */ + /* Note that it is not clear that any compiler actually defines __PPC__, + * it can probably be removed safely.
*/ +# elif defined( __ppc__ ) || defined( __powerpc__) || defined( __PPC__ ) +# define PaUtil_FullMemoryBarrier() asm volatile("sync":::"memory") +# define PaUtil_ReadMemoryBarrier() asm volatile("sync":::"memory") +# define PaUtil_WriteMemoryBarrier() asm volatile("sync":::"memory") +# elif defined( __i386__ ) || defined( __i486__ ) || defined( __i586__ ) || \ + defined( __i686__ ) || defined( __x86_64__ ) +# define PaUtil_FullMemoryBarrier() asm volatile("mfence":::"memory") +# define PaUtil_ReadMemoryBarrier() asm volatile("lfence":::"memory") +# define PaUtil_WriteMemoryBarrier() asm volatile("sfence":::"memory") +# else +# ifdef ALLOW_SMP_DANGERS +# warning Memory barriers not defined on this system or system unknown +# warning For SMP safety, you should fix this. +# define PaUtil_FullMemoryBarrier() +# define PaUtil_ReadMemoryBarrier() +# define PaUtil_WriteMemoryBarrier() +# else +# error Memory barriers are not defined on this system. You can still compile by defining ALLOW_SMP_DANGERS, but SMP safety will not be guaranteed. +# endif +# endif +#elif (_MSC_VER >= 1400) && !defined(_WIN32_WCE) +# include <intrin.h> +# pragma intrinsic(_ReadWriteBarrier) +# pragma intrinsic(_ReadBarrier) +# pragma intrinsic(_WriteBarrier) +# define PaUtil_FullMemoryBarrier() _ReadWriteBarrier() +# define PaUtil_ReadMemoryBarrier() _ReadBarrier() +# define PaUtil_WriteMemoryBarrier() _WriteBarrier() +#elif defined(_WIN32_WCE) +# define PaUtil_FullMemoryBarrier() +# define PaUtil_ReadMemoryBarrier() +# define PaUtil_WriteMemoryBarrier() +#elif defined(_MSC_VER) || defined(__BORLANDC__) +# define PaUtil_FullMemoryBarrier() _asm { lock add [esp], 0 } +# define PaUtil_ReadMemoryBarrier() _asm { lock add [esp], 0 } +# define PaUtil_WriteMemoryBarrier() _asm { lock add [esp], 0 } +#else +# ifdef ALLOW_SMP_DANGERS +# warning Memory barriers not defined on this system or system unknown +# warning For SMP safety, you should fix this. +# define PaUtil_FullMemoryBarrier() +# define PaUtil_ReadMemoryBarrier() +# define PaUtil_WriteMemoryBarrier() +# else +# error Memory barriers are not defined on this system. You can still compile by defining ALLOW_SMP_DANGERS, but SMP safety will not be guaranteed. +# endif +#endif diff --git a/src/modules/audio_device/main/source/mac/portaudio/pa_ringbuffer.c b/src/modules/audio_device/main/source/mac/portaudio/pa_ringbuffer.c new file mode 100644 index 000000000..310d719f2 --- /dev/null +++ b/src/modules/audio_device/main/source/mac/portaudio/pa_ringbuffer.c @@ -0,0 +1,227 @@ +/* + * $Id: pa_ringbuffer.c 1421 2009-11-18 16:09:05Z bjornroche $ + * Portable Audio I/O Library + * Ring Buffer utility. + * + * Author: Phil Burk, http://www.softsynth.com + * modified for SMP safety on Mac OS X by Bjorn Roche + * modified for SMP safety on Linux by Leland Lucius + * also, allowed for const where possible + * modified for multiple-byte-sized data elements by Sven Fischer + * + * Note that this is safe only for a single-thread reader and a + * single-thread writer. + * + * This program uses the PortAudio Portable Audio Library.
+ * For more information see: http://www.portaudio.com + * Copyright (c) 1999-2000 Ross Bencina and Phil Burk + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF + * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * The text above constitutes the entire PortAudio license; however, + * the PortAudio community also makes the following non-binding requests: + * + * Any person wishing to distribute modifications to the Software is + * requested to send the modifications to the original developer so that + * they can be incorporated into the canonical version. It is also + * requested that these non-binding requests be included along with the + * license above. + */ + +/** + @file + @ingroup common_src +*/ + +#include <stdio.h> +#include <stdlib.h> +#include <math.h> +#include "pa_ringbuffer.h" +#include <string.h> +#include "pa_memorybarrier.h" + +/*************************************************************************** + * Initialize FIFO. + * elementCount must be power of 2, returns -1 if not. + */ +ring_buffer_size_t PaUtil_InitializeRingBuffer( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementSizeBytes, ring_buffer_size_t elementCount, void *dataPtr ) +{ + if( ((elementCount-1) & elementCount) != 0) return -1; /* Not Power of two. */ + rbuf->bufferSize = elementCount; + rbuf->buffer = (char *)dataPtr; + PaUtil_FlushRingBuffer( rbuf ); + rbuf->bigMask = (elementCount*2)-1; + rbuf->smallMask = (elementCount)-1; + rbuf->elementSizeBytes = elementSizeBytes; + return 0; +} + +/*************************************************************************** +** Return number of elements available for reading. */ +ring_buffer_size_t PaUtil_GetRingBufferReadAvailable( PaUtilRingBuffer *rbuf ) +{ + PaUtil_ReadMemoryBarrier(); + return ( (rbuf->writeIndex - rbuf->readIndex) & rbuf->bigMask ); +} +/*************************************************************************** +** Return number of elements available for writing. */ +ring_buffer_size_t PaUtil_GetRingBufferWriteAvailable( PaUtilRingBuffer *rbuf ) +{ + /* Since we are calling PaUtil_GetRingBufferReadAvailable, we don't need an additional MB */ + return ( rbuf->bufferSize - PaUtil_GetRingBufferReadAvailable(rbuf)); +} + +/*************************************************************************** +** Clear buffer. Should only be called when buffer is NOT being read.
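The power-of-two requirement checked by PaUtil_InitializeRingBuffer() above is what makes the two masks work: smallMask (N-1) folds an index into the buffer, while bigMask (2N-1) keeps indices modulo 2N, so the extra bit distinguishes a full buffer from an empty one without a separate element count. A self-checking sketch with illustrative numbers (not part of the PortAudio sources):

```cpp
#include <cassert>

int main()
{
    const int N = 8;                     // elementCount, a power of two
    const int bigMask = 2 * N - 1;       // 15: wraps indices modulo 2N

    int readIndex = 0;
    int writeIndex = N;                  // N elements written, none read
    assert(((writeIndex - readIndex) & bigMask) == N);  // full, not empty

    writeIndex = 0;                      // nothing written yet
    assert(((writeIndex - readIndex) & bigMask) == 0);  // empty
    return 0;
}
```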
*/ +void PaUtil_FlushRingBuffer( PaUtilRingBuffer *rbuf ) +{ + rbuf->writeIndex = rbuf->readIndex = 0; +} + +/*************************************************************************** +** Get address of region(s) to which we can write data. +** If the region is contiguous, size2 will be zero. +** If non-contiguous, size2 will be the size of second region. +** Returns room available to be written or elementCount, whichever is smaller. +*/ +ring_buffer_size_t PaUtil_GetRingBufferWriteRegions( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount, + void **dataPtr1, ring_buffer_size_t *sizePtr1, + void **dataPtr2, ring_buffer_size_t *sizePtr2 ) +{ + ring_buffer_size_t index; + ring_buffer_size_t available = PaUtil_GetRingBufferWriteAvailable( rbuf ); + if( elementCount > available ) elementCount = available; + /* Check to see if write is not contiguous. */ + index = rbuf->writeIndex & rbuf->smallMask; + if( (index + elementCount) > rbuf->bufferSize ) + { + /* Write data in two blocks that wrap the buffer. */ + ring_buffer_size_t firstHalf = rbuf->bufferSize - index; + *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes]; + *sizePtr1 = firstHalf; + *dataPtr2 = &rbuf->buffer[0]; + *sizePtr2 = elementCount - firstHalf; + } + else + { + *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes]; + *sizePtr1 = elementCount; + *dataPtr2 = NULL; + *sizePtr2 = 0; + } + return elementCount; +} + + +/*************************************************************************** +*/ +ring_buffer_size_t PaUtil_AdvanceRingBufferWriteIndex( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount ) +{ + /* we need to ensure that previous writes are seen before we update the write index */ + PaUtil_WriteMemoryBarrier(); + return rbuf->writeIndex = (rbuf->writeIndex + elementCount) & rbuf->bigMask; +} + +/*************************************************************************** +** Get address of region(s) from which we can read data. +** If the region is contiguous, size2 will be zero. +** If non-contiguous, size2 will be the size of second region. +** Returns number of elements available to be read or elementCount, whichever is smaller. +*/ +ring_buffer_size_t PaUtil_GetRingBufferReadRegions( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount, + void **dataPtr1, ring_buffer_size_t *sizePtr1, + void **dataPtr2, ring_buffer_size_t *sizePtr2 ) +{ + ring_buffer_size_t index; + ring_buffer_size_t available = PaUtil_GetRingBufferReadAvailable( rbuf ); + if( elementCount > available ) elementCount = available; + /* Check to see if read is not contiguous. */ + index = rbuf->readIndex & rbuf->smallMask; + if( (index + elementCount) > rbuf->bufferSize ) + { + /* Read data from two blocks that wrap the buffer. */ + ring_buffer_size_t firstHalf = rbuf->bufferSize - index; + *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes]; + *sizePtr1 = firstHalf; + *dataPtr2 = &rbuf->buffer[0]; + *sizePtr2 = elementCount - firstHalf; + } + else + { + *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes]; + *sizePtr1 = elementCount; + *dataPtr2 = NULL; + *sizePtr2 = 0; + } + return elementCount; +} +/*************************************************************************** +*/ +ring_buffer_size_t PaUtil_AdvanceRingBufferReadIndex( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount ) +{ + /* we need to ensure that previous writes are always seen before updating the index.
*/ + PaUtil_WriteMemoryBarrier(); + return rbuf->readIndex = (rbuf->readIndex + elementCount) & rbuf->bigMask; +} + +/*************************************************************************** +** Return elements written. */ +ring_buffer_size_t PaUtil_WriteRingBuffer( PaUtilRingBuffer *rbuf, const void *data, ring_buffer_size_t elementCount ) +{ + ring_buffer_size_t size1, size2, numWritten; + void *data1, *data2; + numWritten = PaUtil_GetRingBufferWriteRegions( rbuf, elementCount, &data1, &size1, &data2, &size2 ); + if( size2 > 0 ) + { + + memcpy( data1, data, size1*rbuf->elementSizeBytes ); + data = ((char *)data) + size1*rbuf->elementSizeBytes; + memcpy( data2, data, size2*rbuf->elementSizeBytes ); + } + else + { + memcpy( data1, data, size1*rbuf->elementSizeBytes ); + } + PaUtil_AdvanceRingBufferWriteIndex( rbuf, numWritten ); + return numWritten; +} + +/*************************************************************************** +** Return elements read. */ +ring_buffer_size_t PaUtil_ReadRingBuffer( PaUtilRingBuffer *rbuf, void *data, ring_buffer_size_t elementCount ) +{ + ring_buffer_size_t size1, size2, numRead; + void *data1, *data2; + numRead = PaUtil_GetRingBufferReadRegions( rbuf, elementCount, &data1, &size1, &data2, &size2 ); + if( size2 > 0 ) + { + memcpy( data, data1, size1*rbuf->elementSizeBytes ); + data = ((char *)data) + size1*rbuf->elementSizeBytes; + memcpy( data, data2, size2*rbuf->elementSizeBytes ); + } + else + { + memcpy( data, data1, size1*rbuf->elementSizeBytes ); + } + PaUtil_AdvanceRingBufferReadIndex( rbuf, numRead ); + return numRead; +} diff --git a/src/modules/audio_device/main/source/mac/portaudio/pa_ringbuffer.h b/src/modules/audio_device/main/source/mac/portaudio/pa_ringbuffer.h new file mode 100644 index 000000000..393f6f8c5 --- /dev/null +++ b/src/modules/audio_device/main/source/mac/portaudio/pa_ringbuffer.h @@ -0,0 +1,233 @@ +#ifndef WEBRTC_AUDIO_DEVICE_PA_RINGBUFFER_H +#define WEBRTC_AUDIO_DEVICE_PA_RINGBUFFER_H +/* + * $Id: pa_ringbuffer.h 1421 2009-11-18 16:09:05Z bjornroche $ + * Portable Audio I/O Library + * Ring Buffer utility. + * + * Author: Phil Burk, http://www.softsynth.com + * modified for SMP safety on OS X by Bjorn Roche. + * also allowed for const where possible. + * modified for multiple-byte-sized data elements by Sven Fischer + * + * Note that this is safe only for a single-thread reader + * and a single-thread writer. + * + * This program is distributed with the PortAudio Portable Audio Library. + * For more information see: http://www.portaudio.com + * Copyright (c) 1999-2000 Ross Bencina and Phil Burk + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF + * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * The text above constitutes the entire PortAudio license; however, + * the PortAudio community also makes the following non-binding requests: + * + * Any person wishing to distribute modifications to the Software is + * requested to send the modifications to the original developer so that + * they can be incorporated into the canonical version. It is also + * requested that these non-binding requests be included along with the + * license above. + */ + +/** @file + @ingroup common_src + @brief Single-reader single-writer lock-free ring buffer + + PaUtilRingBuffer is a ring buffer used to transport samples between + different execution contexts (threads, OS callbacks, interrupt handlers) + without requiring the use of any locks. This only works when there is + a single reader and a single writer (ie. one thread or callback writes + to the ring buffer, another thread or callback reads from it). + + The PaUtilRingBuffer structure manages a ring buffer containing N + elements, where N must be a power of two. An element may be any size + (specified in bytes). + + The memory area used to store the buffer elements must be allocated by + the client prior to calling PaUtil_InitializeRingBuffer() and must outlive + the use of the ring buffer. +*/ + +#if defined(__APPLE__) +#include <sys/types.h> +typedef int32_t ring_buffer_size_t; +#elif defined( __GNUC__ ) +typedef long ring_buffer_size_t; +#elif (_MSC_VER >= 1400) +typedef long ring_buffer_size_t; +#elif defined(_MSC_VER) || defined(__BORLANDC__) +typedef long ring_buffer_size_t; +#else +typedef long ring_buffer_size_t; +#endif + + + +#ifdef __cplusplus +extern "C" +{ +#endif /* __cplusplus */ + +typedef struct PaUtilRingBuffer +{ + ring_buffer_size_t bufferSize; /**< Number of elements in FIFO. Power of 2. Set by PaUtil_InitRingBuffer. */ + ring_buffer_size_t writeIndex; /**< Index of next writable element. Set by PaUtil_AdvanceRingBufferWriteIndex. */ + ring_buffer_size_t readIndex; /**< Index of next readable element. Set by PaUtil_AdvanceRingBufferReadIndex. */ + ring_buffer_size_t bigMask; /**< Used for wrapping indices with extra bit to distinguish full/empty. */ + ring_buffer_size_t smallMask; /**< Used for fitting indices to buffer. */ + ring_buffer_size_t elementSizeBytes; /**< Number of bytes per element. */ + char *buffer; /**< Pointer to the buffer containing the actual data. */ +}PaUtilRingBuffer; + +/** Initialize Ring Buffer. + + @param rbuf The ring buffer. + + @param elementSizeBytes The size of a single data element in bytes. + + @param elementCount The number of elements in the buffer (must be power of 2). + + @param dataPtr A pointer to a previously allocated area where the data + will be maintained. It must be elementCount*elementSizeBytes long. + + @return -1 if elementCount is not a power of 2, otherwise 0. +*/ +ring_buffer_size_t PaUtil_InitializeRingBuffer( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementSizeBytes, ring_buffer_size_t elementCount, void *dataPtr ); + +/** Clear buffer. Should only be called when buffer is NOT being read. + + @param rbuf The ring buffer. +*/ +void PaUtil_FlushRingBuffer( PaUtilRingBuffer *rbuf ); + +/** Retrieve the number of elements available in the ring buffer for writing. + + @param rbuf The ring buffer.
+ + @return The number of elements available for writing. +*/ +ring_buffer_size_t PaUtil_GetRingBufferWriteAvailable( PaUtilRingBuffer *rbuf ); + +/** Retrieve the number of elements available in the ring buffer for reading. + + @param rbuf The ring buffer. + + @return The number of elements available for reading. +*/ +ring_buffer_size_t PaUtil_GetRingBufferReadAvailable( PaUtilRingBuffer *rbuf ); + +/** Write data to the ring buffer. + + @param rbuf The ring buffer. + + @param data The address of new data to write to the buffer. + + @param elementCount The number of elements to be written. + + @return The number of elements written. +*/ +ring_buffer_size_t PaUtil_WriteRingBuffer( PaUtilRingBuffer *rbuf, const void *data, ring_buffer_size_t elementCount ); + +/** Read data from the ring buffer. + + @param rbuf The ring buffer. + + @param data The address where the data should be stored. + + @param elementCount The number of elements to be read. + + @return The number of elements read. +*/ +ring_buffer_size_t PaUtil_ReadRingBuffer( PaUtilRingBuffer *rbuf, void *data, ring_buffer_size_t elementCount ); + +/** Get address of region(s) to which we can write data. + + @param rbuf The ring buffer. + + @param elementCount The number of elements desired. + + @param dataPtr1 The address where the first (or only) region pointer will be + stored. + + @param sizePtr1 The address where the first (or only) region length will be + stored. + + @param dataPtr2 The address where the second region pointer will be stored if + the first region is too small to satisfy elementCount. + + @param sizePtr2 The address where the second region length will be stored if + the first region is too small to satisfy elementCount. + + @return The room available to be written or elementCount, whichever is smaller. +*/ +ring_buffer_size_t PaUtil_GetRingBufferWriteRegions( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount, + void **dataPtr1, ring_buffer_size_t *sizePtr1, + void **dataPtr2, ring_buffer_size_t *sizePtr2 ); + +/** Advance the write index to the next location to be written. + + @param rbuf The ring buffer. + + @param elementCount The number of elements to advance. + + @return The new position. +*/ +ring_buffer_size_t PaUtil_AdvanceRingBufferWriteIndex( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount ); + +/** Get address of region(s) from which we can read data. + + @param rbuf The ring buffer. + + @param elementCount The number of elements desired. + + @param dataPtr1 The address where the first (or only) region pointer will be + stored. + + @param sizePtr1 The address where the first (or only) region length will be + stored. + + @param dataPtr2 The address where the second region pointer will be stored if + the first region is too small to satisfy elementCount. + + @param sizePtr2 The address where the second region length will be stored if + the first region is too small to satisfy elementCount. + + @return The number of elements available for reading. +*/ +ring_buffer_size_t PaUtil_GetRingBufferReadRegions( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount, + void **dataPtr1, ring_buffer_size_t *sizePtr1, + void **dataPtr2, ring_buffer_size_t *sizePtr2 ); + +/** Advance the read index to the next location to be read. + + @param rbuf The ring buffer. + + @param elementCount The number of elements to advance. + + @return The new position.
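To make the API above concrete, a minimal single-producer/single-consumer sketch; the buffer size and float element type are illustrative, and in this module the buffer presumably carries audio samples between the CoreAudio callback and the device thread:

```cpp
#include "pa_ringbuffer.h"

// Hypothetical usage: in a real program one thread writes and another reads;
// threading and error handling are omitted for brevity.
int main()
{
    float storage[256];                  // element count must be a power of 2
    PaUtilRingBuffer rb;
    if (PaUtil_InitializeRingBuffer(&rb, sizeof(float), 256, storage) != 0)
        return -1;                       // fails only for non-power-of-two

    float in[64] = {0};                  // producer side
    PaUtil_WriteRingBuffer(&rb, in, 64);

    float out[64];                       // consumer side
    ring_buffer_size_t got = PaUtil_ReadRingBuffer(&rb, out, 64);
    return (got == 64) ? 0 : 1;
}
```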
+*/ +ring_buffer_size_t PaUtil_AdvanceRingBufferReadIndex( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount ); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* WEBRTC_AUDIO_DEVICE_PA_RINGBUFFER_H */ diff --git a/src/modules/audio_device/main/source/Windows/audio_device_windows_core.cc b/src/modules/audio_device/main/source/win/audio_device_core_win.cc similarity index 99% rename from src/modules/audio_device/main/source/Windows/audio_device_windows_core.cc rename to src/modules/audio_device/main/source/win/audio_device_core_win.cc index 37c791161..31200b93e 100644 --- a/src/modules/audio_device/main/source/Windows/audio_device_windows_core.cc +++ b/src/modules/audio_device/main/source/win/audio_device_core_win.cc @@ -29,7 +29,7 @@ #ifdef WEBRTC_WINDOWS_CORE_AUDIO_BUILD #include "audio_device_utility.h" -#include "audio_device_windows_core.h" +#include "audio_device_core_win.h" #include "trace.h" #include diff --git a/src/modules/audio_device/main/source/Windows/audio_device_windows_core.h b/src/modules/audio_device/main/source/win/audio_device_core_win.h similarity index 98% rename from src/modules/audio_device/main/source/Windows/audio_device_windows_core.h rename to src/modules/audio_device/main/source/win/audio_device_core_win.h index 8f9f6fbeb..5d7756b36 100644 --- a/src/modules/audio_device/main/source/Windows/audio_device_windows_core.h +++ b/src/modules/audio_device/main/source/win/audio_device_core_win.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WINDOWS_CORE_H -#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WINDOWS_CORE_H +#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_CORE_WIN_H +#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_CORE_WIN_H #if (_MSC_VER >= 1400) // only include for VS 2005 and higher @@ -309,5 +309,5 @@ private: } // namespace webrtc -#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WINDOWS_CORE_H +#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_CORE_WIN_H diff --git a/src/modules/audio_device/main/source/Windows/audio_device_utility_windows.cc b/src/modules/audio_device/main/source/win/audio_device_utility_win.cc similarity index 99% rename from src/modules/audio_device/main/source/Windows/audio_device_utility_windows.cc rename to src/modules/audio_device/main/source/win/audio_device_utility_win.cc index 2e816c902..cd9c7ad12 100644 --- a/src/modules/audio_device/main/source/Windows/audio_device_utility_windows.cc +++ b/src/modules/audio_device/main/source/win/audio_device_utility_win.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "audio_device_utility_windows.h" +#include "audio_device_utility_win.h" #include "audio_device_config.h" #include "critical_section_wrapper.h" diff --git a/src/modules/audio_device/main/source/Windows/audio_device_utility_windows.h b/src/modules/audio_device/main/source/win/audio_device_utility_win.h similarity index 84% rename from src/modules/audio_device/main/source/Windows/audio_device_utility_windows.h rename to src/modules/audio_device/main/source/win/audio_device_utility_win.h index 6d0a1dfa7..77b4c22d1 100644 --- a/src/modules/audio_device/main/source/Windows/audio_device_utility_windows.h +++ b/src/modules/audio_device/main/source/win/audio_device_utility_win.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree.
*/ -#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_WINDOWS_H -#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_WINDOWS_H +#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_WIN_H +#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_WIN_H #include "audio_device_utility.h" #include "audio_device.h" @@ -38,4 +38,4 @@ private: } // namespace webrtc -#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_WINDOWS_H +#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_WIN_H diff --git a/src/modules/audio_device/main/source/Windows/audio_device_windows_wave.cc b/src/modules/audio_device/main/source/win/audio_device_wave_win.cc similarity index 99% rename from src/modules/audio_device/main/source/Windows/audio_device_windows_wave.cc rename to src/modules/audio_device/main/source/win/audio_device_wave_win.cc index 15b46bfca..e6ea8f3e1 100644 --- a/src/modules/audio_device/main/source/Windows/audio_device_windows_wave.cc +++ b/src/modules/audio_device/main/source/win/audio_device_wave_win.cc @@ -9,7 +9,7 @@ */ #include "audio_device_utility.h" -#include "audio_device_windows_wave.h" +#include "audio_device_wave_win.h" #include "audio_device_config.h" #include "trace.h" diff --git a/src/modules/audio_device/main/source/Windows/audio_device_windows_wave.h b/src/modules/audio_device/main/source/win/audio_device_wave_win.h similarity index 98% rename from src/modules/audio_device/main/source/Windows/audio_device_windows_wave.h rename to src/modules/audio_device/main/source/win/audio_device_wave_win.h index 6eb2a585c..76607f69b 100644 --- a/src/modules/audio_device/main/source/Windows/audio_device_windows_wave.h +++ b/src/modules/audio_device/main/source/win/audio_device_wave_win.h @@ -8,11 +8,11 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WINDOWS_WAVE_H -#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WINDOWS_WAVE_H +#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WAVE_WIN_H +#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WAVE_WIN_H #include "audio_device_generic.h" -#include "audio_mixer_manager.h" +#include "audio_mixer_manager_win.h" #pragma comment( lib, "winmm.lib" ) @@ -330,4 +330,4 @@ private: } // namespace webrtc -#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WINDOWS_WAVE_H +#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WAVE_WIN_H diff --git a/src/modules/audio_device/main/source/Windows/audio_mixer_manager.cc b/src/modules/audio_device/main/source/win/audio_mixer_manager_win.cc similarity index 99% rename from src/modules/audio_device/main/source/Windows/audio_mixer_manager.cc rename to src/modules/audio_device/main/source/win/audio_mixer_manager_win.cc index f97290c3a..4855f5043 100644 --- a/src/modules/audio_device/main/source/Windows/audio_mixer_manager.cc +++ b/src/modules/audio_device/main/source/win/audio_mixer_manager_win.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include "audio_mixer_manager.h" +#include "audio_mixer_manager_win.h" #include "trace.h" #include // StringCchCopy(), StringCchCat(), StringCchPrintf() diff --git a/src/modules/audio_device/main/source/Windows/audio_mixer_manager.h b/src/modules/audio_device/main/source/win/audio_mixer_manager_win.h similarity index 98% rename from src/modules/audio_device/main/source/Windows/audio_mixer_manager.h rename to src/modules/audio_device/main/source/win/audio_mixer_manager_win.h index 7e9467347..da9de47d0 100644 --- a/src/modules/audio_device/main/source/Windows/audio_mixer_manager.h +++ b/src/modules/audio_device/main/source/win/audio_mixer_manager_win.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_MIXER_MANAGER_H -#define WEBRTC_AUDIO_DEVICE_AUDIO_MIXER_MANAGER_H +#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_MIXER_MANAGER_WIN_H +#define WEBRTC_AUDIO_DEVICE_AUDIO_MIXER_MANAGER_WIN_H #include "typedefs.h" #include "audio_device.h"