Solve data race in Pulse audio implementation.

BUG=3056, 1320
TEST=AutoTest

Mainly, add a ThreadChecker and remove the now-unnecessary lock.
Also includes some additional styling work.
- audio_device_pulse_linux.cc: Wrap lines longer than 80 characters, and add trailing periods to some nearby comments (not applied everywhere).
- audio_mixer_manager_pulse_linux.cc: Here I adopt a Chromium practice. We used to do many things in response to the failure of a Pulse operation, which caused most of the data race issues. In Chromium, if a call to any Pulse function fails, we simply fail without using the previous results. I did the same here. Please check whether this is acceptable.

R=bjornv@webrtc.org, henrika@webrtc.org, tommi@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/52479004

Cr-Commit-Position: refs/heads/master@{#9243}
This commit is contained in:
Brave Yao 2015-05-21 12:42:40 +08:00
parent 8602a3db73
commit 1a07a1e825
5 changed files with 366 additions and 457 deletions

File diff suppressed because it is too large Load Diff

View File

@ -15,6 +15,7 @@
#include "webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h" #include "webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h" #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/thread_wrapper.h" #include "webrtc/system_wrappers/interface/thread_wrapper.h"
#include "webrtc/base/thread_checker.h"
#include <X11/Xlib.h> #include <X11/Xlib.h>
#include <pulse/pulseaudio.h> #include <pulse/pulseaudio.h>
@ -204,7 +205,6 @@ public:
// CPU load // CPU load
int32_t CPULoad(uint16_t& load) const override; int32_t CPULoad(uint16_t& load) const override;
public:
bool PlayoutWarning() const override; bool PlayoutWarning() const override;
bool PlayoutError() const override; bool PlayoutError() const override;
bool RecordingWarning() const override; bool RecordingWarning() const override;
@ -214,7 +214,6 @@ public:
void ClearRecordingWarning() override; void ClearRecordingWarning() override;
void ClearRecordingError() override; void ClearRecordingError() override;
public:
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override; void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
private: private:
@ -227,10 +226,8 @@ private:
void WaitForOperationCompletion(pa_operation* paOperation) const; void WaitForOperationCompletion(pa_operation* paOperation) const;
void WaitForSuccess(pa_operation* paOperation) const; void WaitForSuccess(pa_operation* paOperation) const;
private:
bool KeyPressed() const; bool KeyPressed() const;
private:
static void PaContextStateCallback(pa_context *c, void *pThis); static void PaContextStateCallback(pa_context *c, void *pThis);
static void PaSinkInfoCallback(pa_context *c, const pa_sink_info *i, static void PaSinkInfoCallback(pa_context *c, const pa_sink_info *i,
int eol, void *pThis); int eol, void *pThis);
@ -279,7 +276,6 @@ private:
bool RecThreadProcess(); bool RecThreadProcess();
bool PlayThreadProcess(); bool PlayThreadProcess();
private:
AudioDeviceBuffer* _ptrAudioBuffer; AudioDeviceBuffer* _ptrAudioBuffer;
CriticalSectionWrapper& _critSect; CriticalSectionWrapper& _critSect;
@ -305,7 +301,12 @@ private:
AudioDeviceModule::BufferType _playBufType; AudioDeviceModule::BufferType _playBufType;
private: // Stores thread ID in constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
// other methods are called from the same thread.
// Currently only does DCHECK(thread_checker_.CalledOnValidThread()).
rtc::ThreadChecker thread_checker_;
bool _initialized; bool _initialized;
bool _recording; bool _recording;
bool _playing; bool _playing;
@ -318,7 +319,6 @@ private:
bool _AGC; bool _AGC;
bool update_speaker_volume_at_startup_; bool update_speaker_volume_at_startup_;
private:
uint16_t _playBufDelayFixed; // fixed playback delay uint16_t _playBufDelayFixed; // fixed playback delay
uint32_t _sndCardPlayDelay; uint32_t _sndCardPlayDelay;

View File

@ -12,22 +12,36 @@
#include "webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h" #include "webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
#include "webrtc/system_wrappers/interface/trace.h" #include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/base/checks.h"
extern webrtc_adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable; extern webrtc_adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable;
// Accesses Pulse functions through our late-binding symbol table instead of // Accesses Pulse functions through our late-binding symbol table instead of
// directly. This way we don't have to link to libpulse, which means our binary // directly. This way we don't have to link to libpulse, which means our
// will work on systems that don't have it. // binary will work on systems that don't have it.
#define LATE(sym) \ #define LATE(sym) \
LATESYM_GET(webrtc_adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, sym) LATESYM_GET(webrtc_adm_linux_pulse::PulseAudioSymbolTable, \
&PaSymbolTable, sym)
namespace webrtc namespace webrtc
{ {
enum { kMaxRetryOnFailure = 2 }; class AutoPulseLock {
public:
explicit AutoPulseLock(pa_threaded_mainloop* pa_mainloop)
: pa_mainloop_(pa_mainloop) {
LATE(pa_threaded_mainloop_lock)(pa_mainloop_);
}
~AutoPulseLock() {
LATE(pa_threaded_mainloop_unlock)(pa_mainloop_);
}
private:
pa_threaded_mainloop* const pa_mainloop_;
};
AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse(const int32_t id) : AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse(const int32_t id) :
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_id(id), _id(id),
_paOutputDeviceIndex(-1), _paOutputDeviceIndex(-1),
_paInputDeviceIndex(-1), _paInputDeviceIndex(-1),
@ -41,8 +55,7 @@ AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse(const int32_t id) :
_paSpeakerMute(false), _paSpeakerMute(false),
_paSpeakerVolume(PA_VOLUME_NORM), _paSpeakerVolume(PA_VOLUME_NORM),
_paChannels(0), _paChannels(0),
_paObjectsSet(false), _paObjectsSet(false)
_callbackValues(false)
{ {
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
"%s constructed", __FUNCTION__); "%s constructed", __FUNCTION__);
@ -50,27 +63,25 @@ AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse(const int32_t id) :
AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse() AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse()
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
"%s destructed", __FUNCTION__); "%s destructed", __FUNCTION__);
Close(); Close();
delete &_critSect;
} }
// ============================================================================ // ===========================================================================
// PUBLIC METHODS // PUBLIC METHODS
// ============================================================================ // ===========================================================================
int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects( int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects(
pa_threaded_mainloop* mainloop, pa_threaded_mainloop* mainloop,
pa_context* context) pa_context* context)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
__FUNCTION__); __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
if (!mainloop || !context) if (!mainloop || !context)
{ {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@ -90,11 +101,10 @@ int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects(
int32_t AudioMixerManagerLinuxPulse::Close() int32_t AudioMixerManagerLinuxPulse::Close()
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
__FUNCTION__); __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
CloseSpeaker(); CloseSpeaker();
CloseMicrophone(); CloseMicrophone();
@ -108,11 +118,10 @@ int32_t AudioMixerManagerLinuxPulse::Close()
int32_t AudioMixerManagerLinuxPulse::CloseSpeaker() int32_t AudioMixerManagerLinuxPulse::CloseSpeaker()
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
__FUNCTION__); __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
// Reset the index to -1 // Reset the index to -1
_paOutputDeviceIndex = -1; _paOutputDeviceIndex = -1;
_paPlayStream = NULL; _paPlayStream = NULL;
@ -122,11 +131,10 @@ int32_t AudioMixerManagerLinuxPulse::CloseSpeaker()
int32_t AudioMixerManagerLinuxPulse::CloseMicrophone() int32_t AudioMixerManagerLinuxPulse::CloseMicrophone()
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
__FUNCTION__); __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
// Reset the index to -1 // Reset the index to -1
_paInputDeviceIndex = -1; _paInputDeviceIndex = -1;
_paRecStream = NULL; _paRecStream = NULL;
@ -136,20 +144,20 @@ int32_t AudioMixerManagerLinuxPulse::CloseMicrophone()
int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream) int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetPlayStream(playStream)"); "AudioMixerManagerLinuxPulse::SetPlayStream(playStream)");
CriticalSectionScoped lock(&_critSect);
_paPlayStream = playStream; _paPlayStream = playStream;
return 0; return 0;
} }
int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream) int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetRecStream(recStream)"); "AudioMixerManagerLinuxPulse::SetRecStream(recStream)");
CriticalSectionScoped lock(&_critSect);
_paRecStream = recStream; _paRecStream = recStream;
return 0; return 0;
} }
@ -157,12 +165,11 @@ int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream)
int32_t AudioMixerManagerLinuxPulse::OpenSpeaker( int32_t AudioMixerManagerLinuxPulse::OpenSpeaker(
uint16_t deviceIndex) uint16_t deviceIndex)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex=%d)", "AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex=%d)",
deviceIndex); deviceIndex);
CriticalSectionScoped lock(&_critSect);
// No point in opening the speaker // No point in opening the speaker
// if PA objects have not been set // if PA objects have not been set
if (!_paObjectsSet) if (!_paObjectsSet)
@ -185,11 +192,10 @@ int32_t AudioMixerManagerLinuxPulse::OpenSpeaker(
int32_t AudioMixerManagerLinuxPulse::OpenMicrophone( int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(
uint16_t deviceIndex) uint16_t deviceIndex)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::OpenMicrophone(deviceIndex=%d)", "AudioMixerManagerLinuxPulse::OpenMicrophone"
deviceIndex); "(deviceIndex=%d)", deviceIndex);
CriticalSectionScoped lock(&_critSect);
// No point in opening the microphone // No point in opening the microphone
// if PA objects have not been set // if PA objects have not been set
@ -212,6 +218,7 @@ int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(
bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s",
__FUNCTION__); __FUNCTION__);
@ -220,6 +227,7 @@ bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const
bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s",
__FUNCTION__); __FUNCTION__);
@ -229,12 +237,11 @@ bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const
int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume( int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(
uint32_t volume) uint32_t volume)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume=%u)", "AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume=%u)",
volume); volume);
CriticalSectionScoped lock(&_critSect);
if (_paOutputDeviceIndex == -1) if (_paOutputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -248,7 +255,7 @@ int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(
!= PA_STREAM_UNCONNECTED)) != PA_STREAM_UNCONNECTED))
{ {
// We can only really set the volume if we have a connected stream // We can only really set the volume if we have a connected stream
PaLock(); AutoPulseLock auto_lock(_paMainloop);
// Get the number of channels from the sample specification // Get the number of channels from the sample specification
const pa_sample_spec *spec = const pa_sample_spec *spec =
@ -257,7 +264,6 @@ int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(
{ {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
" could not get sample specification"); " could not get sample specification");
PaUnLock();
return -1; return -1;
} }
@ -278,8 +284,6 @@ int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(
// Don't need to wait for the completion // Don't need to wait for the completion
LATE(pa_operation_unref)(paOperation); LATE(pa_operation_unref)(paOperation);
PaUnLock();
} else } else
{ {
// We have not created a stream or it's not connected to the sink // We have not created a stream or it's not connected to the sink
@ -302,7 +306,6 @@ int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(
int32_t int32_t
AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const
{ {
if (_paOutputDeviceIndex == -1) if (_paOutputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -317,15 +320,16 @@ AudioMixerManagerLinuxPulse::SpeakerVolume(uint32_t& volume) const
if (!GetSinkInputInfo()) if (!GetSinkInputInfo())
return -1; return -1;
AutoPulseLock auto_lock(_paMainloop);
volume = static_cast<uint32_t> (_paVolume); volume = static_cast<uint32_t> (_paVolume);
ResetCallbackVariables();
} else } else
{ {
AutoPulseLock auto_lock(_paMainloop);
volume = _paSpeakerVolume; volume = _paSpeakerVolume;
} }
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
" AudioMixerManagerLinuxPulse::SpeakerVolume() => vol=%i", "\tAudioMixerManagerLinuxPulse::SpeakerVolume() => vol=%i",
volume); volume);
return 0; return 0;
@ -368,7 +372,7 @@ AudioMixerManagerLinuxPulse::MinSpeakerVolume(uint32_t& minVolume) const
int32_t int32_t
AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize(uint16_t& stepSize) const AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize(uint16_t& stepSize) const
{ {
DCHECK(thread_checker_.CalledOnValidThread());
if (_paOutputDeviceIndex == -1) if (_paOutputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -381,11 +385,8 @@ AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize(uint16_t& stepSize) const
stepSize = 1; stepSize = 1;
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
" AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize() => " "\tAudioMixerManagerLinuxPulse::SpeakerVolumeStepSize() => "
"size=%i, stepSize"); "size=%i", stepSize);
// Reset members modified by callback
ResetCallbackVariables();
return 0; return 0;
} }
@ -393,6 +394,7 @@ AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize(uint16_t& stepSize) const
int32_t int32_t
AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available) AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
if (_paOutputDeviceIndex == -1) if (_paOutputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -409,6 +411,7 @@ AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
int32_t int32_t
AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available) AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
if (_paOutputDeviceIndex == -1) if (_paOutputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -424,12 +427,11 @@ AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available)
int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable) int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetSpeakerMute(enable=%u)", "AudioMixerManagerLinuxPulse::SetSpeakerMute(enable=%u)",
enable); enable);
CriticalSectionScoped lock(&_critSect);
if (_paOutputDeviceIndex == -1) if (_paOutputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -443,7 +445,7 @@ int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable)
!= PA_STREAM_UNCONNECTED)) != PA_STREAM_UNCONNECTED))
{ {
// We can only really mute if we have a connected stream // We can only really mute if we have a connected stream
PaLock(); AutoPulseLock auto_lock(_paMainloop);
pa_operation* paOperation = NULL; pa_operation* paOperation = NULL;
paOperation = LATE(pa_context_set_sink_input_mute)( paOperation = LATE(pa_context_set_sink_input_mute)(
@ -459,8 +461,6 @@ int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable)
// Don't need to wait for the completion // Don't need to wait for the completion
LATE(pa_operation_unref)(paOperation); LATE(pa_operation_unref)(paOperation);
PaUnLock();
} else } else
{ {
// We have not created a stream or it's not connected to the sink // We have not created a stream or it's not connected to the sink
@ -497,7 +497,6 @@ int32_t AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const
return -1; return -1;
enabled = static_cast<bool> (_paMute); enabled = static_cast<bool> (_paMute);
ResetCallbackVariables();
} else } else
{ {
enabled = _paSpeakerMute; enabled = _paSpeakerMute;
@ -513,6 +512,7 @@ int32_t AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const
int32_t int32_t
AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available) AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
if (_paOutputDeviceIndex == -1) if (_paOutputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -522,7 +522,8 @@ AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available)
uint32_t deviceIndex = (uint32_t) _paOutputDeviceIndex; uint32_t deviceIndex = (uint32_t) _paOutputDeviceIndex;
PaLock(); {
AutoPulseLock auto_lock(_paMainloop);
// Get the actual stream device index if we have a connected stream // Get the actual stream device index if we have a connected stream
// The device used by the stream can be changed // The device used by the stream can be changed
@ -532,23 +533,20 @@ AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available)
{ {
deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream); deviceIndex = LATE(pa_stream_get_device_index)(_paPlayStream);
} }
}
PaUnLock();
if (!GetSinkInfoByIndex(deviceIndex)) if (!GetSinkInfoByIndex(deviceIndex))
return -1; return -1;
available = static_cast<bool> (_paChannels == 2); available = static_cast<bool> (_paChannels == 2);
// Reset members modified by callback
ResetCallbackVariables();
return 0; return 0;
} }
int32_t int32_t
AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available) AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1) if (_paInputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -558,7 +556,7 @@ AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available)
uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
PaLock(); AutoPulseLock auto_lock(_paMainloop);
// Get the actual stream device index if we have a connected stream // Get the actual stream device index if we have a connected stream
// The device used by the stream can be changed // The device used by the stream can be changed
@ -570,7 +568,6 @@ AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available)
} }
pa_operation* paOperation = NULL; pa_operation* paOperation = NULL;
ResetCallbackVariables();
// Get info for this source // Get info for this source
// We want to know if the actual device can record in stereo // We want to know if the actual device can record in stereo
@ -580,15 +577,6 @@ AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available)
(void*) this); (void*) this);
WaitForOperationCompletion(paOperation); WaitForOperationCompletion(paOperation);
PaUnLock();
if (!_callbackValues)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"Error getting number of input channels: %d",
LATE(pa_context_errno)(_paContext));
return -1;
}
available = static_cast<bool> (_paChannels == 2); available = static_cast<bool> (_paChannels == 2);
@ -596,15 +584,13 @@ AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available)
" AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()" " AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable()"
" => available=%i, available"); " => available=%i, available");
// Reset members modified by callback
ResetCallbackVariables();
return 0; return 0;
} }
int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable( int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable(
bool& available) bool& available)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1) if (_paInputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -620,12 +606,11 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable(
int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable) int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable=%u)", "AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable=%u)",
enable); enable);
CriticalSectionScoped lock(&_critSect);
if (_paInputDeviceIndex == -1) if (_paInputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -635,11 +620,10 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable)
bool setFailed(false); bool setFailed(false);
pa_operation* paOperation = NULL; pa_operation* paOperation = NULL;
ResetCallbackVariables();
uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
PaLock(); AutoPulseLock auto_lock(_paMainloop);
// Get the actual stream device index if we have a connected stream // Get the actual stream device index if we have a connected stream
// The device used by the stream can be changed // The device used by the stream can be changed
@ -664,11 +648,6 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable)
// Don't need to wait for this to complete. // Don't need to wait for this to complete.
LATE(pa_operation_unref)(paOperation); LATE(pa_operation_unref)(paOperation);
PaUnLock();
// Reset variables altered by callback
ResetCallbackVariables();
if (setFailed) if (setFailed)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -682,7 +661,7 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable)
int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const
{ {
DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1) if (_paInputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -692,8 +671,8 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const
uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
PaLock(); {
AutoPulseLock auto_lock(_paMainloop);
// Get the actual stream device index if we have a connected stream // Get the actual stream device index if we have a connected stream
// The device used by the stream can be changed // The device used by the stream can be changed
// during the call // during the call
@ -702,8 +681,7 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const
{ {
deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream); deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
} }
}
PaUnLock();
if (!GetSourceInfoByIndex(deviceIndex)) if (!GetSourceInfoByIndex(deviceIndex))
return -1; return -1;
@ -711,11 +689,8 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const
enabled = static_cast<bool> (_paMute); enabled = static_cast<bool> (_paMute);
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
" AudioMixerManagerLinuxPulse::MicrophoneMute() =>" "\tAudioMixerManagerLinuxPulse::MicrophoneMute() =>"
" enabled=%i, enabled"); " enabled=%i", enabled);
// Reset members modified by callback
ResetCallbackVariables();
return 0; return 0;
} }
@ -723,6 +698,7 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const
int32_t int32_t
AudioMixerManagerLinuxPulse::MicrophoneBoostIsAvailable(bool& available) AudioMixerManagerLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1) if (_paInputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -740,12 +716,11 @@ AudioMixerManagerLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
int32_t AudioMixerManagerLinuxPulse::SetMicrophoneBoost(bool enable) int32_t AudioMixerManagerLinuxPulse::SetMicrophoneBoost(bool enable)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetMicrophoneBoost(enable=%u)", "AudioMixerManagerLinuxPulse::SetMicrophoneBoost(enable=%u)",
enable); enable);
CriticalSectionScoped lock(&_critSect);
if (_paInputDeviceIndex == -1) if (_paInputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -753,7 +728,7 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneBoost(bool enable)
return -1; return -1;
} }
// Ensure that the selected microphone destination has a valid boost control // Ensure the selected microphone destination has a valid boost control
bool available(false); bool available(false);
MicrophoneBoostIsAvailable(available); MicrophoneBoostIsAvailable(available);
if (!available) if (!available)
@ -770,7 +745,7 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneBoost(bool enable)
int32_t AudioMixerManagerLinuxPulse::MicrophoneBoost(bool& enabled) const int32_t AudioMixerManagerLinuxPulse::MicrophoneBoost(bool& enabled) const
{ {
DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1) if (_paInputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -787,6 +762,7 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneBoost(bool& enabled) const
int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeIsAvailable( int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeIsAvailable(
bool& available) bool& available)
{ {
DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1) if (_paInputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -804,10 +780,8 @@ int32_t
AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume) AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume)
{ {
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetMicrophoneVolume(volume=%u)", "AudioMixerManagerLinuxPulse::SetMicrophoneVolume"
volume); "(volume=%u)", volume);
CriticalSectionScoped lock(&_critSect);
if (_paInputDeviceIndex == -1) if (_paInputDeviceIndex == -1)
{ {
@ -816,22 +790,20 @@ AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume)
return -1; return -1;
} }
// Unlike output streams, input streams have no concept of a stream volume, // Unlike output streams, input streams have no concept of a stream
// only a device volume. So we have to change the volume of the device // volume, only a device volume. So we have to change the volume of the
// itself. // device itself.
// The device may have a different number of channels than the stream and // The device may have a different number of channels than the stream and
// their mapping may be different, so we don't want to use the channel count // their mapping may be different, so we don't want to use the channel
// from our sample spec. We could use PA_CHANNELS_MAX to cover our bases, // count from our sample spec. We could use PA_CHANNELS_MAX to cover our
// and the server allows that even if the device's channel count is lower, // bases, and the server allows that even if the device's channel count
// but some buggy PA clients don't like that (the pavucontrol on Hardy dies // is lower, but some buggy PA clients don't like that (the pavucontrol
// in an assert if the channel count is different). So instead we look up // on Hardy dies in an assert if the channel count is different). So
// the actual number of channels that the device has. // instead we look up the actual number of channels that the device has.
AutoPulseLock auto_lock(_paMainloop);
uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
PaLock();
// Get the actual stream device index if we have a connected stream // Get the actual stream device index if we have a connected stream
// The device used by the stream can be changed // The device used by the stream can be changed
// during the call // during the call
@ -843,7 +815,6 @@ AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume)
bool setFailed(false); bool setFailed(false);
pa_operation* paOperation = NULL; pa_operation* paOperation = NULL;
ResetCallbackVariables();
// Get the number of channels for this source // Get the number of channels for this source
paOperation paOperation
@ -853,18 +824,7 @@ AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume)
WaitForOperationCompletion(paOperation); WaitForOperationCompletion(paOperation);
if (!_callbackValues)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"Error getting input channels: %d",
LATE(pa_context_errno)(_paContext));
PaUnLock();
return -1;
}
uint8_t channels = _paChannels; uint8_t channels = _paChannels;
ResetCallbackVariables();
pa_cvolume cVolumes; pa_cvolume cVolumes;
LATE(pa_cvolume_set)(&cVolumes, channels, volume); LATE(pa_cvolume_set)(&cVolumes, channels, volume);
@ -872,7 +832,8 @@ AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume)
paOperation paOperation
= LATE(pa_context_set_source_volume_by_index)(_paContext, deviceIndex, = LATE(pa_context_set_source_volume_by_index)(_paContext, deviceIndex,
&cVolumes, &cVolumes,
PaSetVolumeCallback, NULL); PaSetVolumeCallback,
NULL);
if (!paOperation) if (!paOperation)
{ {
@ -882,11 +843,6 @@ AudioMixerManagerLinuxPulse::SetMicrophoneVolume(uint32_t volume)
// Don't need to wait for this to complete. // Don't need to wait for this to complete.
LATE(pa_operation_unref)(paOperation); LATE(pa_operation_unref)(paOperation);
PaUnLock();
// Reset variables altered by callback
ResetCallbackVariables();
if (setFailed) if (setFailed)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -911,29 +867,28 @@ AudioMixerManagerLinuxPulse::MicrophoneVolume(uint32_t& volume) const
uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
PaLock(); {
AutoPulseLock auto_lock(_paMainloop);
// Get the actual stream device index if we have a connected stream // Get the actual stream device index if we have a connected stream.
// The device used by the stream can be changed // The device used by the stream can be changed during the call.
// during the call
if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream) if (_paRecStream && (LATE(pa_stream_get_state)(_paRecStream)
!= PA_STREAM_UNCONNECTED)) != PA_STREAM_UNCONNECTED))
{ {
deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream); deviceIndex = LATE(pa_stream_get_device_index)(_paRecStream);
} }
}
PaUnLock();
if (!GetSourceInfoByIndex(deviceIndex)) if (!GetSourceInfoByIndex(deviceIndex))
return -1; return -1;
{
AutoPulseLock auto_lock(_paMainloop);
volume = static_cast<uint32_t> (_paVolume); volume = static_cast<uint32_t> (_paVolume);
}
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
" AudioMixerManagerLinuxPulse::MicrophoneVolume() => vol=%i, volume"); " AudioMixerManagerLinuxPulse::MicrophoneVolume()"
" => vol=%i, volume");
// Reset members modified by callback
ResetCallbackVariables();
return 0; return 0;
} }
@ -976,7 +931,7 @@ AudioMixerManagerLinuxPulse::MinMicrophoneVolume(uint32_t& minVolume) const
int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize( int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize(
uint16_t& stepSize) const uint16_t& stepSize) const
{ {
DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1) if (_paInputDeviceIndex == -1)
{ {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@ -986,7 +941,7 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize(
uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex; uint32_t deviceIndex = (uint32_t) _paInputDeviceIndex;
PaLock(); AutoPulseLock auto_lock(_paMainloop);
// Get the actual stream device index if we have a connected stream // Get the actual stream device index if we have a connected stream
// The device used by the stream can be changed // The device used by the stream can be changed
@ -998,7 +953,6 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize(
} }
pa_operation* paOperation = NULL; pa_operation* paOperation = NULL;
ResetCallbackVariables();
// Get info for this source // Get info for this source
paOperation paOperation
@ -1008,60 +962,55 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize(
WaitForOperationCompletion(paOperation); WaitForOperationCompletion(paOperation);
PaUnLock();
if (!_callbackValues)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"Error getting step size: %d",
LATE(pa_context_errno)(_paContext));
return -1;
}
stepSize = static_cast<uint16_t> ((PA_VOLUME_NORM + 1) / _paVolSteps); stepSize = static_cast<uint16_t> ((PA_VOLUME_NORM + 1) / _paVolSteps);
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
" AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize()" "\tAudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize()"
" => size=%i, stepSize"); " => size=%i", stepSize);
// Reset members modified by callback
ResetCallbackVariables();
return 0; return 0;
} }
// ============================================================================ // ===========================================================================
// Private Methods // Private Methods
// ============================================================================ // ===========================================================================
void AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context */*c*/, void
AudioMixerManagerLinuxPulse::PaSinkInfoCallback(pa_context */*c*/,
const pa_sink_info *i, const pa_sink_info *i,
int eol, void *pThis) int eol,
void *pThis)
{ {
static_cast<AudioMixerManagerLinuxPulse*> (pThis)-> PaSinkInfoCallbackHandler( static_cast<AudioMixerManagerLinuxPulse*> (pThis)->
i, eol); PaSinkInfoCallbackHandler(i, eol);
} }
void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback( void
AudioMixerManagerLinuxPulse::PaSinkInputInfoCallback(
pa_context */*c*/, pa_context */*c*/,
const pa_sink_input_info *i, const pa_sink_input_info *i,
int eol, void *pThis) int eol,
void *pThis)
{ {
static_cast<AudioMixerManagerLinuxPulse*> (pThis)-> static_cast<AudioMixerManagerLinuxPulse*> (pThis)->
PaSinkInputInfoCallbackHandler(i, eol); PaSinkInputInfoCallbackHandler(i, eol);
} }
void AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context */*c*/, void
AudioMixerManagerLinuxPulse::PaSourceInfoCallback(pa_context */*c*/,
const pa_source_info *i, const pa_source_info *i,
int eol, void *pThis) int eol,
void *pThis)
{ {
static_cast<AudioMixerManagerLinuxPulse*> (pThis)-> static_cast<AudioMixerManagerLinuxPulse*> (pThis)->
PaSourceInfoCallbackHandler(i, eol); PaSourceInfoCallbackHandler(i, eol);
} }
void AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context * c, void
int success, void */*pThis*/) AudioMixerManagerLinuxPulse::PaSetVolumeCallback(pa_context * c,
int success,
void */*pThis*/)
{ {
if (!success) if (!success)
{ {
@ -1081,7 +1030,6 @@ void AudioMixerManagerLinuxPulse::PaSinkInfoCallbackHandler(
return; return;
} }
_callbackValues = true;
_paChannels = i->channel_map.channels; // Get number of channels _paChannels = i->channel_map.channels; // Get number of channels
pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value. pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
for (int j = 0; j < _paChannels; ++j) for (int j = 0; j < _paChannels; ++j)
@ -1111,7 +1059,6 @@ void AudioMixerManagerLinuxPulse::PaSinkInputInfoCallbackHandler(
return; return;
} }
_callbackValues = true;
_paChannels = i->channel_map.channels; // Get number of channels _paChannels = i->channel_map.channels; // Get number of channels
pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value. pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
for (int j = 0; j < _paChannels; ++j) for (int j = 0; j < _paChannels; ++j)
@ -1136,7 +1083,6 @@ void AudioMixerManagerLinuxPulse::PaSourceInfoCallbackHandler(
return; return;
} }
_callbackValues = true;
_paChannels = i->channel_map.channels; // Get number of channels _paChannels = i->channel_map.channels; // Get number of channels
pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value. pa_volume_t paVolume = PA_VOLUME_MUTED; // Minimum possible value.
for (int j = 0; j < _paChannels; ++j) for (int j = 0; j < _paChannels; ++j)
@ -1155,15 +1101,6 @@ void AudioMixerManagerLinuxPulse::PaSourceInfoCallbackHandler(
_paVolSteps = PA_VOLUME_NORM + 1; _paVolSteps = PA_VOLUME_NORM + 1;
} }
void AudioMixerManagerLinuxPulse::ResetCallbackVariables() const
{
_paVolume = 0;
_paMute = 0;
_paVolSteps = 0;
_paChannels = 0;
_callbackValues = false;
}
void AudioMixerManagerLinuxPulse::WaitForOperationCompletion( void AudioMixerManagerLinuxPulse::WaitForOperationCompletion(
pa_operation* paOperation) const pa_operation* paOperation) const
{ {
@ -1175,23 +1112,10 @@ void AudioMixerManagerLinuxPulse::WaitForOperationCompletion(
LATE(pa_operation_unref)(paOperation); LATE(pa_operation_unref)(paOperation);
} }
void AudioMixerManagerLinuxPulse::PaLock() const
{
LATE(pa_threaded_mainloop_lock)(_paMainloop);
}
void AudioMixerManagerLinuxPulse::PaUnLock() const
{
LATE(pa_threaded_mainloop_unlock)(_paMainloop);
}
bool AudioMixerManagerLinuxPulse::GetSinkInputInfo() const { bool AudioMixerManagerLinuxPulse::GetSinkInputInfo() const {
pa_operation* paOperation = NULL; pa_operation* paOperation = NULL;
ResetCallbackVariables();
PaLock(); AutoPulseLock auto_lock(_paMainloop);
for (int retries = 0; retries < kMaxRetryOnFailure && !_callbackValues;
retries ++) {
// Get info for this stream (sink input). // Get info for this stream (sink input).
paOperation = LATE(pa_context_get_sink_input_info)( paOperation = LATE(pa_context_get_sink_input_info)(
_paContext, _paContext,
@ -1200,67 +1124,30 @@ bool AudioMixerManagerLinuxPulse::GetSinkInputInfo() const {
(void*) this); (void*) this);
WaitForOperationCompletion(paOperation); WaitForOperationCompletion(paOperation);
}
PaUnLock();
if (!_callbackValues) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"GetSinkInputInfo failed to get volume info : %d",
LATE(pa_context_errno)(_paContext));
return false;
}
return true; return true;
} }
bool AudioMixerManagerLinuxPulse::GetSinkInfoByIndex( bool AudioMixerManagerLinuxPulse::GetSinkInfoByIndex(
int device_index) const { int device_index) const {
pa_operation* paOperation = NULL; pa_operation* paOperation = NULL;
ResetCallbackVariables();
PaLock(); AutoPulseLock auto_lock(_paMainloop);
for (int retries = 0; retries < kMaxRetryOnFailure && !_callbackValues;
retries ++) {
paOperation = LATE(pa_context_get_sink_info_by_index)(_paContext, paOperation = LATE(pa_context_get_sink_info_by_index)(_paContext,
device_index, PaSinkInfoCallback, (void*) this); device_index, PaSinkInfoCallback, (void*) this);
WaitForOperationCompletion(paOperation); WaitForOperationCompletion(paOperation);
}
PaUnLock();
if (!_callbackValues) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"GetSinkInfoByIndex failed to get volume info: %d",
LATE(pa_context_errno)(_paContext));
return false;
}
return true; return true;
} }
bool AudioMixerManagerLinuxPulse::GetSourceInfoByIndex( bool AudioMixerManagerLinuxPulse::GetSourceInfoByIndex(
int device_index) const { int device_index) const {
pa_operation* paOperation = NULL; pa_operation* paOperation = NULL;
ResetCallbackVariables();
PaLock(); AutoPulseLock auto_lock(_paMainloop);
for (int retries = 0; retries < kMaxRetryOnFailure && !_callbackValues;
retries ++) {
paOperation = LATE(pa_context_get_source_info_by_index)( paOperation = LATE(pa_context_get_source_info_by_index)(
_paContext, device_index, PaSourceInfoCallback, (void*) this); _paContext, device_index, PaSourceInfoCallback, (void*) this);
WaitForOperationCompletion(paOperation); WaitForOperationCompletion(paOperation);
}
PaUnLock();
if (!_callbackValues) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"GetSourceInfoByIndex error: %d",
LATE(pa_context_errno)(_paContext));
return false;
}
return true; return true;
} }

View File

@ -15,6 +15,7 @@
#include "webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h" #include "webrtc/modules/audio_device/linux/pulseaudiosymboltable_linux.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h" #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/typedefs.h" #include "webrtc/typedefs.h"
#include "webrtc/base/thread_checker.h"
#include <pulse/pulseaudio.h> #include <pulse/pulseaudio.h>
#include <stdint.h> #include <stdint.h>
@ -82,17 +83,13 @@ private:
void PaSinkInputInfoCallbackHandler(const pa_sink_input_info *i, int eol); void PaSinkInputInfoCallbackHandler(const pa_sink_input_info *i, int eol);
void PaSourceInfoCallbackHandler(const pa_source_info *i, int eol); void PaSourceInfoCallbackHandler(const pa_source_info *i, int eol);
void ResetCallbackVariables() const;
void WaitForOperationCompletion(pa_operation* paOperation) const; void WaitForOperationCompletion(pa_operation* paOperation) const;
void PaLock() const;
void PaUnLock() const;
bool GetSinkInputInfo() const; bool GetSinkInputInfo() const;
bool GetSinkInfoByIndex(int device_index)const ; bool GetSinkInfoByIndex(int device_index)const ;
bool GetSourceInfoByIndex(int device_index) const; bool GetSourceInfoByIndex(int device_index) const;
private: private:
CriticalSectionWrapper& _critSect;
int32_t _id; int32_t _id;
int16_t _paOutputDeviceIndex; int16_t _paOutputDeviceIndex;
int16_t _paInputDeviceIndex; int16_t _paInputDeviceIndex;
@ -110,7 +107,12 @@ private:
mutable uint32_t _paSpeakerVolume; mutable uint32_t _paSpeakerVolume;
mutable uint8_t _paChannels; mutable uint8_t _paChannels;
bool _paObjectsSet; bool _paObjectsSet;
mutable bool _callbackValues;
// Stores thread ID in constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
// other methods are called from the same thread.
// Currently only does DCHECK(thread_checker_.CalledOnValidThread()).
rtc::ThreadChecker thread_checker_;
}; };
} }

View File

@ -179,6 +179,7 @@ int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
// TODO(ajm): ensure this is called under kAdaptiveAnalog. // TODO(ajm): ensure this is called under kAdaptiveAnalog.
int GainControlImpl::set_stream_analog_level(int level) { int GainControlImpl::set_stream_analog_level(int level) {
CriticalSectionScoped crit_scoped(crit_);
was_analog_level_set_ = true; was_analog_level_set_ = true;
if (level < minimum_capture_level_ || level > maximum_capture_level_) { if (level < minimum_capture_level_ || level > maximum_capture_level_) {
return apm_->kBadParameterError; return apm_->kBadParameterError;