From 7d91d12152e01f450313a0157a00609474f2d2b3 Mon Sep 17 00:00:00 2001 From: Edouard DUPIN Date: Mon, 9 Feb 2015 21:44:32 +0100 Subject: [PATCH] [DEV] set the fork work corectly --- airtaudio/Api.cpp | 321 ++++++++++++++--------------- airtaudio/Api.h | 173 ++++++---------- airtaudio/CallbackInfo.h | 13 +- airtaudio/DeviceInfo.h | 11 +- airtaudio/Flags.cpp | 9 + airtaudio/Flags.h | 24 +++ airtaudio/Interface.cpp | 11 +- airtaudio/Interface.h | 16 +- airtaudio/StreamOptions.h | 11 +- airtaudio/StreamParameters.h | 11 +- airtaudio/api/Alsa.cpp | 349 ++++++++++++++++---------------- airtaudio/api/Alsa.h | 14 +- airtaudio/api/Android.cpp | 79 ++++---- airtaudio/api/Android.h | 9 +- airtaudio/api/Asio.cpp | 297 ++++++++++++++------------- airtaudio/api/Asio.h | 11 +- airtaudio/api/Core.cpp | 381 +++++++++++++++++------------------ airtaudio/api/Core.h | 20 +- airtaudio/api/CoreIos.h | 15 +- airtaudio/api/CoreIos.mm | 87 ++++---- airtaudio/api/Ds.cpp | 283 +++++++++++++------------- airtaudio/api/Ds.h | 11 +- airtaudio/api/Dummy.cpp | 11 +- airtaudio/api/Dummy.h | 11 +- airtaudio/api/Jack.cpp | 299 ++++++++++++++------------- airtaudio/api/Jack.h | 17 +- airtaudio/api/Oss.cpp | 361 +++++++++++++++++---------------- airtaudio/api/Oss.h | 11 +- airtaudio/api/Pulse.cpp | 247 +++++++++++------------ airtaudio/api/Pulse.h | 12 +- airtaudio/base.cpp | 13 +- airtaudio/base.h | 39 +--- airtaudio/debug.cpp | 9 +- airtaudio/debug.h | 9 +- airtaudio/error.cpp | 9 + airtaudio/error.h | 24 +++ airtaudio/int24_t.h | 52 ----- airtaudio/mode.cpp | 21 ++ airtaudio/mode.h | 23 +++ airtaudio/state.cpp | 6 + airtaudio/state.h | 22 ++ airtaudio/status.cpp | 32 +++ airtaudio/status.h | 24 +++ airtaudio/type.cpp | 70 +++++++ airtaudio/type.h | 41 ++++ lutin_airtaudio.py | 8 +- 46 files changed, 1846 insertions(+), 1681 deletions(-) create mode 100644 airtaudio/Flags.cpp create mode 100644 airtaudio/Flags.h create mode 100644 airtaudio/error.cpp create mode 100644 airtaudio/error.h delete mode 100644 airtaudio/int24_t.h create mode 100644 airtaudio/mode.cpp create mode 100644 airtaudio/mode.h create mode 100644 airtaudio/state.cpp create mode 100644 airtaudio/state.h create mode 100644 airtaudio/status.cpp create mode 100644 airtaudio/status.h create mode 100644 airtaudio/type.cpp create mode 100644 airtaudio/type.h diff --git a/airtaudio/Api.cpp b/airtaudio/Api.cpp index 0af18a5..e52bf69 100644 --- a/airtaudio/Api.cpp +++ b/airtaudio/Api.cpp @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. 
Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ //#include @@ -17,67 +16,6 @@ #undef __class__ #define __class__ "api" -static const char* listType[] { - "undefined", - "alsa", - "pulse", - "oss", - "jack", - "coreOSX", - "corIOS", - "asio", - "ds", - "java", - "dummy", - "user1", - "user2", - "user3", - "user4" -}; -static int32_t listTypeSize = sizeof(listType)/sizeof(char*); - - -std::ostream& airtaudio::operator <<(std::ostream& _os, const enum airtaudio::type& _obj) { - _os << listType[_obj]; - return _os; -} - -std::ostream& airtaudio::operator <<(std::ostream& _os, const std::vector& _obj) { - _os << std::string("{"); - for (size_t iii=0; iii<_obj.size(); ++iii) { - if (iii!=0) { - _os << std::string(";"); - } - _os << _obj[iii]; - } - _os << std::string("}"); - return _os; -} - -std::string airtaudio::getTypeString(enum audio::format _value) { - return listType[_value]; -} - -enum airtaudio::type airtaudio::getTypeFromString(const std::string& _value) { - for (int32_t iii=0; iii(iii); - } - } - return airtaudio::type_undefined; -} - -int32_t airtaudio::modeToIdTable(enum mode _mode) { - switch (_mode) { - case mode_unknow: - case mode_duplex: - case mode_output: - return 0; - case mode_input: - return 1; - } - return 0; -} // Static variable definitions. const std::vector& airtaudio::genericSampleRate() { @@ -104,12 +42,14 @@ const std::vector& airtaudio::genericSampleRate() { }; -airtaudio::Api::Api() { - m_stream.state = airtaudio::state_closed; - m_stream.mode = airtaudio::mode_unknow; - m_stream.apiHandle = 0; - m_stream.userBuffer[0] = 0; - m_stream.userBuffer[1] = 0; +airtaudio::Api::Api() : + m_apiHandle(nullptr), + m_deviceBuffer(nullptr) { + m_device[0] = 11111; + m_device[1] = 11111; + m_state = airtaudio::state_closed; + m_mode = airtaudio::mode_unknow; + m_apiHandle = 0; } airtaudio::Api::~Api() { @@ -117,13 +57,13 @@ airtaudio::Api::~Api() { } enum airtaudio::error airtaudio::Api::openStream(airtaudio::StreamParameters *oParams, - airtaudio::StreamParameters *iParams, - enum audio::format format, - uint32_t sampleRate, - uint32_t *bufferFrames, - airtaudio::AirTAudioCallback callback, - airtaudio::StreamOptions *options) { - if (m_stream.state != airtaudio::state_closed) { + airtaudio::StreamParameters *iParams, + enum audio::format format, + uint32_t sampleRate, + uint32_t *bufferFrames, + airtaudio::AirTAudioCallback callback, + airtaudio::StreamOptions *options) { + if (m_state != airtaudio::state_closed) { ATA_ERROR("a stream is already open!"); return airtaudio::error_invalidUse; } @@ -193,11 +133,11 @@ enum airtaudio::error airtaudio::Api::openStream(airtaudio::StreamParameters *oP return airtaudio::error_systemError; } } - m_stream.callbackInfo.callback = callback; + m_callbackInfo.callback = callback; if (options != nullptr) { - options->numberOfBuffers = m_stream.nBuffers; + options->numberOfBuffers = m_nBuffers; } - m_stream.state = airtaudio::state_stopped; + m_state = airtaudio::state_stopped; return airtaudio::error_none; } @@ -232,9 +172,9 @@ void airtaudio::Api::tickStreamTime() { // Subclasses that do not provide their own implementation of // getStreamTime should call this function once per buffer I/O to // provide basic stream time support. 
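The tickStreamTime() hunk that follows only renames members (m_stream.streamTime becomes m_streamTime), but the mechanism it keeps is worth spelling out. A minimal sketch, assuming the m_streamTime/m_bufferSize/m_sampleRate members this patch introduces; the class name and default values are illustrative only:

```cpp
#include <cstdint>

// Sketch only, not the patched code: a per-buffer stream clock of the kind
// tickStreamTime()/getStreamTime() maintain.
class StreamClockSketch {
public:
	// call once per processed buffer: bufferSize frames at sampleRate frames/s
	void tick() {
		m_streamTime += static_cast<double>(m_bufferSize) / m_sampleRate;
	}
	double seconds() const {
		return m_streamTime;
	}
private:
	double m_streamTime = 0.0;
	uint32_t m_bufferSize = 256;
	uint32_t m_sampleRate = 48000;
};
```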
- m_stream.streamTime += (m_stream.bufferSize * 1.0 / m_stream.sampleRate); + m_streamTime += (m_bufferSize * 1.0 / m_sampleRate); #if defined(HAVE_GETTIMEOFDAY) - gettimeofday(&m_stream.lastTickTimestamp, nullptr); + gettimeofday(&m_lastTickTimestamp, nullptr); #endif } @@ -243,13 +183,13 @@ long airtaudio::Api::getStreamLatency() { return 0; } long totalLatency = 0; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { - totalLatency = m_stream.latency[0]; + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { + totalLatency = m_latency[0]; } - if ( m_stream.mode == airtaudio::mode_input - || m_stream.mode == airtaudio::mode_duplex) { - totalLatency += m_stream.latency[1]; + if ( m_mode == airtaudio::mode_input + || m_mode == airtaudio::mode_duplex) { + totalLatency += m_latency[1]; } return totalLatency; } @@ -263,16 +203,16 @@ double airtaudio::Api::getStreamTime() { // adding in the elapsed time since the last tick. struct timeval then; struct timeval now; - if (m_stream.state != airtaudio::state_running || m_stream.streamTime == 0.0) { - return m_stream.streamTime; + if (m_state != airtaudio::state_running || m_streamTime == 0.0) { + return m_streamTime; } gettimeofday(&now, nullptr); - then = m_stream.lastTickTimestamp; - return m_stream.streamTime + then = m_lastTickTimestamp; + return m_streamTime + ((now.tv_sec + 0.000001 * now.tv_usec) - (then.tv_sec + 0.000001 * then.tv_usec)); #else - return m_stream.streamTime; + return m_streamTime; #endif } @@ -280,11 +220,11 @@ uint32_t airtaudio::Api::getStreamSampleRate() { if (verifyStream() != airtaudio::error_none) { return 0; } - return m_stream.sampleRate; + return m_sampleRate; } enum airtaudio::error airtaudio::Api::verifyStream() { - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_ERROR("a stream is not open!"); return airtaudio::error_invalidUse; } @@ -292,98 +232,98 @@ enum airtaudio::error airtaudio::Api::verifyStream() { } void airtaudio::Api::clearStreamInfo() { - m_stream.mode = airtaudio::mode_unknow; - m_stream.state = airtaudio::state_closed; - m_stream.sampleRate = 0; - m_stream.bufferSize = 0; - m_stream.nBuffers = 0; - m_stream.userFormat = audio::format_unknow; - m_stream.streamTime = 0.0; - m_stream.apiHandle = 0; - m_stream.deviceBuffer = 0; - m_stream.callbackInfo.callback = 0; - m_stream.callbackInfo.isRunning = false; + m_mode = airtaudio::mode_unknow; + m_state = airtaudio::state_closed; + m_sampleRate = 0; + m_bufferSize = 0; + m_nBuffers = 0; + m_userFormat = audio::format_unknow; + m_streamTime = 0.0; + m_apiHandle = nullptr; + m_deviceBuffer = nullptr; + m_callbackInfo.callback = 0; + m_callbackInfo.isRunning = false; for (int32_t iii=0; iii<2; ++iii) { - m_stream.device[iii] = 11111; - m_stream.doConvertBuffer[iii] = false; - m_stream.deviceInterleaved[iii] = true; - m_stream.doByteSwap[iii] = false; - m_stream.nUserChannels[iii] = 0; - m_stream.nDeviceChannels[iii] = 0; - m_stream.channelOffset[iii] = 0; - m_stream.deviceFormat[iii] = audio::format_unknow; - m_stream.latency[iii] = 0; - m_stream.userBuffer[iii] = 0; - m_stream.convertInfo[iii].channels = 0; - m_stream.convertInfo[iii].inJump = 0; - m_stream.convertInfo[iii].outJump = 0; - m_stream.convertInfo[iii].inFormat = audio::format_unknow; - m_stream.convertInfo[iii].outFormat = audio::format_unknow; - m_stream.convertInfo[iii].inOffset.clear(); - m_stream.convertInfo[iii].outOffset.clear(); + m_device[iii] = 11111; + 
m_doConvertBuffer[iii] = false; + m_deviceInterleaved[iii] = true; + m_doByteSwap[iii] = false; + m_nUserChannels[iii] = 0; + m_nDeviceChannels[iii] = 0; + m_channelOffset[iii] = 0; + m_deviceFormat[iii] = audio::format_unknow; + m_latency[iii] = 0; + m_userBuffer[iii].clear(); + m_convertInfo[iii].channels = 0; + m_convertInfo[iii].inJump = 0; + m_convertInfo[iii].outJump = 0; + m_convertInfo[iii].inFormat = audio::format_unknow; + m_convertInfo[iii].outFormat = audio::format_unknow; + m_convertInfo[iii].inOffset.clear(); + m_convertInfo[iii].outOffset.clear(); } } void airtaudio::Api::setConvertInfo(airtaudio::mode _mode, uint32_t _firstChannel) { int32_t idTable = airtaudio::modeToIdTable(_mode); if (_mode == airtaudio::mode_input) { // convert device to user buffer - m_stream.convertInfo[idTable].inJump = m_stream.nDeviceChannels[1]; - m_stream.convertInfo[idTable].outJump = m_stream.nUserChannels[1]; - m_stream.convertInfo[idTable].inFormat = m_stream.deviceFormat[1]; - m_stream.convertInfo[idTable].outFormat = m_stream.userFormat; + m_convertInfo[idTable].inJump = m_nDeviceChannels[1]; + m_convertInfo[idTable].outJump = m_nUserChannels[1]; + m_convertInfo[idTable].inFormat = m_deviceFormat[1]; + m_convertInfo[idTable].outFormat = m_userFormat; } else { // convert user to device buffer - m_stream.convertInfo[idTable].inJump = m_stream.nUserChannels[0]; - m_stream.convertInfo[idTable].outJump = m_stream.nDeviceChannels[0]; - m_stream.convertInfo[idTable].inFormat = m_stream.userFormat; - m_stream.convertInfo[idTable].outFormat = m_stream.deviceFormat[0]; + m_convertInfo[idTable].inJump = m_nUserChannels[0]; + m_convertInfo[idTable].outJump = m_nDeviceChannels[0]; + m_convertInfo[idTable].inFormat = m_userFormat; + m_convertInfo[idTable].outFormat = m_deviceFormat[0]; } - if (m_stream.convertInfo[idTable].inJump < m_stream.convertInfo[idTable].outJump) { - m_stream.convertInfo[idTable].channels = m_stream.convertInfo[idTable].inJump; + if (m_convertInfo[idTable].inJump < m_convertInfo[idTable].outJump) { + m_convertInfo[idTable].channels = m_convertInfo[idTable].inJump; } else { - m_stream.convertInfo[idTable].channels = m_stream.convertInfo[idTable].outJump; + m_convertInfo[idTable].channels = m_convertInfo[idTable].outJump; } // Set up the interleave/deinterleave offsets. - if (m_stream.deviceInterleaved[idTable] == false) { + if (m_deviceInterleaved[idTable] == false) { if (_mode == airtaudio::mode_input) { - for (int32_t kkk=0; kkk 0) { - if (m_stream.deviceInterleaved[idTable]) { + if (m_deviceInterleaved[idTable]) { if (_mode == airtaudio::mode_output) { - for (int32_t kkk=0; kkk(_outBuffer); + uint8_t *in = reinterpret_cast(_inBuffer); + for (size_t iii=0; iii(_outBuffer); + uint16_t *in = reinterpret_cast(_inBuffer); + for (size_t iii=0; iii(_outBuffer); + uint32_t *in = reinterpret_cast(_inBuffer); + for (size_t iii=0; iii(_outBuffer); + uint64_t *in = reinterpret_cast(_inBuffer); + for (size_t iii=0; iii #include +#include +#include +#include namespace airtaudio { const std::vector& genericSampleRate(); - /** - * @brief Audio API specifier arguments. - */ - enum type { - type_undefined, //!< Error API. - type_alsa, //!< LINUX The Advanced Linux Sound Architecture. - type_pulse, //!< LINUX The Linux PulseAudio. - type_oss, //!< LINUX The Linux Open Sound System. - type_jack, //!< UNIX The Jack Low-Latency Audio Server. - type_coreOSX, //!< Macintosh OSX Core Audio. - type_coreIOS, //!< Macintosh iOS Core Audio. - type_asio, //!< WINDOWS The Steinberg Audio Stream I/O. 
- type_ds, //!< WINDOWS The Microsoft Direct Sound. - type_java, //!< ANDROID Interface. - type_dummy, //!< Empty wrapper (non-functional). - type_user1, //!< User interface 1. - type_user2, //!< User interface 2. - type_user3, //!< User interface 3. - type_user4, //!< User interface 4. - }; - std::ostream& operator <<(std::ostream& _os, const enum airtaudio::type& _obj); - std::ostream& operator <<(std::ostream& _os, const std::vector& _obj); - std::string getTypeString(enum audio::format _value); - enum airtaudio::type getTypeFromString(const std::string& _value); - enum state { - state_closed, - state_stopped, - state_stopping, - state_running - }; - enum mode { - mode_unknow, - mode_output, - mode_input, - mode_duplex - }; - int32_t modeToIdTable(enum mode _mode); // A protected structure used for buffer conversion. class ConvertInfo { public: @@ -64,45 +29,6 @@ namespace airtaudio { std::vector outOffset; }; - namespace api { - // A protected structure for audio streams. - class Stream { - public: - uint32_t device[2]; // Playback and record, respectively. - void *apiHandle; // void pointer for API specific stream handle information - enum airtaudio::mode mode; // airtaudio::mode_output, airtaudio::mode_input, or airtaudio::mode_duplex. - enum airtaudio::state state; // STOPPED, RUNNING, or CLOSED - char *userBuffer[2]; // Playback and record, respectively. - char *deviceBuffer; - bool doConvertBuffer[2]; // Playback and record, respectively. - bool deviceInterleaved[2]; // Playback and record, respectively. - bool doByteSwap[2]; // Playback and record, respectively. - uint32_t sampleRate; - uint32_t bufferSize; - uint32_t nBuffers; - uint32_t nUserChannels[2]; // Playback and record, respectively. - uint32_t nDeviceChannels[2]; // Playback and record channels, respectively. - uint32_t channelOffset[2]; // Playback and record, respectively. - uint64_t latency[2]; // Playback and record, respectively. - enum audio::format userFormat; - enum audio::format deviceFormat[2]; // Playback and record, respectively. - std::mutex mutex; - airtaudio::CallbackInfo callbackInfo; - airtaudio::ConvertInfo convertInfo[2]; - double streamTime; // Number of elapsed seconds since the stream started. 
- - #if defined(HAVE_GETTIMEOFDAY) - struct timeval lastTickTimestamp; - #endif - - Stream() : - apiHandle(nullptr), - deviceBuffer(nullptr) { - device[0] = 11111; - device[1] = 11111; - } - }; - }; class Api { public: Api(); @@ -113,12 +39,12 @@ namespace airtaudio { virtual uint32_t getDefaultInputDevice(); virtual uint32_t getDefaultOutputDevice(); enum airtaudio::error openStream(airtaudio::StreamParameters *_outputParameters, - airtaudio::StreamParameters *_inputParameters, - audio::format _format, - uint32_t _sampleRate, - uint32_t *_bufferFrames, - airtaudio::AirTAudioCallback _callback, - airtaudio::StreamOptions *_options); + airtaudio::StreamParameters *_inputParameters, + audio::format _format, + uint32_t _sampleRate, + uint32_t *_bufferFrames, + airtaudio::AirTAudioCallback _callback, + airtaudio::StreamOptions *_options); virtual enum airtaudio::error closeStream(); virtual enum airtaudio::error startStream() = 0; virtual enum airtaudio::error stopStream() = 0; @@ -127,22 +53,46 @@ namespace airtaudio { uint32_t getStreamSampleRate(); virtual double getStreamTime(); bool isStreamOpen() const { - return m_stream.state != airtaudio::state_closed; + return m_state != airtaudio::state_closed; } bool isStreamRunning() const { - return m_stream.state == airtaudio::state_running; + return m_state == airtaudio::state_running; } protected: - airtaudio::api::Stream m_stream; + mutable std::mutex m_mutex; + uint32_t m_device[2]; // Playback and record, respectively. + // TODO : Remove this use derivative property of the c++ class ... + void *m_apiHandle; // void pointer for API specific stream handle information + enum airtaudio::mode m_mode; // airtaudio::mode_output, airtaudio::mode_input, or airtaudio::mode_duplex. + enum airtaudio::state m_state; // STOPPED, RUNNING, or CLOSED + std::vector m_userBuffer[2]; // Playback and record, respectively. + char *m_deviceBuffer; + bool m_doConvertBuffer[2]; // Playback and record, respectively. + bool m_deviceInterleaved[2]; // Playback and record, respectively. + bool m_doByteSwap[2]; // Playback and record, respectively. + uint32_t m_sampleRate; + uint32_t m_bufferSize; + uint32_t m_nBuffers; + uint32_t m_nUserChannels[2]; // Playback and record, respectively. + uint32_t m_nDeviceChannels[2]; // Playback and record channels, respectively. + uint32_t m_channelOffset[2]; // Playback and record, respectively. + uint64_t m_latency[2]; // Playback and record, respectively. + enum audio::format m_userFormat; // TODO : Remove this ==> use can only open in the Harware format ... + enum audio::format m_deviceFormat[2]; // Playback and record, respectively. + // TODO : Remove this ... + airtaudio::CallbackInfo m_callbackInfo; + airtaudio::ConvertInfo m_convertInfo[2]; + // TODO : use : std::chrono::system_clock::time_point ... + double m_streamTime; // Number of elapsed seconds since the stream started. - /*! - Protected, api-specific method that attempts to open a device - with the given parameters. This function MUST be implemented by - all subclasses. If an error is encountered during the probe, a - "warning" message is reported and false is returned. A - successful probe is indicated by a return value of true. - */ + /** + * @brief api-specific method that attempts to open a device + * with the given parameters. This function MUST be implemented by + * all subclasses. If an error is encountered during the probe, a + * "warning" message is reported and false is returned. A + * successful probe is indicated by a return value of true. 
+ */ virtual bool probeDeviceOpen(uint32_t _device, enum airtaudio::mode _mode, uint32_t _channels, @@ -151,17 +101,17 @@ namespace airtaudio { enum audio::format _format, uint32_t *_bufferSize, airtaudio::StreamOptions *_options); - - //! A protected function used to increment the stream time. + /** + * @brief Increment the stream time. + */ void tickStreamTime(); - - //! Protected common method to clear an RtApiStream structure. + /** + * @brief Clear an RtApiStream structure. + */ void clearStreamInfo(); - - /*! - Protected common method that throws an RtError (type = - INVALID_USE) if a stream is not open. - */ + /** + * @brief Check the current stream status + */ enum airtaudio::error verifyStream(); /** * @brief Protected method used to perform format, channel number, and/or interleaving @@ -171,12 +121,15 @@ namespace airtaudio { char *_inBuffer, airtaudio::ConvertInfo& _info); - //! Protected common method used to perform byte-swapping on buffers. + /** + * @brief Perform byte-swapping on buffers. + */ void byteSwapBuffer(char *_buffer, uint32_t _samples, enum audio::format _format); - - //! Protected common method that sets up the parameters for buffer conversion. + /** + * @brief Sets up the parameters for buffer conversion. + */ void setConvertInfo(enum airtaudio::mode _mode, uint32_t _firstChannel); }; diff --git a/airtaudio/CallbackInfo.h b/airtaudio/CallbackInfo.h index 7bed953..d55f5bc 100644 --- a/airtaudio/CallbackInfo.h +++ b/airtaudio/CallbackInfo.h @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ #ifndef __AIRTAUDIO_CALLBACK_INFO_H__ @@ -19,7 +18,6 @@ namespace airtaudio { // handling functions. class CallbackInfo { public: - void* object; // Used as a "this" pointer. std::thread* thread; airtaudio::AirTAudioCallback callback; void* apiInfo; // void pointer for API specific callback information @@ -29,7 +27,6 @@ namespace airtaudio { // Default constructor. CallbackInfo() : - object(nullptr), callback(nullptr), apiInfo(nullptr), isRunning(false), diff --git a/airtaudio/DeviceInfo.h b/airtaudio/DeviceInfo.h index 5d14158..4c65e4f 100644 --- a/airtaudio/DeviceInfo.h +++ b/airtaudio/DeviceInfo.h @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. 
Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ #ifndef __AIRTAUDIO_DEVICE_INFO_H__ diff --git a/airtaudio/Flags.cpp b/airtaudio/Flags.cpp new file mode 100644 index 0000000..3a3c715 --- /dev/null +++ b/airtaudio/Flags.cpp @@ -0,0 +1,9 @@ +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio + */ + +#include +#include diff --git a/airtaudio/Flags.h b/airtaudio/Flags.h new file mode 100644 index 0000000..8ca6dcb --- /dev/null +++ b/airtaudio/Flags.h @@ -0,0 +1,24 @@ +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio + */ + +#ifndef __AIRTAUDIO_FLAGS_H__ +#define __AIRTAUDIO_FLAGS_H__ + +#include + +namespace airtaudio { + class Flags { + public: + bool m_minimizeLatency; // Simple example ==> TODO ... + Flags() : + m_minimizeLatency(false) { + // nothing to do ... + } + }; +}; + +#endif diff --git a/airtaudio/Interface.cpp b/airtaudio/Interface.cpp index 08b56c3..9de3331 100644 --- a/airtaudio/Interface.cpp +++ b/airtaudio/Interface.cpp @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ //#include diff --git a/airtaudio/Interface.h b/airtaudio/Interface.h index 53fe0fb..af7fb85 100644 --- a/airtaudio/Interface.h +++ b/airtaudio/Interface.h @@ -1,18 +1,16 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ -#ifndef __AIRTAUDIO_RTAUDIO_H__ -#define __AIRTAUDIO_RTAUDIO_H__ +#ifndef __AIRTAUDIO_INTERFACE_H__ +#define __AIRTAUDIO_INTERFACE_H__ #include #include #include -#include #include #include #include diff --git a/airtaudio/StreamOptions.h b/airtaudio/StreamOptions.h index c4f8868..9cacdab 100644 --- a/airtaudio/StreamOptions.h +++ b/airtaudio/StreamOptions.h @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ #ifndef __AIRTAUDIO_STREAM_OPTION_H__ diff --git a/airtaudio/StreamParameters.h b/airtaudio/StreamParameters.h index 07a7743..e1fa9d8 100644 --- a/airtaudio/StreamParameters.h +++ b/airtaudio/StreamParameters.h @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. 
Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ #ifndef __AIRTAUDIO_STREAM_PARAMETER_H__ diff --git a/airtaudio/api/Alsa.cpp b/airtaudio/api/Alsa.cpp index 9e5ec8d..0b81679 100644 --- a/airtaudio/api/Alsa.cpp +++ b/airtaudio/api/Alsa.cpp @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ @@ -42,14 +41,14 @@ struct AlsaHandle { } }; -static void alsaCallbackHandler(void * _ptr); + airtaudio::api::Alsa::Alsa() { // Nothing to do here. } airtaudio::api::Alsa::~Alsa() { - if (m_stream.state != airtaudio::state_closed) { + if (m_state != airtaudio::state_closed) { closeStream(); } } @@ -154,9 +153,9 @@ airtaudio::DeviceInfo airtaudio::api::Alsa::getDeviceInfo(uint32_t _device) { foundDevice: // If a stream is already open, we cannot probe the stream devices. // Thus, use the saved results. - if ( m_stream.state != airtaudio::state_closed - && ( m_stream.device[0] == _device - || m_stream.device[1] == _device)) { + if ( m_state != airtaudio::state_closed + && ( m_device[0] == _device + || m_device[1] == _device)) { snd_ctl_close(chandle); if (_device >= m_devices.size()) { ATA_ERROR("device ID was not present before stream was opened."); @@ -428,7 +427,7 @@ foundDevice: // stream and save the results for use by getDeviceInfo(). if ( _mode == airtaudio::mode_output || ( _mode == airtaudio::mode_input - && m_stream.mode != airtaudio::mode_output)) { + && m_mode != airtaudio::mode_output)) { // only do once this->saveDeviceInfo(); } @@ -462,9 +461,9 @@ foundDevice: result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED); if (result < 0) { result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED); - m_stream.deviceInterleaved[modeToIdTable(_mode)] = false; + m_deviceInterleaved[modeToIdTable(_mode)] = false; } else { - m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; + m_deviceInterleaved[modeToIdTable(_mode)] = true; } if (result < 0) { snd_pcm_close(phandle); @@ -472,7 +471,7 @@ foundDevice: return false; } // Determine how to set the device format. - m_stream.userFormat = _format; + m_userFormat = _format; snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN; if (_format == audio::format_int8) { deviceFormat = SND_PCM_FORMAT_S8; @@ -488,7 +487,7 @@ foundDevice: deviceFormat = SND_PCM_FORMAT_FLOAT64; } if (snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) { - m_stream.deviceFormat[modeToIdTable(_mode)] = _format; + m_deviceFormat[modeToIdTable(_mode)] = _format; } else { // If we get here, no supported format was found. snd_pcm_close(phandle); @@ -503,11 +502,11 @@ foundDevice: return false; } // Determine whether byte-swaping is necessary. 
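The block that follows uses snd_pcm_format_cpu_endian() to decide whether samples will need byte-swapping; the swap itself happens later through byteSwapBuffer(). A minimal sketch of that kind of in-place swap for 16-bit samples — a standalone helper for illustration, not the library function:

```cpp
#include <cstddef>
#include <cstdint>

// Sketch only: in-place byte swap of 16-bit samples, the operation that the
// m_doByteSwap flag set below gates in the callback path.
static void swapBytes16(uint8_t* _buffer, size_t _samples) {
	for (size_t iii = 0; iii < _samples; ++iii) {
		uint8_t tmp = _buffer[2 * iii];
		_buffer[2 * iii] = _buffer[2 * iii + 1];
		_buffer[2 * iii + 1] = tmp;
	}
}
```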
- m_stream.doByteSwap[modeToIdTable(_mode)] = false; + m_doByteSwap[modeToIdTable(_mode)] = false; if (deviceFormat != SND_PCM_FORMAT_S8) { result = snd_pcm_format_cpu_endian(deviceFormat); if (result == 0) { - m_stream.doByteSwap[modeToIdTable(_mode)] = true; + m_doByteSwap[modeToIdTable(_mode)] = true; } else if (result < 0) { snd_pcm_close(phandle); ATA_ERROR("error getting pcm device (" << name << ") endian-ness, " << snd_strerror(result) << "."); @@ -523,7 +522,7 @@ foundDevice: } // Determine the number of channels for this device. We support a possible // minimum device channel number > than the value requested by the user. - m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; + m_nUserChannels[modeToIdTable(_mode)] = _channels; uint32_t value; result = snd_pcm_hw_params_get_channels_max(hw_params, &value); uint32_t deviceChannels = value; @@ -543,7 +542,7 @@ foundDevice: if (deviceChannels < _channels + _firstChannel) { deviceChannels = _channels + _firstChannel; } - m_stream.nDeviceChannels[modeToIdTable(_mode)] = deviceChannels; + m_nDeviceChannels[modeToIdTable(_mode)] = deviceChannels; // Set the device channels. result = snd_pcm_hw_params_set_channels(phandle, hw_params, deviceChannels); if (result < 0) { @@ -584,14 +583,14 @@ foundDevice: } // If attempting to setup a duplex stream, the bufferSize parameter // MUST be the same in both directions! - if ( m_stream.mode == airtaudio::mode_output + if ( m_mode == airtaudio::mode_output && _mode == airtaudio::mode_input - && *_bufferSize != m_stream.bufferSize) { + && *_bufferSize != m_bufferSize) { snd_pcm_close(phandle); ATA_ERROR("system error setting buffer size for duplex stream on device (" << name << ")."); return false; } - m_stream.bufferSize = *_bufferSize; + m_bufferSize = *_bufferSize; // Install the hardware configuration result = snd_pcm_hw_params(phandle, hw_params); if (result < 0) { @@ -621,46 +620,46 @@ foundDevice: return false; } // Set flags for buffer conversion - m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; - if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + m_doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false - && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if ( m_deviceInterleaved[modeToIdTable(_mode)] == false + && m_nUserChannels[modeToIdTable(_mode)] > 1) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } // Allocate the ApiHandle if necessary and then save. AlsaHandle *apiInfo = nullptr; - if (m_stream.apiHandle == nullptr) { + if (m_apiHandle == nullptr) { apiInfo = (AlsaHandle *) new AlsaHandle; if (apiInfo == nullptr) { ATA_ERROR("error allocating AlsaHandle memory."); goto error; } - m_stream.apiHandle = (void *) apiInfo; + m_apiHandle = (void *) apiInfo; } else { - apiInfo = (AlsaHandle *) m_stream.apiHandle; + apiInfo = (AlsaHandle *) m_apiHandle; } apiInfo->handles[modeToIdTable(_mode)] = phandle; phandle = 0; // Allocate necessary internal buffers. 
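The hunk continuing below replaces the calloc()'d user buffers with std::vector<char> (m_userBuffer[...].resize(...)). A minimal sketch of the equivalent allocation; the helper name is hypothetical:

```cpp
#include <cstdint>
#include <vector>

// Sketch only: zero-initialized allocation via std::vector, replacing the old
// calloc()/free() pair. The helper name is hypothetical.
static void allocateUserBuffer(std::vector<char>& _buffer,
                               uint32_t _channels,
                               uint32_t _frames,
                               uint32_t _bytesPerSample) {
	uint64_t bufferBytes = uint64_t(_channels) * _frames * _bytesPerSample;
	_buffer.resize(bufferBytes, 0); // zero-filled like calloc(); freed automatically
}
```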
uint64_t bufferBytes; - bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat); - m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { + bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat); + m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0); + if (m_userBuffer[modeToIdTable(_mode)].size() == 0) { ATA_ERROR("error allocating user buffer memory."); goto error; } - if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + if (m_doConvertBuffer[modeToIdTable(_mode)]) { bool makeBuffer = true; - bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]); + bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]); if (_mode == airtaudio::mode_input) { - if ( m_stream.mode == airtaudio::mode_output - && m_stream.deviceBuffer) { - uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]); + if ( m_mode == airtaudio::mode_output + && m_deviceBuffer) { + uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]); if (bufferBytes <= bytesOut) { makeBuffer = false; } @@ -668,30 +667,30 @@ foundDevice: } if (makeBuffer) { bufferBytes *= *_bufferSize; - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = nullptr; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = nullptr; } - m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1); - if (m_stream.deviceBuffer == nullptr) { + m_deviceBuffer = (char *) calloc(bufferBytes, 1); + if (m_deviceBuffer == nullptr) { ATA_ERROR("error allocating device buffer memory."); goto error; } } } - m_stream.sampleRate = _sampleRate; - m_stream.nBuffers = periods; - m_stream.device[modeToIdTable(_mode)] = _device; - m_stream.state = airtaudio::state_stopped; + m_sampleRate = _sampleRate; + m_nBuffers = periods; + m_device[modeToIdTable(_mode)] = _device; + m_state = airtaudio::state_stopped; // Setup the buffer conversion information structure. - if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + if (m_doConvertBuffer[modeToIdTable(_mode)]) { setConvertInfo(_mode, _firstChannel); } // Setup thread if necessary. - if ( m_stream.mode == airtaudio::mode_output + if ( m_mode == airtaudio::mode_output && _mode == airtaudio::mode_input) { // We had already set up an output stream. - m_stream.mode = airtaudio::mode_duplex; + m_mode = airtaudio::mode_duplex; // Link the streams if possible. apiInfo->synchronized = false; if (snd_pcm_link(apiInfo->handles[0], apiInfo->handles[1]) == 0) { @@ -701,13 +700,12 @@ foundDevice: // TODO : airtaudio::error_warning; } } else { - m_stream.mode = _mode; + m_mode = _mode; // Setup callback thread. 
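The change just below drops the free-function alsaCallbackHandler() and the CallbackInfo::object back-pointer in favour of a static member trampoline handed directly to std::thread. A minimal sketch of that pattern, with illustrative class and method names:

```cpp
#include <atomic>
#include <thread>

// Sketch only: the static-trampoline pattern the patch adopts for the ALSA
// callback thread. Names are illustrative, not the library classes.
class EngineSketch {
public:
	void start() {
		m_isRunning = true;
		// the thread entry point is a static member; 'this' travels as user data
		m_thread = new std::thread(&EngineSketch::threadEntry, this);
	}
	void stop() {
		m_isRunning = false;
		if (m_thread != nullptr) {
			m_thread->join();
			delete m_thread;
			m_thread = nullptr;
		}
	}
private:
	static void threadEntry(void* _userData) {
		EngineSketch* self = reinterpret_cast<EngineSketch*>(_userData);
		while (self->m_isRunning == true) {
			self->oneCycle();
		}
	}
	void oneCycle() {
		// process one audio buffer here
	}
	std::atomic<bool> m_isRunning {false};
	std::thread* m_thread = nullptr;
};
```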
- m_stream.callbackInfo.object = (void *) this; - m_stream.callbackInfo.isRunning = true; - m_stream.callbackInfo.thread = new std::thread(alsaCallbackHandler, &m_stream.callbackInfo); - if (m_stream.callbackInfo.thread == nullptr) { - m_stream.callbackInfo.isRunning = false; + m_callbackInfo.isRunning = true; + m_callbackInfo.thread = new std::thread(&airtaudio::api::Alsa::alsaCallbackEvent, this); + if (m_callbackInfo.thread == nullptr) { + m_callbackInfo.isRunning = false; ATA_ERROR("creating callback thread!"); goto error; } @@ -723,49 +721,46 @@ error: } delete apiInfo; apiInfo = nullptr; - m_stream.apiHandle = 0; + m_apiHandle = 0; } if (phandle) { snd_pcm_close(phandle); } for (int32_t iii=0; iii<2; ++iii) { - if (m_stream.userBuffer[iii]) { - free(m_stream.userBuffer[iii]); - m_stream.userBuffer[iii] = 0; - } + m_userBuffer[iii].clear(); } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = 0; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = 0; } - m_stream.state = airtaudio::state_closed; + m_state = airtaudio::state_closed; return false; } enum airtaudio::error airtaudio::api::Alsa::closeStream() { - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_ERROR("no open stream to close!"); return airtaudio::error_warning; } - AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle; - m_stream.callbackInfo.isRunning = false; - m_stream.mutex.lock(); - if (m_stream.state == airtaudio::state_stopped) { + AlsaHandle *apiInfo = (AlsaHandle *) m_apiHandle; + m_callbackInfo.isRunning = false; + m_mutex.lock(); + if (m_state == airtaudio::state_stopped) { apiInfo->runnable = true; apiInfo->runnable_cv.notify_one(); } - m_stream.mutex.unlock(); - if (m_stream.callbackInfo.thread != nullptr) { - m_stream.callbackInfo.thread->join(); + m_mutex.unlock(); + if (m_callbackInfo.thread != nullptr) { + m_callbackInfo.thread->join(); } - if (m_stream.state == airtaudio::state_running) { - m_stream.state = airtaudio::state_stopped; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + if (m_state == airtaudio::state_running) { + m_state = airtaudio::state_stopped; + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { snd_pcm_drop(apiInfo->handles[0]); } - if ( m_stream.mode == airtaudio::mode_input - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_input + || m_mode == airtaudio::mode_duplex) { snd_pcm_drop(apiInfo->handles[1]); } } @@ -778,20 +773,17 @@ enum airtaudio::error airtaudio::api::Alsa::closeStream() { } delete apiInfo; apiInfo = nullptr; - m_stream.apiHandle = 0; + m_apiHandle = 0; } for (int32_t iii=0; iii<2; ++iii) { - if (m_stream.userBuffer[iii] != nullptr) { - free(m_stream.userBuffer[iii]); - m_stream.userBuffer[iii] = 0; - } + m_userBuffer[iii].clear(); } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = 0; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = 0; } - m_stream.mode = airtaudio::mode_unknow; - m_stream.state = airtaudio::state_closed; + m_mode = airtaudio::mode_unknow; + m_state = airtaudio::state_closed; return airtaudio::error_none; } @@ -800,17 +792,17 @@ enum airtaudio::error airtaudio::api::Alsa::startStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_running) { + if (m_state == airtaudio::state_running) { ATA_ERROR("the stream is already 
running!"); return airtaudio::error_warning; } - std::unique_lock lck(m_stream.mutex); + std::unique_lock lck(m_mutex); int32_t result = 0; snd_pcm_state_t state; - AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle; + AlsaHandle *apiInfo = (AlsaHandle *) m_apiHandle; snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { if (handle[0] == nullptr) { ATA_ERROR("send nullptr to alsa ..."); if (handle[1] != nullptr) { @@ -826,8 +818,8 @@ enum airtaudio::error airtaudio::api::Alsa::startStream() { } } } - if ( ( m_stream.mode == airtaudio::mode_input - || m_stream.mode == airtaudio::mode_duplex) + if ( ( m_mode == airtaudio::mode_input + || m_mode == airtaudio::mode_duplex) && !apiInfo->synchronized) { if (handle[1] == nullptr) { ATA_ERROR("send nullptr to alsa ..."); @@ -844,7 +836,7 @@ enum airtaudio::error airtaudio::api::Alsa::startStream() { } } } - m_stream.state = airtaudio::state_running; + m_state = airtaudio::state_running; unlock: apiInfo->runnable = true; apiInfo->runnable_cv.notify_one(); @@ -858,17 +850,17 @@ enum airtaudio::error airtaudio::api::Alsa::stopStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return airtaudio::error_warning; } - m_stream.state = airtaudio::state_stopped; - std::unique_lock lck(m_stream.mutex); + m_state = airtaudio::state_stopped; + std::unique_lock lck(m_mutex); int32_t result = 0; - AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle; + AlsaHandle *apiInfo = (AlsaHandle *) m_apiHandle; snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { if (apiInfo->synchronized) { result = snd_pcm_drop(handle[0]); } else { @@ -879,8 +871,8 @@ enum airtaudio::error airtaudio::api::Alsa::stopStream() { goto unlock; } } - if ( ( m_stream.mode == airtaudio::mode_input - || m_stream.mode == airtaudio::mode_duplex) + if ( ( m_mode == airtaudio::mode_input + || m_mode == airtaudio::mode_duplex) && !apiInfo->synchronized) { result = snd_pcm_drop(handle[1]); if (result < 0) { @@ -899,25 +891,25 @@ enum airtaudio::error airtaudio::api::Alsa::abortStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return airtaudio::error_warning; } - m_stream.state = airtaudio::state_stopped; - std::unique_lock lck(m_stream.mutex); + m_state = airtaudio::state_stopped; + std::unique_lock lck(m_mutex); int32_t result = 0; - AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle; + AlsaHandle *apiInfo = (AlsaHandle *) m_apiHandle; snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { result = snd_pcm_drop(handle[0]); if (result < 0) { ATA_ERROR("error aborting output pcm device, " << snd_strerror(result) << "."); goto unlock; } } - if ( ( m_stream.mode == airtaudio::mode_input - || m_stream.mode == 
airtaudio::mode_duplex) + if ( ( m_mode == airtaudio::mode_input + || m_mode == airtaudio::mode_duplex) && !apiInfo->synchronized) { result = snd_pcm_drop(handle[1]); if (result < 0) { @@ -932,47 +924,59 @@ unlock: return airtaudio::error_systemError; } + +void airtaudio::api::Alsa::alsaCallbackEvent(void *_userData) { + airtaudio::api::Alsa* myClass = reinterpret_cast(_userData); + myClass->callbackEvent(); +} + void airtaudio::api::Alsa::callbackEvent() { - AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle; - if (m_stream.state == airtaudio::state_stopped) { - std::unique_lock lck(m_stream.mutex); + while (m_callbackInfo.isRunning == true) { + callbackEventOneCycle(); + } +} + +void airtaudio::api::Alsa::callbackEventOneCycle() { + AlsaHandle *apiInfo = (AlsaHandle *) m_apiHandle; + if (m_state == airtaudio::state_stopped) { + std::unique_lock lck(m_mutex); // TODO : Set this back .... /* while (!apiInfo->runnable) { apiInfo->runnable_cv.wait(lck); } */ - if (m_stream.state != airtaudio::state_running) { + if (m_state != airtaudio::state_running) { return; } } - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_CRITICAL("the stream is closed ... this shouldn't happen!"); return; // TODO : notify appl: airtaudio::error_warning; } int32_t doStopStream = 0; double streamTime = getStreamTime(); enum airtaudio::status status = airtaudio::status_ok; - if (m_stream.mode != airtaudio::mode_input && apiInfo->xrun[0] == true) { + if (m_mode != airtaudio::mode_input && apiInfo->xrun[0] == true) { status = airtaudio::status_underflow; apiInfo->xrun[0] = false; } - if (m_stream.mode != airtaudio::mode_output && apiInfo->xrun[1] == true) { + if (m_mode != airtaudio::mode_output && apiInfo->xrun[1] == true) { status = airtaudio::status_overflow; apiInfo->xrun[1] = false; } - doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[0], - m_stream.userBuffer[1], - m_stream.bufferSize, - streamTime, - status); + doStopStream = m_callbackInfo.callback(&m_userBuffer[0][0], + &m_userBuffer[1][0], + m_bufferSize, + streamTime, + status); if (doStopStream == 2) { abortStream(); return; } - std::unique_lock lck(m_stream.mutex); + std::unique_lock lck(m_mutex); // The state might change while waiting on a mutex. - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { goto unlock; } int32_t result; @@ -982,29 +986,29 @@ void airtaudio::api::Alsa::callbackEvent() { snd_pcm_sframes_t frames; audio::format format; handle = (snd_pcm_t **) apiInfo->handles; - if ( m_stream.mode == airtaudio::mode_input - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_input + || m_mode == airtaudio::mode_duplex) { // Setup parameters. - if (m_stream.doConvertBuffer[1]) { - buffer = m_stream.deviceBuffer; - channels = m_stream.nDeviceChannels[1]; - format = m_stream.deviceFormat[1]; + if (m_doConvertBuffer[1]) { + buffer = m_deviceBuffer; + channels = m_nDeviceChannels[1]; + format = m_deviceFormat[1]; } else { - buffer = m_stream.userBuffer[1]; - channels = m_stream.nUserChannels[1]; - format = m_stream.userFormat; + buffer = &m_userBuffer[1][0]; + channels = m_nUserChannels[1]; + format = m_userFormat; } // Read samples from device in interleaved/non-interleaved format. 
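The read path below picks snd_pcm_readi() for interleaved devices and snd_pcm_readn() with per-channel pointers otherwise. A minimal sketch of the layout difference involved, reduced to a stereo de-interleave helper (illustrative, not the library code):

```cpp
#include <cstddef>
#include <cstdint>

// Sketch only: interleaved frames (L R L R ...) split into planar channel
// buffers (L L L ... / R R R ...), the difference the branch below handles.
static void deinterleaveStereo(const int16_t* _in,
                               int16_t* _left,
                               int16_t* _right,
                               size_t _frames) {
	for (size_t iii = 0; iii < _frames; ++iii) {
		_left[iii] = _in[2 * iii];
		_right[iii] = _in[2 * iii + 1];
	}
}
```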
- if (m_stream.deviceInterleaved[1]) { - result = snd_pcm_readi(handle[1], buffer, m_stream.bufferSize); + if (m_deviceInterleaved[1]) { + result = snd_pcm_readi(handle[1], buffer, m_bufferSize); } else { void *bufs[channels]; - size_t offset = m_stream.bufferSize * audio::getFormatBytes(format); + size_t offset = m_bufferSize * audio::getFormatBytes(format); for (int32_t i=0; i 0) { - m_stream.latency[1] = frames; + m_latency[1] = frames; } } tryOutput: - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { // Setup parameters and do buffer conversion if necessary. - if (m_stream.doConvertBuffer[0]) { - buffer = m_stream.deviceBuffer; - convertBuffer(buffer, m_stream.userBuffer[0], m_stream.convertInfo[0]); - channels = m_stream.nDeviceChannels[0]; - format = m_stream.deviceFormat[0]; + if (m_doConvertBuffer[0]) { + buffer = m_deviceBuffer; + convertBuffer(buffer, &m_userBuffer[0][0], m_convertInfo[0]); + channels = m_nDeviceChannels[0]; + format = m_deviceFormat[0]; } else { - buffer = m_stream.userBuffer[0]; - channels = m_stream.nUserChannels[0]; - format = m_stream.userFormat; + buffer = &m_userBuffer[0][0]; + channels = m_nUserChannels[0]; + format = m_userFormat; } // Do byte swapping if necessary. - if (m_stream.doByteSwap[0]) { - byteSwapBuffer(buffer, m_stream.bufferSize * channels, format); + if (m_doByteSwap[0]) { + byteSwapBuffer(buffer, m_bufferSize * channels, format); } // Write samples to device in interleaved/non-interleaved format. - if (m_stream.deviceInterleaved[0]) { - result = snd_pcm_writei(handle[0], buffer, m_stream.bufferSize); + if (m_deviceInterleaved[0]) { + result = snd_pcm_writei(handle[0], buffer, m_bufferSize); } else { void *bufs[channels]; - size_t offset = m_stream.bufferSize * audio::getFormatBytes(format); + size_t offset = m_bufferSize * audio::getFormatBytes(format); for (int32_t i=0; i 0) { - m_stream.latency[0] = frames; + m_latency[0] = frames; } } @@ -1100,14 +1104,5 @@ unlock: } } -static void alsaCallbackHandler(void *_ptr) { - airtaudio::CallbackInfo *info = (airtaudio::CallbackInfo*)_ptr; - airtaudio::api::Alsa *object = (airtaudio::api::Alsa *) info->object; - bool *isRunning = &info->isRunning; - while (*isRunning == true) { - // TODO : pthread_testcancel(); - object->callbackEvent(); - } -} #endif diff --git a/airtaudio/api/Alsa.h b/airtaudio/api/Alsa.h index e7faed7..b99f88f 100644 --- a/airtaudio/api/Alsa.h +++ b/airtaudio/api/Alsa.h @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ #if !defined(__AIRTAUDIO_API_ALSA_H__) && defined(__LINUX_ALSA__) @@ -31,6 +30,9 @@ namespace airtaudio { // which is not a member of RtAudio. External use of this function // will most likely produce highly undesireable results! 
void callbackEvent(); + void callbackEventOneCycle(); + private: + static void alsaCallbackEvent(void* _userData); private: std::vector m_devices; void saveDeviceInfo(); diff --git a/airtaudio/api/Android.cpp b/airtaudio/api/Android.cpp index 34ebafa..6abcc4a 100644 --- a/airtaudio/api/Android.cpp +++ b/airtaudio/api/Android.cpp @@ -1,7 +1,8 @@ -/** - * @author Edouard DUPIN - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ #ifdef __ANDROID_JAVA__ @@ -109,19 +110,19 @@ void airtaudio::api::Android::callBackEvent(void* _data, int32_t doStopStream = 0; double streamTime = getStreamTime(); enum airtaudio::status status = airtaudio::status_ok; - if (m_stream.doConvertBuffer[airtaudio::mode_output] == true) { - doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[airtaudio::mode_output], - nullptr, - _frameRate, - streamTime, - status); - convertBuffer((char*)_data, (char*)m_stream.userBuffer[airtaudio::mode_output], m_stream.convertInfo[airtaudio::mode_output]); + if (m_doConvertBuffer[airtaudio::mode_output] == true) { + doStopStream = m_callbackInfo.callback(m_userBuffer[airtaudio::mode_output], + nullptr, + _frameRate, + streamTime, + status); + convertBuffer((char*)_data, (char*)m_userBuffer[airtaudio::mode_output], m_convertInfo[airtaudio::mode_output]); } else { - doStopStream = m_stream.callbackInfo.callback(_data, - nullptr, - _frameRate, - streamTime, - status); + doStopStream = m_callbackInfo.callback(_data, + nullptr, + _frameRate, + streamTime, + status); } if (doStopStream == 2) { abortStream(); @@ -154,8 +155,8 @@ bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device, ATA_ERROR("Can not start a device input or duplex for Android ..."); return false; } - m_stream.userFormat = _format; - m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; + m_userFormat = _format; + m_nUserChannels[modeToIdTable(_mode)] = _channels; ewol::Context& tmpContext = ewol::getContext(); bool ret = false; if (_format == SINT8) { @@ -163,38 +164,38 @@ bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device, } else { ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 1, androidCallBackEvent, this); } - m_stream.bufferSize = 256; - m_stream.sampleRate = _sampleRate; - m_stream.doByteSwap[modeToIdTable(_mode)] = false; // for endienness ... + m_bufferSize = 256; + m_sampleRate = _sampleRate; + m_doByteSwap[modeToIdTable(_mode)] = false; // for endienness ... // TODO : For now, we write it in hard ==> to bu update later ... 
- m_stream.deviceFormat[modeToIdTable(_mode)] = SINT16; - m_stream.nDeviceChannels[modeToIdTable(_mode)] = 2; - m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; + m_deviceFormat[modeToIdTable(_mode)] = SINT16; + m_nDeviceChannels[modeToIdTable(_mode)] = 2; + m_deviceInterleaved[modeToIdTable(_mode)] = true; - m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; - if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + m_doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false - && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if ( m_deviceInterleaved[modeToIdTable(_mode)] == false + && m_nUserChannels[modeToIdTable(_mode)] > 1) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.doConvertBuffer[modeToIdTable(_mode)] == true) { + if (m_doConvertBuffer[modeToIdTable(_mode)] == true) { // Allocate necessary internal buffers. - uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.userFormat); - m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { + uint64_t bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * m_bufferSize * audio::getFormatBytes(m_userFormat); + m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("airtaudio::api::Android::probeDeviceOpen: error allocating user buffer memory."); } setConvertInfo(_mode, _firstChannel); } - ATA_INFO("device format : " << m_stream.deviceFormat[modeToIdTable(_mode)] << " user format : " << m_stream.userFormat); - ATA_INFO("device channels : " << m_stream.nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_stream.nUserChannels[modeToIdTable(_mode)]); - ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[modeToIdTable(_mode)]); + ATA_INFO("device format : " << m_deviceFormat[modeToIdTable(_mode)] << " user format : " << m_userFormat); + ATA_INFO("device channels : " << m_nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_nUserChannels[modeToIdTable(_mode)]); + ATA_INFO("do convert buffer : " << m_doConvertBuffer[modeToIdTable(_mode)]); if (ret == false) { ATA_ERROR("Can not open device."); } diff --git a/airtaudio/api/Android.h b/airtaudio/api/Android.h index 618d530..4f58d4f 100644 --- a/airtaudio/api/Android.h +++ b/airtaudio/api/Android.h @@ -1,7 +1,8 @@ -/** - * @author Edouard DUPIN - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ #if !defined(__AIRTAUDIO_API_ANDROID_H__) && defined(__ANDROID_JAVA__) diff --git a/airtaudio/api/Asio.cpp b/airtaudio/api/Asio.cpp index ada1f86..ebb2ae0 100644 --- a/airtaudio/api/Asio.cpp +++ b/airtaudio/api/Asio.cpp @@ -1,9 +1,8 @@ -/** - * @author Gary P. 
SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ @@ -83,7 +82,7 @@ airtaudio::api::Asio::Asio() { } airtaudio::api::Asio::~Asio() { - if (m_stream.state != airtaudio::state_closed) { + if (m_state != airtaudio::state_closed) { closeStream(); } if (m_coInitialized) { @@ -109,7 +108,7 @@ rtaudio::DeviceInfo airtaudio::api::Asio::getDeviceInfo(uint32_t _device) { return info; } // If a stream is already open, we cannot probe other devices. Thus, use the saved results. - if (m_stream.state != airtaudio::state_closed) { + if (m_state != airtaudio::state_closed) { if (_device >= m_devices.size()) { ATA_ERROR("device ID was not present before stream was opened."); return info; @@ -222,8 +221,8 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, airtaudio::StreamOptions *_options) { // For ASIO, a duplex stream MUST use the same driver. if ( _mode == airtaudio::mode_input - && m_stream.mode == airtaudio::mode_output - && m_stream.device[0] != _device) { + && m_mode == airtaudio::mode_output + && m_device[0] != _device) { ATA_ERROR("an ASIO duplex stream must use the same device for input and output!"); return false; } @@ -235,7 +234,7 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, } // Only load the driver once for duplex stream. if ( _mode != airtaudio::mode_input - || m_stream.mode != airtaudio::mode_output) { + || m_mode != airtaudio::mode_output) { // The getDeviceInfo() function will not work when a stream is open // because ASIO does not allow multiple devices to run at the same // time. Thus, we'll probe the system before opening a stream and @@ -267,9 +266,9 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, ATA_ERROR("driver (" << driverName << ") does not support requested channel count (" << _channels << ") + offset (" << _firstChannel << ")."); return false; } - m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels; - m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; - m_stream.channelOffset[modeToIdTable(_mode)] = _firstChannel; + m_nDeviceChannels[modeToIdTable(_mode)] = _channels; + m_nUserChannels[modeToIdTable(_mode)] = _channels; + m_channelOffset[modeToIdTable(_mode)] = _firstChannel; // Verify the sample rate is supported. result = ASIOCanSampleRate((ASIOSampleRate) _sampleRate); if (result != ASE_OK) { @@ -309,41 +308,41 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, return false; } // Assuming WINDOWS host is always little-endian. 
- m_stream.doByteSwap[modeToIdTable(_mode)] = false; - m_stream.userFormat = _format; - m_stream.deviceFormat[modeToIdTable(_mode)] = 0; + m_doByteSwap[modeToIdTable(_mode)] = false; + m_userFormat = _format; + m_deviceFormat[modeToIdTable(_mode)] = 0; if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB) { - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; if (channelInfo.type == ASIOSTInt16MSB) { - m_stream.doByteSwap[modeToIdTable(_mode)] = true; + m_doByteSwap[modeToIdTable(_mode)] = true; } } else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB) { - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; if (channelInfo.type == ASIOSTInt32MSB) { - m_stream.doByteSwap[modeToIdTable(_mode)] = true; + m_doByteSwap[modeToIdTable(_mode)] = true; } } else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB) { - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_FLOAT32; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_FLOAT32; if (channelInfo.type == ASIOSTFloat32MSB) { - m_stream.doByteSwap[modeToIdTable(_mode)] = true; + m_doByteSwap[modeToIdTable(_mode)] = true; } } else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB) { - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_FLOAT64; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_FLOAT64; if (channelInfo.type == ASIOSTFloat64MSB) { - m_stream.doByteSwap[modeToIdTable(_mode)] = true; + m_doByteSwap[modeToIdTable(_mode)] = true; } } else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB) { - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; if (channelInfo.type == ASIOSTInt24MSB) { - m_stream.doByteSwap[modeToIdTable(_mode)] = true; + m_doByteSwap[modeToIdTable(_mode)] = true; } } - if (m_stream.deviceFormat[modeToIdTable(_mode)] == 0) { + if (m_deviceFormat[modeToIdTable(_mode)] == 0) { drivers.removeCurrentDriver(); ATA_ERROR("driver (" << driverName << ") data format not supported by RtAudio."); return false; @@ -394,18 +393,18 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, *_bufferSize = (*_bufferSize + granularity-1) / granularity * granularity; } if ( _mode == airtaudio::mode_input - && m_stream.mode == airtaudio::mode_output - && m_stream.bufferSize != *_bufferSize) { + && m_mode == airtaudio::mode_output + && m_bufferSize != *_bufferSize) { drivers.removeCurrentDriver(); ATA_ERROR("input/output buffersize discrepancy!"); return false; } - m_stream.bufferSize = *_bufferSize; - m_stream.nBuffers = 2; + m_bufferSize = *_bufferSize; + m_nBuffers = 2; // ASIO always uses non-interleaved buffers. - m_stream.deviceInterleaved[modeToIdTable(_mode)] = false; + m_deviceInterleaved[modeToIdTable(_mode)] = false; // Allocate, if necessary, our AsioHandle structure for the stream. - AsioHandle *handle = (AsioHandle *) m_stream.apiHandle; + AsioHandle *handle = (AsioHandle *) m_apiHandle; if (handle == nullptr) { handle = new AsioHandle; if (handle == nullptr) { @@ -419,14 +418,14 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, TRUE, // manual-reset FALSE, // non-signaled initially nullptr); // unnamed - m_stream.apiHandle = (void *) handle; + m_apiHandle = (void *) handle; } // Create the ASIO internal buffers. 
Since RtAudio sets up input // and output separately, we'll have to dispose of previously // created output buffers for a duplex stream. long inputLatency, outputLatency; if ( _mode == airtaudio::mode_input - && m_stream.mode == airtaudio::mode_output) { + && m_mode == airtaudio::mode_output) { ASIODisposeBuffers(); if (handle->bufferInfos == nullptr) { free(handle->bufferInfos); @@ -435,7 +434,7 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, } // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure. bool buffersAllocated = false; - uint32_t i, nChannels = m_stream.nDeviceChannels[0] + m_stream.nDeviceChannels[1]; + uint32_t i, nChannels = m_nDeviceChannels[0] + m_nDeviceChannels[1]; handle->bufferInfos = (ASIOBufferInfo *) malloc(nChannels * sizeof(ASIOBufferInfo)); if (handle->bufferInfos == nullptr) { ATA_ERROR("error allocating bufferInfo memory for driver (" << driverName << ")."); @@ -443,14 +442,14 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, } ASIOBufferInfo *infos; infos = handle->bufferInfos; - for (i=0; iisInput = ASIOFalse; - infos->channelNum = i + m_stream.channelOffset[0]; + infos->channelNum = i + m_channelOffset[0]; infos->buffers[0] = infos->buffers[1] = 0; } - for (i=0; iisInput = ASIOTrue; - infos->channelNum = i + m_stream.channelOffset[1]; + infos->channelNum = i + m_channelOffset[1]; infos->buffers[0] = infos->buffers[1] = 0; } // Set up the ASIO callback structure and create the ASIO data buffers. @@ -458,35 +457,35 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, asioCallbacks.sampleRateDidChange = &sampleRateChanged; asioCallbacks.asioMessage = &asioMessages; asioCallbacks.bufferSwitchTimeInfo = nullptr; - result = ASIOCreateBuffers(handle->bufferInfos, nChannels, m_stream.bufferSize, &asioCallbacks); + result = ASIOCreateBuffers(handle->bufferInfos, nChannels, m_bufferSize, &asioCallbacks); if (result != ASE_OK) { ATA_ERROR("driver (" << driverName << ") error (" << getAsioErrorString(result) << ") creating buffers."); goto error; } buffersAllocated = true; // Set flags for buffer conversion. 
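// Editorial sketch (not part of the patch): the conversion flag set just below reduces
// to a small predicate — a convert/interleave pass is needed whenever the user format
// differs from the device format, or when the device side is non-interleaved (as ASIO
// buffers are) but the user asked for more than one channel. Names here are illustrative.
static bool needsConversion(bool _sameFormat, bool _deviceInterleaved, uint32_t _userChannels) {
	if (_sameFormat == false) {
		return true; // sample format mismatch: convertBuffer() pass required
	}
	if (    _deviceInterleaved == false
	     && _userChannels > 1) {
		return true; // device wants per-channel buffers, user data is interleaved
	}
	return false;
}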
- m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; - if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + m_doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false - && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if ( m_deviceInterleaved[modeToIdTable(_mode)] == false + && m_nUserChannels[modeToIdTable(_mode)] > 1) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } // Allocate necessary internal buffers uint64_t bufferBytes; - bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat); - m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { + bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat); + m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating user buffer memory."); goto error; } - if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + if (m_doConvertBuffer[modeToIdTable(_mode)]) { bool makeBuffer = true; - bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]); + bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]); if (_mode == airtaudio::mode_input) { - if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) { - uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]); + if (m_mode == airtaudio::mode_output && m_deviceBuffer) { + uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]); if (bufferBytes <= bytesOut) { makeBuffer = false; } @@ -494,41 +493,41 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, } if (makeBuffer) { bufferBytes *= *_bufferSize; - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = nullptr; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = nullptr; } - m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1); - if (m_stream.deviceBuffer == nullptr) { + m_deviceBuffer = (char *) calloc(bufferBytes, 1); + if (m_deviceBuffer == nullptr) { ATA_ERROR("error allocating device buffer memory."); goto error; } } } - m_stream.sampleRate = _sampleRate; - m_stream.device[modeToIdTable(_mode)] = _device; - m_stream.state = airtaudio::state_stopped; - asioCallbackInfo = &m_stream.callbackInfo; - m_stream.callbackInfo.object = (void*)this; - if ( m_stream.mode == airtaudio::mode_output + m_sampleRate = _sampleRate; + m_device[modeToIdTable(_mode)] = _device; + m_state = airtaudio::state_stopped; + asioCallbackInfo = &m_callbackInfo; + m_callbackInfo.object = (void*)this; + if ( m_mode == airtaudio::mode_output && _mode == airtaudio::mode_input) { // We had already set up an output stream. 
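// Editorial sketch (not part of the patch): the buffer sizing above in condensed form.
// The user buffer holds channels * bufferFrames * bytes-per-sample, and when the input
// half of a duplex stream is opened the shared device buffer is only re-allocated if
// its per-frame input footprint exceeds the existing output one. Illustrative helpers:
#include <cstdint>
static uint64_t userBufferBytes(uint32_t _channels, uint32_t _bufferFrames, uint32_t _bytesPerSample) {
	// one interleaved user buffer: channels * frames * bytes per sample
	return uint64_t(_channels) * _bufferFrames * _bytesPerSample;
}
static bool reuseExistingDeviceBuffer(uint64_t _bytesPerFrameIn, uint64_t _bytesPerFrameOut) {
	// keep the already-allocated output device buffer if it is big enough for the input side
	return _bytesPerFrameIn <= _bytesPerFrameOut;
}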
- m_stream.mode = airtaudio::mode_duplex; + m_mode = airtaudio::mode_duplex; } else { - m_stream.mode = _mode; + m_mode = _mode; } // Determine device latencies result = ASIOGetLatencies(&inputLatency, &outputLatency); if (result != ASE_OK) { ATA_ERROR("driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting latency."); } else { - m_stream.latency[0] = outputLatency; - m_stream.latency[1] = inputLatency; + m_latency[0] = outputLatency; + m_latency[1] = inputLatency; } // Setup the buffer conversion information structure. We don't use // buffers to do channel offsets, so we override that parameter // here. - if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + if (m_doConvertBuffer[modeToIdTable(_mode)]) { setConvertInfo(_mode, 0); } return true; @@ -545,53 +544,53 @@ error: } delete handle; handle = nullptr; - m_stream.apiHandle = 0; + m_apiHandle = 0; } for (int32_t i=0; i<2; i++) { - if (m_stream.userBuffer[i]) { - free(m_stream.userBuffer[i]); - m_stream.userBuffer[i] = 0; + if (m_userBuffer[i]) { + free(m_userBuffer[i]); + m_userBuffer[i] = 0; } } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = 0; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = 0; } return false; } enum airtaudio::error airtaudio::api::Asio::closeStream() { - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_ERROR("no open stream to close!"); return airtaudio::error_warning; } - if (m_stream.state == airtaudio::state_running) { - m_stream.state = airtaudio::state_stopped; + if (m_state == airtaudio::state_running) { + m_state = airtaudio::state_stopped; ASIOStop(); } ASIODisposeBuffers(); drivers.removeCurrentDriver(); - AsioHandle *handle = (AsioHandle *) m_stream.apiHandle; + AsioHandle *handle = (AsioHandle *) m_apiHandle; if (handle) { CloseHandle(handle->condition); if (handle->bufferInfos) { free(handle->bufferInfos); } delete handle; - m_stream.apiHandle = 0; + m_apiHandle = 0; } for (int32_t i=0; i<2; i++) { - if (m_stream.userBuffer[i]) { - free(m_stream.userBuffer[i]); - m_stream.userBuffer[i] = 0; + if (m_userBuffer[i]) { + free(m_userBuffer[i]); + m_userBuffer[i] = 0; } } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = 0; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = 0; } - m_stream.mode = airtaudio::mode_unknow; - m_stream.state = airtaudio::state_closed; + m_mode = airtaudio::mode_unknow; + m_state = airtaudio::state_closed; return airtaudio::error_none; } @@ -601,11 +600,11 @@ enum airtaudio::error airtaudio::api::Asio::startStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_running) { + if (m_state == airtaudio::state_running) { ATA_ERROR("the stream is already running!"); return airtaudio::error_warning; } - AsioHandle *handle = (AsioHandle *) m_stream.apiHandle; + AsioHandle *handle = (AsioHandle *) m_apiHandle; ASIOError result = ASIOStart(); if (result != ASE_OK) { ATA_ERROR("error (" << getAsioErrorString(result) << ") starting device."); @@ -614,7 +613,7 @@ enum airtaudio::error airtaudio::api::Asio::startStream() { handle->drainCounter = 0; handle->internalDrain = false; ResetEvent(handle->condition); - m_stream.state = airtaudio::state_running; + m_state = airtaudio::state_running; asioXRun = false; unlock: stopThreadCalled = false; @@ -628,18 +627,18 @@ enum airtaudio::error airtaudio::api::Asio::stopStream() { if 
(verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return airtaudio::error_warning; } - AsioHandle *handle = (AsioHandle *) m_stream.apiHandle; - if (m_stream.mode == airtaudio::mode_output || m_stream.mode == airtaudio::mode_duplex) { + AsioHandle *handle = (AsioHandle *) m_apiHandle; + if (m_mode == airtaudio::mode_output || m_mode == airtaudio::mode_duplex) { if (handle->drainCounter == 0) { handle->drainCounter = 2; WaitForSingleObject(handle->condition, INFINITE); // block until signaled } } - m_stream.state = airtaudio::state_stopped; + m_state = airtaudio::state_stopped; ASIOError result = ASIOStop(); if (result != ASE_OK) { ATA_ERROR("error (" << getAsioErrorString(result) << ") stopping device."); @@ -654,7 +653,7 @@ enum airtaudio::error airtaudio::api::Asio::abortStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); error(airtaudio::error_warning); return; @@ -664,7 +663,7 @@ enum airtaudio::error airtaudio::api::Asio::abortStream() { // noted where the device buffers need to be zeroed to avoid // continuing sound, even when the device buffers are completely // disposed. So now, calling abort is the same as calling stop. - // AsioHandle *handle = (AsioHandle *) m_stream.apiHandle; + // AsioHandle *handle = (AsioHandle *) m_apiHandle; // handle->drainCounter = 2; return stopStream(); } @@ -683,25 +682,25 @@ static unsigned __stdcall asioStopStream(void *_ptr) { } bool airtaudio::api::Asio::callbackEvent(long bufferIndex) { - if ( m_stream.state == airtaudio::state_stopped - || m_stream.state == airtaudio::state_stopping) { + if ( m_state == airtaudio::state_stopped + || m_state == airtaudio::state_stopping) { return true; } - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_ERROR("the stream is closed ... this shouldn't happen!"); return false; } - CallbackInfo *info = (CallbackInfo *) &m_stream.callbackInfo; - AsioHandle *handle = (AsioHandle *) m_stream.apiHandle; + CallbackInfo *info = (CallbackInfo *) &m_callbackInfo; + AsioHandle *handle = (AsioHandle *) m_apiHandle; // Check if we were draining the stream and signal if finished. 
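// Editorial sketch (not part of the patch): the drainCounter logic below implements a
// small handshake with stopStream() — stopStream() sets the counter to 2 and blocks on
// the handle's event/condition, the callback keeps emitting silent buffers until the
// counter passes its threshold, then signals so stopStream() can stop the device. A
// simplified, portable analogue using the std::condition_variable variant the Core
// backend uses later in this patch; names and exact thresholds are illustrative.
#include <condition_variable>
#include <mutex>

struct DrainState {
	std::mutex mutex;
	std::condition_variable condition;
	int drainCounter = 0;
};

// Called from stopStream(): request draining, then wait for the callback's signal.
static void requestDrainAndWait(DrainState& _state) {
	std::unique_lock<std::mutex> lock(_state.mutex);
	_state.drainCounter = 2;
	_state.condition.wait(lock, [&]() { return _state.drainCounter > 3; });
}

// Called once per audio callback: while draining, count buffers and signal when done.
// Returns true when the callback should output silence instead of calling the user.
static bool callbackDrainStep(DrainState& _state) {
	std::unique_lock<std::mutex> lock(_state.mutex);
	if (_state.drainCounter == 0) {
		return false; // not draining: run the user callback as usual
	}
	if (++_state.drainCounter > 3) {
		_state.condition.notify_one(); // enough silent buffers queued; wake stopStream()
	}
	return true;
}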
if (handle->drainCounter > 3) { - m_stream.state = airtaudio::state_stopping; + m_state = airtaudio::state_stopping; if (handle->internalDrain == false) { SetEvent(handle->condition); } else { // spawn a thread to stop the stream unsigned threadId; - m_stream.callbackInfo.thread = _beginthreadex(nullptr, 0, &asioStopStream, - &m_stream.callbackInfo, 0, &threadId); + m_callbackInfo.thread = _beginthreadex(nullptr, 0, &asioStopStream, + &m_callbackInfo, 0, &threadId); } return true; } @@ -710,27 +709,27 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) { if (handle->drainCounter == 0) { double streamTime = getStreamTime(); rtaudio::streamStatus status = 0; - if (m_stream.mode != airtaudio::mode_input && asioXRun == true) { + if (m_mode != airtaudio::mode_input && asioXRun == true) { status |= RTAUDIO_airtaudio::status_underflow; asioXRun = false; } - if (m_stream.mode != airtaudio::mode_output && asioXRun == true) { + if (m_mode != airtaudio::mode_output && asioXRun == true) { status |= RTAUDIO_airtaudio::mode_input_OVERFLOW; asioXRun = false; } - int32_t cbReturnValue = info->callback(m_stream.userBuffer[0], - m_stream.userBuffer[1], - m_stream.bufferSize, + int32_t cbReturnValue = info->callback(m_userBuffer[0], + m_userBuffer[1], + m_bufferSize, streamTime, status); if (cbReturnValue == 2) { - m_stream.state = airtaudio::state_stopping; + m_state = airtaudio::state_stopping; handle->drainCounter = 2; unsigned threadId; - m_stream.callbackInfo.thread = _beginthreadex(nullptr, + m_callbackInfo.thread = _beginthreadex(nullptr, 0, &asioStopStream, - &m_stream.callbackInfo, + &m_callbackInfo, 0, &threadId); return true; @@ -740,40 +739,40 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) { } } uint32_t nChannels, bufferBytes, i, j; - nChannels = m_stream.nDeviceChannels[0] + m_stream.nDeviceChannels[1]; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { - bufferBytes = m_stream.bufferSize * audio::getFormatBytes(m_stream.deviceFormat[0]); + nChannels = m_nDeviceChannels[0] + m_nDeviceChannels[1]; + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { + bufferBytes = m_bufferSize * audio::getFormatBytes(m_deviceFormat[0]); if (handle->drainCounter > 1) { // write zeros to the output stream for (i=0, j=0; ibufferInfos[i].isInput != ASIOTrue) { memset(handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes); } } - } else if (m_stream.doConvertBuffer[0]) { - convertBuffer(m_stream.deviceBuffer, m_stream.userBuffer[0], m_stream.convertInfo[0]); - if (m_stream.doByteSwap[0]) { - byteSwapBuffer(m_stream.deviceBuffer, - m_stream.bufferSize * m_stream.nDeviceChannels[0], - m_stream.deviceFormat[0]); + } else if (m_doConvertBuffer[0]) { + convertBuffer(m_deviceBuffer, m_userBuffer[0], m_convertInfo[0]); + if (m_doByteSwap[0]) { + byteSwapBuffer(m_deviceBuffer, + m_bufferSize * m_nDeviceChannels[0], + m_deviceFormat[0]); } for (i=0, j=0; ibufferInfos[i].isInput != ASIOTrue) { memcpy(handle->bufferInfos[i].buffers[bufferIndex], - &m_stream.deviceBuffer[j++*bufferBytes], + &m_deviceBuffer[j++*bufferBytes], bufferBytes); } } } else { - if (m_stream.doByteSwap[0]) { - byteSwapBuffer(m_stream.userBuffer[0], - m_stream.bufferSize * m_stream.nUserChannels[0], - m_stream.userFormat); + if (m_doByteSwap[0]) { + byteSwapBuffer(m_userBuffer[0], + m_bufferSize * m_nUserChannels[0], + m_userFormat); } for (i=0, j=0; ibufferInfos[i].isInput != ASIOTrue) { memcpy(handle->bufferInfos[i].buffers[bufferIndex], - 
&m_stream.userBuffer[0][bufferBytes*j++], + &m_userBuffer[0][bufferBytes*j++], bufferBytes); } } @@ -783,38 +782,38 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) { goto unlock; } } - if ( m_stream.mode == airtaudio::mode_input - || m_stream.mode == airtaudio::mode_duplex) { - bufferBytes = m_stream.bufferSize * audio::getFormatBytes(m_stream.deviceFormat[1]); - if (m_stream.doConvertBuffer[1]) { + if ( m_mode == airtaudio::mode_input + || m_mode == airtaudio::mode_duplex) { + bufferBytes = m_bufferSize * audio::getFormatBytes(m_deviceFormat[1]); + if (m_doConvertBuffer[1]) { // Always interleave ASIO input data. for (i=0, j=0; ibufferInfos[i].isInput == ASIOTrue) { - memcpy(&m_stream.deviceBuffer[j++*bufferBytes], + memcpy(&m_deviceBuffer[j++*bufferBytes], handle->bufferInfos[i].buffers[bufferIndex], bufferBytes); } } - if (m_stream.doByteSwap[1]) { - byteSwapBuffer(m_stream.deviceBuffer, - m_stream.bufferSize * m_stream.nDeviceChannels[1], - m_stream.deviceFormat[1]); + if (m_doByteSwap[1]) { + byteSwapBuffer(m_deviceBuffer, + m_bufferSize * m_nDeviceChannels[1], + m_deviceFormat[1]); } - convertBuffer(m_stream.userBuffer[1], - m_stream.deviceBuffer, - m_stream.convertInfo[1]); + convertBuffer(m_userBuffer[1], + m_deviceBuffer, + m_convertInfo[1]); } else { for (i=0, j=0; ibufferInfos[i].isInput == ASIOTrue) { - memcpy(&m_stream.userBuffer[1][bufferBytes*j++], + memcpy(&m_userBuffer[1][bufferBytes*j++], handle->bufferInfos[i].buffers[bufferIndex], bufferBytes); } } - if (m_stream.doByteSwap[1]) { - byteSwapBuffer(m_stream.userBuffer[1], - m_stream.bufferSize * m_stream.nUserChannels[1], - m_stream.userFormat); + if (m_doByteSwap[1]) { + byteSwapBuffer(m_userBuffer[1], + m_bufferSize * m_nUserChannels[1], + m_userFormat); } } } diff --git a/airtaudio/api/Asio.h b/airtaudio/api/Asio.h index 350dc8a..e1ff933 100644 --- a/airtaudio/api/Asio.h +++ b/airtaudio/api/Asio.h @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ #if !defined(__AIRTAUDIO_API_ASIO_H__) && defined(__WINDOWS_ASIO__) diff --git a/airtaudio/api/Core.cpp b/airtaudio/api/Core.cpp index ff233da..d110adb 100644 --- a/airtaudio/api/Core.cpp +++ b/airtaudio/api/Core.cpp @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ @@ -95,7 +94,7 @@ airtaudio::api::Core::~Core() { // The subclass destructor gets called before the base class // destructor, so close an existing stream before deallocating // apiDeviceId memory. 
- if (m_stream.state != airtaudio::state_closed) { + if (m_state != airtaudio::state_closed) { closeStream(); } } @@ -385,16 +384,15 @@ airtaudio::DeviceInfo airtaudio::api::Core::getDeviceInfo(uint32_t _device) { return info; } -static OSStatus callbackHandler(AudioDeviceID _inDevice, - const AudioTimeStamp* _inNow, - const AudioBufferList* _inInputData, - const AudioTimeStamp* _inInputTime, - AudioBufferList* _outOutputData, - const AudioTimeStamp* _inOutputTime, - void* _infoPointer) { - airtaudio::CallbackInfo* info = (airtaudio::CallbackInfo*)_infoPointer; - airtaudio::api::Core* object = (airtaudio::api::Core*)info->object; - if (object->callbackEvent(_inDevice, _inInputData, _outOutputData) == false) { +OSStatus airtaudio::api::Core::callbackEvent(AudioDeviceID _inDevice, + const AudioTimeStamp* _inNow, + const AudioBufferList* _inInputData, + const AudioTimeStamp* _inInputTime, + AudioBufferList* _outOutputData, + const AudioTimeStamp* _inOutputTime, + void* _userData) { + airtaudio::api::Core* myClass = reinterpret_cast(_userData); + if (myClass->callbackEvent(_inDevice, _inInputData, _outOutputData) == false) { return kAudioHardwareUnspecifiedError; } else { return kAudioHardwareNoError; @@ -598,14 +596,14 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, // If attempting to setup a duplex stream, the bufferSize parameter // MUST be the same in both directions! *_bufferSize = theSize; - if ( m_stream.mode == airtaudio::mode_output + if ( m_mode == airtaudio::mode_output && _mode == airtaudio::mode_input - && *_bufferSize != m_stream.bufferSize) { + && *_bufferSize != m_bufferSize) { ATA_ERROR("system error setting buffer size for duplex stream on device (" << _device << ")."); return false; } - m_stream.bufferSize = *_bufferSize; - m_stream.nBuffers = 1; + m_bufferSize = *_bufferSize; + m_nBuffers = 1; // Try to set "hog" mode ... it's not clear to me this is working. if ( _options != nullptr && _options->flags & HOG_DEVICE) { @@ -763,7 +761,7 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, if (AudioObjectHasProperty(id, &property) == true) { result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &latency); if (result == kAudioHardwareNoError) { - m_stream.latency[ _mode ] = latency; + m_latency[ _mode ] = latency; } else { ATA_ERROR("system error (" << getErrorCode(result) << ") getting device latency for device (" << _device << ")."); return false; @@ -772,75 +770,75 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, // Byte-swapping: According to AudioHardware.h, the stream data will // always be presented in native-endian format, so we should never // need to byte swap. - m_stream.doByteSwap[modeToIdTable(_mode)] = false; + m_doByteSwap[modeToIdTable(_mode)] = false; // From the CoreAudio documentation, PCM data must be supplied as // 32-bit floats. 
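// Editorial illustration (not part of the patch): because CoreAudio only accepts 32-bit
// float PCM on the device side, any other user format forces the conversion flag. The
// kind of pass convertBuffer() ends up doing for SINT16 users looks roughly like this
// (convertBuffer itself is not shown in this patch):
#include <cstdint>
#include <cstddef>
static void int16ToFloat32(const int16_t* _in, float* _out, size_t _samples) {
	for (size_t iii = 0; iii < _samples; ++iii) {
		_out[iii] = float(_in[iii]) / 32768.0f; // scale to [-1.0, 1.0)
	}
}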
- m_stream.userFormat = _format; - m_stream.deviceFormat[modeToIdTable(_mode)] = FLOAT32; + m_userFormat = _format; + m_deviceFormat[modeToIdTable(_mode)] = FLOAT32; if (streamCount == 1) { - m_stream.nDeviceChannels[modeToIdTable(_mode)] = description.mChannelsPerFrame; + m_nDeviceChannels[modeToIdTable(_mode)] = description.mChannelsPerFrame; } else { // multiple streams - m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels; + m_nDeviceChannels[modeToIdTable(_mode)] = _channels; } - m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; - m_stream.channelOffset[modeToIdTable(_mode)] = channelOffset; // offset within a CoreAudio stream - m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; + m_nUserChannels[modeToIdTable(_mode)] = _channels; + m_channelOffset[modeToIdTable(_mode)] = channelOffset; // offset within a CoreAudio stream + m_deviceInterleaved[modeToIdTable(_mode)] = true; if (monoMode == true) { - m_stream.deviceInterleaved[modeToIdTable(_mode)] = false; + m_deviceInterleaved[modeToIdTable(_mode)] = false; } // Set flags for buffer conversion. - m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; - if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + m_doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } if (streamCount == 1) { - if ( m_stream.nUserChannels[modeToIdTable(_mode)] > 1 - && m_stream.deviceInterleaved[modeToIdTable(_mode)] == false) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if ( m_nUserChannels[modeToIdTable(_mode)] > 1 + && m_deviceInterleaved[modeToIdTable(_mode)] == false) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } } else if (monoMode) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + m_doConvertBuffer[modeToIdTable(_mode)] = true; } // Allocate our CoreHandle structure for the stream. CoreHandle *handle = 0; - if (m_stream.apiHandle == 0) { + if (m_apiHandle == 0) { handle = new CoreHandle; if (handle == nullptr) { ATA_ERROR("error allocating CoreHandle memory."); return false; } - m_stream.apiHandle = (void *) handle; + m_apiHandle = (void *) handle; } else { - handle = (CoreHandle *) m_stream.apiHandle; + handle = (CoreHandle *) m_apiHandle; } handle->iStream[modeToIdTable(_mode)] = firstStream; handle->nStreams[modeToIdTable(_mode)] = streamCount; handle->id[modeToIdTable(_mode)] = id; // Allocate necessary internal buffers. 
uint64_t bufferBytes; - bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat); - // m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); - m_stream.userBuffer[modeToIdTable(_mode)] = (char *) malloc(bufferBytes * sizeof(char)); - memset(m_stream.userBuffer[modeToIdTable(_mode)], 0, bufferBytes * sizeof(char)); - if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { + bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat); + // m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + m_userBuffer[modeToIdTable(_mode)] = (char *) malloc(bufferBytes * sizeof(char)); + memset(m_userBuffer[modeToIdTable(_mode)], 0, bufferBytes * sizeof(char)); + if (m_userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating user buffer memory."); goto error; } // If possible, we will make use of the CoreAudio stream buffers as // "device buffers". However, we can't do this if using multiple // streams. - if ( m_stream.doConvertBuffer[modeToIdTable(_mode)] + if ( m_doConvertBuffer[modeToIdTable(_mode)] && handle->nStreams[modeToIdTable(_mode)] > 1) { bool makeBuffer = true; - bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]); + bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]); if (_mode == airtaudio::mode_input) { - if ( m_stream.mode == airtaudio::mode_output - && m_stream.deviceBuffer) { - uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]); + if ( m_mode == airtaudio::mode_output + && m_deviceBuffer) { + uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]); if (bufferBytes <= bytesOut) { makeBuffer = false; } @@ -848,23 +846,23 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, } if (makeBuffer) { bufferBytes *= *_bufferSize; - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = nullptr; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = nullptr; } - m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1); - if (m_stream.deviceBuffer == nullptr) { + m_deviceBuffer = (char *) calloc(bufferBytes, 1); + if (m_deviceBuffer == nullptr) { ATA_ERROR("error allocating device buffer memory."); goto error; } } } - m_stream.sampleRate = _sampleRate; - m_stream.device[modeToIdTable(_mode)] = _device; - m_stream.state = airtaudio::state_stopped; - m_stream.callbackInfo.object = (void *) this; + m_sampleRate = _sampleRate; + m_device[modeToIdTable(_mode)] = _device; + m_state = airtaudio::state_stopped; + m_callbackInfo.object = (void *) this; // Setup the buffer conversion information structure. - if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + if (m_doConvertBuffer[modeToIdTable(_mode)]) { if (streamCount > 1) { setConvertInfo(_mode, 0); } else { @@ -872,26 +870,26 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, } } if ( _mode == airtaudio::mode_input - && m_stream.mode == airtaudio::mode_output - && m_stream.device[0] == _device) { + && m_mode == airtaudio::mode_output + && m_device[0] == _device) { // Only one callback procedure per device. 
- m_stream.mode = airtaudio::mode_duplex; + m_mode = airtaudio::mode_duplex; } else { #if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5) - result = AudioDeviceCreateIOProcID(id, callbackHandler, (void *) &m_stream.callbackInfo, &handle->procId[modeToIdTable(_mode)]); + result = AudioDeviceCreateIOProcID(id, &airtaudio::api::Core::callbackEvent, (void *) &m_callbackInfo, &handle->procId[modeToIdTable(_mode)]); #else // deprecated in favor of AudioDeviceCreateIOProcID() - result = AudioDeviceAddIOProc(id, callbackHandler, (void *) &m_stream.callbackInfo); + result = AudioDeviceAddIOProc(id, &airtaudio::api::Core::callbackEvent, (void *) &m_callbackInfo); #endif if (result != noErr) { ATA_ERROR("system error setting callback for device (" << _device << ")."); goto error; } - if ( m_stream.mode == airtaudio::mode_output + if ( m_mode == airtaudio::mode_output && _mode == airtaudio::mode_input) { - m_stream.mode = airtaudio::mode_duplex; + m_mode = airtaudio::mode_duplex; } else { - m_stream.mode = _mode; + m_mode = _mode; } } // Setup the device property listener for over/underload. @@ -901,67 +899,67 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, error: if (handle) { delete handle; - m_stream.apiHandle = 0; + m_apiHandle = 0; } for (int32_t i=0; i<2; i++) { - if (m_stream.userBuffer[i]) { - free(m_stream.userBuffer[i]); - m_stream.userBuffer[i] = 0; + if (m_userBuffer[i]) { + free(m_userBuffer[i]); + m_userBuffer[i] = 0; } } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = 0; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = 0; } - m_stream.state = airtaudio::state_closed; + m_state = airtaudio::state_closed; return false; } enum airtaudio::error airtaudio::api::Core::closeStream() { - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_ERROR("no open stream to close!"); return airtaudio::error_warning; } - CoreHandle *handle = (CoreHandle *) m_stream.apiHandle; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { - if (m_stream.state == airtaudio::state_running) { - AudioDeviceStop(handle->id[0], callbackHandler); + CoreHandle *handle = (CoreHandle *) m_apiHandle; + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { + if (m_state == airtaudio::state_running) { + AudioDeviceStop(handle->id[0], &airtaudio::api::Core::callbackEvent); } #if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5) AudioDeviceDestroyIOProcID(handle->id[0], handle->procId[0]); #else // deprecated in favor of AudioDeviceDestroyIOProcID() - AudioDeviceRemoveIOProc(handle->id[0], callbackHandler); + AudioDeviceRemoveIOProc(handle->id[0], &airtaudio::api::Core::callbackEvent); #endif } - if ( m_stream.mode == airtaudio::mode_input - || ( m_stream.mode == airtaudio::mode_duplex - && m_stream.device[0] != m_stream.device[1])) { - if (m_stream.state == airtaudio::state_running) { - AudioDeviceStop(handle->id[1], callbackHandler); + if ( m_mode == airtaudio::mode_input + || ( m_mode == airtaudio::mode_duplex + && m_device[0] != m_device[1])) { + if (m_state == airtaudio::state_running) { + AudioDeviceStop(handle->id[1], &airtaudio::api::Core::callbackEvent); } #if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5) AudioDeviceDestroyIOProcID(handle->id[1], handle->procId[1]); #else // deprecated in favor of 
AudioDeviceDestroyIOProcID() - AudioDeviceRemoveIOProc(handle->id[1], callbackHandler); + AudioDeviceRemoveIOProc(handle->id[1], &airtaudio::api::Core::callbackEvent); #endif } for (int32_t i=0; i<2; i++) { - if (m_stream.userBuffer[i]) { - free(m_stream.userBuffer[i]); - m_stream.userBuffer[i] = nullptr; + if (m_userBuffer[i]) { + free(m_userBuffer[i]); + m_userBuffer[i] = nullptr; } } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = nullptr; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = nullptr; } delete handle; - m_stream.apiHandle = 0; - m_stream.mode = airtaudio::mode_unknow; - m_stream.state = airtaudio::state_closed; + m_apiHandle = 0; + m_mode = airtaudio::mode_unknow; + m_state = airtaudio::state_closed; return airtaudio::error_none; } @@ -969,32 +967,32 @@ enum airtaudio::error airtaudio::api::Core::startStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_running) { + if (m_state == airtaudio::state_running) { ATA_ERROR("the stream is already running!"); return airtaudio::error_warning; } OSStatus result = noErr; - CoreHandle *handle = (CoreHandle *) m_stream.apiHandle; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { - result = AudioDeviceStart(handle->id[0], callbackHandler); + CoreHandle *handle = (CoreHandle *) m_apiHandle; + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { + result = AudioDeviceStart(handle->id[0], &airtaudio::api::Core::callbackEvent); if (result != noErr) { - ATA_ERROR("system error (" << getErrorCode(result) << ") starting callback procedure on device (" << m_stream.device[0] << ")."); + ATA_ERROR("system error (" << getErrorCode(result) << ") starting callback procedure on device (" << m_device[0] << ")."); goto unlock; } } - if ( m_stream.mode == airtaudio::mode_input - || ( m_stream.mode == airtaudio::mode_duplex - && m_stream.device[0] != m_stream.device[1])) { - result = AudioDeviceStart(handle->id[1], callbackHandler); + if ( m_mode == airtaudio::mode_input + || ( m_mode == airtaudio::mode_duplex + && m_device[0] != m_device[1])) { + result = AudioDeviceStart(handle->id[1], &airtaudio::api::Core::callbackEvent); if (result != noErr) { - ATA_ERROR("system error starting input callback procedure on device (" << m_stream.device[1] << ")."); + ATA_ERROR("system error starting input callback procedure on device (" << m_device[1] << ")."); goto unlock; } } handle->drainCounter = 0; handle->internalDrain = false; - m_stream.state = airtaudio::state_running; + m_state = airtaudio::state_running; unlock: if (result == noErr) { return airtaudio::error_none; @@ -1006,35 +1004,35 @@ enum airtaudio::error airtaudio::api::Core::stopStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return airtaudio::error_warning; } OSStatus result = noErr; - CoreHandle *handle = (CoreHandle *) m_stream.apiHandle; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + CoreHandle *handle = (CoreHandle *) m_apiHandle; + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { if (handle->drainCounter == 0) { - std::unique_lock lck(m_stream.mutex); + std::unique_lock lck(m_mutex); handle->drainCounter = 2; 
handle->condition.wait(lck); } - result = AudioDeviceStop(handle->id[0], callbackHandler); + result = AudioDeviceStop(handle->id[0], &airtaudio::api::Core::callbackEvent); if (result != noErr) { - ATA_ERROR("system error (" << getErrorCode(result) << ") stopping callback procedure on device (" << m_stream.device[0] << ")."); + ATA_ERROR("system error (" << getErrorCode(result) << ") stopping callback procedure on device (" << m_device[0] << ")."); goto unlock; } } - if ( m_stream.mode == airtaudio::mode_input - || ( m_stream.mode == airtaudio::mode_duplex - && m_stream.device[0] != m_stream.device[1])) { - result = AudioDeviceStop(handle->id[1], callbackHandler); + if ( m_mode == airtaudio::mode_input + || ( m_mode == airtaudio::mode_duplex + && m_device[0] != m_device[1])) { + result = AudioDeviceStop(handle->id[1], &airtaudio::api::Core::callbackEvent); if (result != noErr) { - ATA_ERROR("system error (" << getErrorCode(result) << ") stopping input callback procedure on device (" << m_stream.device[1] << ")."); + ATA_ERROR("system error (" << getErrorCode(result) << ") stopping input callback procedure on device (" << m_device[1] << ")."); goto unlock; } } - m_stream.state = airtaudio::state_stopped; + m_state = airtaudio::state_stopped; unlock: if (result == noErr) { return airtaudio::error_none; @@ -1046,11 +1044,11 @@ enum airtaudio::error airtaudio::api::Core::abortStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return airtaudio::error_warning; } - CoreHandle* handle = (CoreHandle*)m_stream.apiHandle; + CoreHandle* handle = (CoreHandle*)m_apiHandle; handle->drainCounter = 2; return stopStream(); } @@ -1060,30 +1058,29 @@ enum airtaudio::error airtaudio::api::Core::abortStream() { // aborted. It is better to handle it this way because the // callbackEvent() function probably should return before the AudioDeviceStop() // function is called. -static void coreStopStream(void *_ptr) { - airtaudio::CallbackInfo* info = (airtaudio::CallbackInfo*)_ptr; - airtaudio::api::Core* object = (airtaudio::api::Core*)info->object; - object->stopStream(); +void airtaudio::api::Core::coreStopStream(void *_userData) { + airtaudio::api::Core* myClass = reinterpret_cast(_userData); + myClass->stopStream(); } bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId, const AudioBufferList *_inBufferList, const AudioBufferList *_outBufferList) { - if ( m_stream.state == airtaudio::state_stopped - || m_stream.state == airtaudio::state_stopping) { + if ( m_state == airtaudio::state_stopped + || m_state == airtaudio::state_stopping) { return true; } - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_ERROR("the stream is closed ... this shouldn't happen!"); return false; } - CallbackInfo *info = (CallbackInfo *) &m_stream.callbackInfo; - CoreHandle *handle = (CoreHandle *) m_stream.apiHandle; + CallbackInfo *info = (CallbackInfo *) &m_callbackInfo; + CoreHandle *handle = (CoreHandle *) m_apiHandle; // Check if we were draining the stream and signal is finished. 
if (handle->drainCounter > 3) { - m_stream.state = airtaudio::state_stopping; + m_state = airtaudio::state_stopping; if (handle->internalDrain == true) { - new std::thread(coreStopStream, info); + new std::thread(&airtaudio::api::Core::coreStopStream, this); } else { // external call to stopStream() handle->condition.notify_one(); @@ -1094,26 +1091,26 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId, // Invoke user callback to get fresh output data UNLESS we are // draining stream or duplex mode AND the input/output devices are // different AND this function is called for the input device. - if (handle->drainCounter == 0 && (m_stream.mode != airtaudio::mode_duplex || _deviceId == outputDevice)) { + if (handle->drainCounter == 0 && (m_mode != airtaudio::mode_duplex || _deviceId == outputDevice)) { double streamTime = getStreamTime(); enum airtaudio::status status = airtaudio::status_ok; - if ( m_stream.mode != airtaudio::mode_input + if ( m_mode != airtaudio::mode_input && handle->xrun[0] == true) { status |= airtaudio::status_underflow; handle->xrun[0] = false; } - if ( m_stream.mode != airtaudio::mode_output + if ( m_mode != airtaudio::mode_output && handle->xrun[1] == true) { status |= airtaudio::mode_input_OVERFLOW; handle->xrun[1] = false; } - int32_t cbReturnValue = info->callback(m_stream.userBuffer[0], - m_stream.userBuffer[1], - m_stream.bufferSize, + int32_t cbReturnValue = info->callback(m_userBuffer[0], + m_userBuffer[1], + m_bufferSize, streamTime, status); if (cbReturnValue == 2) { - m_stream.state = airtaudio::state_stopping; + m_state = airtaudio::state_stopping; handle->drainCounter = 2; abortStream(); return true; @@ -1122,8 +1119,8 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId, handle->internalDrain = true; } } - if ( m_stream.mode == airtaudio::mode_output - || ( m_stream.mode == airtaudio::mode_duplex + if ( m_mode == airtaudio::mode_output + || ( m_mode == airtaudio::mode_duplex && _deviceId == outputDevice)) { if (handle->drainCounter > 1) { // write zeros to the output stream @@ -1140,29 +1137,29 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId, } } } else if (handle->nStreams[0] == 1) { - if (m_stream.doConvertBuffer[0]) { + if (m_doConvertBuffer[0]) { // convert directly to CoreAudio stream buffer convertBuffer((char*)_outBufferList->mBuffers[handle->iStream[0]].mData, - m_stream.userBuffer[0], - m_stream.convertInfo[0]); + m_userBuffer[0], + m_convertInfo[0]); } else { // copy from user buffer memcpy(_outBufferList->mBuffers[handle->iStream[0]].mData, - m_stream.userBuffer[0], + m_userBuffer[0], _outBufferList->mBuffers[handle->iStream[0]].mDataByteSize); } } else { // fill multiple streams - float *inBuffer = (float *) m_stream.userBuffer[0]; - if (m_stream.doConvertBuffer[0]) { - convertBuffer(m_stream.deviceBuffer, m_stream.userBuffer[0], m_stream.convertInfo[0]); - inBuffer = (float *) m_stream.deviceBuffer; + float *inBuffer = (float *) m_userBuffer[0]; + if (m_doConvertBuffer[0]) { + convertBuffer(m_deviceBuffer, m_userBuffer[0], m_convertInfo[0]); + inBuffer = (float *) m_deviceBuffer; } - if (m_stream.deviceInterleaved[0] == false) { // mono mode + if (m_deviceInterleaved[0] == false) { // mono mode uint32_t bufferBytes = _outBufferList->mBuffers[handle->iStream[0]].mDataByteSize; - for (uint32_t i=0; imBuffers[handle->iStream[0]+i].mData, - (void *)&inBuffer[i*m_stream.bufferSize], + (void *)&inBuffer[i*m_bufferSize], bufferBytes); } } else { @@ -1170,15 +1167,15 @@ bool 
airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId, uint32_t streamChannels, channelsLeft, inJump, outJump, inOffset; float *out, *in; bool inInterleaved = true; - uint32_t inChannels = m_stream.nUserChannels[0]; - if (m_stream.doConvertBuffer[0]) { + uint32_t inChannels = m_nUserChannels[0]; + if (m_doConvertBuffer[0]) { inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode - inChannels = m_stream.nDeviceChannels[0]; + inChannels = m_nDeviceChannels[0]; } if (inInterleaved) { inOffset = 1; } else { - inOffset = m_stream.bufferSize; + inOffset = m_bufferSize; } channelsLeft = inChannels; for (uint32_t i=0; inStreams[0]; i++) { @@ -1187,9 +1184,9 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId, streamChannels = _outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels; outJump = 0; // Account for possible channel offset in first stream - if (i == 0 && m_stream.channelOffset[0] > 0) { - streamChannels -= m_stream.channelOffset[0]; - outJump = m_stream.channelOffset[0]; + if (i == 0 && m_channelOffset[0] > 0) { + streamChannels -= m_channelOffset[0]; + outJump = m_channelOffset[0]; out += outJump; } // Account for possible unfilled channels at end of the last stream @@ -1205,7 +1202,7 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId, inJump = 1; in += (inChannels - channelsLeft) * inOffset; } - for (uint32_t i=0; iid[1]; - if ( m_stream.mode == airtaudio::mode_input - || ( m_stream.mode == airtaudio::mode_duplex + if ( m_mode == airtaudio::mode_input + || ( m_mode == airtaudio::mode_duplex && _deviceId == inputDevice)) { if (handle->nStreams[1] == 1) { - if (m_stream.doConvertBuffer[1]) { + if (m_doConvertBuffer[1]) { // convert directly from CoreAudio stream buffer - convertBuffer(m_stream.userBuffer[1], + convertBuffer(m_userBuffer[1], (char *) _inBufferList->mBuffers[handle->iStream[1]].mData, - m_stream.convertInfo[1]); + m_convertInfo[1]); } else { // copy to user buffer - memcpy(m_stream.userBuffer[1], + memcpy(m_userBuffer[1], _inBufferList->mBuffers[handle->iStream[1]].mData, _inBufferList->mBuffers[handle->iStream[1]].mDataByteSize); } } else { // read from multiple streams - float *outBuffer = (float *) m_stream.userBuffer[1]; - if (m_stream.doConvertBuffer[1]) { - outBuffer = (float *) m_stream.deviceBuffer; + float *outBuffer = (float *) m_userBuffer[1]; + if (m_doConvertBuffer[1]) { + outBuffer = (float *) m_deviceBuffer; } - if (m_stream.deviceInterleaved[1] == false) { + if (m_deviceInterleaved[1] == false) { // mono mode uint32_t bufferBytes = _inBufferList->mBuffers[handle->iStream[1]].mDataByteSize; - for (uint32_t i=0; imBuffers[handle->iStream[1]+i].mData, bufferBytes); } @@ -1255,15 +1252,15 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId, uint32_t streamChannels, channelsLeft, inJump, outJump, outOffset; float *out, *in; bool outInterleaved = true; - uint32_t outChannels = m_stream.nUserChannels[1]; - if (m_stream.doConvertBuffer[1]) { + uint32_t outChannels = m_nUserChannels[1]; + if (m_doConvertBuffer[1]) { outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode - outChannels = m_stream.nDeviceChannels[1]; + outChannels = m_nDeviceChannels[1]; } if (outInterleaved) { outOffset = 1; } else { - outOffset = m_stream.bufferSize; + outOffset = m_bufferSize; } channelsLeft = outChannels; for (uint32_t i=0; inStreams[1]; i++) { @@ -1272,9 +1269,9 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID 
_deviceId, streamChannels = _inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels; inJump = 0; // Account for possible channel offset in first stream - if (i == 0 && m_stream.channelOffset[1] > 0) { - streamChannels -= m_stream.channelOffset[1]; - inJump = m_stream.channelOffset[1]; + if (i == 0 && m_channelOffset[1] > 0) { + streamChannels -= m_channelOffset[1]; + inJump = m_channelOffset[1]; in += inJump; } // Account for possible unread channels at end of the last stream @@ -1290,7 +1287,7 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId, outJump = 1; out += (outChannels - channelsLeft) * outOffset; } - for (uint32_t i=0; i(_userData); // get all requested buffer : - for (int32_t iii=0; iii < ioData->mNumberBuffers; iii++) { - AudioBuffer buffer = ioData->mBuffers[iii]; - int32_t numberFrame = buffer.mDataByteSize/2/*stereo*/ /sizeof(int16_t); - ATA_VERBOSE("request data size: " << numberFrame << " busNumber=" << inBusNumber); + for (int32_t iii=0; iii < _ioData->mNumberBuffers; iii++) { + AudioBuffer buffer = _ioData->mBuffers[iii]; + int32_t numberFrame = buffer.mDataByteSize/2/*stereo*/ /sizeof(int16_t); + ATA_VERBOSE("request data size: " << numberFrame << " busNumber=" << _inBusNumber); myClass->callBackEvent(buffer.mData, numberFrame); } - return noErr; + return noErr; } @@ -187,40 +188,40 @@ bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device, bool ret = true; // configure Airtaudio internal configuration: - m_stream.userFormat = _format; - m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; - m_stream.bufferSize = 8192; - m_stream.sampleRate = _sampleRate; - m_stream.doByteSwap[modeToIdTable(_mode)] = false; // for endienness ... + m_userFormat = _format; + m_nUserChannels[modeToIdTable(_mode)] = _channels; + m_bufferSize = 8192; + m_sampleRate = _sampleRate; + m_doByteSwap[modeToIdTable(_mode)] = false; // for endienness ... // TODO : For now, we write it in hard ==> to be update later ... - m_stream.deviceFormat[modeToIdTable(_mode)] = SINT16; - m_stream.nDeviceChannels[modeToIdTable(_mode)] = 2; - m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; + m_deviceFormat[modeToIdTable(_mode)] = SINT16; + m_nDeviceChannels[modeToIdTable(_mode)] = 2; + m_deviceInterleaved[modeToIdTable(_mode)] = true; - m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; - if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + m_doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false - && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if ( m_deviceInterleaved[modeToIdTable(_mode)] == false + && m_nUserChannels[modeToIdTable(_mode)] > 1) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.doConvertBuffer[modeToIdTable(_mode)] == true) { + if (m_doConvertBuffer[modeToIdTable(_mode)] == true) { // Allocate necessary internal buffers. 
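// Editorial sketch (not part of the patch): the iOS render callback above derives its
// frame count from the AudioBuffer byte size — for the hard-coded interleaved stereo
// int16 device format, frames = byteSize / channels / sizeof(int16_t). Illustrative:
#include <cstdint>
static int32_t framesInBuffer(uint32_t _byteSize, uint32_t _channels) {
	return int32_t(_byteSize / _channels / sizeof(int16_t));
}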
- uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.userFormat); - m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { + uint64_t bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * m_bufferSize * audio::getFormatBytes(m_userFormat); + m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating user buffer memory."); } setConvertInfo(_mode, _firstChannel); } - ATA_INFO("device format : " << m_stream.deviceFormat[modeToIdTable(_mode)] << " user format : " << m_stream.userFormat); - ATA_INFO("device channels : " << m_stream.nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_stream.nUserChannels[modeToIdTable(_mode)]); - ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[modeToIdTable(_mode)]); + ATA_INFO("device format : " << m_deviceFormat[modeToIdTable(_mode)] << " user format : " << m_userFormat); + ATA_INFO("device channels : " << m_nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_nUserChannels[modeToIdTable(_mode)]); + ATA_INFO("do convert buffer : " << m_doConvertBuffer[modeToIdTable(_mode)]); if (ret == false) { ATA_ERROR("Can not open device."); } @@ -282,7 +283,7 @@ bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device, // Set output callback AURenderCallbackStruct callbackStruct; - callbackStruct.inputProc = playbackCallback; + callbackStruct.inputProc = &airtaudio::api::CoreIos::playbackCallback; callbackStruct.inputProcRefCon = this; status = AudioUnitSetProperty(m_private->audioUnit, kAudioUnitProperty_SetRenderCallback, diff --git a/airtaudio/api/Ds.cpp b/airtaudio/api/Ds.cpp index e0ec518..281dee3 100644 --- a/airtaudio/api/Ds.cpp +++ b/airtaudio/api/Ds.cpp @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ // Windows DirectSound API @@ -126,7 +125,7 @@ airtaudio::api::Ds::~Ds() { if (m_coInitialized) { CoUninitialize(); // balanced call. } - if (m_stream.state != airtaudio::state_closed) { + if (m_state != airtaudio::state_closed) { closeStream(); } } @@ -497,12 +496,12 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, && !( _format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT)) { waveFormat.wBitsPerSample = 16; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; } else { waveFormat.wBitsPerSample = 8; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; } - m_stream.userFormat = _format; + m_userFormat = _format; // Update wave format structure and buffer information. 
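// Editorial sketch (not part of the patch): the WAVEFORMATEX bookkeeping done just below
// is plain arithmetic, e.g. 2 channels, 16-bit, 44100 Hz gives nBlockAlign = 4 bytes per
// frame and nAvgBytesPerSec = 176400. Illustrative helper:
static void fillWaveFormatSizes(unsigned _channels, unsigned _bitsPerSample, unsigned _samplesPerSec,
                                unsigned& _blockAlign, unsigned& _avgBytesPerSec) {
	_blockAlign = _channels * _bitsPerSample / 8;   // bytes per interleaved frame
	_avgBytesPerSec = _samplesPerSec * _blockAlign; // bytes per second of audio
}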
waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8; waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign; @@ -628,23 +627,23 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08; if (format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats) { waveFormat.wBitsPerSample = 8; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; } else { // assume 16-bit is supported waveFormat.wBitsPerSample = 16; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; } } else { // channel == 1 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08; if (format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats) { waveFormat.wBitsPerSample = 8; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; } else { // assume 16-bit is supported waveFormat.wBitsPerSample = 16; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; } } - m_stream.userFormat = _format; + m_userFormat = _format; // Update wave format structure and buffer information. waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8; waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign; @@ -709,36 +708,36 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, } // Set various stream parameters DsHandle *handle = 0; - m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel; - m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; - m_stream.bufferSize = *_bufferSize; - m_stream.channelOffset[modeToIdTable(_mode)] = _firstChannel; - m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; + m_nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel; + m_nUserChannels[modeToIdTable(_mode)] = _channels; + m_bufferSize = *_bufferSize; + m_channelOffset[modeToIdTable(_mode)] = _firstChannel; + m_deviceInterleaved[modeToIdTable(_mode)] = true; // Set flag for buffer conversion - m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; - if (m_stream.nUserChannels[modeToIdTable(_mode)] != m_stream.nDeviceChannels[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + m_doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_nUserChannels[modeToIdTable(_mode)] != m_nDeviceChannels[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false - && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if ( m_deviceInterleaved[modeToIdTable(_mode)] == false + && m_nUserChannels[modeToIdTable(_mode)] > 1) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } // Allocate necessary internal buffers - long bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat); - m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); - if 
(m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { + long bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat); + m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating user buffer memory."); goto error; } - if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + if (m_doConvertBuffer[modeToIdTable(_mode)]) { bool makeBuffer = true; - bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]); + bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]); if (_mode == airtaudio::mode_input) { - if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) { - uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]); + if (m_mode == airtaudio::mode_output && m_deviceBuffer) { + uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]); if (bufferBytes <= (long) bytesOut) { makeBuffer = false; } @@ -746,18 +745,18 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, } if (makeBuffer) { bufferBytes *= *_bufferSize; - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); + if (m_deviceBuffer) { + free(m_deviceBuffer); } - m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1); - if (m_stream.deviceBuffer == nullptr) { + m_deviceBuffer = (char *) calloc(bufferBytes, 1); + if (m_deviceBuffer == nullptr) { ATA_ERROR("error allocating device buffer memory."); goto error; } } } // Allocate our DsHandle structures for the stream. - if (m_stream.apiHandle == 0) { + if (m_apiHandle == 0) { handle = new DsHandle; if (handle == nullptr) { ATA_ERROR("error allocating AsioHandle memory."); @@ -768,46 +767,46 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, TRUE, // manual-reset FALSE, // non-signaled initially nullptr); // unnamed - m_stream.apiHandle = (void *) handle; + m_apiHandle = (void *) handle; } else { - handle = (DsHandle *) m_stream.apiHandle; + handle = (DsHandle *) m_apiHandle; } handle->id[modeToIdTable(_mode)] = ohandle; handle->buffer[modeToIdTable(_mode)] = bhandle; handle->dsBufferSize[modeToIdTable(_mode)] = dsBufferSize; handle->dsPointerLeadTime[modeToIdTable(_mode)] = dsPointerLeadTime; - m_stream.device[modeToIdTable(_mode)] = _device; - m_stream.state = airtaudio::state_stopped; - if ( m_stream.mode == airtaudio::mode_output + m_device[modeToIdTable(_mode)] = _device; + m_state = airtaudio::state_stopped; + if ( m_mode == airtaudio::mode_output && _mode == airtaudio::mode_input) { // We had already set up an output stream. - m_stream.mode = airtaudio::mode_duplex; + m_mode = airtaudio::mode_duplex; } else { - m_stream.mode = _mode; + m_mode = _mode; } - m_stream.nBuffers = nBuffers; - m_stream.sampleRate = _sampleRate; + m_nBuffers = nBuffers; + m_sampleRate = _sampleRate; // Setup the buffer conversion information structure. - if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + if (m_doConvertBuffer[modeToIdTable(_mode)]) { setConvertInfo(_mode, _firstChannel); } // Setup the callback thread. 
- if (m_stream.callbackInfo.isRunning == false) { + if (m_callbackInfo.isRunning == false) { unsigned threadId; - m_stream.callbackInfo.isRunning = true; - m_stream.callbackInfo.object = (void *) this; - m_stream.callbackInfo.thread = _beginthreadex(nullptr, + m_callbackInfo.isRunning = true; + m_callbackInfo.object = (void *) this; + m_callbackInfo.thread = _beginthreadex(nullptr, 0, &callbackHandler, - &m_stream.callbackInfo, + &m_callbackInfo, 0, &threadId); - if (m_stream.callbackInfo.thread == 0) { + if (m_callbackInfo.thread == 0) { ATA_ERROR("error creating callback thread!"); goto error; } // Boost DS thread priority - SetThreadPriority((HANDLE)m_stream.callbackInfo.thread, THREAD_PRIORITY_HIGHEST); + SetThreadPriority((HANDLE)m_callbackInfo.thread, THREAD_PRIORITY_HIGHEST); } return true; error: @@ -830,32 +829,32 @@ error: } CloseHandle(handle->condition); delete handle; - m_stream.apiHandle = 0; + m_apiHandle = 0; } for (int32_t i=0; i<2; i++) { - if (m_stream.userBuffer[i]) { - free(m_stream.userBuffer[i]); - m_stream.userBuffer[i] = 0; + if (m_userBuffer[i]) { + free(m_userBuffer[i]); + m_userBuffer[i] = 0; } } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = 0; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = 0; } - m_stream.state = airtaudio::state_closed; + m_state = airtaudio::state_closed; return false; } enum airtaudio::error airtaudio::api::Ds::closeStream() { - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_ERROR("no open stream to close!"); return airtaudio::error_warning; } // Stop the callback thread. - m_stream.callbackInfo.isRunning = false; - WaitForSingleObject((HANDLE) m_stream.callbackInfo.thread, INFINITE); - CloseHandle((HANDLE) m_stream.callbackInfo.thread); - DsHandle *handle = (DsHandle *) m_stream.apiHandle; + m_callbackInfo.isRunning = false; + WaitForSingleObject((HANDLE) m_callbackInfo.thread, INFINITE); + CloseHandle((HANDLE) m_callbackInfo.thread); + DsHandle *handle = (DsHandle *) m_apiHandle; if (handle) { if (handle->buffer[0]) { // the object pointer can be nullptr and valid LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0]; @@ -877,44 +876,44 @@ enum airtaudio::error airtaudio::api::Ds::closeStream() { } CloseHandle(handle->condition); delete handle; - m_stream.apiHandle = 0; + m_apiHandle = 0; } for (int32_t i=0; i<2; i++) { - if (m_stream.userBuffer[i]) { - free(m_stream.userBuffer[i]); - m_stream.userBuffer[i] = 0; + if (m_userBuffer[i]) { + free(m_userBuffer[i]); + m_userBuffer[i] = 0; } } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = 0; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = 0; } - m_stream.mode = airtaudio::mode_unknow; - m_stream.state = airtaudio::state_closed; + m_mode = airtaudio::mode_unknow; + m_state = airtaudio::state_closed; } enum airtaudio::error airtaudio::api::Ds::startStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_running) { + if (m_state == airtaudio::state_running) { ATA_ERROR("the stream is already running!"); return airtaudio::error_warning; } - DsHandle *handle = (DsHandle *) m_stream.apiHandle; + DsHandle *handle = (DsHandle *) m_apiHandle; // Increase scheduler frequency on lesser windows (a side-effect of // increasing timer accuracy). On greater windows (Win2K or later), // this is already in effect. 
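// Illustrative note (annotation, not part of the patch): timeBeginPeriod(1)
// below asks winmm for 1 ms timer resolution; the Windows API expects it to be
// paired with a matching call once the stream stops, e.g.:
//   timeBeginPeriod(1);   // when the stream starts
//   ...                   // stream runs
//   timeEndPeriod(1);     // when the stream is stopped/closed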
timeBeginPeriod(1); m_buffersRolling = false; m_duplexPrerollBytes = 0; - if (m_stream.mode == airtaudio::mode_duplex) { + if (m_mode == airtaudio::mode_duplex) { // 0.5 seconds of silence in airtaudio::mode_duplex mode while the devices spin up and synchronize. - m_duplexPrerollBytes = (int) (0.5 * m_stream.sampleRate * audio::getFormatBytes(m_stream.deviceFormat[1]) * m_stream.nDeviceChannels[1]); + m_duplexPrerollBytes = (int) (0.5 * m_sampleRate * audio::getFormatBytes(m_deviceFormat[1]) * m_nDeviceChannels[1]); } HRESULT result = 0; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; result = buffer->Play(0, 0, DSBPLAY_LOOPING); if (FAILED(result)) { @@ -922,8 +921,8 @@ enum airtaudio::error airtaudio::api::Ds::startStream() { goto unlock; } } - if ( m_stream.mode == airtaudio::mode_input - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_input + || m_mode == airtaudio::mode_duplex) { LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1]; result = buffer->Start(DSCBSTART_LOOPING); if (FAILED(result)) { @@ -934,7 +933,7 @@ enum airtaudio::error airtaudio::api::Ds::startStream() { handle->drainCounter = 0; handle->internalDrain = false; ResetEvent(handle->condition); - m_stream.state = airtaudio::state_running; + m_state = airtaudio::state_running; unlock: if (FAILED(result)) { return airtaudio::error_systemError; @@ -946,21 +945,21 @@ enum airtaudio::error airtaudio::api::Ds::stopStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return airtaudio::error_warning; } HRESULT result = 0; LPVOID audioPtr; DWORD dataLen; - DsHandle *handle = (DsHandle *) m_stream.apiHandle; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + DsHandle *handle = (DsHandle *) m_apiHandle; + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { if (handle->drainCounter == 0) { handle->drainCounter = 2; WaitForSingleObject(handle->condition, INFINITE); // block until signaled } - m_stream.state = airtaudio::state_stopped; + m_state = airtaudio::state_stopped; // Stop the buffer and clear memory LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; result = buffer->Stop(); @@ -986,12 +985,12 @@ enum airtaudio::error airtaudio::api::Ds::stopStream() { // If we start playing again, we must begin at beginning of buffer. 
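// Illustrative note (annotation, not part of the patch): the duplex pre-roll
// computed in startStream() above is simply 0.5 s of capture data expressed in
// bytes. With hypothetical values of 44100 Hz, 16-bit samples and 2 input
// channels:
//   int duplexPrerollBytes = (int)(0.5 * 44100 * 2 * 2); // == 88200 bytes of
//   // silence returned from the read buffer while the two devices sync up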
handle->bufferPointer[0] = 0; } - if ( m_stream.mode == airtaudio::mode_input - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_input + || m_mode == airtaudio::mode_duplex) { LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1]; audioPtr = nullptr; dataLen = 0; - m_stream.state = airtaudio::state_stopped; + m_state = airtaudio::state_stopped; result = buffer->Stop(); if (FAILED(result)) { ATA_ERROR("error (" << getErrorString(result) << ") stopping input buffer!"); @@ -1027,29 +1026,29 @@ enum airtaudio::error airtaudio::api::Ds::abortStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return airtaudio::error_warning; } - DsHandle *handle = (DsHandle *) m_stream.apiHandle; + DsHandle *handle = (DsHandle *) m_apiHandle; handle->drainCounter = 2; return stopStream(); } void airtaudio::api::Ds::callbackEvent() { - if (m_stream.state == airtaudio::state_stopped || m_stream.state == airtaudio::state_stopping) { + if (m_state == airtaudio::state_stopped || m_state == airtaudio::state_stopping) { Sleep(50); // sleep 50 milliseconds return; } - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_ERROR("the stream is closed ... this shouldn't happen!"); return; } - CallbackInfo *info = (CallbackInfo *) &m_stream.callbackInfo; - DsHandle *handle = (DsHandle *) m_stream.apiHandle; + CallbackInfo *info = (CallbackInfo *) &m_callbackInfo; + DsHandle *handle = (DsHandle *) m_apiHandle; // Check if we were draining the stream and signal is finished. - if (handle->drainCounter > m_stream.nBuffers + 2) { - m_stream.state = airtaudio::state_stopping; + if (handle->drainCounter > m_nBuffers + 2) { + m_state = airtaudio::state_stopping; if (handle->internalDrain == false) { SetEvent(handle->condition); } else { @@ -1062,23 +1061,23 @@ void airtaudio::api::Ds::callbackEvent() { if (handle->drainCounter == 0) { double streamTime = getStreamTime(); rtaudio::streamStatus status = 0; - if ( m_stream.mode != airtaudio::mode_input + if ( m_mode != airtaudio::mode_input && handle->xrun[0] == true) { status |= RTAUDIO_airtaudio::status_underflow; handle->xrun[0] = false; } - if ( m_stream.mode != airtaudio::mode_output + if ( m_mode != airtaudio::mode_output && handle->xrun[1] == true) { status |= RTAUDIO_airtaudio::mode_input_OVERFLOW; handle->xrun[1] = false; } - int32_t cbReturnValue = info->callback(m_stream.userBuffer[0], - m_stream.userBuffer[1], - m_stream.bufferSize, + int32_t cbReturnValue = info->callback(m_userBuffer[0], + m_userBuffer[1], + m_bufferSize, streamTime, status); if (cbReturnValue == 2) { - m_stream.state = airtaudio::state_stopping; + m_state = airtaudio::state_stopping; handle->drainCounter = 2; abortStream(); return; @@ -1098,7 +1097,7 @@ void airtaudio::api::Ds::callbackEvent() { char *buffer; long bufferBytes; if (m_buffersRolling == false) { - if (m_stream.mode == airtaudio::mode_duplex) { + if (m_mode == airtaudio::mode_duplex) { //assert(handle->dsBufferSize[0] == handle->dsBufferSize[1]); // It takes a while for the devices to get rolling. 
As a result, // there's no guarantee that the capture and write device pointers @@ -1148,7 +1147,7 @@ void airtaudio::api::Ds::callbackEvent() { handle->bufferPointer[0] -= handle->dsBufferSize[0]; } handle->bufferPointer[1] = safeReadPointer; - } else if (m_stream.mode == airtaudio::mode_output) { + } else if (m_mode == airtaudio::mode_output) { // Set the proper nextWritePosition after initial startup. LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; result = dsWriteBuffer->GetCurrentPosition(¤tWritePointer, &safeWritePointer); @@ -1163,30 +1162,30 @@ void airtaudio::api::Ds::callbackEvent() { } m_buffersRolling = true; } - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; if (handle->drainCounter > 1) { // write zeros to the output stream - bufferBytes = m_stream.bufferSize * m_stream.nUserChannels[0]; - bufferBytes *= audio::getFormatBytes(m_stream.userFormat); - memset(m_stream.userBuffer[0], 0, bufferBytes); + bufferBytes = m_bufferSize * m_nUserChannels[0]; + bufferBytes *= audio::getFormatBytes(m_userFormat); + memset(m_userBuffer[0], 0, bufferBytes); } // Setup parameters and do buffer conversion if necessary. - if (m_stream.doConvertBuffer[0]) { - buffer = m_stream.deviceBuffer; - convertBuffer(buffer, m_stream.userBuffer[0], m_stream.convertInfo[0]); - bufferBytes = m_stream.bufferSize * m_stream.nDeviceChannels[0]; - bufferBytes *= audio::getFormatBytes(m_stream.deviceFormat[0]); + if (m_doConvertBuffer[0]) { + buffer = m_deviceBuffer; + convertBuffer(buffer, m_userBuffer[0], m_convertInfo[0]); + bufferBytes = m_bufferSize * m_nDeviceChannels[0]; + bufferBytes *= audio::getFormatBytes(m_deviceFormat[0]); } else { - buffer = m_stream.userBuffer[0]; - bufferBytes = m_stream.bufferSize * m_stream.nUserChannels[0]; - bufferBytes *= audio::getFormatBytes(m_stream.userFormat); + buffer = m_userBuffer[0]; + bufferBytes = m_bufferSize * m_nUserChannels[0]; + bufferBytes *= audio::getFormatBytes(m_userFormat); } // No byte swapping necessary in DirectSound implementation. // Ahhh ... windoze. 16-bit data is signed but 8-bit data is // unsigned. So, we need to convert our signed 8-bit data here to // unsigned. - if (m_stream.deviceFormat[0] == RTAUDIO_SINT8) { + if (m_deviceFormat[0] == RTAUDIO_SINT8) { for (int32_t i=0; ibuffer[1]; long nextReadPointer = handle->bufferPointer[1]; @@ -1306,7 +1305,7 @@ void airtaudio::api::Ds::callbackEvent() { // In order to minimize audible dropouts in airtaudio::mode_duplex mode, we will // provide a pre-roll period of 0.5 seconds in which we return // zeros from the read buffer while the pointers sync up. - if (m_stream.mode == airtaudio::mode_duplex) { + if (m_mode == airtaudio::mode_duplex) { if (safeReadPointer < endRead) { if (m_duplexPrerollBytes <= 0) { // Pre-roll time over. Be more agressive. @@ -1335,10 +1334,10 @@ void airtaudio::api::Ds::callbackEvent() { } } else { // _mode == airtaudio::mode_input while ( safeReadPointer < endRead - && m_stream.callbackInfo.isRunning) { + && m_callbackInfo.isRunning) { // See comments for playback. 
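// Illustrative note (annotation, not part of the patch): the wait just below
// turns the remaining byte distance into milliseconds. With hypothetical
// values of 8820 bytes left, 16-bit samples, 2 channels and 44100 Hz:
//   double millis = 8820 * 1000.0 / (2 * 2 * 44100); // == 50 ms to sleep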
double millis = (endRead - safeReadPointer) * 1000.0; - millis /= (audio::getFormatBytes(m_stream.deviceFormat[1]) * m_stream.nDeviceChannels[1] * m_stream.sampleRate); + millis /= (audio::getFormatBytes(m_deviceFormat[1]) * m_nDeviceChannels[1] * m_sampleRate); if (millis < 1.0) { millis = 1.0; } @@ -1390,14 +1389,14 @@ void airtaudio::api::Ds::callbackEvent() { handle->bufferPointer[1] = nextReadPointer; // No byte swapping necessary in DirectSound implementation. // If necessary, convert 8-bit data from unsigned to signed. - if (m_stream.deviceFormat[1] == RTAUDIO_SINT8) { + if (m_deviceFormat[1] == RTAUDIO_SINT8) { for (int32_t j=0; jobject; - if (object->callbackEvent((uint64_t)_nframes) == false) { +int32_t airtaudio::api::Jack::jackCallbackHandler(jack_nframes_t _nframes, void* _userData) { + ATA_VERBOSE("Jack callback: [BEGIN] " << uint64_t(_userData)); + airtaudio::api::Jack* myClass = reinterpret_cast(_userData); + if (myClass->callbackEvent((uint64_t)_nframes) == false) { + ATA_VERBOSE("Jack callback: [END] 1"); return 1; } + ATA_VERBOSE("Jack callback: [END] 0"); return 0; } @@ -224,29 +225,28 @@ static int32_t jackCallbackHandler(jack_nframes_t _nframes, void *_infoPointer) // server signals that it is shutting down. It is necessary to handle // it this way because the jackShutdown() function must return before // the jack_deactivate() function (in closeStream()) will return. -static void jackCloseStream(void *_ptr) { - airtaudio::CallbackInfo* info = (airtaudio::CallbackInfo*)_ptr; - airtaudio::api::Jack* object = (airtaudio::api::Jack*)info->object; - object->closeStream(); +void airtaudio::api::Jack::jackCloseStream(void* _userData) { + airtaudio::api::Jack* myClass = reinterpret_cast(_userData); + myClass->closeStream(); } -static void jackShutdown(void* _infoPointer) { - airtaudio::CallbackInfo* info = (airtaudio::CallbackInfo*)_infoPointer; - airtaudio::api::Jack* object = (airtaudio::api::Jack*)info->object; +void airtaudio::api::Jack::jackShutdown(void* _userData) { + airtaudio::api::Jack* myClass = reinterpret_cast(_userData); // Check current stream state. If stopped, then we'll assume this // was called as a result of a call to airtaudio::api::Jack::stopStream (the // deactivation of a client handle causes this function to be called). // If not, we'll assume the Jack server is shutting down or some // other problem occurred and we should close the stream. - if (object->isStreamRunning() == false) { + if (myClass->isStreamRunning() == false) { return; } - new std::thread(jackCloseStream, info); - ATA_ERROR("RtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!"); + new std::thread(&airtaudio::api::Jack::jackCloseStream, _userData); + ATA_ERROR("The Jack server is shutting down this client ... stream stopped and closed!!"); } -static int32_t jackXrun(void* _infoPointer) { - JackHandle* handle = (JackHandle*)_infoPointer; +int32_t airtaudio::api::Jack::jackXrun(void* _userData) { + airtaudio::api::Jack* myClass = reinterpret_cast(_userData); + JackHandle* handle = (JackHandle*)myClass->m_apiHandle; if (handle->ports[0]) { handle->xrun[0] = true; } @@ -264,18 +264,18 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, audio::format _format, uint32_t* _bufferSize, airtaudio::StreamOptions* _options) { - JackHandle *handle = (JackHandle *) m_stream.apiHandle; + JackHandle *handle = (JackHandle *) m_apiHandle; // Look for jack server and try to become a client (only do once per stream). 
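// Illustrative sketch (annotation, not part of the patch): the static member
// callbacks above follow the usual C trampoline pattern, passing the object
// itself as the opaque user-data pointer instead of the old CallbackInfo:
//   jack_set_process_callback(client, &airtaudio::api::Jack::jackCallbackHandler, this);
//   int airtaudio::api::Jack::jackCallbackHandler(jack_nframes_t n, void* userData) {
//       return reinterpret_cast<airtaudio::api::Jack*>(userData)->callbackEvent(n) ? 0 : 1;
//   }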
jack_client_t *client = 0; if ( _mode == airtaudio::mode_output || ( _mode == airtaudio::mode_input - && m_stream.mode != airtaudio::mode_output)) { + && m_mode != airtaudio::mode_output)) { jack_options_t jackoptions = (jack_options_t) (JackNoStartServer); //JackNullOption; jack_status_t *status = nullptr; if (_options && !_options->streamName.empty()) { client = jack_client_open(_options->streamName.c_str(), jackoptions, status); } else { - client = jack_client_open("RtApiJack", jackoptions, status); + client = jack_client_open("airtaudioJack", jackoptions, status); } if (client == 0) { ATA_ERROR("Jack server not found or connection error!"); @@ -336,7 +336,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, ATA_ERROR("the requested sample rate (" << _sampleRate << ") is different than the JACK server rate (" << jackRate << ")."); return false; } - m_stream.sampleRate = jackRate; + m_sampleRate = jackRate; // Get the latency of the JACK port. ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag); if (ports[ _firstChannel ]) { @@ -347,31 +347,33 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, // get the latency range jack_port_get_latency_range(jack_port_by_name(client, ports[_firstChannel]), cbmode, &latrange); // be optimistic, use the min! - m_stream.latency[modeToIdTable(_mode)] = latrange.min; - //m_stream.latency[modeToIdTable(_mode)] = jack_port_get_latency(jack_port_by_name(client, ports[ _firstChannel ])); + m_latency[modeToIdTable(_mode)] = latrange.min; + //m_latency[modeToIdTable(_mode)] = jack_port_get_latency(jack_port_by_name(client, ports[ _firstChannel ])); } free(ports); // The jack server always uses 32-bit floating-point data. - m_stream.deviceFormat[modeToIdTable(_mode)] = audio::format_float; - m_stream.userFormat = _format; + m_deviceFormat[modeToIdTable(_mode)] = audio::format_float; + m_userFormat = _format; // Jack always uses non-interleaved buffers. - m_stream.deviceInterleaved[modeToIdTable(_mode)] = false; + m_deviceInterleaved[modeToIdTable(_mode)] = false; // Jack always provides host byte-ordered data. - m_stream.doByteSwap[modeToIdTable(_mode)] = false; + m_doByteSwap[modeToIdTable(_mode)] = false; // Get the buffer size. The buffer size and number of buffers // (periods) is set when the jack server is started. - m_stream.bufferSize = (int) jack_get_buffer_size(client); - *_bufferSize = m_stream.bufferSize; - m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels; - m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; + m_bufferSize = (int) jack_get_buffer_size(client); + *_bufferSize = m_bufferSize; + m_nDeviceChannels[modeToIdTable(_mode)] = _channels; + m_nUserChannels[modeToIdTable(_mode)] = _channels; // Set flags for buffer conversion. 
- m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; - if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + m_doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; + ATA_CRITICAL("Can not update format ==> use RIVER lib for this ..."); } - if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false - && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if ( m_deviceInterleaved[modeToIdTable(_mode)] == false + && m_nUserChannels[modeToIdTable(_mode)] > 1) { + ATA_ERROR("Reorder channel for the interleaving properties ..."); + //m_doConvertBuffer[modeToIdTable(_mode)] = true; } // Allocate our JackHandle structure for the stream. if (handle == 0) { @@ -380,26 +382,27 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, ATA_ERROR("error allocating JackHandle memory."); goto error; } - m_stream.apiHandle = (void *) handle; + m_apiHandle = (void *) handle; handle->client = client; } handle->deviceName[modeToIdTable(_mode)] = deviceName; // Allocate necessary internal buffers. uint64_t bufferBytes; - bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat); - m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { + bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]); + ATA_VERBOSE("allocate : nbChannel=" << m_nUserChannels[modeToIdTable(_mode)] << " bufferSize=" << *_bufferSize << " format=" << m_deviceFormat[modeToIdTable(_mode)] << "=" << audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)])); + m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0); + if (m_userBuffer[modeToIdTable(_mode)].size() == 0) { ATA_ERROR("error allocating user buffer memory."); goto error; } - if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + if (m_doConvertBuffer[modeToIdTable(_mode)]) { bool makeBuffer = true; if (_mode == airtaudio::mode_output) { - bufferBytes = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]); + bufferBytes = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]); } else { // _mode == airtaudio::mode_input - bufferBytes = m_stream.nDeviceChannels[1] * audio::getFormatBytes(m_stream.deviceFormat[1]); - if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) { - uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]); + bufferBytes = m_nDeviceChannels[1] * audio::getFormatBytes(m_deviceFormat[1]); + if (m_mode == airtaudio::mode_output && m_deviceBuffer) { + uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]); if (bufferBytes < bytesOut) { makeBuffer = false; } @@ -407,9 +410,9 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, } if (makeBuffer) { bufferBytes *= *_bufferSize; - if (m_stream.deviceBuffer) free(m_stream.deviceBuffer); - m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1); - if (m_stream.deviceBuffer == nullptr) { + if (m_deviceBuffer) free(m_deviceBuffer); + m_deviceBuffer = (char *) calloc(bufferBytes, 1); + if (m_deviceBuffer == nullptr) { ATA_ERROR("error allocating device buffer memory."); goto error; } @@ -421,24 +424,23 @@ bool 
airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, ATA_ERROR("error allocating port memory."); goto error; } - m_stream.device[modeToIdTable(_mode)] = _device; - m_stream.channelOffset[modeToIdTable(_mode)] = _firstChannel; - m_stream.state = airtaudio::state_stopped; - m_stream.callbackInfo.object = (void *) this; - if ( m_stream.mode == airtaudio::mode_output + m_device[modeToIdTable(_mode)] = _device; + m_channelOffset[modeToIdTable(_mode)] = _firstChannel; + m_state = airtaudio::state_stopped; + if ( m_mode == airtaudio::mode_output && _mode == airtaudio::mode_input) { // We had already set up the stream for output. - m_stream.mode = airtaudio::mode_duplex; + m_mode = airtaudio::mode_duplex; } else { - m_stream.mode = _mode; - jack_set_process_callback(handle->client, jackCallbackHandler, (void *) &m_stream.callbackInfo); - jack_set_xrun_callback(handle->client, jackXrun, (void *) &handle); - jack_on_shutdown(handle->client, jackShutdown, (void *) &m_stream.callbackInfo); + m_mode = _mode; + jack_set_process_callback(handle->client, &airtaudio::api::Jack::jackCallbackHandler, this); + jack_set_xrun_callback(handle->client, &airtaudio::api::Jack::jackXrun, this); + jack_on_shutdown(handle->client, &airtaudio::api::Jack::jackShutdown, this); } // Register our ports. char label[64]; if (_mode == airtaudio::mode_output) { - for (uint32_t i=0; iports[0][i] = jack_port_register(handle->client, (const char *)label, @@ -447,7 +449,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, 0); } } else { - for (uint32_t i=0; iports[1][i] = jack_port_register(handle->client, (const char *)label, @@ -459,7 +461,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, // Setup the buffer conversion information structure. We don't use // buffers to do channel offsets, so we override that parameter // here. 
- if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + if (m_doConvertBuffer[modeToIdTable(_mode)]) { setConvertInfo(_mode, 0); } return true; @@ -473,29 +475,26 @@ error: free(handle->ports[1]); } delete handle; - m_stream.apiHandle = nullptr; + m_apiHandle = nullptr; } for (int32_t iii=0; iii<2; ++iii) { - if (m_stream.userBuffer[iii]) { - free(m_stream.userBuffer[iii]); - m_stream.userBuffer[iii] = nullptr; - } + m_userBuffer[iii].clear(); } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = nullptr; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = nullptr; } return false; } enum airtaudio::error airtaudio::api::Jack::closeStream() { - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_ERROR("no open stream to close!"); return airtaudio::error_warning; } - JackHandle *handle = (JackHandle *) m_stream.apiHandle; + JackHandle *handle = (JackHandle *) m_apiHandle; if (handle != nullptr) { - if (m_stream.state == airtaudio::state_running) { + if (m_state == airtaudio::state_running) { jack_deactivate(handle->client); } jack_client_close(handle->client); @@ -508,20 +507,17 @@ enum airtaudio::error airtaudio::api::Jack::closeStream() { free(handle->ports[1]); } delete handle; - m_stream.apiHandle = nullptr; + m_apiHandle = nullptr; } for (int32_t i=0; i<2; i++) { - if (m_stream.userBuffer[i]) { - free(m_stream.userBuffer[i]); - m_stream.userBuffer[i] = nullptr; - } + m_userBuffer[i].clear(); } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = nullptr; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = nullptr; } - m_stream.mode = airtaudio::mode_unknow; - m_stream.state = airtaudio::state_closed; + m_mode = airtaudio::mode_unknow; + m_state = airtaudio::state_closed; return airtaudio::error_none; } @@ -529,11 +525,11 @@ enum airtaudio::error airtaudio::api::Jack::startStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_running) { + if (m_state == airtaudio::state_running) { ATA_ERROR("the stream is already running!"); return airtaudio::error_warning; } - JackHandle *handle = (JackHandle *) m_stream.apiHandle; + JackHandle *handle = (JackHandle *) m_apiHandle; int32_t result = jack_activate(handle->client); if (result) { ATA_ERROR("unable to activate JACK client!"); @@ -541,8 +537,8 @@ enum airtaudio::error airtaudio::api::Jack::startStream() { } const char **ports; // Get the list of available ports. - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { result = 1; ports = jack_get_ports(handle->client, handle->deviceName[0].c_str(), nullptr, JackPortIsInput); if (ports == nullptr) { @@ -552,10 +548,10 @@ enum airtaudio::error airtaudio::api::Jack::startStream() { // Now make the port connections. Since RtAudio wasn't designed to // allow the user to select particular channels of a device, we'll // just open the first "nChannels" ports with offset. 
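// Illustrative sketch (annotation, not part of the patch): the connection loop
// below amounts to wiring each registered port to the matching device port;
// jack_connect() returns 0 on success (nChannels and offset are hypothetical
// stand-ins for the member fields used here):
//   for (uint32_t i = 0; i < nChannels && ports[offset + i]; ++i) {
//       jack_connect(handle->client, jack_port_name(handle->ports[0][i]), ports[offset + i]);
//   }
//   free(ports);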
- for (uint32_t i=0; iclient, jack_port_name(handle->ports[0][i]), ports[ m_stream.channelOffset[0] + i ]); + if (ports[ m_channelOffset[0] + i ]) + result = jack_connect(handle->client, jack_port_name(handle->ports[0][i]), ports[ m_channelOffset[0] + i ]); if (result) { free(ports); ATA_ERROR("error connecting output ports!"); @@ -564,8 +560,8 @@ enum airtaudio::error airtaudio::api::Jack::startStream() { } free(ports); } - if ( m_stream.mode == airtaudio::mode_input - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_input + || m_mode == airtaudio::mode_duplex) { result = 1; ports = jack_get_ports(handle->client, handle->deviceName[1].c_str(), nullptr, JackPortIsOutput); if (ports == nullptr) { @@ -573,10 +569,10 @@ enum airtaudio::error airtaudio::api::Jack::startStream() { goto unlock; } // Now make the port connections. See note above. - for (uint32_t i=0; iclient, ports[ m_stream.channelOffset[1] + i ], jack_port_name(handle->ports[1][i])); + if (ports[ m_channelOffset[1] + i ]) { + result = jack_connect(handle->client, ports[ m_channelOffset[1] + i ], jack_port_name(handle->ports[1][i])); } if (result) { free(ports); @@ -588,7 +584,7 @@ enum airtaudio::error airtaudio::api::Jack::startStream() { } handle->drainCounter = 0; handle->internalDrain = false; - m_stream.state = airtaudio::state_running; + m_state = airtaudio::state_running; unlock: if (result == 0) { return airtaudio::error_none; @@ -600,21 +596,21 @@ enum airtaudio::error airtaudio::api::Jack::stopStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return airtaudio::error_warning; } - JackHandle *handle = (JackHandle *) m_stream.apiHandle; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + JackHandle *handle = (JackHandle *) m_apiHandle; + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { if (handle->drainCounter == 0) { handle->drainCounter = 2; - std::unique_lock lck(m_stream.mutex); + std::unique_lock lck(m_mutex); handle->condition.wait(lck); } } jack_deactivate(handle->client); - m_stream.state = airtaudio::state_stopped; + m_state = airtaudio::state_stopped; return airtaudio::error_none; } @@ -622,11 +618,11 @@ enum airtaudio::error airtaudio::api::Jack::abortStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return airtaudio::error_warning; } - JackHandle *handle = (JackHandle *) m_stream.apiHandle; + JackHandle *handle = (JackHandle *) m_apiHandle; handle->drainCounter = 2; return stopStream(); } @@ -636,32 +632,30 @@ enum airtaudio::error airtaudio::api::Jack::abortStream() { // aborted. It is necessary to handle it this way because the // callbackEvent() function must return before the jack_deactivate() // function will return. 
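// Illustrative note (annotation, not part of the patch): stopping from inside
// the process callback would deadlock, since jack_deactivate() waits for that
// callback to return, so the stop is delegated to a short-lived helper thread:
//   new std::thread(jackStopStream, this); // returns immediately; the helper
//                                          // calls stopStream() -> jack_deactivate()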
-static void jackStopStream(void *_ptr) { - airtaudio::CallbackInfo *info = (airtaudio::CallbackInfo *) _ptr; - airtaudio::api::Jack *object = (airtaudio::api::Jack *) info->object; - object->stopStream(); +static void jackStopStream(void* _userData) { + airtaudio::api::Jack* myClass = reinterpret_cast(_userData); + myClass->stopStream(); } bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) { - if ( m_stream.state == airtaudio::state_stopped - || m_stream.state == airtaudio::state_stopping) { + if ( m_state == airtaudio::state_stopped + || m_state == airtaudio::state_stopping) { return true; } - if (m_stream.state == airtaudio::state_closed) { - ATA_ERROR("RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!"); + if (m_state == airtaudio::state_closed) { + ATA_ERROR("the stream is closed ... this shouldn't happen!"); return false; } - if (m_stream.bufferSize != _nframes) { - ATA_ERROR("RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!"); + if (m_bufferSize != _nframes) { + ATA_ERROR("the JACK buffer size has changed ... cannot process!"); return false; } - CallbackInfo *info = (CallbackInfo *) &m_stream.callbackInfo; - JackHandle *handle = (JackHandle *) m_stream.apiHandle; + JackHandle *handle = (JackHandle *) m_apiHandle; // Check if we were draining the stream and signal is finished. if (handle->drainCounter > 3) { - m_stream.state = airtaudio::state_stopping; + m_state = airtaudio::state_stopping; if (handle->internalDrain == true) { - new std::thread(jackStopStream, info); + new std::thread(jackStopStream, this); } else { handle->condition.notify_one(); } @@ -671,23 +665,23 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) { if (handle->drainCounter == 0) { double streamTime = getStreamTime(); enum airtaudio::status status = airtaudio::status_ok; - if (m_stream.mode != airtaudio::mode_input && handle->xrun[0] == true) { + if (m_mode != airtaudio::mode_input && handle->xrun[0] == true) { status = airtaudio::status_underflow; handle->xrun[0] = false; } - if (m_stream.mode != airtaudio::mode_output && handle->xrun[1] == true) { + if (m_mode != airtaudio::mode_output && handle->xrun[1] == true) { status = airtaudio::status_overflow; handle->xrun[1] = false; } - int32_t cbReturnValue = info->callback(m_stream.userBuffer[0], - m_stream.userBuffer[1], - m_stream.bufferSize, - streamTime, - status); + int32_t cbReturnValue = m_callbackInfo.callback(&m_userBuffer[0][0], + &m_userBuffer[1][0], + m_bufferSize, + streamTime, + status); if (cbReturnValue == 2) { - m_stream.state = airtaudio::state_stopping; + m_state = airtaudio::state_stopping; handle->drainCounter = 2; - new std::thread(jackStopStream, info); + new std::thread(jackStopStream, this); return true; } else if (cbReturnValue == 1) { @@ -697,22 +691,23 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) { } jack_default_audio_sample_t *jackbuffer; uint64_t bufferBytes = _nframes * sizeof(jack_default_audio_sample_t); - if (m_stream.mode == airtaudio::mode_output || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { if (handle->drainCounter > 1) { // write zeros to the output stream - for (uint32_t i=0; iports[0][i], (jack_nframes_t) _nframes); memset(jackbuffer, 0, bufferBytes); } - } else if (m_stream.doConvertBuffer[0]) { - convertBuffer(m_stream.deviceBuffer, m_stream.userBuffer[0], m_stream.convertInfo[0]); - for (uint32_t i=0; iports[0][i], (jack_nframes_t) _nframes); - 
memcpy(jackbuffer, &m_stream.deviceBuffer[i*bufferBytes], bufferBytes); + memcpy(jackbuffer, &m_deviceBuffer[i*bufferBytes], bufferBytes); } } else { // no buffer conversion - for (uint32_t i=0; iports[0][i], (jack_nframes_t) _nframes); - memcpy(jackbuffer, &m_stream.userBuffer[0][i*bufferBytes], bufferBytes); + memcpy(jackbuffer, &m_userBuffer[0][i*bufferBytes], bufferBytes); } } if (handle->drainCounter) { @@ -720,19 +715,19 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) { goto unlock; } } - if ( m_stream.mode == airtaudio::mode_input - || m_stream.mode == airtaudio::mode_duplex) { - if (m_stream.doConvertBuffer[1]) { - for (uint32_t i=0; iports[1][i], (jack_nframes_t) _nframes); - memcpy(&m_stream.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes); + memcpy(&m_deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes); } - convertBuffer(m_stream.userBuffer[1], m_stream.deviceBuffer, m_stream.convertInfo[1]); + convertBuffer(&m_userBuffer[1][0], m_deviceBuffer, m_convertInfo[1]); } else { // no buffer conversion - for (uint32_t i=0; iports[1][i], (jack_nframes_t) _nframes); - memcpy(&m_stream.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes); + memcpy(&m_userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes); } } } diff --git a/airtaudio/api/Jack.h b/airtaudio/api/Jack.h index 3441264..ab93a3c 100644 --- a/airtaudio/api/Jack.h +++ b/airtaudio/api/Jack.h @@ -1,14 +1,14 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ #if !defined(__AIRTAUDIO_API_JACK_H__) && defined(__UNIX_JACK__) #define __AIRTAUDIO_API_JACK_H__ +#include namespace airtaudio { namespace api { class Jack: public airtaudio::Api { @@ -32,6 +32,11 @@ namespace airtaudio { // which is not a member of RtAudio. External use of this function // will most likely produce highly undesireable results! bool callbackEvent(uint64_t _nframes); + private: + static int32_t jackXrun(void* _userData); + static void jackCloseStream(void* _userData); + static void jackShutdown(void* _userData); + static int32_t jackCallbackHandler(jack_nframes_t _nframes, void* _userData); private: bool probeDeviceOpen(uint32_t _device, airtaudio::mode _mode, diff --git a/airtaudio/api/Oss.cpp b/airtaudio/api/Oss.cpp index 9576e0c..71e0157 100644 --- a/airtaudio/api/Oss.cpp +++ b/airtaudio/api/Oss.cpp @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. 
Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ @@ -49,7 +48,7 @@ airtaudio::api::Oss::Oss() { } airtaudio::api::Oss::~Oss() { - if (m_stream.state != airtaudio::state_closed) { + if (m_state != airtaudio::state_closed) { closeStream(); } } @@ -227,12 +226,12 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, return false; } int32_t flags = 0; - OssHandle *handle = (OssHandle *) m_stream.apiHandle; + OssHandle *handle = (OssHandle *) m_apiHandle; if (_mode == airtaudio::mode_output) { flags |= O_WRONLY; } else { // _mode == airtaudio::mode_input - if ( m_stream.mode == airtaudio::mode_output - && m_stream.device[0] == _device) { + if ( m_mode == airtaudio::mode_output + && m_device[0] == _device) { // We just set the same device for playback ... close and reopen for duplex (OSS only). close(handle->id[0]); handle->id[0] = 0; @@ -241,7 +240,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, return false; } // Check that the number previously set channels is the same. - if (m_stream.nUserChannels[0] != _channels) { + if (m_nUserChannels[0] != _channels) { ATA_ERROR("input/output channels must be equal for OSS duplex device (" << ainfo.name << ")."); return false; } @@ -278,7 +277,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, } */ // Check the device channel support. - m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; + m_nUserChannels[modeToIdTable(_mode)] = _channels; if (ainfo.max_channels < (int)(_channels + _firstChannel)) { close(fd); ATA_ERROR("the device (" << ainfo.name << ") does not support requested channel parameters."); @@ -293,7 +292,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, ATA_ERROR("error setting channel parameters on device (" << ainfo.name << ")."); return false; } - m_stream.nDeviceChannels[modeToIdTable(_mode)] = deviceChannels; + m_nDeviceChannels[modeToIdTable(_mode)] = deviceChannels; // Get the data format mask int32_t mask; result = ioctl(fd, SNDCTL_DSP_GETFMTS, &mask); @@ -303,71 +302,71 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, return false; } // Determine how to set the device format. 
- m_stream.userFormat = _format; + m_userFormat = _format; int32_t deviceFormat = -1; - m_stream.doByteSwap[modeToIdTable(_mode)] = false; + m_doByteSwap[modeToIdTable(_mode)] = false; if (_format == RTAUDIO_SINT8) { if (mask & AFMT_S8) { deviceFormat = AFMT_S8; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; } } else if (_format == RTAUDIO_SINT16) { if (mask & AFMT_S16_NE) { deviceFormat = AFMT_S16_NE; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; } else if (mask & AFMT_S16_OE) { deviceFormat = AFMT_S16_OE; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; - m_stream.doByteSwap[modeToIdTable(_mode)] = true; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; + m_doByteSwap[modeToIdTable(_mode)] = true; } } else if (_format == RTAUDIO_SINT24) { if (mask & AFMT_S24_NE) { deviceFormat = AFMT_S24_NE; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; } else if (mask & AFMT_S24_OE) { deviceFormat = AFMT_S24_OE; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; - m_stream.doByteSwap[modeToIdTable(_mode)] = true; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; + m_doByteSwap[modeToIdTable(_mode)] = true; } } else if (_format == RTAUDIO_SINT32) { if (mask & AFMT_S32_NE) { deviceFormat = AFMT_S32_NE; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; } else if (mask & AFMT_S32_OE) { deviceFormat = AFMT_S32_OE; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; - m_stream.doByteSwap[modeToIdTable(_mode)] = true; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; + m_doByteSwap[modeToIdTable(_mode)] = true; } } if (deviceFormat == -1) { // The user requested format is not natively supported by the device. 
if (mask & AFMT_S16_NE) { deviceFormat = AFMT_S16_NE; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; } else if (mask & AFMT_S32_NE) { deviceFormat = AFMT_S32_NE; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; } else if (mask & AFMT_S24_NE) { deviceFormat = AFMT_S24_NE; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; } else if (mask & AFMT_S16_OE) { deviceFormat = AFMT_S16_OE; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; - m_stream.doByteSwap[modeToIdTable(_mode)] = true; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; + m_doByteSwap[modeToIdTable(_mode)] = true; } else if (mask & AFMT_S32_OE) { deviceFormat = AFMT_S32_OE; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; - m_stream.doByteSwap[modeToIdTable(_mode)] = true; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; + m_doByteSwap[modeToIdTable(_mode)] = true; } else if (mask & AFMT_S24_OE) { deviceFormat = AFMT_S24_OE; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; - m_stream.doByteSwap[modeToIdTable(_mode)] = true; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; + m_doByteSwap[modeToIdTable(_mode)] = true; } else if (mask & AFMT_S8) { deviceFormat = AFMT_S8; - m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; + m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; } } - if (m_stream.deviceFormat[modeToIdTable(_mode)] == 0) { + if (m_deviceFormat[modeToIdTable(_mode)] == 0) { // This really shouldn't happen ... close(fd); ATA_ERROR("device (" << ainfo.name << ") data format not supported by RtAudio."); @@ -389,7 +388,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM. // We'll check the actual value used near the end of the setup // procedure. - int32_t ossBufferBytes = *_bufferSize * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels; + int32_t ossBufferBytes = *_bufferSize * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]) * deviceChannels; if (ossBufferBytes < 16) { ossBufferBytes = 16; } @@ -411,10 +410,10 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, ATA_ERROR("error setting buffer size on device (" << ainfo.name << ")."); return false; } - m_stream.nBuffers = buffers; + m_nBuffers = buffers; // Save buffer size (in sample frames). - *_bufferSize = ossBufferBytes / (audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels); - m_stream.bufferSize = *_bufferSize; + *_bufferSize = ossBufferBytes / (audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]) * deviceChannels); + m_bufferSize = *_bufferSize; // Set the sample rate. int32_t srate = _sampleRate; result = ioctl(fd, SNDCTL_DSP_SPEED, &srate); @@ -429,55 +428,55 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, ATA_ERROR("device (" << ainfo.name << ") does not support sample rate (" << _sampleRate << ")."); return false; } - m_stream.sampleRate = _sampleRate; + m_sampleRate = _sampleRate; if ( _mode == airtaudio::mode_input - && m_stream._mode == airtaudio::mode_output - && m_stream.device[0] == _device) { + && m__mode == airtaudio::mode_output + && m_device[0] == _device) { // We're doing duplex setup here. 
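// Illustrative note (annotation, not part of the patch): the 2^SSSS / 2^MMMM
// encoding mentioned above packs both values into the single int passed to
// SNDCTL_DSP_SETFRAGMENT: the low 16 bits select a fragment of 2^SSSS bytes,
// the high 16 bits the fragment count. For a hypothetical 1024-byte fragment
// (2^10) with 4 fragments:
//   int fragment = (4 << 16) | 10;  // == 0x0004000A
//   ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &fragment);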
- m_stream.deviceFormat[0] = m_stream.deviceFormat[1]; - m_stream.nDeviceChannels[0] = deviceChannels; + m_deviceFormat[0] = m_deviceFormat[1]; + m_nDeviceChannels[0] = deviceChannels; } // Set interleaving parameters. - m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; + m_deviceInterleaved[modeToIdTable(_mode)] = true; // Set flags for buffer conversion - m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; - if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + m_doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false - && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { - m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; + if ( m_deviceInterleaved[modeToIdTable(_mode)] == false + && m_nUserChannels[modeToIdTable(_mode)] > 1) { + m_doConvertBuffer[modeToIdTable(_mode)] = true; } // Allocate the stream handles if necessary and then save. - if (m_stream.apiHandle == 0) { + if (m_apiHandle == 0) { handle = new OssHandle; if handle == nullptr) { ATA_ERROR("error allocating OssHandle memory."); goto error; } - m_stream.apiHandle = (void *) handle; + m_apiHandle = (void *) handle; } else { - handle = (OssHandle *) m_stream.apiHandle; + handle = (OssHandle *) m_apiHandle; } handle->id[modeToIdTable(_mode)] = fd; // Allocate necessary internal buffers. 
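// Illustrative note (annotation, not part of the patch): the user buffer
// allocated below is channels * frames-per-buffer * bytes-per-sample. With
// hypothetical values of 2 channels, 512 frames and 32-bit float samples:
//   uint64_t bufferBytes = 2 * 512 * 4; // == 4096 bytes per callback buffer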
uint64_t bufferBytes; - bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat); - m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { + bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat); + m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating user buffer memory."); goto error; } - if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + if (m_doConvertBuffer[modeToIdTable(_mode)]) { bool makeBuffer = true; - bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]); + bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]); if (_mode == airtaudio::mode_input) { - if ( m_stream._mode == airtaudio::mode_output - && m_stream.deviceBuffer) { - uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]); + if ( m__mode == airtaudio::mode_output + && m_deviceBuffer) { + uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]); if (bufferBytes <= bytesOut) { makeBuffer = false; } @@ -485,37 +484,37 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, } if (makeBuffer) { bufferBytes *= *_bufferSize; - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); + if (m_deviceBuffer) { + free(m_deviceBuffer); } - m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1); - if (m_stream.deviceBuffer == nullptr) { + m_deviceBuffer = (char *) calloc(bufferBytes, 1); + if (m_deviceBuffer == nullptr) { ATA_ERROR("error allocating device buffer memory."); goto error; } } } - m_stream.device[modeToIdTable(_mode)] = _device; - m_stream.state = airtaudio::state_stopped; + m_device[modeToIdTable(_mode)] = _device; + m_state = airtaudio::state_stopped; // Setup the buffer conversion information structure. - if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + if (m_doConvertBuffer[modeToIdTable(_mode)]) { setConvertInfo(_mode, _firstChannel); } // Setup thread if necessary. - if (m_stream.mode == airtaudio::mode_output && _mode == airtaudio::mode_input) { + if (m_mode == airtaudio::mode_output && _mode == airtaudio::mode_input) { // We had already set up an output stream. - m_stream.mode = airtaudio::mode_duplex; - if (m_stream.device[0] == _device) { + m_mode = airtaudio::mode_duplex; + if (m_device[0] == _device) { handle->id[0] = fd; } } else { - m_stream.mode = _mode; + m_mode = _mode; // Setup callback thread. 
- m_stream.callbackInfo.object = (void *) this; - m_stream.callbackInfo.isRunning = true; - m_stream.callbackInfo.thread = new std::thread(ossCallbackHandler, &m_stream.callbackInfo); - if (m_stream.callbackInfo.thread == nullptr) { - m_stream.callbackInfo.isRunning = false; + m_callbackInfo.object = (void *) this; + m_callbackInfo.isRunning = true; + m_callbackInfo.thread = new std::thread(ossCallbackHandler, &m_callbackInfo); + if (m_callbackInfo.thread == nullptr) { + m_callbackInfo.isRunning = false; ATA_ERROR("creating callback thread!"); goto error; } @@ -530,41 +529,41 @@ error: close(handle->id[1]); } delete handle; - m_stream.apiHandle = 0; + m_apiHandle = 0; } for (int32_t i=0; i<2; i++) { - if (m_stream.userBuffer[i]) { - free(m_stream.userBuffer[i]); - m_stream.userBuffer[i] = 0; + if (m_userBuffer[i]) { + free(m_userBuffer[i]); + m_userBuffer[i] = 0; } } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = 0; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = 0; } return false; } enum airtaudio::error airtaudio::api::Oss::closeStream() { - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_ERROR("no open stream to close!"); return airtaudio::error_warning; } - OssHandle *handle = (OssHandle *) m_stream.apiHandle; - m_stream.callbackInfo.isRunning = false; - m_stream.mutex.lock(); - if (m_stream.state == airtaudio::state_stopped) { + OssHandle *handle = (OssHandle *) m_apiHandle; + m_callbackInfo.isRunning = false; + m_mutex.lock(); + if (m_state == airtaudio::state_stopped) { handle->runnable.notify_one(); } - m_stream.mutex.unlock(); - m_stream.callbackInfo.thread->join(); - if (m_stream.state == airtaudio::state_running) { - if (m_stream.mode == airtaudio::mode_output || m_stream.mode == airtaudio::mode_duplex) { + m_mutex.unlock(); + m_callbackInfo.thread->join(); + if (m_state == airtaudio::state_running) { + if (m_mode == airtaudio::mode_output || m_mode == airtaudio::mode_duplex) { ioctl(handle->id[0], SNDCTL_DSP_HALT, 0); } else { ioctl(handle->id[1], SNDCTL_DSP_HALT, 0); } - m_stream.state = airtaudio::state_stopped; + m_state = airtaudio::state_stopped; } if (handle) { if (handle->id[0]) { @@ -574,20 +573,20 @@ enum airtaudio::error airtaudio::api::Oss::closeStream() { close(handle->id[1]); } delete handle; - m_stream.apiHandle = 0; + m_apiHandle = 0; } for (int32_t i=0; i<2; i++) { - if (m_stream.userBuffer[i]) { - free(m_stream.userBuffer[i]); - m_stream.userBuffer[i] = 0; + if (m_userBuffer[i]) { + free(m_userBuffer[i]); + m_userBuffer[i] = 0; } } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = 0; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = 0; } - m_stream.mode = airtaudio::mode_unknow; - m_stream.state = airtaudio::state_closed; + m_mode = airtaudio::mode_unknow; + m_state = airtaudio::state_closed; return airtaudio::error_none; } @@ -595,16 +594,16 @@ enum airtaudio::error airtaudio::api::Oss::startStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_running) { + if (m_state == airtaudio::state_running) { ATA_ERROR("the stream is already running!"); return airtaudio::error_warning; } - m_stream.mutex.lock(); - m_stream.state = airtaudio::state_running; + m_mutex.lock(); + m_state = airtaudio::state_running; // No need to do anything else here ... OSS automatically starts // when fed samples. 
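// Illustrative note (annotation, not part of the patch): the notification just
// below wakes the callback thread created in probeDeviceOpen() above; that
// thread parks on the same condition variable while the stream is stopped,
// roughly:
//   std::unique_lock<std::mutex> lck(m_mutex); // in the callback thread
//   handle->runnable.wait(lck);                // sleeps until startStream()
//                                              // calls runnable.notify_one()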
- m_stream.mutex.unlock(); - OssHandle *handle = (OssHandle *) m_stream.apiHandle; + m_mutex.unlock(); + OssHandle *handle = (OssHandle *) m_apiHandle; handle->runnable.notify_one(); } @@ -612,35 +611,35 @@ enum airtaudio::error airtaudio::api::Oss::stopStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return; } - m_stream.mutex.lock(); + m_mutex.lock(); // The state might change while waiting on a mutex. - if (m_stream.state == airtaudio::state_stopped) { - m_stream.mutex.unlock(); + if (m_state == airtaudio::state_stopped) { + m_mutex.unlock(); return; } int32_t result = 0; - OssHandle *handle = (OssHandle *) m_stream.apiHandle; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + OssHandle *handle = (OssHandle *) m_apiHandle; + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { // Flush the output with zeros a few times. char *buffer; int32_t samples; audio::format format; - if (m_stream.doConvertBuffer[0]) { - buffer = m_stream.deviceBuffer; - samples = m_stream.bufferSize * m_stream.nDeviceChannels[0]; - format = m_stream.deviceFormat[0]; + if (m_doConvertBuffer[0]) { + buffer = m_deviceBuffer; + samples = m_bufferSize * m_nDeviceChannels[0]; + format = m_deviceFormat[0]; } else { - buffer = m_stream.userBuffer[0]; - samples = m_stream.bufferSize * m_stream.nUserChannels[0]; - format = m_stream.userFormat; + buffer = m_userBuffer[0]; + samples = m_bufferSize * m_nUserChannels[0]; + format = m_userFormat; } memset(buffer, 0, samples * audio::getFormatBytes(format)); - for (uint32_t i=0; iid[0], buffer, samples * audio::getFormatBytes(format)); if (result == -1) { ATA_ERROR("audio write error."); @@ -649,23 +648,23 @@ enum airtaudio::error airtaudio::api::Oss::stopStream() { } result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0); if (result == -1) { - ATA_ERROR("system error stopping callback procedure on device (" << m_stream.device[0] << ")."); + ATA_ERROR("system error stopping callback procedure on device (" << m_device[0] << ")."); goto unlock; } handle->triggered = false; } - if ( m_stream.mode == airtaudio::mode_input - || ( m_stream.mode == airtaudio::mode_duplex + if ( m_mode == airtaudio::mode_input + || ( m_mode == airtaudio::mode_duplex && handle->id[0] != handle->id[1])) { result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0); if (result == -1) { - ATA_ERROR("system error stopping input callback procedure on device (" << m_stream.device[0] << ")."); + ATA_ERROR("system error stopping input callback procedure on device (" << m_device[0] << ")."); goto unlock; } } unlock: - m_stream.state = airtaudio::state_stopped; - m_stream.mutex.unlock(); + m_state = airtaudio::state_stopped; + m_mutex.unlock(); if (result != -1) { return airtaudio::error_none; } @@ -676,36 +675,36 @@ enum airtaudio::error airtaudio::api::Oss::abortStream() { if (verifyStream() != airtaudio::error_none) { return airtaudio::error_fail; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return airtaudio::error_warning; } - m_stream.mutex.lock(); + m_mutex.lock(); // The state might change while waiting on a mutex. 
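// Illustrative note (annotation, not part of the patch): the re-check just
// below guards against another thread having stopped the stream while this
// one was blocked acquiring the lock:
//   m_mutex.lock();
//   if (m_state == airtaudio::state_stopped) { m_mutex.unlock(); return /* already stopped */; }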
- if (m_stream.state == airtaudio::state_stopped) { - m_stream.mutex.unlock(); + if (m_state == airtaudio::state_stopped) { + m_mutex.unlock(); return; } int32_t result = 0; - OssHandle *handle = (OssHandle *) m_stream.apiHandle; - if (m_stream.mode == airtaudio::mode_output || m_stream.mode == airtaudio::mode_duplex) { + OssHandle *handle = (OssHandle *) m_apiHandle; + if (m_mode == airtaudio::mode_output || m_mode == airtaudio::mode_duplex) { result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0); if (result == -1) { - ATA_ERROR("system error stopping callback procedure on device (" << m_stream.device[0] << ")."); + ATA_ERROR("system error stopping callback procedure on device (" << m_device[0] << ")."); goto unlock; } handle->triggered = false; } - if (m_stream.mode == airtaudio::mode_input || (m_stream.mode == airtaudio::mode_duplex && handle->id[0] != handle->id[1])) { + if (m_mode == airtaudio::mode_input || (m_mode == airtaudio::mode_duplex && handle->id[0] != handle->id[1])) { result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0); if (result == -1) { - ATA_ERROR("system error stopping input callback procedure on device (" << m_stream.device[0] << ")."); + ATA_ERROR("system error stopping input callback procedure on device (" << m_device[0] << ")."); goto unlock; } } unlock: - m_stream.state = airtaudio::state_stopped; - m_stream.mutex.unlock(); + m_state = airtaudio::state_stopped; + m_mutex.unlock(); if (result != -1) { return airtaudio::error_none; } @@ -713,15 +712,15 @@ unlock: } void airtaudio::api::Oss::callbackEvent() { - OssHandle *handle = (OssHandle *) m_stream.apiHandle; - if (m_stream.state == airtaudio::state_stopped) { - std::unique_lock lck(m_stream.mutex); + OssHandle *handle = (OssHandle *) m_apiHandle; + if (m_state == airtaudio::state_stopped) { + std::unique_lock lck(m_mutex); handle->runnable.wait(lck); - if (m_stream.state != airtaudio::state_running) { + if (m_state != airtaudio::state_running) { return; } } - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_ERROR("the stream is closed ... this shouldn't happen!"); return airtaudio::error_warning; } @@ -729,52 +728,52 @@ void airtaudio::api::Oss::callbackEvent() { int32_t doStopStream = 0; double streamTime = getStreamTime(); rtaudio::streamStatus status = 0; - if ( m_stream.mode != airtaudio::mode_input + if ( m_mode != airtaudio::mode_input && handle->xrun[0] == true) { status |= RTAUDIO_airtaudio::status_underflow; handle->xrun[0] = false; } - if ( m_stream.mode != airtaudio::mode_output + if ( m_mode != airtaudio::mode_output && handle->xrun[1] == true) { status |= RTAUDIO_airtaudio::mode_input_OVERFLOW; handle->xrun[1] = false; } - doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[0], - m_stream.userBuffer[1], - m_stream.bufferSize, + doStopStream = m_callbackInfo.callback(m_userBuffer[0], + m_userBuffer[1], + m_bufferSize, streamTime, status); if (doStopStream == 2) { this->abortStream(); return; } - m_stream.mutex.lock(); + m_mutex.lock(); // The state might change while waiting on a mutex. - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { goto unlock; } int32_t result; char *buffer; int32_t samples; audio::format format; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { // Setup parameters and do buffer conversion if necessary. 
- if (m_stream.doConvertBuffer[0]) { - buffer = m_stream.deviceBuffer; - convertBuffer(buffer, m_stream.userBuffer[0], m_stream.convertInfo[0]); - samples = m_stream.bufferSize * m_stream.nDeviceChannels[0]; - format = m_stream.deviceFormat[0]; + if (m_doConvertBuffer[0]) { + buffer = m_deviceBuffer; + convertBuffer(buffer, m_userBuffer[0], m_convertInfo[0]); + samples = m_bufferSize * m_nDeviceChannels[0]; + format = m_deviceFormat[0]; } else { - buffer = m_stream.userBuffer[0]; - samples = m_stream.bufferSize * m_stream.nUserChannels[0]; - format = m_stream.userFormat; + buffer = m_userBuffer[0]; + samples = m_bufferSize * m_nUserChannels[0]; + format = m_userFormat; } // Do byte swapping if necessary. - if (m_stream.doByteSwap[0]) { + if (m_doByteSwap[0]) { byteSwapBuffer(buffer, samples, format); } - if ( m_stream.mode == airtaudio::mode_duplex + if ( m_mode == airtaudio::mode_duplex && handle->triggered == false) { int32_t trig = 0; ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig); @@ -795,17 +794,17 @@ void airtaudio::api::Oss::callbackEvent() { // Continue on to input section. } } - if ( m_stream.mode == airtaudio::mode_input - || m_stream.mode == airtaudio::mode_duplex) { + if ( m_mode == airtaudio::mode_input + || m_mode == airtaudio::mode_duplex) { // Setup parameters. - if (m_stream.doConvertBuffer[1]) { - buffer = m_stream.deviceBuffer; - samples = m_stream.bufferSize * m_stream.nDeviceChannels[1]; - format = m_stream.deviceFormat[1]; + if (m_doConvertBuffer[1]) { + buffer = m_deviceBuffer; + samples = m_bufferSize * m_nDeviceChannels[1]; + format = m_deviceFormat[1]; } else { - buffer = m_stream.userBuffer[1]; - samples = m_stream.bufferSize * m_stream.nUserChannels[1]; - format = m_stream.userFormat; + buffer = m_userBuffer[1]; + samples = m_bufferSize * m_nUserChannels[1]; + format = m_userFormat; } // Read samples from device. result = read(handle->id[1], buffer, samples * audio::getFormatBytes(format)); @@ -817,16 +816,16 @@ void airtaudio::api::Oss::callbackEvent() { goto unlock; } // Do byte swapping if necessary. - if (m_stream.doByteSwap[1]) { + if (m_doByteSwap[1]) { byteSwapBuffer(buffer, samples, format); } // Do buffer conversion if necessary. - if (m_stream.doConvertBuffer[1]) { - convertBuffer(m_stream.userBuffer[1], m_stream.deviceBuffer, m_stream.convertInfo[1]); + if (m_doConvertBuffer[1]) { + convertBuffer(m_userBuffer[1], m_deviceBuffer, m_convertInfo[1]); } } unlock: - m_stream.mutex.unlock(); + m_mutex.unlock(); airtaudio::Api::tickStreamTime(); if (doStopStream == 1) { this->stopStream(); diff --git a/airtaudio/api/Oss.h b/airtaudio/api/Oss.h index a27013d..95f7ec1 100644 --- a/airtaudio/api/Oss.h +++ b/airtaudio/api/Oss.h @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ #if !defined(__AIRTAUDIO_API_OSS_H__) && defined(__LINUX_OSS__) diff --git a/airtaudio/api/Pulse.cpp b/airtaudio/api/Pulse.cpp index ad21334..ad0a2d5 100644 --- a/airtaudio/api/Pulse.cpp +++ b/airtaudio/api/Pulse.cpp @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. 
Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ @@ -65,7 +64,7 @@ struct PulseAudioHandle { }; airtaudio::api::Pulse::~Pulse() { - if (m_stream.state != airtaudio::state_closed) { + if (m_state != airtaudio::state_closed) { closeStream(); } } @@ -92,25 +91,27 @@ airtaudio::DeviceInfo airtaudio::api::Pulse::getDeviceInfo(uint32_t _device) { return info; } -static void pulseaudio_callback(void* _user) { - airtaudio::CallbackInfo *cbi = static_cast(_user); - airtaudio::api::Pulse *context = static_cast(cbi->object); - volatile bool *isRunning = &cbi->isRunning; - while (*isRunning) { - context->callbackEvent(); +static void pulseaudio_callback(void* _userData) { + airtaudio::api::Pulse* myClass = reinterpret_cast(_userData); + myClass->callbackEvent(); +} + +void airtaudio::api::Pulse::callbackEvent() { + while (m_callbackInfo.isRunning == true) { + callbackEventOneCycle(); } } enum airtaudio::error airtaudio::api::Pulse::closeStream() { - PulseAudioHandle *pah = static_cast(m_stream.apiHandle); - m_stream.callbackInfo.isRunning = false; + PulseAudioHandle *pah = static_cast(m_apiHandle); + m_callbackInfo.isRunning = false; if (pah) { - m_stream.mutex.lock(); - if (m_stream.state == airtaudio::state_stopped) { + m_mutex.lock(); + if (m_state == airtaudio::state_stopped) { pah->runnable = true; pah->runnable_cv.notify_one();; } - m_stream.mutex.unlock(); + m_mutex.unlock(); pah->thread->join(); if (pah->s_play) { pa_simple_flush(pah->s_play, nullptr); @@ -120,89 +121,83 @@ enum airtaudio::error airtaudio::api::Pulse::closeStream() { pa_simple_free(pah->s_rec); } delete pah; - m_stream.apiHandle = nullptr; + m_apiHandle = nullptr; } - if (m_stream.userBuffer[0] != nullptr) { - free(m_stream.userBuffer[0]); - m_stream.userBuffer[0] = nullptr; - } - if (m_stream.userBuffer[1] != nullptr) { - free(m_stream.userBuffer[1]); - m_stream.userBuffer[1] = nullptr; - } - m_stream.state = airtaudio::state_closed; - m_stream.mode = airtaudio::mode_unknow; + m_userBuffer[0].clear(); + m_userBuffer[1].clear(); + m_state = airtaudio::state_closed; + m_mode = airtaudio::mode_unknow; return airtaudio::error_none; } -void airtaudio::api::Pulse::callbackEvent() { - PulseAudioHandle *pah = static_cast(m_stream.apiHandle); - if (m_stream.state == airtaudio::state_stopped) { - std::unique_lock lck(m_stream.mutex); +void airtaudio::api::Pulse::callbackEventOneCycle() { + PulseAudioHandle *pah = static_cast(m_apiHandle); + if (m_state == airtaudio::state_stopped) { + std::unique_lock lck(m_mutex); while (!pah->runnable) { pah->runnable_cv.wait(lck); } - if (m_stream.state != airtaudio::state_running) { - m_stream.mutex.unlock(); + if (m_state != airtaudio::state_running) { + m_mutex.unlock(); return; } } - if (m_stream.state == airtaudio::state_closed) { + if (m_state == airtaudio::state_closed) { ATA_ERROR("the stream is closed ... 
this shouldn't happen!"); return; } double streamTime = getStreamTime(); enum airtaudio::status status = airtaudio::status_ok; - int32_t doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)], - m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)], - m_stream.bufferSize, - streamTime, - status); + int32_t doStopStream = m_callbackInfo.callback(&m_userBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)][0], + &m_userBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)][0], + m_bufferSize, + streamTime, + status); if (doStopStream == 2) { abortStream(); return; } - m_stream.mutex.lock(); - void *pulse_in = m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)]; - void *pulse_out = m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)]; - if (m_stream.state != airtaudio::state_running) { + m_mutex.lock(); + void *pulse_in = m_doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)] ? m_deviceBuffer : &m_userBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)][0]; + void *pulse_out = m_doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)] ? m_deviceBuffer : &m_userBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)][0]; + if (m_state != airtaudio::state_running) { goto unlock; } int32_t pa_error; size_t bytes; - if ( m_stream.mode == airtaudio::mode_output - || m_stream.mode == airtaudio::mode_duplex) { - if (m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)]) { - convertBuffer(m_stream.deviceBuffer, - m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)], - m_stream.convertInfo[airtaudio::modeToIdTable(airtaudio::mode_output)]); - bytes = m_stream.nDeviceChannels[airtaudio::modeToIdTable(airtaudio::mode_output)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.deviceFormat[airtaudio::modeToIdTable(airtaudio::mode_output)]); + if ( m_mode == airtaudio::mode_output + || m_mode == airtaudio::mode_duplex) { + if (m_doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)]) { + convertBuffer(m_deviceBuffer, + &m_userBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)][0], + m_convertInfo[airtaudio::modeToIdTable(airtaudio::mode_output)]); + bytes = m_nDeviceChannels[airtaudio::modeToIdTable(airtaudio::mode_output)] * m_bufferSize * audio::getFormatBytes(m_deviceFormat[airtaudio::modeToIdTable(airtaudio::mode_output)]); } else { - bytes = m_stream.nUserChannels[airtaudio::modeToIdTable(airtaudio::mode_output)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.userFormat); + bytes = m_nUserChannels[airtaudio::modeToIdTable(airtaudio::mode_output)] * m_bufferSize * audio::getFormatBytes(m_userFormat); } if (pa_simple_write(pah->s_play, pulse_out, bytes, &pa_error) < 0) { ATA_ERROR("audio write error, " << pa_strerror(pa_error) << "."); return; } } - if (m_stream.mode == airtaudio::mode_input || m_stream.mode == airtaudio::mode_duplex) { - if (m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)]) { - bytes = m_stream.nDeviceChannels[airtaudio::modeToIdTable(airtaudio::mode_input)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.deviceFormat[airtaudio::modeToIdTable(airtaudio::mode_input)]); + if (m_mode == airtaudio::mode_input || m_mode == airtaudio::mode_duplex) { + if 
(m_doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)]) { + bytes = m_nDeviceChannels[airtaudio::modeToIdTable(airtaudio::mode_input)] * m_bufferSize * audio::getFormatBytes(m_deviceFormat[airtaudio::modeToIdTable(airtaudio::mode_input)]); } else { - bytes = m_stream.nUserChannels[airtaudio::modeToIdTable(airtaudio::mode_input)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.userFormat); + bytes = m_nUserChannels[airtaudio::modeToIdTable(airtaudio::mode_input)] * m_bufferSize * audio::getFormatBytes(m_userFormat); } if (pa_simple_read(pah->s_rec, pulse_in, bytes, &pa_error) < 0) { ATA_ERROR("audio read error, " << pa_strerror(pa_error) << "."); return; } - if (m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)]) { - convertBuffer(m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)], - m_stream.deviceBuffer, - m_stream.convertInfo[airtaudio::modeToIdTable(airtaudio::mode_input)]); + if (m_doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)]) { + convertBuffer(&m_userBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)][0], + m_deviceBuffer, + m_convertInfo[airtaudio::modeToIdTable(airtaudio::mode_input)]); } } unlock: - m_stream.mutex.unlock(); + m_mutex.unlock(); airtaudio::Api::tickStreamTime(); if (doStopStream == 1) { stopStream(); @@ -212,70 +207,70 @@ unlock: } enum airtaudio::error airtaudio::api::Pulse::startStream() { - PulseAudioHandle *pah = static_cast(m_stream.apiHandle); - if (m_stream.state == airtaudio::state_closed) { + PulseAudioHandle *pah = static_cast(m_apiHandle); + if (m_state == airtaudio::state_closed) { ATA_ERROR("the stream is not open!"); return airtaudio::error_invalidUse; } - if (m_stream.state == airtaudio::state_running) { + if (m_state == airtaudio::state_running) { ATA_ERROR("the stream is already running!"); return airtaudio::error_warning; } - m_stream.mutex.lock(); - m_stream.state = airtaudio::state_running; + m_mutex.lock(); + m_state = airtaudio::state_running; pah->runnable = true; pah->runnable_cv.notify_one(); - m_stream.mutex.unlock(); + m_mutex.unlock(); return airtaudio::error_none; } enum airtaudio::error airtaudio::api::Pulse::stopStream() { - PulseAudioHandle *pah = static_cast(m_stream.apiHandle); - if (m_stream.state == airtaudio::state_closed) { + PulseAudioHandle *pah = static_cast(m_apiHandle); + if (m_state == airtaudio::state_closed) { ATA_ERROR("the stream is not open!"); return airtaudio::error_invalidUse; } - if (m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return airtaudio::error_warning; } - m_stream.state = airtaudio::state_stopped; - m_stream.mutex.lock(); + m_state = airtaudio::state_stopped; + m_mutex.lock(); if (pah && pah->s_play) { int32_t pa_error; if (pa_simple_drain(pah->s_play, &pa_error) < 0) { ATA_ERROR("error draining output device, " << pa_strerror(pa_error) << "."); - m_stream.mutex.unlock(); + m_mutex.unlock(); return airtaudio::error_systemError; } } - m_stream.state = airtaudio::state_stopped; - m_stream.mutex.unlock(); + m_state = airtaudio::state_stopped; + m_mutex.unlock(); return airtaudio::error_none; } enum airtaudio::error airtaudio::api::Pulse::abortStream() { - PulseAudioHandle *pah = static_cast(m_stream.apiHandle); - if (m_stream.state == airtaudio::state_closed) { + PulseAudioHandle *pah = static_cast(m_apiHandle); + if (m_state == airtaudio::state_closed) { ATA_ERROR("the stream is not open!"); return airtaudio::error_invalidUse; } - if 
(m_stream.state == airtaudio::state_stopped) { + if (m_state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return airtaudio::error_warning; } - m_stream.state = airtaudio::state_stopped; - m_stream.mutex.lock(); + m_state = airtaudio::state_stopped; + m_mutex.lock(); if (pah && pah->s_play) { int32_t pa_error; if (pa_simple_flush(pah->s_play, &pa_error) < 0) { ATA_ERROR("error flushing output device, " << pa_strerror(pa_error) << "."); - m_stream.mutex.unlock(); + m_mutex.unlock(); return airtaudio::error_systemError; } } - m_stream.state = airtaudio::state_stopped; - m_stream.mutex.unlock(); + m_state = airtaudio::state_stopped; + m_mutex.unlock(); return airtaudio::error_none; } @@ -308,7 +303,7 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device, for (const uint32_t *sr = SUPPORTED_SAMPLERATES; *sr; ++sr) { if (_sampleRate == *sr) { sr_found = true; - m_stream.sampleRate = _sampleRate; + m_sampleRate = _sampleRate; ss.rate = _sampleRate; break; } @@ -323,7 +318,7 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device, ++sf) { if (_format == sf->airtaudio_format) { sf_found = true; - m_stream.userFormat = sf->airtaudio_format; + m_userFormat = sf->airtaudio_format; ss.format = sf->pa_format; break; } @@ -332,55 +327,55 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device, ATA_ERROR("unsupported sample format."); return false; } - m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; - m_stream.nBuffers = 1; - m_stream.doByteSwap[modeToIdTable(_mode)] = false; - m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; - m_stream.deviceFormat[modeToIdTable(_mode)] = m_stream.userFormat; - m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; - m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel; - m_stream.channelOffset[modeToIdTable(_mode)] = 0; + m_deviceInterleaved[modeToIdTable(_mode)] = true; + m_nBuffers = 1; + m_doByteSwap[modeToIdTable(_mode)] = false; + m_doConvertBuffer[modeToIdTable(_mode)] = false; + m_deviceFormat[modeToIdTable(_mode)] = m_userFormat; + m_nUserChannels[modeToIdTable(_mode)] = _channels; + m_nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel; + m_channelOffset[modeToIdTable(_mode)] = 0; // Allocate necessary internal buffers. 
- bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat); - m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { + bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat); + m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0); + if (m_userBuffer[modeToIdTable(_mode)].size() == 0) { ATA_ERROR("error allocating user buffer memory."); goto error; } - m_stream.bufferSize = *_bufferSize; - if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + m_bufferSize = *_bufferSize; + if (m_doConvertBuffer[modeToIdTable(_mode)]) { bool makeBuffer = true; - bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]); + bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]); if (_mode == airtaudio::mode_input) { - if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) { - uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]); + if (m_mode == airtaudio::mode_output && m_deviceBuffer) { + uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]); if (bufferBytes <= bytesOut) makeBuffer = false; } } if (makeBuffer) { bufferBytes *= *_bufferSize; - if (m_stream.deviceBuffer) free(m_stream.deviceBuffer); - m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1); - if (m_stream.deviceBuffer == nullptr) { + if (m_deviceBuffer) free(m_deviceBuffer); + m_deviceBuffer = (char *) calloc(bufferBytes, 1); + if (m_deviceBuffer == nullptr) { ATA_ERROR("error allocating device buffer memory."); goto error; } } } - m_stream.device[modeToIdTable(_mode)] = _device; + m_device[modeToIdTable(_mode)] = _device; // Setup the buffer conversion information structure. 
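The probeDeviceOpen() hunk above swaps the calloc()'d user buffers for std::vector<char> (resize() plus a size() check), while the shared device buffer stays a raw calloc()'d pointer. Below is a minimal sketch of that buffer-ownership pattern under hypothetical names; the sizing formula (channels * frames * bytes-per-sample) mirrors the patch, everything else is illustrative.

#include <cstdint>
#include <vector>

// Hypothetical stand-in for the per-direction user buffers in this patch.
struct StreamBuffers {
    std::vector<char> user[2];   // [0] = output, [1] = input; storage owned by the vector
    uint32_t bufferFrames = 0;
};

// Size a user buffer the way the patch does: channels * frames * bytes per sample.
bool allocateUserBuffer(StreamBuffers& s, int dirId, uint32_t channels,
                        uint32_t frames, uint32_t bytesPerSample) {
    uint64_t bytes = uint64_t(channels) * frames * bytesPerSample;
    s.user[dirId].assign(bytes, 0);     // zero-filled, like calloc()
    s.bufferFrames = frames;
    return !s.user[dirId].empty();      // mirrors the size() == 0 check
}

// Code that still wants a raw char* keeps working via &vec[0] (or data()).
char* rawPointer(StreamBuffers& s, int dirId) {
    return s.user[dirId].empty() ? nullptr : &s.user[dirId][0];
}

Because the vector owns its storage, the error and close paths can simply call clear() instead of the free()/nullptr pairs the old code needed, which is what the closeStream() and error-label hunks in this file now do.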
- if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { + if (m_doConvertBuffer[modeToIdTable(_mode)]) { setConvertInfo(_mode, _firstChannel); } - if (!m_stream.apiHandle) { + if (!m_apiHandle) { PulseAudioHandle *pah = new PulseAudioHandle; if (!pah) { ATA_ERROR("error allocating memory for handle."); goto error; } - m_stream.apiHandle = pah; + m_apiHandle = pah; } - pah = static_cast(m_stream.apiHandle); + pah = static_cast(m_apiHandle); int32_t error; switch (_mode) { case airtaudio::mode_input: @@ -400,38 +395,34 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device, default: goto error; } - if (m_stream.mode == airtaudio::mode_unknow) { - m_stream.mode = _mode; - } else if (m_stream.mode == _mode) { + if (m_mode == airtaudio::mode_unknow) { + m_mode = _mode; + } else if (m_mode == _mode) { goto error; }else { - m_stream.mode = airtaudio::mode_duplex; + m_mode = airtaudio::mode_duplex; } - if (!m_stream.callbackInfo.isRunning) { - m_stream.callbackInfo.object = this; - m_stream.callbackInfo.isRunning = true; - pah->thread = new std::thread(pulseaudio_callback, (void *)&m_stream.callbackInfo); + if (!m_callbackInfo.isRunning) { + m_callbackInfo.isRunning = true; + pah->thread = new std::thread(pulseaudio_callback, this); if (pah->thread == nullptr) { ATA_ERROR("error creating thread."); goto error; } } - m_stream.state = airtaudio::state_stopped; + m_state = airtaudio::state_stopped; return true; error: - if (pah && m_stream.callbackInfo.isRunning) { + if (pah && m_callbackInfo.isRunning) { delete pah; - m_stream.apiHandle = 0; + m_apiHandle = 0; } for (int32_t i=0; i<2; i++) { - if (m_stream.userBuffer[i]) { - free(m_stream.userBuffer[i]); - m_stream.userBuffer[i] = 0; - } + m_userBuffer[i].clear(); } - if (m_stream.deviceBuffer) { - free(m_stream.deviceBuffer); - m_stream.deviceBuffer = 0; + if (m_deviceBuffer) { + free(m_deviceBuffer); + m_deviceBuffer = 0; } return false; } diff --git a/airtaudio/api/Pulse.h b/airtaudio/api/Pulse.h index 79c8c2a..436d3ec 100644 --- a/airtaudio/api/Pulse.h +++ b/airtaudio/api/Pulse.h @@ -1,9 +1,8 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ #if !defined(__AIRTAUDIO_API_PULSE_H__) && defined(__LINUX_PULSE__) @@ -29,6 +28,7 @@ namespace airtaudio { // public because it is called by the internal callback handler, // which is not a member of RtAudio. External use of this function // will most likely produce highly undesireable results! + void callbackEventOneCycle(); void callbackEvent(); private: std::vector m_devices; diff --git a/airtaudio/base.cpp b/airtaudio/base.cpp index b0a86bf..e5c2e7b 100644 --- a/airtaudio/base.cpp +++ b/airtaudio/base.cpp @@ -1,7 +1,6 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. Scavone, all right reserved - * - * @license like MIT (see license file) - */ +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio + */ \ No newline at end of file diff --git a/airtaudio/base.h b/airtaudio/base.h index 9ce5f8c..e47cc3a 100644 --- a/airtaudio/base.h +++ b/airtaudio/base.h @@ -1,13 +1,12 @@ -/** - * @author Gary P. SCAVONE - * - * @copyright 2001-2013 Gary P. 
Scavone, all right reserved - * - * @license like MIT (see license file) +/** @file + * @author Edouard DUPIN + * @copyright 2011, Edouard DUPIN, all right reserved + * @license APACHE v2.0 (see license file) + * @fork from RTAudio */ -#ifndef __AIRTAUDIO_ERROR_H__ -#define __AIRTAUDIO_ERROR_H__ +#ifndef __AIRTAUDIO_CB_H__ +#define __AIRTAUDIO_CB_H__ #include #include @@ -15,6 +14,9 @@ #include #include
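Taken together, the Pulse hunks restructure the run loop: pulseaudio_callback() is reduced to forwarding `this`, callbackEvent() owns the while (m_callbackInfo.isRunning) loop, and callbackEventOneCycle() performs one buffer's worth of work. A condensed, standalone sketch of that shape follows, with hypothetical names; std::atomic is used here for clarity, whereas the patch keeps a plain flag on CallbackInfo, and its real stop path also handles condition variables and output draining that are omitted below.

#include <atomic>
#include <thread>

// Hypothetical reduction of the Pulse run-loop restructuring in this patch.
class PulseLikeBackend {
public:
    void start() {
        m_isRunning = true;
        // Was: a free callback receiving a CallbackInfo*; now the instance is passed directly.
        m_thread = std::thread(threadEntry, static_cast<void*>(this));
    }
    void stop() {
        m_isRunning = false;
        if (m_thread.joinable()) {
            m_thread.join();
        }
    }
private:
    // Static entry point: only forwards to the instance, no loop of its own.
    static void threadEntry(void* userData) {
        static_cast<PulseLikeBackend*>(userData)->callbackEvent();
    }
    // The loop now lives on the class, one cycle per iteration.
    void callbackEvent() {
        while (m_isRunning) {
            callbackEventOneCycle();
        }
    }
    void callbackEventOneCycle() {
        // read/write one buffer, invoke the user callback, convert formats, etc.
    }
    std::atomic<bool> m_isRunning{false};
    std::thread m_thread;
};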