diff --git a/airtaudio/Api.cpp b/airtaudio/Api.cpp index a6469cd..0af18a5 100644 --- a/airtaudio/Api.cpp +++ b/airtaudio/Api.cpp @@ -34,29 +34,73 @@ static const char* listType[] { "user3", "user4" }; +static int32_t listTypeSize = sizeof(listType)/sizeof(char*); -std::ostream& operator <<(std::ostream& _os, const airtaudio::type& _obj){ + +std::ostream& airtaudio::operator <<(std::ostream& _os, const enum airtaudio::type& _obj) { _os << listType[_obj]; return _os; } +std::ostream& airtaudio::operator <<(std::ostream& _os, const std::vector& _obj) { + _os << std::string("{"); + for (size_t iii=0; iii<_obj.size(); ++iii) { + if (iii!=0) { + _os << std::string(";"); + } + _os << _obj[iii]; + } + _os << std::string("}"); + return _os; +} + +std::string airtaudio::getTypeString(enum audio::format _value) { + return listType[_value]; +} + +enum airtaudio::type airtaudio::getTypeFromString(const std::string& _value) { + for (int32_t iii=0; iii(iii); + } + } + return airtaudio::type_undefined; +} + +int32_t airtaudio::modeToIdTable(enum mode _mode) { + switch (_mode) { + case mode_unknow: + case mode_duplex: + case mode_output: + return 0; + case mode_input: + return 1; + } + return 0; +} + // Static variable definitions. -static const uint32_t MAX_SAMPLE_RATES = 14; -static const uint32_t SAMPLE_RATES[] = { - 4000, - 5512, - 8000, - 9600, - 11025, - 16000, - 22050, - 32000, - 44100, - 48000, - 88200, - 96000, - 176400, - 192000 +const std::vector& airtaudio::genericSampleRate() { + static std::vector list; + if (list.size() == 0) { + list.push_back(4000); + list.push_back(5512); + list.push_back(8000); + list.push_back(9600); + list.push_back(11025); + list.push_back(16000); + list.push_back(22050); + list.push_back(32000); + list.push_back(44100); + list.push_back(48000); + list.push_back(64000); + list.push_back(88200); + list.push_back(96000); + list.push_back(128000); + list.push_back(176400); + list.push_back(192000); + } + return list; }; @@ -72,32 +116,32 @@ airtaudio::Api::~Api() { } -enum airtaudio::errorType airtaudio::Api::openStream(airtaudio::StreamParameters *oParams, +enum airtaudio::error airtaudio::Api::openStream(airtaudio::StreamParameters *oParams, airtaudio::StreamParameters *iParams, enum audio::format format, uint32_t sampleRate, uint32_t *bufferFrames, airtaudio::AirTAudioCallback callback, airtaudio::StreamOptions *options) { - if (m_stream.state != airtaudio::api::STREAM_CLOSED) { + if (m_stream.state != airtaudio::state_closed) { ATA_ERROR("a stream is already open!"); - return airtaudio::errorInvalidUse; + return airtaudio::error_invalidUse; } if (oParams && oParams->nChannels < 1) { ATA_ERROR("a non-nullptr output StreamParameters structure cannot have an nChannels value less than one."); - return airtaudio::errorInvalidUse; + return airtaudio::error_invalidUse; } if (iParams && iParams->nChannels < 1) { ATA_ERROR("a non-nullptr input StreamParameters structure cannot have an nChannels value less than one."); - return airtaudio::errorInvalidUse; + return airtaudio::error_invalidUse; } if (oParams == nullptr && iParams == nullptr) { ATA_ERROR("input and output StreamParameters structures are both nullptr!"); - return airtaudio::errorInvalidUse; + return airtaudio::error_invalidUse; } if (audio::getFormatBytes(format) == 0) { ATA_ERROR("'format' parameter value is undefined."); - return airtaudio::errorInvalidUse; + return airtaudio::error_invalidUse; } uint32_t nDevices = getDeviceCount(); uint32_t oChannels = 0; @@ -105,7 +149,7 @@ enum airtaudio::errorType 
airtaudio::Api::openStream(airtaudio::StreamParameters oChannels = oParams->nChannels; if (oParams->deviceId >= nDevices) { ATA_ERROR("output device parameter value is invalid."); - return airtaudio::errorInvalidUse; + return airtaudio::error_invalidUse; } } uint32_t iChannels = 0; @@ -113,14 +157,14 @@ enum airtaudio::errorType airtaudio::Api::openStream(airtaudio::StreamParameters iChannels = iParams->nChannels; if (iParams->deviceId >= nDevices) { ATA_ERROR("input device parameter value is invalid."); - return airtaudio::errorInvalidUse; + return airtaudio::error_invalidUse; } } clearStreamInfo(); bool result; if (oChannels > 0) { result = probeDeviceOpen(oParams->deviceId, - airtaudio::api::OUTPUT, + airtaudio::mode_output, oChannels, oParams->firstChannel, sampleRate, @@ -129,12 +173,12 @@ enum airtaudio::errorType airtaudio::Api::openStream(airtaudio::StreamParameters options); if (result == false) { ATA_ERROR("system ERROR"); - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } } if (iChannels > 0) { result = probeDeviceOpen(iParams->deviceId, - airtaudio::api::INPUT, + airtaudio::mode_input, iChannels, iParams->firstChannel, sampleRate, @@ -146,15 +190,15 @@ enum airtaudio::errorType airtaudio::Api::openStream(airtaudio::StreamParameters closeStream(); } ATA_ERROR("system error"); - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } } m_stream.callbackInfo.callback = callback; if (options != nullptr) { options->numberOfBuffers = m_stream.nBuffers; } - m_stream.state = airtaudio::api::STREAM_STOPPED; - return airtaudio::errorNone; + m_stream.state = airtaudio::state_stopped; + return airtaudio::error_none; } uint32_t airtaudio::Api::getDefaultInputDevice() { @@ -167,13 +211,13 @@ uint32_t airtaudio::Api::getDefaultOutputDevice() { return 0; } -enum airtaudio::errorType airtaudio::Api::closeStream() { +enum airtaudio::error airtaudio::Api::closeStream() { // MUST be implemented in subclasses! - return airtaudio::errorNone; + return airtaudio::error_none; } bool airtaudio::Api::probeDeviceOpen(uint32_t /*device*/, - airtaudio::api::StreamMode /*mode*/, + airtaudio::mode /*mode*/, uint32_t /*channels*/, uint32_t /*firstChannel*/, uint32_t /*sampleRate*/, @@ -195,23 +239,23 @@ void airtaudio::Api::tickStreamTime() { } long airtaudio::Api::getStreamLatency() { - if (verifyStream() != airtaudio::errorNone) { + if (verifyStream() != airtaudio::error_none) { return 0; } long totalLatency = 0; - if ( m_stream.mode == airtaudio::api::OUTPUT - || m_stream.mode == airtaudio::api::DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { totalLatency = m_stream.latency[0]; } - if ( m_stream.mode == airtaudio::api::INPUT - || m_stream.mode == airtaudio::api::DUPLEX) { + if ( m_stream.mode == airtaudio::mode_input + || m_stream.mode == airtaudio::mode_duplex) { totalLatency += m_stream.latency[1]; } return totalLatency; } double airtaudio::Api::getStreamTime() { - if (verifyStream() != airtaudio::errorNone) { + if (verifyStream() != airtaudio::error_none) { return 0.0f; } #if defined(HAVE_GETTIMEOFDAY) @@ -219,7 +263,7 @@ double airtaudio::Api::getStreamTime() { // adding in the elapsed time since the last tick. 
struct timeval then; struct timeval now; - if (m_stream.state != airtaudio::api::STREAM_RUNNING || m_stream.streamTime == 0.0) { + if (m_stream.state != airtaudio::state_running || m_stream.streamTime == 0.0) { return m_stream.streamTime; } gettimeofday(&now, nullptr); @@ -233,28 +277,27 @@ double airtaudio::Api::getStreamTime() { } uint32_t airtaudio::Api::getStreamSampleRate() { - if (verifyStream() != airtaudio::errorNone) { + if (verifyStream() != airtaudio::error_none) { return 0; } return m_stream.sampleRate; } -enum airtaudio::errorType airtaudio::Api::verifyStream() { - if (m_stream.state == airtaudio::api::STREAM_CLOSED) { +enum airtaudio::error airtaudio::Api::verifyStream() { + if (m_stream.state == airtaudio::state_closed) { ATA_ERROR("a stream is not open!"); - return airtaudio::errorInvalidUse; + return airtaudio::error_invalidUse; } - return airtaudio::errorNone; + return airtaudio::error_none; } void airtaudio::Api::clearStreamInfo() { - m_stream.mode = airtaudio::api::UNINITIALIZED; - m_stream.state = airtaudio::api::STREAM_CLOSED; + m_stream.mode = airtaudio::mode_unknow; + m_stream.state = airtaudio::state_closed; m_stream.sampleRate = 0; m_stream.bufferSize = 0; m_stream.nBuffers = 0; m_stream.userFormat = audio::format_unknow; - m_stream.userInterleaved = true; m_stream.streamTime = 0.0; m_stream.apiHandle = 0; m_stream.deviceBuffer = 0; @@ -281,91 +324,80 @@ void airtaudio::Api::clearStreamInfo() { } } -void airtaudio::Api::setConvertInfo(airtaudio::api::StreamMode _mode, uint32_t _firstChannel) { - if (_mode == airtaudio::api::INPUT) { // convert device to user buffer - m_stream.convertInfo[_mode].inJump = m_stream.nDeviceChannels[1]; - m_stream.convertInfo[_mode].outJump = m_stream.nUserChannels[1]; - m_stream.convertInfo[_mode].inFormat = m_stream.deviceFormat[1]; - m_stream.convertInfo[_mode].outFormat = m_stream.userFormat; +void airtaudio::Api::setConvertInfo(airtaudio::mode _mode, uint32_t _firstChannel) { + int32_t idTable = airtaudio::modeToIdTable(_mode); + if (_mode == airtaudio::mode_input) { // convert device to user buffer + m_stream.convertInfo[idTable].inJump = m_stream.nDeviceChannels[1]; + m_stream.convertInfo[idTable].outJump = m_stream.nUserChannels[1]; + m_stream.convertInfo[idTable].inFormat = m_stream.deviceFormat[1]; + m_stream.convertInfo[idTable].outFormat = m_stream.userFormat; } else { // convert user to device buffer - m_stream.convertInfo[_mode].inJump = m_stream.nUserChannels[0]; - m_stream.convertInfo[_mode].outJump = m_stream.nDeviceChannels[0]; - m_stream.convertInfo[_mode].inFormat = m_stream.userFormat; - m_stream.convertInfo[_mode].outFormat = m_stream.deviceFormat[0]; + m_stream.convertInfo[idTable].inJump = m_stream.nUserChannels[0]; + m_stream.convertInfo[idTable].outJump = m_stream.nDeviceChannels[0]; + m_stream.convertInfo[idTable].inFormat = m_stream.userFormat; + m_stream.convertInfo[idTable].outFormat = m_stream.deviceFormat[0]; } - if (m_stream.convertInfo[_mode].inJump < m_stream.convertInfo[_mode].outJump) { - m_stream.convertInfo[_mode].channels = m_stream.convertInfo[_mode].inJump; + if (m_stream.convertInfo[idTable].inJump < m_stream.convertInfo[idTable].outJump) { + m_stream.convertInfo[idTable].channels = m_stream.convertInfo[idTable].inJump; } else { - m_stream.convertInfo[_mode].channels = m_stream.convertInfo[_mode].outJump; + m_stream.convertInfo[idTable].channels = m_stream.convertInfo[idTable].outJump; } // Set up the interleave/deinterleave offsets. 
- if (m_stream.deviceInterleaved[_mode] != m_stream.userInterleaved) { - if ( ( _mode == airtaudio::api::OUTPUT - && m_stream.deviceInterleaved[_mode]) - || ( _mode == airtaudio::api::INPUT - && m_stream.userInterleaved)) { - for (int32_t kkk=0; kkk 0) { - if (m_stream.deviceInterleaved[_mode]) { - if (_mode == airtaudio::api::OUTPUT) { - for (int32_t kkk=0; kkk namespace airtaudio { + const std::vector& genericSampleRate(); /** * @brief Audio API specifier arguments. */ @@ -33,6 +34,11 @@ namespace airtaudio { type_user3, //!< User interface 3. type_user4, //!< User interface 4. }; + std::ostream& operator <<(std::ostream& _os, const enum airtaudio::type& _obj); + std::ostream& operator <<(std::ostream& _os, const std::vector& _obj); + std::string getTypeString(enum audio::format _value); + enum airtaudio::type getTypeFromString(const std::string& _value); + enum state { state_closed, state_stopped, @@ -45,6 +51,7 @@ namespace airtaudio { mode_input, mode_duplex }; + int32_t modeToIdTable(enum mode _mode); // A protected structure used for buffer conversion. class ConvertInfo { public: @@ -63,12 +70,11 @@ namespace airtaudio { public: uint32_t device[2]; // Playback and record, respectively. void *apiHandle; // void pointer for API specific stream handle information - enum airtaudio::mode mode; // OUTPUT, INPUT, or DUPLEX. + enum airtaudio::mode mode; // airtaudio::mode_output, airtaudio::mode_input, or airtaudio::mode_duplex. enum airtaudio::state state; // STOPPED, RUNNING, or CLOSED char *userBuffer[2]; // Playback and record, respectively. char *deviceBuffer; bool doConvertBuffer[2]; // Playback and record, respectively. - bool userInterleaved; bool deviceInterleaved[2]; // Playback and record, respectively. bool doByteSwap[2]; // Playback and record, respectively. uint32_t sampleRate; @@ -106,17 +112,17 @@ namespace airtaudio { virtual airtaudio::DeviceInfo getDeviceInfo(uint32_t _device) = 0; virtual uint32_t getDefaultInputDevice(); virtual uint32_t getDefaultOutputDevice(); - enum airtaudio::errorType openStream(airtaudio::StreamParameters *_outputParameters, + enum airtaudio::error openStream(airtaudio::StreamParameters *_outputParameters, airtaudio::StreamParameters *_inputParameters, audio::format _format, uint32_t _sampleRate, uint32_t *_bufferFrames, airtaudio::AirTAudioCallback _callback, airtaudio::StreamOptions *_options); - virtual enum airtaudio::errorType closeStream(); - virtual enum airtaudio::errorType startStream() = 0; - virtual enum airtaudio::errorType stopStream() = 0; - virtual enum airtaudio::errorType abortStream() = 0; + virtual enum airtaudio::error closeStream(); + virtual enum airtaudio::error startStream() = 0; + virtual enum airtaudio::error stopStream() = 0; + virtual enum airtaudio::error abortStream() = 0; long getStreamLatency(); uint32_t getStreamSampleRate(); virtual double getStreamTime(); @@ -156,7 +162,7 @@ namespace airtaudio { Protected common method that throws an RtError (type = INVALID_USE) if a stream is not open. */ - enum airtaudio::errorType verifyStream(); + enum airtaudio::error verifyStream(); /** * @brief Protected method used to perform format, channel number, and/or interleaving * conversions between the user and device buffers. 
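Reviewer note, not part of the patch: a minimal caller-side sketch of the free helpers added above (getTypeFromString(), the new operator<< overloads, genericSampleRate(), modeToIdTable()). It assumes the declarations land in airtaudio/Api.h as shown and that the sample-rate vector holds uint32_t; everything else is illustrative.

```cpp
#include <airtaudio/Api.h>
#include <iostream>

// Illustrative only: exercises the helpers introduced by this change.
static void demoNewHelpers() {
	using airtaudio::operator<<; // the std::vector overload is not found by ADL
	// String <-> enum round trip; unknown names fall back to type_undefined.
	enum airtaudio::type api = airtaudio::getTypeFromString("alsa");
	std::cout << "selected api: " << api << std::endl;                             // operator<<(type)
	std::cout << "generic rates: " << airtaudio::genericSampleRate() << std::endl; // operator<<(vector)
	// Playback-side modes (output/duplex/unknow) map to table index 0, input to 1.
	std::cout << "output idx=" << airtaudio::modeToIdTable(airtaudio::mode_output)
	          << " input idx=" << airtaudio::modeToIdTable(airtaudio::mode_input) << std::endl;
}
```

The index returned by modeToIdTable() is what the ALSA backend below uses to address the per-direction tables (device[], deviceFormat[], doConvertBuffer[], ...), replacing the old trick of using the mode enum value directly as an array index.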
diff --git a/airtaudio/Interface.cpp b/airtaudio/Interface.cpp index 1a29d5b..08b56c3 100644 --- a/airtaudio/Interface.cpp +++ b/airtaudio/Interface.cpp @@ -14,8 +14,8 @@ #undef __class__ #define __class__ "Interface" -std::vector airtaudio::Interface::getCompiledApi() { - std::vector apis; +std::vector airtaudio::Interface::getCompiledApi() { + std::vector apis; // The order here will control the order of RtAudio's API search in // the constructor. for (auto &it : m_apiAvaillable) { @@ -26,7 +26,7 @@ std::vector airtaudio::Interface::getCompiledApi() { -void airtaudio::Interface::openRtApi(airtaudio::api::type _api) { +void airtaudio::Interface::openRtApi(enum airtaudio::type _api) { delete m_rtapi; m_rtapi = nullptr; for (auto &it :m_apiAvaillable) { @@ -49,57 +49,57 @@ airtaudio::Interface::Interface() : ATA_DEBUG("Add interface:"); #if defined(__UNIX_JACK__) ATA_DEBUG(" JACK"); - addInterface(airtaudio::api::UNIX_JACK, airtaudio::api::Jack::Create); + addInterface(airtaudio::type_jack, airtaudio::api::Jack::Create); #endif #if defined(__LINUX_ALSA__) ATA_DEBUG(" ALSA"); - addInterface(airtaudio::api::LINUX_ALSA, airtaudio::api::Alsa::Create); + addInterface(airtaudio::type_alsa, airtaudio::api::Alsa::Create); #endif #if defined(__LINUX_PULSE__) ATA_DEBUG(" PULSE"); - addInterface(airtaudio::api::LINUX_PULSE, airtaudio::api::Pulse::Create); + addInterface(airtaudio::type_pulse, airtaudio::api::Pulse::Create); #endif #if defined(__LINUX_OSS__) ATA_DEBUG(" OSS"); - addInterface(airtaudio::api::LINUX_OSS, airtaudio::api::Oss::Create); + addInterface(airtaudio::type_oss, airtaudio::api::Oss::Create); #endif #if defined(__WINDOWS_ASIO__) ATA_DEBUG(" ASIO"); - addInterface(airtaudio::api::WINDOWS_ASIO, airtaudio::api::Asio::Create); + addInterface(airtaudio::type_asio, airtaudio::api::Asio::Create); #endif #if defined(__WINDOWS_DS__) ATA_DEBUG(" DS"); - addInterface(airtaudio::api::WINDOWS_DS, airtaudio::api::Ds::Create); + addInterface(airtaudio::type_ds, airtaudio::api::Ds::Create); #endif #if defined(__MACOSX_CORE__) - ATA_DEBUG(" MACOSX_CORE"); - addInterface(airtaudio::api::MACOSX_CORE, airtaudio::api::Core::Create); + ATA_DEBUG(" CORE OSX"); + addInterface(airtaudio::type_coreOSX, airtaudio::api::Core::Create); #endif #if defined(__IOS_CORE__) - ATA_DEBUG(" IOS_CORE"); - addInterface(airtaudio::api::IOS_CORE, airtaudio::api::CoreIos::Create); + ATA_DEBUG(" CORE IOS"); + addInterface(airtaudio::type_coreIOS, airtaudio::api::CoreIos::Create); #endif #if defined(__ANDROID_JAVA__) ATA_DEBUG(" JAVA"); - addInterface(airtaudio::api::ANDROID_JAVA, airtaudio::api::Android::Create); + addInterface(airtaudio::type_java, airtaudio::api::Android::Create); #endif #if defined(__AIRTAUDIO_DUMMY__) ATA_DEBUG(" DUMMY"); - addInterface(airtaudio::api::RTAUDIO_DUMMY, airtaudio::api::Dummy::Create); + addInterface(airtaudio::type_dummy, airtaudio::api::Dummy::Create); #endif } -void airtaudio::Interface::addInterface(airtaudio::api::type _api, Api* (*_callbackCreate)()) { - m_apiAvaillable.push_back(std::pair(_api, _callbackCreate)); +void airtaudio::Interface::addInterface(enum airtaudio::type _api, Api* (*_callbackCreate)()) { + m_apiAvaillable.push_back(std::pair(_api, _callbackCreate)); } -enum airtaudio::errorType airtaudio::Interface::instanciate(airtaudio::api::type _api) { +enum airtaudio::error airtaudio::Interface::instanciate(enum airtaudio::type _api) { ATA_INFO("Instanciate API ..."); if (m_rtapi != nullptr) { ATA_WARNING("Interface already started ...!"); - return airtaudio::errorNone; + 
return airtaudio::error_none; } - if (_api != airtaudio::api::UNSPECIFIED) { + if (_api != airtaudio::type_undefined) { ATA_INFO("API specified : " << _api); // Attempt to open the specified API. openRtApi(_api); @@ -107,17 +107,17 @@ enum airtaudio::errorType airtaudio::Interface::instanciate(airtaudio::api::type if (m_rtapi->getDeviceCount() != 0) { ATA_INFO(" ==> api open"); } - return airtaudio::errorNone; + return airtaudio::error_none; } // No compiled support for specified API value. Issue a debug // warning and continue as if no API was specified. ATA_ERROR("RtAudio: no compiled support for specified API argument!"); - return airtaudio::errorFail; + return airtaudio::error_fail; } ATA_INFO("Auto choice API :"); // Iterate through the compiled APIs and return as soon as we find // one with at least one device or we reach the end of the list. - std::vector apis = getCompiledApi(); + std::vector apis = getCompiledApi(); ATA_INFO(" find : " << apis.size() << " apis."); for (auto &it : apis) { ATA_INFO("try open ..."); @@ -132,10 +132,10 @@ enum airtaudio::errorType airtaudio::Interface::instanciate(airtaudio::api::type } } if (m_rtapi != nullptr) { - return airtaudio::errorNone; + return airtaudio::error_none; } ATA_ERROR("RtAudio: no compiled API support found ... critical error!!"); - return airtaudio::errorFail; + return airtaudio::error_fail; } airtaudio::Interface::~Interface() { @@ -144,7 +144,7 @@ airtaudio::Interface::~Interface() { m_rtapi = nullptr; } -enum airtaudio::errorType airtaudio::Interface::openStream( +enum airtaudio::error airtaudio::Interface::openStream( airtaudio::StreamParameters* _outputParameters, airtaudio::StreamParameters* _inputParameters, audio::format _format, @@ -153,7 +153,7 @@ enum airtaudio::errorType airtaudio::Interface::openStream( airtaudio::AirTAudioCallback _callback, airtaudio::StreamOptions* _options) { if (m_rtapi == nullptr) { - return airtaudio::errorInputNull; + return airtaudio::error_inputNull; } return m_rtapi->openStream(_outputParameters, _inputParameters, diff --git a/airtaudio/Interface.h b/airtaudio/Interface.h index 0f25aee..53fe0fb 100644 --- a/airtaudio/Interface.h +++ b/airtaudio/Interface.h @@ -77,7 +77,7 @@ namespace airtaudio { /** * @brief Create an interface instance */ - enum airtaudio::errorType instanciate(enum airtaudio::type _api = airtaudio::type_undefined); + enum airtaudio::error instanciate(enum airtaudio::type _api = airtaudio::type_undefined); /** * @return the audio API specifier for the current instance of airtaudio. */ @@ -185,7 +185,7 @@ namespace airtaudio { * @param _errorCallback A client-defined function that will be invoked * when an error has occured. */ - enum airtaudio::errorType openStream(airtaudio::StreamParameters *_outputParameters, + enum airtaudio::error openStream(airtaudio::StreamParameters *_outputParameters, airtaudio::StreamParameters *_inputParameters, enum audio::format _format, uint32_t _sampleRate, @@ -199,9 +199,9 @@ namespace airtaudio { * If a stream is not open, this function issues a warning and * returns (no exception is thrown). */ - enum airtaudio::errorType closeStream() { + enum airtaudio::error closeStream() { if (m_rtapi == nullptr) { - return airtaudio::errorInputNull; + return airtaudio::error_inputNull; } return m_rtapi->closeStream(); } @@ -213,9 +213,9 @@ namespace airtaudio { * stream is not open. A warning is issued if the stream is already * running. 
*/ - enum airtaudio::errorType startStream() { + enum airtaudio::error startStream() { if (m_rtapi == nullptr) { - return airtaudio::errorInputNull; + return airtaudio::error_inputNull; } return m_rtapi->startStream(); } @@ -227,9 +227,9 @@ namespace airtaudio { * stream is not open. A warning is issued if the stream is already * stopped. */ - enum airtaudio::errorType stopStream() { + enum airtaudio::error stopStream() { if (m_rtapi == nullptr) { - return airtaudio::errorInputNull; + return airtaudio::error_inputNull; } return m_rtapi->stopStream(); } @@ -240,9 +240,9 @@ namespace airtaudio { * stream is not open. A warning is issued if the stream is already * stopped. */ - enum airtaudio::errorType abortStream() { + enum airtaudio::error abortStream() { if (m_rtapi == nullptr) { - return airtaudio::errorInputNull; + return airtaudio::error_inputNull; } return m_rtapi->abortStream(); } diff --git a/airtaudio/StreamOptions.h b/airtaudio/StreamOptions.h index 62fb78c..c4f8868 100644 --- a/airtaudio/StreamOptions.h +++ b/airtaudio/StreamOptions.h @@ -10,76 +10,15 @@ #define __AIRTAUDIO_STREAM_OPTION_H__ namespace airtaudio { - - /** - * @brief The structure for specifying stream options. - * - * The following flags can be OR'ed together to allow a client to - * make changes to the default stream behavior: - * - * - \e RTAUDIO_NONINTERLEAVED: Use non-interleaved buffers (default = interleaved). - * - \e RTAUDIO_MINIMIZE_LATENCY: Attempt to set stream parameters for lowest possible latency. - * - \e RTAUDIO_HOG_DEVICE: Attempt grab device for exclusive use. - * - \e RTAUDIO_SCHEDULE_REALTIME: Attempt to select realtime scheduling for callback thread. - * - \e RTAUDIO_ALSA_USE_DEFAULT: Use the "default" PCM device (ALSA only). - * - * By default, RtAudio streams pass and receive audio data from the - * client in an interleaved format. By passing the - * RTAUDIO_NONINTERLEAVED flag to the openStream() function, audio - * data will instead be presented in non-interleaved buffers. In - * this case, each buffer argument in the RtAudioCallback function - * will point to a single array of data, with \c nFrames samples for - * each channel concatenated back-to-back. For example, the first - * sample of data for the second channel would be located at index \c - * nFrames (assuming the \c buffer pointer was recast to the correct - * data type for the stream). - * - * Certain audio APIs offer a number of parameters that influence the - * I/O latency of a stream. By default, RtAudio will attempt to set - * these parameters internally for robust (glitch-free) performance - * (though some APIs, like Windows Direct Sound, make this difficult). - * By passing the RTAUDIO_MINIMIZE_LATENCY flag to the openStream() - * function, internal stream settings will be influenced in an attempt - * to minimize stream latency, though possibly at the expense of stream - * performance. - * - * If the RTAUDIO_HOG_DEVICE flag is set, RtAudio will attempt to - * open the input and/or output stream device(s) for exclusive use. - * Note that this is not possible with all supported audio APIs. - * - * If the RTAUDIO_SCHEDULE_REALTIME flag is set, RtAudio will attempt - * to select realtime scheduling (round-robin) for the callback thread. - * The \c priority parameter will only be used if the RTAUDIO_SCHEDULE_REALTIME - * flag is set. It defines the thread's realtime priority. - * - * If the RTAUDIO_ALSA_USE_DEFAULT flag is set, RtAudio will attempt to - * open the "default" PCM device when using the ALSA API. 
Note that this - * will override any specified input or output device id. - * - * The \c numberOfBuffers parameter can be used to control stream - * latency in the Windows DirectSound, Linux OSS, and Linux Alsa APIs - * only. A value of two is usually the smallest allowed. Larger - * numbers can potentially result in more robust stream performance, - * though likely at the cost of stream latency. The value set by the - * user is replaced during execution of the RtAudio::openStream() - * function by the value actually used by the system. - * - * The \c streamName parameter can be used to set the client name - * when using the Jack API. By default, the client name is set to - * RtApiJack. However, if you wish to create multiple instances of - * RtAudio with Jack, each instance must have a unique client name. - */ class StreamOptions { public: - airtaudio::streamFlags flags; //!< A bit-mask of stream flags (RTAUDIO_NONINTERLEAVED, RTAUDIO_MINIMIZE_LATENCY, RTAUDIO_HOG_DEVICE, RTAUDIO_ALSA_USE_DEFAULT). + airtaudio::Flags flags; //!< A bit-mask of stream flags uint32_t numberOfBuffers; //!< Number of stream buffers. std::string streamName; //!< A stream name (currently used only in Jack). - int32_t priority; //!< Scheduling priority of callback thread (only used with flag RTAUDIO_SCHEDULE_REALTIME). // Default constructor. StreamOptions() : - flags(0), - numberOfBuffers(0), - priority(0) {} + flags(), + numberOfBuffers(0){} }; }; diff --git a/airtaudio/api/Alsa.cpp b/airtaudio/api/Alsa.cpp index 5fbeb62..9e5ec8d 100644 --- a/airtaudio/api/Alsa.cpp +++ b/airtaudio/api/Alsa.cpp @@ -35,6 +35,8 @@ struct AlsaHandle { AlsaHandle() : synchronized(false), runnable(false) { + handles[0] = nullptr; + handles[1] = nullptr; xrun[0] = false; xrun[1] = false; } @@ -47,7 +49,7 @@ airtaudio::api::Alsa::Alsa() { } airtaudio::api::Alsa::~Alsa() { - if (m_stream.state != STREAM_CLOSED) { + if (m_stream.state != airtaudio::state_closed) { closeStream(); } } @@ -64,16 +66,16 @@ uint32_t airtaudio::api::Alsa::getDeviceCount() { sprintf(name, "hw:%d", card); result = snd_ctl_open(&handle, name, 0); if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror(result) << "."); - // TODO : Return error airtaudio::errorWarning; + ATA_ERROR("control open, card = " << card << ", " << snd_strerror(result) << "."); + // TODO : Return error airtaudio::error_warning; goto nextcard; } subdevice = -1; while(1) { result = snd_ctl_pcm_next_device(handle, &subdevice); if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror(result) << "."); - // TODO : Return error airtaudio::errorWarning; + ATA_ERROR("control next device, card = " << card << ", " << snd_strerror(result) << "."); + // TODO : Return error airtaudio::error_warning; break; } if (subdevice < 0) { @@ -107,14 +109,14 @@ airtaudio::DeviceInfo airtaudio::api::Alsa::getDeviceInfo(uint32_t _device) { sprintf(name, "hw:%d", card); result = snd_ctl_open(&chandle, name, SND_CTL_NONBLOCK); if (result < 0) { - ATA_WARNING("airtaudio::api::Alsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror(result) << "."); + ATA_WARNING("control open, card = " << card << ", " << snd_strerror(result) << "."); goto nextcard; } subdevice = -1; while(1) { result = snd_ctl_pcm_next_device(chandle, &subdevice); if (result < 0) { - ATA_WARNING("airtaudio::api::Alsa::getDeviceInfo: control next device, card = " << card << ", " << 
snd_strerror(result) << "."); + ATA_WARNING("control next device, card = " << card << ", " << snd_strerror(result) << "."); break; } if (subdevice < 0) { @@ -139,26 +141,26 @@ airtaudio::DeviceInfo airtaudio::api::Alsa::getDeviceInfo(uint32_t _device) { nDevices++; } if (nDevices == 0) { - ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: no devices found!"); - // TODO : airtaudio::errorInvalidUse; + ATA_ERROR("no devices found!"); + // TODO : airtaudio::error_invalidUse; return info; } if (_device >= nDevices) { - ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: device ID is invalid!"); - // TODO : airtaudio::errorInvalidUse; + ATA_ERROR("device ID is invalid!"); + // TODO : airtaudio::error_invalidUse; return info; } foundDevice: // If a stream is already open, we cannot probe the stream devices. // Thus, use the saved results. - if ( m_stream.state != STREAM_CLOSED + if ( m_stream.state != airtaudio::state_closed && ( m_stream.device[0] == _device || m_stream.device[1] == _device)) { snd_ctl_close(chandle); if (_device >= m_devices.size()) { - ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: device ID was not present before stream was opened."); - // TODO : return airtaudio::errorWarning; + ATA_ERROR("device ID was not present before stream was opened."); + // TODO : return airtaudio::error_warning; return info; } return m_devices[ _device ]; @@ -184,16 +186,16 @@ foundDevice: } result = snd_pcm_open(&phandle, name, stream, openMode | SND_PCM_NONBLOCK); if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << "."); - // TODO : Return airtaudio::errorWarning; + ATA_ERROR("snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << "."); + // TODO : Return airtaudio::error_warning; goto captureProbe; } // The device is open ... fill the parameter structure. result = snd_pcm_hw_params_any(phandle, params); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << "."); - // TODO : Return airtaudio::errorWarning; + ATA_ERROR("snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << "."); + // TODO : Return airtaudio::error_warning; goto captureProbe; } // Get output channel information. 
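For readers unfamiliar with the ALSA calls used in getDeviceInfo(), here is a stand-alone sketch (not from the patch) of the probe pattern the code above follows: non-blocking snd_pcm_open() on a "hw:<card>,<device>" name, snd_pcm_hw_params_any() to snapshot the hardware capabilities, then querying a limit. Names and error handling are simplified.

```cpp
#include <alsa/asoundlib.h>

// Illustrative version of the channel probe done in getDeviceInfo().
static unsigned int probeMaxChannels(const char* deviceName, snd_pcm_stream_t direction) {
	snd_pcm_t* pcm = nullptr;
	if (snd_pcm_open(&pcm, deviceName, direction, SND_PCM_NONBLOCK) < 0) {
		return 0; // device busy or direction not supported
	}
	snd_pcm_hw_params_t* params = nullptr;
	snd_pcm_hw_params_alloca(&params);          // stack-allocated hw_params container
	unsigned int channels = 0;
	if (snd_pcm_hw_params_any(pcm, params) >= 0) {
		snd_pcm_hw_params_get_channels_max(params, &channels);
	}
	snd_pcm_close(pcm);
	return channels;
}
// e.g. probeMaxChannels("hw:0,0", SND_PCM_STREAM_PLAYBACK)
```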
@@ -201,8 +203,8 @@ foundDevice: result = snd_pcm_hw_params_get_channels_max(params, &value); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror(result) << "."); - // TODO : Return airtaudio::errorWarning; + ATA_ERROR("error getting device (" << name << ") output channels, " << snd_strerror(result) << "."); + // TODO : Return airtaudio::error_warning; goto captureProbe; } info.outputChannels = value; @@ -225,8 +227,8 @@ captureProbe: } result = snd_pcm_open(&phandle, name, stream, openMode | SND_PCM_NONBLOCK); if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << "."); - // TODO : Return airtaudio::errorWarning; + ATA_ERROR("snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << "."); + // TODO : Return airtaudio::error_warning; if (info.outputChannels == 0) { return info; } @@ -236,8 +238,8 @@ captureProbe: result = snd_pcm_hw_params_any(phandle, params); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << "."); - // TODO : Return airtaudio::errorWarning; + ATA_ERROR("snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << "."); + // TODO : Return airtaudio::error_warning; if (info.outputChannels == 0) { return info; } @@ -246,8 +248,8 @@ captureProbe: result = snd_pcm_hw_params_get_channels_max(params, &value); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror(result) << "."); - // TODO : Return airtaudio::errorWarning; + ATA_ERROR("error getting device (" << name << ") input channels, " << snd_strerror(result) << "."); + // TODO : Return airtaudio::error_warning; if (info.outputChannels == 0) { return info; } @@ -281,29 +283,29 @@ probeParameters: snd_pcm_info_set_stream(pcminfo, stream); result = snd_pcm_open(&phandle, name, stream, openMode | SND_PCM_NONBLOCK); if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << "."); - // TODO : Return airtaudio::errorWarning; + ATA_ERROR("snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << "."); + // TODO : Return airtaudio::error_warning; return info; } // The device is open ... fill the parameter structure. result = snd_pcm_hw_params_any(phandle, params); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << "."); - // TODO : Return airtaudio::errorWarning; + ATA_ERROR("snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << "."); + // TODO : Return airtaudio::error_warning; return info; } // Test our discrete set of sample rate values. 
info.sampleRates.clear(); - for (uint32_t i=0; iflags & airtaudio::ALSA_USE_DEFAULT) { - snprintf(name, sizeof(name), "%s", "default"); - } else { - // Count cards and devices - card = -1; - snd_card_next(&card); - while (card >= 0) { - sprintf(name, "hw:%d", card); - result = snd_ctl_open(&chandle, name, SND_CTL_NONBLOCK); - if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror(result) << "."); - return false; - } - subdevice = -1; - while(1) { - result = snd_ctl_pcm_next_device(chandle, &subdevice); - if (result < 0) break; - if (subdevice < 0) break; - if (nDevices == _device) { - sprintf(name, "hw:%d,%d", card, subdevice); - snd_ctl_close(chandle); - goto foundDevice; - } - nDevices++; - } - snd_ctl_close(chandle); - snd_card_next(&card); + // Count cards and devices + card = -1; + // NOTE : Find the device name : [BEGIN] + snd_card_next(&card); + while (card >= 0) { + sprintf(name, "hw:%d", card); + result = snd_ctl_open(&chandle, name, SND_CTL_NONBLOCK); + if (result < 0) { + ATA_ERROR("control open, card = " << card << ", " << snd_strerror(result) << "."); + return false; } - result = snd_ctl_open(&chandle, "default", SND_CTL_NONBLOCK); - if (result == 0) { + subdevice = -1; + while(1) { + result = snd_ctl_pcm_next_device(chandle, &subdevice); + if (result < 0) break; + if (subdevice < 0) break; if (nDevices == _device) { - strcpy(name, "default"); + sprintf(name, "hw:%d,%d", card, subdevice); + snd_ctl_close(chandle); goto foundDevice; } nDevices++; } - if (nDevices == 0) { - // This should not happen because a check is made before this function is called. - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: no devices found!"); - return false; - } - if (_device >= nDevices) { - // This should not happen because a check is made before this function is called. - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: device ID is invalid!"); - return false; - } + snd_ctl_close(chandle); + snd_card_next(&card); } + result = snd_ctl_open(&chandle, "default", SND_CTL_NONBLOCK); + if (result == 0) { + if (nDevices == _device) { + strcpy(name, "default"); + goto foundDevice; + } + nDevices++; + } + if (nDevices == 0) { + // This should not happen because a check is made before this function is called. + ATA_ERROR("no devices found!"); + return false; + } + if (_device >= nDevices) { + // This should not happen because a check is made before this function is called. + ATA_ERROR("device ID is invalid!"); + return false; + } + // NOTE : Find the device name : [ END ] foundDevice: // The getDeviceInfo() function will not work for a device that is // already open. Thus, we'll probe the system before opening a // stream and save the results for use by getDeviceInfo(). 
- if ( _mode == OUTPUT - || ( _mode == INPUT - && m_stream.mode != OUTPUT)) { + if ( _mode == airtaudio::mode_output + || ( _mode == airtaudio::mode_input + && m_stream.mode != airtaudio::mode_output)) { // only do once this->saveDeviceInfo(); } snd_pcm_stream_t stream; - if (_mode == OUTPUT) { + if (_mode == airtaudio::mode_output) { stream = SND_PCM_STREAM_PLAYBACK; } else { stream = SND_PCM_STREAM_CAPTURE; @@ -442,10 +442,10 @@ foundDevice: int32_t openMode = SND_PCM_ASYNC; result = snd_pcm_open(&phandle, name, stream, openMode); if (result < 0) { - if (_mode == OUTPUT) { - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: pcm device (" << name << ") won't open for output."); + if (_mode == airtaudio::mode_output) { + ATA_ERROR("pcm device (" << name << ") won't open for output."); } else { - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: pcm device (" << name << ") won't open for input."); + ATA_ERROR("pcm device (" << name << ") won't open for input."); } return false; } @@ -455,33 +455,20 @@ foundDevice: result = snd_pcm_hw_params_any(phandle, hw_params); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror(result) << "."); + ATA_ERROR("error getting pcm device (" << name << ") parameters, " << snd_strerror(result) << "."); return false; } - // Set access ... check user preference. - if ( _options != nullptr - && _options->flags & airtaudio::NONINTERLEAVED) { - m_stream.userInterleaved = false; + // Open stream all time in interleave mode (by default): (open in non interleave if we have no choice + result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED); + if (result < 0) { result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED); - if (result < 0) { - result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED); - m_stream.deviceInterleaved[_mode] = true; - } else { - m_stream.deviceInterleaved[_mode] = false; - } + m_stream.deviceInterleaved[modeToIdTable(_mode)] = false; } else { - m_stream.userInterleaved = true; - result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED); - if (result < 0) { - result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED); - m_stream.deviceInterleaved[_mode] = false; - } else { - m_stream.deviceInterleaved[_mode] = true; - } + m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; } if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror(result) << "."); + ATA_ERROR("error setting pcm device (" << name << ") access, " << snd_strerror(result) << "."); return false; } // Determine how to set the device format. @@ -501,7 +488,7 @@ foundDevice: deviceFormat = SND_PCM_FORMAT_FLOAT64; } if (snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) { - m_stream.deviceFormat[_mode] = _format; + m_stream.deviceFormat[modeToIdTable(_mode)] = _format; } else { // If we get here, no supported format was found. 
snd_pcm_close(phandle); @@ -512,18 +499,18 @@ foundDevice: result = snd_pcm_hw_params_set_format(phandle, hw_params, deviceFormat); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror(result) << "."); + ATA_ERROR("error setting pcm device (" << name << ") data format, " << snd_strerror(result) << "."); return false; } // Determine whether byte-swaping is necessary. - m_stream.doByteSwap[_mode] = false; + m_stream.doByteSwap[modeToIdTable(_mode)] = false; if (deviceFormat != SND_PCM_FORMAT_S8) { result = snd_pcm_format_cpu_endian(deviceFormat); if (result == 0) { - m_stream.doByteSwap[_mode] = true; + m_stream.doByteSwap[modeToIdTable(_mode)] = true; } else if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror(result) << "."); + ATA_ERROR("error getting pcm device (" << name << ") endian-ness, " << snd_strerror(result) << "."); return false; } } @@ -531,37 +518,37 @@ foundDevice: result = snd_pcm_hw_params_set_rate_near(phandle, hw_params, (uint32_t*) &_sampleRate, 0); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror(result) << "."); + ATA_ERROR("error setting sample rate on device (" << name << "), " << snd_strerror(result) << "."); return false; } // Determine the number of channels for this device. We support a possible // minimum device channel number > than the value requested by the user. - m_stream.nUserChannels[_mode] = _channels; + m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; uint32_t value; result = snd_pcm_hw_params_get_channels_max(hw_params, &value); uint32_t deviceChannels = value; if ( result < 0 || deviceChannels < _channels + _firstChannel) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror(result) << "."); + ATA_ERROR("requested channel parameters not supported by device (" << name << "), " << snd_strerror(result) << "."); return false; } result = snd_pcm_hw_params_get_channels_min(hw_params, &value); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror(result) << "."); + ATA_ERROR("error getting minimum channels for device (" << name << "), " << snd_strerror(result) << "."); return false; } deviceChannels = value; if (deviceChannels < _channels + _firstChannel) { deviceChannels = _channels + _firstChannel; } - m_stream.nDeviceChannels[_mode] = deviceChannels; + m_stream.nDeviceChannels[modeToIdTable(_mode)] = deviceChannels; // Set the device channels. result = snd_pcm_hw_params_set_channels(phandle, hw_params, deviceChannels); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror(result) << "."); + ATA_ERROR("error setting channels for device (" << name << "), " << snd_strerror(result) << "."); return false; } // Set the buffer (or period) size. 
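Aside (illustrative, not from the patch): the snd_pcm_hw_params_set_*_near() calls used in this function negotiate in place, i.e. the requested value is overwritten with the closest value the hardware accepts. That is why the code re-reads _sampleRate and periodSize after the calls instead of trusting what was asked for. A minimal sketch of the pattern:

```cpp
#include <alsa/asoundlib.h>

// Illustrative: 'rate' goes in as the wish and comes back as the granted rate.
static bool negotiateRate(snd_pcm_t* pcm, snd_pcm_hw_params_t* params, unsigned int& rate) {
	int err = snd_pcm_hw_params_set_rate_near(pcm, params, &rate, 0);
	return err >= 0; // on success, 'rate' now holds the nearest supported value
}
```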
@@ -570,26 +557,38 @@ foundDevice: result = snd_pcm_hw_params_set_period_size_near(phandle, hw_params, &periodSize, &dir); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror(result) << "."); + ATA_ERROR("error setting period size for device (" << name << "), " << snd_strerror(result) << "."); return false; } *_bufferSize = periodSize; // Set the buffer number, which in ALSA is referred to as the "period". uint32_t periods = 0; - if (_options && _options->flags & airtaudio::MINIMIZE_LATENCY) periods = 2; - if (_options && _options->numberOfBuffers > 0) periods = _options->numberOfBuffers; - if (periods < 2) periods = 4; // a fairly safe default value + if ( _options != nullptr + && _options->flags.m_minimizeLatency == true) { + periods = 2; + } + /* TODO : Chouse the number of low level buffer ... + if ( _options != nullptr + && _options->numberOfBuffers > 0) { + periods = _options->numberOfBuffers; + } + */ + if (periods < 2) { + periods = 4; // a fairly safe default value + } result = snd_pcm_hw_params_set_periods_near(phandle, hw_params, &periods, &dir); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror(result) << "."); + ATA_ERROR("error setting periods for device (" << name << "), " << snd_strerror(result) << "."); return false; } // If attempting to setup a duplex stream, the bufferSize parameter // MUST be the same in both directions! - if (m_stream.mode == OUTPUT && _mode == INPUT && *_bufferSize != m_stream.bufferSize) { + if ( m_stream.mode == airtaudio::mode_output + && _mode == airtaudio::mode_input + && *_bufferSize != m_stream.bufferSize) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ")."); + ATA_ERROR("system error setting buffer size for duplex stream on device (" << name << ")."); return false; } m_stream.bufferSize = *_bufferSize; @@ -597,7 +596,7 @@ foundDevice: result = snd_pcm_hw_params(phandle, hw_params); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror(result) << "."); + ATA_ERROR("error installing hardware configuration on device (" << name << "), " << snd_strerror(result) << "."); return false; } // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns. 
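A caller-side sketch of how the period selection above is driven through the reworked StreamOptions (the airtaudio::Flags member m_minimizeLatency is taken from this patch; the rest of the Flags class and the include path are assumed):

```cpp
#include <airtaudio/Interface.h>

// Illustrative: with the old bitmask gone, latency hints are plain members on Flags.
static void configureLowLatency(airtaudio::StreamOptions& options) {
	options.flags.m_minimizeLatency = true; // maps to periods = 2 in probeDeviceOpen()
	options.numberOfBuffers = 0;            // not used for the period count yet (see the TODO above)
	options.streamName = "example-client";  // only meaningful for the Jack backend
}
```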
@@ -618,50 +617,49 @@ foundDevice: result = snd_pcm_sw_params(phandle, sw_params); if (result < 0) { snd_pcm_close(phandle); - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror(result) << "."); + ATA_ERROR("error installing software configuration on device (" << name << "), " << snd_strerror(result) << "."); return false; } // Set flags for buffer conversion - m_stream.doConvertBuffer[_mode] = false; - if (m_stream.userFormat != m_stream.deviceFormat[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode] - && m_stream.nUserChannels[_mode] > 1) { - m_stream.doConvertBuffer[_mode] = true; + if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false + && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } // Allocate the ApiHandle if necessary and then save. - AlsaHandle *apiInfo = 0; - if (m_stream.apiHandle == 0) { + AlsaHandle *apiInfo = nullptr; + if (m_stream.apiHandle == nullptr) { apiInfo = (AlsaHandle *) new AlsaHandle; if (apiInfo == nullptr) { - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error allocating AlsaHandle memory."); + ATA_ERROR("error allocating AlsaHandle memory."); goto error; } m_stream.apiHandle = (void *) apiInfo; - apiInfo->handles[0] = 0; - apiInfo->handles[1] = 0; } else { apiInfo = (AlsaHandle *) m_stream.apiHandle; } - apiInfo->handles[_mode] = phandle; + apiInfo->handles[modeToIdTable(_mode)] = phandle; phandle = 0; // Allocate necessary internal buffers. 
uint64_t bufferBytes; - bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat); - m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[_mode] == nullptr) { - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error allocating user buffer memory."); + bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat); + m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { + ATA_ERROR("error allocating user buffer memory."); goto error; } - if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { bool makeBuffer = true; - bufferBytes = m_stream.nDeviceChannels[_mode] * audio::getFormatBytes(m_stream.deviceFormat[_mode]); - if (_mode == INPUT) { - if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) { + bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]); + if (_mode == airtaudio::mode_input) { + if ( m_stream.mode == airtaudio::mode_output + && m_stream.deviceBuffer) { uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]); if (bufferBytes <= bytesOut) { makeBuffer = false; @@ -676,31 +674,31 @@ foundDevice: } m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1); if (m_stream.deviceBuffer == nullptr) { - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error allocating device buffer memory."); + ATA_ERROR("error allocating device buffer memory."); goto error; } } } m_stream.sampleRate = _sampleRate; m_stream.nBuffers = periods; - m_stream.device[_mode] = _device; - m_stream.state = STREAM_STOPPED; + m_stream.device[modeToIdTable(_mode)] = _device; + m_stream.state = airtaudio::state_stopped; // Setup the buffer conversion information structure. - if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { setConvertInfo(_mode, _firstChannel); } // Setup thread if necessary. - if ( m_stream.mode == OUTPUT - && _mode == INPUT) { + if ( m_stream.mode == airtaudio::mode_output + && _mode == airtaudio::mode_input) { // We had already set up an output stream. - m_stream.mode = DUPLEX; + m_stream.mode = airtaudio::mode_duplex; // Link the streams if possible. 
apiInfo->synchronized = false; if (snd_pcm_link(apiInfo->handles[0], apiInfo->handles[1]) == 0) { apiInfo->synchronized = true; } else { - ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: unable to synchronize input and output devices."); - // TODO : airtaudio::errorWarning; + ATA_ERROR("unable to synchronize input and output devices."); + // TODO : airtaudio::error_warning; } } else { m_stream.mode = _mode; @@ -710,7 +708,7 @@ foundDevice: m_stream.callbackInfo.thread = new std::thread(alsaCallbackHandler, &m_stream.callbackInfo); if (m_stream.callbackInfo.thread == nullptr) { m_stream.callbackInfo.isRunning = false; - ATA_ERROR("airtaudio::api::Alsa::error creating callback thread!"); + ATA_ERROR("creating callback thread!"); goto error; } } @@ -740,19 +738,19 @@ error: free(m_stream.deviceBuffer); m_stream.deviceBuffer = 0; } - m_stream.state = STREAM_CLOSED; + m_stream.state = airtaudio::state_closed; return false; } -enum airtaudio::errorType airtaudio::api::Alsa::closeStream() { - if (m_stream.state == STREAM_CLOSED) { - ATA_ERROR("airtaudio::api::Alsa::closeStream(): no open stream to close!"); - return airtaudio::errorWarning; +enum airtaudio::error airtaudio::api::Alsa::closeStream() { + if (m_stream.state == airtaudio::state_closed) { + ATA_ERROR("no open stream to close!"); + return airtaudio::error_warning; } AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle; m_stream.callbackInfo.isRunning = false; m_stream.mutex.lock(); - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { apiInfo->runnable = true; apiInfo->runnable_cv.notify_one(); } @@ -760,14 +758,14 @@ enum airtaudio::errorType airtaudio::api::Alsa::closeStream() { if (m_stream.callbackInfo.thread != nullptr) { m_stream.callbackInfo.thread->join(); } - if (m_stream.state == STREAM_RUNNING) { - m_stream.state = STREAM_STOPPED; - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { + if (m_stream.state == airtaudio::state_running) { + m_stream.state = airtaudio::state_stopped; + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { snd_pcm_drop(apiInfo->handles[0]); } - if ( m_stream.mode == INPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_input + || m_stream.mode == airtaudio::mode_duplex) { snd_pcm_drop(apiInfo->handles[1]); } } @@ -792,138 +790,151 @@ enum airtaudio::errorType airtaudio::api::Alsa::closeStream() { free(m_stream.deviceBuffer); m_stream.deviceBuffer = 0; } - m_stream.mode = UNINITIALIZED; - m_stream.state = STREAM_CLOSED; - return airtaudio::errorNone; + m_stream.mode = airtaudio::mode_unknow; + m_stream.state = airtaudio::state_closed; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Alsa::startStream() { +enum airtaudio::error airtaudio::api::Alsa::startStream() { // This method calls snd_pcm_prepare if the device isn't already in that state. 
- if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_RUNNING) { - ATA_ERROR("airtaudio::api::Alsa::startStream(): the stream is already running!"); - return airtaudio::errorWarning; + if (m_stream.state == airtaudio::state_running) { + ATA_ERROR("the stream is already running!"); + return airtaudio::error_warning; } std::unique_lock lck(m_stream.mutex); int32_t result = 0; snd_pcm_state_t state; AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle; snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles; - if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { + if (handle[0] == nullptr) { + ATA_ERROR("send nullptr to alsa ..."); + if (handle[1] != nullptr) { + ATA_ERROR("note : 1 is not null"); + } + } state = snd_pcm_state(handle[0]); if (state != SND_PCM_STATE_PREPARED) { result = snd_pcm_prepare(handle[0]); if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::startStream: error preparing output pcm device, " << snd_strerror(result) << "."); + ATA_ERROR("error preparing output pcm device, " << snd_strerror(result) << "."); goto unlock; } } } - if ( ( m_stream.mode == INPUT - || m_stream.mode == DUPLEX) + if ( ( m_stream.mode == airtaudio::mode_input + || m_stream.mode == airtaudio::mode_duplex) && !apiInfo->synchronized) { + if (handle[1] == nullptr) { + ATA_ERROR("send nullptr to alsa ..."); + if (handle[0] != nullptr) { + ATA_ERROR("note : 0 is not null"); + } + } state = snd_pcm_state(handle[1]); if (state != SND_PCM_STATE_PREPARED) { result = snd_pcm_prepare(handle[1]); if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::startStream: error preparing input pcm device, " << snd_strerror(result) << "."); + ATA_ERROR("error preparing input pcm device, " << snd_strerror(result) << "."); goto unlock; } } } - m_stream.state = STREAM_RUNNING; + m_stream.state = airtaudio::state_running; unlock: apiInfo->runnable = true; apiInfo->runnable_cv.notify_one(); if (result >= 0) { - return airtaudio::errorNone; + return airtaudio::error_none; } - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } -enum airtaudio::errorType airtaudio::api::Alsa::stopStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Alsa::stopStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_STOPPED) { - ATA_ERROR("airtaudio::api::Alsa::stopStream(): the stream is already stopped!"); - return airtaudio::errorWarning; + if (m_stream.state == airtaudio::state_stopped) { + ATA_ERROR("the stream is already stopped!"); + return airtaudio::error_warning; } - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; std::unique_lock lck(m_stream.mutex); int32_t result = 0; AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle; snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles; - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { if (apiInfo->synchronized) { result = snd_pcm_drop(handle[0]); } else { result = snd_pcm_drain(handle[0]); } if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::stopStream: error draining output pcm device, " << snd_strerror(result) << "."); + ATA_ERROR("error draining output 
pcm device, " << snd_strerror(result) << "."); goto unlock; } } - if ( ( m_stream.mode == INPUT - || m_stream.mode == DUPLEX) + if ( ( m_stream.mode == airtaudio::mode_input + || m_stream.mode == airtaudio::mode_duplex) && !apiInfo->synchronized) { result = snd_pcm_drop(handle[1]); if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::stopStream: error stopping input pcm device, " << snd_strerror(result) << "."); + ATA_ERROR("error stopping input pcm device, " << snd_strerror(result) << "."); goto unlock; } } unlock: if (result >= 0) { - return airtaudio::errorNone; + return airtaudio::error_none; } - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } -enum airtaudio::errorType airtaudio::api::Alsa::abortStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Alsa::abortStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_STOPPED) { - ATA_ERROR("airtaudio::api::Alsa::abortStream(): the stream is already stopped!"); - return airtaudio::errorWarning; + if (m_stream.state == airtaudio::state_stopped) { + ATA_ERROR("the stream is already stopped!"); + return airtaudio::error_warning; } - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; std::unique_lock lck(m_stream.mutex); int32_t result = 0; AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle; snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles; - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { result = snd_pcm_drop(handle[0]); if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::abortStream: error aborting output pcm device, " << snd_strerror(result) << "."); + ATA_ERROR("error aborting output pcm device, " << snd_strerror(result) << "."); goto unlock; } } - if ( ( m_stream.mode == INPUT - || m_stream.mode == DUPLEX) + if ( ( m_stream.mode == airtaudio::mode_input + || m_stream.mode == airtaudio::mode_duplex) && !apiInfo->synchronized) { result = snd_pcm_drop(handle[1]); if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::abortStream: error aborting input pcm device, " << snd_strerror(result) << "."); + ATA_ERROR("error aborting input pcm device, " << snd_strerror(result) << "."); goto unlock; } } unlock: if (result >= 0) { - return airtaudio::errorNone; + return airtaudio::error_none; } - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } void airtaudio::api::Alsa::callbackEvent() { AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle; - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { std::unique_lock lck(m_stream.mutex); // TODO : Set this back .... /* @@ -931,23 +942,23 @@ void airtaudio::api::Alsa::callbackEvent() { apiInfo->runnable_cv.wait(lck); } */ - if (m_stream.state != STREAM_RUNNING) { + if (m_stream.state != airtaudio::state_running) { return; } } - if (m_stream.state == STREAM_CLOSED) { - ATA_CRITICAL("airtaudio::api::Alsa::callbackEvent(): the stream is closed ... this shouldn't happen!"); - return; // TODO : notify appl: airtaudio::errorWarning; + if (m_stream.state == airtaudio::state_closed) { + ATA_CRITICAL("the stream is closed ... 
this shouldn't happen!"); + return; // TODO : notify appl: airtaudio::error_warning; } int32_t doStopStream = 0; double streamTime = getStreamTime(); - airtaudio::streamStatus status = 0; - if (m_stream.mode != INPUT && apiInfo->xrun[0] == true) { - status |= airtaudio::OUTPUT_UNDERFLOW; + enum airtaudio::status status = airtaudio::status_ok; + if (m_stream.mode != airtaudio::mode_input && apiInfo->xrun[0] == true) { + status = airtaudio::status_underflow; apiInfo->xrun[0] = false; } - if (m_stream.mode != OUTPUT && apiInfo->xrun[1] == true) { - status |= airtaudio::INPUT_OVERFLOW; + if (m_stream.mode != airtaudio::mode_output && apiInfo->xrun[1] == true) { + status = airtaudio::status_overflow; apiInfo->xrun[1] = false; } doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[0], @@ -961,7 +972,7 @@ void airtaudio::api::Alsa::callbackEvent() { } std::unique_lock lck(m_stream.mutex); // The state might change while waiting on a mutex. - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { goto unlock; } int32_t result; @@ -971,8 +982,8 @@ void airtaudio::api::Alsa::callbackEvent() { snd_pcm_sframes_t frames; audio::format format; handle = (snd_pcm_t **) apiInfo->handles; - if ( m_stream.mode == airtaudio::api::INPUT - || m_stream.mode == airtaudio::api::DUPLEX) { + if ( m_stream.mode == airtaudio::mode_input + || m_stream.mode == airtaudio::mode_duplex) { // Setup parameters. if (m_stream.doConvertBuffer[1]) { buffer = m_stream.deviceBuffer; @@ -1001,15 +1012,15 @@ void airtaudio::api::Alsa::callbackEvent() { apiInfo->xrun[1] = true; result = snd_pcm_prepare(handle[1]); if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::callbackEvent: error preparing device after overrun, " << snd_strerror(result) << "."); + ATA_ERROR("error preparing device after overrun, " << snd_strerror(result) << "."); } } else { - ATA_ERROR("airtaudio::api::Alsa::callbackEvent: error, current state is " << snd_pcm_state_name(state) << ", " << snd_strerror(result) << "."); + ATA_ERROR("error, current state is " << snd_pcm_state_name(state) << ", " << snd_strerror(result) << "."); } } else { - ATA_ERROR("airtaudio::api::Alsa::callbackEvent: audio read error, " << snd_strerror(result) << "."); + ATA_ERROR("audio read error, " << snd_strerror(result) << "."); } - // TODO : Notify application ... airtaudio::errorWarning; + // TODO : Notify application ... airtaudio::error_warning; goto tryOutput; } // Do byte swapping if necessary. @@ -1028,8 +1039,8 @@ void airtaudio::api::Alsa::callbackEvent() { } tryOutput: - if ( m_stream.mode == airtaudio::api::OUTPUT - || m_stream.mode == airtaudio::api::DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { // Setup parameters and do buffer conversion if necessary. 
if (m_stream.doConvertBuffer[0]) { buffer = m_stream.deviceBuffer; @@ -1064,15 +1075,15 @@ tryOutput: apiInfo->xrun[0] = true; result = snd_pcm_prepare(handle[0]); if (result < 0) { - ATA_ERROR("airtaudio::api::Alsa::callbackEvent: error preparing device after underrun, " << snd_strerror(result) << "."); + ATA_ERROR("error preparing device after underrun, " << snd_strerror(result) << "."); } } else { - ATA_ERROR("airtaudio::api::Alsa::callbackEvent: error, current state is " << snd_pcm_state_name(state) << ", " << snd_strerror(result) << "."); + ATA_ERROR("error, current state is " << snd_pcm_state_name(state) << ", " << snd_strerror(result) << "."); } } else { - ATA_ERROR("airtaudio::api::Alsa::callbackEvent: audio write error, " << snd_strerror(result) << "."); + ATA_ERROR("audio write error, " << snd_strerror(result) << "."); } - // TODO : Notuify application airtaudio::errorWarning; + // TODO : Notuify application airtaudio::error_warning; goto unlock; } // Check stream latency diff --git a/airtaudio/api/Alsa.h b/airtaudio/api/Alsa.h index 67dfc50..e7faed7 100644 --- a/airtaudio/api/Alsa.h +++ b/airtaudio/api/Alsa.h @@ -22,10 +22,10 @@ namespace airtaudio { } uint32_t getDeviceCount(); airtaudio::DeviceInfo getDeviceInfo(uint32_t _device); - enum airtaudio::errorType closeStream(); - enum airtaudio::errorType startStream(); - enum airtaudio::errorType stopStream(); - enum airtaudio::errorType abortStream(); + enum airtaudio::error closeStream(); + enum airtaudio::error startStream(); + enum airtaudio::error stopStream(); + enum airtaudio::error abortStream(); // This function is intended for internal use only. It must be // public because it is called by the internal callback handler, // which is not a member of RtAudio. External use of this function diff --git a/airtaudio/api/Android.cpp b/airtaudio/api/Android.cpp index af64a76..9caf248 100644 --- a/airtaudio/api/Android.cpp +++ b/airtaudio/api/Android.cpp @@ -76,46 +76,46 @@ airtaudio::DeviceInfo airtaudio::api::Android::getDeviceInfo(uint32_t _device) { return m_devices[_device]; } -enum airtaudio::errorType airtaudio::api::Android::closeStream() { +enum airtaudio::error airtaudio::api::Android::closeStream() { ATA_INFO("Clese Stream"); // Can not close the stream now... - return airtaudio::errorNone; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Android::startStream() { +enum airtaudio::error airtaudio::api::Android::startStream() { ATA_INFO("Start Stream"); // Can not close the stream now... - return airtaudio::errorNone; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Android::stopStream() { +enum airtaudio::error airtaudio::api::Android::stopStream() { ATA_INFO("Stop stream"); ewol::Context& tmpContext = ewol::getContext(); tmpContext.audioCloseDevice(0); // Can not close the stream now... - return airtaudio::errorNone; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Android::abortStream() { +enum airtaudio::error airtaudio::api::Android::abortStream() { ATA_INFO("Abort Stream"); ewol::Context& tmpContext = ewol::getContext(); tmpContext.audioCloseDevice(0); // Can not close the stream now... 
- return airtaudio::errorNone;
+ return airtaudio::error_none;
 }
 void airtaudio::api::Android::callBackEvent(void* _data, int32_t _frameRate) {
 int32_t doStopStream = 0;
 double streamTime = getStreamTime();
- airtaudio::streamStatus status = 0;
- if (m_stream.doConvertBuffer[OUTPUT] == true) {
- doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[OUTPUT],
+ enum airtaudio::status status = airtaudio::status_ok;
+ if (m_stream.doConvertBuffer[modeToIdTable(airtaudio::mode_output)] == true) {
+ doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[modeToIdTable(airtaudio::mode_output)],
 nullptr,
 _frameRate,
 streamTime,
 status);
- convertBuffer((char*)_data, (char*)m_stream.userBuffer[OUTPUT], m_stream.convertInfo[OUTPUT]);
+ convertBuffer((char*)_data, (char*)m_stream.userBuffer[modeToIdTable(airtaudio::mode_output)], m_stream.convertInfo[modeToIdTable(airtaudio::mode_output)]);
 } else {
 doStopStream = m_stream.callbackInfo.callback(_data,
 nullptr,
@@ -142,7 +142,7 @@ void airtaudio::api::Android::androidCallBackEvent(void* _data,
 }
 bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device,
- airtaudio::api::StreamMode _mode,
+ airtaudio::mode _mode,
 uint32_t _channels,
 uint32_t _firstChannel,
 uint32_t _sampleRate,
@@ -150,12 +150,12 @@ bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device,
 uint32_t *_bufferSize,
 airtaudio::StreamOptions *_options) {
 ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
- if (_mode != OUTPUT) {
+ if (_mode != airtaudio::mode_output) {
 ATA_ERROR("Can not start a device input or duplex for Android ...");
 return false;
 }
 m_stream.userFormat = _format;
- m_stream.nUserChannels[_mode] = _channels;
+ m_stream.nUserChannels[modeToIdTable(_mode)] = _channels;
 ewol::Context& tmpContext = ewol::getContext();
 bool ret = false;
 if (_format == SINT8) {
@@ -165,36 +165,36 @@ bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device,
 }
 m_stream.bufferSize = 256;
 m_stream.sampleRate = _sampleRate;
- m_stream.doByteSwap[_mode] = false; // for endienness ...
+ m_stream.doByteSwap[modeToIdTable(_mode)] = false; // for endienness ...
 // TODO : For now, we write it in hard ==> to bu update later ...
- m_stream.deviceFormat[_mode] = SINT16; - m_stream.nDeviceChannels[_mode] = 2; - m_stream.deviceInterleaved[_mode] = true; + m_stream.deviceFormat[modeToIdTable(_mode)] = SINT16; + m_stream.nDeviceChannels[modeToIdTable(_mode)] = 2; + m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; - m_stream.doConvertBuffer[_mode] = false; - if (m_stream.userFormat != m_stream.deviceFormat[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode] - && m_stream.nUserChannels[_mode] > 1) { - m_stream.doConvertBuffer[_mode] = true; + if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false + && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.doConvertBuffer[_mode] == true) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)] == true) { // Allocate necessary internal buffers. - uint64_t bufferBytes = m_stream.nUserChannels[_mode] * m_stream.bufferSize * formatBytes(m_stream.userFormat); - m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[_mode] == nullptr) { + uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * formatBytes(m_stream.userFormat); + m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("airtaudio::api::Android::probeDeviceOpen: error allocating user buffer memory."); } setConvertInfo(_mode, _firstChannel); } - ATA_INFO("device format : " << m_stream.deviceFormat[_mode] << " user format : " << m_stream.userFormat); - ATA_INFO("device channels : " << m_stream.nDeviceChannels[_mode] << " user channels : " << m_stream.nUserChannels[_mode]); - ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[_mode]); + ATA_INFO("device format : " << m_stream.deviceFormat[modeToIdTable(_mode)] << " user format : " << m_stream.userFormat); + ATA_INFO("device channels : " << m_stream.nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_stream.nUserChannels[modeToIdTable(_mode)]); + ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[modeToIdTable(_mode)]); if (ret == false) { ATA_ERROR("Can not open device."); } diff --git a/airtaudio/api/Android.h b/airtaudio/api/Android.h index 0ec800c..618d530 100644 --- a/airtaudio/api/Android.h +++ b/airtaudio/api/Android.h @@ -15,15 +15,15 @@ namespace airtaudio { public: Android(); virtual ~Android(); - airtaudio::api::type getCurrentApi() { - return airtaudio::api::ANDROID_JAVA; + enum airtaudio::type getCurrentApi() { + return airtaudio::type_java; } uint32_t getDeviceCount(); airtaudio::DeviceInfo getDeviceInfo(uint32_t _device); - enum airtaudio::errorType closeStream(); - enum airtaudio::errorType startStream(); - enum airtaudio::errorType stopStream(); - enum airtaudio::errorType abortStream(); + enum airtaudio::error closeStream(); + enum airtaudio::error startStream(); + enum airtaudio::error stopStream(); + enum airtaudio::error abortStream(); // This 
function is intended for internal use only. It must be // public because it is called by the internal callback handler, // which is not a member of RtAudio. External use of this function @@ -33,7 +33,7 @@ namespace airtaudio { std::vector m_devices; void saveDeviceInfo(); bool probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, diff --git a/airtaudio/api/Asio.cpp b/airtaudio/api/Asio.cpp index be6aecb..2f26409 100644 --- a/airtaudio/api/Asio.cpp +++ b/airtaudio/api/Asio.cpp @@ -83,7 +83,7 @@ airtaudio::api::Asio::Asio() { } airtaudio::api::Asio::~Asio() { - if (m_stream.state != STREAM_CLOSED) { + if (m_stream.state != airtaudio::state_closed) { closeStream(); } if (m_coInitialized) { @@ -109,7 +109,7 @@ rtaudio::DeviceInfo airtaudio::api::Asio::getDeviceInfo(uint32_t _device) { return info; } // If a stream is already open, we cannot probe other devices. Thus, use the saved results. - if (m_stream.state != STREAM_CLOSED) { + if (m_stream.state != airtaudio::state_closed) { if (_device >= m_devices.size()) { ATA_ERROR("device ID was not present before stream was opened."); return info; @@ -213,7 +213,7 @@ void airtaudio::api::Asio::saveDeviceInfo() { } bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, @@ -221,8 +221,8 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, uint32_t* _bufferSize, airtaudio::StreamOptions *_options) { // For ASIO, a duplex stream MUST use the same driver. - if ( _mode == INPUT - && m_stream.mode == OUTPUT + if ( _mode == airtaudio::mode_input + && m_stream.mode == airtaudio::mode_output && m_stream.device[0] != _device) { ATA_ERROR("an ASIO duplex stream must use the same device for input and output!"); return false; @@ -234,8 +234,8 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, return false; } // Only load the driver once for duplex stream. - if ( _mode != INPUT - || m_stream.mode != OUTPUT) { + if ( _mode != airtaudio::mode_input + || m_stream.mode != airtaudio::mode_output) { // The getDeviceInfo() function will not work when a stream is open // because ASIO does not allow multiple devices to run at the same // time. Thus, we'll probe the system before opening a stream and @@ -259,17 +259,17 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, ATA_ERROR("error (" << getAsioErrorString(result) << ") getting channel count (" << driverName << ")."); return false; } - if ( ( _mode == OUTPUT + if ( ( _mode == airtaudio::mode_output && (_channels+_firstChannel) > (uint32_t) outputChannels) - || ( _mode == INPUT + || ( _mode == airtaudio::mode_input && (_channels+_firstChannel) > (uint32_t) inputChannels)) { drivers.removeCurrentDriver(); ATA_ERROR("driver (" << driverName << ") does not support requested channel count (" << _channels << ") + offset (" << _firstChannel << ")."); return false; } - m_stream.nDeviceChannels[_mode] = _channels; - m_stream.nUserChannels[_mode] = _channels; - m_stream.channelOffset[_mode] = _firstChannel; + m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels; + m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; + m_stream.channelOffset[modeToIdTable(_mode)] = _firstChannel; // Verify the sample rate is supported. 
result = ASIOCanSampleRate((ASIOSampleRate) _sampleRate); if (result != ASE_OK) { @@ -297,7 +297,7 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, // Determine the driver data type. ASIOChannelInfo channelInfo; channelInfo.channel = 0; - if (_mode == OUTPUT) { + if (_mode == airtaudio::mode_output) { channelInfo.isInput = false; } else { channelInfo.isInput = true; @@ -309,41 +309,41 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, return false; } // Assuming WINDOWS host is always little-endian. - m_stream.doByteSwap[_mode] = false; + m_stream.doByteSwap[modeToIdTable(_mode)] = false; m_stream.userFormat = _format; - m_stream.deviceFormat[_mode] = 0; + m_stream.deviceFormat[modeToIdTable(_mode)] = 0; if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB) { - m_stream.deviceFormat[_mode] = RTAUDIO_SINT16; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; if (channelInfo.type == ASIOSTInt16MSB) { - m_stream.doByteSwap[_mode] = true; + m_stream.doByteSwap[modeToIdTable(_mode)] = true; } } else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB) { - m_stream.deviceFormat[_mode] = RTAUDIO_SINT32; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; if (channelInfo.type == ASIOSTInt32MSB) { - m_stream.doByteSwap[_mode] = true; + m_stream.doByteSwap[modeToIdTable(_mode)] = true; } } else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB) { - m_stream.deviceFormat[_mode] = RTAUDIO_FLOAT32; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_FLOAT32; if (channelInfo.type == ASIOSTFloat32MSB) { - m_stream.doByteSwap[_mode] = true; + m_stream.doByteSwap[modeToIdTable(_mode)] = true; } } else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB) { - m_stream.deviceFormat[_mode] = RTAUDIO_FLOAT64; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_FLOAT64; if (channelInfo.type == ASIOSTFloat64MSB) { - m_stream.doByteSwap[_mode] = true; + m_stream.doByteSwap[modeToIdTable(_mode)] = true; } } else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB) { - m_stream.deviceFormat[_mode] = RTAUDIO_SINT24; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; if (channelInfo.type == ASIOSTInt24MSB) { - m_stream.doByteSwap[_mode] = true; + m_stream.doByteSwap[modeToIdTable(_mode)] = true; } } - if (m_stream.deviceFormat[_mode] == 0) { + if (m_stream.deviceFormat[modeToIdTable(_mode)] == 0) { drivers.removeCurrentDriver(); ATA_ERROR("driver (" << driverName << ") data format not supported by RtAudio."); return false; @@ -393,8 +393,8 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, // Set to an even multiple of granularity, rounding up. *_bufferSize = (*_bufferSize + granularity-1) / granularity * granularity; } - if ( _mode == INPUT - && m_stream.mode == OUTPUT + if ( _mode == airtaudio::mode_input + && m_stream.mode == airtaudio::mode_output && m_stream.bufferSize != *_bufferSize) { drivers.removeCurrentDriver(); ATA_ERROR("input/output buffersize discrepancy!"); @@ -402,14 +402,8 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, } m_stream.bufferSize = *_bufferSize; m_stream.nBuffers = 2; - if ( _options != nullptr - && _options->flags & RTAUDIO_NONINTERLEAVED) { - m_stream.userInterleaved = false; - } else { - m_stream.userInterleaved = true; - } // ASIO always uses non-interleaved buffers. 
- m_stream.deviceInterleaved[_mode] = false; + m_stream.deviceInterleaved[modeToIdTable(_mode)] = false; // Allocate, if necessary, our AsioHandle structure for the stream. AsioHandle *handle = (AsioHandle *) m_stream.apiHandle; if (handle == nullptr) { @@ -431,8 +425,8 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, // and output separately, we'll have to dispose of previously // created output buffers for a duplex stream. long inputLatency, outputLatency; - if ( _mode == INPUT - && m_stream.mode == OUTPUT) { + if ( _mode == airtaudio::mode_input + && m_stream.mode == airtaudio::mode_output) { ASIODisposeBuffers(); if (handle->bufferInfos == nullptr) { free(handle->bufferInfos); @@ -471,27 +465,27 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, } buffersAllocated = true; // Set flags for buffer conversion. - m_stream.doConvertBuffer[_mode] = false; - if (m_stream.userFormat != m_stream.deviceFormat[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode] - && m_stream.nUserChannels[_mode] > 1) { - m_stream.doConvertBuffer[_mode] = true; + if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false + && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } // Allocate necessary internal buffers uint64_t bufferBytes; - bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat); - m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[_mode] == nullptr) { + bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat); + m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating user buffer memory."); goto error; } - if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { bool makeBuffer = true; - bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]); - if (_mode == INPUT) { - if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) { + bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]); + if (_mode == airtaudio::mode_input) { + if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) { uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]); if (bufferBytes <= bytesOut) { makeBuffer = false; @@ -512,14 +506,14 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, } } m_stream.sampleRate = _sampleRate; - m_stream.device[_mode] = _device; - m_stream.state = STREAM_STOPPED; + m_stream.device[modeToIdTable(_mode)] = _device; + m_stream.state = airtaudio::state_stopped; asioCallbackInfo = &m_stream.callbackInfo; m_stream.callbackInfo.object = (void*)this; - if ( m_stream.mode == OUTPUT - && _mode == INPUT) { + if ( m_stream.mode == airtaudio::mode_output + && _mode == airtaudio::mode_input) { // We had already set up an output stream. 
- m_stream.mode = DUPLEX; + m_stream.mode = airtaudio::mode_duplex; } else { m_stream.mode = _mode; } @@ -534,7 +528,7 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device, // Setup the buffer conversion information structure. We don't use // buffers to do channel offsets, so we override that parameter // here. - if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { setConvertInfo(_mode, 0); } return true; @@ -566,13 +560,13 @@ error: return false; } -enum airtaudio::errorType airtaudio::api::Asio::closeStream() { - if (m_stream.state == STREAM_CLOSED) { +enum airtaudio::error airtaudio::api::Asio::closeStream() { + if (m_stream.state == airtaudio::state_closed) { ATA_ERROR("no open stream to close!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } - if (m_stream.state == STREAM_RUNNING) { - m_stream.state = STREAM_STOPPED; + if (m_stream.state == airtaudio::state_running) { + m_stream.state = airtaudio::state_stopped; ASIOStop(); } ASIODisposeBuffers(); @@ -596,20 +590,20 @@ enum airtaudio::errorType airtaudio::api::Asio::closeStream() { free(m_stream.deviceBuffer); m_stream.deviceBuffer = 0; } - m_stream.mode = UNINITIALIZED; - m_stream.state = STREAM_CLOSED; - return airtaudio::errorNone; + m_stream.mode = airtaudio::mode_unknow; + m_stream.state = airtaudio::state_closed; + return airtaudio::error_none; } bool stopThreadCalled = false; -enum airtaudio::errorType airtaudio::api::Asio::startStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Asio::startStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_RUNNING) { + if (m_stream.state == airtaudio::state_running) { ATA_ERROR("the stream is already running!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } AsioHandle *handle = (AsioHandle *) m_stream.apiHandle; ASIOError result = ASIOStart(); @@ -620,49 +614,49 @@ enum airtaudio::errorType airtaudio::api::Asio::startStream() { handle->drainCounter = 0; handle->internalDrain = false; ResetEvent(handle->condition); - m_stream.state = STREAM_RUNNING; + m_stream.state = airtaudio::state_running; asioXRun = false; unlock: stopThreadCalled = false; if (result == ASE_OK) { - return airtaudio::errorNone; + return airtaudio::error_none; } - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } -enum airtaudio::errorType airtaudio::api::Asio::stopStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Asio::stopStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } AsioHandle *handle = (AsioHandle *) m_stream.apiHandle; - if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) { + if (m_stream.mode == airtaudio::mode_output || m_stream.mode == airtaudio::mode_duplex) { if (handle->drainCounter == 0) { handle->drainCounter = 2; WaitForSingleObject(handle->condition, INFINITE); // block until signaled } } - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; ASIOError result = ASIOStop(); if (result != ASE_OK) { ATA_ERROR("error (" << getAsioErrorString(result) << ") stopping device."); } if 
 (result == ASE_OK) {
- return airtaudio::errorNone;
+ return airtaudio::error_none;
 }
- return airtaudio::errorSystemError;
+ return airtaudio::error_systemError;
 }
-enum airtaudio::errorType airtaudio::api::Asio::abortStream() {
- if (verifyStream() != airtaudio::errorNone) {
- return airtaudio::errorFail;
+enum airtaudio::error airtaudio::api::Asio::abortStream() {
+ if (verifyStream() != airtaudio::error_none) {
+ return airtaudio::error_fail;
 }
- if (m_stream.state == STREAM_STOPPED) {
+ if (m_stream.state == airtaudio::state_stopped) {
 ATA_ERROR("the stream is already stopped!");
- error(airtaudio::errorWarning);
+ error(airtaudio::error_warning);
 return;
 }
@@ -689,11 +683,11 @@ static unsigned __stdcall asioStopStream(void *_ptr) {
 }
 bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
- if ( m_stream.state == STREAM_STOPPED
- || m_stream.state == STREAM_STOPPING) {
+ if ( m_stream.state == airtaudio::state_stopped
+ || m_stream.state == airtaudio::state_stopping) {
 return true;
 }
- if (m_stream.state == STREAM_CLOSED) {
+ if (m_stream.state == airtaudio::state_closed) {
 ATA_ERROR("the stream is closed ... this shouldn't happen!");
 return false;
 }
@@ -701,7 +695,7 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
 AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
 // Check if we were draining the stream and signal if finished.
 if (handle->drainCounter > 3) {
- m_stream.state = STREAM_STOPPING;
+ m_stream.state = airtaudio::state_stopping;
 if (handle->internalDrain == false) {
 SetEvent(handle->condition);
 } else { // spawn a thread to stop the stream
@@ -716,12 +710,12 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
 if (handle->drainCounter == 0) {
 double streamTime = getStreamTime();
 rtaudio::streamStatus status = 0;
- if (m_stream.mode != INPUT && asioXRun == true) {
- status |= RTAUDIO_OUTPUT_UNDERFLOW;
+ if (m_stream.mode != airtaudio::mode_input && asioXRun == true) {
+ status |= airtaudio::status_underflow;
 asioXRun = false;
 }
- if (m_stream.mode != OUTPUT && asioXRun == true) {
- status |= RTAUDIO_INPUT_OVERFLOW;
+ if (m_stream.mode != airtaudio::mode_output && asioXRun == true) {
+ status |= airtaudio::status_overflow;
 asioXRun = false;
 }
 int32_t cbReturnValue = info->callback(m_stream.userBuffer[0],
@@ -730,7 +724,7 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
 streamTime,
 status);
 if (cbReturnValue == 2) {
- m_stream.state = STREAM_STOPPING;
+ m_stream.state = airtaudio::state_stopping;
 handle->drainCounter = 2;
 unsigned threadId;
 m_stream.callbackInfo.thread = _beginthreadex(nullptr,
@@ -747,8 +741,8 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
 }
 uint32_t nChannels, bufferBytes, i, j;
 nChannels = m_stream.nDeviceChannels[0] + m_stream.nDeviceChannels[1];
- if ( m_stream.mode == OUTPUT
- || m_stream.mode == DUPLEX) {
+ if ( m_stream.mode == airtaudio::mode_output
+ || m_stream.mode == airtaudio::mode_duplex) {
 bufferBytes = m_stream.bufferSize * formatBytes(m_stream.deviceFormat[0]);
 if (handle->drainCounter > 1) { // write zeros to the output stream
 for (i=0, j=0; iobject;
- enum airtaudio::errorType ret = object->stopStream()
- if (ret != airtaudio::errorNone) {
+ enum airtaudio::error ret = object->stopStream();
+ if (ret != airtaudio::error_none) {
 ATA_ERROR("error stop stream!");
 } else {
 ATA_ERROR("driver reports sample rate changed to " << _sRate << " ...
stream stopped!!!"); diff --git a/airtaudio/api/Asio.h b/airtaudio/api/Asio.h index d3cd01e..350dc8a 100644 --- a/airtaudio/api/Asio.h +++ b/airtaudio/api/Asio.h @@ -17,15 +17,15 @@ namespace airtaudio { public: Asio(); virtual ~Asio(); - airtaudio::api::type getCurrentApi() { + enum airtaudio::type getCurrentApi() { return airtaudio::WINDOWS_ASIO; } uint32_t getDeviceCount(); airtaudio::DeviceInfo getDeviceInfo(uint32_t _device); - enum airtaudio::errorType closeStream(); - enum airtaudio::errorType startStream(); - enum airtaudio::errorType stopStream(); - enum airtaudio::errorType abortStream(); + enum airtaudio::error closeStream(); + enum airtaudio::error startStream(); + enum airtaudio::error stopStream(); + enum airtaudio::error abortStream(); long getStreamLatency(); // This function is intended for internal use only. It must be // public because it is called by the internal callback handler, @@ -37,7 +37,7 @@ namespace airtaudio { void saveDeviceInfo(); bool m_coInitialized; bool probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, diff --git a/airtaudio/api/Core.cpp b/airtaudio/api/Core.cpp index 0100d5d..9066915 100644 --- a/airtaudio/api/Core.cpp +++ b/airtaudio/api/Core.cpp @@ -95,7 +95,7 @@ airtaudio::api::Core::~Core() { // The subclass destructor gets called before the base class // destructor, so close an existing stream before deallocating // apiDeviceId memory. - if (m_stream.state != STREAM_CLOSED) { + if (m_stream.state != airtaudio::state_closed) { closeStream(); } } @@ -434,7 +434,7 @@ static OSStatus rateListener(AudioObjectID _inDevice, } bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, @@ -473,7 +473,7 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, AudioDeviceID id = deviceList[ _device ]; // Setup for stream mode. bool isInput = false; - if (_mode == INPUT) { + if (_mode == airtaudio::mode_input) { isInput = true; property.mScope = kAudioDevicePropertyScopeInput; } else { @@ -582,7 +582,7 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, *_bufferSize = (uint64_t) bufferRange.mMaximum; } if ( _options != nullptr - && _options->flags & MINIMIZE_LATENCY) { + && _options->flags.m_minimizeLatency == true) { *_bufferSize = (uint64_t) bufferRange.mMinimum; } // Set the buffer size. For multiple streams, I'm assuming we only @@ -598,8 +598,8 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, // If attempting to setup a duplex stream, the bufferSize parameter // MUST be the same in both directions! *_bufferSize = theSize; - if ( m_stream.mode == OUTPUT - && _mode == INPUT + if ( m_stream.mode == airtaudio::mode_output + && _mode == airtaudio::mode_input && *_bufferSize != m_stream.bufferSize) { ATA_ERROR("system error setting buffer size for duplex stream on device (" << _device << ")."); return false; @@ -772,44 +772,38 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, // Byte-swapping: According to AudioHardware.h, the stream data will // always be presented in native-endian format, so we should never // need to byte swap. - m_stream.doByteSwap[_mode] = false; + m_stream.doByteSwap[modeToIdTable(_mode)] = false; // From the CoreAudio documentation, PCM data must be supplied as // 32-bit floats. 
m_stream.userFormat = _format; - m_stream.deviceFormat[_mode] = FLOAT32; + m_stream.deviceFormat[modeToIdTable(_mode)] = FLOAT32; if (streamCount == 1) { - m_stream.nDeviceChannels[_mode] = description.mChannelsPerFrame; + m_stream.nDeviceChannels[modeToIdTable(_mode)] = description.mChannelsPerFrame; } else { // multiple streams - m_stream.nDeviceChannels[_mode] = _channels; + m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels; } - m_stream.nUserChannels[_mode] = _channels; - m_stream.channelOffset[_mode] = channelOffset; // offset within a CoreAudio stream - if ( _options != nullptr - && _options->flags & NONINTERLEAVED) { - m_stream.userInterleaved = false; - } else { - m_stream.userInterleaved = true; - } - m_stream.deviceInterleaved[_mode] = true; + m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; + m_stream.channelOffset[modeToIdTable(_mode)] = channelOffset; // offset within a CoreAudio stream + m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; if (monoMode == true) { - m_stream.deviceInterleaved[_mode] = false; + m_stream.deviceInterleaved[modeToIdTable(_mode)] = false; } // Set flags for buffer conversion. - m_stream.doConvertBuffer[_mode] = false; - if (m_stream.userFormat != m_stream.deviceFormat[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } if (streamCount == 1) { - if ( m_stream.nUserChannels[_mode] > 1 - && m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + if ( m_stream.nUserChannels[modeToIdTable(_mode)] > 1 + && m_stream.deviceInterleaved[modeToIdTable(_mode)] == false) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - } else if (monoMode && m_stream.userInterleaved) { - m_stream.doConvertBuffer[_mode] = true; + } else if (monoMode) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } // Allocate our CoreHandle structure for the stream. CoreHandle *handle = 0; @@ -823,28 +817,28 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, } else { handle = (CoreHandle *) m_stream.apiHandle; } - handle->iStream[_mode] = firstStream; - handle->nStreams[_mode] = streamCount; - handle->id[_mode] = id; + handle->iStream[modeToIdTable(_mode)] = firstStream; + handle->nStreams[modeToIdTable(_mode)] = streamCount; + handle->id[modeToIdTable(_mode)] = id; // Allocate necessary internal buffers. 
uint64_t bufferBytes; - bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat); - // m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1); - m_stream.userBuffer[_mode] = (char *) malloc(bufferBytes * sizeof(char)); - memset(m_stream.userBuffer[_mode], 0, bufferBytes * sizeof(char)); - if (m_stream.userBuffer[_mode] == nullptr) { + bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat); + // m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + m_stream.userBuffer[modeToIdTable(_mode)] = (char *) malloc(bufferBytes * sizeof(char)); + memset(m_stream.userBuffer[modeToIdTable(_mode)], 0, bufferBytes * sizeof(char)); + if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating user buffer memory."); goto error; } // If possible, we will make use of the CoreAudio stream buffers as // "device buffers". However, we can't do this if using multiple // streams. - if ( m_stream.doConvertBuffer[_mode] - && handle->nStreams[_mode] > 1) { + if ( m_stream.doConvertBuffer[modeToIdTable(_mode)] + && handle->nStreams[modeToIdTable(_mode)] > 1) { bool makeBuffer = true; - bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]); - if (_mode == INPUT) { - if ( m_stream.mode == OUTPUT + bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]); + if (_mode == airtaudio::mode_input) { + if ( m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) { uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]); if (bufferBytes <= bytesOut) { @@ -866,25 +860,25 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, } } m_stream.sampleRate = _sampleRate; - m_stream.device[_mode] = _device; - m_stream.state = STREAM_STOPPED; + m_stream.device[modeToIdTable(_mode)] = _device; + m_stream.state = airtaudio::state_stopped; m_stream.callbackInfo.object = (void *) this; // Setup the buffer conversion information structure. - if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { if (streamCount > 1) { setConvertInfo(_mode, 0); } else { setConvertInfo(_mode, channelOffset); } } - if ( _mode == INPUT - && m_stream.mode == OUTPUT + if ( _mode == airtaudio::mode_input + && m_stream.mode == airtaudio::mode_output && m_stream.device[0] == _device) { // Only one callback procedure per device. 
- m_stream.mode = DUPLEX; + m_stream.mode = airtaudio::mode_duplex; } else { #if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5) - result = AudioDeviceCreateIOProcID(id, callbackHandler, (void *) &m_stream.callbackInfo, &handle->procId[_mode]); + result = AudioDeviceCreateIOProcID(id, callbackHandler, (void *) &m_stream.callbackInfo, &handle->procId[modeToIdTable(_mode)]); #else // deprecated in favor of AudioDeviceCreateIOProcID() result = AudioDeviceAddIOProc(id, callbackHandler, (void *) &m_stream.callbackInfo); @@ -893,9 +887,9 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device, ATA_ERROR("system error setting callback for device (" << _device << ")."); goto error; } - if ( m_stream.mode == OUTPUT - && _mode == INPUT) { - m_stream.mode = DUPLEX; + if ( m_stream.mode == airtaudio::mode_output + && _mode == airtaudio::mode_input) { + m_stream.mode = airtaudio::mode_duplex; } else { m_stream.mode = _mode; } @@ -919,19 +913,19 @@ error: free(m_stream.deviceBuffer); m_stream.deviceBuffer = 0; } - m_stream.state = STREAM_CLOSED; + m_stream.state = airtaudio::state_closed; return false; } -enum airtaudio::errorType airtaudio::api::Core::closeStream() { - if (m_stream.state == STREAM_CLOSED) { +enum airtaudio::error airtaudio::api::Core::closeStream() { + if (m_stream.state == airtaudio::state_closed) { ATA_ERROR("no open stream to close!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } CoreHandle *handle = (CoreHandle *) m_stream.apiHandle; - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { - if (m_stream.state == STREAM_RUNNING) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { + if (m_stream.state == airtaudio::state_running) { AudioDeviceStop(handle->id[0], callbackHandler); } #if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5) @@ -941,10 +935,10 @@ enum airtaudio::errorType airtaudio::api::Core::closeStream() { AudioDeviceRemoveIOProc(handle->id[0], callbackHandler); #endif } - if ( m_stream.mode == INPUT - || ( m_stream.mode == DUPLEX + if ( m_stream.mode == airtaudio::mode_input + || ( m_stream.mode == airtaudio::mode_duplex && m_stream.device[0] != m_stream.device[1])) { - if (m_stream.state == STREAM_RUNNING) { + if (m_stream.state == airtaudio::state_running) { AudioDeviceStop(handle->id[1], callbackHandler); } #if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5) @@ -966,31 +960,31 @@ enum airtaudio::errorType airtaudio::api::Core::closeStream() { } delete handle; m_stream.apiHandle = 0; - m_stream.mode = UNINITIALIZED; - m_stream.state = STREAM_CLOSED; - return airtaudio::errorNone; + m_stream.mode = airtaudio::mode_unknow; + m_stream.state = airtaudio::state_closed; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Core::startStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Core::startStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_RUNNING) { + if (m_stream.state == airtaudio::state_running) { ATA_ERROR("the stream is already running!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } OSStatus result = noErr; CoreHandle *handle = (CoreHandle *) m_stream.apiHandle; - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == 
airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { result = AudioDeviceStart(handle->id[0], callbackHandler); if (result != noErr) { ATA_ERROR("system error (" << getErrorCode(result) << ") starting callback procedure on device (" << m_stream.device[0] << ")."); goto unlock; } } - if ( m_stream.mode == INPUT - || ( m_stream.mode == DUPLEX + if ( m_stream.mode == airtaudio::mode_input + || ( m_stream.mode == airtaudio::mode_duplex && m_stream.device[0] != m_stream.device[1])) { result = AudioDeviceStart(handle->id[1], callbackHandler); if (result != noErr) { @@ -1000,26 +994,26 @@ enum airtaudio::errorType airtaudio::api::Core::startStream() { } handle->drainCounter = 0; handle->internalDrain = false; - m_stream.state = STREAM_RUNNING; + m_stream.state = airtaudio::state_running; unlock: if (result == noErr) { - return airtaudio::errorNone; + return airtaudio::error_none; } - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } -enum airtaudio::errorType airtaudio::api::Core::stopStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Core::stopStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } OSStatus result = noErr; CoreHandle *handle = (CoreHandle *) m_stream.apiHandle; - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { if (handle->drainCounter == 0) { std::unique_lock lck(m_stream.mutex); handle->drainCounter = 2; @@ -1031,8 +1025,8 @@ enum airtaudio::errorType airtaudio::api::Core::stopStream() { goto unlock; } } - if ( m_stream.mode == INPUT - || ( m_stream.mode == DUPLEX + if ( m_stream.mode == airtaudio::mode_input + || ( m_stream.mode == airtaudio::mode_duplex && m_stream.device[0] != m_stream.device[1])) { result = AudioDeviceStop(handle->id[1], callbackHandler); if (result != noErr) { @@ -1040,21 +1034,21 @@ enum airtaudio::errorType airtaudio::api::Core::stopStream() { goto unlock; } } - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; unlock: if (result == noErr) { - return airtaudio::errorNone; + return airtaudio::error_none; } - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } -enum airtaudio::errorType airtaudio::api::Core::abortStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Core::abortStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } CoreHandle* handle = (CoreHandle*)m_stream.apiHandle; handle->drainCounter = 2; @@ -1075,11 +1069,11 @@ static void coreStopStream(void *_ptr) { bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId, const AudioBufferList *_inBufferList, const AudioBufferList *_outBufferList) { - if ( m_stream.state == STREAM_STOPPED - || m_stream.state == STREAM_STOPPING) { + if ( m_stream.state == airtaudio::state_stopped + || m_stream.state == airtaudio::state_stopping) { return true; } - if 
(m_stream.state == STREAM_CLOSED) {
+ if (m_stream.state == airtaudio::state_closed) {
 ATA_ERROR("the stream is closed ... this shouldn't happen!");
 return false;
 }
@@ -1087,7 +1081,7 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
 CoreHandle *handle = (CoreHandle *) m_stream.apiHandle;
 // Check if we were draining the stream and signal is finished.
 if (handle->drainCounter > 3) {
- m_stream.state = STREAM_STOPPING;
+ m_stream.state = airtaudio::state_stopping;
 if (handle->internalDrain == true) {
 new std::thread(coreStopStream, info);
 } else {
@@ -1100,17 +1094,17 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
 // Invoke user callback to get fresh output data UNLESS we are
 // draining stream or duplex mode AND the input/output devices are
 // different AND this function is called for the input device.
- if (handle->drainCounter == 0 && (m_stream.mode != DUPLEX || _deviceId == outputDevice)) {
+ if (handle->drainCounter == 0 && (m_stream.mode != airtaudio::mode_duplex || _deviceId == outputDevice)) {
 double streamTime = getStreamTime();
- airtaudio::streamStatus status = 0;
- if ( m_stream.mode != INPUT
+ enum airtaudio::status status = airtaudio::status_ok;
+ if ( m_stream.mode != airtaudio::mode_input
 && handle->xrun[0] == true) {
- status |= OUTPUT_UNDERFLOW;
+ status = airtaudio::status_underflow;
 handle->xrun[0] = false;
 }
- if ( m_stream.mode != OUTPUT
+ if ( m_stream.mode != airtaudio::mode_output
 && handle->xrun[1] == true) {
- status |= INPUT_OVERFLOW;
+ status = airtaudio::status_overflow;
 handle->xrun[1] = false;
 }
 int32_t cbReturnValue = info->callback(m_stream.userBuffer[0],
@@ -1119,7 +1113,7 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
 streamTime,
 status);
 if (cbReturnValue == 2) {
- m_stream.state = STREAM_STOPPING;
+ m_stream.state = airtaudio::state_stopping;
 handle->drainCounter = 2;
 abortStream();
 return true;
@@ -1128,8 +1122,8 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
 handle->internalDrain = true;
 }
 }
- if ( m_stream.mode == OUTPUT
- || ( m_stream.mode == DUPLEX
+ if ( m_stream.mode == airtaudio::mode_output
+ || ( m_stream.mode == airtaudio::mode_duplex
 && _deviceId == outputDevice)) {
 if (handle->drainCounter > 1) { // write zeros to the output stream
@@ -1175,7 +1169,7 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
 // fill multiple multi-channel streams with interleaved data
 uint32_t streamChannels, channelsLeft, inJump, outJump, inOffset;
 float *out, *in;
- bool inInterleaved = (m_stream.userInterleaved) ? true : false;
+ bool inInterleaved = true;
 uint32_t inChannels = m_stream.nUserChannels[0];
 if (m_stream.doConvertBuffer[0]) {
 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
@@ -1229,8 +1223,8 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
 }
 AudioDeviceID inputDevice;
 inputDevice = handle->id[1];
- if ( m_stream.mode == INPUT
- || ( m_stream.mode == DUPLEX
+ if ( m_stream.mode == airtaudio::mode_input
+ || ( m_stream.mode == airtaudio::mode_duplex
 && _deviceId == inputDevice)) {
 if (handle->nStreams[1] == 1) {
 if (m_stream.doConvertBuffer[1]) {
@@ -1260,7 +1254,7 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
 // read from multiple multi-channel streams
 uint32_t streamChannels, channelsLeft, inJump, outJump, outOffset;
 float *out, *in;
- bool outInterleaved = (m_stream.userInterleaved) ?
true : false; + bool outInterleaved = true; uint32_t outChannels = m_stream.nUserChannels[1]; if (m_stream.doConvertBuffer[1]) { outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode diff --git a/airtaudio/api/Core.h b/airtaudio/api/Core.h index df5012d..744236b 100644 --- a/airtaudio/api/Core.h +++ b/airtaudio/api/Core.h @@ -19,17 +19,17 @@ namespace airtaudio { public: Core(); virtual ~Core(); - airtaudio::api::type getCurrentApi() { - return airtaudio::api::MACOSX_CORE; + enum airtaudio::type getCurrentApi() { + return airtaudio::type_coreOSX; } uint32_t getDeviceCount(); airtaudio::DeviceInfo getDeviceInfo(uint32_t _device); uint32_t getDefaultOutputDevice(); uint32_t getDefaultInputDevice(); - enum airtaudio::errorType closeStream(); - enum airtaudio::errorType startStream(); - enum airtaudio::errorType stopStream(); - enum airtaudio::errorType abortStream(); + enum airtaudio::error closeStream(); + enum airtaudio::error startStream(); + enum airtaudio::error stopStream(); + enum airtaudio::error abortStream(); long getStreamLatency(); // This function is intended for internal use only. It must be // public because it is called by the internal callback handler, @@ -41,7 +41,7 @@ namespace airtaudio { private: bool probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, diff --git a/airtaudio/api/CoreIos.h b/airtaudio/api/CoreIos.h index 16b5696..f346381 100644 --- a/airtaudio/api/CoreIos.h +++ b/airtaudio/api/CoreIos.h @@ -16,15 +16,15 @@ namespace airtaudio { public: CoreIos(); virtual ~CoreIos(); - airtaudio::api::type getCurrentApi() { - return airtaudio::api::IOS_CORE; + enum airtaudio::type getCurrentApi() { + return airtaudio::type_coreIOS; } uint32_t getDeviceCount(); airtaudio::DeviceInfo getDeviceInfo(uint32_t _device); - enum airtaudio::errorType closeStream(); - enum airtaudio::errorType startStream(); - enum airtaudio::errorType stopStream(); - enum airtaudio::errorType abortStream(); + enum airtaudio::error closeStream(); + enum airtaudio::error startStream(); + enum airtaudio::error stopStream(); + enum airtaudio::error abortStream(); // This function is intended for internal use only. It must be // public because it is called by the internal callback handler, // which is not a member of RtAudio. External use of this function @@ -34,7 +34,7 @@ namespace airtaudio { std::vector m_devices; void saveDeviceInfo(); bool probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, diff --git a/airtaudio/api/CoreIos.mm b/airtaudio/api/CoreIos.mm index 12f1949..75cb126 100644 --- a/airtaudio/api/CoreIos.mm +++ b/airtaudio/api/CoreIos.mm @@ -82,31 +82,31 @@ airtaudio::DeviceInfo airtaudio::api::CoreIos::getDeviceInfo(uint32_t _device) { return m_devices[_device]; } -enum airtaudio::errorType airtaudio::api::CoreIos::closeStream(void) { +enum airtaudio::error airtaudio::api::CoreIos::closeStream(void) { ATA_INFO("Close Stream"); // Can not close the stream now... - return airtaudio::errorNone; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::CoreIos::startStream(void) { +enum airtaudio::error airtaudio::api::CoreIos::startStream(void) { ATA_INFO("Start Stream"); OSStatus status = AudioOutputUnitStart(m_private->audioUnit); // Can not close the stream now... 
- return airtaudio::errorNone;
+ return airtaudio::error_none;
 }
-enum airtaudio::errorType airtaudio::api::CoreIos::stopStream(void) {
+enum airtaudio::error airtaudio::api::CoreIos::stopStream(void) {
 ATA_INFO("Stop stream");
 OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
 // Can not close the stream now...
- return airtaudio::errorNone;
+ return airtaudio::error_none;
 }
-enum airtaudio::errorType airtaudio::api::CoreIos::abortStream(void) {
+enum airtaudio::error airtaudio::api::CoreIos::abortStream(void) {
 ATA_INFO("Abort Stream");
 OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
 // Can not close the stream now...
- return airtaudio::errorNone;
+ return airtaudio::error_none;
 }
 void airtaudio::api::CoreIos::callBackEvent(void* _data,
@@ -127,14 +127,14 @@ void airtaudio::api::CoreIos::callBackEvent(void* _data,
 #endif
 int32_t doStopStream = 0;
 double streamTime = getStreamTime();
- airtaudio::streamStatus status = 0;
- if (m_stream.doConvertBuffer[OUTPUT] == true) {
- doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[OUTPUT],
+ enum airtaudio::status status = airtaudio::status_ok;
+ if (m_stream.doConvertBuffer[modeToIdTable(airtaudio::mode_output)] == true) {
+ doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[modeToIdTable(airtaudio::mode_output)],
 nullptr,
 _frameRate,
 streamTime,
 status);
- convertBuffer((char*)_data, (char*)m_stream.userBuffer[OUTPUT], m_stream.convertInfo[OUTPUT]);
+ convertBuffer((char*)_data, (char*)m_stream.userBuffer[modeToIdTable(airtaudio::mode_output)], m_stream.convertInfo[modeToIdTable(airtaudio::mode_output)]);
 } else {
 doStopStream = m_stream.callbackInfo.callback(_data,
 nullptr,
@@ -172,7 +172,7 @@ static OSStatus playbackCallback(void *_userData,
 bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device,
- airtaudio::api::StreamMode _mode,
+ airtaudio::mode _mode,
 uint32_t _channels,
 uint32_t _firstChannel,
 uint32_t _sampleRate,
@@ -180,7 +180,7 @@ bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device,
 uint32_t *_bufferSize,
 airtaudio::StreamOptions *_options) {
 ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
- if (_mode != OUTPUT) {
+ if (_mode != airtaudio::mode_output) {
 ATA_ERROR("Can not start a device input or duplex for CoreIos ...");
 return false;
 }
@@ -188,39 +188,39 @@ bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device,
 // configure Airtaudio internal configuration:
 m_stream.userFormat = _format;
- m_stream.nUserChannels[_mode] = _channels;
+ m_stream.nUserChannels[modeToIdTable(_mode)] = _channels;
 m_stream.bufferSize = 8192;
 m_stream.sampleRate = _sampleRate;
- m_stream.doByteSwap[_mode] = false; // for endienness ...
+ m_stream.doByteSwap[modeToIdTable(_mode)] = false; // for endienness ...
 // TODO : For now, we write it in hard ==> to be update later ...
- m_stream.deviceFormat[_mode] = SINT16; - m_stream.nDeviceChannels[_mode] = 2; - m_stream.deviceInterleaved[_mode] = true; + m_stream.deviceFormat[modeToIdTable(_mode)] = SINT16; + m_stream.nDeviceChannels[modeToIdTable(_mode)] = 2; + m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; - m_stream.doConvertBuffer[_mode] = false; - if (m_stream.userFormat != m_stream.deviceFormat[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode] - && m_stream.nUserChannels[_mode] > 1) { - m_stream.doConvertBuffer[_mode] = true; + if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false + && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.doConvertBuffer[_mode] == true) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)] == true) { // Allocate necessary internal buffers. - uint64_t bufferBytes = m_stream.nUserChannels[_mode] * m_stream.bufferSize * formatBytes(m_stream.userFormat); - m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[_mode] == nullptr) { + uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * formatBytes(m_stream.userFormat); + m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating user buffer memory."); } setConvertInfo(_mode, _firstChannel); } - ATA_INFO("device format : " << m_stream.deviceFormat[_mode] << " user format : " << m_stream.userFormat); - ATA_INFO("device channels : " << m_stream.nDeviceChannels[_mode] << " user channels : " << m_stream.nUserChannels[_mode]); - ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[_mode]); + ATA_INFO("device format : " << m_stream.deviceFormat[modeToIdTable(_mode)] << " user format : " << m_stream.userFormat); + ATA_INFO("device channels : " << m_stream.nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_stream.nUserChannels[modeToIdTable(_mode)]); + ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[modeToIdTable(_mode)]); if (ret == false) { ATA_ERROR("Can not open device."); } diff --git a/airtaudio/api/Ds.cpp b/airtaudio/api/Ds.cpp index 8b0587d..3aec7db 100644 --- a/airtaudio/api/Ds.cpp +++ b/airtaudio/api/Ds.cpp @@ -126,7 +126,7 @@ airtaudio::api::Ds::~Ds() { if (m_coInitialized) { CoUninitialize(); // balanced call. 
} - if (m_stream.state != STREAM_CLOSED) { + if (m_stream.state != airtaudio::state_closed) { closeStream(); } } @@ -417,12 +417,12 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, ATA_ERROR("device ID is invalid!"); return false; } - if (_mode == OUTPUT) { + if (_mode == airtaudio::mode_output) { if (dsDevices[ _device ].validId[0] == false) { ATA_ERROR("device (" << _device << ") does not support output!"); return false; } - } else { // _mode == INPUT + } else { // _mode == airtaudio::mode_input if (dsDevices[ _device ].validId[1] == false) { ATA_ERROR("device (" << _device << ") does not support input!"); return false; @@ -445,9 +445,10 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, nBuffers = _options->numberOfBuffers; } if ( _options!= nullptr - && _options->flags & RTAUDIO_MINIMIZE_LATENCY) { + && _options->flags.m_minimizeLatency == true) { nBuffers = 2; } + */ if (nBuffers < 2) { nBuffers = 3; } @@ -470,7 +471,7 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, DWORD dsPointerLeadTime = 0; void *ohandle = 0, *bhandle = 0; HRESULT result; - if (_mode == OUTPUT) { + if (_mode == airtaudio::mode_output) { LPDIRECTSOUND output; result = DirectSoundCreate(dsDevices[ _device ].id[0], &output, nullptr); if (FAILED(result)) { @@ -496,10 +497,10 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, && !( _format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT)) { waveFormat.wBitsPerSample = 16; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT16; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; } else { waveFormat.wBitsPerSample = 8; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT8; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; } m_stream.userFormat = _format; // Update wave format structure and buffer information. 
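Before the DirectSound buffers can be created, the sample width and channel count chosen by the probe have to be packed into a WAVEFORMATEX descriptor. A reduced sketch assuming plain PCM; the helper name makePcmFormat is illustrative and not part of the library:

#include <windows.h>
#include <mmreg.h>

WAVEFORMATEX makePcmFormat(DWORD sampleRate, WORD channels, WORD bitsPerSample) {
	WAVEFORMATEX wf = {};
	wf.wFormatTag = WAVE_FORMAT_PCM;
	wf.nChannels = channels;
	wf.nSamplesPerSec = sampleRate;
	wf.wBitsPerSample = bitsPerSample;
	wf.nBlockAlign = static_cast<WORD>(channels * bitsPerSample / 8);  // bytes per frame
	wf.nAvgBytesPerSec = sampleRate * wf.nBlockAlign;
	wf.cbSize = 0;
	return wf;
}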
@@ -600,7 +601,7 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, ohandle = (void *) output; bhandle = (void *) buffer; } - if (_mode == INPUT) { + if (_mode == airtaudio::mode_input) { LPDIRECTSOUNDCAPTURE input; result = DirectSoundCaptureCreate(dsDevices[ _device ].id[1], &input, nullptr); if (FAILED(result)) { @@ -627,20 +628,20 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08; if (format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats) { waveFormat.wBitsPerSample = 8; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT8; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; } else { // assume 16-bit is supported waveFormat.wBitsPerSample = 16; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT16; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; } } else { // channel == 1 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08; if (format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats) { waveFormat.wBitsPerSample = 8; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT8; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; } else { // assume 16-bit is supported waveFormat.wBitsPerSample = 16; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT16; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; } } m_stream.userFormat = _format; @@ -708,41 +709,35 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, } // Set various stream parameters DsHandle *handle = 0; - m_stream.nDeviceChannels[_mode] = _channels + _firstChannel; - m_stream.nUserChannels[_mode] = _channels; + m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel; + m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; m_stream.bufferSize = *_bufferSize; - m_stream.channelOffset[_mode] = _firstChannel; - m_stream.deviceInterleaved[_mode] = true; - if ( _options != nullptr - && _options->flags & RTAUDIO_NONINTERLEAVED) { - m_stream.userInterleaved = false; - } else { - m_stream.userInterleaved = true; - } + m_stream.channelOffset[modeToIdTable(_mode)] = _firstChannel; + m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; // Set flag for buffer conversion - m_stream.doConvertBuffer[_mode] = false; - if (m_stream.nUserChannels[_mode] != m_stream.nDeviceChannels[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_stream.nUserChannels[modeToIdTable(_mode)] != m_stream.nDeviceChannels[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.userFormat != m_stream.deviceFormat[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode] - && m_stream.nUserChannels[_mode] > 1) { - m_stream.doConvertBuffer[_mode] = true; + if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false + && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } // Allocate necessary internal buffers - long bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat); - m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[_mode] == nullptr) { + long bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * 
*_bufferSize * formatBytes(m_stream.userFormat); + m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating user buffer memory."); goto error; } - if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { bool makeBuffer = true; - bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]); - if (_mode == INPUT) { - if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) { + bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]); + if (_mode == airtaudio::mode_input) { + if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) { uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]); if (bufferBytes <= (long) bytesOut) { makeBuffer = false; @@ -777,23 +772,23 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device, } else { handle = (DsHandle *) m_stream.apiHandle; } - handle->id[_mode] = ohandle; - handle->buffer[_mode] = bhandle; - handle->dsBufferSize[_mode] = dsBufferSize; - handle->dsPointerLeadTime[_mode] = dsPointerLeadTime; - m_stream.device[_mode] = _device; - m_stream.state = STREAM_STOPPED; - if ( m_stream.mode == OUTPUT - && _mode == INPUT) { + handle->id[modeToIdTable(_mode)] = ohandle; + handle->buffer[modeToIdTable(_mode)] = bhandle; + handle->dsBufferSize[modeToIdTable(_mode)] = dsBufferSize; + handle->dsPointerLeadTime[modeToIdTable(_mode)] = dsPointerLeadTime; + m_stream.device[modeToIdTable(_mode)] = _device; + m_stream.state = airtaudio::state_stopped; + if ( m_stream.mode == airtaudio::mode_output + && _mode == airtaudio::mode_input) { // We had already set up an output stream. - m_stream.mode = DUPLEX; + m_stream.mode = airtaudio::mode_duplex; } else { m_stream.mode = _mode; } m_stream.nBuffers = nBuffers; m_stream.sampleRate = _sampleRate; // Setup the buffer conversion information structure. - if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { setConvertInfo(_mode, _firstChannel); } // Setup the callback thread. @@ -847,14 +842,14 @@ error: free(m_stream.deviceBuffer); m_stream.deviceBuffer = 0; } - m_stream.state = STREAM_CLOSED; + m_stream.state = airtaudio::state_closed; return false; } -enum airtaudio::errorType airtaudio::api::Ds::closeStream() { - if (m_stream.state == STREAM_CLOSED) { +enum airtaudio::error airtaudio::api::Ds::closeStream() { + if (m_stream.state == airtaudio::state_closed) { ATA_ERROR("no open stream to close!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } // Stop the callback thread. 
m_stream.callbackInfo.isRunning = false; @@ -894,17 +889,17 @@ enum airtaudio::errorType airtaudio::api::Ds::closeStream() { free(m_stream.deviceBuffer); m_stream.deviceBuffer = 0; } - m_stream.mode = UNINITIALIZED; - m_stream.state = STREAM_CLOSED; + m_stream.mode = airtaudio::mode_unknow; + m_stream.state = airtaudio::state_closed; } -enum airtaudio::errorType airtaudio::api::Ds::startStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Ds::startStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_RUNNING) { + if (m_stream.state == airtaudio::state_running) { ATA_ERROR("the stream is already running!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } DsHandle *handle = (DsHandle *) m_stream.apiHandle; // Increase scheduler frequency on lesser windows (a side-effect of @@ -913,13 +908,13 @@ enum airtaudio::errorType airtaudio::api::Ds::startStream() { timeBeginPeriod(1); m_buffersRolling = false; m_duplexPrerollBytes = 0; - if (m_stream.mode == DUPLEX) { - // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize. + if (m_stream.mode == airtaudio::mode_duplex) { + // 0.5 seconds of silence in airtaudio::mode_duplex mode while the devices spin up and synchronize. m_duplexPrerollBytes = (int) (0.5 * m_stream.sampleRate * formatBytes(m_stream.deviceFormat[1]) * m_stream.nDeviceChannels[1]); } HRESULT result = 0; - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; result = buffer->Play(0, 0, DSBPLAY_LOOPING); if (FAILED(result)) { @@ -927,8 +922,8 @@ enum airtaudio::errorType airtaudio::api::Ds::startStream() { goto unlock; } } - if ( m_stream.mode == INPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_input + || m_stream.mode == airtaudio::mode_duplex) { LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1]; result = buffer->Start(DSCBSTART_LOOPING); if (FAILED(result)) { @@ -939,33 +934,33 @@ enum airtaudio::errorType airtaudio::api::Ds::startStream() { handle->drainCounter = 0; handle->internalDrain = false; ResetEvent(handle->condition); - m_stream.state = STREAM_RUNNING; + m_stream.state = airtaudio::state_running; unlock: if (FAILED(result)) { - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } - return airtaudio::errorNone; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Ds::stopStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Ds::stopStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } HRESULT result = 0; LPVOID audioPtr; DWORD dataLen; DsHandle *handle = (DsHandle *) m_stream.apiHandle; - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { if (handle->drainCounter == 0) { handle->drainCounter = 2; WaitForSingleObject(handle->condition, INFINITE); // block until signaled } - 
m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; // Stop the buffer and clear memory LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; result = buffer->Stop(); @@ -991,12 +986,12 @@ enum airtaudio::errorType airtaudio::api::Ds::stopStream() { // If we start playing again, we must begin at beginning of buffer. handle->bufferPointer[0] = 0; } - if ( m_stream.mode == INPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_input + || m_stream.mode == airtaudio::mode_duplex) { LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1]; audioPtr = nullptr; dataLen = 0; - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; result = buffer->Stop(); if (FAILED(result)) { ATA_ERROR("error (" << getErrorString(result) << ") stopping input buffer!"); @@ -1023,18 +1018,18 @@ enum airtaudio::errorType airtaudio::api::Ds::stopStream() { unlock: timeEndPeriod(1); // revert to normal scheduler frequency on lesser windows. if (FAILED(result)) { - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } - return airtaudio::errorNone; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Ds::abortStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Ds::abortStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } DsHandle *handle = (DsHandle *) m_stream.apiHandle; handle->drainCounter = 2; @@ -1042,11 +1037,11 @@ enum airtaudio::errorType airtaudio::api::Ds::abortStream() { } void airtaudio::api::Ds::callbackEvent() { - if (m_stream.state == STREAM_STOPPED || m_stream.state == STREAM_STOPPING) { + if (m_stream.state == airtaudio::state_stopped || m_stream.state == airtaudio::state_stopping) { Sleep(50); // sleep 50 milliseconds return; } - if (m_stream.state == STREAM_CLOSED) { + if (m_stream.state == airtaudio::state_closed) { ATA_ERROR("the stream is closed ... this shouldn't happen!"); return; } @@ -1054,7 +1049,7 @@ void airtaudio::api::Ds::callbackEvent() { DsHandle *handle = (DsHandle *) m_stream.apiHandle; // Check if we were draining the stream and signal is finished. 
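stopStream() above asks the callback to drain by setting drainCounter and then blocks on a Win32 event that the callback signals once enough silence has been queued. The same handshake restated with the standard library; the names here are illustrative, not the airtaudio types:

#include <condition_variable>
#include <mutex>

struct DrainState {
	std::mutex m;
	std::condition_variable cv;
	int drainCounter = 0;
	bool drained = false;
};

// stopStream() side: request a drain, then block until the callback reports done.
void requestDrainAndWait(DrainState& s) {
	std::unique_lock<std::mutex> lock(s.m);
	s.drainCounter = 2;              // tell the callback to start writing silence
	s.cv.wait(lock, [&] { return s.drained; });
}

// callback side: once enough silent buffers have been queued, signal the waiter.
void signalDrained(DrainState& s) {
	{
		std::lock_guard<std::mutex> lock(s.m);
		s.drained = true;
	}
	s.cv.notify_one();
}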
if (handle->drainCounter > m_stream.nBuffers + 2) { - m_stream.state = STREAM_STOPPING; + m_stream.state = airtaudio::state_stopping; if (handle->internalDrain == false) { SetEvent(handle->condition); } else { @@ -1067,14 +1062,14 @@ void airtaudio::api::Ds::callbackEvent() { if (handle->drainCounter == 0) { double streamTime = getStreamTime(); rtaudio::streamStatus status = 0; - if ( m_stream.mode != INPUT + if ( m_stream.mode != airtaudio::mode_input && handle->xrun[0] == true) { - status |= RTAUDIO_OUTPUT_UNDERFLOW; + status |= airtaudio::status_underflow; handle->xrun[0] = false; } - if ( m_stream.mode != OUTPUT + if ( m_stream.mode != airtaudio::mode_output && handle->xrun[1] == true) { - status |= RTAUDIO_INPUT_OVERFLOW; + status |= airtaudio::status_overflow; handle->xrun[1] = false; } int32_t cbReturnValue = info->callback(m_stream.userBuffer[0], @@ -1083,7 +1078,7 @@ void airtaudio::api::Ds::callbackEvent() { streamTime, status); if (cbReturnValue == 2) { - m_stream.state = STREAM_STOPPING; + m_stream.state = airtaudio::state_stopping; handle->drainCounter = 2; abortStream(); return; @@ -1103,7 +1098,7 @@ void airtaudio::api::Ds::callbackEvent() { char *buffer; long bufferBytes; if (m_buffersRolling == false) { - if (m_stream.mode == DUPLEX) { + if (m_stream.mode == airtaudio::mode_duplex) { //assert(handle->dsBufferSize[0] == handle->dsBufferSize[1]); // It takes a while for the devices to get rolling. As a result, // there's no guarantee that the capture and write device pointers @@ -1153,7 +1148,7 @@ void airtaudio::api::Ds::callbackEvent() { handle->bufferPointer[0] -= handle->dsBufferSize[0]; } handle->bufferPointer[1] = safeReadPointer; - } else if (m_stream.mode == OUTPUT) { + } else if (m_stream.mode == airtaudio::mode_output) { // Set the proper nextWritePosition after initial startup. LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; result = dsWriteBuffer->GetCurrentPosition(&currentWritePointer, &safeWritePointer); @@ -1168,8 +1163,8 @@ void airtaudio::api::Ds::callbackEvent() { } m_buffersRolling = true; } - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0]; if (handle->drainCounter > 1) { // write zeros to the output stream bufferBytes = m_stream.bufferSize * m_stream.nUserChannels[0]; @@ -1273,8 +1268,8 @@ void airtaudio::api::Ds::callbackEvent() { goto unlock; } } - if ( m_stream.mode == INPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_input + || m_stream.mode == airtaudio::mode_duplex) { // Setup parameters. if (m_stream.doConvertBuffer[1]) { buffer = m_stream.deviceBuffer; @@ -1298,20 +1293,20 @@ void airtaudio::api::Ds::callbackEvent() { safeReadPointer += dsBufferSize; // unwrap offset } DWORD endRead = nextReadPointer + bufferBytes; - // Handling depends on whether we are INPUT or DUPLEX. - // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode, + // Handling depends on whether we are airtaudio::mode_input or airtaudio::mode_duplex. + // If we're in airtaudio::mode_input mode then waiting is a good thing. If we're in airtaudio::mode_duplex mode, // then a wait here will drag the write pointers into the forbidden zone.
// - // In DUPLEX mode, rather than wait, we will back off the read pointer until + // In airtaudio::mode_duplex mode, rather than wait, we will back off the read pointer until // it's in a safe position. This causes dropouts, but it seems to be the only // practical way to sync up the read and write pointers reliably, given the // the very complex relationship between phase and increment of the read and write // pointers. // - // In order to minimize audible dropouts in DUPLEX mode, we will + // In order to minimize audible dropouts in airtaudio::mode_duplex mode, we will // provide a pre-roll period of 0.5 seconds in which we return // zeros from the read buffer while the pointers sync up. - if (m_stream.mode == DUPLEX) { + if (m_stream.mode == airtaudio::mode_duplex) { if (safeReadPointer < endRead) { if (m_duplexPrerollBytes <= 0) { // Pre-roll time over. Be more agressive. @@ -1338,7 +1333,7 @@ void airtaudio::api::Ds::callbackEvent() { } endRead = nextReadPointer + bufferBytes; } - } else { // _mode == INPUT + } else { // _mode == airtaudio::mode_input while ( safeReadPointer < endRead && m_stream.callbackInfo.isRunning) { // See comments for playback. diff --git a/airtaudio/api/Ds.h b/airtaudio/api/Ds.h index 74817f3..93d40bb 100644 --- a/airtaudio/api/Ds.h +++ b/airtaudio/api/Ds.h @@ -17,17 +17,17 @@ namespace airtaudio { public: Ds(); virtual ~Ds(); - airtaudio::api::type getCurrentApi() { - return airtaudio::api::WINDOWS_DS; + enum airtaudio::type getCurrentApi() { + return airtaudio::type_ds; } uint32_t getDeviceCount(); uint32_t getDefaultOutputDevice(); uint32_t getDefaultInputDevice(); airtaudio::DeviceInfo getDeviceInfo(uint32_t _device); - enum airtaudio::errorType closeStream(); - enum airtaudio::errorType startStream(); - enum airtaudio::errorType stopStream(); - enum airtaudio::errorType abortStream(); + enum airtaudio::error closeStream(); + enum airtaudio::error startStream(); + enum airtaudio::error stopStream(); + enum airtaudio::error abortStream(); long getStreamLatency(); // This function is intended for internal use only. 
It must be // public because it is called by the internal callback handler, @@ -40,7 +40,7 @@ namespace airtaudio { long m_duplexPrerollBytes; std::vector dsDevices; bool probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, diff --git a/airtaudio/api/Dummy.cpp b/airtaudio/api/Dummy.cpp index 26abda1..8ae5850 100644 --- a/airtaudio/api/Dummy.cpp +++ b/airtaudio/api/Dummy.cpp @@ -20,7 +20,7 @@ airtaudio::Api* airtaudio::api::Dummy::Create() { airtaudio::api::Dummy::Dummy() { m_errorText = "This class provides no functionality."; - error(airtaudio::errorWarning); + error(airtaudio::error_warning); } uint32_t airtaudio::api::Dummy::getDeviceCount() { @@ -33,24 +33,24 @@ rtaudio::DeviceInfo airtaudio::api::Dummy::getDeviceInfo(uint32_t _device) { return info; } -enum airtaudio::errorType airtaudio::api::Dummy::closeStream() { - return airtaudio::errorNone; +enum airtaudio::error airtaudio::api::Dummy::closeStream() { + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Dummy::startStream() { - return airtaudio::errorNone; +enum airtaudio::error airtaudio::api::Dummy::startStream() { + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Dummy::stopStream() { - return airtaudio::errorNone; +enum airtaudio::error airtaudio::api::Dummy::stopStream() { + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Dummy::abortStream() { - return airtaudio::errorNone; +enum airtaudio::error airtaudio::api::Dummy::abortStream() { + return airtaudio::error_none; } bool airtaudio::api::Dummy::probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, diff --git a/airtaudio/api/Dummy.h b/airtaudio/api/Dummy.h index 0f1dbcd..88153cb 100644 --- a/airtaudio/api/Dummy.h +++ b/airtaudio/api/Dummy.h @@ -6,8 +6,8 @@ * @license like MIT (see license file) */ -#if !defined(__AIRTAUDIO_API_DUMMY_H__) && defined(__AIRTAUDIO_DUMMY__) -#define __AIRTAUDIO_API_DUMMY_H__ +#if !defined(__AIRTAUDIO_DUMMY__) && defined(__DUMMY__) +#define __AIRTAUDIO_DUMMY__ #include @@ -18,18 +18,18 @@ namespace airtaudio { static airtaudio::Api* Create(); public: Dummy(); - airtaudio::api::type getCurrentApi() { - return airtaudio::api::RTAUDIO_DUMMY; + enum airtaudio::type getCurrentApi() { + return airtaudio::type_dummy; } uint32_t getDeviceCount(); airtaudio::DeviceInfo getDeviceInfo(uint32_t _device); - enum airtaudio::errorType closeStream(); - enum airtaudio::errorType startStream(); - enum airtaudio::errorType stopStream(); - enum airtaudio::errorType abortStream(); + enum airtaudio::error closeStream(); + enum airtaudio::error startStream(); + enum airtaudio::error stopStream(); + enum airtaudio::error abortStream(); private: bool probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, diff --git a/airtaudio/api/Jack.cpp b/airtaudio/api/Jack.cpp index a52c2e7..92ac519 100644 --- a/airtaudio/api/Jack.cpp +++ b/airtaudio/api/Jack.cpp @@ -83,7 +83,7 @@ airtaudio::api::Jack::Jack() { } airtaudio::api::Jack::~Jack() { - if (m_stream.state != STREAM_CLOSED) { + if (m_stream.state != airtaudio::state_closed) { closeStream(); } } @@ -128,7 +128,7 @@ airtaudio::DeviceInfo airtaudio::api::Jack::getDeviceInfo(uint32_t _device) { jack_client_t *client = 
jack_client_open("RtApiJackInfo", options, status); if (client == nullptr) { ATA_ERROR("Jack server not found or connection error!"); - // TODO : airtaudio::errorWarning; + // TODO : airtaudio::error_warning; return info; } const char **ports; @@ -157,7 +157,7 @@ airtaudio::DeviceInfo airtaudio::api::Jack::getDeviceInfo(uint32_t _device) { if (_device >= nDevices) { jack_client_close(client); ATA_ERROR("device ID is invalid!"); - // TODO : airtaudio::errorInvalidUse; + // TODO : airtaudio::error_invalidUse; return info; } // Get the current jack server sample rate. @@ -187,7 +187,7 @@ airtaudio::DeviceInfo airtaudio::api::Jack::getDeviceInfo(uint32_t _device) { if (info.outputChannels == 0 && info.inputChannels == 0) { jack_client_close(client); ATA_ERROR("error determining Jack input/output channels!"); - // TODO : airtaudio::errorWarning; + // TODO : airtaudio::error_warning; return info; } // If device opens for both playback and capture, we determine the channels. @@ -256,7 +256,7 @@ static int32_t jackXrun(void* _infoPointer) { } bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, @@ -266,9 +266,9 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, JackHandle *handle = (JackHandle *) m_stream.apiHandle; // Look for jack server and try to become a client (only do once per stream). jack_client_t *client = 0; - if ( _mode == OUTPUT - || ( _mode == INPUT - && m_stream.mode != OUTPUT)) { + if ( _mode == airtaudio::mode_output + || ( _mode == airtaudio::mode_input + && m_stream.mode != airtaudio::mode_output)) { jack_options_t jackoptions = (jack_options_t) (JackNoStartServer); //JackNullOption; jack_status_t *status = nullptr; if (_options && !_options->streamName.empty()) { @@ -315,7 +315,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, // channels. Jack "input ports" equal RtAudio output channels. uint32_t nChannels = 0; uint64_t flag = JackPortIsInput; - if (_mode == INPUT) flag = JackPortIsOutput; + if (_mode == airtaudio::mode_input) flag = JackPortIsOutput; ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag); if (ports) { while (ports[ nChannels ]) { @@ -340,42 +340,37 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag); if (ports[ _firstChannel ]) { // Added by Ge Wang - jack_latency_callback_mode_t cbmode = (_mode == INPUT ? JackCaptureLatency : JackPlaybackLatency); + jack_latency_callback_mode_t cbmode = (_mode == airtaudio::mode_input ? JackCaptureLatency : JackPlaybackLatency); // the range (usually the min and max are equal) jack_latency_range_t latrange; latrange.min = latrange.max = 0; // get the latency range jack_port_get_latency_range(jack_port_by_name(client, ports[_firstChannel]), cbmode, &latrange); // be optimistic, use the min! - m_stream.latency[_mode] = latrange.min; - //m_stream.latency[_mode] = jack_port_get_latency(jack_port_by_name(client, ports[ _firstChannel ])); + m_stream.latency[modeToIdTable(_mode)] = latrange.min; + //m_stream.latency[modeToIdTable(_mode)] = jack_port_get_latency(jack_port_by_name(client, ports[ _firstChannel ])); } free(ports); // The jack server always uses 32-bit floating-point data. 
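Because JACK always hands the backend 32-bit float samples, a stream opened with a 16-bit user format goes through a sample conversion on every callback. A simplified, stand-alone version of that conversion (the library's own convertBuffer() is generic over formats and channel layouts):

#include <algorithm>
#include <cstdint>
#include <cstddef>

void floatToInt16(const float* in, int16_t* out, size_t samples) {
	for (size_t i = 0; i < samples; ++i) {
		// Clamp to [-1, 1] before scaling so out-of-range floats do not wrap.
		float v = std::max(-1.0f, std::min(1.0f, in[i]));
		out[i] = static_cast<int16_t>(v * 32767.0f);
	}
}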
- m_stream.deviceFormat[_mode] = FLOAT32; + m_stream.deviceFormat[modeToIdTable(_mode)] = FLOAT32; m_stream.userFormat = _format; - if (_options && _options->flags & NONINTERLEAVED) { - m_stream.userInterleaved = false; - } else { - m_stream.userInterleaved = true; - } // Jack always uses non-interleaved buffers. - m_stream.deviceInterleaved[_mode] = false; + m_stream.deviceInterleaved[modeToIdTable(_mode)] = false; // Jack always provides host byte-ordered data. - m_stream.doByteSwap[_mode] = false; + m_stream.doByteSwap[modeToIdTable(_mode)] = false; // Get the buffer size. The buffer size and number of buffers // (periods) is set when the jack server is started. m_stream.bufferSize = (int) jack_get_buffer_size(client); *_bufferSize = m_stream.bufferSize; - m_stream.nDeviceChannels[_mode] = _channels; - m_stream.nUserChannels[_mode] = _channels; + m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels; + m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; // Set flags for buffer conversion. - m_stream.doConvertBuffer[_mode] = false; - if (m_stream.userFormat != m_stream.deviceFormat[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode] - && m_stream.nUserChannels[_mode] > 1) { - m_stream.doConvertBuffer[_mode] = true; + if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false + && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } // Allocate our JackHandle structure for the stream. if (handle == 0) { @@ -387,22 +382,22 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, m_stream.apiHandle = (void *) handle; handle->client = client; } - handle->deviceName[_mode] = deviceName; + handle->deviceName[modeToIdTable(_mode)] = deviceName; // Allocate necessary internal buffers. uint64_t bufferBytes; - bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat); - m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[_mode] == nullptr) { + bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat); + m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating user buffer memory."); goto error; } - if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { bool makeBuffer = true; - if (_mode == OUTPUT) { + if (_mode == airtaudio::mode_output) { bufferBytes = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]); - } else { // _mode == INPUT + } else { // _mode == airtaudio::mode_input bufferBytes = m_stream.nDeviceChannels[1] * formatBytes(m_stream.deviceFormat[1]); - if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) { + if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) { uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]); if (bufferBytes < bytesOut) { makeBuffer = false; @@ -420,19 +415,19 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, } } // Allocate memory for the Jack ports (channels) identifiers. 
- handle->ports[_mode] = (jack_port_t **) malloc (sizeof (jack_port_t *) * _channels); - if (handle->ports[_mode] == nullptr) { + handle->ports[modeToIdTable(_mode)] = (jack_port_t **) malloc (sizeof (jack_port_t *) * _channels); + if (handle->ports[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating port memory."); goto error; } - m_stream.device[_mode] = _device; - m_stream.channelOffset[_mode] = _firstChannel; - m_stream.state = STREAM_STOPPED; + m_stream.device[modeToIdTable(_mode)] = _device; + m_stream.channelOffset[modeToIdTable(_mode)] = _firstChannel; + m_stream.state = airtaudio::state_stopped; m_stream.callbackInfo.object = (void *) this; - if ( m_stream.mode == OUTPUT - && _mode == INPUT) { + if ( m_stream.mode == airtaudio::mode_output + && _mode == airtaudio::mode_input) { // We had already set up the stream for output. - m_stream.mode = DUPLEX; + m_stream.mode = airtaudio::mode_duplex; } else { m_stream.mode = _mode; jack_set_process_callback(handle->client, jackCallbackHandler, (void *) &m_stream.callbackInfo); @@ -441,7 +436,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, } // Register our ports. char label[64]; - if (_mode == OUTPUT) { + if (_mode == airtaudio::mode_output) { for (uint32_t i=0; iports[0][i] = jack_port_register(handle->client, @@ -463,7 +458,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device, // Setup the buffer conversion information structure. We don't use // buffers to do channel offsets, so we override that parameter // here. - if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { setConvertInfo(_mode, 0); } return true; @@ -492,14 +487,14 @@ error: return false; } -enum airtaudio::errorType airtaudio::api::Jack::closeStream() { - if (m_stream.state == STREAM_CLOSED) { +enum airtaudio::error airtaudio::api::Jack::closeStream() { + if (m_stream.state == airtaudio::state_closed) { ATA_ERROR("no open stream to close!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } JackHandle *handle = (JackHandle *) m_stream.apiHandle; if (handle != nullptr) { - if (m_stream.state == STREAM_RUNNING) { + if (m_stream.state == airtaudio::state_running) { jack_deactivate(handle->client); } jack_client_close(handle->client); @@ -524,18 +519,18 @@ enum airtaudio::errorType airtaudio::api::Jack::closeStream() { free(m_stream.deviceBuffer); m_stream.deviceBuffer = nullptr; } - m_stream.mode = UNINITIALIZED; - m_stream.state = STREAM_CLOSED; - return airtaudio::errorNone; + m_stream.mode = airtaudio::mode_unknow; + m_stream.state = airtaudio::state_closed; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Jack::startStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Jack::startStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_RUNNING) { + if (m_stream.state == airtaudio::state_running) { ATA_ERROR("the stream is already running!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } JackHandle *handle = (JackHandle *) m_stream.apiHandle; int32_t result = jack_activate(handle->client); @@ -545,8 +540,8 @@ enum airtaudio::errorType airtaudio::api::Jack::startStream() { } const char **ports; // Get the list of available ports. 
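Once the ports are registered, startStream() still has to wire them to the peer device's ports before audio flows. A reduced sketch of that wiring, with illustrative names and error handling collapsed to a bool:

#include <jack/jack.h>
#include <cstdlib>

bool connectOutputs(jack_client_t* client, jack_port_t** ourPorts,
                    const char* deviceName, unsigned int channels) {
	// Ports that receive audio from us are flagged JackPortIsInput on the peer.
	const char** ports = jack_get_ports(client, deviceName, nullptr, JackPortIsInput);
	if (ports == nullptr) {
		return false;
	}
	bool ok = true;
	for (unsigned int i = 0; i < channels && ports[i] != nullptr; ++i) {
		if (jack_connect(client, jack_port_name(ourPorts[i]), ports[i]) != 0) {
			ok = false;
			break;
		}
	}
	free(ports);
	return ok;
}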
- if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { result = 1; ports = jack_get_ports(handle->client, handle->deviceName[0].c_str(), nullptr, JackPortIsInput); if (ports == nullptr) { @@ -568,8 +563,8 @@ enum airtaudio::errorType airtaudio::api::Jack::startStream() { } free(ports); } - if ( m_stream.mode == INPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_input + || m_stream.mode == airtaudio::mode_duplex) { result = 1; ports = jack_get_ports(handle->client, handle->deviceName[1].c_str(), nullptr, JackPortIsOutput); if (ports == nullptr) { @@ -592,25 +587,25 @@ enum airtaudio::errorType airtaudio::api::Jack::startStream() { } handle->drainCounter = 0; handle->internalDrain = false; - m_stream.state = STREAM_RUNNING; + m_stream.state = airtaudio::state_running; unlock: if (result == 0) { - return airtaudio::errorNone; + return airtaudio::error_none; } - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } -enum airtaudio::errorType airtaudio::api::Jack::stopStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Jack::stopStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } JackHandle *handle = (JackHandle *) m_stream.apiHandle; - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { if (handle->drainCounter == 0) { handle->drainCounter = 2; std::unique_lock lck(m_stream.mutex); @@ -618,17 +613,17 @@ enum airtaudio::errorType airtaudio::api::Jack::stopStream() { } } jack_deactivate(handle->client); - m_stream.state = STREAM_STOPPED; - return airtaudio::errorNone; + m_stream.state = airtaudio::state_stopped; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Jack::abortStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Jack::abortStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } JackHandle *handle = (JackHandle *) m_stream.apiHandle; handle->drainCounter = 2; @@ -647,11 +642,11 @@ static void jackStopStream(void *_ptr) { } bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) { - if ( m_stream.state == STREAM_STOPPED - || m_stream.state == STREAM_STOPPING) { + if ( m_stream.state == airtaudio::state_stopped + || m_stream.state == airtaudio::state_stopping) { return true; } - if (m_stream.state == STREAM_CLOSED) { + if (m_stream.state == airtaudio::state_closed) { ATA_ERROR("RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!"); return false; } @@ -663,7 +658,7 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) { JackHandle *handle = (JackHandle *) m_stream.apiHandle; // Check if we were draining the stream and signal is finished. 
if (handle->drainCounter > 3) { - m_stream.state = STREAM_STOPPING; + m_stream.state = airtaudio::state_stopping; if (handle->internalDrain == true) { new std::thread(jackStopStream, info); } else { @@ -674,13 +669,13 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) { // Invoke user callback first, to get fresh output data. if (handle->drainCounter == 0) { double streamTime = getStreamTime(); - airtaudio::streamStatus status = 0; - if (m_stream.mode != INPUT && handle->xrun[0] == true) { - status |= OUTPUT_UNDERFLOW; + enum airtaudio::status status = airtaudio::status_ok; + if (m_stream.mode != airtaudio::mode_input && handle->xrun[0] == true) { + status |= airtaudio::status_underflow; handle->xrun[0] = false; } - if (m_stream.mode != OUTPUT && handle->xrun[1] == true) { - status |= INPUT_OVERFLOW; + if (m_stream.mode != airtaudio::mode_output && handle->xrun[1] == true) { + status |= airtaudio::mode_input_OVERFLOW; handle->xrun[1] = false; } int32_t cbReturnValue = info->callback(m_stream.userBuffer[0], @@ -689,7 +684,7 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) { streamTime, status); if (cbReturnValue == 2) { - m_stream.state = STREAM_STOPPING; + m_stream.state = airtaudio::state_stopping; handle->drainCounter = 2; new std::thread(jackStopStream, info); return true; @@ -701,7 +696,7 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) { } jack_default_audio_sample_t *jackbuffer; uint64_t bufferBytes = _nframes * sizeof(jack_default_audio_sample_t); - if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) { + if (m_stream.mode == airtaudio::mode_output || m_stream.mode == airtaudio::mode_duplex) { if (handle->drainCounter > 1) { // write zeros to the output stream for (uint32_t i=0; iports[0][i], (jack_nframes_t) _nframes); @@ -724,8 +719,8 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) { goto unlock; } } - if ( m_stream.mode == INPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_input + || m_stream.mode == airtaudio::mode_duplex) { if (m_stream.doConvertBuffer[1]) { for (uint32_t i=0; iports[1][i], (jack_nframes_t) _nframes); diff --git a/airtaudio/api/Jack.h b/airtaudio/api/Jack.h index 1e10de2..3441264 100644 --- a/airtaudio/api/Jack.h +++ b/airtaudio/api/Jack.h @@ -17,15 +17,15 @@ namespace airtaudio { public: Jack(); virtual ~Jack(); - airtaudio::api::type getCurrentApi() { - return airtaudio::api::UNIX_JACK; + enum airtaudio::type getCurrentApi() { + return airtaudio::type_jack; } uint32_t getDeviceCount(); airtaudio::DeviceInfo getDeviceInfo(uint32_t _device); - enum airtaudio::errorType closeStream(); - enum airtaudio::errorType startStream(); - enum airtaudio::errorType stopStream(); - enum airtaudio::errorType abortStream(); + enum airtaudio::error closeStream(); + enum airtaudio::error startStream(); + enum airtaudio::error stopStream(); + enum airtaudio::error abortStream(); long getStreamLatency(); // This function is intended for internal use only. 
It must be // public because it is called by the internal callback handler, // which is not a member of RtAudio. External use of this function @@ -34,7 +34,7 @@ namespace airtaudio { bool callbackEvent(uint64_t _nframes); private: bool probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, diff --git a/airtaudio/api/Oss.cpp b/airtaudio/api/Oss.cpp index 076e880..9cc84a4 100644 --- a/airtaudio/api/Oss.cpp +++ b/airtaudio/api/Oss.cpp @@ -49,7 +49,7 @@ airtaudio::api::Oss::Oss() { } airtaudio::api::Oss::~Oss() { - if (m_stream.state != STREAM_CLOSED) { + if (m_stream.state != airtaudio::state_closed) { closeStream(); } } @@ -102,20 +102,20 @@ airtaudio::DeviceInfo airtaudio::api::Oss::getDeviceInfo(uint32_t _device) { close(mixerfd); if (result == -1) { ATA_ERROR("error getting device (" << ainfo.name << ") info."); - error(airtaudio::errorWarning); + error(airtaudio::error_warning); return info; } // Probe channels - if (ainfo.caps & PCM_CAP_OUTPUT) { + if (ainfo.caps & PCM_CAP_OUTPUT) { info.outputChannels = ainfo.max_channels; } - if (ainfo.caps & PCM_CAP_INPUT) { + if (ainfo.caps & PCM_CAP_INPUT) { info.inputChannels = ainfo.max_channels; } - if (ainfo.caps & PCM_CAP_DUPLEX) { + if (ainfo.caps & PCM_CAP_DUPLEX) { if ( info.outputChannels > 0 && info.inputChannels > 0 - && ainfo.caps & PCM_CAP_DUPLEX) { + && ainfo.caps & PCM_CAP_DUPLEX) { info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels; } } @@ -215,11 +215,11 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, return false; } // Check if device supports input or output - if ( ( _mode == OUTPUT - && !(ainfo.caps & PCM_CAP_OUTPUT)) - || ( _mode == INPUT - && !(ainfo.caps & PCM_CAP_INPUT))) { - if (_mode == OUTPUT) { + if ( ( _mode == airtaudio::mode_output + && !(ainfo.caps & PCM_CAP_OUTPUT)) + || ( _mode == airtaudio::mode_input + && !(ainfo.caps & PCM_CAP_INPUT))) { + if (_mode == airtaudio::mode_output) { ATA_ERROR("device (" << ainfo.name << ") does not support output."); } else { ATA_ERROR("device (" << ainfo.name << ") does not support input."); @@ -228,15 +228,15 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, } int32_t flags = 0; OssHandle *handle = (OssHandle *) m_stream.apiHandle; - if (_mode == OUTPUT) { + if (_mode == airtaudio::mode_output) { flags |= O_WRONLY; - } else { // _mode == INPUT - if ( m_stream.mode == OUTPUT + } else { // _mode == airtaudio::mode_input + if ( m_stream.mode == airtaudio::mode_output && m_stream.device[0] == _device) { // We just set the same device for playback ... close and reopen for duplex (OSS only). close(handle->id[0]); handle->id[0] = 0; - if (!(ainfo.caps & PCM_CAP_DUPLEX)) { + if (!(ainfo.caps & PCM_CAP_DUPLEX)) { ATA_ERROR("device (" << ainfo.name << ") does not support duplex mode."); return false; } @@ -269,7 +269,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, // For duplex operation, specifically set this mode (this doesn't seem to work). /* if (flags | O_RDWR) { - result = ioctl(fd, SNDCTL_DSP_SETDUPLEX, nullptr); + result = ioctl(fd, SNDCTL_DSP_SETDUPLEX, nullptr); if (result == -1) { m_errorStream << "error setting duplex mode for device (" << ainfo.name << ")."; m_errorText = m_errorStream.str(); @@ -278,7 +278,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, } */ // Check the device channel support.
- m_stream.nUserChannels[_mode] = _channels; + m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; if (ainfo.max_channels < (int)(_channels + _firstChannel)) { close(fd); ATA_ERROR("the device (" << ainfo.name << ") does not support requested channel parameters."); @@ -293,7 +293,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, ATA_ERROR("error setting channel parameters on device (" << ainfo.name << ")."); return false; } - m_stream.nDeviceChannels[_mode] = deviceChannels; + m_stream.nDeviceChannels[modeToIdTable(_mode)] = deviceChannels; // Get the data format mask int32_t mask; result = ioctl(fd, SNDCTL_DSP_GETFMTS, &mask); @@ -305,69 +305,69 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, // Determine how to set the device format. m_stream.userFormat = _format; int32_t deviceFormat = -1; - m_stream.doByteSwap[_mode] = false; + m_stream.doByteSwap[modeToIdTable(_mode)] = false; if (_format == RTAUDIO_SINT8) { if (mask & AFMT_S8) { deviceFormat = AFMT_S8; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT8; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; } } else if (_format == RTAUDIO_SINT16) { if (mask & AFMT_S16_NE) { deviceFormat = AFMT_S16_NE; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT16; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; } else if (mask & AFMT_S16_OE) { deviceFormat = AFMT_S16_OE; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT16; - m_stream.doByteSwap[_mode] = true; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; + m_stream.doByteSwap[modeToIdTable(_mode)] = true; } } else if (_format == RTAUDIO_SINT24) { if (mask & AFMT_S24_NE) { deviceFormat = AFMT_S24_NE; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT24; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; } else if (mask & AFMT_S24_OE) { deviceFormat = AFMT_S24_OE; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT24; - m_stream.doByteSwap[_mode] = true; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; + m_stream.doByteSwap[modeToIdTable(_mode)] = true; } } else if (_format == RTAUDIO_SINT32) { if (mask & AFMT_S32_NE) { deviceFormat = AFMT_S32_NE; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT32; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; } else if (mask & AFMT_S32_OE) { deviceFormat = AFMT_S32_OE; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT32; - m_stream.doByteSwap[_mode] = true; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; + m_stream.doByteSwap[modeToIdTable(_mode)] = true; } } if (deviceFormat == -1) { // The user requested format is not natively supported by the device. 
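The SNDCTL_DSP_GETFMTS mask obtained above only reports which encodings the device advertises; the chosen format still has to be applied with a separate ioctl, and the driver may substitute another value. A minimal sketch, assuming the usual OSS SNDCTL_DSP_SETFMT call:

#include <sys/ioctl.h>
#include <sys/soundcard.h>

bool setOssFormat(int fd, int wanted) {
	int fmt = wanted;
	if (ioctl(fd, SNDCTL_DSP_SETFMT, &fmt) == -1) {
		return false;            // the ioctl itself failed
	}
	return fmt == wanted;        // driver may have chosen a different format
}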
if (mask & AFMT_S16_NE) { deviceFormat = AFMT_S16_NE; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT16; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; } else if (mask & AFMT_S32_NE) { deviceFormat = AFMT_S32_NE; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT32; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; } else if (mask & AFMT_S24_NE) { deviceFormat = AFMT_S24_NE; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT24; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; } else if (mask & AFMT_S16_OE) { deviceFormat = AFMT_S16_OE; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT16; - m_stream.doByteSwap[_mode] = true; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16; + m_stream.doByteSwap[modeToIdTable(_mode)] = true; } else if (mask & AFMT_S32_OE) { deviceFormat = AFMT_S32_OE; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT32; - m_stream.doByteSwap[_mode] = true; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32; + m_stream.doByteSwap[modeToIdTable(_mode)] = true; } else if (mask & AFMT_S24_OE) { deviceFormat = AFMT_S24_OE; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT24; - m_stream.doByteSwap[_mode] = true; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24; + m_stream.doByteSwap[modeToIdTable(_mode)] = true; } else if (mask & AFMT_S8) { deviceFormat = AFMT_S8; - m_stream.deviceFormat[_mode] = RTAUDIO_SINT8; + m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8; } } - if (m_stream.deviceFormat[_mode] == 0) { + if (m_stream.deviceFormat[modeToIdTable(_mode)] == 0) { // This really shouldn't happen ... close(fd); ATA_ERROR("device (" << ainfo.name << ") data format not supported by RtAudio."); @@ -389,7 +389,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM. // We'll check the actual value used near the end of the setup // procedure. - int32_t ossBufferBytes = *_bufferSize * formatBytes(m_stream.deviceFormat[_mode]) * deviceChannels; + int32_t ossBufferBytes = *_bufferSize * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels; if (ossBufferBytes < 16) { ossBufferBytes = 16; } @@ -398,7 +398,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, buffers = _options->numberOfBuffers; } if ( _options != nullptr - && _options->flags & RTAUDIO_MINIMIZE_LATENCY) { + && _options->flags.m_minimizeLatency == true) { buffers = 2; } if (buffers < 2) { @@ -413,7 +413,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, } m_stream.nBuffers = buffers; // Save buffer size (in sample frames). - *_bufferSize = ossBufferBytes / (formatBytes(m_stream.deviceFormat[_mode]) * deviceChannels); + *_bufferSize = ossBufferBytes / (formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels); m_stream.bufferSize = *_bufferSize; // Set the sample rate. int32_t srate = _sampleRate; @@ -430,30 +430,26 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, return false; } m_stream.sampleRate = _sampleRate; - if ( _mode == INPUT - && m_stream._mode == OUTPUT + if ( _mode == airtaudio::mode_input + && m_stream._mode == airtaudio::mode_output && m_stream.device[0] == _device) { // We're doing duplex setup here. m_stream.deviceFormat[0] = m_stream.deviceFormat[1]; m_stream.nDeviceChannels[0] = deviceChannels; } // Set interleaving parameters. 
- m_stream.userInterleaved = true; - m_stream.deviceInterleaved[_mode] = true; - if (_options && _options->flags & RTAUDIO_NONINTERLEAVED) { - m_stream.userInterleaved = false; - } + m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; // Set flags for buffer conversion - m_stream.doConvertBuffer[_mode] = false; - if (m_stream.userFormat != m_stream.deviceFormat[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; + if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) { - m_stream.doConvertBuffer[_mode] = true; + if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } - if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode] - && m_stream.nUserChannels[_mode] > 1) { - m_stream.doConvertBuffer[_mode] = true; + if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false + && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) { + m_stream.doConvertBuffer[modeToIdTable(_mode)] = true; } // Allocate the stream handles if necessary and then save. if (m_stream.apiHandle == 0) { @@ -466,20 +462,20 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, } else { handle = (OssHandle *) m_stream.apiHandle; } - handle->id[_mode] = fd; + handle->id[modeToIdTable(_mode)] = fd; // Allocate necessary internal buffers. uint64_t bufferBytes; - bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat); - m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[_mode] == nullptr) { + bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat); + m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating user buffer memory."); goto error; } - if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { bool makeBuffer = true; - bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]); - if (_mode == INPUT) { - if ( m_stream._mode == OUTPUT + bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]); + if (_mode == airtaudio::mode_input) { + if ( m_stream._mode == airtaudio::mode_output && m_stream.deviceBuffer) { uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]); if (bufferBytes <= bytesOut) { @@ -499,16 +495,16 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device, } } } - m_stream.device[_mode] = _device; - m_stream.state = STREAM_STOPPED; + m_stream.device[modeToIdTable(_mode)] = _device; + m_stream.state = airtaudio::state_stopped; // Setup the buffer conversion information structure. - if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { setConvertInfo(_mode, _firstChannel); } // Setup thread if necessary. - if (m_stream.mode == OUTPUT && _mode == INPUT) { + if (m_stream.mode == airtaudio::mode_output && _mode == airtaudio::mode_input) { // We had already set up an output stream. 
- m_stream.mode = DUPLEX; + m_stream.mode = airtaudio::mode_duplex; if (m_stream.device[0] == _device) { handle->id[0] = fd; } @@ -549,26 +545,26 @@ error: return false; } -enum airtaudio::errorType airtaudio::api::Oss::closeStream() { - if (m_stream.state == STREAM_CLOSED) { +enum airtaudio::error airtaudio::api::Oss::closeStream() { + if (m_stream.state == airtaudio::state_closed) { ATA_ERROR("no open stream to close!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } OssHandle *handle = (OssHandle *) m_stream.apiHandle; m_stream.callbackInfo.isRunning = false; m_stream.mutex.lock(); - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { handle->runnable.notify_one(); } m_stream.mutex.unlock(); m_stream.callbackInfo.thread->join(); - if (m_stream.state == STREAM_RUNNING) { - if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) { + if (m_stream.state == airtaudio::state_running) { + if (m_stream.mode == airtaudio::mode_output || m_stream.mode == airtaudio::mode_duplex) { ioctl(handle->id[0], SNDCTL_DSP_HALT, 0); } else { ioctl(handle->id[1], SNDCTL_DSP_HALT, 0); } - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; } if (handle) { if (handle->id[0]) { @@ -590,21 +586,21 @@ enum airtaudio::errorType airtaudio::api::Oss::closeStream() { free(m_stream.deviceBuffer); m_stream.deviceBuffer = 0; } - m_stream.mode = UNINITIALIZED; - m_stream.state = STREAM_CLOSED; - return airtaudio::errorNone; + m_stream.mode = airtaudio::mode_unknow; + m_stream.state = airtaudio::state_closed; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Oss::startStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Oss::startStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_RUNNING) { + if (m_stream.state == airtaudio::state_running) { ATA_ERROR("the stream is already running!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } m_stream.mutex.lock(); - m_stream.state = STREAM_RUNNING; + m_stream.state = airtaudio::state_running; // No need to do anything else here ... OSS automatically starts // when fed samples. m_stream.mutex.unlock(); @@ -612,24 +608,24 @@ enum airtaudio::errorType airtaudio::api::Oss::startStream() { handle->runnable.notify_one(); } -enum airtaudio::errorType airtaudio::api::Oss::stopStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Oss::stopStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); return; } m_stream.mutex.lock(); // The state might change while waiting on a mutex. - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { m_stream.mutex.unlock(); return; } int32_t result = 0; OssHandle *handle = (OssHandle *) m_stream.apiHandle; - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { // Flush the output with zeros a few times. 
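The flush that follows simply queues a few buffers of zeros before halting the device, so the last real samples are not cut off mid-buffer. A stand-alone sketch with illustrative sizes (the real code derives them from the stream state):

#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>
#include <vector>

void flushAndHalt(int fd, size_t bytesPerBuffer, int passes) {
	std::vector<char> silence(bytesPerBuffer, 0);
	for (int i = 0; i < passes; ++i) {
		// Queue a few buffers of zeros so the device drains cleanly.
		if (write(fd, silence.data(), silence.size()) == -1) {
			break;
		}
	}
	ioctl(fd, SNDCTL_DSP_HALT, 0);  // stop the device (OSS v4 ioctl, as used above)
}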
char *buffer; int32_t samples; @@ -648,7 +644,7 @@ enum airtaudio::errorType airtaudio::api::Oss::stopStream() { result = write(handle->id[0], buffer, samples * formatBytes(format)); if (result == -1) { ATA_ERROR("audio write error."); - return airtaudio::errorWarning; + return airtaudio::error_warning; } } result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0); @@ -658,8 +654,8 @@ enum airtaudio::errorType airtaudio::api::Oss::stopStream() { } handle->triggered = false; } - if ( m_stream.mode == INPUT - || ( m_stream.mode == DUPLEX + if ( m_stream.mode == airtaudio::mode_input + || ( m_stream.mode == airtaudio::mode_duplex && handle->id[0] != handle->id[1])) { result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0); if (result == -1) { @@ -668,31 +664,31 @@ enum airtaudio::errorType airtaudio::api::Oss::stopStream() { } } unlock: - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; m_stream.mutex.unlock(); if (result != -1) { - return airtaudio::errorNone; + return airtaudio::error_none; } - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } -enum airtaudio::errorType airtaudio::api::Oss::abortStream() { - if (verifyStream() != airtaudio::errorNone) { - return airtaudio::errorFail; +enum airtaudio::error airtaudio::api::Oss::abortStream() { + if (verifyStream() != airtaudio::error_none) { + return airtaudio::error_fail; } - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } m_stream.mutex.lock(); // The state might change while waiting on a mutex. - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { m_stream.mutex.unlock(); return; } int32_t result = 0; OssHandle *handle = (OssHandle *) m_stream.apiHandle; - if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) { + if (m_stream.mode == airtaudio::mode_output || m_stream.mode == airtaudio::mode_duplex) { result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0); if (result == -1) { ATA_ERROR("system error stopping callback procedure on device (" << m_stream.device[0] << ")."); @@ -700,7 +696,7 @@ enum airtaudio::errorType airtaudio::api::Oss::abortStream() { } handle->triggered = false; } - if (m_stream.mode == INPUT || (m_stream.mode == DUPLEX && handle->id[0] != handle->id[1])) { + if (m_stream.mode == airtaudio::mode_input || (m_stream.mode == airtaudio::mode_duplex && handle->id[0] != handle->id[1])) { result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0); if (result == -1) { ATA_ERROR("system error stopping input callback procedure on device (" << m_stream.device[0] << ")."); @@ -708,39 +704,39 @@ enum airtaudio::errorType airtaudio::api::Oss::abortStream() { } } unlock: - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; m_stream.mutex.unlock(); if (result != -1) { - return airtaudio::errorNone; + return airtaudio::error_none; } - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } void airtaudio::api::Oss::callbackEvent() { OssHandle *handle = (OssHandle *) m_stream.apiHandle; - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { std::unique_lock lck(m_stream.mutex); handle->runnable.wait(lck); - if (m_stream.state != STREAM_RUNNING) { + if (m_stream.state != airtaudio::state_running) { return; } } - if (m_stream.state == STREAM_CLOSED) { + if (m_stream.state == airtaudio::state_closed) { ATA_ERROR("the stream is 
closed ... this shouldn't happen!"); - return airtaudio::errorWarning; + return; } // Invoke user callback to get fresh output data. int32_t doStopStream = 0; double streamTime = getStreamTime(); - rtaudio::streamStatus status = 0; + enum airtaudio::status status = airtaudio::status_ok; - if ( m_stream.mode != INPUT + if ( m_stream.mode != airtaudio::mode_input && handle->xrun[0] == true) { - status |= RTAUDIO_OUTPUT_UNDERFLOW; + status = airtaudio::status_underflow; handle->xrun[0] = false; } - if ( m_stream.mode != OUTPUT + if ( m_stream.mode != airtaudio::mode_output && handle->xrun[1] == true) { - status |= RTAUDIO_INPUT_OVERFLOW; + status = airtaudio::status_overflow; handle->xrun[1] = false; } doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[0], @@ -754,15 +750,15 @@ void airtaudio::api::Oss::callbackEvent() { } m_stream.mutex.lock(); // The state might change while waiting on a mutex. - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { goto unlock; } int32_t result; char *buffer; int32_t samples; audio::format format; - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { // Setup parameters and do buffer conversion if necessary. if (m_stream.doConvertBuffer[0]) { buffer = m_stream.deviceBuffer; @@ -778,12 +774,12 @@ void airtaudio::api::Oss::callbackEvent() { if (m_stream.doByteSwap[0]) { byteSwapBuffer(buffer, samples, format); } - if ( m_stream.mode == DUPLEX + if ( m_stream.mode == airtaudio::mode_duplex && handle->triggered == false) { int32_t trig = 0; ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig); result = write(handle->id[0], buffer, samples * formatBytes(format)); trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT; ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig); handle->triggered = true; } else { @@ -795,12 +791,12 @@ void airtaudio::api::Oss::callbackEvent() { // specific means for determining that. handle->xrun[0] = true; ATA_ERROR("audio write error."); - //error(airtaudio::errorWarning); + //error(airtaudio::error_warning); // Continue on to input section. } } - if ( m_stream.mode == INPUT - || m_stream.mode == DUPLEX) { + if ( m_stream.mode == airtaudio::mode_input + || m_stream.mode == airtaudio::mode_duplex) { // Setup parameters. if (m_stream.doConvertBuffer[1]) { buffer = m_stream.deviceBuffer; diff --git a/airtaudio/api/Oss.h b/airtaudio/api/Oss.h index 1258e2a..a27013d 100644 --- a/airtaudio/api/Oss.h +++ b/airtaudio/api/Oss.h @@ -17,15 +17,15 @@ namespace airtaudio { public: Oss(); virtual ~Oss(); - airtaudio::api::type getCurrentApi() { - return airtaudio::api::LINUX_OSS; + enum airtaudio::type getCurrentApi() { + return airtaudio::type_oss; } uint32_t getDeviceCount(); airtaudio::DeviceInfo getDeviceInfo(uint32_t _device); - enum airtaudio::errorType closeStream(); - enum airtaudio::errorType startStream(); - enum airtaudio::errorType stopStream(); - enum airtaudio::errorType abortStream(); + enum airtaudio::error closeStream(); + enum airtaudio::error startStream(); + enum airtaudio::error stopStream(); + enum airtaudio::error abortStream(); // This function is intended for internal use only. It must be // public because it is called by the internal callback handler, // which is not a member of RtAudio.
External use of this function @@ -33,7 +33,7 @@ namespace airtaudio { void callbackEvent(); private: bool probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, diff --git a/airtaudio/api/Pulse.cpp b/airtaudio/api/Pulse.cpp index 03eab71..7e4b194 100644 --- a/airtaudio/api/Pulse.cpp +++ b/airtaudio/api/Pulse.cpp @@ -65,7 +65,7 @@ struct PulseAudioHandle { }; airtaudio::api::Pulse::~Pulse() { - if (m_stream.state != STREAM_CLOSED) { + if (m_stream.state != airtaudio::state_closed) { closeStream(); } } @@ -101,12 +101,12 @@ static void pulseaudio_callback(void* _user) { } } -enum airtaudio::errorType airtaudio::api::Pulse::closeStream() { +enum airtaudio::error airtaudio::api::Pulse::closeStream() { PulseAudioHandle *pah = static_cast<PulseAudioHandle*>(m_stream.apiHandle); m_stream.callbackInfo.isRunning = false; if (pah) { m_stream.mutex.lock(); - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { pah->runnable = true; pah->runnable_cv.notify_one();; } @@ -130,31 +130,31 @@ enum airtaudio::errorType airtaudio::api::Pulse::closeStream() { free(m_stream.userBuffer[1]); m_stream.userBuffer[1] = nullptr; } - m_stream.state = STREAM_CLOSED; - m_stream.mode = UNINITIALIZED; - return airtaudio::errorNone; + m_stream.state = airtaudio::state_closed; + m_stream.mode = airtaudio::mode_unknow; + return airtaudio::error_none; } void airtaudio::api::Pulse::callbackEvent() { PulseAudioHandle *pah = static_cast<PulseAudioHandle*>(m_stream.apiHandle); - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { std::unique_lock<std::mutex> lck(m_stream.mutex); while (!pah->runnable) { pah->runnable_cv.wait(lck); } - if (m_stream.state != STREAM_RUNNING) { + if (m_stream.state != airtaudio::state_running) { m_stream.mutex.unlock(); return; } } - if (m_stream.state == STREAM_CLOSED) { + if (m_stream.state == airtaudio::state_closed) { ATA_ERROR("the stream is closed ... this shouldn't happen!"); return; } double streamTime = getStreamTime(); - airtaudio::streamStatus status = 0; - int32_t doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[OUTPUT], - m_stream.userBuffer[INPUT], + enum airtaudio::status status = airtaudio::status_ok; + int32_t doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[airtaudio::mode_output], + m_stream.userBuffer[airtaudio::mode_input], m_stream.bufferSize, streamTime, status); @@ -163,42 +163,42 @@ void airtaudio::api::Pulse::callbackEvent() { return; } m_stream.mutex.lock(); - void *pulse_in = m_stream.doConvertBuffer[INPUT] ? m_stream.deviceBuffer : m_stream.userBuffer[INPUT]; - void *pulse_out = m_stream.doConvertBuffer[OUTPUT] ? m_stream.deviceBuffer : m_stream.userBuffer[OUTPUT]; - if (m_stream.state != STREAM_RUNNING) { + void *pulse_in = m_stream.doConvertBuffer[airtaudio::mode_input] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::mode_input]; + void *pulse_out = m_stream.doConvertBuffer[airtaudio::mode_output] ?
m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::mode_output]; + if (m_stream.state != airtaudio::state_running) { goto unlock; } int32_t pa_error; size_t bytes; - if ( m_stream.mode == OUTPUT - || m_stream.mode == DUPLEX) { - if (m_stream.doConvertBuffer[OUTPUT]) { + if ( m_stream.mode == airtaudio::mode_output + || m_stream.mode == airtaudio::mode_duplex) { + if (m_stream.doConvertBuffer[airtaudio::mode_output]) { convertBuffer(m_stream.deviceBuffer, - m_stream.userBuffer[OUTPUT], - m_stream.convertInfo[OUTPUT]); - bytes = m_stream.nDeviceChannels[OUTPUT] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[OUTPUT]); + m_stream.userBuffer[airtaudio::mode_output], + m_stream.convertInfo[airtaudio::mode_output]); + bytes = m_stream.nDeviceChannels[airtaudio::mode_output] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[airtaudio::mode_output]); } else { - bytes = m_stream.nUserChannels[OUTPUT] * m_stream.bufferSize * formatBytes(m_stream.userFormat); + bytes = m_stream.nUserChannels[airtaudio::mode_output] * m_stream.bufferSize * formatBytes(m_stream.userFormat); } if (pa_simple_write(pah->s_play, pulse_out, bytes, &pa_error) < 0) { ATA_ERROR("audio write error, " << pa_strerror(pa_error) << "."); return; } } - if (m_stream.mode == INPUT || m_stream.mode == DUPLEX) { - if (m_stream.doConvertBuffer[INPUT]) { - bytes = m_stream.nDeviceChannels[INPUT] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[INPUT]); + if (m_stream.mode == airtaudio::mode_input || m_stream.mode == airtaudio::mode_duplex) { + if (m_stream.doConvertBuffer[airtaudio::mode_input]) { + bytes = m_stream.nDeviceChannels[airtaudio::mode_input] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[airtaudio::mode_input]); } else { - bytes = m_stream.nUserChannels[INPUT] * m_stream.bufferSize * formatBytes(m_stream.userFormat); + bytes = m_stream.nUserChannels[airtaudio::mode_input] * m_stream.bufferSize * formatBytes(m_stream.userFormat); } if (pa_simple_read(pah->s_rec, pulse_in, bytes, &pa_error) < 0) { ATA_ERROR("audio read error, " << pa_strerror(pa_error) << "."); return; } - if (m_stream.doConvertBuffer[INPUT]) { - convertBuffer(m_stream.userBuffer[INPUT], + if (m_stream.doConvertBuffer[airtaudio::mode_input]) { + convertBuffer(m_stream.userBuffer[airtaudio::mode_input], m_stream.deviceBuffer, - m_stream.convertInfo[INPUT]); + m_stream.convertInfo[airtaudio::mode_input]); } } unlock: @@ -211,76 +211,76 @@ unlock: return; } -enum airtaudio::errorType airtaudio::api::Pulse::startStream() { +enum airtaudio::error airtaudio::api::Pulse::startStream() { PulseAudioHandle *pah = static_cast<PulseAudioHandle*>(m_stream.apiHandle); - if (m_stream.state == STREAM_CLOSED) { + if (m_stream.state == airtaudio::state_closed) { ATA_ERROR("the stream is not open!"); - return airtaudio::errorInvalidUse; + return airtaudio::error_invalidUse; } - if (m_stream.state == STREAM_RUNNING) { + if (m_stream.state == airtaudio::state_running) { ATA_ERROR("the stream is already running!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } m_stream.mutex.lock(); - m_stream.state = STREAM_RUNNING; + m_stream.state = airtaudio::state_running; pah->runnable = true; pah->runnable_cv.notify_one(); m_stream.mutex.unlock(); - return airtaudio::errorNone; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Pulse::stopStream() { +enum airtaudio::error airtaudio::api::Pulse::stopStream() { PulseAudioHandle *pah = static_cast<PulseAudioHandle*>(m_stream.apiHandle); - if (m_stream.state == STREAM_CLOSED) { + if
(m_stream.state == airtaudio::state_closed) { ATA_ERROR("the stream is not open!"); - return airtaudio::errorInvalidUse; + return airtaudio::error_invalidUse; } - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; m_stream.mutex.lock(); if (pah && pah->s_play) { int32_t pa_error; if (pa_simple_drain(pah->s_play, &pa_error) < 0) { ATA_ERROR("error draining output device, " << pa_strerror(pa_error) << "."); m_stream.mutex.unlock(); - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } } - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; m_stream.mutex.unlock(); - return airtaudio::errorNone; + return airtaudio::error_none; } -enum airtaudio::errorType airtaudio::api::Pulse::abortStream() { +enum airtaudio::error airtaudio::api::Pulse::abortStream() { PulseAudioHandle *pah = static_cast<PulseAudioHandle*>(m_stream.apiHandle); - if (m_stream.state == STREAM_CLOSED) { + if (m_stream.state == airtaudio::state_closed) { ATA_ERROR("the stream is not open!"); - return airtaudio::errorInvalidUse; + return airtaudio::error_invalidUse; } - if (m_stream.state == STREAM_STOPPED) { + if (m_stream.state == airtaudio::state_stopped) { ATA_ERROR("the stream is already stopped!"); - return airtaudio::errorWarning; + return airtaudio::error_warning; } - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; m_stream.mutex.lock(); if (pah && pah->s_play) { int32_t pa_error; if (pa_simple_flush(pah->s_play, &pa_error) < 0) { ATA_ERROR("error flushing output device, " << pa_strerror(pa_error) << "."); m_stream.mutex.unlock(); - return airtaudio::errorSystemError; + return airtaudio::error_systemError; } } - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; m_stream.mutex.unlock(); - return airtaudio::errorNone; + return airtaudio::error_none; } bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, @@ -293,7 +293,7 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device, if (_device != 0) { return false; } - if (_mode != INPUT && _mode != OUTPUT) { + if (_mode != airtaudio::mode_input && _mode != airtaudio::mode_output) { return false; } if (_channels != 1 && _channels != 2) { @@ -332,33 +332,27 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device, ATA_ERROR("unsupported sample format."); return false; } - // Set interleaving parameters.
- if (_options && _options->flags & NONINTERLEAVED) { - m_stream.userInterleaved = false; - } else { - m_stream.userInterleaved = true; - } - m_stream.deviceInterleaved[_mode] = true; + m_stream.deviceInterleaved[modeToIdTable(_mode)] = true; m_stream.nBuffers = 1; - m_stream.doByteSwap[_mode] = false; - m_stream.doConvertBuffer[_mode] = _channels > 1 && !m_stream.userInterleaved; - m_stream.deviceFormat[_mode] = m_stream.userFormat; - m_stream.nUserChannels[_mode] = _channels; - m_stream.nDeviceChannels[_mode] = _channels + _firstChannel; - m_stream.channelOffset[_mode] = 0; + m_stream.doByteSwap[modeToIdTable(_mode)] = false; + m_stream.doConvertBuffer[modeToIdTable(_mode)] = false; + m_stream.deviceFormat[modeToIdTable(_mode)] = m_stream.userFormat; + m_stream.nUserChannels[modeToIdTable(_mode)] = _channels; + m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel; + m_stream.channelOffset[modeToIdTable(_mode)] = 0; // Allocate necessary internal buffers. - bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat); - m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1); - if (m_stream.userBuffer[_mode] == nullptr) { + bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat); + m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1); + if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) { ATA_ERROR("error allocating user buffer memory."); goto error; } m_stream.bufferSize = *_bufferSize; - if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { bool makeBuffer = true; - bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]); - if (_mode == INPUT) { - if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) { + bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]); + if (_mode == airtaudio::mode_input) { + if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) { uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]); if (bufferBytes <= bytesOut) makeBuffer = false; } @@ -373,9 +367,9 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device, } } } - m_stream.device[_mode] = _device; + m_stream.device[modeToIdTable(_mode)] = _device; // Setup the buffer conversion information structure. 
- if (m_stream.doConvertBuffer[_mode]) { + if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) { setConvertInfo(_mode, _firstChannel); } if (!m_stream.apiHandle) { @@ -389,14 +383,14 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device, pah = static_cast<PulseAudioHandle*>(m_stream.apiHandle); int32_t error; switch (_mode) { - case INPUT: + case airtaudio::mode_input: pah->s_rec = pa_simple_new(nullptr, "airtAudio", PA_STREAM_RECORD, nullptr, "Record", &ss, nullptr, nullptr, &error); if (!pah->s_rec) { ATA_ERROR("error connecting input to PulseAudio server."); goto error; } break; - case OUTPUT: + case airtaudio::mode_output: pah->s_play = pa_simple_new(nullptr, "airtAudio", PA_STREAM_PLAYBACK, nullptr, "Playback", &ss, nullptr, nullptr, &error); if (!pah->s_play) { ATA_ERROR("error connecting output to PulseAudio server."); @@ -406,12 +400,12 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device, default: goto error; } - if (m_stream.mode == UNINITIALIZED) { + if (m_stream.mode == airtaudio::mode_unknow) { m_stream.mode = _mode; } else if (m_stream.mode == _mode) { goto error; }else { - m_stream.mode = DUPLEX; + m_stream.mode = airtaudio::mode_duplex; } if (!m_stream.callbackInfo.isRunning) { m_stream.callbackInfo.object = this; @@ -422,7 +416,7 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device, goto error; } } - m_stream.state = STREAM_STOPPED; + m_stream.state = airtaudio::state_stopped; return true; error: if (pah && m_stream.callbackInfo.isRunning) { diff --git a/airtaudio/api/Pulse.h b/airtaudio/api/Pulse.h index 2eebe82..79c8c2a 100644 --- a/airtaudio/api/Pulse.h +++ b/airtaudio/api/Pulse.h @@ -16,15 +16,15 @@ namespace airtaudio { static airtaudio::Api* Create(); public: virtual ~Pulse(); - airtaudio::api::type getCurrentApi() { - return airtaudio::api::LINUX_PULSE; + enum airtaudio::type getCurrentApi() { + return airtaudio::type_pulse; } uint32_t getDeviceCount(); airtaudio::DeviceInfo getDeviceInfo(uint32_t _device); - enum airtaudio::errorType closeStream(); - enum airtaudio::errorType startStream(); - enum airtaudio::errorType stopStream(); - enum airtaudio::errorType abortStream(); + enum airtaudio::error closeStream(); + enum airtaudio::error startStream(); + enum airtaudio::error stopStream(); + enum airtaudio::error abortStream(); // This function is intended for internal use only. It must be // public because it is called by the internal callback handler, // which is not a member of RtAudio.
External use of this function @@ -34,7 +34,7 @@ namespace airtaudio { std::vector<airtaudio::DeviceInfo> m_devices; void saveDeviceInfo(); bool probeDeviceOpen(uint32_t _device, - airtaudio::api::StreamMode _mode, + airtaudio::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, diff --git a/airtaudio/base.cpp b/airtaudio/base.cpp index 82e8f24..b0a86bf 100644 --- a/airtaudio/base.cpp +++ b/airtaudio/base.cpp @@ -5,101 +5,3 @@ * * @license like MIT (see license file) */ -#if 0 -#include - -std::ostream& airtaudio::operator <<(std::ostream& _os, enum errorType _obj) { - switch(_obj) { - case errorNone: - _os << "errorNone"; - break; - case errorFail: - _os << "errorFail"; - break; - case errorWarning: - _os << "errorWarning"; - break; - case errorInputNull: - _os << "errorInputNull"; - break; - case errorInvalidUse: - _os << "errorInvalidUse"; - break; - case errorSystemError: - _os << "errorSystemError"; - break; - default: - _os << "UNKNOW..."; - break; - } - return _os; -} - -std::ostream& airtaudio::operator <<(std::ostream& _os, const audio::format& _obj) { - switch(_obj) { - case SINT8: - _os << "SINT8"; - break; - case SINT16: - _os << "SINT16"; - break; - case SINT24: - _os << "SINT24"; - break; - case SINT32: - _os << "SINT32"; - break; - case FLOAT32: - _os << "FLOAT32"; - break; - case FLOAT64: - _os << "FLOAT64"; - break; - default: - _os << "UNKNOW..."; - break; - } - return _os; -} - -std::ostream& airtaudio::operator <<(std::ostream& _os, const airtaudio::streamFlags& _obj) { - switch(_obj) { - case NONINTERLEAVED: - _os << "NONINTERLEAVED"; - break; - case MINIMIZE_LATENCY: - _os << "MINIMIZE_LATENCY"; - break; - case HOG_DEVICE: - _os << "HOG_DEVICE"; - break; - case SCHEDULE_REALTIME: - _os << "SCHEDULE_REALTIME"; - break; - case ALSA_USE_DEFAULT: - _os << "ALSA_USE_DEFAULT"; - break; - default: - _os << "UNKNOW..."; - break; - } - return _os; -} - -std::ostream& airtaudio::operator <<(std::ostream& _os, const airtaudio::streamStatus& _obj) { - switch(_obj) { - case INPUT_OVERFLOW: - _os << "INPUT_OVERFLOW"; - break; - case OUTPUT_UNDERFLOW: - _os << "OUTPUT_UNDERFLOW"; - break; - default: - _os << "UNKNOW..."; - break; - } - return _os; -} - - -#endif diff --git a/airtaudio/base.h b/airtaudio/base.h index e568a44..9ce5f8c 100644 --- a/airtaudio/base.h +++ b/airtaudio/base.h @@ -27,87 +27,29 @@ //#include namespace airtaudio { - //! Defined RtError types. - enum errorType { - errorNone, //!< No error - errorFail, //!< An error occure in the operation - errorWarning, //!< A non-critical error. - errorInputNull, //!< null input or internal errror - errorInvalidUse, //!< The function was called incorrectly. - errorSystemError //!< A system error occured. + //! Defined error types. + enum error { + error_none, //!< No error + error_fail, //!< An error occurred in the operation + error_warning, //!< A non-critical error. + error_inputNull, //!< Null input or internal error + error_invalidUse, //!< The function was called incorrectly. + error_systemError //!< A system error occurred. }; - /** - * @typedef typedef uint64_t streamFlags; - * @brief RtAudio stream option flags. - * - * The following flags can be OR'ed together to allow a client to - * make changes to the default stream behavior: - * - * - \e NONINTERLEAVED: Use non-interleaved buffers (default = interleaved). - * - \e MINIMIZE_LATENCY: Attempt to set stream parameters for lowest possible latency. - * - \e HOG_DEVICE: Attempt grab device for exclusive use.
- * - \e ALSA_USE_DEFAULT: Use the "default" PCM device (ALSA only). - * - * By default, RtAudio streams pass and receive audio data from the - * client in an interleaved format. By passing the - * RTAUDIO_NONINTERLEAVED flag to the openStream() function, audio - * data will instead be presented in non-interleaved buffers. In - * this case, each buffer argument in the RtAudioCallback function - * will point to a single array of data, with \c nFrames samples for - * each channel concatenated back-to-back. For example, the first - * sample of data for the second channel would be located at index \c - * nFrames (assuming the \c buffer pointer was recast to the correct - * data type for the stream). - * - * Certain audio APIs offer a number of parameters that influence the - * I/O latency of a stream. By default, RtAudio will attempt to set - * these parameters internally for robust (glitch-free) performance - * (though some APIs, like Windows Direct Sound, make this difficult). - * By passing the RTAUDIO_MINIMIZE_LATENCY flag to the openStream() - * function, internal stream settings will be influenced in an attempt - * to minimize stream latency, though possibly at the expense of stream - * performance. - * - * If the RTAUDIO_HOG_DEVICE flag is set, RtAudio will attempt to - * open the input and/or output stream device(s) for exclusive use. - * Note that this is not possible with all supported audio APIs. - * - * If the RTAUDIO_SCHEDULE_REALTIME flag is set, RtAudio will attempt - * to select realtime scheduling (round-robin) for the callback thread. - * - * If the RTAUDIO_ALSA_USE_DEFAULT flag is set, RtAudio will attempt to - * open the "default" PCM device when using the ALSA API. Note that this - * will override any specified input or output device id. - */ - typedef uint32_t streamFlags; - static const streamFlags NONINTERLEAVED = 0x1; // Use non-interleaved buffers (default = interleaved). - static const streamFlags MINIMIZE_LATENCY = 0x2; // Attempt to set stream parameters for lowest possible latency. - static const streamFlags HOG_DEVICE = 0x4; // Attempt grab device and prevent use by others. - static const streamFlags SCHEDULE_REALTIME = 0x8; // Try to select realtime scheduling for callback thread. - static const streamFlags ALSA_USE_DEFAULT = 0x10; // Use the "default" PCM device (ALSA only). - - /** - * @brief Debug operator To display the curent element in a Human redeable information - */ - //std::ostream& operator <<(std::ostream& _os, const airtaudio::streamFlags& _obj); - - /** - * @typedef typedef uint64_t rtaudio::streamStatus; - * @brief RtAudio stream status (over- or underflow) flags. - * - * Notification of a stream over- or underflow is indicated by a - * non-zero stream \c status argument in the RtAudioCallback function. - * The stream status can be one of the following two options, - * depending on whether the stream is open for output and/or input: - * - * - \e RTAUDIO_INPUT_OVERFLOW: Input data was discarded because of an overflow condition at the driver. - * - \e RTAUDIO_OUTPUT_UNDERFLOW: The output buffer ran low, likely producing a break in the output sound. - */ - typedef uint32_t streamStatus; - static const streamStatus INPUT_OVERFLOW = 0x1; // Input data was discarded because of an overflow condition at the driver. - static const streamStatus OUTPUT_UNDERFLOW = 0x2; // The output buffer ran low, likely causing a gap in the output sound. - + class Flags { + public: + bool m_minimizeLatency; // Simple example ==> TODO ... 
+ Flags() : + m_minimizeLatency(false) { + // nothing to do ... + } + }; + enum status { + status_ok, //!< No error (nothing to report) + status_overflow, //!< The internal buffer has more data than it can accept + status_underflow //!< The internal buffer is empty + }; /** * @brief RtAudio callback function prototype. * * @@ -149,7 +91,7 @@ namespace airtaudio { void* _inputBuffer, uint32_t _nFrames, double _streamTime, - airtaudio::streamStatus _status)> AirTAudioCallback; + airtaudio::status _status)> AirTAudioCallback; } #include diff --git a/lutin_airtaudio.py b/lutin_airtaudio.py index 42e3132..9994c7c 100644 --- a/lutin_airtaudio.py +++ b/lutin_airtaudio.py @@ -17,14 +17,15 @@ def create(target): 'airtaudio/Api.cpp', 'airtaudio/api/Dummy.cpp', ]) - myModule.add_module_depend(['audio']) + myModule.add_module_depend(['audio', 'etk']) + + myModule.add_export_flag_CC(['-D__DUMMY__']) - myModule.add_export_flag_CC(['-D__AIRTAUDIO_API_DUMMY_H__']) if target.name=="Windows": myModule.add_src_file([ - 'airtaudio/api/Asio.cpp', - 'airtaudio/api/Ds.cpp', - ]) + 'airtaudio/api/Asio.cpp', + 'airtaudio/api/Ds.cpp', + ]) # ASIO API on Windows myModule.add_export_flag_CC(['__WINDOWS_ASIO__']) # Windows DirectSound API @@ -32,52 +33,37 @@ def create(target): myModule.add_module_depend(['etk']) elif target.name=="Linux": myModule.add_src_file([ - 'airtaudio/api/Alsa.cpp', - 'airtaudio/api/Jack.cpp', - 'airtaudio/api/Pulse.cpp', - 'airtaudio/api/Oss.cpp' - ]) - # Linux Alsa API - # TODO : myModule.add_optionnal_module_depend('alsa', "__LINUX_ALSA__") - myModule.add_export_flag_CC(['-D__LINUX_ALSA__']) - myModule.add_export_flag_LD("-lasound") - # Linux Jack API - # TODO : myModule.add_optionnal_module_depend('jack', "__UNIX_JACK__") - #myModule.add_export_flag_CC(['-D__UNIX_JACK__']) - #myModule.add_export_flag_LD("-ljack") - # Linux PulseAudio API - # TODO : myModule.add_optionnal_module_depend('pulse', "__LINUX_PULSE__") - #myModule.add_export_flag_CC(['-D__LINUX_PULSE__']) - #myModule.add_export_flag_LD("-lpulse-simple") - #myModule.add_export_flag_LD("-lpulse") - # TODO : myModule.add_optionnal_module_depend('oss', "__LINUX_OSS__") - #myModule.add_export_flag_CC(['-D__LINUX_OSS__']) - # ...
- myModule.add_module_depend(['etk']) + 'airtaudio/api/Alsa.cpp', + 'airtaudio/api/Jack.cpp', + 'airtaudio/api/Pulse.cpp', + 'airtaudio/api/Oss.cpp' + ]) + myModule.add_optionnal_module_depend('alsa', "__LINUX_ALSA__") + myModule.add_optionnal_module_depend('jack', "__UNIX_JACK__") + myModule.add_optionnal_module_depend('pulse', "__LINUX_PULSE__") + myModule.add_optionnal_module_depend('oss', "__LINUX_OSS__") elif target.name=="MacOs": myModule.add_src_file([ 'airtaudio/api/Core.cpp', 'airtaudio/api/Oss.cpp' ]) # MacOsX core - # TODO : myModule.add_optionnal_module_depend('CoreAudio', "__MACOSX_CORE__") - myModule.add_export_flag_CC(['-D__MACOSX_CORE__']) - myModule.add_export_flag_LD("-framework CoreAudio") - myModule.add_module_depend(['etk']) + myModule.add_optionnal_module_depend('CoreAudio', "__MACOSX_CORE__") + #myModule.add_export_flag_CC(['-D__MACOSX_CORE__']) + #myModule.add_export_flag_LD("-framework CoreAudio") elif target.name=="IOs": myModule.add_src_file('airtaudio/api/CoreIos.mm') # IOsX core - # TODO : myModule.add_optionnal_module_depend('CoreAudio', "__IOS_CORE__") - myModule.add_export_flag_CC(['-D__IOS_CORE__']) - myModule.add_export_flag_LD("-framework CoreAudio") - myModule.add_export_flag_LD("-framework AudioToolbox") - myModule.add_module_depend(['etk']) + myModule.add_optionnal_module_depend('CoreAudio', "__IOS_CORE__") + #myModule.add_export_flag_CC(['-D__IOS_CORE__']) + #myModule.add_export_flag_LD("-framework CoreAudio") + #myModule.add_export_flag_LD("-framework AudioToolbox") elif target.name=="Android": myModule.add_src_file('airtaudio/api/Android.cpp') # MacOsX core - # TODO : myModule.add_optionnal_module_depend('ewolAndroidAudio', "__ANDROID_JAVA__") - myModule.add_export_flag_CC(['-D__ANDROID_JAVA__']) - myModule.add_module_depend(['ewol']) + myModule.add_optionnal_module_depend('ewolAndroidAudio', "__ANDROID_JAVA__") + #myModule.add_export_flag_CC(['-D__ANDROID_JAVA__']) + #myModule.add_module_depend(['ewol']) else: debug.warning("unknow target for AIRTAudio : " + target.name);
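
Note (editor's illustration, not part of the patch): a minimal sketch of how client code would use the renamed types introduced in this diff, namely the enum airtaudio::status callback argument and the AirTAudioCallback prototype from airtaudio/base.h. The function name exampleCallback, the include path, the buffer handling, and the assumption that returning 0 keeps the stream running (non-zero requests a stop, as in RtAudio) are illustrative assumptions, not taken from the patch itself.

#include <airtaudio/base.h> // assumed include path for the declarations patched above
#include <iostream>

// Callback matching the new airtaudio::AirTAudioCallback prototype: it now
// receives an enum airtaudio::status instead of the removed streamStatus
// bit-mask typedef.
static int32_t exampleCallback(void* _outputBuffer,
                               void* _inputBuffer,
                               uint32_t _nFrames,
                               double _streamTime,
                               enum airtaudio::status _status) {
	// Report over/underflow conditions signalled by the backend.
	if (_status == airtaudio::status_underflow) {
		std::cout << "output underflow at t=" << _streamTime << std::endl;
	} else if (_status == airtaudio::status_overflow) {
		std::cout << "input overflow at t=" << _streamTime << std::endl;
	}
	// Fill _outputBuffer / consume _inputBuffer for _nFrames frames here.
	(void)_outputBuffer;
	(void)_inputBuffer;
	(void)_nFrames;
	return 0; // assumption: 0 keeps the stream running, non-zero asks it to stop
}

Call sites follow the same mechanical rename: anything that previously compared against airtaudio::errorNone now compares against airtaudio::error_none, and the old STREAM_*/OUTPUT/INPUT/DUPLEX constants map to airtaudio::state_* and airtaudio::mode_* as shown throughout the diff.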