[DEV] rework Airtaudio in audio::orchestra

This commit is contained in:
2015-04-10 22:06:17 +02:00
parent d4c53a53bf
commit f5c3affccb
72 changed files with 2805 additions and 2732 deletions

484
audio/orchestra/Api.cpp Normal file
View File

@@ -0,0 +1,484 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
//#include <etk/types.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <algorithm>
#include <climits>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <utility>
#undef __class__
#define __class__ "api"
// Static variable definitions.
const std::vector<uint32_t>& audio::orchestra::genericSampleRate() {
	// Generic list of sample rates (in Hz) that the backends probe,
	// sorted in increasing order.
	static const uint32_t rateList[] = {
		4000, 5512, 8000, 9600, 11025, 16000, 22050, 32000,
		44100, 48000, 64000, 88200, 96000, 128000, 176400, 192000
	};
	// Built exactly once via function-local static initialization; the
	// previous "if (list.size() == 0) push_back(...)" lazy fill was not
	// safe against concurrent first calls. (Also removes the stray ';'
	// that followed the function body.)
	static const std::vector<uint32_t> list(rateList, rateList + sizeof(rateList)/sizeof(rateList[0]));
	return list;
}
audio::orchestra::Api::Api() :
  m_callback(nullptr),
  m_deviceBuffer(nullptr) {
	// BUG FIX: the constructor previously initialized only m_device,
	// m_state and m_mode, leaving m_sampleRate, m_bufferSize, the
	// per-direction flag arrays, formats, etc. uninitialized.
	// clearStreamInfo() resets every stream-related member (including
	// m_device = 11111 "no device", m_state = closed, m_mode = unknow),
	// so delegate to it to guarantee a fully defined object.
	clearStreamInfo();
}
audio::orchestra::Api::~Api() {
	// Nothing to release at this level: subclasses own the actual
	// device/stream resources and must close them in their own destructors.
}
enum audio::orchestra::error audio::orchestra::Api::startStream() {
	ATA_VERBOSE("Start Stream");
	// Reset the stream clock: subclasses call this base implementation
	// when they actually start the hardware stream. getStreamTime()
	// then reports m_startTime + m_duration.
	m_startTime = std11::chrono::system_clock::now();
	m_duration = std11::chrono::microseconds(0);
	return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::Api::openStream(audio::orchestra::StreamParameters* _oParams,
                                                               audio::orchestra::StreamParameters* _iParams,
                                                               enum audio::format _format,
                                                               uint32_t _sampleRate,
                                                               uint32_t* _bufferFrames,
                                                               audio::orchestra::AirTAudioCallback _callback,
                                                               const audio::orchestra::StreamOptions& _options) {
	// Open a stream on the output and/or input device described by
	// _oParams / _iParams (either may be nullptr, but not both).
	// Validates the user parameters, then delegates the actual device
	// opening to the api-specific probeDeviceOpen*() implementations.
	// Only one stream per Api instance may be open at a time.
	if (m_state != audio::orchestra::state_closed) {
		ATA_ERROR("a stream is already open!");
		return audio::orchestra::error_invalidUse;
	}
	// A provided parameter set must request at least one channel.
	if (    _oParams != nullptr
	     && _oParams->nChannels < 1) {
		ATA_ERROR("a non-nullptr output StreamParameters structure cannot have an nChannels value less than one.");
		return audio::orchestra::error_invalidUse;
	}
	if (    _iParams != nullptr
	     && _iParams->nChannels < 1) {
		ATA_ERROR("a non-nullptr input StreamParameters structure cannot have an nChannels value less than one.");
		return audio::orchestra::error_invalidUse;
	}
	if (    _oParams == nullptr
	     && _iParams == nullptr) {
		ATA_ERROR("input and output StreamParameters structures are both nullptr!");
		return audio::orchestra::error_invalidUse;
	}
	// getFormatBytes() returns 0 for an unknown/unsupported format.
	if (audio::getFormatBytes(_format) == 0) {
		ATA_ERROR("'format' parameter value is undefined.");
		return audio::orchestra::error_invalidUse;
	}
	uint32_t nDevices = getDeviceCount();
	uint32_t oChannels = 0;
	if (_oParams != nullptr) {
		oChannels = _oParams->nChannels;
		// A device is selected either by id or by name: the id range check
		// is skipped when a device name is provided.
		if (    _oParams->deviceId >= nDevices
		     && _oParams->deviceName == "") {
			ATA_ERROR("output device parameter value is invalid.");
			return audio::orchestra::error_invalidUse;
		}
	}
	uint32_t iChannels = 0;
	if (_iParams != nullptr) {
		iChannels = _iParams->nChannels;
		if (    _iParams->deviceId >= nDevices
		     && _iParams->deviceName == "") {
			ATA_ERROR("input device parameter value is invalid.");
			return audio::orchestra::error_invalidUse;
		}
	}
	// Reset every stream-related member before probing the devices.
	clearStreamInfo();
	bool result;
	if (oChannels > 0) {
		// deviceId == -1 selects the open-by-name path.
		// NOTE(review): assumes deviceId is signed (or wraps consistently
		// when compared against -1) — confirm in StreamParameters.
		if (_oParams->deviceId == -1) {
			result = probeDeviceOpenName(_oParams->deviceName,
			                             audio::orchestra::mode_output,
			                             oChannels,
			                             _oParams->firstChannel,
			                             _sampleRate,
			                             _format,
			                             _bufferFrames,
			                             _options);
		} else {
			result = probeDeviceOpen(_oParams->deviceId,
			                         audio::orchestra::mode_output,
			                         oChannels,
			                         _oParams->firstChannel,
			                         _sampleRate,
			                         _format,
			                         _bufferFrames,
			                         _options);
		}
		if (result == false) {
			ATA_ERROR("system ERROR");
			return audio::orchestra::error_systemError;
		}
	}
	if (iChannels > 0) {
		if (_iParams->deviceId == -1) {
			result = probeDeviceOpenName(_iParams->deviceName,
			                             audio::orchestra::mode_input,
			                             iChannels,
			                             _iParams->firstChannel,
			                             _sampleRate,
			                             _format,
			                             _bufferFrames,
			                             _options);
		} else {
			result = probeDeviceOpen(_iParams->deviceId,
			                         audio::orchestra::mode_input,
			                         iChannels,
			                         _iParams->firstChannel,
			                         _sampleRate,
			                         _format,
			                         _bufferFrames,
			                         _options);
		}
		if (result == false) {
			// The output side was opened above: roll it back so the
			// stream does not stay half-open.
			if (oChannels > 0) {
				closeStream();
			}
			ATA_ERROR("system error");
			return audio::orchestra::error_systemError;
		}
	}
	m_callback = _callback;
	//_options.numberOfBuffers = m_nBuffers;
	// Stream is now open but not running; startStream() starts it.
	m_state = audio::orchestra::state_stopped;
	return audio::orchestra::error_none;
}
uint32_t audio::orchestra::Api::getDefaultInputDevice() {
	// Should be implemented in subclasses if possible.
	// Base implementation: device 0 is used as the default input.
	return 0;
}
uint32_t audio::orchestra::Api::getDefaultOutputDevice() {
	// Should be implemented in subclasses if possible.
	// Base implementation: device 0 is used as the default output.
	return 0;
}
enum audio::orchestra::error audio::orchestra::Api::closeStream() {
	ATA_VERBOSE("Close Stream");
	// MUST be implemented in subclasses!
	// Base implementation releases nothing and reports success.
	return audio::orchestra::error_none;
}
bool audio::orchestra::Api::probeDeviceOpen(uint32_t /*device*/,
                                            audio::orchestra::mode /*mode*/,
                                            uint32_t /*channels*/,
                                            uint32_t /*firstChannel*/,
                                            uint32_t /*sampleRate*/,
                                            audio::format /*format*/,
                                            uint32_t * /*bufferSize*/,
                                            const audio::orchestra::StreamOptions& /*options*/) {
	// MUST be implemented in subclasses!
	// Base implementation always fails: there is no backend at this level.
	return false;
}
void audio::orchestra::Api::tickStreamTime() {
	// Advance the stream clock by the duration of one processed buffer:
	// m_bufferSize frames at m_sampleRate frames per second.
	if (m_sampleRate == 0) {
		// BUG FIX: guard against division by zero when the stream has not
		// been configured (m_sampleRate is only set by a successful open).
		return;
	}
	m_duration += std11::chrono::nanoseconds((int64_t(m_bufferSize) * int64_t(1000000000)) / int64_t(m_sampleRate));
}
long audio::orchestra::Api::getStreamLatency() {
	// Total latency of the open stream: playback latency (m_latency[0]),
	// record latency (m_latency[1]), or their sum in duplex mode.
	if (verifyStream() != audio::orchestra::error_none) {
		return 0;
	}
	long total = 0;
	switch (m_mode) {
		case audio::orchestra::mode_output:
			total = m_latency[0];
			break;
		case audio::orchestra::mode_input:
			total = m_latency[1];
			break;
		case audio::orchestra::mode_duplex:
			total = m_latency[0] + m_latency[1];
			break;
		default:
			// Unknown mode: no latency to report.
			break;
	}
	return total;
}
std11::chrono::system_clock::time_point audio::orchestra::Api::getStreamTime() {
	if (verifyStream() != audio::orchestra::error_none) {
		// No open stream: return the default (epoch) time-point.
		return std11::chrono::system_clock::time_point();
	}
	// Current stream time = start time + the processed-buffer duration
	// accumulated by tickStreamTime().
	return m_startTime + m_duration;
}
uint32_t audio::orchestra::Api::getStreamSampleRate() {
	// Sample rate (in Hz) of the currently open stream; 0 if no stream is open.
	if (verifyStream() != audio::orchestra::error_none) {
		return 0;
	}
	return m_sampleRate;
}
enum audio::orchestra::error audio::orchestra::Api::verifyStream() {
	// Check that a stream is currently open (any state other than closed).
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("a stream is not open!");
		return audio::orchestra::error_invalidUse;
	}
	return audio::orchestra::error_none;
}
void audio::orchestra::Api::clearStreamInfo() {
	// Return every stream-related member to the "no stream open" state.
	m_state = audio::orchestra::state_closed;
	m_mode = audio::orchestra::mode_unknow;
	m_sampleRate = 0;
	m_bufferSize = 0;
	m_nBuffers = 0;
	m_userFormat = audio::format_unknow;
	m_startTime = std11::chrono::system_clock::time_point();
	m_duration = std11::chrono::nanoseconds(0);
	m_deviceBuffer = nullptr;
	m_callback = nullptr;
	// Per-direction state: index 0 is playback, index 1 is record.
	for (int32_t dirId=0; dirId<2; ++dirId) {
		m_device[dirId] = 11111; // magic value meaning "no device selected"
		m_doConvertBuffer[dirId] = false;
		m_deviceInterleaved[dirId] = true;
		m_doByteSwap[dirId] = false;
		m_nUserChannels[dirId] = 0;
		m_nDeviceChannels[dirId] = 0;
		m_channelOffset[dirId] = 0;
		m_deviceFormat[dirId] = audio::format_unknow;
		m_latency[dirId] = 0;
		m_userBuffer[dirId].clear();
		// Reset the conversion tables computed by setConvertInfo().
		m_convertInfo[dirId].channels = 0;
		m_convertInfo[dirId].inJump = 0;
		m_convertInfo[dirId].outJump = 0;
		m_convertInfo[dirId].inFormat = audio::format_unknow;
		m_convertInfo[dirId].outFormat = audio::format_unknow;
		m_convertInfo[dirId].inOffset.clear();
		m_convertInfo[dirId].outOffset.clear();
	}
}
void audio::orchestra::Api::setConvertInfo(audio::orchestra::mode _mode, uint32_t _firstChannel) {
	// Build the ConvertInfo table (jumps + per-channel offsets) used by
	// convertBuffer() to move samples between the user and device buffers.
	int32_t idTable = audio::orchestra::modeToIdTable(_mode);
	// NOTE(review): the hard-coded [0]/[1] indexes below assume
	// modeToIdTable(mode_output) == 0 and modeToIdTable(mode_input) == 1 —
	// confirm against modeToIdTable().
	if (_mode == audio::orchestra::mode_input) { // convert device to user buffer
		m_convertInfo[idTable].inJump = m_nDeviceChannels[1];
		m_convertInfo[idTable].outJump = m_nUserChannels[1];
		m_convertInfo[idTable].inFormat = m_deviceFormat[1];
		m_convertInfo[idTable].outFormat = m_userFormat;
	} else { // convert user to device buffer
		m_convertInfo[idTable].inJump = m_nUserChannels[0];
		m_convertInfo[idTable].outJump = m_nDeviceChannels[0];
		m_convertInfo[idTable].inFormat = m_userFormat;
		m_convertInfo[idTable].outFormat = m_deviceFormat[0];
	}
	// Copy only as many channels as both sides provide.
	if (m_convertInfo[idTable].inJump < m_convertInfo[idTable].outJump) {
		m_convertInfo[idTable].channels = m_convertInfo[idTable].inJump;
	} else {
		m_convertInfo[idTable].channels = m_convertInfo[idTable].outJump;
	}
	// Set up the interleave/deinterleave offsets.
	// Interleaved data: channel k of frame f is at f*jump + k.
	// Non-interleaved data: channel k is a contiguous plane of m_bufferSize
	// samples, so channel k of frame f is at k*m_bufferSize + f (jump = 1).
	if (m_deviceInterleaved[idTable] == false) {
		if (_mode == audio::orchestra::mode_input) {
			for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
				m_convertInfo[idTable].inOffset.push_back(kkk * m_bufferSize);
				m_convertInfo[idTable].outOffset.push_back(kkk);
				m_convertInfo[idTable].inJump = 1;
			}
		} else {
			for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
				m_convertInfo[idTable].inOffset.push_back(kkk);
				m_convertInfo[idTable].outOffset.push_back(kkk * m_bufferSize);
				m_convertInfo[idTable].outJump = 1;
			}
		}
	} else { // no (de)interleaving
		for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
			m_convertInfo[idTable].inOffset.push_back(kkk);
			m_convertInfo[idTable].outOffset.push_back(kkk);
		}
	}
	// Add channel offset: shift the device-side offsets so the user channels
	// map onto the device channels starting at _firstChannel.
	if (_firstChannel > 0) {
		if (m_deviceInterleaved[idTable]) {
			if (_mode == audio::orchestra::mode_output) {
				for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
					m_convertInfo[idTable].outOffset[kkk] += _firstChannel;
				}
			} else {
				for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
					m_convertInfo[idTable].inOffset[kkk] += _firstChannel;
				}
			}
		} else {
			if (_mode == audio::orchestra::mode_output) {
				for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
					m_convertInfo[idTable].outOffset[kkk] += (_firstChannel * m_bufferSize);
				}
			} else {
				for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
					m_convertInfo[idTable].inOffset[kkk] += (_firstChannel * m_bufferSize);
				}
			}
		}
	}
}
void audio::orchestra::Api::convertBuffer(char *_outBuffer, char *_inBuffer, audio::orchestra::ConvertInfo &_info) {
// This function does format conversion, input/output channel compensation, and
// data interleaving/deinterleaving. 24-bit integers are assumed to occupy
// the lower three bytes of a 32-bit integer.
// Clear our device buffer when in/out duplex device channels are different
if ( _outBuffer == m_deviceBuffer
&& m_mode == audio::orchestra::mode_duplex
&& m_nDeviceChannels[0] < m_nDeviceChannels[1]) {
memset(_outBuffer, 0, m_bufferSize * _info.outJump * audio::getFormatBytes(_info.outFormat));
}
switch (audio::getFormatBytes(_info.outFormat)) {
case 1:
{
uint8_t *out = reinterpret_cast<uint8_t*>(_outBuffer);
uint8_t *in = reinterpret_cast<uint8_t*>(_inBuffer);
for (size_t iii=0; iii<m_bufferSize; ++iii) {
for (size_t jjj=0; jjj<_info.channels; jjj++) {
out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
}
in += _info.inJump;
out += _info.outJump;
}
}
break;
case 2:
{
uint16_t *out = reinterpret_cast<uint16_t*>(_outBuffer);
uint16_t *in = reinterpret_cast<uint16_t*>(_inBuffer);
for (size_t iii=0; iii<m_bufferSize; ++iii) {
for (size_t jjj=0; jjj<_info.channels; jjj++) {
out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
}
in += _info.inJump;
out += _info.outJump;
}
}
break;
case 4:
{
uint32_t *out = reinterpret_cast<uint32_t*>(_outBuffer);
uint32_t *in = reinterpret_cast<uint32_t*>(_inBuffer);
for (size_t iii=0; iii<m_bufferSize; ++iii) {
for (size_t jjj=0; jjj<_info.channels; jjj++) {
out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
}
in += _info.inJump;
out += _info.outJump;
}
}
break;
case 8:
{
uint64_t *out = reinterpret_cast<uint64_t*>(_outBuffer);
uint64_t *in = reinterpret_cast<uint64_t*>(_inBuffer);
for (size_t iii=0; iii<m_bufferSize; ++iii) {
for (size_t jjj=0; jjj<_info.channels; jjj++) {
out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
}
in += _info.inJump;
out += _info.outJump;
}
}
break;
}
}
void audio::orchestra::Api::byteSwapBuffer(char *_buffer, uint32_t _samples, audio::format _format) {
	// In-place endianness swap of _samples samples stored in _buffer.
	char* ptr = _buffer;
	if (_format == audio::format_int16) {
		// 2-byte samples: swap the two bytes.
		for (uint32_t iii=0; iii<_samples; ++iii) {
			std::swap(ptr[0], ptr[1]);
			ptr += 2;
		}
	} else if (    _format == audio::format_int32
	            || _format == audio::format_float) {
		// 4-byte samples: reverse the four bytes.
		for (uint32_t iii=0; iii<_samples; ++iii) {
			std::swap(ptr[0], ptr[3]);
			std::swap(ptr[1], ptr[2]);
			ptr += 4;
		}
	} else if (_format == audio::format_int24) {
		// 3-byte packed samples: swap the two outer bytes.
		// BUG FIX: the previous code advanced only 2 bytes per sample, so
		// consecutive samples overlapped (upstream RtAudio advances 3).
		// NOTE(review): assumes int24 is packed on 3 bytes — confirm with
		// audio::getFormatBytes(audio::format_int24).
		for (uint32_t iii=0; iii<_samples; ++iii) {
			std::swap(ptr[0], ptr[2]);
			ptr += 3;
		}
	} else if (_format == audio::format_double) {
		// 8-byte samples: reverse the eight bytes.
		for (uint32_t iii=0; iii<_samples; ++iii) {
			std::swap(ptr[0], ptr[7]);
			std::swap(ptr[1], ptr[6]);
			std::swap(ptr[2], ptr[5]);
			std::swap(ptr[3], ptr[4]);
			ptr += 8;
		}
	}
	// Other formats (e.g. 8-bit) need no byte swapping.
}

180
audio/orchestra/Api.h Normal file
View File

@@ -0,0 +1,180 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_API_H__
#define __AUDIO_ORCHESTRA_API_H__
#include <sstream>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/type.h>
#include <audio/orchestra/state.h>
#include <audio/orchestra/mode.h>
namespace audio {
namespace orchestra {
const std::vector<uint32_t>& genericSampleRate();
/**
* @brief airtaudio callback function prototype.
* @param _inputBuffer For input (or duplex) streams, this buffer will hold _nbChunk of input audio chunk (nullptr if no data).
* @param _timeInput Timestamp of the first buffer sample (recording time).
* @param _outputBuffer For output (or duplex) streams, the client should write _nbChunk of audio chunk into this buffer (nullptr if no data).
* @param _timeOutput Timestamp of the first buffer sample (playing time).
* @param _nbChunk The number of chunk of input or output chunk in the buffer (same size).
* @param _status List of error that occured in the laps of time.
*/
typedef std11::function<int32_t (const void* _inputBuffer,
const std11::chrono::system_clock::time_point& _timeInput,
void* _outputBuffer,
const std11::chrono::system_clock::time_point& _timeOutput,
uint32_t _nbChunk,
const std::vector<audio::orchestra::status>& _status)> AirTAudioCallback;
// A protected structure used for buffer conversion.
class ConvertInfo {
	public:
		int32_t channels; //!< number of channels actually copied (min of in/out channel counts, see setConvertInfo()).
		int32_t inJump; //!< source frame stride in samples (1 for planar/non-interleaved data).
		int32_t outJump; //!< destination frame stride in samples (1 for planar/non-interleaved data).
		enum audio::format inFormat; //!< sample format of the source buffer.
		enum audio::format outFormat; //!< sample format of the destination buffer.
		std::vector<int> inOffset; //!< per-channel source offset table (interleave / channel-selection).
		std::vector<int> outOffset; //!< per-channel destination offset table (interleave / channel-selection).
};
class Api {
	protected:
		std::string m_name; //!< Display name of this backend instance (see setName()).
	public:
		Api();
		virtual ~Api();
		/**
		 * @brief Set the display name of this backend instance.
		 */
		void setName(const std::string& _name) {
			m_name = _name;
		}
		//! @return the backend type implemented by the subclass.
		virtual audio::orchestra::type getCurrentApi() = 0;
		//! @return the number of devices exposed by the backend.
		virtual uint32_t getDeviceCount() = 0;
		//! @return the description of device number _device.
		virtual audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device) = 0;
		// TODO : Check API ...
		/**
		 * @brief Look up a device by name (optional backend capability).
		 * @return false in this base implementation (not supported).
		 */
		virtual bool getNamedDeviceInfo(const std::string& _deviceName, audio::orchestra::DeviceInfo& _info) {
			return false;
		}
		virtual uint32_t getDefaultInputDevice();
		virtual uint32_t getDefaultOutputDevice();
		/**
		 * @brief Open a stream (output and/or input); see Api.cpp for the
		 * full parameter validation rules.
		 */
		enum audio::orchestra::error openStream(audio::orchestra::StreamParameters* _outputParameters,
		                                        audio::orchestra::StreamParameters* _inputParameters,
		                                        audio::format _format,
		                                        uint32_t _sampleRate,
		                                        uint32_t* _nbChunk,
		                                        audio::orchestra::AirTAudioCallback _callback,
		                                        const audio::orchestra::StreamOptions& _options);
		virtual enum audio::orchestra::error closeStream();
		virtual enum audio::orchestra::error startStream();
		virtual enum audio::orchestra::error stopStream() = 0;
		virtual enum audio::orchestra::error abortStream() = 0;
		//! @return the stream latency in frames (playback + record as applicable).
		long getStreamLatency();
		//! @return the sample rate of the open stream (0 if none).
		uint32_t getStreamSampleRate();
		//! @return the current stream time (start time + processed duration).
		virtual std11::chrono::system_clock::time_point getStreamTime();
		//! @return true if a stream is open (any state other than closed).
		bool isStreamOpen() const {
			return m_state != audio::orchestra::state_closed;
		}
		//! @return true if the open stream is currently running.
		bool isStreamRunning() const {
			return m_state == audio::orchestra::state_running;
		}
	protected:
		mutable std11::mutex m_mutex; //!< protects stream state against concurrent access.
		audio::orchestra::AirTAudioCallback m_callback; //!< user audio-processing callback.
		uint32_t m_device[2]; // Playback and record, respectively. (11111 = "no device selected")
		enum audio::orchestra::mode m_mode; // audio::orchestra::mode_output, audio::orchestra::mode_input, or audio::orchestra::mode_duplex.
		enum audio::orchestra::state m_state; // STOPPED, RUNNING, or CLOSED
		std::vector<char> m_userBuffer[2]; // Playback and record, respectively.
		char *m_deviceBuffer; //!< non-owning view on the backend's device buffer (hedge: ownership managed by subclasses — confirm).
		bool m_doConvertBuffer[2]; // Playback and record, respectively.
		bool m_deviceInterleaved[2]; // Playback and record, respectively.
		bool m_doByteSwap[2]; // Playback and record, respectively.
		uint32_t m_sampleRate; // TODO : Rename frequency
		uint32_t m_bufferSize; //!< stream buffer size in frames.
		uint32_t m_nBuffers; //!< number of device buffers.
		uint32_t m_nUserChannels[2]; // Playback and record, respectively. // TODO : set only one config (open inout with the same number of channels (limitation)
		uint32_t m_nDeviceChannels[2]; // Playback and record channels, respectively.
		uint32_t m_channelOffset[2]; // Playback and record, respectively.
		uint64_t m_latency[2]; // Playback and record, respectively.
		enum audio::format m_userFormat; // TODO : Remove this ==> user can only open in the hardware format ...
		enum audio::format m_deviceFormat[2]; // Playback and record, respectively.
		audio::orchestra::ConvertInfo m_convertInfo[2]; //!< conversion tables built by setConvertInfo().
		//std11::chrono::system_clock::time_point
		std11::chrono::system_clock::time_point m_startTime; //!< start time of the stream (restart at every stop, pause ...)
		std11::chrono::nanoseconds m_duration; //!< duration for which the stream has been running (advanced by tickStreamTime()).
		/**
		 * @brief api-specific method that attempts to open a device
		 * with the given parameters. This function MUST be implemented by
		 * all subclasses. If an error is encountered during the probe, a
		 * "warning" message is reported and false is returned. A
		 * successful probe is indicated by a return value of true.
		 */
		virtual bool probeDeviceOpen(uint32_t _device,
		                             enum audio::orchestra::mode _mode,
		                             uint32_t _channels,
		                             uint32_t _firstChannel,
		                             uint32_t _sampleRate,
		                             enum audio::format _format,
		                             uint32_t *_bufferSize,
		                             const audio::orchestra::StreamOptions& _options);
		/**
		 * @brief Same as probeDeviceOpen() but selecting the device by name;
		 * optional backend capability (base implementation fails).
		 */
		virtual bool probeDeviceOpenName(const std::string& _deviceName,
		                                 audio::orchestra::mode _mode,
		                                 uint32_t _channels,
		                                 uint32_t _firstChannel,
		                                 uint32_t _sampleRate,
		                                 audio::format _format,
		                                 uint32_t *_bufferSize,
		                                 const audio::orchestra::StreamOptions& _options) { return false; }
		/**
		 * @brief Increment the stream time.
		 */
		void tickStreamTime();
		/**
		 * @brief Reset every stream-related member to the closed-state defaults.
		 */
		void clearStreamInfo();
		/**
		 * @brief Check the current stream status
		 */
		enum audio::orchestra::error verifyStream();
		/**
		 * @brief Protected method used to perform format, channel number, and/or interleaving
		 * conversions between the user and device buffers.
		 */
		void convertBuffer(char *_outBuffer,
		                   char *_inBuffer,
		                   audio::orchestra::ConvertInfo& _info);
		/**
		 * @brief Perform byte-swapping on buffers.
		 */
		void byteSwapBuffer(char *_buffer,
		                    uint32_t _samples,
		                    enum audio::format _format);
		/**
		 * @brief Sets up the parameters for buffer conversion.
		 */
		void setConvertInfo(enum audio::orchestra::mode _mode,
		                    uint32_t _firstChannel);
	public:
		/**
		 * @brief Whether this backend instance drives _api as a slave
		 * (synchronized devices); base implementation: never.
		 */
		virtual bool isMasterOf(audio::orchestra::Api* _api) {
			return false;
		};
};
}
}
/**
* @brief Debug operator To display the curent element in a Human redeable information
*/
std::ostream& operator <<(std::ostream& _os, const audio::orchestra::type& _obj);
#endif

View File

View File

@@ -0,0 +1,47 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
//#include <etk/types.h>
#include <audio/orchestra/debug.h>
#include <audio/orchestra/DeviceInfo.h>
#include <etk/stdTools.h>
#include <iostream>
#undef __class__
#define __class__ "DeviceInfo"
void audio::orchestra::DeviceInfo::display(int32_t _tabNumber) const {
	// Pretty-print this device description on the ATA_INFO log output,
	// one field per line, indented _tabNumber levels.
	std::string space;
	for (int32_t iii=0; iii<_tabNumber; ++iii) {
		space += " ";
	}
	ATA_INFO(space + "probe=" << probed);
	ATA_INFO(space + "name=" << name);
	ATA_INFO(space + "outputChannels=" << outputChannels);
	ATA_INFO(space + "inputChannels=" << inputChannels);
	ATA_INFO(space + "duplexChannels=" << duplexChannels);
	ATA_INFO(space + "isDefaultOutput=" << (isDefaultOutput==true?"true":"false"));
	ATA_INFO(space + "isDefaultInput=" << (isDefaultInput==true?"true":"false"));
	// vector fields rely on the etk stream operators pulled in via stdTools.
	ATA_INFO(space + "rates=" << sampleRates);
	ATA_INFO(space + "native Format: " << nativeFormats);
}
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const audio::orchestra::DeviceInfo& _obj) {
	// Serialize every field of the device description as a single
	// brace-delimited line (same layout as display(), but on one stream).
	_os << "{"
	    << "probe=" << _obj.probed << ", "
	    << "name=" << _obj.name << ", "
	    << "outputChannels=" << _obj.outputChannels << ", "
	    << "inputChannels=" << _obj.inputChannels << ", "
	    << "duplexChannels=" << _obj.duplexChannels << ", "
	    << "isDefaultOutput=" << _obj.isDefaultOutput << ", "
	    << "isDefaultInput=" << _obj.isDefaultInput << ", "
	    << "rates=" << _obj.sampleRates << ", "
	    << "native Format: " << _obj.nativeFormats
	    << "}";
	return _os;
}

View File

@@ -0,0 +1,46 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_DEVICE_INFO_H__
#define __AUDIO_ORCHESTRA_DEVICE_INFO_H__
#include <audio/format.h>
namespace audio {
namespace orchestra {
/**
* @brief The public device information structure for returning queried values.
*/
class DeviceInfo {
	public:
		bool probed; //!< true if the device capabilities were successfully probed.
		std::string name; //!< Character string device identifier.
		uint32_t outputChannels; //!< Maximum output channels supported by device.
		uint32_t inputChannels; //!< Maximum input channels supported by device.
		uint32_t duplexChannels; //!< Maximum simultaneous input/output channels supported by device.
		bool isDefaultOutput; //!< true if this is the default output device.
		bool isDefaultInput; //!< true if this is the default input device.
		std::vector<uint32_t> sampleRates; //!< Supported sample rates (queried from list of standard rates).
		std::vector<audio::format> nativeFormats; //!< Bit mask of supported data formats.
		// Default constructor: an un-probed device with no capabilities.
		// (name, sampleRates and nativeFormats rely on their own default
		// constructors, which leave them empty.)
		DeviceInfo() :
		  probed(false),
		  outputChannels(0),
		  inputChannels(0),
		  duplexChannels(0),
		  isDefaultOutput(false),
		  isDefaultInput(false),
		  nativeFormats() {}
		/**
		 * @brief Log this description through ATA_INFO, one field per line.
		 * @param[in] _tabNumber Indentation level (defaults to 1).
		 */
		void display(int32_t _tabNumber = 1) const;
};
std::ostream& operator <<(std::ostream& _os, const audio::orchestra::DeviceInfo& _obj);
}
}
#endif

View File

@@ -0,0 +1,9 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/Flags.h>
#include <audio/orchestra/debug.h>

27
audio/orchestra/Flags.h Normal file
View File

@@ -0,0 +1,27 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_FLAGS_H__
#define __AUDIO_ORCHESTRA_FLAGS_H__
#include <etk/types.h>
namespace audio {
namespace orchestra {
class Flags {
	public:
		// Request the backend to minimize stream latency when possible.
		bool m_minimizeLatency; // Simple example ==> TODO ...
		// Default constructor: no flag raised.
		Flags() :
		  m_minimizeLatency(false) {
			// nothing to do ...
		}
};
}
}
#endif

View File

@@ -0,0 +1,184 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
//#include <etk/types.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <iostream>
#undef __class__
#define __class__ "Interface"
std::vector<enum audio::orchestra::type> audio::orchestra::Interface::getCompiledApi() {
std::vector<enum audio::orchestra::type> apis;
// The order here will control the order of RtAudio's API search in
// the constructor.
for (size_t iii=0; iii<m_apiAvaillable.size(); ++iii) {
apis.push_back(m_apiAvaillable[iii].first);
}
return apis;
}
void audio::orchestra::Interface::openRtApi(enum audio::orchestra::type _api) {
	// Destroy any previously created backend, then create the one whose
	// registered type matches _api. On failure m_rtapi stays nullptr.
	delete m_rtapi;
	m_rtapi = nullptr;
	for (size_t idx=0; idx<m_apiAvaillable.size(); ++idx) {
		ATA_INFO("try open " << m_apiAvaillable[idx].first);
		if (m_apiAvaillable[idx].first != _api) {
			continue;
		}
		ATA_INFO(" ==> call it");
		m_rtapi = m_apiAvaillable[idx].second();
		if (m_rtapi != nullptr) {
			return;
		}
	}
	// TODO : An error occured ...
	ATA_ERROR("Error in open API ...");
}
audio::orchestra::Interface::Interface() :
  m_rtapi(nullptr) {
	// Register every backend compiled in, in priority order: this
	// registration order is the search order used by instanciate() when
	// no explicit API is requested.
	ATA_DEBUG("Add interface:");
	#if defined(ORCHESTRA_BUILD_JACK)
		ATA_DEBUG("    JACK");
		addInterface(audio::orchestra::type_jack, audio::orchestra::api::Jack::Create);
	#endif
	#if defined(ORCHESTRA_BUILD_ALSA)
		ATA_DEBUG("    ALSA");
		addInterface(audio::orchestra::type_alsa, audio::orchestra::api::Alsa::Create);
	#endif
	#if defined(ORCHESTRA_BUILD_PULSE)
		ATA_DEBUG("    PULSE");
		addInterface(audio::orchestra::type_pulse, audio::orchestra::api::Pulse::Create);
	#endif
	#if defined(ORCHESTRA_BUILD_OSS)
		ATA_DEBUG("    OSS");
		addInterface(audio::orchestra::type_oss, audio::orchestra::api::Oss::Create);
	#endif
	#if defined(ORCHESTRA_BUILD_ASIO)
		ATA_DEBUG("    ASIO");
		addInterface(audio::orchestra::type_asio, audio::orchestra::api::Asio::Create);
	#endif
	#if defined(ORCHESTRA_BUILD_DS)
		ATA_DEBUG("    DS");
		addInterface(audio::orchestra::type_ds, audio::orchestra::api::Ds::Create);
	#endif
	#if defined(ORCHESTRA_BUILD_MACOSX_CORE)
		ATA_DEBUG("    CORE OSX");
		addInterface(audio::orchestra::type_coreOSX, audio::orchestra::api::Core::Create);
	#endif
	#if defined(ORCHESTRA_BUILD_IOS_CORE)
		ATA_DEBUG("    CORE IOS");
		addInterface(audio::orchestra::type_coreIOS, audio::orchestra::api::CoreIos::Create);
	#endif
	#if defined(ORCHESTRA_BUILD_JAVA)
		ATA_DEBUG("    JAVA");
		addInterface(audio::orchestra::type_java, audio::orchestra::api::Android::Create);
	#endif
	#if defined(ORCHESTRA_BUILD_DUMMY)
		ATA_DEBUG("    DUMMY");
		addInterface(audio::orchestra::type_dummy, audio::orchestra::api::Dummy::Create);
	#endif
}
void audio::orchestra::Interface::addInterface(enum audio::orchestra::type _api, Api* (*_callbackCreate)()) {
	// Register one backend: its type tag and the factory used to create it.
	m_apiAvaillable.push_back(std::make_pair(_api, _callbackCreate));
}
enum audio::orchestra::error audio::orchestra::Interface::instanciate(enum audio::orchestra::type _api) {
	// Create the backend instance: either the explicitly requested _api,
	// or (when _api is type_undefined) the first registered backend that
	// exposes at least one device.
	ATA_INFO("Instanciate API ...");
	if (m_rtapi != nullptr) {
		// Already instanciated: keep the existing backend.
		ATA_WARNING("Interface already started ...!");
		return audio::orchestra::error_none;
	}
	if (_api != audio::orchestra::type_undefined) {
		ATA_INFO("API specified : " << _api);
		// Attempt to open the specified API.
		openRtApi(_api);
		if (m_rtapi != nullptr) {
			if (m_rtapi->getDeviceCount() != 0) {
				ATA_INFO(" ==> api open");
			}
			// Note: success is reported even with zero devices, as long as
			// the backend itself could be created.
			return audio::orchestra::error_none;
		}
		// No compiled support for specified API value. Issue a debug
		// warning and continue as if no API was specified.
		ATA_ERROR("RtAudio: no compiled support for specified API argument!");
		return audio::orchestra::error_fail;
	}
	ATA_INFO("Auto choice API :");
	// Iterate through the compiled APIs and return as soon as we find
	// one with at least one device or we reach the end of the list.
	std::vector<enum audio::orchestra::type> apis = getCompiledApi();
	ATA_INFO(" find : " << apis.size() << " apis.");
	for (size_t iii=0; iii<apis.size(); ++iii) {
		ATA_INFO("try open ...");
		openRtApi(apis[iii]);
		if(m_rtapi == nullptr) {
			ATA_ERROR(" ==> can not create ...");
			continue;
		}
		if (m_rtapi->getDeviceCount() != 0) {
			ATA_INFO(" ==> api open");
			break;
		}
	}
	if (m_rtapi != nullptr) {
		return audio::orchestra::error_none;
	}
	ATA_ERROR("RtAudio: no compiled API support found ... critical error!!");
	return audio::orchestra::error_fail;
}
audio::orchestra::Interface::~Interface() {
	ATA_INFO("Remove interface");
	// Destroy the backend created by instanciate() (delete nullptr is a no-op).
	delete m_rtapi;
	m_rtapi = nullptr;
}
enum audio::orchestra::error audio::orchestra::Interface::openStream(audio::orchestra::StreamParameters* _outputParameters,
                                                                     audio::orchestra::StreamParameters* _inputParameters,
                                                                     audio::format _format,
                                                                     uint32_t _sampleRate,
                                                                     uint32_t* _bufferFrames,
                                                                     audio::orchestra::AirTAudioCallback _callback,
                                                                     const audio::orchestra::StreamOptions& _options) {
	// Thin forward to the instanciated backend's openStream().
	if (m_rtapi == nullptr) {
		// instanciate() was not called, or it failed.
		return audio::orchestra::error_inputNull;
	}
	return m_rtapi->openStream(_outputParameters,
	                           _inputParameters,
	                           _format,
	                           _sampleRate,
	                           _bufferFrames,
	                           _callback,
	                           _options);
}
bool audio::orchestra::Interface::isMasterOf(audio::orchestra::Interface& _interface) {
	// Try to link this interface (master) with _interface (slave) so both
	// devices run synchronously. Requires both backends to exist, to be of
	// the same low-level type, and (currently) that type to be ALSA.
	if (m_rtapi == nullptr) {
		ATA_ERROR("Current Master API is nullptr ...");
		return false;
	}
	if (_interface.m_rtapi == nullptr) {
		ATA_ERROR("Current Slave API is nullptr ...");
		return false;
	}
	if (m_rtapi->getCurrentApi() != _interface.m_rtapi->getCurrentApi()) {
		ATA_ERROR("Can not link 2 Interface with not the same Low level type (?)");//" << _interface.m_adac->getCurrentApi() << " != " << m_adac->getCurrentApi() << ")");
		return false;
	}
	if (m_rtapi->getCurrentApi() != audio::orchestra::type_alsa) {
		ATA_ERROR("Link 2 device together work only if the interafec is ?");// << audio::orchestra::type_alsa << " not for " << m_rtapi->getCurrentApi());
		return false;
	}
	// Delegate the final master/slave pairing decision to the backend.
	return m_rtapi->isMasterOf(_interface.m_rtapi);
}

320
audio/orchestra/Interface.h Normal file
View File

@@ -0,0 +1,320 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_INTERFACE_H__
#define __AUDIO_ORCHESTRA_INTERFACE_H__
#include <string>
#include <vector>
#include <audio/orchestra/base.h>
#include <audio/orchestra/CallbackInfo.h>
#include <audio/orchestra/Api.h>
#include <audio/orchestra/api/Alsa.h>
#include <audio/orchestra/api/Android.h>
#include <audio/orchestra/api/Asio.h>
#include <audio/orchestra/api/Core.h>
#include <audio/orchestra/api/CoreIos.h>
#include <audio/orchestra/api/Ds.h>
#include <audio/orchestra/api/Dummy.h>
#include <audio/orchestra/api/Jack.h>
#include <audio/orchestra/api/Oss.h>
#include <audio/orchestra/api/Pulse.h>
namespace audio {
	namespace orchestra {
		/**
		 * @brief audio::orchestra::Interface class declaration.
		 *
		 * audio::orchestra::Interface is a "controller" used to select an available audio i/o
		 * interface. It presents a common API for the user to call but all
		 * functionality is implemented by the class Api and its
		 * subclasses. An instance of an Api subclass is created
		 * based on the user's API choice. If no choice is made, the interface
		 * attempts to make a "logical" API selection.
		 */
		class Interface {
			protected:
				// Registry of the compiled-in backends: (API type, factory callback) pairs.
				std::vector<std::pair<enum audio::orchestra::type, Api* (*)()> > m_apiAvaillable;
			protected:
				// Currently instantiated backend; nullptr until instanciate() succeeds.
				audio::orchestra::Api *m_rtapi;
			public:
				/**
				 * @brief Forward a stream name to the underlying API (no-op when no backend is instantiated).
				 * @param[in] _name New stream name.
				 */
				void setName(const std::string& _name) {
					if (m_rtapi == nullptr) {
						return;
					}
					m_rtapi->setName(_name);
				}
				/**
				 * @brief A static function to determine the available compiled audio APIs.
				 *
				 * The values returned in the std::vector can be compared against
				 * the enumerated list values. Note that there can be more than one
				 * API compiled for certain operating systems.
				 */
				std::vector<enum audio::orchestra::type> getCompiledApi();
				/**
				 * @brief The class constructor.
				 * @note the creation of the backend instance is done by instanciate()
				 */
				Interface();
				/**
				 * @brief The destructor.
				 *
				 * If a stream is running or open, it will be stopped and closed
				 * automatically.
				 */
				virtual ~Interface();
				/**
				 * @brief Add an interface to the list of possible backends.
				 * @param[in] _api Type of the interface.
				 * @param[in] _callbackCreate API creation callback.
				 */
				void addInterface(enum audio::orchestra::type _api, Api* (*_callbackCreate)());
				/**
				 * @brief Create an interface instance
				 */
				enum audio::orchestra::error instanciate(enum audio::orchestra::type _api = audio::orchestra::type_undefined);
				/**
				 * @return the audio API specifier for the current backend instance
				 *         (type_undefined when none is instantiated).
				 */
				enum audio::orchestra::type getCurrentApi() {
					if (m_rtapi == nullptr) {
						return audio::orchestra::type_undefined;
					}
					return m_rtapi->getCurrentApi();
				}
				/**
				 * @brief A public function that queries for the number of audio devices available.
				 *
				 * This function performs a system query of available devices each time it
				 * is called, thus supporting devices connected \e after instantiation. If
				 * a system error occurs during processing, a warning will be issued.
				 */
				uint32_t getDeviceCount() {
					if (m_rtapi == nullptr) {
						return 0;
					}
					return m_rtapi->getDeviceCount();
				}
				/**
				 * @brief Any device integer between 0 and getDeviceCount() - 1 is valid.
				 * If an invalid argument is provided, an RtError (type = INVALID_USE)
				 * will be thrown. If a device is busy or otherwise unavailable, the
				 * structure member "probed" will have a value of "false" and all
				 * other members are undefined. If the specified device is the
				 * current default input or output device, the corresponding
				 * "isDefault" member will have a value of "true".
				 *
				 * @return An audio::orchestra::DeviceInfo structure for a specified device number.
				 */
				audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device) {
					if (m_rtapi == nullptr) {
						return audio::orchestra::DeviceInfo();
					}
					return m_rtapi->getDeviceInfo(_device);
				}
				/**
				 * @brief Look a device up by name.
				 * @param[in] _deviceName Backend-specific device name.
				 * @return The device description; a default-constructed DeviceInfo
				 *         when no backend is instantiated.
				 * @note NOTE(review): the boolean result of getNamedDeviceInfo() is
				 *       ignored here, so on lookup failure a partially-filled info
				 *       may be returned — callers should check info.probed.
				 */
				audio::orchestra::DeviceInfo getDeviceInfo(const std::string& _deviceName) {
					if (m_rtapi == nullptr) {
						return audio::orchestra::DeviceInfo();
					}
					audio::orchestra::DeviceInfo info;
					m_rtapi->getNamedDeviceInfo(_deviceName, info);
					return info;
				}
				/**
				 * @brief A function that returns the index of the default output device.
				 *
				 * If the underlying audio API does not provide a "default
				 * device", or if no devices are available, the return value will be
				 * 0. Note that this is a valid device identifier and it is the
				 * client's responsibility to verify that a device is available
				 * before attempting to open a stream.
				 */
				uint32_t getDefaultOutputDevice() {
					if (m_rtapi == nullptr) {
						return 0;
					}
					return m_rtapi->getDefaultOutputDevice();
				}
				/**
				 * @brief A function that returns the index of the default input device.
				 *
				 * If the underlying audio API does not provide a "default
				 * device", or if no devices are available, the return value will be
				 * 0. Note that this is a valid device identifier and it is the
				 * client's responsibility to verify that a device is available
				 * before attempting to open a stream.
				 */
				uint32_t getDefaultInputDevice() {
					if (m_rtapi == nullptr) {
						return 0;
					}
					return m_rtapi->getDefaultInputDevice();
				}
				/**
				 * @brief A public function for opening a stream with the specified parameters.
				 *
				 * An RtError (type = SYSTEM_ERROR) is thrown if a stream cannot be
				 * opened with the specified parameters or an error occurs during
				 * processing. An RtError (type = INVALID_USE) is thrown if any
				 * invalid device ID or channel number parameters are specified.
				 * @param _outputParameters Specifies output stream parameters to use
				 *        when opening a stream, including a device ID, number of channels,
				 *        and starting channel number. For input-only streams, this
				 *        argument should be nullptr. The device ID is an index value between
				 *        0 and getDeviceCount() - 1.
				 * @param _inputParameters Specifies input stream parameters to use
				 *        when opening a stream, including a device ID, number of channels,
				 *        and starting channel number. For output-only streams, this
				 *        argument should be nullptr. The device ID is an index value between
				 *        0 and getDeviceCount() - 1.
				 * @param _format An audio::format specifying the desired sample data format.
				 * @param _sampleRate The desired sample rate (sample frames per second).
				 * @param _bufferFrames A pointer to a value indicating the desired
				 *        internal buffer size in sample frames. The actual value
				 *        used by the device is returned via the same pointer. A
				 *        value of zero can be specified, in which case the lowest
				 *        allowable value is determined.
				 * @param _callback A client-defined function that will be invoked
				 *        when input data is available and/or output data is needed.
				 * @param _options An optional structure containing various
				 *        global stream options, including a list of OR'ed audio::orchestra::streamFlags
				 *        and a suggested number of stream buffers that can be used to
				 *        control stream latency. More buffers typically result in more
				 *        robust performance, though at a cost of greater latency. If a
				 *        value of zero is specified, a system-specific median value is
				 *        chosen. If the minimize-latency flag bit is set, the
				 *        lowest allowable value is used. The actual value used is
				 *        returned via the structure argument. The parameter is API dependent.
				 */
				enum audio::orchestra::error openStream(audio::orchestra::StreamParameters *_outputParameters,
				                                        audio::orchestra::StreamParameters *_inputParameters,
				                                        enum audio::format _format,
				                                        uint32_t _sampleRate,
				                                        uint32_t* _bufferFrames,
				                                        audio::orchestra::AirTAudioCallback _callback,
				                                        const audio::orchestra::StreamOptions& _options = audio::orchestra::StreamOptions());
				/**
				 * @brief A function that closes a stream and frees any associated stream memory.
				 *
				 * If a stream is not open, this function issues a warning and
				 * returns (no exception is thrown).
				 */
				enum audio::orchestra::error closeStream() {
					if (m_rtapi == nullptr) {
						return audio::orchestra::error_inputNull;
					}
					return m_rtapi->closeStream();
				}
				/**
				 * @brief A function that starts a stream.
				 *
				 * An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
				 * during processing. An RtError (type = INVALID_USE) is thrown if a
				 * stream is not open. A warning is issued if the stream is already
				 * running.
				 */
				enum audio::orchestra::error startStream() {
					if (m_rtapi == nullptr) {
						return audio::orchestra::error_inputNull;
					}
					return m_rtapi->startStream();
				}
				/**
				 * @brief Stop a stream, allowing any samples remaining in the output queue to be played.
				 *
				 * An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
				 * during processing. An RtError (type = INVALID_USE) is thrown if a
				 * stream is not open. A warning is issued if the stream is already
				 * stopped.
				 */
				enum audio::orchestra::error stopStream() {
					if (m_rtapi == nullptr) {
						return audio::orchestra::error_inputNull;
					}
					return m_rtapi->stopStream();
				}
				/**
				 * @brief Stop a stream, discarding any samples remaining in the input/output queue.
				 * An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
				 * during processing. An RtError (type = INVALID_USE) is thrown if a
				 * stream is not open. A warning is issued if the stream is already
				 * stopped.
				 */
				enum audio::orchestra::error abortStream() {
					if (m_rtapi == nullptr) {
						return audio::orchestra::error_inputNull;
					}
					return m_rtapi->abortStream();
				}
				/**
				 * @return true if a stream is open and false if not.
				 */
				bool isStreamOpen() const {
					if (m_rtapi == nullptr) {
						return false;
					}
					return m_rtapi->isStreamOpen();
				}
				/**
				 * @return true if the stream is running and false if it is stopped or not open.
				 */
				bool isStreamRunning() const {
					if (m_rtapi == nullptr) {
						return false;
					}
					return m_rtapi->isStreamRunning();
				}
				/**
				 * @brief If a stream is not open, an RtError (type = INVALID_USE) will be thrown.
				 * @return the current stream time as a time_point (a default-constructed
				 *         time_point when no backend is instantiated).
				 */
				std11::chrono::system_clock::time_point getStreamTime() {
					if (m_rtapi == nullptr) {
						return std11::chrono::system_clock::time_point();
					}
					return m_rtapi->getStreamTime();
				}
				/**
				 * @brief The stream latency refers to delay in audio input and/or output
				 * caused by internal buffering by the audio system and/or hardware.
				 * For duplex streams, the returned value will represent the sum of
				 * the input and output latencies. If a stream is not open, an
				 * RtError (type = INVALID_USE) will be thrown. If the API does not
				 * report latency, the return value will be zero.
				 * @return The internal stream latency in sample frames.
				 */
				long getStreamLatency() {
					if (m_rtapi == nullptr) {
						return 0;
					}
					return m_rtapi->getStreamLatency();
				}
				/**
				 * @brief On some systems, the sample rate used may be slightly different
				 * than that specified in the stream parameters. If a stream is not
				 * open, an RtError (type = INVALID_USE) will be thrown.
				 * @return Returns actual sample rate in use by the stream.
				 */
				uint32_t getStreamSampleRate() {
					if (m_rtapi == nullptr) {
						return 0;
					}
					return m_rtapi->getStreamSampleRate();
				}
				// Returns whether this instance's backend is the clock master of _interface's backend.
				bool isMasterOf(audio::orchestra::Interface& _interface);
			protected:
				// Instantiate the backend matching _api and store it in m_rtapi.
				void openRtApi(enum audio::orchestra::type _api);
		};
	}
}
#endif

View File

@@ -0,0 +1,45 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/StreamOptions.h>
#include <etk/stdTools.h>
#include <audio/orchestra/debug.h>
// Textual names for audio::orchestra::timestampMode, indexed by enum value.
static const char* listValue[] = {
	"hardware",
	"trigered",
	"soft"
};

/**
 * @brief Stream the textual name of a timestampMode value.
 * @param[in,out] _os Output stream.
 * @param[in] _obj Value to render.
 * @return The stream, to allow chaining.
 * @note Fix: the table index is now bounds-checked — the previous code read
 *       out of bounds for a corrupted/out-of-range enum value.
 */
std::ostream& audio::orchestra::operator <<(std::ostream& _os, enum audio::orchestra::timestampMode _obj) {
	size_t index = static_cast<size_t>(_obj);
	if (index < sizeof(listValue)/sizeof(listValue[0])) {
		_os << listValue[index];
	} else {
		_os << "timestampMode_unknow(" << index << ")";
	}
	return _os;
}
namespace etk {
	/**
	 * @brief etk::from_string specialization: parse a timestampMode from its textual name.
	 * @param[out] _variableRet Parsed value (left unchanged on failure).
	 * @param[in] _value Input text ("hardware", "trigered" or "soft").
	 * @return true when the text matches a known mode, false otherwise.
	 */
	template <> bool from_string<enum audio::orchestra::timestampMode>(enum audio::orchestra::timestampMode& _variableRet, const std::string& _value) {
		if (_value == "hardware") {
			_variableRet = audio::orchestra::timestampMode_Hardware;
			return true;
		}
		if (_value == "trigered") {
			_variableRet = audio::orchestra::timestampMode_trigered;
			return true;
		}
		if (_value == "soft") {
			_variableRet = audio::orchestra::timestampMode_soft;
			return true;
		}
		return false;
	}
	/**
	 * @brief etk::to_string specialization: convert a timestampMode to its textual name.
	 * @note Fix: the previous declaration `template <enum audio::orchestra::timestampMode>`
	 *       introduced a template with an unnamed non-type parameter — a brand-new
	 *       template that is never selected when to_string(mode) is called. It is
	 *       now a proper explicit specialization of etk::to_string<>.
	 */
	template <> std::string to_string<enum audio::orchestra::timestampMode>(const enum audio::orchestra::timestampMode& _variable) {
		return listValue[_variable];
	}
}

View File

@@ -0,0 +1,39 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_STREAM_OPTION_H__
#define __AUDIO_ORCHESTRA_STREAM_OPTION_H__
#include <audio/orchestra/Flags.h>
namespace audio {
	namespace orchestra {
		/**
		 * @brief Timestamping policy applied to the audio buffers of a stream.
		 */
		enum timestampMode {
			timestampMode_Hardware, //!< enable hardware timestamp
			timestampMode_trigered, //!< get hardware trigger time stamp and increment with duration
			timestampMode_soft, //!< Simulate all timestamp.
		};
		// Stream the textual name of a timestampMode (implemented in StreamOptions.cpp).
		std::ostream& operator <<(std::ostream& _os, enum audio::orchestra::timestampMode _obj);
		/**
		 * @brief Per-stream configuration handed to openStream().
		 */
		class StreamOptions {
			public:
				audio::orchestra::Flags flags; //!< A bit-mask of stream flags
				uint32_t numberOfBuffers; //!< Number of stream buffers (0 = backend-chosen default).
				std::string streamName; //!< A stream name (currently used only in Jack).
				enum timestampMode mode; //!< mode of timestamping data...
				// Default constructor: no flags, backend-chosen buffer count, hardware timestamps.
				StreamOptions() :
				  flags(),
				  numberOfBuffers(0),
				  mode(timestampMode_Hardware) {}
		};
	}
}
#endif

View File

@@ -0,0 +1,35 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_STREAM_PARAMETER_H__
#define __AUDIO_ORCHESTRA_STREAM_PARAMETER_H__
namespace audio {
	namespace orchestra {
		/**
		 * @brief The structure for specifying input or output stream parameters.
		 */
		class StreamParameters {
			public:
				int32_t deviceId; //!< Device index (-1 to getDeviceCount() - 1); -1 means "select by deviceName".
				std::string deviceName; //!< name of the device (if deviceId==-1 this must not be == "", and the opposite ...)
				uint32_t nChannels; //!< Number of channels.
				uint32_t firstChannel; //!< First channel index on device (default = 0).
				// Default constructor: no device selected, no channels requested.
				StreamParameters() :
				  deviceId(-1),
				  nChannels(0),
				  firstChannel(0) {
				}
		};
	}
}
#endif

1389
audio/orchestra/api/Alsa.cpp Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,77 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_ALSA_H__) && defined(ORCHESTRA_BUILD_ALSA)
#define __AUDIO_ORCHESTRA_API_ALSA_H__
namespace audio {
	namespace orchestra {
		namespace api {
			// Opaque backend state (defined in Alsa.cpp, pImpl idiom).
			class AlsaPrivate;
			/**
			 * @brief ALSA backend implementation of audio::orchestra::Api.
			 */
			class Alsa: public audio::orchestra::Api {
				public:
					// Factory callback registered in the interface backend table.
					static audio::orchestra::Api* Create();
				public:
					Alsa();
					virtual ~Alsa();
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_alsa;
					}
					uint32_t getDeviceCount();
				private:
					// Probe a single device, either by name or by ALSA card/subdevice ids.
					bool getNamedDeviceInfoLocal(const std::string& _deviceName,
					                             audio::orchestra::DeviceInfo& _info,
					                             int32_t _cardId=-1, // Alsa card ID
					                             int32_t _subdevice=-1, // alsa subdevice ID
					                             int32_t _localDeviceId=-1); // local ID of device found
				public:
					bool getNamedDeviceInfo(const std::string& _deviceName, audio::orchestra::DeviceInfo& _info) {
						return getNamedDeviceInfoLocal(_deviceName, _info);
					}
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of this class. External use of this function
					// will most likely produce highly undesireable results!
					void callbackEvent();
					void callbackEventOneCycle();
				private:
					// Static trampoline handed to the ALSA callback thread.
					static void alsaCallbackEvent(void* _userData);
				private:
					std11::shared_ptr<AlsaPrivate> m_private;
					// Cached device descriptions (see saveDeviceInfo()).
					std::vector<audio::orchestra::DeviceInfo> m_devices;
					void saveDeviceInfo();
					bool probeDeviceOpen(uint32_t _device,
					                     enum audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     enum audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
					virtual bool probeDeviceOpenName(const std::string& _deviceName,
					                                 audio::orchestra::mode _mode,
					                                 uint32_t _channels,
					                                 uint32_t _firstChannel,
					                                 uint32_t _sampleRate,
					                                 audio::format _format,
					                                 uint32_t *_bufferSize,
					                                 const audio::orchestra::StreamOptions& _options);
					virtual std11::chrono::system_clock::time_point getStreamTime();
				public:
					bool isMasterOf(audio::orchestra::Api* _api);
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,210 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifdef ORCHESTRA_BUILD_JAVA
#include <ewol/context/Context.h>
#include <unistd.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <limits.h>
#undef __class__
#define __class__ "api::Android"
/**
 * @brief Factory entry point registered in the interface backend table.
 * @return A freshly allocated Android backend (ownership transferred to the caller).
 */
audio::orchestra::Api* audio::orchestra::api::Android::Create() {
	ATA_INFO("Create Android device ... ");
	audio::orchestra::Api* instance = new audio::orchestra::api::Android();
	return instance;
}
/**
 * @brief Constructor: enumerate the (static) Android devices exposed by the ewol context.
 *
 * Each device is described by a colon-separated property string
 * "name:direction:rate,rate,...:channels:formats" which is parsed into a
 * DeviceInfo entry of m_devices.
 */
audio::orchestra::api::Android::Android() {
	ATA_INFO("new Android");
	// On android, we set a static device ...
	ATA_INFO("get context");
	ewol::Context& tmpContext = ewol::getContext();
	ATA_INFO("done p=" << (int64_t)&tmpContext);
	int32_t deviceCount = tmpContext.audioGetDeviceCount();
	ATA_ERROR("Get count devices : " << deviceCount);
	for (int32_t iii=0; iii<deviceCount; ++iii) {
		std::string property = tmpContext.audioGetDeviceProperty(iii);
		ATA_ERROR("Get devices property : " << property);
		std::vector<std::string> listProperty = etk::split(property, ':');
		// Robustness fix: the fields [0..4] were indexed unconditionally, which
		// crashes on a malformed property string. Skip such entries instead.
		if (listProperty.size() < 5) {
			ATA_ERROR("Can not parse device property (need 5 fields) : '" << property << "'");
			continue;
		}
		audio::orchestra::DeviceInfo tmp;
		tmp.name = listProperty[0];
		std::vector<std::string> listFreq = etk::split(listProperty[2], ',');
		for(size_t fff=0; fff<listFreq.size(); ++fff) {
			tmp.sampleRates.push_back(etk::string_to_int32_t(listFreq[fff]));
		}
		tmp.outputChannels = 0;
		tmp.inputChannels = 0;
		tmp.duplexChannels = 0;
		if (listProperty[1] == "out") {
			tmp.isDefaultOutput = true;
			tmp.isDefaultInput = false;
			tmp.outputChannels = etk::string_to_int32_t(listProperty[3]);
		} else if (listProperty[1] == "in") {
			tmp.isDefaultOutput = false;
			tmp.isDefaultInput = true;
			tmp.inputChannels = etk::string_to_int32_t(listProperty[3]);
		} else {
			/* duplex */
			tmp.isDefaultOutput = true;
			tmp.isDefaultInput = true;
			tmp.duplexChannels = etk::string_to_int32_t(listProperty[3]);
		}
		tmp.nativeFormats = audio::getListFormatFromString(listProperty[4]);
		m_devices.push_back(tmp);
	}
	ATA_INFO("Create Android interface (end)");
}
// Destructor: nothing to release here — the audio device itself is owned and
// closed by the ewol context (see stopStream()/abortStream()).
audio::orchestra::api::Android::~Android() {
	ATA_INFO("Destroy Android interface");
}
/**
 * @brief Number of devices enumerated at construction time.
 * @return Size of the cached device list.
 */
uint32_t audio::orchestra::api::Android::getDeviceCount() {
	return static_cast<uint32_t>(m_devices.size());
}
/**
 * @brief Return the cached description of one device.
 * @param[in] _device Device index in [0 .. getDeviceCount()-1].
 * @return The cached DeviceInfo; a default-constructed one for an
 *         out-of-range index (previously an out-of-bounds vector read).
 */
audio::orchestra::DeviceInfo audio::orchestra::api::Android::getDeviceInfo(uint32_t _device) {
	if (_device >= m_devices.size()) {
		ATA_ERROR("Request device info out of range : " << _device << " >= " << m_devices.size());
		return audio::orchestra::DeviceInfo();
	}
	return m_devices[_device];
}
/**
 * @brief Close the stream (currently a no-op on this backend).
 * @return always audio::orchestra::error_none.
 * @note Fix: log message typo "Clese Stream" corrected.
 */
enum audio::orchestra::error audio::orchestra::api::Android::closeStream() {
	ATA_INFO("Close Stream");
	// Can not close the stream now...
	return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::Android::startStream() {
ATA_INFO("Start Stream");
// TODO : Check return ...
audio::orchestra::Api::startStream();
// Can not close the stream now...
return audio::orchestra::error_none;
}
/**
 * @brief Stop the stream by closing device 0 on the ewol context side.
 * @return always audio::orchestra::error_none.
 */
enum audio::orchestra::error audio::orchestra::api::Android::stopStream() {
	ATA_INFO("Stop stream");
	// Closing the context-side device stops the callback delivery.
	ewol::getContext().audioCloseDevice(0);
	return audio::orchestra::error_none;
}
/**
 * @brief Abort the stream immediately by closing device 0 on the ewol context side.
 * @return always audio::orchestra::error_none.
 */
enum audio::orchestra::error audio::orchestra::api::Android::abortStream() {
	ATA_INFO("Abort Stream");
	// Same teardown path as stopStream(): drop the context-side device.
	ewol::getContext().audioCloseDevice(0);
	return audio::orchestra::error_none;
}
/**
 * @brief Deliver one buffer cycle to the user callback (output-only backend).
 * @param[in] _data Device buffer for this cycle (device format/interleaving).
 * @param[in] _frameRate Frame count for this cycle.
 */
void audio::orchestra::api::Android::callBackEvent(void* _data,
                                                   int32_t _frameRate) {
	int32_t doStopStream = 0;
	std11::chrono::system_clock::time_point streamTime = getStreamTime();
	std::vector<enum audio::orchestra::status> status;
	if (m_doConvertBuffer[audio::orchestra::mode_output] == true) {
		// Conversion path: the callback fills m_userBuffer in the user format,
		// then convertBuffer() rewrites it into the device buffer.
		doStopStream = m_callback(nullptr,
		                          std11::chrono::system_clock::time_point(),
		                          m_userBuffer[audio::orchestra::mode_output],
		                          streamTime,
		                          _frameRate,
		                          status);
		convertBuffer((char*)_data, (char*)m_userBuffer[audio::orchestra::mode_output], m_convertInfo[audio::orchestra::mode_output]);
	} else {
		// NOTE(review): here _data is passed in the first (input-side) argument
		// slot of m_callback while the output slot is nullptr — for an
		// output-only backend this looks swapped relative to the branch above;
		// confirm against the AirTAudioCallback signature.
		doStopStream = m_callback(_data,
		                          streamTime,
		                          nullptr,
		                          std11::chrono::system_clock::time_point(),
		                          _frameRate,
		                          status);
	}
	if (doStopStream == 2) {
		// Callback return value 2 requests an immediate abort.
		abortStream();
		return;
	}
	// Advance the soft stream clock by one buffer duration.
	audio::orchestra::Api::tickStreamTime();
}
/**
 * @brief Static trampoline invoked by the ewol audio backend.
 * @param[in] _data Device buffer for this cycle.
 * @param[in] _frameRate Count reported by the backend for this cycle.
 * @param[in] _userData Opaque pointer back to the Android instance (set in probeDeviceOpen()).
 */
void audio::orchestra::api::Android::androidCallBackEvent(void* _data,
                                                          int32_t _frameRate,
                                                          void* _userData) {
	if (_userData == nullptr) {
		ATA_INFO("callback event ... nullptr pointer");
		return;
	}
	audio::orchestra::api::Android* myClass = static_cast<audio::orchestra::api::Android*>(_userData);
	// NOTE(review): _frameRate is halved here — presumably the backend reports
	// a sample count across 2 interleaved channels rather than a frame count;
	// confirm against the Java/ewol layer.
	myClass->callBackEvent(_data, _frameRate/2);
}
/**
 * @brief Open an output device through the ewol context and configure the
 *        user<->device conversion state.
 * @param[in] _device Device index.
 * @param[in] _mode Only audio::orchestra::mode_output is supported.
 * @param[in] _channels Requested channel count.
 * @param[in] _firstChannel First channel offset (used for conversion setup).
 * @param[in] _sampleRate Requested sample rate.
 * @param[in] _format Requested user sample format.
 * @param[out] _bufferSize Unused here; the buffer size is hard-coded below.
 * @param[in] _options Unused by this backend.
 * @return true when the context-side device opened successfully.
 */
bool audio::orchestra::api::Android::probeDeviceOpen(uint32_t _device,
                                                     audio::orchestra::mode _mode,
                                                     uint32_t _channels,
                                                     uint32_t _firstChannel,
                                                     uint32_t _sampleRate,
                                                     audio::format _format,
                                                     uint32_t *_bufferSize,
                                                     const audio::orchestra::StreamOptions& _options) {
	ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
	if (_mode != audio::orchestra::mode_output) {
		ATA_ERROR("Can not start a device input or duplex for Android ...");
		return false;
	}
	m_userFormat = _format;
	m_nUserChannels[modeToIdTable(_mode)] = _channels;
	ewol::Context& tmpContext = ewol::getContext();
	bool ret = false;
	// NOTE(review): SINT8/SINT16 look like pre-fork RtAudio format macros;
	// presumably these should be the audio::format_* enum values — confirm
	// they are still defined after the rework.
	if (_format == SINT8) {
		ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 0, androidCallBackEvent, this);
	} else {
		ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 1, androidCallBackEvent, this);
	}
	m_bufferSize = 256;
	m_sampleRate = _sampleRate;
	m_doByteSwap[modeToIdTable(_mode)] = false; // for endianness ...
	// TODO : For now, we write it in hard ==> to be updated later ...
	m_deviceFormat[modeToIdTable(_mode)] = SINT16;
	m_nDeviceChannels[modeToIdTable(_mode)] = 2;
	m_deviceInterleaved[modeToIdTable(_mode)] = true;
	// A conversion pass is needed when format, channel count or interleaving
	// differ between the user side and the (hard-coded) device side:
	m_doConvertBuffer[modeToIdTable(_mode)] = false;
	if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
	  && m_nUserChannels[modeToIdTable(_mode)] > 1) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (m_doConvertBuffer[modeToIdTable(_mode)] == true) {
		// Allocate necessary internal buffers.
		uint64_t bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
		m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
		if (m_userBuffer[modeToIdTable(_mode)] == nullptr) {
			ATA_ERROR("audio::orchestra::api::Android::probeDeviceOpen: error allocating user buffer memory.");
		}
		setConvertInfo(_mode, _firstChannel);
	}
	ATA_INFO("device format : " << m_deviceFormat[modeToIdTable(_mode)] << " user format : " << m_userFormat);
	ATA_INFO("device channels : " << m_nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_nUserChannels[modeToIdTable(_mode)]);
	ATA_INFO("do convert buffer : " << m_doConvertBuffer[modeToIdTable(_mode)]);
	if (ret == false) {
		ATA_ERROR("Can not open device.");
	}
	return ret;
}
#endif

View File

@@ -0,0 +1,56 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_ANDROID_H__) && defined(ORCHESTRA_BUILD_JAVA)
#define __AUDIO_ORCHESTRA_API_ANDROID_H__
namespace audio {
namespace orchestra {
namespace api {
class Android: public audio::orchestra::Api {
public:
static audio::orchestra::Api* Create();
public:
Android();
virtual ~Android();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::type_java;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
void callbackEvent();
private:
std::vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
bool probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
private:
void callBackEvent(void* _data,
int32_t _frameRate);
static void androidCallBackEvent(void* _data,
int32_t _frameRate,
void* _userData);
};
}
}
}
#endif

View File

@@ -0,0 +1,923 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if defined(ORCHESTRA_BUILD_ASIO)
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
/**
 * @brief Factory callback used by the interface backend registry.
 * @return A freshly allocated ASIO backend (ownership transferred to the caller).
 */
audio::orchestra::Api* audio::orchestra::api::Asio::Create() {
	audio::orchestra::Api* instance = new audio::orchestra::api::Asio();
	return instance;
}
// The ASIO API is designed around a callback scheme, so this
// implementation is similar to that used for OS-X CoreAudio and Linux
// Jack. The primary constraint with ASIO is that it only allows
// access to a single driver at a time. Thus, it is not possible to
// have more than one simultaneous RtAudio stream.
//
// This implementation also requires a number of external ASIO files
// and a few global variables. The ASIO callback scheme does not
// allow for the passing of user data, so we must create a global
// pointer to our callbackInfo structure.
//
// On unix systems, we make use of a pthread condition variable.
// Since there is no equivalent in Windows, I hacked something based
// on information found in
// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
#include "asiosys.h"
#include "asio.h"
#include "iasiothiscallresolver.h"
#include "asiodrivers.h"
#include <cmath>
#undef __class__
#define __class__ "api::Asio"
// File-scope ASIO state: the ASIO callback scheme passes no user data, so the
// driver list, callback table, driver info and callback context must be globals
// (a single ASIO driver can be active at a time anyway).
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
static CallbackInfo *asioCallbackInfo;
// Set when the driver reports an under/overrun via asioMessages().
static bool asioXRun;
namespace audio {
namespace orchestra {
namespace api {
class AsioPrivate {
public:
int32_t drainCounter; // Tracks callback counts when draining
bool internalDrain; // Indicates if stop is initiated from callback or not.
ASIOBufferInfo *bufferInfos;
HANDLE condition;
AsioPrivate() :
drainCounter(0),
internalDrain(false),
bufferInfos(0) {
}
};
}
}
}
// Function declarations (definitions at end of section)
static const char* getAsioErrorString(ASIOError _result);
static void sampleRateChanged(ASIOSampleRate _sRate);
static long asioMessages(long _selector, long _value, void* _message, double* _opt);
/**
 * @brief Constructor: initialize COM for this thread and reset the ASIO driver state.
 */
audio::orchestra::api::Asio::Asio() :
  m_private(new audio::orchestra::api::AsioPrivate()) {
	// ASIO cannot run on a multi-threaded appartment. You can call
	// CoInitialize beforehand, but it must be for appartment threading
	// (in which case, CoInitilialize will return S_FALSE here).
	m_coInitialized = false;
	HRESULT hr = CoInitialize(nullptr);
	if (FAILED(hr)) {
		ATA_ERROR("requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)");
	}
	// Fix: only record a successful initialization — the destructor must not
	// call CoUninitialize() to balance a CoInitialize() that FAILED.
	m_coInitialized = !FAILED(hr);
	drivers.removeCurrentDriver();
	driverInfo.asioVersion = 2;
	// See note in DirectSound implementation about GetDesktopWindow().
	driverInfo.sysRef = GetForegroundWindow();
}
/**
 * @brief Destructor: close any open stream, then release COM.
 * @note Order matters: the stream (and thus the driver) must be closed
 *       before CoUninitialize() tears down the COM apartment.
 */
audio::orchestra::api::Asio::~Asio() {
	if (m_state != audio::orchestra::state_closed) {
		closeStream();
	}
	if (m_coInitialized) {
		CoUninitialize();
	}
}
/**
 * @brief Number of ASIO drivers registered on the system.
 * @return Driver count as reported by the ASIO driver list.
 */
uint32_t audio::orchestra::api::Asio::getDeviceCount() {
	return static_cast<uint32_t>(drivers.asioGetNumDev());
}
rtaudio::DeviceInfo audio::orchestra::api::Asio::getDeviceInfo(uint32_t _device) {
rtaudio::DeviceInfo info;
info.probed = false;
// Get device ID
uint32_t nDevices = getDeviceCount();
if (nDevices == 0) {
ATA_ERROR("no devices found!");
return info;
}
if (_device >= nDevices) {
ATA_ERROR("device ID is invalid!");
return info;
}
// If a stream is already open, we cannot probe other devices. Thus, use the saved results.
if (m_state != audio::orchestra::state_closed) {
if (_device >= m_devices.size()) {
ATA_ERROR("device ID was not present before stream was opened.");
return info;
}
return m_devices[ _device ];
}
char driverName[32];
ASIOError result = drivers.asioGetDriverName((int) _device, driverName, 32);
if (result != ASE_OK) {
ATA_ERROR("unable to get driver name (" << getAsioErrorString(result) << ").");
return info;
}
info.name = driverName;
if (!drivers.loadDriver(driverName)) {
ATA_ERROR("unable to load driver (" << driverName << ").");
return info;
}
result = ASIOInit(&driverInfo);
if (result != ASE_OK) {
ATA_ERROR("error (" << getAsioErrorString(result) << ") initializing driver (" << driverName << ").");
return info;
}
// Determine the device channel information.
long inputChannels, outputChannels;
result = ASIOGetChannels(&inputChannels, &outputChannels);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("error (" << getAsioErrorString(result) << ") getting channel count (" << driverName << ").");
return info;
}
info.outputChannels = outputChannels;
info.inputChannels = inputChannels;
if (info.outputChannels > 0 && info.inputChannels > 0) {
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
}
// Determine the supported sample rates.
info.sampleRates.clear();
for (uint32_t i=0; i<MAX_SAMPLE_RATES; i++) {
result = ASIOCanSampleRate((ASIOSampleRate) SAMPLE_RATES[i]);
if (result == ASE_OK) {
info.sampleRates.push_back(SAMPLE_RATES[i]);
}
}
// Determine supported data types ... just check first channel and assume rest are the same.
ASIOChannelInfo channelInfo;
channelInfo.channel = 0;
channelInfo.isInput = true;
if (info.inputChannels <= 0) {
channelInfo.isInput = false;
}
result = ASIOGetChannelInfo(&channelInfo);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("error (" << getAsioErrorString(result) << ") getting driver channel info (" << driverName << ").");
return info;
}
info.nativeFormats.clear();
if ( channelInfo.type == ASIOSTInt16MSB
|| channelInfo.type == ASIOSTInt16LSB) {
info.nativeFormats.push_back(audio::format_int16);
} else if ( channelInfo.type == ASIOSTInt32MSB
|| channelInfo.type == ASIOSTInt32LSB) {
info.nativeFormats.push_back(audio::format_int32);
} else if ( channelInfo.type == ASIOSTFloat32MSB
|| channelInfo.type == ASIOSTFloat32LSB) {
info.nativeFormats.push_back(audio::format_float);
} else if ( channelInfo.type == ASIOSTFloat64MSB
|| channelInfo.type == ASIOSTFloat64LSB) {
info.nativeFormats.push_back(audio::format_double);
} else if ( channelInfo.type == ASIOSTInt24MSB
|| channelInfo.type == ASIOSTInt24LSB) {
info.nativeFormats.push_back(audio::format_int24);
}
if (info.outputChannels > 0){
if (getDefaultOutputDevice() == _device) {
info.isDefaultOutput = true;
}
}
if (info.inputChannels > 0) {
if (getDefaultInputDevice() == _device) {
info.isDefaultInput = true;
}
}
info.probed = true;
drivers.removeCurrentDriver();
return info;
}
static void bufferSwitch(long _index, ASIOBool _processNow) {
RtApiAsio* object = (RtApiAsio*)asioCallbackInfo->object;
object->callbackEvent(_index);
}
void audio::orchestra::api::Asio::saveDeviceInfo() {
m_devices.clear();
uint32_t nDevices = getDeviceCount();
m_devices.resize(nDevices);
for (uint32_t i=0; i<nDevices; i++) {
m_devices[i] = getDeviceInfo(i);
}
}
bool audio::orchestra::api::Asio::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t* _bufferSize,
const audio::orchestra::StreamOptions& _options) {
// For ASIO, a duplex stream MUST use the same driver.
if ( _mode == audio::orchestra::mode_input
&& m_mode == audio::orchestra::mode_output
&& m_device[0] != _device) {
ATA_ERROR("an ASIO duplex stream must use the same device for input and output!");
return false;
}
char driverName[32];
ASIOError result = drivers.asioGetDriverName((int) _device, driverName, 32);
if (result != ASE_OK) {
ATA_ERROR("unable to get driver name (" << getAsioErrorString(result) << ").");
return false;
}
// Only load the driver once for duplex stream.
if ( _mode != audio::orchestra::mode_input
|| m_mode != audio::orchestra::mode_output) {
// The getDeviceInfo() function will not work when a stream is open
// because ASIO does not allow multiple devices to run at the same
// time. Thus, we'll probe the system before opening a stream and
// save the results for use by getDeviceInfo().
this->saveDeviceInfo();
if (!drivers.loadDriver(driverName)) {
ATA_ERROR("unable to load driver (" << driverName << ").");
return false;
}
result = ASIOInit(&driverInfo);
if (result != ASE_OK) {
ATA_ERROR("error (" << getAsioErrorString(result) << ") initializing driver (" << driverName << ").");
return false;
}
}
// Check the device channel count.
long inputChannels, outputChannels;
result = ASIOGetChannels(&inputChannels, &outputChannels);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("error (" << getAsioErrorString(result) << ") getting channel count (" << driverName << ").");
return false;
}
if ( ( _mode == audio::orchestra::mode_output
&& (_channels+_firstChannel) > (uint32_t) outputChannels)
|| ( _mode == audio::orchestra::mode_input
&& (_channels+_firstChannel) > (uint32_t) inputChannels)) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") does not support requested channel count (" << _channels << ") + offset (" << _firstChannel << ").");
return false;
}
m_nDeviceChannels[modeToIdTable(_mode)] = _channels;
m_nUserChannels[modeToIdTable(_mode)] = _channels;
m_channelOffset[modeToIdTable(_mode)] = _firstChannel;
// Verify the sample rate is supported.
result = ASIOCanSampleRate((ASIOSampleRate) _sampleRate);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") does not support requested sample rate (" << _sampleRate << ").");
return false;
}
// Get the current sample rate
ASIOSampleRate currentRate;
result = ASIOGetSampleRate(&currentRate);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") error getting sample rate.");
return false;
}
// Set the sample rate only if necessary
if (currentRate != _sampleRate) {
result = ASIOSetSampleRate((ASIOSampleRate) _sampleRate);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") error setting sample rate (" << _sampleRate << ").");
return false;
}
}
// Determine the driver data type.
ASIOChannelInfo channelInfo;
channelInfo.channel = 0;
if (_mode == audio::orchestra::mode_output) {
channelInfo.isInput = false;
} else {
channelInfo.isInput = true;
}
result = ASIOGetChannelInfo(&channelInfo);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting data format.");
return false;
}
// Assuming WINDOWS host is always little-endian.
m_doByteSwap[modeToIdTable(_mode)] = false;
m_userFormat = _format;
m_deviceFormat[modeToIdTable(_mode)] = 0;
if ( channelInfo.type == ASIOSTInt16MSB
|| channelInfo.type == ASIOSTInt16LSB) {
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
if (channelInfo.type == ASIOSTInt16MSB) {
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if ( channelInfo.type == ASIOSTInt32MSB
|| channelInfo.type == ASIOSTInt32LSB) {
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
if (channelInfo.type == ASIOSTInt32MSB) {
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if ( channelInfo.type == ASIOSTFloat32MSB
|| channelInfo.type == ASIOSTFloat32LSB) {
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_FLOAT32;
if (channelInfo.type == ASIOSTFloat32MSB) {
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if ( channelInfo.type == ASIOSTFloat64MSB
|| channelInfo.type == ASIOSTFloat64LSB) {
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_FLOAT64;
if (channelInfo.type == ASIOSTFloat64MSB) {
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if ( channelInfo.type == ASIOSTInt24MSB
|| channelInfo.type == ASIOSTInt24LSB) {
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
if (channelInfo.type == ASIOSTInt24MSB) {
m_doByteSwap[modeToIdTable(_mode)] = true;
}
}
if (m_deviceFormat[modeToIdTable(_mode)] == 0) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") data format not supported by RtAudio.");
return false;
}
// Set the buffer size. For a duplex stream, this will end up
// setting the buffer size based on the input constraints, which
// should be ok.
long minSize, maxSize, preferSize, granularity;
result = ASIOGetBufferSize(&minSize, &maxSize, &preferSize, &granularity);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting buffer size.");
return false;
}
if (*_bufferSize < (uint32_t) minSize) {
*_bufferSize = (uint32_t) minSize;
} else if (*_bufferSize > (uint32_t) maxSize) {
*_bufferSize = (uint32_t) maxSize;
} else if (granularity == -1) {
// Make sure bufferSize is a power of two.
int32_t log2_of_min_size = 0;
int32_t log2_of_max_size = 0;
for (uint32_t i = 0; i < sizeof(long) * 8; i++) {
if (minSize & ((long)1 << i)) {
log2_of_min_size = i;
}
if (maxSize & ((long)1 << i)) {
log2_of_max_size = i;
}
}
long min_delta = std::abs((long)*_bufferSize - ((long)1 << log2_of_min_size));
int32_t min_delta_num = log2_of_min_size;
for (int32_t i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
long current_delta = std::abs((long)*_bufferSize - ((long)1 << i));
if (current_delta < min_delta) {
min_delta = current_delta;
min_delta_num = i;
}
}
*_bufferSize = ((uint32_t)1 << min_delta_num);
if (*_bufferSize < (uint32_t) {
minSize) *_bufferSize = (uint32_t) minSize;
} else if (*_bufferSize > (uint32_t) maxSize) {
*_bufferSize = (uint32_t) maxSize;
}
} else if (granularity != 0) {
// Set to an even multiple of granularity, rounding up.
*_bufferSize = (*_bufferSize + granularity-1) / granularity * granularity;
}
if ( _mode == audio::orchestra::mode_input
&& m_mode == audio::orchestra::mode_output
&& m_bufferSize != *_bufferSize) {
drivers.removeCurrentDriver();
ATA_ERROR("input/output buffersize discrepancy!");
return false;
}
m_bufferSize = *_bufferSize;
m_nBuffers = 2;
// ASIO always uses non-interleaved buffers.
m_deviceInterleaved[modeToIdTable(_mode)] = false;
m_private->bufferInfos = 0;
// Create a manual-reset event.
m_private->condition = CreateEvent(nullptr, // no security
TRUE, // manual-reset
FALSE, // non-signaled initially
nullptr); // unnamed
// Create the ASIO internal buffers. Since RtAudio sets up input
// and output separately, we'll have to dispose of previously
// created output buffers for a duplex stream.
long inputLatency, outputLatency;
if ( _mode == audio::orchestra::mode_input
&& m_mode == audio::orchestra::mode_output) {
ASIODisposeBuffers();
if (m_private->bufferInfos == nullptr) {
free(m_private->bufferInfos);
m_private->bufferInfos = nullptr;
}
}
// Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
bool buffersAllocated = false;
uint32_t i, nChannels = m_nDeviceChannels[0] + m_nDeviceChannels[1];
m_private->bufferInfos = (ASIOBufferInfo *) malloc(nChannels * sizeof(ASIOBufferInfo));
if (m_private->bufferInfos == nullptr) {
ATA_ERROR("error allocating bufferInfo memory for driver (" << driverName << ").");
goto error;
}
ASIOBufferInfo *infos;
infos = m_private->bufferInfos;
for (i=0; i<m_nDeviceChannels[0]; i++, infos++) {
infos->isInput = ASIOFalse;
infos->channelNum = i + m_channelOffset[0];
infos->buffers[0] = infos->buffers[1] = 0;
}
for (i=0; i<m_nDeviceChannels[1]; i++, infos++) {
infos->isInput = ASIOTrue;
infos->channelNum = i + m_channelOffset[1];
infos->buffers[0] = infos->buffers[1] = 0;
}
// Set up the ASIO callback structure and create the ASIO data buffers.
asioCallbacks.bufferSwitch = &bufferSwitch;
asioCallbacks.sampleRateDidChange = &sampleRateChanged;
asioCallbacks.asioMessage = &asioMessages;
asioCallbacks.bufferSwitchTimeInfo = nullptr;
result = ASIOCreateBuffers(m_private->bufferInfos, nChannels, m_bufferSize, &asioCallbacks);
if (result != ASE_OK) {
ATA_ERROR("driver (" << driverName << ") error (" << getAsioErrorString(result) << ") creating buffers.");
goto error;
}
buffersAllocated = true;
// Set flags for buffer conversion.
m_doConvertBuffer[modeToIdTable(_mode)] = false;
if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
&& m_nUserChannels[modeToIdTable(_mode)] > 1) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
// Allocate necessary internal buffers
uint64_t bufferBytes;
bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
if (m_userBuffer[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("error allocating user buffer memory.");
goto error;
}
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
bool makeBuffer = true;
bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
if (_mode == audio::orchestra::mode_input) {
if (m_mode == audio::orchestra::mode_output && m_deviceBuffer) {
uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
if (bufferBytes <= bytesOut) {
makeBuffer = false;
}
}
}
if (makeBuffer) {
bufferBytes *= *_bufferSize;
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = nullptr;
}
m_deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_deviceBuffer == nullptr) {
ATA_ERROR("error allocating device buffer memory.");
goto error;
}
}
}
m_sampleRate = _sampleRate;
m_device[modeToIdTable(_mode)] = _device;
m_state = audio::orchestra::state_stopped;
if ( _mode == audio::orchestra::mode_output
&& _mode == audio::orchestra::mode_input) {
// We had already set up an output stream.
m_mode = audio::orchestra::mode_duplex;
} else {
m_mode = _mode;
}
// Determine device latencies
result = ASIOGetLatencies(&inputLatency, &outputLatency);
if (result != ASE_OK) {
ATA_ERROR("driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting latency.");
} else {
m_latency[0] = outputLatency;
m_latency[1] = inputLatency;
}
// Setup the buffer conversion information structure. We don't use
// buffers to do channel offsets, so we override that parameter
// here.
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
setConvertInfo(_mode, 0);
}
return true;
error:
if (buffersAllocated) {
ASIODisposeBuffers();
}
drivers.removeCurrentDriver();
CloseHandle(m_private->condition);
if (m_private->bufferInfos != nullptr) {
free(m_private->bufferInfos);
m_private->bufferInfos = nullptr;
}
for (int32_t i=0; i<2; i++) {
if (m_userBuffer[i]) {
free(m_userBuffer[i]);
m_userBuffer[i] = 0;
}
}
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = 0;
}
return false;
}
/**
 * @brief Close the currently open ASIO stream and release all resources.
 *
 * Stops the driver if it is still running, disposes the ASIO buffers,
 * unloads the driver, and frees every internally allocated buffer.
 * @return error_warning when no stream is open, error_none otherwise.
 */
enum audio::orchestra::error audio::orchestra::api::Asio::closeStream() {
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("no open stream to close!");
		return audio::orchestra::error_warning;
	}
	if (m_state == audio::orchestra::state_running) {
		// Mark stopped BEFORE ASIOStop() so the buffer-switch callback bails out.
		m_state = audio::orchestra::state_stopped;
		ASIOStop();
	}
	// Teardown order matters: dispose driver buffers, then unload the driver.
	ASIODisposeBuffers();
	drivers.removeCurrentDriver();
	CloseHandle(m_private->condition);
	if (m_private->bufferInfos) {
		free(m_private->bufferInfos);
		// NOTE(review): bufferInfos is not reset to nullptr here, unlike the
		// error path of probeDeviceOpen() — confirm this cannot double-free.
	}
	for (int32_t i=0; i<2; i++) {
		if (m_userBuffer[i]) {
			free(m_userBuffer[i]);
			m_userBuffer[i] = 0;
		}
	}
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = 0;
	}
	m_mode = audio::orchestra::mode_unknow;
	m_state = audio::orchestra::state_closed;
	return audio::orchestra::error_none;
}
bool stopThreadCalled = false;
enum audio::orchestra::error audio::orchestra::api::Asio::startStream() {
// TODO : Check return ...
audio::orchestra::Api::startStream();
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_running) {
ATA_ERROR("the stream is already running!");
return audio::orchestra::error_warning;
}
ASIOError result = ASIOStart();
if (result != ASE_OK) {
ATA_ERROR("error (" << getAsioErrorString(result) << ") starting device.");
goto unlock;
}
m_private->drainCounter = 0;
m_private->internalDrain = false;
ResetEvent(m_private->condition);
m_state = audio::orchestra::state_running;
asioXRun = false;
unlock:
stopThreadCalled = false;
if (result == ASE_OK) {
return audio::orchestra::error_none;
}
return audio::orchestra::error_systemError;
}
enum audio::orchestra::error audio::orchestra::api::Asio::stopStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_stopped) {
ATA_ERROR("the stream is already stopped!");
return audio::orchestra::error_warning;
}
if (m_mode == audio::orchestra::mode_output || m_mode == audio::orchestra::mode_duplex) {
if (m_private->drainCounter == 0) {
m_private->drainCounter = 2;
WaitForSingleObject(m_private->condition, INFINITE); // block until signaled
}
}
m_state = audio::orchestra::state_stopped;
ASIOError result = ASIOStop();
if (result != ASE_OK) {
ATA_ERROR("error (" << getAsioErrorString(result) << ") stopping device.");
}
if (result == ASE_OK) {
return audio::orchestra::error_none;
}
return audio::orchestra::error_systemError;
}
/**
 * @brief Abort the ASIO stream immediately.
 *
 * For ASIO this is identical to stopStream() (see comment below).
 * @return error_fail on an invalid stream, error_warning if already stopped,
 *         otherwise the result of stopStream().
 */
enum audio::orchestra::error audio::orchestra::api::Asio::abortStream() {
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		// Fix: the function returns an enum — `error(...); return;` did not
		// compile and did not match the sibling functions' pattern.
		return audio::orchestra::error_warning;
	}
	// The following lines were commented-out because some behavior was
	// noted where the device buffers need to be zeroed to avoid
	// continuing sound, even when the device buffers are completely
	// disposed. So now, calling abort is the same as calling stop.
	// handle->drainCounter = 2;
	return stopStream();
}
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted. It is necessary to handle it this way because the
// callbackEvent() function must return before the ASIOStop()
// function will return.
static unsigned __stdcall asioStopStream(void *_ptr) {
	CallbackInfo* info = (CallbackInfo*)_ptr;
	// Fix: `RtApiAsio` is the pre-fork RtAudio class name; after the rework
	// the object stored in the callback info is an orchestra Asio instance.
	audio::orchestra::api::Asio* object = (audio::orchestra::api::Asio*)info->object;
	object->stopStream();
	_endthreadex(0);
	return 0;
}
bool audio::orchestra::api::Asio::callbackEvent(long bufferIndex) {
if ( m_state == audio::orchestra::state_stopped
|| m_state == audio::orchestra::state_stopping) {
return true;
}
if (m_state == audio::orchestra::state_closed) {
ATA_ERROR("the stream is closed ... this shouldn't happen!");
return false;
}
CallbackInfo *info = (CallbackInfo *) &m_callbackInfo;
// Check if we were draining the stream and signal if finished.
if (m_private->drainCounter > 3) {
m_state = audio::orchestra::state_stopping;
if (m_private->internalDrain == false) {
SetEvent(m_private->condition);
} else { // spawn a thread to stop the stream
unsigned threadId;
m_callbackInfo.thread = _beginthreadex(nullptr,
0,
&asioStopStream,
&m_callbackInfo,
0,
&threadId);
}
return true;
}
// Invoke user callback to get fresh output data UNLESS we are
// draining stream.
if (m_private->drainCounter == 0) {
std11::chrono::system_clock::time_point streamTime = getStreamTime();
std::vector<enum audio::orchestra::status status;
if (m_mode != audio::orchestra::mode_input && asioXRun == true) {
status.push_back(audio::orchestra::status_underflow);
asioXRun = false;
}
if (m_mode != audio::orchestra::mode_output && asioXRun == true) {
status.push_back(audio::orchestra::status_underflow;
asioXRun = false;
}
int32_t cbReturnValue = info->callback(m_userBuffer[1],
streamTime,
m_userBuffer[0],
streamTime,
m_bufferSize,
status);
if (cbReturnValue == 2) {
m_state = audio::orchestra::state_stopping;
m_private->drainCounter = 2;
unsigned threadId;
m_callbackInfo.thread = _beginthreadex(nullptr,
0,
&asioStopStream,
&m_callbackInfo,
0,
&threadId);
return true;
} else if (cbReturnValue == 1) {
m_private->drainCounter = 1;
m_private->internalDrain = true;
}
}
uint32_t nChannels, bufferBytes, i, j;
nChannels = m_nDeviceChannels[0] + m_nDeviceChannels[1];
if ( m_mode == audio::orchestra::mode_output
|| m_mode == audio::orchestra::mode_duplex) {
bufferBytes = m_bufferSize * audio::getFormatBytes(m_deviceFormat[0]);
if (m_private->drainCounter > 1) { // write zeros to the output stream
for (i=0, j=0; i<nChannels; i++) {
if (m_private->bufferInfos[i].isInput != ASIOTrue) {
memset(m_private->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes);
}
}
} else if (m_doConvertBuffer[0]) {
convertBuffer(m_deviceBuffer, m_userBuffer[0], m_convertInfo[0]);
if (m_doByteSwap[0]) {
byteSwapBuffer(m_deviceBuffer,
m_bufferSize * m_nDeviceChannels[0],
m_deviceFormat[0]);
}
for (i=0, j=0; i<nChannels; i++) {
if (m_private->bufferInfos[i].isInput != ASIOTrue) {
memcpy(m_private->bufferInfos[i].buffers[bufferIndex],
&m_deviceBuffer[j++*bufferBytes],
bufferBytes);
}
}
} else {
if (m_doByteSwap[0]) {
byteSwapBuffer(m_userBuffer[0],
m_bufferSize * m_nUserChannels[0],
m_userFormat);
}
for (i=0, j=0; i<nChannels; i++) {
if (m_private->bufferInfos[i].isInput != ASIOTrue) {
memcpy(m_private->bufferInfos[i].buffers[bufferIndex],
&m_userBuffer[0][bufferBytes*j++],
bufferBytes);
}
}
}
if (m_private->drainCounter) {
m_private->drainCounter++;
goto unlock;
}
}
if ( m_mode == audio::orchestra::mode_input
|| m_mode == audio::orchestra::mode_duplex) {
bufferBytes = m_bufferSize * audio::getFormatBytes(m_deviceFormat[1]);
if (m_doConvertBuffer[1]) {
// Always interleave ASIO input data.
for (i=0, j=0; i<nChannels; i++) {
if (m_private->bufferInfos[i].isInput == ASIOTrue) {
memcpy(&m_deviceBuffer[j++*bufferBytes],
m_private->bufferInfos[i].buffers[bufferIndex],
bufferBytes);
}
}
if (m_doByteSwap[1]) {
byteSwapBuffer(m_deviceBuffer,
m_bufferSize * m_nDeviceChannels[1],
m_deviceFormat[1]);
}
convertBuffer(m_userBuffer[1],
m_deviceBuffer,
m_convertInfo[1]);
} else {
for (i=0, j=0; i<nChannels; i++) {
if (m_private->bufferInfos[i].isInput == ASIOTrue) {
memcpy(&m_userBuffer[1][bufferBytes*j++],
m_private->bufferInfos[i].buffers[bufferIndex],
bufferBytes);
}
}
if (m_doByteSwap[1]) {
byteSwapBuffer(m_userBuffer[1],
m_bufferSize * m_nUserChannels[1],
m_userFormat);
}
}
}
unlock:
// The following call was suggested by Malte Clasen. While the API
// documentation indicates it should not be required, some device
// drivers apparently do not function correctly without it.
ASIOOutputReady();
audio::orchestra::Api::tickStreamTime();
return true;
}
/**
 * @brief ASIO driver notification that the device sample rate changed.
 *
 * The ASIO documentation says that this usually only happens during
 * external sync. Audio processing is not stopped by the driver,
 * actual sample rate might not have even changed, maybe only the
 * sample rate status of an AES/EBU or S/PDIF digital input at the
 * audio device. We react by stopping the stream.
 * @param[in] _sRate New sample rate reported by the driver.
 */
static void sampleRateChanged(ASIOSampleRate _sRate) {
	// Fix: `RtApi` is the pre-fork RtAudio class name; the stored object is
	// an orchestra Api instance after the rework.
	audio::orchestra::Api* object = (audio::orchestra::Api*)asioCallbackInfo->object;
	// Fix: the statement was missing its terminating ';'.
	enum audio::orchestra::error ret = object->stopStream();
	if (ret != audio::orchestra::error_none) {
		ATA_ERROR("error stop stream!");
	} else {
		ATA_ERROR("driver reports sample rate changed to " << _sRate << " ... stream stopped!!!");
	}
}
/**
 * @brief ASIO driver-to-host message handler (asioMessage callback).
 * @param[in] _selector Message identifier from the driver.
 * @param[in] _value Selector-specific value.
 * @param[in] _message Opaque payload (unused).
 * @param[in] _opt Optional double payload (unused).
 * @return Non-zero when the selector/message is supported or was handled.
 */
static long asioMessages(long _selector, long _value, void* _message, double* _opt) {
	long returnValue = 0;
	switch (_selector) {
		case kAsioSelectorSupported:
			// Report which of the selectors below this host implements.
			// kAsioSupportsTimeInfo / TimeCode / InputMonitor were added for
			// ASIO 2.0 and are optional.
			if (    _value == kAsioResetRequest
			     || _value == kAsioEngineVersion
			     || _value == kAsioResyncRequest
			     || _value == kAsioLatenciesChanged
			     || _value == kAsioSupportsTimeInfo
			     || _value == kAsioSupportsTimeCode
			     || _value == kAsioSupportsInputMonitor) {
				returnValue = 1L;
			}
			break;
		case kAsioResetRequest:
			// The driver cannot be reset from inside this call (it issued it);
			// defer the reset (ASIOStop(), ASIODisposeBuffers(), destruction,
			// then re-initialization) to the next "safe" situation.
			ATA_ERROR("driver reset requested!!!");
			returnValue = 1L;
			break;
		case kAsioResyncRequest:
			// The driver encountered some non-fatal data loss, used for media
			// synchronization. Historically a workaround for Win16Mutex being
			// held too long on Windows 95/98, but drivers may issue it in
			// other situations too. Flag it as an xrun.
			// ATA_ERROR("driver resync requested!!!");
			asioXRun = true;
			returnValue = 1L;
			break;
		case kAsioLatenciesChanged:
			// Driver latencies changed. Beware: this does NOT mean the buffer
			// sizes changed — internal delay data may need an update.
			ATA_ERROR("driver latency may have changed!!!");
			returnValue = 1L;
			break;
		case kAsioEngineVersion:
			// Supported ASIO version of this host application (2 == ASIO 2.0);
			// drivers assume 1.0 when this selector is not implemented.
			returnValue = 2L;
			break;
		case kAsioSupportsTimeInfo:
			// We keep the "old" bufferSwitch() path: no bufferSwitchTimeInfo().
			returnValue = 0;
			break;
		case kAsioSupportsTimeCode:
			// Not interested in time-code information.
			returnValue = 0;
			break;
	}
	return returnValue;
}
/**
 * @brief Translate an ASIO error code into a human-readable message.
 * @param[in] _result ASIO error code to translate.
 * @return A static description string, or "Unknown error." when unmapped.
 */
static const char* getAsioErrorString(ASIOError _result) {
	struct Messages {
		ASIOError value;
		const char* message;
	};
	static const Messages m[] = {
		{ ASE_NotPresent, "Hardware input or output is not present or available." },
		{ ASE_HWMalfunction, "Hardware is malfunctioning." },
		{ ASE_InvalidParameter, "Invalid input parameter." },
		{ ASE_InvalidMode, "Invalid mode." },
		{ ASE_SPNotAdvancing, "Sample position not advancing." },
		{ ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
		{ ASE_NoMemory, "Not enough memory to complete the request." }
	};
	for (uint32_t i = 0; i < sizeof(m)/sizeof(m[0]); ++i) {
		// Fix: compared against undeclared `result` instead of the `_result` parameter.
		if (m[i].value == _result) {
			return m[i].message;
		}
	}
	return "Unknown error.";
}
#endif

View File

@@ -0,0 +1,54 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_ASIO_H__) && defined(ORCHESTRA_BUILD_ASIO)
#define __AUDIO_ORCHESTRA_API_ASIO_H__
namespace audio {
	namespace orchestra {
		namespace api {
			// Forward declaration of the back-end private data.
			// Fix: was written `class AsioPrivate:` — a ':' instead of the
			// ';' a forward declaration requires (compile error).
			class AsioPrivate;
			/**
			 * @brief Steinberg ASIO (Windows) back-end of the orchestra API.
			 */
			class Asio: public audio::orchestra::Api {
				public:
					// Factory used by the generic interface to instantiate this back-end.
					static audio::orchestra::Api* Create();
				public:
					Asio();
					virtual ~Asio();
					enum audio::orchestra::type getCurrentApi() {
						// NOTE(review): the other back-ends return `type_xxx`
						// style values (type_coreOSX, type_coreIOS) — confirm
						// this identifier matches the enum declaration.
						return audio::orchestra::WINDOWS_ASIO;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					bool callbackEvent(long _bufferIndex);
				private:
					std::shared_ptr<AsioPrivate> m_private;
					std::vector<audio::orchestra::DeviceInfo> m_devices;
					void saveDeviceInfo();
					bool m_coInitialized;
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}
#endif

1292
audio/orchestra/api/Core.cpp Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,69 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_CORE_H__) && defined(ORCHESTRA_BUILD_MACOSX_CORE)
#define __AUDIO_ORCHESTRA_API_CORE_H__
#include <CoreAudio/AudioHardware.h>
namespace audio {
	namespace orchestra {
		namespace api {
			// Forward declaration of the back-end private data.
			class CorePrivate;
			/**
			 * @brief CoreAudio (macOS) back-end of the orchestra API.
			 */
			class Core: public audio::orchestra::Api {
				public:
					// Factory used by the generic interface to instantiate this back-end.
					static audio::orchestra::Api* Create();
				public:
					Core();
					virtual ~Core();
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_coreOSX;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					uint32_t getDefaultOutputDevice();
					uint32_t getDefaultInputDevice();
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					// Member-side handler invoked with the already-resolved buffers/times.
					bool callbackEvent(AudioDeviceID _deviceId,
					                   const AudioBufferList *_inBufferList,
					                   const std11::chrono::system_clock::time_point& _inTime,
					                   const AudioBufferList *_outBufferList,
					                   const std11::chrono::system_clock::time_point& _outTime);
					// Static trampoline registered with CoreAudio; _infoPointer carries
					// the object instance.
					static OSStatus callbackEvent(AudioDeviceID _inDevice,
					                              const AudioTimeStamp* _inNow,
					                              const AudioBufferList* _inInputData,
					                              const AudioTimeStamp* _inInputTime,
					                              AudioBufferList* _outOutputData,
					                              const AudioTimeStamp* _inOutputTime,
					                              void* _infoPointer);
					static void coreStopStream(void *_userData);
				private:
					std::shared_ptr<CorePrivate> m_private;
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
					static const char* getErrorCode(OSStatus _code);
					// Listener registered for xrun (over/underflow) property changes.
					static OSStatus xrunListener(AudioObjectID _inDevice,
					                             uint32_t _nAddresses,
					                             const AudioObjectPropertyAddress _properties[],
					                             void* _userData);
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,57 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_CORE_IOS_H__) && defined(ORCHESTRA_BUILD_IOS_CORE)
#define __AUDIO_ORCHESTRA_API_CORE_IOS_H__
namespace audio {
	namespace orchestra {
		namespace api {
			// Forward declaration of the back-end private data.
			class CoreIosPrivate;
			/**
			 * @brief CoreAudio RemoteIO (iOS) back-end of the orchestra API.
			 */
			class CoreIos: public audio::orchestra::Api {
				public:
					// Factory used by the generic interface to instantiate this back-end.
					static audio::orchestra::Api* Create();
				public:
					CoreIos();
					virtual ~CoreIos();
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_coreIOS;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					void callbackEvent();
				private:
					// Fixed list of the exposed devices (output + input).
					std::vector<audio::orchestra::DeviceInfo> m_devices;
					void saveDeviceInfo();
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
				public:
					// Called from the RemoteIO render callback with the raw buffer.
					void callBackEvent(void* _data,
					                   int32_t _frameRate);
				public:
					std11::shared_ptr<CoreIosPrivate> m_private;
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,312 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifdef ORCHESTRA_BUILD_IOS_CORE
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#include <unistd.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <limits.h>
#undef __class__
#define __class__ "api::CoreIos"
// Factory entry point used by the generic API to instantiate this back-end.
// The caller takes ownership of the returned object.
audio::orchestra::Api* audio::orchestra::api::CoreIos::Create(void) {
	ATA_INFO("Create CoreIos device ... ");
	return new audio::orchestra::api::CoreIos();
}
#define kOutputBus 0
#define kInputBus 1
namespace audio {
	namespace orchestra {
		namespace api {
			// Private implementation data of the CoreIos back-end: holds the
			// RemoteIO AudioUnit instance created in probeDeviceOpen().
			class CoreIosPrivate {
				public:
					AudioComponentInstance audioUnit;
			};
		}
	}
}
/**
 * @brief Construct the iOS back-end and register its two fixed devices
 * (a stereo 48 kHz int16 output and the matching input).
 */
audio::orchestra::api::CoreIos::CoreIos(void) :
	m_private(new audio::orchestra::api::CoreIosPrivate()) {
	ATA_INFO("new CoreIos");
	// Fix: `deviceCount` was declared but the log hard-coded the literal 2;
	// log the variable instead (same output, no unused local).
	int32_t deviceCount = 2;
	ATA_ERROR("Get count devices : " << deviceCount);
	audio::orchestra::DeviceInfo tmp;
	// Add default output format :
	tmp.name = "out";
	tmp.sampleRates.push_back(48000);
	tmp.outputChannels = 2;
	tmp.inputChannels = 0;
	tmp.duplexChannels = 0;
	tmp.isDefaultOutput = true;
	tmp.isDefaultInput = false;
	tmp.nativeFormats.push_back(audio::format_int16);
	m_devices.push_back(tmp);
	// add default input format:
	tmp.name = "in";
	tmp.sampleRates.push_back(48000);
	tmp.outputChannels = 0;
	tmp.inputChannels = 2;
	tmp.duplexChannels = 0;
	tmp.isDefaultOutput = false;
	tmp.isDefaultInput = true;
	tmp.nativeFormats.push_back(audio::format_int16);
	m_devices.push_back(tmp);
	ATA_INFO("Create CoreIOs interface (end)");
}
// Tear down the back-end: release the RemoteIO audio unit that
// probeDeviceOpen() created via AudioComponentInstanceNew().
audio::orchestra::api::CoreIos::~CoreIos(void) {
	ATA_INFO("Destroy CoreIOs interface");
	AudioUnitUninitialize(m_private->audioUnit);
}
/**
 * @brief Number of devices exposed by this back-end (fixed list built in
 * the constructor).
 * @return Device count.
 */
uint32_t audio::orchestra::api::CoreIos::getDeviceCount(void) {
	const size_t nbDevice = m_devices.size();
	return static_cast<uint32_t>(nbDevice);
}
/**
 * @brief Get the description of one device.
 * @param[in] _device Device identifier (index in the fixed device list).
 * @return The device description, or a default-constructed DeviceInfo when
 *         the id is out of range.
 */
audio::orchestra::DeviceInfo audio::orchestra::api::CoreIos::getDeviceInfo(uint32_t _device) {
	// Fix: guard the index — operator[] on an out-of-range id is undefined
	// behavior.
	if (_device >= m_devices.size()) {
		ATA_ERROR("Request device info out of range: " << _device << " >= " << m_devices.size());
		return audio::orchestra::DeviceInfo();
	}
	return m_devices[_device];
}
// Close the stream. The RemoteIO unit itself is only released in the
// destructor, so this is currently a no-op that always succeeds.
enum audio::orchestra::error audio::orchestra::api::CoreIos::closeStream(void) {
	ATA_INFO("Close Stream");
	// Can not close the stream now...
	return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::startStream(void) {
ATA_INFO("Start Stream");
// TODO : Check return ...
audio::orchestra::Api::startStream();
OSStatus status = AudioOutputUnitStart(m_private->audioUnit);
// Can not close the stream now...
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::stopStream(void) {
ATA_INFO("Stop stream");
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
// Can not close the stream now...
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::abortStream(void) {
ATA_INFO("Abort Stream");
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
// Can not close the stream now...
return audio::orchestra::error_none;
}
/**
 * @brief Fill one output buffer requested by the RemoteIO render callback.
 *
 * Invokes the user callback (through the conversion buffer when the user
 * format differs from the device int16 format), then advances stream time.
 * @param[in,out] _data Destination device buffer to fill.
 * @param[in] _nbChunk Number of frames requested.
 */
void audio::orchestra::api::CoreIos::callBackEvent(void* _data,
                                                   int32_t _nbChunk) {
	// Disabled debug generator (sine wave). NOTE(review): this dead code
	// still references `_frameRate`, a parameter that no longer exists —
	// it would not compile if re-enabled.
	#if 0
	static double value=0;
	int16_t* vals = (int16_t*)_data;
	for (int32_t iii=0; iii<_frameRate; ++iii) {
		*vals++ = (int16_t)(sin(value) * 32760.0);
		*vals++ = (int16_t)(sin(value) * 32760.0);
		value += 0.09;
		if (value >= M_PI*2.0) {
			value -= M_PI*2.0;
		}
	}
	return;
	#endif
	int32_t doStopStream = 0;
	std11::chrono::system_clock::time_point streamTime = getStreamTime();
	std::vector<enum audio::orchestra::status> status;
	if (m_doConvertBuffer[modeToIdTable(audio::orchestra::mode_output)] == true) {
		// User format differs from the device format: render into the user
		// buffer, then convert into the device buffer.
		doStopStream = m_callback(nullptr,
		                          streamTime,
		                          &m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0],
		                          streamTime,
		                          _nbChunk,
		                          status);
		convertBuffer((char*)_data, &m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0], m_convertInfo[modeToIdTable(audio::orchestra::mode_output)]);
	} else {
		// Formats match: render straight into the device buffer.
		doStopStream = m_callback(_data,
		                          streamTime,
		                          nullptr,
		                          streamTime,
		                          _nbChunk,
		                          status);
	}
	// Callback return value 2 requests an immediate abort.
	if (doStopStream == 2) {
		abortStream();
		return;
	}
	audio::orchestra::Api::tickStreamTime();
}
/**
 * @brief RemoteIO render callback: forwards every requested AudioBuffer to
 * the CoreIos instance passed as user data.
 * @param[in] _userData Pointer to the owning CoreIos instance.
 * @param[in] _ioActionFlags Render action flags (unused).
 * @param[in] _inTimeStamp Render time stamp (unused).
 * @param[in] _inBusNumber Bus being rendered (logged only).
 * @param[in] _inNumberFrames Frames requested (unused; sizes come from the buffers).
 * @param[in,out] _ioData Buffer list to fill.
 * @return noErr on success, -1 when no instance was provided.
 */
static OSStatus playbackCallback(void *_userData,
                                 AudioUnitRenderActionFlags* _ioActionFlags,
                                 const AudioTimeStamp* _inTimeStamp,
                                 uint32_t _inBusNumber,
                                 uint32_t _inNumberFrames,
                                 AudioBufferList* _ioData) {
	if (_userData == nullptr) {
		ATA_ERROR("callback event ... nullptr pointer");
		return -1;
	}
	audio::orchestra::api::CoreIos* myClass = static_cast<audio::orchestra::api::CoreIos*>(_userData);
	// get all requested buffer :
	// Fix: mNumberBuffers is unsigned; use an unsigned index to avoid a
	// signed/unsigned comparison.
	for (uint32_t iii=0; iii < _ioData->mNumberBuffers; iii++) {
		AudioBuffer buffer = _ioData->mBuffers[iii];
		// Byte size -> number of stereo int16 frames.
		int32_t numberFrame = buffer.mDataByteSize/2/*stereo*/ /sizeof(int16_t);
		ATA_VERBOSE("request data size: " << numberFrame << " busNumber=" << _inBusNumber);
		myClass->callBackEvent(buffer.mData, numberFrame);
	}
	return noErr;
}
bool audio::orchestra::api::CoreIos::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
if (_mode != audio::orchestra::mode_output) {
ATA_ERROR("Can not start a device input or duplex for CoreIos ...");
return false;
}
bool ret = true;
// configure Airtaudio internal configuration:
m_userFormat = _format;
m_nUserChannels[modeToIdTable(_mode)] = _channels;
m_bufferSize = 8192;
m_sampleRate = _sampleRate;
m_doByteSwap[modeToIdTable(_mode)] = false; // for endienness ...
// TODO : For now, we write it in hard ==> to be update later ...
m_deviceFormat[modeToIdTable(_mode)] = audio::format_int16;
m_nDeviceChannels[modeToIdTable(_mode)] = 2;
m_deviceInterleaved[modeToIdTable(_mode)] = true;
m_doConvertBuffer[modeToIdTable(_mode)] = false;
if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
&& m_nUserChannels[modeToIdTable(_mode)] > 1) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if (m_doConvertBuffer[modeToIdTable(_mode)] == true) {
// Allocate necessary internal buffers.
uint64_t bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
ATA_ERROR("error allocating user buffer memory.");
}
setConvertInfo(_mode, _firstChannel);
}
ATA_INFO("device format : " << m_deviceFormat[modeToIdTable(_mode)] << " user format : " << m_userFormat);
ATA_INFO("device channels : " << m_nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_nUserChannels[modeToIdTable(_mode)]);
ATA_INFO("do convert buffer : " << m_doConvertBuffer[modeToIdTable(_mode)]);
if (ret == false) {
ATA_ERROR("Can not open device.");
}
// Configure IOs interface:
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(nullptr, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &m_private->audioUnit);
if (status != 0) {
ATA_ERROR("can not create an audio intance...");
}
uint32_t flag = 1;
// Enable IO for playback
status = AudioUnitSetProperty(m_private->audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
if (status != 0) {
ATA_ERROR("can not request audio autorisation...");
}
// Describe format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 48000.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1; //
audioFormat.mChannelsPerFrame = 2; // stereo
audioFormat.mBitsPerChannel = sizeof(short) * 8;
audioFormat.mBytesPerPacket = sizeof(short) * audioFormat.mChannelsPerFrame;
audioFormat.mBytesPerFrame = sizeof(short) * audioFormat.mChannelsPerFrame;
audioFormat.mReserved = 0;
// Apply format
status = AudioUnitSetProperty(m_private->audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
if (status != 0) {
ATA_ERROR("can not set stream properties...");
}
// Set output callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = &playbackCallback;
callbackStruct.inputProcRefCon = this;
status = AudioUnitSetProperty(m_private->audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
if (status != 0) {
ATA_ERROR("can not set Callback...");
}
// Initialise
status = AudioUnitInitialize(m_private->audioUnit);
if (status != 0) {
ATA_ERROR("can not initialize...");
}
return ret;
}
#endif

1464
audio/orchestra/api/Ds.cpp Normal file

File diff suppressed because it is too large Load Diff

58
audio/orchestra/api/Ds.h Normal file
View File

@@ -0,0 +1,58 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_DS_H__) && defined(ORCHESTRA_BUILD_DS)
#define __AUDIO_ORCHESTRA_API_DS_H__

namespace audio {
	namespace orchestra {
		namespace api {
			class DsPrivate;
			//! DirectSound (Windows) backend of the orchestra API.
			class Ds: public audio::orchestra::Api {
				public:
					//! Factory entry used by the generic orchestra API registry.
					static audio::orchestra::Api* Create();
				public:
					Ds();
					virtual ~Ds();
					//! @return the backend identifier (DirectSound).
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_ds;
					}
					uint32_t getDeviceCount();
					uint32_t getDefaultOutputDevice();
					uint32_t getDefaultInputDevice();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesirable results!
					void callbackEvent();
				private:
					//! Static trampoline executed by the DirectSound service thread.
					static void dsCallbackEvent(void *_userData);
					std11::shared_ptr<DsPrivate> m_private; // backend-private state
					bool m_coInitialized; // true when COM was initialized by this instance
					bool m_buffersRolling; // true once the DS buffers are being consumed
					long m_duplexPrerollBytes; // bytes to preroll before starting a duplex stream
					//! Open the physical device (called by the generic openStream()).
					bool probeDeviceOpen(uint32_t _device,
					                     enum audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     enum audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}

#endif

View File

@@ -0,0 +1,63 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
// NOTE: this guard previously tested __DUMMY__, which is not the flag used by
// Dummy.h (ORCHESTRA_BUILD_DUMMY) — so the implementation could never be
// compiled in consistently with its header. Align the two.
#if defined(ORCHESTRA_BUILD_DUMMY)

#include <audio/orchestra/api/Dummy.h>
#include <audio/orchestra/debug.h>

#undef __class__
#define __class__ "api::Dummy"

//! Factory entry used by the generic orchestra API registry.
audio::orchestra::Api* audio::orchestra::api::Dummy::Create() {
	return new audio::orchestra::api::Dummy();
}

audio::orchestra::api::Dummy::Dummy() {
	ATA_WARNING("This class provides no functionality.");
}

//! The dummy backend exposes no device.
uint32_t audio::orchestra::api::Dummy::getDeviceCount() {
	return 0;
}

//! Always returns an empty (non-probed) device description.
audio::orchestra::DeviceInfo audio::orchestra::api::Dummy::getDeviceInfo(uint32_t _device) {
	(void)_device;
	return audio::orchestra::DeviceInfo();
}

enum audio::orchestra::error audio::orchestra::api::Dummy::closeStream() {
	return audio::orchestra::error_none;
}

enum audio::orchestra::error audio::orchestra::api::Dummy::startStream() {
	// TODO : Check return ...
	audio::orchestra::Api::startStream();
	return audio::orchestra::error_none;
}

enum audio::orchestra::error audio::orchestra::api::Dummy::stopStream() {
	return audio::orchestra::error_none;
}

enum audio::orchestra::error audio::orchestra::api::Dummy::abortStream() {
	return audio::orchestra::error_none;
}

//! Device opening always fails: there is nothing to open.
bool audio::orchestra::api::Dummy::probeDeviceOpen(uint32_t _device,
                                                   audio::orchestra::mode _mode,
                                                   uint32_t _channels,
                                                   uint32_t _firstChannel,
                                                   uint32_t _sampleRate,
                                                   audio::format _format,
                                                   uint32_t *_bufferSize,
                                                   const audio::orchestra::StreamOptions& _options) {
	return false;
}
#endif

View File

@@ -0,0 +1,45 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_DUMMY__) && defined(ORCHESTRA_BUILD_DUMMY)
#define __AUDIO_ORCHESTRA_DUMMY__

#include <audio/orchestra/Interface.h>

namespace audio {
	namespace orchestra {
		namespace api {
			//! Placeholder backend: reports no device and refuses to open streams.
			class Dummy: public audio::orchestra::Api {
				public:
					//! Factory entry used by the generic orchestra API registry.
					static audio::orchestra::Api* Create();
				public:
					Dummy();
					//! @return the backend identifier (dummy).
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_dummy;
					}
					//! @return always 0: this backend exposes no device.
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
				private:
					//! Always fails: there is nothing to open.
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}

#endif

View File

@@ -0,0 +1,734 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
// must run before :
#if defined(ORCHESTRA_BUILD_JACK)
#include <unistd.h>
#include <limits.h>
#include <iostream>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <string.h>
#include <etk/thread/tools.h>
#undef __class__
#define __class__ "api::Jack"
//! Factory entry used by the generic orchestra API registry.
audio::orchestra::Api* audio::orchestra::api::Jack::Create() {
	return new audio::orchestra::api::Jack();
}
// JACK is a low-latency audio server, originally written for the
// GNU/Linux operating system and now also ported to OS-X. It can
// connect a number of different applications to an audio device, as
// well as allowing them to share audio between themselves.
//
// When using JACK with RtAudio, "devices" refer to JACK clients that
// have ports connected to the server. The JACK server is typically
// started in a terminal as follows:
//
// jackd -d alsa -d hw:0
//
// or through an interface program such as qjackctl. Many of the
// parameters normally set for a stream are fixed by the JACK server
// and can be specified when the JACK server is started. In
// particular,
//
// jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
// jackd -r -d alsa -r 48000
//
// specifies a sample rate of 44100 Hz, a buffer size of 512 sample
// frames, and number of buffers = 4. Once the server is running, it
// is not possible to override these values. If the values are not
// specified in the command-line, the JACK server uses default values.
//
// The JACK server does not have to be running when an instance of
// audio::orchestra::Jack is created, though the function getDeviceCount() will
// report 0 devices found until JACK has been started. When no
// devices are available (i.e., the JACK server is not running), a
// stream cannot be opened.
#include <jack/jack.h>
#include <unistd.h>
#include <cstdio>
namespace audio {
	namespace orchestra {
		namespace api {
			//! Backend-private state shared between the Jack wrapper and its static callbacks.
			class JackPrivate {
				public:
					jack_client_t *client; // connection to the jack server (0 when closed)
					jack_port_t **ports[2]; // registered ports; index 0 = playback, 1 = capture
					std::string deviceName[2]; // jack client-name prefix for each direction
					bool xrun[2]; // under/over-flow flags raised by the xrun callback
					std11::condition_variable condition; // signaled when output draining completes
					int32_t drainCounter; // Tracks callback counts when draining
					bool internalDrain; // Indicates if stop is initiated from callback or not.
					JackPrivate() :
					  client(0),
					  drainCounter(0),
					  internalDrain(false) {
						ports[0] = 0;
						ports[1] = 0;
						xrun[0] = false;
						xrun[1] = false;
					}
			};
		}
	}
}
//! Constructor: allocates the private state only; the jack client itself is
//! created later, when a stream is opened (probeDeviceOpen).
audio::orchestra::api::Jack::Jack() :
  m_private(new audio::orchestra::api::JackPrivate()) {
	// Nothing to do here.
}
//! Destructor: make sure any open stream is shut down before the private
//! client data is released.
audio::orchestra::api::Jack::~Jack() {
	if (m_state == audio::orchestra::state_closed) {
		return;
	}
	closeStream();
}
//! Count the jack "devices" (= distinct jack clients exposing ports).
//! Returns 0 when no jack server is reachable.
uint32_t audio::orchestra::api::Jack::getDeviceCount() {
	// Register as a transient client; do not auto-start the server.
	jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption;
	jack_status_t *status = nullptr;
	jack_client_t *client = jack_client_open("orchestraJackCount", options, status);
	if (client == nullptr) {
		return 0;
	}
	uint32_t deviceCount = 0;
	const char **portList = jack_get_ports(client, nullptr, nullptr, 0);
	if (portList != nullptr) {
		// Each distinct "<client>:" prefix counts as one device.
		std::string current;
		std::string previous;
		uint32_t idx = 0;
		do {
			current = (char *) portList[idx];
			size_t colonPos = current.find(":");
			if (colonPos != std::string::npos) {
				current = current.substr(0, colonPos + 1);
				if (current != previous) {
					deviceCount++;
					previous = current;
				}
			}
		} while (portList[++idx]);
		free(portList);
	}
	jack_client_close(client);
	return deviceCount;
}
/**
 * @brief Build the description of one jack "device" (= one jack client exposing ports).
 * @param[in] _device Device index, counted the same way as in getDeviceCount().
 * @return a DeviceInfo structure; info.probed stays false on any error.
 */
audio::orchestra::DeviceInfo audio::orchestra::api::Jack::getDeviceInfo(uint32_t _device) {
	audio::orchestra::DeviceInfo info;
	info.probed = false;
	// Connect as a transient client; do not auto-start the server.
	jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption
	jack_status_t *status = nullptr;
	jack_client_t *client = jack_client_open("orchestraJackInfo", options, status);
	if (client == nullptr) {
		ATA_ERROR("Jack server not found or connection error!");
		// TODO : audio::orchestra::error_warning;
		return info;
	}
	const char **ports;
	std::string port, previousPort;
	uint32_t nPorts = 0, nDevices = 0;
	// Resolve the device index to a client name by folding all port names
	// on their client prefix.
	ports = jack_get_ports(client, nullptr, nullptr, 0);
	if (ports) {
		// Parse the port names up to the first colon (:).
		size_t iColon = 0;
		do {
			port = (char *) ports[ nPorts ];
			iColon = port.find(":");
			if (iColon != std::string::npos) {
				port = port.substr(0, iColon);
				if (port != previousPort) {
					if (nDevices == _device) {
						info.name = port;
					}
					nDevices++;
					previousPort = port;
				}
			}
		} while (ports[++nPorts]);
		free(ports);
	}
	if (_device >= nDevices) {
		jack_client_close(client);
		ATA_ERROR("device ID is invalid!");
		// TODO : audio::orchestra::error_invalidUse;
		return info;
	}
	// Get the current jack server sample rate.
	info.sampleRates.clear();
	info.sampleRates.push_back(jack_get_sample_rate(client));
	// Count the available ports containing the client name as device
	// channels. Jack "input ports" equal RtAudio output channels.
	uint32_t nChannels = 0;
	ports = jack_get_ports(client, info.name.c_str(), nullptr, JackPortIsInput);
	if (ports) {
		while (ports[ nChannels ]) {
			nChannels++;
		}
		free(ports);
		info.outputChannels = nChannels;
	}
	// Jack "output ports" equal RtAudio input channels.
	nChannels = 0;
	ports = jack_get_ports(client, info.name.c_str(), nullptr, JackPortIsOutput);
	if (ports) {
		while (ports[ nChannels ]) {
			nChannels++;
		}
		free(ports);
		info.inputChannels = nChannels;
	}
	if (info.outputChannels == 0 && info.inputChannels == 0) {
		jack_client_close(client);
		ATA_ERROR("error determining Jack input/output channels!");
		// TODO : audio::orchestra::error_warning;
		return info;
	}
	// If device opens for both playback and capture, we determine the channels.
	if (info.outputChannels > 0 && info.inputChannels > 0) {
		info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
	}
	// Jack always uses 32-bit floats.
	info.nativeFormats.push_back(audio::format_float);
	// Jack doesn't provide default devices so we'll use the first available one.
	if (    _device == 0
	     && info.outputChannels > 0) {
		info.isDefaultOutput = true;
	}
	if (    _device == 0
	     && info.inputChannels > 0) {
		info.isDefaultInput = true;
	}
	jack_client_close(client);
	info.probed = true;
	return info;
}
//! Process-callback trampoline registered with jack_set_process_callback:
//! forwards the period to callbackEvent() (jack convention: 0 = continue).
int32_t audio::orchestra::api::Jack::jackCallbackHandler(jack_nframes_t _nframes, void* _userData) {
	ATA_VERBOSE("Jack callback: [BEGIN] " << uint64_t(_userData));
	audio::orchestra::api::Jack* self = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
	if (self->callbackEvent((uint64_t)_nframes) == true) {
		ATA_VERBOSE("Jack callback: [END] 0");
		return 0;
	}
	ATA_VERBOSE("Jack callback: [END] 1");
	return 1;
}
// This function will be called by a spawned thread when the Jack
// server signals that it is shutting down. It is necessary to handle
// it this way because the jackShutdown() function must return before
// the jack_deactivate() function (in closeStream()) will return.
void audio::orchestra::api::Jack::jackCloseStream(void* _userData) {
etk::thread::setName("Jack_closeStream");
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
myClass->closeStream();
}
void audio::orchestra::api::Jack::jackShutdown(void* _userData) {
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
// Check current stream state. If stopped, then we'll assume this
// was called as a result of a call to audio::orchestra::api::Jack::stopStream (the
// deactivation of a client handle causes this function to be called).
// If not, we'll assume the Jack server is shutting down or some
// other problem occurred and we should close the stream.
if (myClass->isStreamRunning() == false) {
return;
}
new std11::thread(&audio::orchestra::api::Jack::jackCloseStream, _userData);
ATA_ERROR("The Jack server is shutting down this client ... stream stopped and closed!!");
}
int32_t audio::orchestra::api::Jack::jackXrun(void* _userData) {
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
if (myClass->m_private->ports[0]) {
myClass->m_private->xrun[0] = true;
}
if (myClass->m_private->ports[1]) {
myClass->m_private->xrun[1] = true;
}
return 0;
}
bool audio::orchestra::api::Jack::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t* _bufferSize,
const audio::orchestra::StreamOptions& _options) {
// Look for jack server and try to become a client (only do once per stream).
jack_client_t *client = 0;
if ( _mode == audio::orchestra::mode_output
|| ( _mode == audio::orchestra::mode_input
&& m_mode != audio::orchestra::mode_output)) {
jack_options_t jackoptions = (jack_options_t) (JackNoStartServer); //JackNullOption;
jack_status_t *status = nullptr;
if (!_options.streamName.empty()) {
client = jack_client_open(_options.streamName.c_str(), jackoptions, status);
} else {
client = jack_client_open("orchestraJack", jackoptions, status);
}
if (client == 0) {
ATA_ERROR("Jack server not found or connection error!");
return false;
}
} else {
// The handle must have been created on an earlier pass.
client = m_private->client;
}
const char **ports;
std::string port, previousPort, deviceName;
uint32_t nPorts = 0, nDevices = 0;
ports = jack_get_ports(client, nullptr, nullptr, 0);
if (ports) {
// Parse the port names up to the first colon (:).
size_t iColon = 0;
do {
port = (char *) ports[ nPorts ];
iColon = port.find(":");
if (iColon != std::string::npos) {
port = port.substr(0, iColon);
if (port != previousPort) {
if (nDevices == _device) {
deviceName = port;
}
nDevices++;
previousPort = port;
}
}
} while (ports[++nPorts]);
free(ports);
}
if (_device >= nDevices) {
ATA_ERROR("device ID is invalid!");
return false;
}
// Count the available ports containing the client name as device
// channels. Jack "input ports" equal RtAudio output channels.
uint32_t nChannels = 0;
uint64_t flag = JackPortIsInput;
if (_mode == audio::orchestra::mode_input) flag = JackPortIsOutput;
ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag);
if (ports) {
while (ports[ nChannels ]) {
nChannels++;
}
free(ports);
}
// Compare the jack ports for specified client to the requested number of channels.
if (nChannels < (_channels + _firstChannel)) {
ATA_ERROR("requested number of channels (" << _channels << ") + offset (" << _firstChannel << ") not found for specified device (" << _device << ":" << deviceName << ").");
return false;
}
// Check the jack server sample rate.
uint32_t jackRate = jack_get_sample_rate(client);
if (_sampleRate != jackRate) {
jack_client_close(client);
ATA_ERROR("the requested sample rate (" << _sampleRate << ") is different than the JACK server rate (" << jackRate << ").");
return false;
}
m_sampleRate = jackRate;
// Get the latency of the JACK port.
ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag);
if (ports[ _firstChannel ]) {
// Added by Ge Wang
jack_latency_callback_mode_t cbmode = (_mode == audio::orchestra::mode_input ? JackCaptureLatency : JackPlaybackLatency);
// the range (usually the min and max are equal)
jack_latency_range_t latrange; latrange.min = latrange.max = 0;
// get the latency range
jack_port_get_latency_range(jack_port_by_name(client, ports[_firstChannel]), cbmode, &latrange);
// be optimistic, use the min!
m_latency[modeToIdTable(_mode)] = latrange.min;
//m_latency[modeToIdTable(_mode)] = jack_port_get_latency(jack_port_by_name(client, ports[ _firstChannel ]));
}
free(ports);
// The jack server always uses 32-bit floating-point data.
m_deviceFormat[modeToIdTable(_mode)] = audio::format_float;
m_userFormat = _format;
// Jack always uses non-interleaved buffers.
m_deviceInterleaved[modeToIdTable(_mode)] = false;
// Jack always provides host byte-ordered data.
m_doByteSwap[modeToIdTable(_mode)] = false;
// Get the buffer size. The buffer size and number of buffers
// (periods) is set when the jack server is started.
m_bufferSize = (int) jack_get_buffer_size(client);
*_bufferSize = m_bufferSize;
m_nDeviceChannels[modeToIdTable(_mode)] = _channels;
m_nUserChannels[modeToIdTable(_mode)] = _channels;
// Set flags for buffer conversion.
m_doConvertBuffer[modeToIdTable(_mode)] = false;
if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
ATA_CRITICAL("Can not update format ==> use RIVER lib for this ...");
}
if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
&& m_nUserChannels[modeToIdTable(_mode)] > 1) {
ATA_ERROR("Reorder channel for the interleaving properties ...");
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
// Allocate our JackHandle structure for the stream.
m_private->client = client;
m_private->deviceName[modeToIdTable(_mode)] = deviceName;
// Allocate necessary internal buffers.
uint64_t bufferBytes;
bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
ATA_VERBOSE("allocate : nbChannel=" << m_nUserChannels[modeToIdTable(_mode)] << " bufferSize=" << *_bufferSize << " format=" << m_deviceFormat[modeToIdTable(_mode)] << "=" << audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]));
m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
ATA_ERROR("error allocating user buffer memory.");
goto error;
}
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
bool makeBuffer = true;
if (_mode == audio::orchestra::mode_output) {
bufferBytes = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
} else { // _mode == audio::orchestra::mode_input
bufferBytes = m_nDeviceChannels[1] * audio::getFormatBytes(m_deviceFormat[1]);
if (m_mode == audio::orchestra::mode_output && m_deviceBuffer) {
uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
if (bufferBytes < bytesOut) {
makeBuffer = false;
}
}
}
if (makeBuffer) {
bufferBytes *= *_bufferSize;
if (m_deviceBuffer) free(m_deviceBuffer);
m_deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_deviceBuffer == nullptr) {
ATA_ERROR("error allocating device buffer memory.");
goto error;
}
}
}
// Allocate memory for the Jack ports (channels) identifiers.
m_private->ports[modeToIdTable(_mode)] = (jack_port_t **) malloc (sizeof (jack_port_t *) * _channels);
if (m_private->ports[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("error allocating port memory.");
goto error;
}
m_device[modeToIdTable(_mode)] = _device;
m_channelOffset[modeToIdTable(_mode)] = _firstChannel;
m_state = audio::orchestra::state_stopped;
if ( m_mode == audio::orchestra::mode_output
&& _mode == audio::orchestra::mode_input) {
// We had already set up the stream for output.
m_mode = audio::orchestra::mode_duplex;
} else {
m_mode = _mode;
jack_set_process_callback(m_private->client, &audio::orchestra::api::Jack::jackCallbackHandler, this);
jack_set_xrun_callback(m_private->client, &audio::orchestra::api::Jack::jackXrun, this);
jack_on_shutdown(m_private->client, &audio::orchestra::api::Jack::jackShutdown, this);
}
// Register our ports.
char label[64];
if (_mode == audio::orchestra::mode_output) {
for (uint32_t i=0; i<m_nUserChannels[0]; i++) {
snprintf(label, 64, "outport %d", i);
m_private->ports[0][i] = jack_port_register(m_private->client,
(const char *)label,
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsOutput,
0);
}
} else {
for (uint32_t i=0; i<m_nUserChannels[1]; i++) {
snprintf(label, 64, "inport %d", i);
m_private->ports[1][i] = jack_port_register(m_private->client,
(const char *)label,
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsInput,
0);
}
}
// Setup the buffer conversion information structure. We don't use
// buffers to do channel offsets, so we override that parameter
// here.
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
setConvertInfo(_mode, 0);
}
return true;
error:
jack_client_close(m_private->client);
if (m_private->ports[0] != nullptr) {
free(m_private->ports[0]);
m_private->ports[0] = nullptr;
}
if (m_private->ports[1] != nullptr) {
free(m_private->ports[1]);
m_private->ports[1] = nullptr;
}
for (int32_t iii=0; iii<2; ++iii) {
m_userBuffer[iii].clear();
}
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = nullptr;
}
return false;
}
/**
 * @brief Deactivate and disconnect the jack client, release every buffer and
 *        reset the stream state.
 * @return error_warning when no stream is open, error_none otherwise.
 */
enum audio::orchestra::error audio::orchestra::api::Jack::closeStream() {
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("no open stream to close!");
		return audio::orchestra::error_warning;
	}
	// NOTE(review): m_private is null-checked here but dereferenced
	// unconditionally below; in practice it is allocated in the constructor
	// and never reset, so the check is vestigial.
	if (m_private != nullptr) {
		if (m_state == audio::orchestra::state_running) {
			jack_deactivate(m_private->client);
		}
		jack_client_close(m_private->client);
	}
	// Release the port-identifier arrays allocated by probeDeviceOpen().
	if (m_private->ports[0] != nullptr) {
		free(m_private->ports[0]);
		m_private->ports[0] = nullptr;
	}
	if (m_private->ports[1] != nullptr) {
		free(m_private->ports[1]);
		m_private->ports[1] = nullptr;
	}
	// Drop user-side and conversion buffers.
	for (int32_t i=0; i<2; i++) {
		m_userBuffer[i].clear();
	}
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = nullptr;
	}
	m_mode = audio::orchestra::mode_unknow;
	m_state = audio::orchestra::state_closed;
	return audio::orchestra::error_none;
}
/**
 * @brief Activate the jack client and connect our registered ports to the
 *        selected device ports for each active direction.
 * @return error_none on success, error_warning if already running,
 *         error_fail/error_systemError otherwise.
 */
enum audio::orchestra::error audio::orchestra::api::Jack::startStream() {
	// TODO : Check return ...
	audio::orchestra::Api::startStream();
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_running) {
		ATA_ERROR("the stream is already running!");
		return audio::orchestra::error_warning;
	}
	// 'result' doubles as the success flag for the 'unlock' exit below
	// (0 == success); it is re-armed to 1 before every fallible step.
	int32_t result = jack_activate(m_private->client);
	if (result) {
		ATA_ERROR("unable to activate JACK client!");
		goto unlock;
	}
	const char **ports;
	// Get the list of available ports.
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		result = 1;
		ports = jack_get_ports(m_private->client, m_private->deviceName[0].c_str(), nullptr, JackPortIsInput);
		if (ports == nullptr) {
			ATA_ERROR("error determining available JACK input ports!");
			goto unlock;
		}
		// Now make the port connections. Since RtAudio wasn't designed to
		// allow the user to select particular channels of a device, we'll
		// just open the first "nChannels" ports with offset.
		for (uint32_t i=0; i<m_nUserChannels[0]; i++) {
			result = 1;
			if (ports[ m_channelOffset[0] + i ])
				result = jack_connect(m_private->client, jack_port_name(m_private->ports[0][i]), ports[ m_channelOffset[0] + i ]);
			if (result) {
				free(ports);
				ATA_ERROR("error connecting output ports!");
				goto unlock;
			}
		}
		free(ports);
	}
	if (    m_mode == audio::orchestra::mode_input
	     || m_mode == audio::orchestra::mode_duplex) {
		result = 1;
		ports = jack_get_ports(m_private->client, m_private->deviceName[1].c_str(), nullptr, JackPortIsOutput);
		if (ports == nullptr) {
			ATA_ERROR("error determining available JACK output ports!");
			goto unlock;
		}
		// Now make the port connections. See note above.
		for (uint32_t i=0; i<m_nUserChannels[1]; i++) {
			result = 1;
			if (ports[ m_channelOffset[1] + i ]) {
				result = jack_connect(m_private->client, ports[ m_channelOffset[1] + i ], jack_port_name(m_private->ports[1][i]));
			}
			if (result) {
				free(ports);
				ATA_ERROR("error connecting input ports!");
				goto unlock;
			}
		}
		free(ports);
	}
	// Reset the drain state machine used by stopStream()/callbackEvent().
	m_private->drainCounter = 0;
	m_private->internalDrain = false;
	m_state = audio::orchestra::state_running;
unlock:
	if (result == 0) {
		return audio::orchestra::error_none;
	}
	return audio::orchestra::error_systemError;
}
/**
 * @brief Stop the stream, letting pending output drain first.
 *
 * For playback, requests a drain (drainCounter = 2 makes callbackEvent()
 * write silence) and waits on the condition variable until the callback
 * signals completion, then deactivates the jack client.
 */
enum audio::orchestra::error audio::orchestra::api::Jack::stopStream() {
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		return audio::orchestra::error_warning;
	}
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		// drainCounter != 0 means the callback itself initiated the stop and
		// the drain already happened; do not wait in that case.
		if (m_private->drainCounter == 0) {
			m_private->drainCounter = 2;
			std11::unique_lock<std11::mutex> lck(m_mutex);
			m_private->condition.wait(lck);
		}
	}
	jack_deactivate(m_private->client);
	m_state = audio::orchestra::state_stopped;
	return audio::orchestra::error_none;
}
//! Stop the stream immediately, skipping the output drain wait.
enum audio::orchestra::error audio::orchestra::api::Jack::abortStream() {
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		return audio::orchestra::error_warning;
	}
	// Pre-arming the drain counter makes stopStream() skip its blocking wait.
	m_private->drainCounter = 2;
	return stopStream();
}
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted. It is necessary to handle it this way because the
// callbackEvent() function must return before the jack_deactivate()
// function will return.
static void jackStopStream(void* _userData) {
etk::thread::setName("Jack_stopStream");
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
myClass->stopStream();
}
/**
 * @brief Body of the jack process callback: runs the user callback and moves
 *        audio between the user buffers and the jack port buffers.
 * @param[in] _nframes Number of frames in this period (must equal m_bufferSize).
 * @return false on unrecoverable error (tells jack to stop calling us),
 *         true otherwise.
 */
bool audio::orchestra::api::Jack::callbackEvent(uint64_t _nframes) {
	if (    m_state == audio::orchestra::state_stopped
	     || m_state == audio::orchestra::state_stopping) {
		return true;
	}
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("the stream is closed ... this shouldn't happen!");
		return false;
	}
	if (m_bufferSize != _nframes) {
		ATA_ERROR("the JACK buffer size has changed ... cannot process!");
		return false;
	}
	// Check if we were draining the stream and signal is finished.
	if (m_private->drainCounter > 3) {
		m_state = audio::orchestra::state_stopping;
		if (m_private->internalDrain == true) {
			// Stop was requested from inside the callback: finish on a thread.
			new std11::thread(jackStopStream, this);
		} else {
			// Wake the stopStream() call waiting on the condition variable.
			m_private->condition.notify_one();
		}
		return true;
	}
	// Invoke user callback first, to get fresh output data.
	if (m_private->drainCounter == 0) {
		std11::chrono::time_point<std11::chrono::system_clock> streamTime = getStreamTime();
		std::vector<enum audio::orchestra::status> status;
		// Report any xrun flagged by jackXrun() since the last period.
		if (m_mode != audio::orchestra::mode_input && m_private->xrun[0] == true) {
			status.push_back(audio::orchestra::status_underflow);
			m_private->xrun[0] = false;
		}
		if (m_mode != audio::orchestra::mode_output && m_private->xrun[1] == true) {
			status.push_back(audio::orchestra::status_overflow);
			m_private->xrun[1] = false;
		}
		int32_t cbReturnValue = m_callback(&m_userBuffer[1][0],
		                                   streamTime,
		                                   &m_userBuffer[0][0],
		                                   streamTime,
		                                   m_bufferSize,
		                                   status);
		// Callback convention: 2 = abort now, 1 = stop after draining output.
		if (cbReturnValue == 2) {
			m_state = audio::orchestra::state_stopping;
			m_private->drainCounter = 2;
			new std11::thread(jackStopStream, this);
			return true;
		}
		else if (cbReturnValue == 1) {
			m_private->drainCounter = 1;
			m_private->internalDrain = true;
		}
	}
	// Jack buffers are planar: each channel occupies bufferBytes bytes.
	jack_default_audio_sample_t *jackbuffer;
	uint64_t bufferBytes = _nframes * sizeof(jack_default_audio_sample_t);
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_private->drainCounter > 1) { // write zeros to the output stream
			for (uint32_t i=0; i<m_nDeviceChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[0][i], (jack_nframes_t) _nframes);
				memset(jackbuffer, 0, bufferBytes);
			}
		} else if (m_doConvertBuffer[0]) {
			convertBuffer(m_deviceBuffer, &m_userBuffer[0][0], m_convertInfo[0]);
			for (uint32_t i=0; i<m_nDeviceChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[0][i], (jack_nframes_t) _nframes);
				memcpy(jackbuffer, &m_deviceBuffer[i*bufferBytes], bufferBytes);
			}
		} else { // no buffer conversion
			for (uint32_t i=0; i<m_nUserChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[0][i], (jack_nframes_t) _nframes);
				memcpy(jackbuffer, &m_userBuffer[0][i*bufferBytes], bufferBytes);
			}
		}
		if (m_private->drainCounter) {
			// Count drained periods; when > 3 the block at the top finishes the stop.
			m_private->drainCounter++;
			goto unlock;
		}
	}
	if (    m_mode == audio::orchestra::mode_input
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_doConvertBuffer[1]) {
			for (uint32_t i=0; i<m_nDeviceChannels[1]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[1][i], (jack_nframes_t) _nframes);
				memcpy(&m_deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes);
			}
			convertBuffer(&m_userBuffer[1][0], m_deviceBuffer, m_convertInfo[1]);
		} else {
			// no buffer conversion
			for (uint32_t i=0; i<m_nUserChannels[1]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[1][i], (jack_nframes_t) _nframes);
				memcpy(&m_userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes);
			}
		}
	}
unlock:
	audio::orchestra::Api::tickStreamTime();
	return true;
}
#endif

View File

@@ -0,0 +1,58 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_JACK_H__) && defined(ORCHESTRA_BUILD_JACK)
#define __AUDIO_ORCHESTRA_API_JACK_H__

#include <jack/jack.h>

namespace audio {
	namespace orchestra {
		namespace api {
			class JackPrivate;
			//! JACK (Jack Audio Connection Kit) backend of the orchestra API.
			class Jack: public audio::orchestra::Api {
				public:
					//! Factory entry used by the generic orchestra API registry.
					static audio::orchestra::Api* Create();
				public:
					Jack();
					virtual ~Jack();
					//! @return the backend identifier (jack).
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_jack;
					}
					//! @return the number of jack clients currently exposing ports.
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesirable results!
					bool callbackEvent(uint64_t _nframes);
				private:
					//! xrun notification hook registered on the jack client.
					static int32_t jackXrun(void* _userData);
					//! Thread entry used to close the stream outside the jack callback context.
					static void jackCloseStream(void* _userData);
					//! Server-shutdown hook registered on the jack client.
					static void jackShutdown(void* _userData);
					//! Process-callback trampoline forwarding to callbackEvent().
					static int32_t jackCallbackHandler(jack_nframes_t _nframes, void* _userData);
				private:
					std11::shared_ptr<JackPrivate> m_private; // backend-private state
					//! Open one direction of the stream (called by the generic openStream()).
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}

#endif

830
audio/orchestra/api/Oss.cpp Normal file
View File

@@ -0,0 +1,830 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if defined(ORCHESTRA_BUILD_OSS)
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <fcntl.h>
#include "soundcard.h"
#include <errno.h>
#include <math.h>
#undef __class__
#define __class__ "api::Oss"
/**
 * @brief Factory used by the generic front-end to instantiate the OSS backend.
 * @return A newly allocated backend instance (ownership goes to the caller).
 */
audio::orchestra::Api* audio::orchestra::api::Oss::Create() {
	return new audio::orchestra::api::Oss();
}
// Forward declaration of the callback-thread entry point defined at the end
// of this file.
// BUGFIX: the declaration previously had return type `void*` while the
// definition returns `void`, which is a conflicting-declaration compile error.
static void ossCallbackHandler(void* _userData);
namespace audio {
	namespace orchestra {
		namespace api {
			/**
			 * @brief OSS-backend private state (kept out of the public header).
			 */
			class OssPrivate {
				public:
					int32_t id[2]; // device ids: fds opened on the DSP node; [0]=playback, [1]=capture; 0 means "not open"
					bool xrun[2]; // set when a write ([0]) / read ([1]) error is assumed to be an under/overrun
					bool triggered; // duplex only: whether SNDCTL_DSP_SETTRIGGER has been armed
					std11::condition_variable runnable; // wakes the callback thread while the stream is stopped
					std11::shared_ptr<std11::thread> thread; // callback thread running ossCallbackHandler
					bool threadRunning; // loop flag polled by the callback thread
					OssPrivate():
					  triggered(false),
					  threadRunning(false) {
						id[0] = 0;
						id[1] = 0;
						xrun[0] = false;
						xrun[1] = false;
					}
			};
		}
	}
}
// Construct the backend; all stream state lives in the private structure.
audio::orchestra::api::Oss::Oss() :
  m_private(new audio::orchestra::api::OssPrivate()) {
	// Nothing to do here.
}
// Ensure any still-open stream is torn down before the object disappears.
audio::orchestra::api::Oss::~Oss() {
	if (m_state != audio::orchestra::state_closed) {
		closeStream();
	}
}
/**
 * @brief Count the audio devices known to the OSS v4 mixer.
 * @return Number of audio devices, or 0 on any probe error.
 */
uint32_t audio::orchestra::api::Oss::getDeviceCount() {
	int32_t fd = open("/dev/mixer", O_RDWR, 0);
	if (fd == -1) {
		ATA_ERROR("error opening '/dev/mixer'.");
		return 0;
	}
	uint32_t count = 0;
	oss_sysinfo sysinfo;
	if (ioctl(fd, SNDCTL_SYSINFO, &sysinfo) != -1) {
		count = sysinfo.numaudios;
	} else {
		ATA_ERROR("error getting sysinfo, OSS version >= 4.0 is required.");
	}
	close(fd);
	return count;
}
/**
 * @brief Probe one OSS device (channels, formats, sample rates).
 * @param _device Zero-based device index (must be < getDeviceCount()).
 * @return Filled DeviceInfo; info.probed stays false on any error.
 */
audio::orchestra::DeviceInfo audio::orchestra::api::Oss::getDeviceInfo(uint32_t _device) {
	// BUGFIX: was declared as `rtaudio::DeviceInfo` (leftover namespace from
	// the RtAudio fork) which does not exist in this project.
	audio::orchestra::DeviceInfo info;
	info.probed = false;
	int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
	if (mixerfd == -1) {
		ATA_ERROR("error opening '/dev/mixer'.");
		return info;
	}
	oss_sysinfo sysinfo;
	int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
	if (result == -1) {
		close(mixerfd);
		ATA_ERROR("error getting sysinfo, OSS version >= 4.0 is required.");
		return info;
	}
	unsigned nDevices = sysinfo.numaudios;
	if (nDevices == 0) {
		close(mixerfd);
		ATA_ERROR("no devices found!");
		return info;
	}
	if (_device >= nDevices) {
		close(mixerfd);
		ATA_ERROR("device ID is invalid!");
		return info;
	}
	oss_audioinfo ainfo;
	ainfo.dev = _device;
	result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
	close(mixerfd);
	if (result == -1) {
		ATA_ERROR("error getting device (" << ainfo.name << ") info.");
		error(audio::orchestra::error_warning);
		return info;
	}
	// Probe channels.
	// BUGFIX: the capability macros were mangled by a rename script
	// (`PCM_CAP_audio::orchestra::mode_output` ...); restored the real OSS v4
	// names PCM_CAP_OUTPUT / PCM_CAP_INPUT / PCM_CAP_DUPLEX.
	if (ainfo.caps & PCM_CAP_OUTPUT) {
		info.outputChannels = ainfo.max_channels;
	}
	if (ainfo.caps & PCM_CAP_INPUT) {
		info.inputChannels = ainfo.max_channels;
	}
	if (ainfo.caps & PCM_CAP_DUPLEX) {
		if (    info.outputChannels > 0
		     && info.inputChannels > 0
		     && ainfo.caps & PCM_CAP_DUPLEX) {
			info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
		}
	}
	// Probe data formats ... do for input.
	uint64_t mask = ainfo.iformats;
	if (    mask & AFMT_S16_LE
	     || mask & AFMT_S16_BE) {
		info.nativeFormats.push_back(audio::format_int16);
	}
	if (mask & AFMT_S8) {
		info.nativeFormats.push_back(audio::format_int8);
	}
	if (    mask & AFMT_S32_LE
	     || mask & AFMT_S32_BE) {
		info.nativeFormats.push_back(audio::format_int32);
	}
	if (mask & AFMT_FLOAT) {
		info.nativeFormats.push_back(audio::format_float);
	}
	if (    mask & AFMT_S24_LE
	     || mask & AFMT_S24_BE) {
		info.nativeFormats.push_back(audio::format_int24);
	}
	// Check that we have at least one supported format.
	// BUGFIX: `info.nativeFormats == 0` compared a std::vector to an integer.
	if (info.nativeFormats.size() == 0) {
		ATA_ERROR("device (" << ainfo.name << ") data format not supported by RtAudio.");
		return info;
	}
	// Probe the supported sample rates against the generic rate list.
	// BUGFIX: SAMPLE_RATES / MAX_SAMPLE_RATES are not defined in this file;
	// use the shared audio::orchestra::genericSampleRate() table instead.
	info.sampleRates.clear();
	const std::vector<uint32_t>& rates = audio::orchestra::genericSampleRate();
	if (ainfo.nrates) {
		// The device reports an explicit list of rates.
		for (uint32_t i=0; i<ainfo.nrates; i++) {
			for (size_t k=0; k<rates.size(); k++) {
				if (ainfo.rates[i] == rates[k]) {
					info.sampleRates.push_back(rates[k]);
					break;
				}
			}
		}
	} else {
		// Only a min/max range is reported; keep every generic rate inside it.
		for (size_t k=0; k<rates.size(); k++) {
			if (    ainfo.min_rate <= (int)rates[k]
			     && ainfo.max_rate >= (int)rates[k]) {
				info.sampleRates.push_back(rates[k]);
			}
		}
	}
	if (info.sampleRates.size() == 0) {
		ATA_ERROR("no supported sample rates found for device (" << ainfo.name << ").");
	} else {
		info.probed = true;
		info.name = ainfo.name;
	}
	return info;
}
bool audio::orchestra::api::Oss::probeDeviceOpen(uint32_t _device,
StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
rtaudio::format _format,
uint32_t* _bufferSize,
const audio::orchestra::StreamOptions& _options) {
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
if (mixerfd == -1) {
ATA_ERROR("error opening '/dev/mixer'.");
return false;
}
oss_sysinfo sysinfo;
int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
if (result == -1) {
close(mixerfd);
ATA_ERROR("error getting sysinfo, OSS version >= 4.0 is required.");
return false;
}
unsigned nDevices = sysinfo.numaudios;
if (nDevices == 0) {
// This should not happen because a check is made before this function is called.
close(mixerfd);
ATA_ERROR("no devices found!");
return false;
}
if (_device >= nDevices) {
// This should not happen because a check is made before this function is called.
close(mixerfd);
ATA_ERROR("device ID is invalid!");
return false;
}
oss_audioinfo ainfo;
ainfo.dev = _device;
result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
close(mixerfd);
if (result == -1) {
ATA_ERROR("error getting device (" << ainfo.name << ") info.");
return false;
}
// Check if device supports input or output
if ( ( _mode == audio::orchestra::mode_output
&& !(ainfo.caps & PCM_CAP_audio::orchestra::mode_output))
|| ( _mode == audio::orchestra::mode_input
&& !(ainfo.caps & PCM_CAP_audio::orchestra::mode_input))) {
if (_mode == audio::orchestra::mode_output) {
ATA_ERROR("device (" << ainfo.name << ") does not support output.");
} else {
ATA_ERROR("device (" << ainfo.name << ") does not support input.");
}
return false;
}
int32_t flags = 0;
if (_mode == audio::orchestra::mode_output) {
flags |= O_WRONLY;
} else { // _mode == audio::orchestra::mode_input
if ( m_mode == audio::orchestra::mode_output
&& m_device[0] == _device) {
// We just set the same device for playback ... close and reopen for duplex (OSS only).
close(m_private->id[0]);
m_private->id[0] = 0;
if (!(ainfo.caps & PCM_CAP_audio::orchestra::mode_duplex)) {
ATA_ERROR("device (" << ainfo.name << ") does not support duplex mode.");
return false;
}
// Check that the number previously set channels is the same.
if (m_nUserChannels[0] != _channels) {
ATA_ERROR("input/output channels must be equal for OSS duplex device (" << ainfo.name << ").");
return false;
}
flags |= O_RDWR;
} else {
flags |= O_RDONLY;
}
}
// Set exclusive access if specified.
if (_options.flags & RTAUDIO_HOG_DEVICE) {
flags |= O_EXCL;
}
// Try to open the device.
int32_t fd;
fd = open(ainfo.devnode, flags, 0);
if (fd == -1) {
if (errno == EBUSY) {
ATA_ERROR("device (" << ainfo.name << ") is busy.");
} else {
ATA_ERROR("error opening device (" << ainfo.name << ").");
}
return false;
}
// For duplex operation, specifically set this mode (this doesn't seem to work).
/*
if (flags | O_RDWR) {
result = ioctl(fd, SNDCTL_DSP_SETaudio::orchestra::mode_duplex, nullptr);
if (result == -1) {
m_errorStream << "error setting duplex mode for device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
return false;
}
}
*/
// Check the device channel support.
m_nUserChannels[modeToIdTable(_mode)] = _channels;
if (ainfo.max_channels < (int)(_channels + _firstChannel)) {
close(fd);
ATA_ERROR("the device (" << ainfo.name << ") does not support requested channel parameters.");
return false;
}
// Set the number of channels.
int32_t deviceChannels = _channels + _firstChannel;
result = ioctl(fd, SNDCTL_DSP_CHANNELS, &deviceChannels);
if ( result == -1
|| deviceChannels < (int)(_channels + _firstChannel)) {
close(fd);
ATA_ERROR("error setting channel parameters on device (" << ainfo.name << ").");
return false;
}
m_nDeviceChannels[modeToIdTable(_mode)] = deviceChannels;
// Get the data format mask
int32_t mask;
result = ioctl(fd, SNDCTL_DSP_GETFMTS, &mask);
if (result == -1) {
close(fd);
ATA_ERROR("error getting device (" << ainfo.name << ") data formats.");
return false;
}
// Determine how to set the device format.
m_userFormat = _format;
int32_t deviceFormat = -1;
m_doByteSwap[modeToIdTable(_mode)] = false;
if (_format == RTAUDIO_SINT8) {
if (mask & AFMT_S8) {
deviceFormat = AFMT_S8;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
}
} else if (_format == RTAUDIO_SINT16) {
if (mask & AFMT_S16_NE) {
deviceFormat = AFMT_S16_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
} else if (mask & AFMT_S16_OE) {
deviceFormat = AFMT_S16_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if (_format == RTAUDIO_SINT24) {
if (mask & AFMT_S24_NE) {
deviceFormat = AFMT_S24_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
} else if (mask & AFMT_S24_OE) {
deviceFormat = AFMT_S24_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if (_format == RTAUDIO_SINT32) {
if (mask & AFMT_S32_NE) {
deviceFormat = AFMT_S32_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
} else if (mask & AFMT_S32_OE) {
deviceFormat = AFMT_S32_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
m_doByteSwap[modeToIdTable(_mode)] = true;
}
}
if (deviceFormat == -1) {
// The user requested format is not natively supported by the device.
if (mask & AFMT_S16_NE) {
deviceFormat = AFMT_S16_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
} else if (mask & AFMT_S32_NE) {
deviceFormat = AFMT_S32_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
} else if (mask & AFMT_S24_NE) {
deviceFormat = AFMT_S24_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
} else if (mask & AFMT_S16_OE) {
deviceFormat = AFMT_S16_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
m_doByteSwap[modeToIdTable(_mode)] = true;
} else if (mask & AFMT_S32_OE) {
deviceFormat = AFMT_S32_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
m_doByteSwap[modeToIdTable(_mode)] = true;
} else if (mask & AFMT_S24_OE) {
deviceFormat = AFMT_S24_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
m_doByteSwap[modeToIdTable(_mode)] = true;
} else if (mask & AFMT_S8) {
deviceFormat = AFMT_S8;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
}
}
if (m_deviceFormat[modeToIdTable(_mode)] == 0) {
// This really shouldn't happen ...
close(fd);
ATA_ERROR("device (" << ainfo.name << ") data format not supported by RtAudio.");
return false;
}
// Set the data format.
int32_t temp = deviceFormat;
result = ioctl(fd, SNDCTL_DSP_SETFMT, &deviceFormat);
if ( result == -1
|| deviceFormat != temp) {
close(fd);
ATA_ERROR("error setting data format on device (" << ainfo.name << ").");
return false;
}
// Attempt to set the buffer size. According to OSS, the minimum
// number of buffers is two. The supposed minimum buffer size is 16
// bytes, so that will be our lower bound. The argument to this
// call is in the form 0xMMMMSSSS (hex), where the buffer size (in
// bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
// We'll check the actual value used near the end of the setup
// procedure.
int32_t ossBufferBytes = *_bufferSize * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]) * deviceChannels;
if (ossBufferBytes < 16) {
ossBufferBytes = 16;
}
int32_t buffers = 0;
buffers = _options.numberOfBuffers;
if (_options.flags.m_minimizeLatency == true) {
buffers = 2;
}
if (buffers < 2) {
buffers = 3;
}
temp = ((int) buffers << 16) + (int)(log10((double)ossBufferBytes) / log10(2.0));
result = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &temp);
if (result == -1) {
close(fd);
ATA_ERROR("error setting buffer size on device (" << ainfo.name << ").");
return false;
}
m_nBuffers = buffers;
// Save buffer size (in sample frames).
*_bufferSize = ossBufferBytes / (audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]) * deviceChannels);
m_bufferSize = *_bufferSize;
// Set the sample rate.
int32_t srate = _sampleRate;
result = ioctl(fd, SNDCTL_DSP_SPEED, &srate);
if (result == -1) {
close(fd);
ATA_ERROR("error setting sample rate (" << _sampleRate << ") on device (" << ainfo.name << ").");
return false;
}
// Verify the sample rate setup worked.
if (abs(srate - _sampleRate) > 100) {
close(fd);
ATA_ERROR("device (" << ainfo.name << ") does not support sample rate (" << _sampleRate << ").");
return false;
}
m_sampleRate = _sampleRate;
if ( _mode == audio::orchestra::mode_input
&& m__mode == audio::orchestra::mode_output
&& m_device[0] == _device) {
// We're doing duplex setup here.
m_deviceFormat[0] = m_deviceFormat[1];
m_nDeviceChannels[0] = deviceChannels;
}
// Set interleaving parameters.
m_deviceInterleaved[modeToIdTable(_mode)] = true;
// Set flags for buffer conversion
m_doConvertBuffer[modeToIdTable(_mode)] = false;
if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
&& m_nUserChannels[modeToIdTable(_mode)] > 1) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
m_private->id[modeToIdTable(_mode)] = fd;
// Allocate necessary internal buffers.
uint64_t bufferBytes;
bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
if (m_userBuffer[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("error allocating user buffer memory.");
goto error;
}
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
bool makeBuffer = true;
bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
if (_mode == audio::orchestra::mode_input) {
if ( m__mode == audio::orchestra::mode_output
&& m_deviceBuffer) {
uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
if (bufferBytes <= bytesOut) {
makeBuffer = false;
}
}
}
if (makeBuffer) {
bufferBytes *= *_bufferSize;
if (m_deviceBuffer) {
free(m_deviceBuffer);
}
m_deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_deviceBuffer == nullptr) {
ATA_ERROR("error allocating device buffer memory.");
goto error;
}
}
}
m_device[modeToIdTable(_mode)] = _device;
m_state = audio::orchestra::state_stopped;
// Setup the buffer conversion information structure.
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
setConvertInfo(_mode, _firstChannel);
}
// Setup thread if necessary.
if (m_mode == audio::orchestra::mode_output && _mode == audio::orchestra::mode_input) {
// We had already set up an output stream.
m_mode = audio::orchestra::mode_duplex;
if (m_device[0] == _device) {
m_private->id[0] = fd;
}
} else {
m_mode = _mode;
// Setup callback thread.
m_private->threadRunning = true;
m_private->thread = new std11::thread(ossCallbackHandler, this);
if (m_private->thread == nullptr) {
m_private->threadRunning = false;
ATA_ERROR("creating callback thread!");
goto error;
}
}
return true;
error:
if (m_private->id[0] != nullptr) {
close(m_private->id[0]);
m_private->id[0] = nullptr;
}
if (m_private->id[1] != nullptr) {
close(m_private->id[1]);
m_private->id[1] = nullptr;
}
for (int32_t i=0; i<2; i++) {
if (m_userBuffer[i]) {
free(m_userBuffer[i]);
m_userBuffer[i] = 0;
}
}
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = 0;
}
return false;
}
/**
 * @brief Stop the callback thread, halt the hardware and release all
 * stream resources (fds and conversion buffers).
 * @return error_none on success, error_warning when no stream was open.
 */
enum audio::orchestra::error audio::orchestra::api::Oss::closeStream() {
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("no open stream to close!");
		return audio::orchestra::error_warning;
	}
	m_private->threadRunning = false;
	m_mutex.lock();
	if (m_state == audio::orchestra::state_stopped) {
		// Wake the callback thread so it can observe threadRunning == false.
		m_private->runnable.notify_one();
	}
	m_mutex.unlock();
	// BUGFIX: guard against a stream whose callback thread was never created
	// (duplex reuse path) — joining through a null shared_ptr crashes.
	if (m_private->thread != nullptr) {
		m_private->thread->join();
	}
	if (m_state == audio::orchestra::state_running) {
		if (m_mode == audio::orchestra::mode_output || m_mode == audio::orchestra::mode_duplex) {
			ioctl(m_private->id[0], SNDCTL_DSP_HALT, 0);
		} else {
			ioctl(m_private->id[1], SNDCTL_DSP_HALT, 0);
		}
		m_state = audio::orchestra::state_stopped;
	}
	// BUGFIX: the fds are int32_t; they were compared/assigned with nullptr.
	// 0 is this backend's "not open" sentinel (set in OssPrivate's ctor).
	if (m_private->id[0] != 0) {
		close(m_private->id[0]);
		m_private->id[0] = 0;
	}
	if (m_private->id[1] != 0) {
		close(m_private->id[1]);
		m_private->id[1] = 0;
	}
	for (int32_t i=0; i<2; i++) {
		if (m_userBuffer[i]) {
			free(m_userBuffer[i]);
			m_userBuffer[i] = 0;
		}
	}
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = 0;
	}
	m_mode = audio::orchestra::mode_unknow;
	m_state = audio::orchestra::state_closed;
	return audio::orchestra::error_none;
}
/**
 * @brief Mark the stream running and wake the callback thread.
 * OSS starts on its own as soon as samples are fed to the fd.
 * @return error_none on success, error_fail/error_warning otherwise.
 */
enum audio::orchestra::error audio::orchestra::api::Oss::startStream() {
	// TODO : Check return ...
	audio::orchestra::Api::startStream();
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_running) {
		ATA_ERROR("the stream is already running!");
		return audio::orchestra::error_warning;
	}
	m_mutex.lock();
	m_state = audio::orchestra::state_running;
	// No need to do anything else here ... OSS automatically starts
	// when fed samples.
	m_mutex.unlock();
	m_private->runnable.notify_one();
	// BUGFIX: the function fell off the end of a non-void function
	// (undefined behavior); report success explicitly.
	return audio::orchestra::error_none;
}
/**
 * @brief Drain the output with silence, then halt the device(s).
 * @return error_none on success, error_warning / error_fail / error_systemError otherwise.
 */
enum audio::orchestra::error audio::orchestra::api::Oss::stopStream() {
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		// BUGFIX: was a bare `return;` in a function returning an enum.
		return audio::orchestra::error_warning;
	}
	m_mutex.lock();
	// The state might change while waiting on a mutex.
	if (m_state == audio::orchestra::state_stopped) {
		m_mutex.unlock();
		// BUGFIX: was a bare `return;` in a function returning an enum.
		return audio::orchestra::error_none;
	}
	int32_t result = 0;
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		// Flush the output with zeros a few times before halting so the
		// device does not cut the tail of the signal.
		char *buffer;
		int32_t samples;
		audio::format format;
		if (m_doConvertBuffer[0]) {
			buffer = m_deviceBuffer;
			samples = m_bufferSize * m_nDeviceChannels[0];
			format = m_deviceFormat[0];
		} else {
			buffer = m_userBuffer[0];
			samples = m_bufferSize * m_nUserChannels[0];
			format = m_userFormat;
		}
		memset(buffer, 0, samples * audio::getFormatBytes(format));
		for (uint32_t i=0; i<m_nBuffers+1; i++) {
			result = write(m_private->id[0], buffer, samples * audio::getFormatBytes(format));
			if (result == -1) {
				ATA_ERROR("audio write error.");
				// BUGFIX: release the stream mutex before returning early.
				m_mutex.unlock();
				return audio::orchestra::error_warning;
			}
		}
		result = ioctl(m_private->id[0], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			ATA_ERROR("system error stopping callback procedure on device (" << m_device[0] << ").");
			goto unlock;
		}
		m_private->triggered = false;
	}
	if (    m_mode == audio::orchestra::mode_input
	     || (    m_mode == audio::orchestra::mode_duplex
	          && m_private->id[0] != m_private->id[1])) {
		result = ioctl(m_private->id[1], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			ATA_ERROR("system error stopping input callback procedure on device (" << m_device[0] << ").");
			goto unlock;
		}
	}
unlock:
	m_state = audio::orchestra::state_stopped;
	m_mutex.unlock();
	if (result != -1) {
		return audio::orchestra::error_none;
	}
	return audio::orchestra::error_systemError;
}
/**
 * @brief Halt the device(s) immediately, without draining the output.
 * @return error_none on success, error_warning / error_fail / error_systemError otherwise.
 */
enum audio::orchestra::error audio::orchestra::api::Oss::abortStream() {
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		return audio::orchestra::error_warning;
	}
	m_mutex.lock();
	// The state might change while waiting on a mutex.
	if (m_state == audio::orchestra::state_stopped) {
		m_mutex.unlock();
		// BUGFIX: was a bare `return;` in a function returning an enum.
		return audio::orchestra::error_none;
	}
	int32_t result = 0;
	if (m_mode == audio::orchestra::mode_output || m_mode == audio::orchestra::mode_duplex) {
		result = ioctl(m_private->id[0], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			ATA_ERROR("system error stopping callback procedure on device (" << m_device[0] << ").");
			goto unlock;
		}
		m_private->triggered = false;
	}
	if (m_mode == audio::orchestra::mode_input || (m_mode == audio::orchestra::mode_duplex && m_private->id[0] != m_private->id[1])) {
		result = ioctl(m_private->id[1], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			ATA_ERROR("system error stopping input callback procedure on device (" << m_device[0] << ").");
			goto unlock;
		}
	}
unlock:
	m_state = audio::orchestra::state_stopped;
	m_mutex.unlock();
	if (result != -1) {
		return audio::orchestra::error_none;
	}
	return audio::orchestra::error_systemError;
}
/**
 * @brief One cycle of the callback thread: fetch fresh data from the user
 * callback, write it to the device, and/or read captured data back.
 * Runs in the thread created by probeDeviceOpen().
 */
void audio::orchestra::api::Oss::callbackEvent() {
	if (m_state == audio::orchestra::state_stopped) {
		// Sleep until startStream()/closeStream() wakes us up.
		std11::unique_lock<std11::mutex> lck(m_mutex);
		m_private->runnable.wait(lck);
		if (m_state != audio::orchestra::state_running) {
			return;
		}
	}
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("the stream is closed ... this shouldn't happen!");
		// BUGFIX: this function returns void; it returned
		// audio::orchestra::error_warning, which does not compile.
		return;
	}
	// Invoke user callback to get fresh output data.
	int32_t doStopStream = 0;
	std11::chrono::system_clock::time_point streamTime = getStreamTime();
	std::vector<enum audio::orchestra::status> status;
	if (    m_mode != audio::orchestra::mode_input
	     && m_private->xrun[0] == true) {
		status.push_back(audio::orchestra::status_underflow);
		m_private->xrun[0] = false;
	}
	if (    m_mode != audio::orchestra::mode_output
	     && m_private->xrun[1] == true) {
		status.push_back(audio::orchestra::status_overflow);
		m_private->xrun[1] = false;
	}
	doStopStream = m_callback(m_userBuffer[1],
	                          streamTime,
	                          m_userBuffer[0],
	                          streamTime,
	                          m_bufferSize,
	                          status);
	if (doStopStream == 2) {
		this->abortStream();
		return;
	}
	m_mutex.lock();
	// The state might change while waiting on a mutex.
	if (m_state == audio::orchestra::state_stopped) {
		goto unlock;
	}
	int32_t result;
	char *buffer;
	int32_t samples;
	audio::format format;
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		// Setup parameters and do buffer conversion if necessary.
		if (m_doConvertBuffer[0]) {
			buffer = m_deviceBuffer;
			convertBuffer(buffer, m_userBuffer[0], m_convertInfo[0]);
			samples = m_bufferSize * m_nDeviceChannels[0];
			format = m_deviceFormat[0];
		} else {
			buffer = m_userBuffer[0];
			samples = m_bufferSize * m_nUserChannels[0];
			format = m_userFormat;
		}
		// Do byte swapping if necessary.
		if (m_doByteSwap[0]) {
			byteSwapBuffer(buffer, samples, format);
		}
		if (    m_mode == audio::orchestra::mode_duplex
		     && m_private->triggered == false) {
			// First duplex cycle: prime the device, then start input and
			// output simultaneously with SETTRIGGER.
			int32_t trig = 0;
			ioctl(m_private->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
			result = write(m_private->id[0], buffer, samples * audio::getFormatBytes(format));
			// BUGFIX: restored the real OSS macros (PCM_ENABLE_INPUT /
			// PCM_ENABLE_OUTPUT) that a rename script had mangled.
			trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
			ioctl(m_private->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
			m_private->triggered = true;
		} else {
			// Write samples to device.
			result = write(m_private->id[0], buffer, samples * audio::getFormatBytes(format));
		}
		if (result == -1) {
			// We'll assume this is an underrun, though there isn't a
			// specific means for determining that.
			m_private->xrun[0] = true;
			ATA_ERROR("audio write error.");
			//error(audio::orchestra::error_warning);
			// Continue on to input section.
		}
	}
	if (    m_mode == audio::orchestra::mode_input
	     || m_mode == audio::orchestra::mode_duplex) {
		// Setup parameters.
		if (m_doConvertBuffer[1]) {
			buffer = m_deviceBuffer;
			samples = m_bufferSize * m_nDeviceChannels[1];
			format = m_deviceFormat[1];
		} else {
			buffer = m_userBuffer[1];
			samples = m_bufferSize * m_nUserChannels[1];
			format = m_userFormat;
		}
		// Read samples from device.
		result = read(m_private->id[1], buffer, samples * audio::getFormatBytes(format));
		if (result == -1) {
			// We'll assume this is an overrun, though there isn't a
			// specific means for determining that.
			m_private->xrun[1] = true;
			ATA_ERROR("audio read error.");
			goto unlock;
		}
		// Do byte swapping if necessary.
		if (m_doByteSwap[1]) {
			byteSwapBuffer(buffer, samples, format);
		}
		// Do buffer conversion if necessary.
		if (m_doConvertBuffer[1]) {
			convertBuffer(m_userBuffer[1], m_deviceBuffer, m_convertInfo[1]);
		}
	}
unlock:
	m_mutex.unlock();
	audio::orchestra::Api::tickStreamTime();
	if (doStopStream == 1) {
		this->stopStream();
	}
}
/**
 * @brief Entry point of the OSS callback thread; loops on callbackEvent()
 * until the backend clears threadRunning.
 * @param _userData The owning audio::orchestra::api::Oss instance.
 */
static void ossCallbackHandler(void* _userData) {
	// BUGFIX: the cast targeted api::Alsa* (copy/paste from the ALSA backend);
	// this is the OSS handler, so cast to api::Oss*.
	audio::orchestra::api::Oss* myClass = reinterpret_cast<audio::orchestra::api::Oss*>(_userData);
	// BUGFIX: `m_name` is a member and is not visible from a static free
	// function; use a fixed thread name instead.
	etk::thread::setName("OSS callback");
	// NOTE(review): m_private is declared private in Oss.h; this access
	// presumably relies on a friend declaration — confirm it compiles.
	while (myClass->m_private->threadRunning == true) {
		myClass->callbackEvent();
	}
}
#endif

51
audio/orchestra/api/Oss.h Normal file
View File

@@ -0,0 +1,51 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_OSS_H__) && defined(ORCHESTRA_BUILD_OSS)
#define __AUDIO_ORCHESTRA_API_OSS_H__
namespace audio {
	namespace orchestra {
		namespace api {
			// Opaque holder of the OSS-specific state (fds, callback thread, ...);
			// defined in Oss.cpp.
			class OssPrivate;
			/**
			 * @brief OSS (Open Sound System) backend implementation of the
			 * audio::orchestra::Api interface.
			 */
			class Oss: public audio::orchestra::Api {
				public:
					// Factory used by the generic front-end to instantiate this backend.
					static audio::orchestra::Api* Create();
				public:
					Oss();
					virtual ~Oss();
					// Identify this backend to the generic API layer.
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_oss;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					void callbackEvent();
				private:
					// OSS fds and thread state shared with the callback handler.
					std11::shared_ptr<OssPrivate> m_private;
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,424 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if defined(ORCHESTRA_BUILD_PULSE)
#include <unistd.h>
#include <limits.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <pulse/error.h>
#include <pulse/simple.h>
#include <cstdio>
#include <etk/thread/tools.h>
#undef __class__
#define __class__ "api::Pulse"
// Factory used by the generic front-end to instantiate the PulseAudio backend.
audio::orchestra::Api* audio::orchestra::api::Pulse::Create() {
	return new audio::orchestra::api::Pulse();
}
// Sample rates advertised for the virtual PulseAudio device;
// the list is zero-terminated (see getDeviceInfo()).
static const uint32_t SUPPORTED_SAMPLERATES[] = {
	8000,
	16000,
	22050,
	32000,
	44100,
	48000,
	96000,
	0
};
// Mapping between orchestra sample formats and PulseAudio sample formats.
struct rtaudio_pa_format_mapping_t {
	enum audio::format airtaudio_format;
	pa_sample_format_t pa_format;
};
// Table terminated by the {format_unknow, PA_SAMPLE_INVALID} sentinel.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
	{audio::format_int16, PA_SAMPLE_S16LE},
	{audio::format_int32, PA_SAMPLE_S32LE},
	{audio::format_float, PA_SAMPLE_FLOAT32LE},
	{audio::format_unknow, PA_SAMPLE_INVALID}};
namespace audio {
	namespace orchestra {
		namespace api {
			/**
			 * @brief PulseAudio-backend private state (kept out of the public header).
			 */
			class PulsePrivate {
				public:
					pa_simple *s_play; // simple-API playback connection (null when unused)
					pa_simple *s_rec; // simple-API capture connection (null when unused)
					std11::shared_ptr<std11::thread> thread; // callback thread
					bool threadRunning; // loop flag polled by the callback thread
					std11::condition_variable runnable_cv; // wakes the callback thread while stopped
					bool runnable; // predicate guarded by the stream mutex
					PulsePrivate() :
					  s_play(0),
					  s_rec(0),
					  threadRunning(false),
					  runnable(false) {
					}
			};
		}
	}
}
// Construct the backend; all stream state lives in the private structure.
audio::orchestra::api::Pulse::Pulse() :
  m_private(new audio::orchestra::api::PulsePrivate()) {
}
// Ensure any still-open stream is torn down before the object disappears.
audio::orchestra::api::Pulse::~Pulse() {
	if (m_state != audio::orchestra::state_closed) {
		closeStream();
	}
}
// PulseAudio is exposed as a single virtual device (the server routes it).
uint32_t audio::orchestra::api::Pulse::getDeviceCount() {
	return 1;
}
/**
 * @brief Describe the single virtual PulseAudio device.
 * @param _device Ignored: only one device is exposed.
 * @return Static description (stereo duplex, fixed rate/format lists).
 */
audio::orchestra::DeviceInfo audio::orchestra::api::Pulse::getDeviceInfo(uint32_t _device) {
	audio::orchestra::DeviceInfo info;
	info.probed = true;
	info.name = "PulseAudio";
	info.outputChannels = 2;
	info.inputChannels = 2;
	info.duplexChannels = 2;
	info.isDefaultOutput = true;
	info.isDefaultInput = true;
	// Copy the zero-terminated static rate table.
	for (size_t iii=0; SUPPORTED_SAMPLERATES[iii] != 0; ++iii) {
		info.sampleRates.push_back(SUPPORTED_SAMPLERATES[iii]);
	}
	info.nativeFormats.push_back(audio::format_int16);
	info.nativeFormats.push_back(audio::format_int32);
	info.nativeFormats.push_back(audio::format_float);
	return info;
}
// Thread entry point: _userData is the owning Pulse instance.
static void pulseaudio_callback(void* _userData) {
	audio::orchestra::api::Pulse* myClass = reinterpret_cast<audio::orchestra::api::Pulse*>(_userData);
	myClass->callbackEvent();
}
// Callback-thread main loop: run one I/O cycle until closeStream()
// clears threadRunning.
void audio::orchestra::api::Pulse::callbackEvent() {
	etk::thread::setName("Pulse IO-" + m_name);
	while (m_private->threadRunning == true) {
		callbackEventOneCycle();
	}
}
/**
 * @brief Stop the callback thread and release the PulseAudio connections
 * and user buffers.
 * @return error_none (the teardown is best-effort).
 */
enum audio::orchestra::error audio::orchestra::api::Pulse::closeStream() {
	m_private->threadRunning = false;
	m_mutex.lock();
	if (m_state == audio::orchestra::state_stopped) {
		// Wake the callback thread so it can observe threadRunning == false.
		m_private->runnable = true;
		m_private->runnable_cv.notify_one();
	}
	m_mutex.unlock();
	// BUGFIX: guard against closing a stream whose callback thread was never
	// created (failed open path) — joining through a null shared_ptr crashes.
	if (m_private->thread != nullptr) {
		m_private->thread->join();
	}
	if (m_private->s_play) {
		// Drain pending playback data before freeing the connection.
		pa_simple_flush(m_private->s_play, nullptr);
		pa_simple_free(m_private->s_play);
	}
	if (m_private->s_rec) {
		pa_simple_free(m_private->s_rec);
	}
	m_userBuffer[0].clear();
	m_userBuffer[1].clear();
	m_state = audio::orchestra::state_closed;
	m_mode = audio::orchestra::mode_unknow;
	return audio::orchestra::error_none;
}
/**
 * @brief Run one audio cycle: call the user callback, then push the produced
 * buffer to PulseAudio (output) and/or pull the next buffer from it (input).
 * Parks on the condition variable while the stream is stopped.
 */
void audio::orchestra::api::Pulse::callbackEventOneCycle() {
	if (m_state == audio::orchestra::state_stopped) {
		std11::unique_lock<std11::mutex> lck(m_mutex);
		// Sleep until startStream() (or closeStream()) wakes us up.
		while (!m_private->runnable) {
			m_private->runnable_cv.wait(lck);
		}
		if (m_state != audio::orchestra::state_running) {
			// lck releases m_mutex on destruction; the original code also
			// called m_mutex.unlock() here, double-unlocking the mutex (UB).
			return;
		}
	}
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("the stream is closed ... this shouldn't happen!");
		return;
	}
	// Run the user callback: it consumes the input captured last cycle and
	// fills the output buffer for this cycle.
	std11::chrono::system_clock::time_point streamTime = getStreamTime();
	std::vector<enum audio::orchestra::status> status;
	int32_t doStopStream = m_callback(&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0],
	                                  streamTime,
	                                  &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0],
	                                  streamTime,
	                                  m_bufferSize,
	                                  status);
	if (doStopStream == 2) {
		abortStream();
		return;
	}
	m_mutex.lock();
	// When a format/channel conversion is needed the device side works on
	// m_deviceBuffer, otherwise PulseAudio reads/writes the user buffer directly.
	void *pulse_in = m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] ? m_deviceBuffer : &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0];
	void *pulse_out = m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] ? m_deviceBuffer : &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0];
	if (m_state != audio::orchestra::state_running) {
		goto unlock;
	}
	int32_t pa_error;
	size_t bytes;
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]) {
			convertBuffer(m_deviceBuffer,
			              &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0],
			              m_convertInfo[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]);
			bytes = m_nDeviceChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] * m_bufferSize * audio::getFormatBytes(m_deviceFormat[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]);
		} else {
			bytes = m_nUserChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
		}
		if (pa_simple_write(m_private->s_play, pulse_out, bytes, &pa_error) < 0) {
			ATA_ERROR("audio write error, " << pa_strerror(pa_error) << ".");
			// Release the lock before bailing out: the original code returned
			// with m_mutex still held, dead-locking every subsequent cycle.
			m_mutex.unlock();
			return;
		}
	}
	if (    m_mode == audio::orchestra::mode_input
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]) {
			bytes = m_nDeviceChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] * m_bufferSize * audio::getFormatBytes(m_deviceFormat[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]);
		} else {
			bytes = m_nUserChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
		}
		if (pa_simple_read(m_private->s_rec, pulse_in, bytes, &pa_error) < 0) {
			ATA_ERROR("audio read error, " << pa_strerror(pa_error) << ".");
			// Same lock-leak fix as the write path above.
			m_mutex.unlock();
			return;
		}
		if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]) {
			convertBuffer(&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0],
			              m_deviceBuffer,
			              m_convertInfo[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]);
		}
	}
unlock:
	m_mutex.unlock();
	audio::orchestra::Api::tickStreamTime();
	if (doStopStream == 1) {
		stopStream();
		return;
	}
	return;
}
/**
 * @brief Switch the stream to the running state and wake the parked IO thread.
 * @return error_invalidUse when the stream is closed, error_warning when it is
 * already running, error_none otherwise.
 */
enum audio::orchestra::error audio::orchestra::api::Pulse::startStream() {
	// TODO : Check return ...
	audio::orchestra::Api::startStream();
	switch (m_state) {
		case audio::orchestra::state_closed:
			ATA_ERROR("the stream is not open!");
			return audio::orchestra::error_invalidUse;
		case audio::orchestra::state_running:
			ATA_ERROR("the stream is already running!");
			return audio::orchestra::error_warning;
		default:
			break;
	}
	m_mutex.lock();
	m_state = audio::orchestra::state_running;
	// Wake callbackEventOneCycle(), which is waiting on the condition variable.
	m_private->runnable = true;
	m_private->runnable_cv.notify_one();
	m_mutex.unlock();
	return audio::orchestra::error_none;
}
/**
 * @brief Stop the stream, draining (playing out) any audio still queued.
 * @return error_invalidUse when closed, error_warning when already stopped,
 * error_systemError when draining fails, error_none otherwise.
 */
enum audio::orchestra::error audio::orchestra::api::Pulse::stopStream() {
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("the stream is not open!");
		return audio::orchestra::error_invalidUse;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		return audio::orchestra::error_warning;
	}
	// Mark the stream stopped before taking the lock so the IO thread parks
	// itself at the start of its next cycle. (The original code re-assigned
	// the same value again just before unlocking; that was redundant.)
	m_state = audio::orchestra::state_stopped;
	m_mutex.lock();
	if (m_private->s_play) {
		int32_t pa_error;
		// Block until every queued sample has actually been played.
		if (pa_simple_drain(m_private->s_play, &pa_error) < 0) {
			ATA_ERROR("error draining output device, " << pa_strerror(pa_error) << ".");
			m_mutex.unlock();
			return audio::orchestra::error_systemError;
		}
	}
	m_mutex.unlock();
	return audio::orchestra::error_none;
}
/**
 * @brief Stop the stream immediately, discarding (flushing) any audio still queued.
 * @return error_invalidUse when closed, error_warning when already stopped,
 * error_systemError when flushing fails, error_none otherwise.
 */
enum audio::orchestra::error audio::orchestra::api::Pulse::abortStream() {
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("the stream is not open!");
		return audio::orchestra::error_invalidUse;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		return audio::orchestra::error_warning;
	}
	// Mark the stream stopped before taking the lock so the IO thread parks
	// itself at the start of its next cycle. (The original code re-assigned
	// the same value again just before unlocking; that was redundant.)
	m_state = audio::orchestra::state_stopped;
	m_mutex.lock();
	if (m_private && m_private->s_play) {
		int32_t pa_error;
		// Drop queued samples instead of playing them out (contrast stopStream()).
		if (pa_simple_flush(m_private->s_play, &pa_error) < 0) {
			ATA_ERROR("error flushing output device, " << pa_strerror(pa_error) << ".");
			m_mutex.unlock();
			return audio::orchestra::error_systemError;
		}
	}
	m_mutex.unlock();
	return audio::orchestra::error_none;
}
/**
 * @brief Validate the requested parameters and open a PulseAudio "simple"
 * connection for one direction (record or playback), allocating the internal
 * buffers and starting the IO thread on first use.
 * @param _device Device index — this backend only supports device 0.
 * @param _mode mode_input or mode_output (anything else is rejected).
 * @param _channels Channel count — only 1 or 2 are accepted.
 * @param _firstChannel Channel offset — must be 0 for PulseAudio.
 * @param _sampleRate Must appear in the SUPPORTED_SAMPLERATES table (defined
 * earlier in this file, zero-terminated).
 * @param _format Must appear in the supported_sampleformats mapping table.
 * @param _bufferSize In: requested frames per cycle; the value is used as-is.
 * @param _options Unused by this backend.
 * @return true on success, false on any error (buffers freed via the error label).
 */
bool audio::orchestra::api::Pulse::probeDeviceOpen(uint32_t _device,
                                                   audio::orchestra::mode _mode,
                                                   uint32_t _channels,
                                                   uint32_t _firstChannel,
                                                   uint32_t _sampleRate,
                                                   audio::format _format,
                                                   uint32_t *_bufferSize,
                                                   const audio::orchestra::StreamOptions& _options) {
	uint64_t bufferBytes = 0;
	pa_sample_spec ss;
	// The pulse-simple backend only exposes the default device (index 0).
	if (_device != 0) {
		return false;
	}
	if (_mode != audio::orchestra::mode_input && _mode != audio::orchestra::mode_output) {
		return false;
	}
	if (_channels != 1 && _channels != 2) {
		ATA_ERROR("unsupported number of channels.");
		return false;
	}
	ss.channels = _channels;
	if (_firstChannel != 0) {
		return false;
	}
	// Accept only rates listed in the zero-terminated SUPPORTED_SAMPLERATES table.
	bool sr_found = false;
	for (const uint32_t *sr = SUPPORTED_SAMPLERATES; *sr; ++sr) {
		if (_sampleRate == *sr) {
			sr_found = true;
			m_sampleRate = _sampleRate;
			ss.rate = _sampleRate;
			break;
		}
	}
	if (!sr_found) {
		ATA_ERROR("unsupported sample rate.");
		return false;
	}
	// Map the orchestra sample format onto the matching PulseAudio format.
	bool sf_found = 0;
	for (const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
		sf->airtaudio_format && sf->pa_format != PA_SAMPLE_INVALID;
		++sf) {
		if (_format == sf->airtaudio_format) {
			sf_found = true;
			m_userFormat = sf->airtaudio_format;
			ss.format = sf->pa_format;
			break;
		}
	}
	if (!sf_found) {
		ATA_ERROR("unsupported sample format.");
		return false;
	}
	// PulseAudio delivers interleaved native-endian data in the user's format,
	// so no byte swap or format conversion is ever needed here.
	m_deviceInterleaved[modeToIdTable(_mode)] = true;
	m_nBuffers = 1;
	m_doByteSwap[modeToIdTable(_mode)] = false;
	m_doConvertBuffer[modeToIdTable(_mode)] = false;
	m_deviceFormat[modeToIdTable(_mode)] = m_userFormat;
	m_nUserChannels[modeToIdTable(_mode)] = _channels;
	m_nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel;
	m_channelOffset[modeToIdTable(_mode)] = 0;
	// Allocate necessary internal buffers.
	bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
	m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
	if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
		ATA_ERROR("error allocating user buffer memory.");
		goto error;
	}
	m_bufferSize = *_bufferSize;
	// NOTE(review): m_doConvertBuffer is unconditionally set to false above,
	// so this conversion-buffer branch looks unreachable in this backend —
	// apparently kept from the generic template. Confirm before removing.
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		bool makeBuffer = true;
		bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
		if (_mode == audio::orchestra::mode_input) {
			if (m_mode == audio::orchestra::mode_output && m_deviceBuffer) {
				uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
				// Reuse the existing (larger) output-side device buffer when possible.
				if (bufferBytes <= bytesOut) makeBuffer = false;
			}
		}
		if (makeBuffer) {
			bufferBytes *= *_bufferSize;
			if (m_deviceBuffer) free(m_deviceBuffer);
			m_deviceBuffer = (char *) calloc(bufferBytes, 1);
			if (m_deviceBuffer == nullptr) {
				ATA_ERROR("error allocating device buffer memory.");
				goto error;
			}
		}
	}
	m_device[modeToIdTable(_mode)] = _device;
	// Setup the buffer conversion information structure.
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		setConvertInfo(_mode, _firstChannel);
	}
	// Open the actual PulseAudio simple connection for the requested direction.
	int32_t error;
	switch (_mode) {
		case audio::orchestra::mode_input:
			m_private->s_rec = pa_simple_new(nullptr, "orchestra", PA_STREAM_RECORD, nullptr, "Record", &ss, nullptr, nullptr, &error);
			if (!m_private->s_rec) {
				ATA_ERROR("error connecting input to PulseAudio server.");
				goto error;
			}
			break;
		case audio::orchestra::mode_output:
			m_private->s_play = pa_simple_new(nullptr, "orchestra", PA_STREAM_PLAYBACK, nullptr, "Playback", &ss, nullptr, nullptr, &error);
			if (!m_private->s_play) {
				ATA_ERROR("error connecting output to PulseAudio server.");
				goto error;
			}
			break;
		default:
			goto error;
	}
	// First direction opened sets the mode; opening the *other* direction
	// afterwards upgrades to duplex; re-opening the same direction is an error.
	if (m_mode == audio::orchestra::mode_unknow) {
		m_mode = _mode;
	} else if (m_mode == _mode) {
		goto error;
	}else {
		m_mode = audio::orchestra::mode_duplex;
	}
	// Spawn the IO thread once (shared between input and output directions).
	if (!m_private->threadRunning) {
		m_private->threadRunning = true;
		std11::shared_ptr<std11::thread> tmpThread(new std11::thread(&pulseaudio_callback, this));
		m_private->thread = std::move(tmpThread);
		if (m_private->thread == nullptr) {
			ATA_ERROR("error creating thread.");
			goto error;
		}
	}
	m_state = audio::orchestra::state_stopped;
	return true;
error:
	// NOTE(review): a pa_simple stream opened above is not freed on this
	// path — only the buffers are released. Potential leak; confirm.
	for (int32_t i=0; i<2; i++) {
		m_userBuffer[i].clear();
	}
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = 0;
	}
	return false;
}
#endif

View File

@@ -0,0 +1,54 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_PULSE_H__) && defined(ORCHESTRA_BUILD_PULSE)
#define __AUDIO_ORCHESTRA_API_PULSE_H__
namespace audio {
	namespace orchestra {
		namespace api {
			// Opaque pimpl holding the PulseAudio handles and the IO thread state.
			class PulsePrivate;
			/**
			 * @brief Linux PulseAudio backend, implemented on top of the
			 * pa_simple (blocking) API with a dedicated IO thread.
			 */
			class Pulse: public audio::orchestra::Api {
				public:
					// Factory used by the backend registry.
					static audio::orchestra::Api* Create();
				public:
					Pulse();
					virtual ~Pulse();
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_pulse;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					void callbackEventOneCycle();
					void callbackEvent();
				private:
					std11::shared_ptr<PulsePrivate> m_private;
					std::vector<audio::orchestra::DeviceInfo> m_devices;
					void saveDeviceInfo();
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}
#endif

6
audio/orchestra/base.cpp Normal file
View File

@@ -0,0 +1,6 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/

30
audio/orchestra/base.h Normal file
View File

@@ -0,0 +1,30 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_CB_H__
#define __AUDIO_ORCHESTRA_CB_H__
#include <etk/thread.h>
#include <etk/condition_variable.h>
#include <etk/mutex.h>
#include <etk/chrono.h>
#include <etk/functional.h>
#include <etk/memory.h>
#include <audio/channel.h>
#include <audio/format.h>
#include <audio/orchestra/error.h>
#include <audio/orchestra/status.h>
#include <audio/orchestra/Flags.h>
#include <audio/orchestra/CallbackInfo.h>
#include <audio/orchestra/DeviceInfo.h>
#include <audio/orchestra/StreamOptions.h>
#include <audio/orchestra/StreamParameters.h>
#endif

13
audio/orchestra/debug.cpp Normal file
View File

@@ -0,0 +1,13 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/debug.h>
// Lazily register this module's log instance (once; C++11 guarantees the
// static init is thread-safe) and return its id for the ATA_* macros.
int32_t audio::orchestra::getLogId() {
	static int32_t g_val = etk::log::registerInstance("audio-orchestra");
	return g_val;
}

44
audio/orchestra/debug.h Normal file
View File

@@ -0,0 +1,44 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_DEBUG_H__
#define __AUDIO_ORCHESTRA_DEBUG_H__
#include <etk/log.h>
namespace audio {
	namespace orchestra {
		// Log-instance id for this module (registered on first use).
		int32_t getLogId();
	}
}
// Base logging macro: routes through etk's logger with this module's id and a
// numeric severity level (1=critical ... 6=verbose).
#define ATA_BASE(info,data) TK_LOG_BASE(audio::orchestra::getLogId(),info,data)
#define ATA_CRITICAL(data) ATA_BASE(1, data)
#define ATA_ERROR(data) ATA_BASE(2, data)
#define ATA_WARNING(data) ATA_BASE(3, data)
// Info/Debug/Verbose/Todo are compiled out entirely in release builds.
#ifdef DEBUG
	#define ATA_INFO(data) ATA_BASE(4, data)
	#define ATA_DEBUG(data) ATA_BASE(5, data)
	#define ATA_VERBOSE(data) ATA_BASE(6, data)
	#define ATA_TODO(data) ATA_BASE(4, "TODO : " << data)
#else
	#define ATA_INFO(data) do { } while(false)
	#define ATA_DEBUG(data) do { } while(false)
	#define ATA_VERBOSE(data) do { } while(false)
	#define ATA_TODO(data) do { } while(false)
#endif
// Log critically and abort when the condition does not hold (active in all builds).
#define ATA_ASSERT(cond,data) \
	do { \
		if (!(cond)) { \
			ATA_CRITICAL(data); \
			assert(!#cond); \
		} \
	} while (0)
#endif

View File

@@ -0,0 +1,9 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/error.h>
#include <audio/orchestra/debug.h>

26
audio/orchestra/error.h Normal file
View File

@@ -0,0 +1,26 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_ERROR_H__
#define __AUDIO_ORCHESTRA_ERROR_H__
#include <etk/types.h>
namespace audio {
	namespace orchestra {
		// Result codes returned by the orchestra stream-control APIs.
		enum error {
			error_none, //!< No error
			error_fail, //!< An error occurred in the operation
			error_warning, //!< A non-critical error.
			error_inputNull, //!< null input or internal error
			error_invalidUse, //!< The function was called incorrectly.
			error_systemError //!< A system error occurred.
		};
	}
}
#endif

21
audio/orchestra/mode.cpp Normal file
View File

@@ -0,0 +1,21 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/mode.h>
#include <audio/orchestra/debug.h>
/**
 * @brief Map a stream mode onto its per-direction table index.
 * Input streams use slot 1; output, duplex and unknown all share slot 0.
 * @param _mode Stream direction.
 * @return 1 for mode_input, 0 for every other mode.
 */
int32_t audio::orchestra::modeToIdTable(enum mode _mode) {
	return (_mode == mode_input) ? 1 : 0;
}

26
audio/orchestra/mode.h Normal file
View File

@@ -0,0 +1,26 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_MODE_H__
#define __AUDIO_ORCHESTRA_MODE_H__
#include <etk/types.h>
namespace audio {
	namespace orchestra {
		// Direction of an audio stream.
		enum mode {
			mode_unknow, //!< stream not configured yet
			mode_output, //!< playback only
			mode_input, //!< capture only
			mode_duplex //!< simultaneous capture and playback
		};
		// Map a mode onto the 2-slot per-direction table index (input=1, others=0).
		int32_t modeToIdTable(enum mode _mode);
	}
}
#endif

View File

@@ -0,0 +1,6 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/

25
audio/orchestra/state.h Normal file
View File

@@ -0,0 +1,25 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_STATE_H__
#define __AUDIO_ORCHESTRA_STATE_H__
#include <etk/types.h>
namespace audio {
	namespace orchestra {
		// Lifecycle state of a stream.
		enum state {
			state_closed, //!< no device opened
			state_stopped, //!< opened, but not processing audio
			state_stopping, //!< stop requested, not yet effective
			state_running //!< actively processing audio
		};
	}
}
#endif

View File

@@ -0,0 +1,32 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/status.h>
#include <audio/orchestra/debug.h>
// Printable names, indexed by enum audio::orchestra::status value — must stay
// in declaration order: status_ok, status_overflow, status_underflow.
static const char* listValue[] = {
	"ok",
	"overflow",
	"underflow"
};
// Stream a single status value as its printable name.
// NOTE(review): no range check — assumes _obj is a valid enumerator.
std::ostream& audio::orchestra::operator <<(std::ostream& _os, enum audio::orchestra::status _obj) {
	_os << listValue[_obj];
	return _os;
}
// Stream a status list as "{a;b;c}".
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::status>& _obj) {
	_os << std::string("{");
	for (size_t iii=0; iii<_obj.size(); ++iii) {
		if (iii!=0) {
			_os << std::string(";");
		}
		_os << _obj[iii];
	}
	_os << std::string("}");
	return _os;
}

26
audio/orchestra/status.h Normal file
View File

@@ -0,0 +1,26 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_STATUS_H__
#define __AUDIO_ORCHESTRA_STATUS_H__
#include <etk/types.h>
namespace audio {
	namespace orchestra {
		// Per-cycle stream condition reported to the user callback.
		enum status {
			status_ok, //!< nothing...
			status_overflow, //!< Internal buffer has more data than they can accept
			status_underflow //!< The internal buffer is empty
		};
		std::ostream& operator <<(std::ostream& _os, enum audio::orchestra::status _obj);
		std::ostream& operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::status>& _obj);
	}
}
#endif

73
audio/orchestra/type.cpp Normal file
View File

@@ -0,0 +1,73 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#include <audio/orchestra/type.h>
#include <audio/orchestra/debug.h>
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
#undef __class__
#define __class__ "type"
// Printable names, indexed by enum audio::orchestra::type value — must stay in
// the same order as the enum declaration in type.h.
static const char* listType[] = {
	"undefined",
	"alsa",
	"pulse",
	"oss",
	"jack",
	"coreOSX",
	// Fixed: was "corIOS", which broke the round-trip with type_coreIOS
	// (getTypeFromString(getTypeString(type_coreIOS)) returned type_undefined).
	"coreIOS",
	"asio",
	"ds",
	"java",
	"dummy",
	"user1",
	"user2",
	"user3",
	"user4"
};
static int32_t listTypeSize = sizeof(listType)/sizeof(char*);

// Stream a backend type as its printable name; out-of-range values are
// rendered as "undefined" instead of reading past the end of the table.
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const enum audio::orchestra::type& _obj) {
	if (_obj < 0 || _obj >= listTypeSize) {
		_os << listType[audio::orchestra::type_undefined];
		return _os;
	}
	_os << listType[_obj];
	return _os;
}

// Stream a backend-type list as "{a;b;c}".
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::type>& _obj) {
	_os << std::string("{");
	for (size_t iii=0; iii<_obj.size(); ++iii) {
		if (iii!=0) {
			_os << std::string(";");
		}
		_os << _obj[iii];
	}
	_os << std::string("}");
	return _os;
}

/**
 * @brief Get the printable name of a backend type.
 * @param _value Backend type; out-of-range values yield "undefined".
 */
std::string audio::orchestra::getTypeString(enum audio::orchestra::type _value) {
	if (_value < 0 || _value >= listTypeSize) {
		return listType[audio::orchestra::type_undefined];
	}
	return listType[_value];
}

/**
 * @brief Parse a backend name back into its enum value.
 * @param _value Name as produced by getTypeString().
 * @return The matching type; "auto" and any unknown name both resolve to
 * type_undefined (the auto-detect value).
 */
enum audio::orchestra::type audio::orchestra::getTypeFromString(const std::string& _value) {
	for (int32_t iii=0; iii<listTypeSize; ++iii) {
		if (_value == listType[iii]) {
			return static_cast<enum audio::orchestra::type>(iii);
		}
	}
	return audio::orchestra::type_undefined;
}

44
audio/orchestra/type.h Normal file
View File

@@ -0,0 +1,44 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifndef __AUDIO_ORCHESTRA_TYPE_H__
#define __AUDIO_ORCHESTRA_TYPE_H__
#include <etk/types.h>
#include <etk/stdTools.h>
namespace audio {
	namespace orchestra {
		/**
		 * @brief Audio API specifier arguments.
		 * Declaration order must stay in sync with the name table in type.cpp.
		 */
		enum type {
			type_undefined, //!< Error API.
			type_alsa, //!< LINUX The Advanced Linux Sound Architecture.
			type_pulse, //!< LINUX The Linux PulseAudio.
			type_oss, //!< LINUX The Linux Open Sound System.
			type_jack, //!< UNIX The Jack Low-Latency Audio Server.
			type_coreOSX, //!< Macintosh OSX Core Audio.
			type_coreIOS, //!< Macintosh iOS Core Audio.
			type_asio, //!< WINDOWS The Steinberg Audio Stream I/O.
			type_ds, //!< WINDOWS The Microsoft Direct Sound.
			type_java, //!< ANDROID Interface.
			type_dummy, //!< Empty wrapper (non-functional).
			type_user1, //!< User interface 1.
			type_user2, //!< User interface 2.
			type_user3, //!< User interface 3.
			type_user4, //!< User interface 4.
		};
		std::ostream& operator <<(std::ostream& _os, const enum audio::orchestra::type& _obj);
		std::ostream& operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::type>& _obj);
		// Name <-> enum conversions (round-trip safe; unknown names map to type_undefined).
		std::string getTypeString(enum audio::orchestra::type _value);
		enum audio::orchestra::type getTypeFromString(const std::string& _value);
	}
}
#endif