[DEV] add basic clone of RtAudio with code simplification review (tested alsa, pulse, jack)

This commit is contained in:
Edouard DUPIN 2014-03-11 21:46:00 +01:00
commit b21b2f7413
27 changed files with 9861 additions and 0 deletions

878
airtaudio/Api.cpp Normal file
View File

@ -0,0 +1,878 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all rights reserved
*
* @license like MIT (see license file)
*/
#include <airtaudio/Interface.h>
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <climits>
// Static variable definitions.
// Candidate sample rates probed when querying device capabilities.
// NOTE: MAX_SAMPLE_RATES must stay equal to the number of entries in
// SAMPLE_RATES below (currently 14).
const uint32_t airtaudio::api::MAX_SAMPLE_RATES = 14;
const uint32_t airtaudio::api::SAMPLE_RATES[] = {
4000, 5512, 8000, 9600, 11025, 16000, 22050,
32000, 44100, 48000, 88200, 96000, 176400, 192000
};
airtaudio::Api::Api(void) {
	// A freshly constructed API object has no stream attached yet and
	// reports warnings by default.
	m_showWarnings = true;
	m_stream.state = airtaudio::api::STREAM_CLOSED;
	m_stream.mode = airtaudio::api::UNINITIALIZED;
	m_stream.apiHandle = nullptr;
	m_stream.userBuffer[0] = nullptr;
	m_stream.userBuffer[1] = nullptr;
}
airtaudio::Api::~Api(void) {
	// Nothing to release here; subclasses own their API-specific resources.
}
/**
 * @brief Common front-end for opening a stream: validates the parameters,
 *        probes the requested device(s) and stores the callback information.
 * @param oParams Output stream parameters (nullptr for input-only streams).
 * @param iParams Input stream parameters (nullptr for output-only streams).
 * @param format Sample format requested by the user.
 * @param sampleRate Requested sample rate in Hz.
 * @param bufferFrames In/out: requested buffer size in frames; updated with the
 *        value actually granted by the device.
 * @param callback User audio callback invoked for each buffer.
 * @param userData Opaque pointer handed back to the callback.
 * @param options Optional stream options; numberOfBuffers is updated on success.
 * @param errorCallback Optional user error callback.
 * @note On any failure an error is reported through error() and the function
 *       returns with the stream still closed.
 */
void airtaudio::Api::openStream(airtaudio::StreamParameters *oParams,
                                airtaudio::StreamParameters *iParams,
                                airtaudio::format format,
                                uint32_t sampleRate,
                                uint32_t *bufferFrames,
                                airtaudio::AirTAudioCallback callback,
                                void *userData,
                                airtaudio::StreamOptions *options,
                                airtaudio::AirTAudioErrorCallback errorCallback) {
	// Only one stream per Api instance.
	if (m_stream.state != airtaudio::api::STREAM_CLOSED) {
		m_errorText = "airtaudio::Api::openStream: a stream is already open!";
		error(airtaudio::errorInvalidUse);
		return;
	}
	// Validate the user-supplied parameter structures.
	if (oParams != nullptr && oParams->nChannels < 1) {
		m_errorText = "airtaudio::Api::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
		error(airtaudio::errorInvalidUse);
		return;
	}
	if (iParams != nullptr && iParams->nChannels < 1) {
		m_errorText = "airtaudio::Api::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
		error(airtaudio::errorInvalidUse);
		return;
	}
	if (oParams == nullptr && iParams == nullptr) {
		m_errorText = "airtaudio::Api::openStream: input and output StreamParameters structures are both NULL!";
		error(airtaudio::errorInvalidUse);
		return;
	}
	if (formatBytes(format) == 0) {
		m_errorText = "airtaudio::Api::openStream: 'format' parameter value is undefined.";
		error(airtaudio::errorInvalidUse);
		return;
	}
	// Check the requested device ids against the number of known devices.
	uint32_t nDevices = getDeviceCount();
	uint32_t oChannels = 0;
	if (oParams != nullptr) {
		oChannels = oParams->nChannels;
		if (oParams->deviceId >= nDevices) {
			m_errorText = "airtaudio::Api::openStream: output device parameter value is invalid.";
			error(airtaudio::errorInvalidUse);
			return;
		}
	}
	uint32_t iChannels = 0;
	if (iParams != nullptr) {
		iChannels = iParams->nChannels;
		if (iParams->deviceId >= nDevices) {
			m_errorText = "airtaudio::Api::openStream: input device parameter value is invalid.";
			error(airtaudio::errorInvalidUse);
			return;
		}
	}
	clearStreamInfo();
	// FIX: 'result' was previously declared uninitialized.
	bool result = false;
	// Probe the output side first, then the input side.
	if (oChannels > 0) {
		result = probeDeviceOpen(oParams->deviceId,
		                         airtaudio::api::OUTPUT,
		                         oChannels,
		                         oParams->firstChannel,
		                         sampleRate,
		                         format,
		                         bufferFrames,
		                         options);
		if (result == false) {
			error(airtaudio::errorSystemError);
			return;
		}
	}
	if (iChannels > 0) {
		result = probeDeviceOpen(iParams->deviceId,
		                         airtaudio::api::INPUT,
		                         iChannels,
		                         iParams->firstChannel,
		                         sampleRate,
		                         format,
		                         bufferFrames,
		                         options);
		if (result == false) {
			// Roll back the already-opened output side on input failure.
			if (oChannels > 0) closeStream();
			error(airtaudio::errorSystemError);
			return;
		}
	}
	// Store the (type-erased) user callbacks for later invocation.
	m_stream.callbackInfo.callback = (void *) callback;
	m_stream.callbackInfo.userData = userData;
	m_stream.callbackInfo.errorCallback = (void *) errorCallback;
	if (options != nullptr) {
		// Report back how many buffers were actually granted.
		options->numberOfBuffers = m_stream.nBuffers;
	}
	m_stream.state = airtaudio::api::STREAM_STOPPED;
}
uint32_t airtaudio::Api::getDefaultInputDevice(void) {
	// Base-class fallback: report the first device. Subclasses that can
	// query the system for a real default should override this.
	return 0;
}
uint32_t airtaudio::Api::getDefaultOutputDevice(void) {
	// Base-class fallback: report the first device. Subclasses that can
	// query the system for a real default should override this.
	return 0;
}
void airtaudio::Api::closeStream(void) {
	// Default implementation does nothing.
	// MUST be implemented in subclasses that actually open streams!
}
// Base-class stub for the api-specific device-open attempt.
// MUST be implemented in subclasses; the base class cannot open anything,
// so it always reports FAILURE.
bool airtaudio::Api::probeDeviceOpen(uint32_t /*device*/,
                                     airtaudio::api::StreamMode /*mode*/,
                                     uint32_t /*channels*/,
                                     uint32_t /*firstChannel*/,
                                     uint32_t /*sampleRate*/,
                                     airtaudio::format /*format*/,
                                     uint32_t * /*bufferSize*/,
                                     airtaudio::StreamOptions * /*options*/) {
	return airtaudio::api::FAILURE;
}
void airtaudio::Api::tickStreamTime(void) {
	// Subclasses that do not provide their own implementation of
	// getStreamTime should call this function once per buffer I/O to
	// provide basic stream time support.
	double bufferDuration = (double)m_stream.bufferSize / (double)m_stream.sampleRate;
	m_stream.streamTime += bufferDuration;
#if defined(HAVE_GETTIMEOFDAY)
	// Remember when this tick happened so getStreamTime() can interpolate.
	gettimeofday(&m_stream.lastTickTimestamp, NULL);
#endif
}
// Total stream latency in frames: playback latency, record latency, or the
// sum of both for a duplex stream.
long airtaudio::Api::getStreamLatency(void) {
	verifyStream();
	long totalLatency = 0;
	if (   m_stream.mode == airtaudio::api::OUTPUT
	    || m_stream.mode == airtaudio::api::DUPLEX) {
		totalLatency += m_stream.latency[0];
	}
	if (   m_stream.mode == airtaudio::api::INPUT
	    || m_stream.mode == airtaudio::api::DUPLEX) {
		totalLatency += m_stream.latency[1];
	}
	return totalLatency;
}
double airtaudio::Api::getStreamTime(void) {
	verifyStream();
#if defined(HAVE_GETTIMEOFDAY)
	// Refine the stream-time estimate by adding the wall-clock time elapsed
	// since the last tickStreamTime() call.
	if (   m_stream.state != airtaudio::api::STREAM_RUNNING
	    || m_stream.streamTime == 0.0) {
		return m_stream.streamTime;
	}
	struct timeval now;
	gettimeofday(&now, NULL);
	struct timeval then = m_stream.lastTickTimestamp;
	double sinceLastTick = (now.tv_sec + 0.000001 * now.tv_usec)
	                     - (then.tv_sec + 0.000001 * then.tv_usec);
	return m_stream.streamTime + sinceLastTick;
#else
	return m_stream.streamTime;
#endif
}
uint32_t airtaudio::Api::getStreamSampleRate(void) {
	// The sample rate is only meaningful once a stream has been opened;
	// verifyStream() reports an error otherwise.
	verifyStream();
	return m_stream.sampleRate;
}
// *************************************************** //
//
// Protected common (OS-independent) RtAudio methods.
//
// *************************************************** //
// This method can be modified to control the behavior of error
// message printing.
// Dispatch an error: when a user error callback is registered it is invoked
// with the message (and a non-warning error aborts the running stream first);
// otherwise the message is printed to the console (warnings only when
// m_showWarnings is enabled).
// NOTE(review): 'firstErrorOccured' below is a function-local static, so it
// is shared by ALL Api instances and is not synchronized — confirm this is
// acceptable when several streams run concurrently.
void airtaudio::Api::error(airtaudio::errorType _type) {
m_errorStream.str(""); // clear the ostringstream
airtaudio::AirTAudioErrorCallback errorCallback = (airtaudio::AirTAudioErrorCallback) m_stream.callbackInfo.errorCallback;
if (errorCallback) {
// abortStream() can generate new error messages. Ignore them. Just keep original one.
static bool firstErrorOccured = false;
if (firstErrorOccured) {
return;
}
firstErrorOccured = true;
// Copy the message before abortStream() can overwrite m_errorText.
const std::string errorMessage = m_errorText;
if (_type != airtaudio::errorWarning && m_stream.state != airtaudio::api::STREAM_STOPPED) {
m_stream.callbackInfo.isRunning = false; // exit from the thread
abortStream();
}
errorCallback(_type, errorMessage);
firstErrorOccured = false;
return;
}
if (_type == airtaudio::errorWarning && m_showWarnings == true) {
std::cerr << '\n' << m_errorText << "\n\n";
} else if (_type != airtaudio::errorWarning) {
//throw(RtError(m_errorText, type));
std::cout << m_errorText << std::endl;
}
}
void airtaudio::Api::verifyStream(void) {
	// Report an invalid-use error when no stream has been opened yet.
	if (m_stream.state != airtaudio::api::STREAM_CLOSED) {
		return;
	}
	m_errorText = "airtaudio::Api:: a stream is not open!";
	error(airtaudio::errorInvalidUse);
}
// Reset every field of the stream structure to its "closed" defaults.
// Called from openStream() before probing and after a stream is closed.
void airtaudio::Api::clearStreamInfo(void) {
m_stream.mode = airtaudio::api::UNINITIALIZED;
m_stream.state = airtaudio::api::STREAM_CLOSED;
m_stream.sampleRate = 0;
m_stream.bufferSize = 0;
m_stream.nBuffers = 0;
m_stream.userFormat = 0;
m_stream.userInterleaved = true;
m_stream.streamTime = 0.0;
m_stream.apiHandle = 0;
m_stream.deviceBuffer = 0;
m_stream.callbackInfo.callback = 0;
m_stream.callbackInfo.userData = 0;
m_stream.callbackInfo.isRunning = false;
m_stream.callbackInfo.errorCallback = 0;
// Index 0 = playback side, index 1 = record side.
for (int32_t iii=0; iii<2; ++iii) {
m_stream.device[iii] = 11111; // presumably a "no device selected" sentinel (same value as the Stream constructor) — TODO confirm
m_stream.doConvertBuffer[iii] = false;
m_stream.deviceInterleaved[iii] = true;
m_stream.doByteSwap[iii] = false;
m_stream.nUserChannels[iii] = 0;
m_stream.nDeviceChannels[iii] = 0;
m_stream.channelOffset[iii] = 0;
m_stream.deviceFormat[iii] = 0;
m_stream.latency[iii] = 0;
m_stream.userBuffer[iii] = 0;
m_stream.convertInfo[iii].channels = 0;
m_stream.convertInfo[iii].inJump = 0;
m_stream.convertInfo[iii].outJump = 0;
m_stream.convertInfo[iii].inFormat = 0;
m_stream.convertInfo[iii].outFormat = 0;
m_stream.convertInfo[iii].inOffset.clear();
m_stream.convertInfo[iii].outOffset.clear();
}
}
// Number of bytes per sample for a given format; 0 (plus a warning) for an
// unknown format value.
uint32_t airtaudio::Api::formatBytes(airtaudio::format _format) {
	if (_format == airtaudio::SINT8) {
		return 1;
	}
	if (_format == airtaudio::SINT16) {
		return 2;
	}
	if (_format == airtaudio::SINT24) {
		return 3;
	}
	if (   _format == airtaudio::SINT32
	    || _format == airtaudio::FLOAT32) {
		return 4;
	}
	if (_format == airtaudio::FLOAT64) {
		return 8;
	}
	m_errorText = "airtaudio::Api::formatBytes: undefined format.";
	error(airtaudio::errorWarning);
	return 0;
}
// Set up the buffer-conversion parameters (jumps, channel count and
// per-channel offsets) for one direction of the stream.
// _mode selects the direction (OUTPUT==0, INPUT==1) and doubles as the index
// into convertInfo[]; note that the nUserChannels/nDeviceChannels indices are
// hard-coded (1 for input, 0 for output) to match that direction.
// _firstChannel shifts all offsets to start at a given device channel.
void airtaudio::Api::setConvertInfo(airtaudio::api::StreamMode _mode, uint32_t _firstChannel) {
if (_mode == airtaudio::api::INPUT) { // convert device to user buffer
m_stream.convertInfo[_mode].inJump = m_stream.nDeviceChannels[1];
m_stream.convertInfo[_mode].outJump = m_stream.nUserChannels[1];
m_stream.convertInfo[_mode].inFormat = m_stream.deviceFormat[1];
m_stream.convertInfo[_mode].outFormat = m_stream.userFormat;
} else { // convert user to device buffer
m_stream.convertInfo[_mode].inJump = m_stream.nUserChannels[0];
m_stream.convertInfo[_mode].outJump = m_stream.nDeviceChannels[0];
m_stream.convertInfo[_mode].inFormat = m_stream.userFormat;
m_stream.convertInfo[_mode].outFormat = m_stream.deviceFormat[0];
}
// Convert only as many channels as both sides have (min of in/out jump).
if (m_stream.convertInfo[_mode].inJump < m_stream.convertInfo[_mode].outJump) {
m_stream.convertInfo[_mode].channels = m_stream.convertInfo[_mode].inJump;
} else {
m_stream.convertInfo[_mode].channels = m_stream.convertInfo[_mode].outJump;
}
// Set up the interleave/deinterleave offsets.
// Non-interleaved data is laid out channel-major: channel k starts at
// k * bufferSize; interleaved data advances by the jump each frame.
if (m_stream.deviceInterleaved[_mode] != m_stream.userInterleaved) {
if ( ( _mode == airtaudio::api::OUTPUT
&& m_stream.deviceInterleaved[_mode])
|| ( _mode == airtaudio::api::INPUT
&& m_stream.userInterleaved)) {
// non-interleaved source -> interleaved destination
for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
m_stream.convertInfo[_mode].inOffset.push_back(kkk * m_stream.bufferSize);
m_stream.convertInfo[_mode].outOffset.push_back(kkk);
m_stream.convertInfo[_mode].inJump = 1;
}
} else {
// interleaved source -> non-interleaved destination
for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
m_stream.convertInfo[_mode].inOffset.push_back(kkk);
m_stream.convertInfo[_mode].outOffset.push_back(kkk * m_stream.bufferSize);
m_stream.convertInfo[_mode].outJump = 1;
}
}
} else { // no (de)interleaving
if (m_stream.userInterleaved) {
for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
m_stream.convertInfo[_mode].inOffset.push_back(kkk);
m_stream.convertInfo[_mode].outOffset.push_back(kkk);
}
} else {
for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
m_stream.convertInfo[_mode].inOffset.push_back(kkk * m_stream.bufferSize);
m_stream.convertInfo[_mode].outOffset.push_back(kkk * m_stream.bufferSize);
m_stream.convertInfo[_mode].inJump = 1;
m_stream.convertInfo[_mode].outJump = 1;
}
}
}
// Add channel offset.
// Only the device side is shifted: interleaved devices offset by channel
// index, non-interleaved devices by whole channel buffers.
if (_firstChannel > 0) {
if (m_stream.deviceInterleaved[_mode]) {
if (_mode == airtaudio::api::OUTPUT) {
for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
m_stream.convertInfo[_mode].outOffset[kkk] += _firstChannel;
}
} else {
for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
m_stream.convertInfo[_mode].inOffset[kkk] += _firstChannel;
}
}
} else {
if (_mode == airtaudio::api::OUTPUT) {
for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
m_stream.convertInfo[_mode].outOffset[kkk] += (_firstChannel * m_stream.bufferSize);
}
} else {
for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
m_stream.convertInfo[_mode].inOffset[kkk] += (_firstChannel * m_stream.bufferSize);
}
}
}
}
}
/**
 * @brief Perform format conversion, input/output channel compensation, and
 *        data interleaving/deinterleaving between two buffers.
 * @param _outBuffer Destination buffer, written using _info.outFormat.
 * @param _inBuffer Source buffer, read using _info.inFormat.
 * @param _info Conversion parameters: formats, per-frame jumps and per-channel offsets.
 * @note 24-bit integers are assumed to occupy the lower three bytes of a 32-bit integer.
 * @note FIX: in the SINT8-output chain the SINT16-input case was tested with a
 *       plain 'if' instead of 'else if'. Behavior was accidentally correct
 *       (the format tests are mutually exclusive) but the chain is now consistent.
 */
void airtaudio::Api::convertBuffer(char *_outBuffer, char *_inBuffer, airtaudio::api::ConvertInfo &_info) {
	// Clear our device buffer when in/out duplex device channels are different
	if (   _outBuffer == m_stream.deviceBuffer
	    && m_stream.mode == airtaudio::api::DUPLEX
	    && m_stream.nDeviceChannels[0] < m_stream.nDeviceChannels[1]) {
		memset(_outBuffer, 0, m_stream.bufferSize * _info.outJump * formatBytes(_info.outFormat));
	}
	int32_t jjj;
	if (_info.outFormat == airtaudio::FLOAT64) {
		double scale;
		double *out = (double *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			scale = 1.0 / 127.5;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			scale = 1.0 / 32767.5;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			scale = 1.0 / 8388607.5;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) (in[_info.inOffset[jjj]].asInt());
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			scale = 1.0 / 2147483647.5;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT64) {
			// Channel compensation and/or (de)interleaving only.
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	} else if (_info.outFormat == airtaudio::FLOAT32) {
		float scale;
		float *out = (float *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			scale = (float) (1.0 / 127.5);
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			scale = (float) (1.0 / 32767.5);
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			scale = (float) (1.0 / 8388607.5);
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) (in[_info.inOffset[jjj]].asInt());
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			scale = (float) (1.0 / 2147483647.5);
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT32) {
			// Channel compensation and/or (de)interleaving only.
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	} else if (_info.outFormat == airtaudio::SINT32) {
		int32_t *out = (int32_t *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] <<= 24;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] <<= 16;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) in[_info.inOffset[jjj]].asInt();
					out[_info.outOffset[jjj]] <<= 8;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT32) {
			// Channel compensation and/or (de)interleaving only.
			int32_t *in = (int32_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] * 2147483647.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] * 2147483647.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	} else if (_info.outFormat == airtaudio::SINT24) {
		int24_t *out = (int24_t *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] << 16);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] << 8);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT24) {
			// Channel compensation and/or (de)interleaving only.
			int24_t *in = (int24_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] >> 8);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] * 8388607.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] * 8388607.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	} else if (_info.outFormat == airtaudio::SINT16) {
		int16_t *out = (int16_t *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] <<= 8;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT16) {
			// Channel compensation and/or (de)interleaving only.
			int16_t *in = (int16_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) (in[_info.inOffset[jjj]].asInt() >> 8);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) ((in[_info.inOffset[jjj]] >> 16) & 0x0000ffff);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) (in[_info.inOffset[jjj]] * 32767.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) (in[_info.inOffset[jjj]] * 32767.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	} else if (_info.outFormat == airtaudio::SINT8) {
		signed char *out = (signed char *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			// Channel compensation and/or (de)interleaving only.
			signed char *in = (signed char *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT16) { // FIX: was a plain 'if'
			int16_t *in = (int16_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) ((in[_info.inOffset[jjj]] >> 8) & 0x00ff);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) (in[_info.inOffset[jjj]].asInt() >> 16);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) ((in[_info.inOffset[jjj]] >> 24) & 0x000000ff);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) (in[_info.inOffset[jjj]] * 127.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		} else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) (in[_info.inOffset[jjj]] * 127.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	}
}
/**
 * @brief Reverse the byte ordering of every sample in a buffer, in place.
 * @param _buffer Buffer to process.
 * @param _samples Number of samples (not bytes) in the buffer.
 * @param _format Sample format, which determines the sample width.
 * @note SINT8 (and unknown formats) need no swapping and are left untouched.
 * @note FIX: dropped the 'register' qualifiers of the original code — the
 *       keyword is deprecated since C++11 and removed in C++17.
 */
void airtaudio::Api::byteSwapBuffer(char *_buffer, uint32_t _samples, airtaudio::format _format) {
	char val;
	char *ptr = _buffer;
	if (_format == airtaudio::SINT16) {
		for (uint32_t iii=0; iii<_samples; ++iii) {
			// Swap 1st and 2nd bytes.
			val = *(ptr);
			*(ptr) = *(ptr+1);
			*(ptr+1) = val;
			// Increment 2 bytes.
			ptr += 2;
		}
	} else if (   _format == airtaudio::SINT32
	           || _format == airtaudio::FLOAT32) {
		for (uint32_t iii=0; iii<_samples; ++iii) {
			// Swap 1st and 4th bytes.
			val = *(ptr);
			*(ptr) = *(ptr+3);
			*(ptr+3) = val;
			// Swap 2nd and 3rd bytes.
			ptr += 1;
			val = *(ptr);
			*(ptr) = *(ptr+1);
			*(ptr+1) = val;
			// Increment 3 more bytes.
			ptr += 3;
		}
	} else if (_format == airtaudio::SINT24) {
		for (uint32_t iii=0; iii<_samples; ++iii) {
			// Swap 1st and 3rd bytes (middle byte stays in place).
			val = *(ptr);
			*(ptr) = *(ptr+2);
			*(ptr+2) = val;
			// Increment 2 more bytes.
			ptr += 2;
		}
	} else if (_format == airtaudio::FLOAT64) {
		for (uint32_t iii=0; iii<_samples; ++iii) {
			// Swap 1st and 8th bytes
			val = *(ptr);
			*(ptr) = *(ptr+7);
			*(ptr+7) = val;
			// Swap 2nd and 7th bytes
			ptr += 1;
			val = *(ptr);
			*(ptr) = *(ptr+5);
			*(ptr+5) = val;
			// Swap 3rd and 6th bytes
			ptr += 1;
			val = *(ptr);
			*(ptr) = *(ptr+3);
			*(ptr+3) = val;
			// Swap 4th and 5th bytes
			ptr += 1;
			val = *(ptr);
			*(ptr) = *(ptr+1);
			*(ptr+1) = val;
			// Increment 5 more bytes.
			ptr += 5;
		}
	}
}

201
airtaudio/Api.h Normal file
View File

@ -0,0 +1,201 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all rights reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_API_H__
#define __AIRTAUDIO_API_H__
#include <sstream>
namespace airtaudio {
namespace api {
/**
* @brief Audio API specifier arguments.
*/
enum type {
UNSPECIFIED, /*!< Search for a working compiled API. */
LINUX_ALSA, /*!< The Advanced Linux Sound Architecture API. */
LINUX_PULSE, /*!< The Linux PulseAudio API. */
LINUX_OSS, /*!< The Linux Open Sound System API. */
UNIX_JACK, /*!< The Jack Low-Latency Audio Server API. */
MACOSX_CORE, /*!< Macintosh OS-X Core Audio API. */
WINDOWS_ASIO, /*!< The Steinberg Audio Stream I/O API. */
WINDOWS_DS, /*!< The Microsoft Direct Sound API. */
RTAUDIO_DUMMY /*!< A compilable but non-functional API. */
};
// Sample rates probed when querying devices (defined in Api.cpp);
// MAX_SAMPLE_RATES is the number of entries in SAMPLE_RATES.
extern const uint32_t MAX_SAMPLE_RATES;
extern const uint32_t SAMPLE_RATES[];
// Generic probe result used by probeDeviceOpen() (FAILURE == 0, SUCCESS == 1).
enum {
FAILURE,
SUCCESS
};
// Life-cycle state of a stream.
enum StreamState {
STREAM_STOPPED,
STREAM_STOPPING,
STREAM_RUNNING,
STREAM_CLOSED = -50
};
// Direction of a stream; OUTPUT/INPUT also serve as indices into the
// per-direction arrays of the Stream structure.
enum StreamMode {
OUTPUT,
INPUT,
DUPLEX,
UNINITIALIZED = -75
};
// A protected structure used for buffer conversion.
struct ConvertInfo {
int32_t channels; // number of channels actually converted (min of in/out)
int32_t inJump, outJump; // per-frame strides of the source/destination buffers
airtaudio::format inFormat, outFormat;
std::vector<int> inOffset; // per-channel offsets into the source buffer
std::vector<int> outOffset; // per-channel offsets into the destination buffer
};
// A protected structure for audio streams.
class Stream {
public:
uint32_t device[2]; // Playback and record, respectively.
void *apiHandle; // void pointer for API specific stream handle information
airtaudio::api::StreamMode mode; // OUTPUT, INPUT, or DUPLEX.
airtaudio::api::StreamState state; // STOPPED, RUNNING, or CLOSED
char *userBuffer[2]; // Playback and record, respectively.
char *deviceBuffer;
bool doConvertBuffer[2]; // Playback and record, respectively.
bool userInterleaved;
bool deviceInterleaved[2]; // Playback and record, respectively.
bool doByteSwap[2]; // Playback and record, respectively.
uint32_t sampleRate;
uint32_t bufferSize;
uint32_t nBuffers;
uint32_t nUserChannels[2]; // Playback and record, respectively.
uint32_t nDeviceChannels[2]; // Playback and record channels, respectively.
uint32_t channelOffset[2]; // Playback and record, respectively.
uint64_t latency[2]; // Playback and record, respectively.
airtaudio::format userFormat;
airtaudio::format deviceFormat[2]; // Playback and record, respectively.
std::mutex mutex;
airtaudio::CallbackInfo callbackInfo;
airtaudio::api::ConvertInfo convertInfo[2];
double streamTime; // Number of elapsed seconds since the stream started.
#if defined(HAVE_GETTIMEOFDAY)
struct timeval lastTickTimestamp;
#endif
Stream(void) :
apiHandle(0),
deviceBuffer(0) {
device[0] = 11111;
device[1] = 11111;
}
};
};
/**
* airtaudio::Api class declaration.
*
* Subclasses of Api contain all API- and OS-specific code necessary
* to fully implement the airtaudio API.
*
* Note that Api is an abstract base class and cannot be
* explicitly instantiated. The interface class will create an
* instance of an Api subclass for the selected backend
* (ALSA, PulseAudio, Jack, CoreAudio, DirectSound, ASIO, ...).
*/
class Api {
public:
Api(void);
virtual ~Api(void);
//! Identify which backend this subclass implements.
virtual airtaudio::api::type getCurrentApi(void) = 0;
//! Number of audio devices the backend can see.
virtual uint32_t getDeviceCount(void) = 0;
//! Query the capabilities of one device.
virtual airtaudio::DeviceInfo getDeviceInfo(uint32_t _device) = 0;
// Default-device queries; the base class simply returns 0 (first device).
virtual uint32_t getDefaultInputDevice(void);
virtual uint32_t getDefaultOutputDevice(void);
//! Validate the parameters, probe the device(s) and prepare the stream.
void openStream(airtaudio::StreamParameters *_outputParameters,
airtaudio::StreamParameters *_inputParameters,
airtaudio::format _format,
uint32_t _sampleRate,
uint32_t *_bufferFrames,
airtaudio::AirTAudioCallback _callback,
void *_userData,
airtaudio::StreamOptions *_options,
airtaudio::AirTAudioErrorCallback _errorCallback);
virtual void closeStream(void);
virtual void startStream(void) = 0;
virtual void stopStream(void) = 0;
virtual void abortStream(void) = 0;
//! Total stream latency in frames (playback + record for duplex).
long getStreamLatency(void);
uint32_t getStreamSampleRate(void);
//! Elapsed stream time in seconds (see tickStreamTime()).
virtual double getStreamTime(void);
bool isStreamOpen(void) const {
return m_stream.state != airtaudio::api::STREAM_CLOSED;
}
bool isStreamRunning(void) const {
return m_stream.state == airtaudio::api::STREAM_RUNNING;
}
//! Enable/disable console printing of warning messages.
void showWarnings(bool _value) {
m_showWarnings = _value;
}
protected:
std::ostringstream m_errorStream;
std::string m_errorText; // last error message, consumed by error()
bool m_showWarnings;
airtaudio::api::Stream m_stream; // state of the (single) stream of this Api
/*!
Protected, api-specific method that attempts to open a device
with the given parameters. This function MUST be implemented by
all subclasses. If an error is encountered during the probe, a
"warning" message is reported and FAILURE is returned. A
successful probe is indicated by a return value of SUCCESS.
*/
virtual bool probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options);
//! A protected function used to increment the stream time.
void tickStreamTime(void);
//! Protected common method to clear an RtApiStream structure.
void clearStreamInfo();
/*!
Protected common method that throws an RtError (type =
INVALID_USE) if a stream is not open.
*/
void verifyStream(void);
//! Protected common error method to allow global control over error handling.
void error(airtaudio::errorType _type);
/**
* @brief Protected method used to perform format, channel number, and/or interleaving
* conversions between the user and device buffers.
*/
void convertBuffer(char *_outBuffer, char *_inBuffer, airtaudio::api::ConvertInfo &_info);
//! Protected common method used to perform byte-swapping on buffers.
void byteSwapBuffer(char *_buffer, uint32_t _samples, airtaudio::format _format);
//! Protected common method that returns the number of bytes for a given format.
uint32_t formatBytes(airtaudio::format _format);
//! Protected common method that sets up the parameters for buffer conversion.
void setConvertInfo(airtaudio::api::StreamMode _mode, uint32_t _firstChannel);
};
};
#endif

46
airtaudio/CallbackInfo.h Normal file
View File

@ -0,0 +1,46 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_CALLBACK_INFO_H__
#define __AIRTAUDIO_CALLBACK_INFO_H__
#include <thread>
namespace airtaudio {
// This global structure type is used to pass callback information
// between the private RtAudio stream structure and global callback
// handling functions.
class CallbackInfo {
	public:
		void* object; //!< Used as a "this" pointer by the API-specific callback handlers.
		std::thread* thread; //!< Callback thread handle (owned by the API backend).
		void* callback; //!< User audio callback, stored type-erased.
		void* userData; //!< Opaque user data forwarded to the callback.
		void* errorCallback; //!< User error callback, stored type-erased.
		void* apiInfo; //!< void pointer for API specific callback information.
		bool isRunning; //!< true while the callback thread must keep running.
		bool doRealtime; //!< true if realtime scheduling was requested for the thread.
		int32_t priority; //!< Scheduling priority of the callback thread.
		/**
		 * @brief Default constructor: zero-initialize every member.
		 * @note The original version left 'thread' and 'priority' uninitialized,
		 *       which is undefined behavior when they are read before assignment.
		 */
		CallbackInfo(void) :
		  object(nullptr),
		  thread(nullptr),
		  callback(nullptr),
		  userData(nullptr),
		  errorCallback(nullptr),
		  apiInfo(nullptr),
		  isRunning(false),
		  doRealtime(false),
		  priority(0) {
		}
};
};
#endif

40
airtaudio/DeviceInfo.h Normal file
View File

@ -0,0 +1,40 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_DEVICE_INFO_H__
#define __AIRTAUDIO_DEVICE_INFO_H__
namespace airtaudio {
/**
 * @brief The public device information structure for returning queried values.
 */
class DeviceInfo {
	public:
		bool probed = false; //!< true if the device capabilities were successfully probed.
		std::string name; //!< Character string device identifier.
		uint32_t outputChannels = 0; //!< Maximum output channels supported by device.
		uint32_t inputChannels = 0; //!< Maximum input channels supported by device.
		uint32_t duplexChannels = 0; //!< Maximum simultaneous input/output channels supported by device.
		bool isDefaultOutput = false; //!< true if this is the default output device.
		bool isDefaultInput = false; //!< true if this is the default input device.
		std::vector<uint32_t> sampleRates; //!< Supported sample rates (queried from list of standard rates).
		airtaudio::format nativeFormats = 0; //!< Bit mask of supported data formats.
		// Default constructor: all members are already zeroed by the
		// in-class initializers above, so there is nothing left to do.
		DeviceInfo(void) {}
};
};
#endif

155
airtaudio/Interface.cpp Normal file
View File

@ -0,0 +1,155 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#include <airtaudio/Interface.h>
#include <iostream>
std::vector<airtaudio::api::type> airtaudio::Interface::getCompiledApi(void) {
	std::vector<airtaudio::api::type> compiledApis;
	// The order of the entries below is significant: it fixes the API
	// probing order used by the Interface constructor when the caller
	// does not request a specific API.
#if defined(__UNIX_JACK__)
	compiledApis.push_back(airtaudio::api::UNIX_JACK);
#endif
#if defined(__LINUX_ALSA__)
	compiledApis.push_back(airtaudio::api::LINUX_ALSA);
#endif
#if defined(__LINUX_PULSE__)
	compiledApis.push_back(airtaudio::api::LINUX_PULSE);
#endif
#if defined(__LINUX_OSS__)
	compiledApis.push_back(airtaudio::api::LINUX_OSS);
#endif
#if defined(__WINDOWS_ASIO__)
	compiledApis.push_back(airtaudio::api::WINDOWS_ASIO);
#endif
#if defined(__WINDOWS_DS__)
	compiledApis.push_back(airtaudio::api::WINDOWS_DS);
#endif
#if defined(__MACOSX_CORE__)
	compiledApis.push_back(airtaudio::api::MACOSX_CORE);
#endif
#if defined(__AIRTAUDIO_DUMMY__)
	compiledApis.push_back(airtaudio::api::RTAUDIO_DUMMY);
#endif
	return compiledApis;
}
/**
 * @brief Destroy the current API backend (if any) and instantiate the one
 *        matching @p _api, when support for it was compiled in.
 * @param _api Requested audio API identifier.
 * @note On exit m_rtapi is either a freshly allocated backend or NULL when
 *       the requested API has no compiled support.
 */
void airtaudio::Interface::openRtApi(airtaudio::api::type _api) {
	if (m_rtapi != NULL) {
		delete m_rtapi;
		m_rtapi = NULL;
	}
#if defined(__UNIX_JACK__)
	if (_api == airtaudio::api::UNIX_JACK) {
		m_rtapi = new airtaudio::api::Jack();
	}
#endif
#if defined(__LINUX_ALSA__)
	if (_api == airtaudio::api::LINUX_ALSA) {
		m_rtapi = new airtaudio::api::Alsa();
	}
#endif
#if defined(__LINUX_PULSE__)
	if (_api == airtaudio::api::LINUX_PULSE) {
		m_rtapi = new airtaudio::api::Pulse();
	}
#endif
#if defined(__LINUX_OSS__)
	if (_api == airtaudio::api::LINUX_OSS) {
		m_rtapi = new airtaudio::api::Oss();
	}
#endif
#if defined(__WINDOWS_ASIO__)
	if (_api == airtaudio::api::WINDOWS_ASIO) {
		m_rtapi = new airtaudio::api::Asio();
	}
#endif
#if defined(__WINDOWS_DS__)
	if (_api == airtaudio::api::WINDOWS_DS) {
		m_rtapi = new airtaudio::api::Ds();
	}
#endif
#if defined(__MACOSX_CORE__)
	if (_api == airtaudio::api::MACOSX_CORE) {
		m_rtapi = new airtaudio::api::Core();
	}
#endif
#if defined(__AIRTAUDIO_DUMMY__)
	// Fix: was 'rtaudio::RTAUDIO_DUMMY', a namespace that does not exist in
	// this codebase (compile error whenever __AIRTAUDIO_DUMMY__ is defined).
	if (_api == airtaudio::api::RTAUDIO_DUMMY) {
		m_rtapi = new airtaudio::api::Dummy();
	}
#endif
}
/**
 * @brief Construct the interface, selecting an audio API backend.
 * @param _api Explicit API to open, or airtaudio::api::UNSPECIFIED to probe
 *             every compiled API in getCompiledApi() order and keep the
 *             first one that reports at least one device.
 */
airtaudio::Interface::Interface(airtaudio::api::type _api) :
  m_rtapi(NULL) {
	if (_api != airtaudio::api::UNSPECIFIED) {
		// Attempt to open the specified API.
		openRtApi(_api);
		if (m_rtapi != NULL) {
			return;
		}
		// No compiled support for specified API value. Issue a debug
		// warning and continue as if no API was specified.
		std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
	}
	// Iterate through the compiled APIs and return as soon as we find
	// one with at least one device or we reach the end of the list.
	std::vector<airtaudio::api::type> apis = getCompiledApi();
	for (uint32_t iii=0; iii<apis.size(); ++iii) {
		openRtApi(apis[iii]);
		// Fix: guard against a NULL m_rtapi before dereferencing it —
		// openRtApi() can leave it NULL if construction is not possible.
		if (    m_rtapi != NULL
		     && m_rtapi->getDeviceCount() != 0) {
			break;
		}
	}
	if (m_rtapi != NULL) {
		return;
	}
	// It should not be possible to get here because the preprocessor
	// definition __AIRTAUDIO_DUMMY__ is automatically defined if no
	// API-specific definitions are passed to the compiler. But just in
	// case something weird happens, we'll print out an error message.
	// TODO : Set it in error ...
	std::cout << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
}
airtaudio::Interface::~Interface(void) {
	// 'delete' on a NULL pointer is a no-op, so no guard is required;
	// destroying the backend also releases anything it still owns.
	delete m_rtapi;
	m_rtapi = NULL;
}
/**
 * @brief Forward an openStream() request to the selected API backend.
 *        Silently does nothing when no backend is available.
 */
void airtaudio::Interface::openStream(airtaudio::StreamParameters* _outputParameters,
                                      airtaudio::StreamParameters* _inputParameters,
                                      airtaudio::format _format,
                                      uint32_t _sampleRate,
                                      uint32_t* _bufferFrames,
                                      airtaudio::AirTAudioCallback _callback,
                                      void* _userData,
                                      airtaudio::StreamOptions* _options,
                                      airtaudio::AirTAudioErrorCallback _errorCallback) {
	if (m_rtapi == NULL) {
		return;
	}
	m_rtapi->openStream(_outputParameters,
	                    _inputParameters,
	                    _format,
	                    _sampleRate,
	                    _bufferFrames,
	                    _callback,
	                    _userData,
	                    _options,
	                    _errorCallback);
}

311
airtaudio/Interface.h Normal file
View File

@ -0,0 +1,311 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_RTAUDIO_H__
#define __AIRTAUDIO_RTAUDIO_H__
#include <string>
#include <vector>
#include <airtaudio/base.h>
#include <airtaudio/int24_t.h>
#include <airtaudio/CallbackInfo.h>
#include <airtaudio/Api.h>
#include <airtaudio/api/Alsa.h>
#include <airtaudio/api/Asio.h>
#include <airtaudio/api/Core.h>
#include <airtaudio/api/Ds.h>
#include <airtaudio/api/Dummy.h>
#include <airtaudio/api/Jack.h>
#include <airtaudio/api/Oss.h>
#include <airtaudio/api/Pulse.h>
namespace airtaudio {
/**
* @brief airtaudio::Interface class declaration.
*
* airtaudio::Interface is a "controller" used to select an available audio i/o
* interface. It presents a common API for the user to call but all
* functionality is implemented by the class RtApi and its
* subclasses. RtAudio creates an instance of an RtApi subclass
* based on the user's API choice. If no choice is made, RtAudio
* attempts to make a "logical" API selection.
*/
class Interface {
	protected:
		airtaudio::Api *m_rtapi; //!< Selected API backend; NULL when none could be opened.
	public:
		/**
		 * @brief A static function to determine the current airtaudio version.
		 */
		static std::string getVersion(void) {
			return airtaudio::VERSION;
		}
		/**
		 * @brief A static function to determine the available compiled audio APIs.
		 *
		 * The values returned in the std::vector can be compared against
		 * the enumerated list values. Note that there can be more than one
		 * API compiled for certain operating systems.
		 */
		static std::vector<airtaudio::api::type> getCompiledApi(void);
		/**
		 * @brief The class constructor.
		 *
		 * The constructor performs minor initialization tasks. No exceptions
		 * can be thrown.
		 *
		 * If no API argument is specified and multiple API support has been
		 * compiled, the default order of use is JACK, ALSA, OSS (Linux
		 * systems) and ASIO, DS (Windows systems).
		 */
		Interface(airtaudio::api::type _api = airtaudio::api::UNSPECIFIED);
		/**
		 * @brief The destructor.
		 *
		 * If a stream is running or open, it will be stopped and closed
		 * automatically.
		 */
		~Interface(void);
		/**
		 * @return the audio API specifier for the current instance of airtaudio,
		 *         or UNSPECIFIED when no backend is available.
		 */
		airtaudio::api::type getCurrentApi(void) {
			if (m_rtapi == NULL) {
				return airtaudio::api::UNSPECIFIED;
			}
			return m_rtapi->getCurrentApi();
		}
		/**
		 * @brief A public function that queries for the number of audio devices available.
		 *
		 * This function performs a system query of available devices each time it
		 * is called, thus supporting devices connected \e after instantiation. If
		 * a system error occurs during processing, a warning will be issued.
		 */
		uint32_t getDeviceCount(void) {
			if (m_rtapi == NULL) {
				return 0;
			}
			return m_rtapi->getDeviceCount();
		}
		/**
		 * @brief Any device integer between 0 and getDeviceCount() - 1 is valid.
		 * If an invalid argument is provided, an RtError (type = INVALID_USE)
		 * will be thrown. If a device is busy or otherwise unavailable, the
		 * structure member "probed" will have a value of "false" and all
		 * other members are undefined. If the specified device is the
		 * current default input or output device, the corresponding
		 * "isDefault" member will have a value of "true".
		 *
		 * @return An airtaudio::DeviceInfo structure for a specified device number.
		 */
		airtaudio::DeviceInfo getDeviceInfo(uint32_t _device) {
			if (m_rtapi == NULL) {
				return airtaudio::DeviceInfo();
			}
			return m_rtapi->getDeviceInfo(_device);
		}
		/**
		 * @brief A function that returns the index of the default output device.
		 *
		 * If the underlying audio API does not provide a "default
		 * device", or if no devices are available, the return value will be
		 * 0. Note that this is a valid device identifier and it is the
		 * client's responsibility to verify that a device is available
		 * before attempting to open a stream.
		 */
		uint32_t getDefaultOutputDevice(void) {
			if (m_rtapi == NULL) {
				return 0;
			}
			return m_rtapi->getDefaultOutputDevice();
		}
		/**
		 * @brief A function that returns the index of the default input device.
		 *
		 * If the underlying audio API does not provide a "default
		 * device", or if no devices are available, the return value will be
		 * 0. Note that this is a valid device identifier and it is the
		 * client's responsibility to verify that a device is available
		 * before attempting to open a stream.
		 */
		uint32_t getDefaultInputDevice(void) {
			if (m_rtapi == NULL) {
				return 0;
			}
			return m_rtapi->getDefaultInputDevice();
		}
		/**
		 * @brief A public function for opening a stream with the specified parameters.
		 *
		 * An RtError (type = SYSTEM_ERROR) is thrown if a stream cannot be
		 * opened with the specified parameters or an error occurs during
		 * processing. An RtError (type = INVALID_USE) is thrown if any
		 * invalid device ID or channel number parameters are specified.
		 * @param _outputParameters Specifies output stream parameters to use
		 * when opening a stream, including a device ID, number of channels,
		 * and starting channel number. For input-only streams, this
		 * argument should be NULL. The device ID is an index value between
		 * 0 and getDeviceCount() - 1.
		 * @param _inputParameters Specifies input stream parameters to use
		 * when opening a stream, including a device ID, number of channels,
		 * and starting channel number. For output-only streams, this
		 * argument should be NULL. The device ID is an index value between
		 * 0 and getDeviceCount() - 1.
		 * @param _format An airtaudio::format specifying the desired sample data format.
		 * @param _sampleRate The desired sample rate (sample frames per second).
		 * @param *_bufferFrames A pointer to a value indicating the desired
		 * internal buffer size in sample frames. The actual value
		 * used by the device is returned via the same pointer. A
		 * value of zero can be specified, in which case the lowest
		 * allowable value is determined.
		 * @param _callback A client-defined function that will be invoked
		 * when input data is available and/or output data is needed.
		 * @param _userData An optional pointer to data that can be accessed
		 * from within the callback function.
		 * @param _options An optional pointer to a structure containing various
		 * global stream options, including a list of OR'ed airtaudio::streamFlags
		 * and a suggested number of stream buffers that can be used to
		 * control stream latency. More buffers typically result in more
		 * robust performance, though at a cost of greater latency. If a
		 * value of zero is specified, a system-specific median value is
		 * chosen. If the airtaudio_MINIMIZE_LATENCY flag bit is set, the
		 * lowest allowable value is used. The actual value used is
		 * returned via the structure argument. The parameter is API dependent.
		 * @param _errorCallback A client-defined function that will be invoked
		 * when an error has occured.
		 */
		void openStream(airtaudio::StreamParameters *_outputParameters,
		                airtaudio::StreamParameters *_inputParameters,
		                airtaudio::format _format,
		                uint32_t _sampleRate,
		                uint32_t *_bufferFrames,
		                airtaudio::AirTAudioCallback _callback,
		                void *_userData = NULL,
		                airtaudio::StreamOptions *_options = NULL,
		                airtaudio::AirTAudioErrorCallback _errorCallback = NULL);
		/**
		 * @brief A function that closes a stream and frees any associated stream memory.
		 *
		 * If a stream is not open, this function issues a warning and
		 * returns (no exception is thrown).
		 */
		void closeStream(void) {
			if (m_rtapi == NULL) {
				return;
			}
			m_rtapi->closeStream();
		}
		/**
		 * @brief A function that starts a stream.
		 *
		 * An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
		 * during processing. An RtError (type = INVALID_USE) is thrown if a
		 * stream is not open. A warning is issued if the stream is already
		 * running.
		 */
		void startStream(void) {
			if (m_rtapi == NULL) {
				return;
			}
			m_rtapi->startStream();
		}
		/**
		 * @brief Stop a stream, allowing any samples remaining in the output queue to be played.
		 *
		 * An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
		 * during processing. An RtError (type = INVALID_USE) is thrown if a
		 * stream is not open. A warning is issued if the stream is already
		 * stopped.
		 */
		void stopStream(void) {
			if (m_rtapi == NULL) {
				return;
			}
			m_rtapi->stopStream();
		}
		/**
		 * @brief Stop a stream, discarding any samples remaining in the input/output queue.
		 * An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
		 * during processing. An RtError (type = INVALID_USE) is thrown if a
		 * stream is not open. A warning is issued if the stream is already
		 * stopped.
		 */
		void abortStream(void) {
			if (m_rtapi == NULL) {
				return;
			}
			m_rtapi->abortStream();
		}
		/**
		 * @return true if a stream is open and false if not.
		 */
		bool isStreamOpen(void) const {
			if (m_rtapi == NULL) {
				return false;
			}
			return m_rtapi->isStreamOpen();
		}
		/**
		 * @return true if the stream is running and false if it is stopped or not open.
		 */
		bool isStreamRunning(void) const {
			if (m_rtapi == NULL) {
				return false;
			}
			return m_rtapi->isStreamRunning();
		}
		/**
		 * @brief If a stream is not open, an RtError (type = INVALID_USE) will be thrown.
		 * @return the number of elapsed seconds since the stream was started.
		 */
		double getStreamTime(void) {
			if (m_rtapi == NULL) {
				return 0.0;
			}
			return m_rtapi->getStreamTime();
		}
		/**
		 * @brief The stream latency refers to delay in audio input and/or output
		 * caused by internal buffering by the audio system and/or hardware.
		 * For duplex streams, the returned value will represent the sum of
		 * the input and output latencies. If a stream is not open, an
		 * RtError (type = INVALID_USE) will be thrown. If the API does not
		 * report latency, the return value will be zero.
		 * @return The internal stream latency in sample frames.
		 */
		long getStreamLatency(void) {
			if (m_rtapi == NULL) {
				return 0;
			}
			return m_rtapi->getStreamLatency();
		}
		/**
		 * @brief On some systems, the sample rate used may be slightly different
		 * than that specified in the stream parameters. If a stream is not
		 * open, an RtError (type = INVALID_USE) will be thrown.
		 * @return Returns actual sample rate in use by the stream.
		 */
		uint32_t getStreamSampleRate(void) {
			if (m_rtapi == NULL) {
				return 0;
			}
			return m_rtapi->getStreamSampleRate();
		}
		/**
		 * @brief Specify whether warning messages should be printed to stderr.
		 */
		void showWarnings(bool _value = true) {
			// Fix: every other wrapper guards against a NULL backend, but this
			// one dereferenced m_rtapi unconditionally (crash when no API opened).
			if (m_rtapi == NULL) {
				return;
			}
			m_rtapi->showWarnings(_value);
		}
	protected:
		void openRtApi(airtaudio::api::type _api);
};
};
#endif

87
airtaudio/StreamOptions.h Normal file
View File

@ -0,0 +1,87 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_STREAM_OPTION_H__
#define __AIRTAUDIO_STREAM_OPTION_H__
namespace airtaudio {
/**
* @brief The structure for specifying stream options.
*
* The following flags can be OR'ed together to allow a client to
* make changes to the default stream behavior:
*
* - \e RTAUDIO_NONINTERLEAVED: Use non-interleaved buffers (default = interleaved).
* - \e RTAUDIO_MINIMIZE_LATENCY: Attempt to set stream parameters for lowest possible latency.
* - \e RTAUDIO_HOG_DEVICE: Attempt grab device for exclusive use.
* - \e RTAUDIO_SCHEDULE_REALTIME: Attempt to select realtime scheduling for callback thread.
* - \e RTAUDIO_ALSA_USE_DEFAULT: Use the "default" PCM device (ALSA only).
*
* By default, RtAudio streams pass and receive audio data from the
* client in an interleaved format. By passing the
* RTAUDIO_NONINTERLEAVED flag to the openStream() function, audio
* data will instead be presented in non-interleaved buffers. In
* this case, each buffer argument in the RtAudioCallback function
* will point to a single array of data, with \c nFrames samples for
* each channel concatenated back-to-back. For example, the first
* sample of data for the second channel would be located at index \c
* nFrames (assuming the \c buffer pointer was recast to the correct
* data type for the stream).
*
* Certain audio APIs offer a number of parameters that influence the
* I/O latency of a stream. By default, RtAudio will attempt to set
* these parameters internally for robust (glitch-free) performance
* (though some APIs, like Windows Direct Sound, make this difficult).
* By passing the RTAUDIO_MINIMIZE_LATENCY flag to the openStream()
* function, internal stream settings will be influenced in an attempt
* to minimize stream latency, though possibly at the expense of stream
* performance.
*
* If the RTAUDIO_HOG_DEVICE flag is set, RtAudio will attempt to
* open the input and/or output stream device(s) for exclusive use.
* Note that this is not possible with all supported audio APIs.
*
* If the RTAUDIO_SCHEDULE_REALTIME flag is set, RtAudio will attempt
* to select realtime scheduling (round-robin) for the callback thread.
* The \c priority parameter will only be used if the RTAUDIO_SCHEDULE_REALTIME
* flag is set. It defines the thread's realtime priority.
*
* If the RTAUDIO_ALSA_USE_DEFAULT flag is set, RtAudio will attempt to
* open the "default" PCM device when using the ALSA API. Note that this
* will override any specified input or output device id.
*
* The \c numberOfBuffers parameter can be used to control stream
* latency in the Windows DirectSound, Linux OSS, and Linux Alsa APIs
* only. A value of two is usually the smallest allowed. Larger
* numbers can potentially result in more robust stream performance,
* though likely at the cost of stream latency. The value set by the
* user is replaced during execution of the RtAudio::openStream()
* function by the value actually used by the system.
*
* The \c streamName parameter can be used to set the client name
* when using the Jack API. By default, the client name is set to
* RtApiJack. However, if you wish to create multiple instances of
* RtAudio with Jack, each instance must have a unique client name.
*/
class StreamOptions {
	public:
		airtaudio::streamFlags flags = 0; //!< A bit-mask of stream flags (RTAUDIO_NONINTERLEAVED, RTAUDIO_MINIMIZE_LATENCY, RTAUDIO_HOG_DEVICE, RTAUDIO_ALSA_USE_DEFAULT).
		uint32_t numberOfBuffers = 0; //!< Number of stream buffers.
		std::string streamName; //!< A stream name (currently used only in Jack).
		int32_t priority = 0; //!< Scheduling priority of callback thread (only used with flag RTAUDIO_SCHEDULE_REALTIME).
		// Default constructor: the in-class initializers above already zero
		// every member, so the body is intentionally empty.
		StreamOptions(void) {}
};
};
#endif

View File

@ -0,0 +1,30 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_STREAM_PARAMETER_H__
#define __AIRTAUDIO_STREAM_PARAMETER_H__
namespace airtaudio {
/**
 * @brief The structure for specifying input or output stream parameters.
 */
class StreamParameters {
	public:
		uint32_t deviceId = 0; //!< Device index (0 to getDeviceCount() - 1).
		uint32_t nChannels = 0; //!< Number of channels.
		uint32_t firstChannel = 0; //!< First channel index on device (default = 0).
		// Default constructor: members are zeroed by the initializers above.
		StreamParameters(void) {}
};
};
#endif

1184
airtaudio/api/Alsa.cpp Normal file

File diff suppressed because it is too large Load Diff

47
airtaudio/api/Alsa.h Normal file
View File

@ -0,0 +1,47 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_ALSA_H__) && defined(__LINUX_ALSA__)
#define __AIRTAUDIO_API_ALSA_H__
namespace airtaudio {
namespace api {
//! ALSA backend: implements the airtaudio::Api interface on top of the
//! Linux ALSA PCM API. Only compiled when __LINUX_ALSA__ is defined.
class Alsa: public airtaudio::Api {
	public:
		Alsa();
		~Alsa();
		//! @return the API identifier for this backend (always LINUX_ALSA).
		airtaudio::api::type getCurrentApi(void) {
			return airtaudio::api::LINUX_ALSA;
		}
		//! @return the number of ALSA devices found on the system.
		uint32_t getDeviceCount(void);
		//! @return the probed capabilities of device number _device.
		airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
		void closeStream(void);
		void startStream(void);
		void stopStream(void);
		void abortStream(void);
		// This function is intended for internal use only. It must be
		// public because it is called by the internal callback handler,
		// which is not a member of RtAudio. External use of this function
		// will most likely produce highly undesirable results!
		void callbackEvent(void);
	private:
		// Cached device capabilities (filled by saveDeviceInfo()).
		std::vector<airtaudio::DeviceInfo> m_devices;
		//! Snapshot the capabilities of all devices into m_devices.
		void saveDeviceInfo(void);
		//! Api-required probe: attempt to open _device with the given
		//! parameters; returns SUCCESS/FAILURE (see airtaudio::Api docs).
		bool probeDeviceOpen(uint32_t _device,
		airtaudio::api::StreamMode _mode,
		uint32_t _channels,
		uint32_t _firstChannel,
		uint32_t _sampleRate,
		airtaudio::format _format,
		uint32_t *_bufferSize,
		airtaudio::StreamOptions *_options);
};
};
};
#endif

979
airtaudio/api/Asio.cpp Normal file
View File

@ -0,0 +1,979 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if defined(__WINDOWS_ASIO__) // ASIO API on Windows
#include <airtaudio/Interface.h>
// The ASIO API is designed around a callback scheme, so this
// implementation is similar to that used for OS-X CoreAudio and Linux
// Jack. The primary constraint with ASIO is that it only allows
// access to a single driver at a time. Thus, it is not possible to
// have more than one simultaneous RtAudio stream.
//
// This implementation also requires a number of external ASIO files
// and a few global variables. The ASIO callback scheme does not
// allow for the passing of user data, so we must create a global
// pointer to our callbackInfo structure.
//
// On unix systems, we make use of a pthread condition variable.
// Since there is no equivalent in Windows, I hacked something based
// on information found in
// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
#include "asiosys.h"
#include "asio.h"
#include "iasiothiscallresolver.h"
#include "asiodrivers.h"
#include <cmath>
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
static CallbackInfo *asioCallbackInfo;
static bool asioXRun;
struct AsioHandle {
int32_t drainCounter; // Tracks callback counts when draining
bool internalDrain; // Indicates if stop is initiated from callback or not.
ASIOBufferInfo *bufferInfos;
HANDLE condition;
AsioHandle()
:drainCounter(0), internalDrain(false), bufferInfos(0) {}
};
// Function declarations (definitions at end of section)
static const char* getAsioErrorString(ASIOError result);
static void sampleRateChanged(ASIOSampleRate sRate);
static long asioMessages(long selector, long value, void* message, double* opt);
/**
 * @brief Construct the ASIO backend: initialize COM (apartment threaded)
 *        and reset the global driver list.
 */
airtaudio::api::Asio::Asio(void) {
	// ASIO cannot run on a multi-threaded appartment. You can call
	// CoInitialize beforehand, but it must be for appartment threading
	// (in which case, CoInitilialize will return S_FALSE here).
	m_coInitialized = false;
	HRESULT hr = CoInitialize(NULL);
	if (FAILED(hr)) {
		m_errorText = "airtaudio::api::Asio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
		error(airtaudio::errorWarning);
	} else {
		// Fix: only record success when CoInitialize actually succeeded.
		// The original set m_coInitialized = true unconditionally, causing
		// the destructor to call CoUninitialize after a failed init.
		m_coInitialized = true;
	}
	drivers.removeCurrentDriver();
	driverInfo.asioVersion = 2;
	// See note in DirectSound implementation about GetDesktopWindow().
	driverInfo.sysRef = GetForegroundWindow();
}
airtaudio::api::Asio::~Asio(void) {
	// Shut down any open stream first, then release COM if we own it.
	if (m_stream.state != STREAM_CLOSED) {
		closeStream();
	}
	if (m_coInitialized) {
		CoUninitialize();
	}
}
// Number of ASIO drivers installed on the system.
uint32_t airtaudio::api::Asio::getDeviceCount(void) {
	return static_cast<uint32_t>(drivers.asioGetNumDev());
}
rtaudio::DeviceInfo airtaudio::api::Asio::getDeviceInfo(uint32_t device)
{
rtaudio::DeviceInfo info;
info.probed = false;
// Get device ID
uint32_t nDevices = getDeviceCount();
if (nDevices == 0) {
m_errorText = "airtaudio::api::Asio::getDeviceInfo: no devices found!";
error(airtaudio::errorInvalidUse);
return info;
}
if (device >= nDevices) {
m_errorText = "airtaudio::api::Asio::getDeviceInfo: device ID is invalid!";
error(airtaudio::errorInvalidUse);
return info;
}
// If a stream is already open, we cannot probe other devices. Thus, use the saved results.
if (m_stream.state != STREAM_CLOSED) {
if (device >= m_devices.size()) {
m_errorText = "airtaudio::api::Asio::getDeviceInfo: device ID was not present before stream was opened.";
error(airtaudio::errorWarning);
return info;
}
return m_devices[ device ];
}
char driverName[32];
ASIOError result = drivers.asioGetDriverName((int) device, driverName, 32);
if (result != ASE_OK) {
m_errorStream << "airtaudio::api::Asio::getDeviceInfo: unable to get driver name (" << getAsioErrorString(result) << ").";
m_errorText = m_errorStream.str();
error(airtaudio::errorWarning);
return info;
}
info.name = driverName;
if (!drivers.loadDriver(driverName)) {
m_errorStream << "airtaudio::api::Asio::getDeviceInfo: unable to load driver (" << driverName << ").";
m_errorText = m_errorStream.str();
error(airtaudio::errorWarning);
return info;
}
result = ASIOInit(&driverInfo);
if (result != ASE_OK) {
m_errorStream << "airtaudio::api::Asio::getDeviceInfo: error (" << getAsioErrorString(result) << ") initializing driver (" << driverName << ").";
m_errorText = m_errorStream.str();
error(airtaudio::errorWarning);
return info;
}
// Determine the device channel information.
long inputChannels, outputChannels;
result = ASIOGetChannels(&inputChannels, &outputChannels);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
m_errorStream << "airtaudio::api::Asio::getDeviceInfo: error (" << getAsioErrorString(result) << ") getting channel count (" << driverName << ").";
m_errorText = m_errorStream.str();
error(airtaudio::errorWarning);
return info;
}
info.outputChannels = outputChannels;
info.inputChannels = inputChannels;
if (info.outputChannels > 0 && info.inputChannels > 0)
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// Determine the supported sample rates.
info.sampleRates.clear();
for (uint32_t i=0; i<MAX_SAMPLE_RATES; i++) {
result = ASIOCanSampleRate((ASIOSampleRate) SAMPLE_RATES[i]);
if (result == ASE_OK)
info.sampleRates.push_back(SAMPLE_RATES[i]);
}
// Determine supported data types ... just check first channel and assume rest are the same.
ASIOChannelInfo channelInfo;
channelInfo.channel = 0;
channelInfo.isInput = true;
if (info.inputChannels <= 0) channelInfo.isInput = false;
result = ASIOGetChannelInfo(&channelInfo);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
m_errorStream << "airtaudio::api::Asio::getDeviceInfo: error (" << getAsioErrorString(result) << ") getting driver channel info (" << driverName << ").";
m_errorText = m_errorStream.str();
error(airtaudio::errorWarning);
return info;
}
info.nativeFormats = 0;
if (channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB)
info.nativeFormats |= RTAUDIO_SINT16;
else if (channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB)
info.nativeFormats |= RTAUDIO_SINT32;
else if (channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB)
info.nativeFormats |= RTAUDIO_FLOAT32;
else if (channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB)
info.nativeFormats |= RTAUDIO_FLOAT64;
else if (channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB)
info.nativeFormats |= RTAUDIO_SINT24;
if (info.outputChannels > 0)
if (getDefaultOutputDevice() == device) info.isDefaultOutput = true;
if (info.inputChannels > 0)
if (getDefaultInputDevice() == device) info.isDefaultInput = true;
info.probed = true;
drivers.removeCurrentDriver();
return info;
}
static void bufferSwitch(long index, ASIOBool processNow)
{
RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
object->callbackEvent(index);
}
void airtaudio::api::Asio::saveDeviceInfo(void)
{
m_devices.clear();
uint32_t nDevices = getDeviceCount();
m_devices.resize(nDevices);
for (uint32_t i=0; i<nDevices; i++)
m_devices[i] = getDeviceInfo(i);
}
bool airtaudio::api::Asio::probeDeviceOpen(uint32_t device, StreamMode mode, uint32_t channels,
uint32_t firstChannel, uint32_t sampleRate,
rtaudio::format format, uint32_t *bufferSize,
rtaudio::StreamOptions *options)
{
// For ASIO, a duplex stream MUST use the same driver.
if (mode == INPUT && m_stream.mode == OUTPUT && m_stream.device[0] != device) {
m_errorText = "airtaudio::api::Asio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
return FAILURE;
}
char driverName[32];
ASIOError result = drivers.asioGetDriverName((int) device, driverName, 32);
if (result != ASE_OK) {
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString(result) << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Only load the driver once for duplex stream.
if (mode != INPUT || m_stream.mode != OUTPUT) {
// The getDeviceInfo() function will not work when a stream is open
// because ASIO does not allow multiple devices to run at the same
// time. Thus, we'll probe the system before opening a stream and
// save the results for use by getDeviceInfo().
this->saveDeviceInfo();
if (!drivers.loadDriver(driverName)) {
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: unable to load driver (" << driverName << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
result = ASIOInit(&driverInfo);
if (result != ASE_OK) {
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: error (" << getAsioErrorString(result) << ") initializing driver (" << driverName << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
}
// Check the device channel count.
long inputChannels, outputChannels;
result = ASIOGetChannels(&inputChannels, &outputChannels);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: error (" << getAsioErrorString(result) << ") getting channel count (" << driverName << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
if ((mode == OUTPUT && (channels+firstChannel) > (uint32_t) outputChannels) ||
(mode == INPUT && (channels+firstChannel) > (uint32_t) inputChannels)) {
drivers.removeCurrentDriver();
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
m_stream.nDeviceChannels[mode] = channels;
m_stream.nUserChannels[mode] = channels;
m_stream.channelOffset[mode] = firstChannel;
// Verify the sample rate is supported.
result = ASIOCanSampleRate((ASIOSampleRate) sampleRate);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Get the current sample rate
ASIOSampleRate currentRate;
result = ASIOGetSampleRate(&currentRate);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Set the sample rate only if necessary
if (currentRate != sampleRate) {
result = ASIOSetSampleRate((ASIOSampleRate) sampleRate);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
}
// Determine the driver data type.
ASIOChannelInfo channelInfo;
channelInfo.channel = 0;
if (mode == OUTPUT) channelInfo.isInput = false;
else channelInfo.isInput = true;
result = ASIOGetChannelInfo(&channelInfo);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting data format.";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Assuming WINDOWS host is always little-endian.
m_stream.doByteSwap[mode] = false;
m_stream.userFormat = format;
m_stream.deviceFormat[mode] = 0;
if (channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB) {
m_stream.deviceFormat[mode] = RTAUDIO_SINT16;
if (channelInfo.type == ASIOSTInt16MSB) m_stream.doByteSwap[mode] = true;
}
else if (channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB) {
m_stream.deviceFormat[mode] = RTAUDIO_SINT32;
if (channelInfo.type == ASIOSTInt32MSB) m_stream.doByteSwap[mode] = true;
}
else if (channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB) {
m_stream.deviceFormat[mode] = RTAUDIO_FLOAT32;
if (channelInfo.type == ASIOSTFloat32MSB) m_stream.doByteSwap[mode] = true;
}
else if (channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB) {
m_stream.deviceFormat[mode] = RTAUDIO_FLOAT64;
if (channelInfo.type == ASIOSTFloat64MSB) m_stream.doByteSwap[mode] = true;
}
else if (channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB) {
m_stream.deviceFormat[mode] = RTAUDIO_SINT24;
if (channelInfo.type == ASIOSTInt24MSB) m_stream.doByteSwap[mode] = true;
}
if (m_stream.deviceFormat[mode] == 0) {
drivers.removeCurrentDriver();
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Set the buffer size. For a duplex stream, this will end up
// setting the buffer size based on the input constraints, which
// should be ok.
long minSize, maxSize, preferSize, granularity;
result = ASIOGetBufferSize(&minSize, &maxSize, &preferSize, &granularity);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting buffer size.";
m_errorText = m_errorStream.str();
return FAILURE;
}
if (*bufferSize < (uint32_t) minSize) *bufferSize = (uint32_t) minSize;
else if (*bufferSize > (uint32_t) maxSize) *bufferSize = (uint32_t) maxSize;
else if (granularity == -1) {
// Make sure bufferSize is a power of two.
int32_t log2_of_min_size = 0;
int32_t log2_of_max_size = 0;
for (uint32_t i = 0; i < sizeof(long) * 8; i++) {
if (minSize & ((long)1 << i)) log2_of_min_size = i;
if (maxSize & ((long)1 << i)) log2_of_max_size = i;
}
long min_delta = std::abs((long)*bufferSize - ((long)1 << log2_of_min_size));
int32_t min_delta_num = log2_of_min_size;
for (int32_t i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
long current_delta = std::abs((long)*bufferSize - ((long)1 << i));
if (current_delta < min_delta) {
min_delta = current_delta;
min_delta_num = i;
}
}
*bufferSize = ((uint32_t)1 << min_delta_num);
if (*bufferSize < (uint32_t) minSize) *bufferSize = (uint32_t) minSize;
else if (*bufferSize > (uint32_t) maxSize) *bufferSize = (uint32_t) maxSize;
}
else if (granularity != 0) {
// Set to an even multiple of granularity, rounding up.
*bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
}
if (mode == INPUT && m_stream.mode == OUTPUT && m_stream.bufferSize != *bufferSize) {
drivers.removeCurrentDriver();
m_errorText = "airtaudio::api::Asio::probeDeviceOpen: input/output buffersize discrepancy!";
return FAILURE;
}
m_stream.bufferSize = *bufferSize;
m_stream.nBuffers = 2;
if (options && options->flags & RTAUDIO_NONINTERLEAVED) m_stream.userInterleaved = false;
else m_stream.userInterleaved = true;
// ASIO always uses non-interleaved buffers.
m_stream.deviceInterleaved[mode] = false;
// Allocate, if necessary, our AsioHandle structure for the stream.
AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
if (handle == 0) {
try {
handle = new AsioHandle;
}
catch (std::bad_alloc&) {
//if (handle == NULL) {
drivers.removeCurrentDriver();
m_errorText = "airtaudio::api::Asio::probeDeviceOpen: error allocating AsioHandle memory.";
return FAILURE;
}
handle->bufferInfos = 0;
// Create a manual-reset event.
handle->condition = CreateEvent(NULL, // no security
TRUE, // manual-reset
FALSE, // non-signaled initially
NULL); // unnamed
m_stream.apiHandle = (void *) handle;
}
// Create the ASIO internal buffers. Since RtAudio sets up input
// and output separately, we'll have to dispose of previously
// created output buffers for a duplex stream.
long inputLatency, outputLatency;
if (mode == INPUT && m_stream.mode == OUTPUT) {
ASIODisposeBuffers();
if (handle->bufferInfos) free(handle->bufferInfos);
}
// Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
bool buffersAllocated = false;
uint32_t i, nChannels = m_stream.nDeviceChannels[0] + m_stream.nDeviceChannels[1];
handle->bufferInfos = (ASIOBufferInfo *) malloc(nChannels * sizeof(ASIOBufferInfo));
if (handle->bufferInfos == NULL) {
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
m_errorText = m_errorStream.str();
goto error;
}
ASIOBufferInfo *infos;
infos = handle->bufferInfos;
for (i=0; i<m_stream.nDeviceChannels[0]; i++, infos++) {
infos->isInput = ASIOFalse;
infos->channelNum = i + m_stream.channelOffset[0];
infos->buffers[0] = infos->buffers[1] = 0;
}
for (i=0; i<m_stream.nDeviceChannels[1]; i++, infos++) {
infos->isInput = ASIOTrue;
infos->channelNum = i + m_stream.channelOffset[1];
infos->buffers[0] = infos->buffers[1] = 0;
}
// Set up the ASIO callback structure and create the ASIO data buffers.
asioCallbacks.bufferSwitch = &bufferSwitch;
asioCallbacks.sampleRateDidChange = &sampleRateChanged;
asioCallbacks.asioMessage = &asioMessages;
asioCallbacks.bufferSwitchTimeInfo = NULL;
result = ASIOCreateBuffers(handle->bufferInfos, nChannels, m_stream.bufferSize, &asioCallbacks);
if (result != ASE_OK) {
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString(result) << ") creating buffers.";
m_errorText = m_errorStream.str();
goto error;
}
buffersAllocated = true;
// Set flags for buffer conversion.
m_stream.doConvertBuffer[mode] = false;
if (m_stream.userFormat != m_stream.deviceFormat[mode])
m_stream.doConvertBuffer[mode] = true;
if (m_stream.userInterleaved != m_stream.deviceInterleaved[mode] &&
m_stream.nUserChannels[mode] > 1)
m_stream.doConvertBuffer[mode] = true;
// Allocate necessary internal buffers
uint64_t bufferBytes;
bufferBytes = m_stream.nUserChannels[mode] * *bufferSize * formatBytes(m_stream.userFormat);
m_stream.userBuffer[mode] = (char *) calloc(bufferBytes, 1);
if (m_stream.userBuffer[mode] == NULL) {
m_errorText = "airtaudio::api::Asio::probeDeviceOpen: error allocating user buffer memory.";
goto error;
}
if (m_stream.doConvertBuffer[mode]) {
bool makeBuffer = true;
bufferBytes = m_stream.nDeviceChannels[mode] * formatBytes(m_stream.deviceFormat[mode]);
if (mode == INPUT) {
if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
if (bufferBytes <= bytesOut) makeBuffer = false;
}
}
if (makeBuffer) {
bufferBytes *= *bufferSize;
if (m_stream.deviceBuffer) free(m_stream.deviceBuffer);
m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_stream.deviceBuffer == NULL) {
m_errorText = "airtaudio::api::Asio::probeDeviceOpen: error allocating device buffer memory.";
goto error;
}
}
}
m_stream.sampleRate = sampleRate;
m_stream.device[mode] = device;
m_stream.state = STREAM_STOPPED;
asioCallbackInfo = &m_stream.callbackInfo;
m_stream.callbackInfo.object = (void *) this;
if (m_stream.mode == OUTPUT && mode == INPUT)
// We had already set up an output stream.
m_stream.mode = DUPLEX;
else
m_stream.mode = mode;
// Determine device latencies
result = ASIOGetLatencies(&inputLatency, &outputLatency);
if (result != ASE_OK) {
m_errorStream << "airtaudio::api::Asio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting latency.";
m_errorText = m_errorStream.str();
error(airtaudio::errorWarning); // warn but don't fail
}
else {
m_stream.latency[0] = outputLatency;
m_stream.latency[1] = inputLatency;
}
// Setup the buffer conversion information structure. We don't use
// buffers to do channel offsets, so we override that parameter
// here.
if (m_stream.doConvertBuffer[mode]) setConvertInfo(mode, 0);
return SUCCESS;
error:
if (buffersAllocated)
ASIODisposeBuffers();
drivers.removeCurrentDriver();
if (handle) {
CloseHandle(handle->condition);
if (handle->bufferInfos)
free(handle->bufferInfos);
delete handle;
m_stream.apiHandle = 0;
}
for (int32_t i=0; i<2; i++) {
if (m_stream.userBuffer[i]) {
free(m_stream.userBuffer[i]);
m_stream.userBuffer[i] = 0;
}
}
if (m_stream.deviceBuffer) {
free(m_stream.deviceBuffer);
m_stream.deviceBuffer = 0;
}
return FAILURE;
}
void airtaudio::api::Asio::closeStream()
{
if (m_stream.state == STREAM_CLOSED) {
m_errorText = "airtaudio::api::Asio::closeStream(): no open stream to close!";
error(airtaudio::errorWarning);
return;
}
if (m_stream.state == STREAM_RUNNING) {
m_stream.state = STREAM_STOPPED;
ASIOStop();
}
ASIODisposeBuffers();
drivers.removeCurrentDriver();
AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
if (handle) {
CloseHandle(handle->condition);
if (handle->bufferInfos)
free(handle->bufferInfos);
delete handle;
m_stream.apiHandle = 0;
}
for (int32_t i=0; i<2; i++) {
if (m_stream.userBuffer[i]) {
free(m_stream.userBuffer[i]);
m_stream.userBuffer[i] = 0;
}
}
if (m_stream.deviceBuffer) {
free(m_stream.deviceBuffer);
m_stream.deviceBuffer = 0;
}
m_stream.mode = UNINITIALIZED;
m_stream.state = STREAM_CLOSED;
}
bool stopThreadCalled = false;
void airtaudio::api::Asio::startStream()
{
verifyStream();
if (m_stream.state == STREAM_RUNNING) {
m_errorText = "airtaudio::api::Asio::startStream(): the stream is already running!";
error(airtaudio::errorWarning);
return;
}
AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
ASIOError result = ASIOStart();
if (result != ASE_OK) {
m_errorStream << "airtaudio::api::Asio::startStream: error (" << getAsioErrorString(result) << ") starting device.";
m_errorText = m_errorStream.str();
goto unlock;
}
handle->drainCounter = 0;
handle->internalDrain = false;
ResetEvent(handle->condition);
m_stream.state = STREAM_RUNNING;
asioXRun = false;
unlock:
stopThreadCalled = false;
if (result == ASE_OK) return;
error(airtaudio::errorSystemError);
}
void airtaudio::api::Asio::stopStream()
{
verifyStream();
if (m_stream.state == STREAM_STOPPED) {
m_errorText = "airtaudio::api::Asio::stopStream(): the stream is already stopped!";
error(airtaudio::errorWarning);
return;
}
AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
if (handle->drainCounter == 0) {
handle->drainCounter = 2;
WaitForSingleObject(handle->condition, INFINITE); // block until signaled
}
}
m_stream.state = STREAM_STOPPED;
ASIOError result = ASIOStop();
if (result != ASE_OK) {
m_errorStream << "airtaudio::api::Asio::stopStream: error (" << getAsioErrorString(result) << ") stopping device.";
m_errorText = m_errorStream.str();
}
if (result == ASE_OK) return;
error(airtaudio::errorSystemError);
}
/**
 * @brief Abort the running ASIO stream.
 *
 * Historically this skipped draining, but some drivers keep emitting sound
 * unless the device buffers are zeroed before disposal, so aborting now
 * delegates to the regular (draining) stopStream() path.
 */
void airtaudio::api::Asio::abortStream() {
	verifyStream();
	if (m_stream.state == STREAM_STOPPED) {
		m_errorText = "airtaudio::api::Asio::abortStream(): the stream is already stopped!";
		error(airtaudio::errorWarning);
		return;
	}
	stopStream();
}
// This function runs on a spawned thread when the user callback signals
// that the stream should be stopped or aborted. A separate thread is
// required because callbackEvent() must return before ASIOStop() can.
//
// FIX: the object pointer stored in callbackInfo.object is `this` of an
// airtaudio::api::Asio (see probeDeviceOpen); the old cast to the legacy
// RtAudio class name "RtApiAsio" does not exist in this codebase.
static unsigned __stdcall asioStopStream(void *ptr) {
	CallbackInfo *info = (CallbackInfo *) ptr;
	airtaudio::api::Asio *object = (airtaudio::api::Asio *) info->object;
	object->stopStream();
	_endthreadex(0);
	return 0;
}
/**
 * @brief Service one ASIO half-buffer: run the user callback, then
 * convert/byte-swap/copy audio between user buffers and the
 * non-interleaved ASIO device buffers, handling the drain/stop handshake.
 * @param bufferIndex Index of the ASIO half-buffer to fill/read.
 * @return SUCCESS normally; FAILURE if the stream is unexpectedly closed.
 */
bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
	if (m_stream.state == STREAM_STOPPED || m_stream.state == STREAM_STOPPING) return SUCCESS;
	if (m_stream.state == STREAM_CLOSED) {
		m_errorText = "airtaudio::api::Asio::callbackEvent(): the stream is closed ... this shouldn't happen!";
		error(airtaudio::errorWarning);
		return FAILURE;
	}
	CallbackInfo *info = (CallbackInfo *) &m_stream.callbackInfo;
	AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
	// Check if we were draining the stream and signal if finished.
	if (handle->drainCounter > 3) {
		m_stream.state = STREAM_STOPPING;
		if (handle->internalDrain == false)
			SetEvent(handle->condition);
		else { // spawn a thread to stop the stream
			unsigned threadId;
			m_stream.callbackInfo.thread = _beginthreadex(NULL, 0, &asioStopStream,
			                                              &m_stream.callbackInfo, 0, &threadId);
		}
		return SUCCESS;
	}
	// Invoke user callback to get fresh output data UNLESS we are
	// draining stream.
	if (handle->drainCounter == 0) {
		airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) info->callback;
		double streamTime = getStreamTime();
		// FIX: was "rtaudio::streamStatus", a stale namespace left over from
		// the RtAudio port; this codebase uses the airtaudio namespace.
		airtaudio::streamStatus status = 0;
		if (m_stream.mode != INPUT && asioXRun == true) {
			status |= RTAUDIO_OUTPUT_UNDERFLOW;
			asioXRun = false;
		}
		if (m_stream.mode != OUTPUT && asioXRun == true) {
			status |= RTAUDIO_INPUT_OVERFLOW;
			asioXRun = false;
		}
		int32_t cbReturnValue = callback(m_stream.userBuffer[0], m_stream.userBuffer[1],
		                                 m_stream.bufferSize, streamTime, status, info->userData);
		if (cbReturnValue == 2) {
			// Callback requested an immediate abort; stop from another thread.
			m_stream.state = STREAM_STOPPING;
			handle->drainCounter = 2;
			unsigned threadId;
			m_stream.callbackInfo.thread = _beginthreadex(NULL, 0, &asioStopStream,
			                                              &m_stream.callbackInfo, 0, &threadId);
			return SUCCESS;
		}
		else if (cbReturnValue == 1) {
			// Callback requested a drain: play out queued output first.
			handle->drainCounter = 1;
			handle->internalDrain = true;
		}
	}
	uint32_t nChannels, bufferBytes, i, j;
	nChannels = m_stream.nDeviceChannels[0] + m_stream.nDeviceChannels[1];
	if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
		bufferBytes = m_stream.bufferSize * formatBytes(m_stream.deviceFormat[0]);
		if (handle->drainCounter > 1) { // write zeros to the output stream
			for (i=0, j=0; i<nChannels; i++) {
				if (handle->bufferInfos[i].isInput != ASIOTrue)
					memset(handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes);
			}
		}
		else if (m_stream.doConvertBuffer[0]) {
			// Convert (and optionally byte-swap) from the user buffer into the
			// device layout, then scatter per-channel into the ASIO buffers.
			convertBuffer(m_stream.deviceBuffer, m_stream.userBuffer[0], m_stream.convertInfo[0]);
			if (m_stream.doByteSwap[0])
				byteSwapBuffer(m_stream.deviceBuffer,
				               m_stream.bufferSize * m_stream.nDeviceChannels[0],
				               m_stream.deviceFormat[0]);
			for (i=0, j=0; i<nChannels; i++) {
				if (handle->bufferInfos[i].isInput != ASIOTrue)
					memcpy(handle->bufferInfos[i].buffers[bufferIndex],
					       &m_stream.deviceBuffer[j++*bufferBytes], bufferBytes);
			}
		}
		else {
			if (m_stream.doByteSwap[0])
				byteSwapBuffer(m_stream.userBuffer[0],
				               m_stream.bufferSize * m_stream.nUserChannels[0],
				               m_stream.userFormat);
			for (i=0, j=0; i<nChannels; i++) {
				if (handle->bufferInfos[i].isInput != ASIOTrue)
					memcpy(handle->bufferInfos[i].buffers[bufferIndex],
					       &m_stream.userBuffer[0][bufferBytes*j++], bufferBytes);
			}
		}
		if (handle->drainCounter) {
			handle->drainCounter++;
			goto unlock;
		}
	}
	if (m_stream.mode == INPUT || m_stream.mode == DUPLEX) {
		bufferBytes = m_stream.bufferSize * formatBytes(m_stream.deviceFormat[1]);
		if (m_stream.doConvertBuffer[1]) {
			// Always interleave ASIO input data.
			for (i=0, j=0; i<nChannels; i++) {
				if (handle->bufferInfos[i].isInput == ASIOTrue)
					memcpy(&m_stream.deviceBuffer[j++*bufferBytes],
					       handle->bufferInfos[i].buffers[bufferIndex],
					       bufferBytes);
			}
			if (m_stream.doByteSwap[1])
				byteSwapBuffer(m_stream.deviceBuffer,
				               m_stream.bufferSize * m_stream.nDeviceChannels[1],
				               m_stream.deviceFormat[1]);
			convertBuffer(m_stream.userBuffer[1], m_stream.deviceBuffer, m_stream.convertInfo[1]);
		}
		else {
			for (i=0, j=0; i<nChannels; i++) {
				if (handle->bufferInfos[i].isInput == ASIOTrue) {
					memcpy(&m_stream.userBuffer[1][bufferBytes*j++],
					       handle->bufferInfos[i].buffers[bufferIndex],
					       bufferBytes);
				}
			}
			if (m_stream.doByteSwap[1])
				byteSwapBuffer(m_stream.userBuffer[1],
				               m_stream.bufferSize * m_stream.nUserChannels[1],
				               m_stream.userFormat);
		}
	}
unlock:
	// The following call was suggested by Malte Clasen. While the API
	// documentation indicates it should not be required, some device
	// drivers apparently do not function correctly without it.
	ASIOOutputReady();
	// FIX: was "RtApi::tickStreamTime()" — the base class in this codebase
	// is airtaudio::Api.
	airtaudio::Api::tickStreamTime();
	return SUCCESS;
}
// Driver notification that the sample rate changed. The ASIO documentation
// says this usually only happens during external sync; audio processing is
// not stopped by the driver and the actual rate may not even have changed
// (it may only be the rate *status* of an AES/EBU or S/PDIF digital input).
// We react by stopping the stream, since the opened stream's rate no longer
// matches the device.
static void sampleRateChanged(ASIOSampleRate sRate)
{
	// NOTE(review): "RtApi" and "RtError" are leftovers from the original
	// RtAudio code; the rest of this port uses airtaudio::Api and the
	// error()-code mechanism — confirm these types still exist here.
	RtApi *object = (RtApi *) asioCallbackInfo->object;
	try {
		object->stopStream();
	}
	catch (RtError &exception) {
		// Stopping failed; report and bail without the "stopped" notice below.
		std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
		return;
	}
	std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
}
// Driver-to-host message dispatcher required by the ASIO callback table.
// Returns non-zero when the request/selector is understood and accepted.
static long asioMessages(long selector, long value, void* message, double* opt) {
	long ret = 0;
	switch (selector) {
		case kAsioSelectorSupported:
			// Advertise which of the selectors below we handle. The last
			// three were added with ASIO 2.0 and are optional to support.
			if (   value == kAsioResetRequest
			    || value == kAsioEngineVersion
			    || value == kAsioResyncRequest
			    || value == kAsioLatenciesChanged
			    || value == kAsioSupportsTimeInfo
			    || value == kAsioSupportsTimeCode
			    || value == kAsioSupportsInputMonitor) {
				ret = 1L;
			}
			break;
		case kAsioResetRequest:
			// The driver cannot be reset from within its own callback; a real
			// reset would require ASIOStop(), ASIODisposeBuffers(), destruction
			// and re-initialization at the next "safe" moment. We only log.
			std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
			ret = 1L;
			break;
		case kAsioResyncRequest:
			// Non-fatal data loss notification (added mainly as a workaround
			// for Win16Mutex issues on Windows 95/98, but drivers may issue it
			// in other situations too). Record it as an xrun.
			asioXRun = true;
			ret = 1L;
			break;
		case kAsioLatenciesChanged:
			// Driver latencies changed — note this does NOT imply the buffer
			// sizes changed. Internal delay data may need updating.
			std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
			ret = 1L;
			break;
		case kAsioEngineVersion:
			// Report the ASIO version supported by this host application
			// (drivers assume 1.0 when this selector is unimplemented).
			ret = 2L;
			break;
		case kAsioSupportsTimeInfo:
			// We do not use the bufferSwitchTimeInfo() callback; the classic
			// bufferSwitch() path is used for ASIO 1.0 compatibility.
			ret = 0;
			break;
		case kAsioSupportsTimeCode:
			// Time-code information is not needed by this backend.
			ret = 0;
			break;
	}
	return ret;
}
// Translate an ASIOError code into a human-readable description.
static const char* getAsioErrorString(ASIOError result) {
	struct Messages {
		ASIOError value;
		const char *message;
	};
	// Static lookup table of the documented ASIO error codes.
	static const Messages errorTable[] = {
		{ ASE_NotPresent, "Hardware input or output is not present or available." },
		{ ASE_HWMalfunction, "Hardware is malfunctioning." },
		{ ASE_InvalidParameter, "Invalid input parameter." },
		{ ASE_InvalidMode, "Invalid mode." },
		{ ASE_SPNotAdvancing, "Sample position not advancing." },
		{ ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
		{ ASE_NoMemory, "Not enough memory to complete the request." }
	};
	const uint32_t entryCount = sizeof(errorTable) / sizeof(errorTable[0]);
	for (uint32_t idx = 0; idx < entryCount; ++idx) {
		if (errorTable[idx].value == result) {
			return errorTable[idx].message;
		}
	}
	return "Unknown error.";
}
//******************** End of __WINDOWS_ASIO__ *********************//
#endif

49
airtaudio/api/Asio.h Normal file
View File

@ -0,0 +1,49 @@
/**
 * @author Gary P. SCAVONE
 *
 * @copyright 2001-2013 Gary P. Scavone, all right reserved
 *
 * @license like MIT (see license file)
 */
#if !defined(__AIRTAUDIO_API_ASIO_H__) && defined(__WINDOWS_ASIO__)
#define __AIRTAUDIO_API_ASIO_H__
namespace airtaudio {
	namespace api {
		/**
		 * @brief Steinberg ASIO backend for airtaudio (Windows only).
		 */
		class Asio: public airtaudio::Api {
			public:
				Asio();
				~Asio();
				airtaudio::api::type getCurrentApi(void) {
					// FIX: was "airtaudio::WINDOWS_ASIO" — every other backend
					// header (Core.h, Ds.h, Dummy.h) qualifies the backend enum
					// with airtaudio::api::, so the same is required here.
					return airtaudio::api::WINDOWS_ASIO;
				}
				uint32_t getDeviceCount(void);
				airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
				void closeStream(void);
				void startStream(void);
				void stopStream(void);
				void abortStream(void);
				long getStreamLatency(void);
				// This function is intended for internal use only. It must be
				// public because it is called by the internal callback handler,
				// which is not a member of RtAudio. External use of this function
				// will most likely produce highly undesireable results!
				bool callbackEvent(long _bufferIndex);
			private:
				// Cache filled by saveDeviceInfo(); ASIO cannot probe while a
				// stream is open, so probing results are saved beforehand.
				std::vector<airtaudio::DeviceInfo> m_devices;
				void saveDeviceInfo(void);
				// Presumably tracks whether COM was initialized by this object
				// — TODO confirm against the .cpp implementation.
				bool m_coInitialized;
				// Open/configure one direction of the stream; returns false on failure.
				bool probeDeviceOpen(uint32_t _device,
				                     airtaudio::api::StreamMode _mode,
				                     uint32_t _channels,
				                     uint32_t _firstChannel,
				                     uint32_t _sampleRate,
				                     airtaudio::format _format,
				                     uint32_t *_bufferSize,
				                     airtaudio::StreamOptions *_options);
		};
	};
};
#endif

1405
airtaudio/api/Core.cpp Normal file

File diff suppressed because it is too large Load Diff

54
airtaudio/api/Core.h Normal file
View File

@ -0,0 +1,54 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_CORE_H__) && defined(__MACOSX_CORE__)
#define __AIRTAUDIO_API_CORE_H__
#include <CoreAudio/AudioHardware.h>
namespace airtaudio {
namespace api {
class Core: public airtaudio::Api {
public:
Core();
~Core();
airtaudio::api::type getCurrentApi(void) {
return airtaudio::api::MACOSX_CORE;
}
uint32_t getDeviceCount(void);
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
uint32_t getDefaultOutputDevice(void);
uint32_t getDefaultInputDevice(void);
void closeStream(void);
void startStream(void);
void stopStream(void);
void abortStream(void);
long getStreamLatency(void);
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
bool callbackEvent(AudioDeviceID _deviceId,
const AudioBufferList *_inBufferList,
const AudioBufferList *_outBufferList);
private:
bool probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options);
static const char* getErrorCode(OSStatus _code);
};
};
};
#endif

1621
airtaudio/api/Ds.cpp Normal file

File diff suppressed because it is too large Load Diff

52
airtaudio/api/Ds.h Normal file
View File

@ -0,0 +1,52 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_DS_H__) && defined(__WINDOWS_DS__)
#define __AIRTAUDIO_API_DS_H__
namespace airtaudio {
namespace api {
class Ds: public airtaudio::Api {
public:
Ds(void);
~Ds(void);
airtaudio::api::type getCurrentApi(void) {
return airtaudio::api::WINDOWS_DS;
}
uint32_t getDeviceCount(void);
uint32_t getDefaultOutputDevice(void);
uint32_t getDefaultInputDevice(void);
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
void closeStream(void);
void startStream(void);
void stopStream(void);
void abortStream(void);
long getStreamLatency(void);
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
void callbackEvent(void);
private:
bool m_coInitialized;
bool m_buffersRolling;
long m_duplexPrerollBytes;
std::vector<struct DsDevice> dsDevices;
bool probeDeviceOpen(uint32_t _device,
airtaudio::api::StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
airtaudio::format _format,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options);
};
};
};
#endif

54
airtaudio/api/Dummy.cpp Normal file
View File

@ -0,0 +1,54 @@
/**
 * @author Gary P. SCAVONE
 *
 * @copyright 2001-2013 Gary P. Scavone, all right reserved
 *
 * @license like MIT (see license file)
 */
#if defined(__AIRTAUDIO_DUMMY__)
// Placeholder backend: constructing it immediately warns that no real
// audio functionality is available.
airtaudio::api::Dummy::Dummy(void) {
	m_errorText = "airtaudio::api::Dummy: This class provides no functionality.";
	error(airtaudio::errorWarning);
}
// The dummy backend never exposes any audio device.
uint32_t airtaudio::api::Dummy::getDeviceCount(void) {
	return 0;
}
rtaudio::DeviceInfo airtaudio::api::Dummy::getDeviceInfo(uint32_t _device) {
(void)_device;
rtaudio::DeviceInfo info;
return info;
}
// All stream-control operations are intentional no-ops: the dummy backend
// can never have an open stream to act on.
void airtaudio::api::Dummy::closeStream(void) {
}
void airtaudio::api::Dummy::startStream(void) {
}
void airtaudio::api::Dummy::stopStream(void) {
}
void airtaudio::api::Dummy::abortStream(void) {
}
// Opening a device always fails on the dummy backend; every parameter is
// accepted but ignored.
bool airtaudio::api::Dummy::probeDeviceOpen(uint32_t _device,
                                            airtaudio::api::StreamMode _mode,
                                            uint32_t _channels,
                                            uint32_t _firstChannel,
                                            uint32_t _sampleRate,
                                            airtaudio::format _format,
                                            uint32_t *_bufferSize,
                                            airtaudio::StreamOptions *_options) {
	return false;
}
#endif

41
airtaudio/api/Dummy.h Normal file
View File

@ -0,0 +1,41 @@
/**
 * @author Gary P. SCAVONE
 *
 * @copyright 2001-2013 Gary P. Scavone, all right reserved
 *
 * @license like MIT (see license file)
 */
#if !defined(__AIRTAUDIO_API_DUMMY_H__) && defined(__AIRTAUDIO_DUMMY__)
#define __AIRTAUDIO_API_DUMMY_H__
#include <airtaudio/Interface.h>
namespace airtaudio {
	namespace api {
		/**
		 * @brief Placeholder backend used when no real audio API is compiled
		 * in; reports zero devices and refuses to open streams.
		 */
		class Dummy: public airtaudio::Api {
			public:
				Dummy(void);
				airtaudio::api::type getCurrentApi(void) {
					return airtaudio::api::RTAUDIO_DUMMY;
				}
				uint32_t getDeviceCount(void);
				airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
				void closeStream(void);
				void startStream(void);
				void stopStream(void);
				void abortStream(void);
			private:
				// Always fails; every parameter is ignored (see Dummy.cpp).
				bool probeDeviceOpen(uint32_t _device,
				                     airtaudio::api::StreamMode _mode,
				                     uint32_t _channels,
				                     uint32_t _firstChannel,
				                     uint32_t _sampleRate,
				                     airtaudio::format _format,
				                     uint32_t *_bufferSize,
				                     airtaudio::StreamOptions *_options);
		};
	};
};
#endif

792
airtaudio/api/Jack.cpp Normal file
View File

@ -0,0 +1,792 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if defined(__UNIX_JACK__)
#include <unistd.h>
#include <limits.h>
#include <iostream>
#include <airtaudio/Interface.h>
#include <string.h>
// JACK is a low-latency audio server, originally written for the
// GNU/Linux operating system and now also ported to OS-X. It can
// connect a number of different applications to an audio device, as
// well as allowing them to share audio between themselves.
//
// When using JACK with RtAudio, "devices" refer to JACK clients that
// have ports connected to the server. The JACK server is typically
// started in a terminal as follows:
//
// .jackd -d alsa -d hw:0
//
// or through an interface program such as qjackctl. Many of the
// parameters normally set for a stream are fixed by the JACK server
// and can be specified when the JACK server is started. In
// particular,
//
// .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
//
// specifies a sample rate of 44100 Hz, a buffer size of 512 sample
// frames, and number of buffers = 4. Once the server is running, it
// is not possible to override these values. If the values are not
// specified in the command-line, the JACK server uses default values.
//
// The JACK server does not have to be running when an instance of
// RtApiJack is created, though the function getDeviceCount() will
// report 0 devices found until JACK has been started. When no
// devices are available (i.e., the JACK server is not running), a
// stream cannot be opened.
#include <jack/jack.h>
#include <unistd.h>
#include <cstdio>
// A structure to hold various information related to the Jack API
// implementation.
// Bookkeeping shared between the Jack callbacks and the API object.
struct JackHandle {
	jack_client_t *client;           // connection to the Jack server
	jack_port_t **ports[2];          // registered ports, one array per stream direction
	std::string deviceName[2];       // target client name per direction
	bool xrun[2];                    // xrun flag per direction
	std::condition_variable condition;
	int32_t drainCounter;            // Tracks callback counts when draining
	bool internalDrain;              // Indicates if stop is initiated from callback or not.
	JackHandle(void) :
	  client(0),
	  drainCounter(0),
	  internalDrain(false) {
		for (int32_t direction = 0; direction < 2; ++direction) {
			ports[direction] = 0;
			xrun[direction] = false;
		}
	}
};
// No-op error sink used to silence Jack's internal error reporting.
static void jackSilentError(const char *) {};
airtaudio::api::Jack::Jack(void) {
	// Nothing to do here.
#if !defined(__RTAUDIO_DEBUG__)
	// Turn off Jack's internal error reporting.
	jack_set_error_function(&jackSilentError);
#endif
}
// Ensure any open stream is shut down before the object is destroyed.
airtaudio::api::Jack::~Jack(void) {
	if (m_stream.state != STREAM_CLOSED) {
		closeStream();
	}
}
// Count distinct Jack "devices" (clients exposing ports) by scanning every
// port name up to its first ':' and counting unique client-name prefixes.
// Returns 0 when the Jack server is not running.
uint32_t airtaudio::api::Jack::getDeviceCount(void) {
	// Connect without spawning a server; failure means no devices.
	jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption;
	jack_status_t *status = NULL;
	jack_client_t *client = jack_client_open("RtApiJackCount", options, status);
	if (client == NULL) {
		return 0;
	}
	uint32_t nDevices = 0;
	const char **ports = jack_get_ports(client, NULL, NULL, 0);
	if (ports != NULL) {
		std::string previousPort;
		uint32_t portIndex = 0;
		while (ports[portIndex] != NULL) {
			// A port is named "<client>:<port>"; group by the prefix.
			std::string portName = (char *) ports[portIndex];
			size_t iColon = portName.find(":");
			if (iColon != std::string::npos) {
				portName = portName.substr(0, iColon + 1);
				if (portName != previousPort) {
					nDevices++;
					previousPort = portName;
				}
			}
			++portIndex;
		}
		free(ports);
	}
	jack_client_close(client);
	return nDevices;
}
// Fill a DeviceInfo for the _device-th distinct Jack port prefix: name,
// input/output/duplex channel counts, the server's (single) sample rate and
// the native format (always FLOAT32). On any failure info.probed stays false
// and a warning is raised via error().
airtaudio::DeviceInfo airtaudio::api::Jack::getDeviceInfo(uint32_t _device)
{
	airtaudio::DeviceInfo info;
	info.probed = false;
	// Connect to the server with a temporary client; never auto-start it.
	jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption
	jack_status_t *status = NULL;
	jack_client_t *client = jack_client_open("RtApiJackInfo", options, status);
	if (client == NULL) {
		m_errorText = "airtaudio::api::Jack::getDeviceInfo: Jack server not found or connection error!";
		error(airtaudio::errorWarning);
		return info;
	}
	const char **ports;
	std::string port, previousPort;
	uint32_t nPorts = 0, nDevices = 0;
	ports = jack_get_ports(client, NULL, NULL, 0);
	if (ports) {
		// Parse the port names up to the first colon (:).
		size_t iColon = 0;
		do {
			port = (char *) ports[ nPorts ];
			iColon = port.find(":");
			if (iColon != std::string::npos) {
				port = port.substr(0, iColon);
				if (port != previousPort) {
					// Each new prefix is one "device"; remember the name of
					// the one the caller asked for.
					if (nDevices == _device) {
						info.name = port;
					}
					nDevices++;
					previousPort = port;
				}
			}
		} while (ports[++nPorts]);
		free(ports);
	}
	if (_device >= nDevices) {
		jack_client_close(client);
		m_errorText = "airtaudio::api::Jack::getDeviceInfo: device ID is invalid!";
		error(airtaudio::errorInvalidUse);
		return info;
	}
	// Get the current jack server sample rate.
	info.sampleRates.clear();
	info.sampleRates.push_back(jack_get_sample_rate(client));
	// Count the available ports containing the client name as device
	// channels. Jack "input ports" equal RtAudio output channels.
	uint32_t nChannels = 0;
	ports = jack_get_ports(client, info.name.c_str(), NULL, JackPortIsInput);
	if (ports) {
		while (ports[ nChannels ]) {
			nChannels++;
		}
		free(ports);
		info.outputChannels = nChannels;
	}
	// Jack "output ports" equal RtAudio input channels.
	nChannels = 0;
	ports = jack_get_ports(client, info.name.c_str(), NULL, JackPortIsOutput);
	if (ports) {
		while (ports[ nChannels ]) {
			nChannels++;
		}
		free(ports);
		info.inputChannels = nChannels;
	}
	if (info.outputChannels == 0 && info.inputChannels == 0) {
		jack_client_close(client);
		m_errorText = "airtaudio::api::Jack::getDeviceInfo: error determining Jack input/output channels!";
		error(airtaudio::errorWarning);
		return info;
	}
	// If device opens for both playback and capture, we determine the channels.
	if (info.outputChannels > 0 && info.inputChannels > 0) {
		info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
	}
	// Jack always uses 32-bit floats.
	info.nativeFormats = airtaudio::FLOAT32;
	// Jack doesn't provide default devices so we'll use the first available one.
	if (    _device == 0
	     && info.outputChannels > 0) {
		info.isDefaultOutput = true;
	}
	if (    _device == 0
	     && info.inputChannels > 0) {
		info.isDefaultInput = true;
	}
	jack_client_close(client);
	info.probed = true;
	return info;
}
// Trampoline registered with jack_set_process_callback(): forwards each
// process cycle to the Jack object stored in the stream's CallbackInfo.
// A non-zero return tells Jack to stop calling this client.
static int32_t jackCallbackHandler(jack_nframes_t _nframes, void *_infoPointer) {
	airtaudio::CallbackInfo* callbackInfo = (airtaudio::CallbackInfo*)_infoPointer;
	airtaudio::api::Jack* jackApi = (airtaudio::api::Jack*)callbackInfo->object;
	return (jackApi->callbackEvent((uint64_t)_nframes) == false) ? 1 : 0;
}
// This function will be called by a spawned thread when the Jack
// server signals that it is shutting down. It is necessary to handle
// it this way because the jackShutdown() function must return before
// the jack_deactivate() function (in closeStream()) will return.
static void *jackCloseStream(void *_ptr) {
airtaudio::CallbackInfo* info = (airtaudio::CallbackInfo*)_ptr;
airtaudio::api::Jack* object = (airtaudio::api::Jack*)info->object;
object->closeStream();
pthread_exit(NULL);
}
static void jackShutdown(void* _infoPointer) {
airtaudio::CallbackInfo* info = (airtaudio::CallbackInfo*)_infoPointer;
airtaudio::api::Jack* object = (airtaudio::api::Jack*)info->object;
// Check current stream state. If stopped, then we'll assume this
// was called as a result of a call to airtaudio::api::Jack::stopStream (the
// deactivation of a client handle causes this function to be called).
// If not, we'll assume the Jack server is shutting down or some
// other problem occurred and we should close the stream.
if (object->isStreamRunning() == false) {
return;
}
pthread_t threadId;
pthread_create(&threadId, NULL, jackCloseStream, info);
std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
}
static int32_t jackXrun(void* _infoPointer) {
JackHandle* handle = (JackHandle*)_infoPointer;
if (handle->ports[0]) {
handle->xrun[0] = true;
}
if (handle->ports[1]) {
handle->xrun[1] = true;
}
return 0;
}
bool airtaudio::api::Jack::probeDeviceOpen(uint32_t device,
airtaudio::api::StreamMode mode,
uint32_t channels,
uint32_t firstChannel,
uint32_t sampleRate,
airtaudio::format format,
uint32_t *bufferSize,
airtaudio::StreamOptions *options) {
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
// Look for jack server and try to become a client (only do once per stream).
jack_client_t *client = 0;
if (mode == OUTPUT || (mode == INPUT && m_stream.mode != OUTPUT)) {
jack_options_t jackoptions = (jack_options_t) (JackNoStartServer); //JackNullOption;
jack_status_t *status = NULL;
if (options && !options->streamName.empty()) {
client = jack_client_open(options->streamName.c_str(), jackoptions, status);
} else {
client = jack_client_open("RtApiJack", jackoptions, status);
}
if (client == 0) {
m_errorText = "airtaudio::api::Jack::probeDeviceOpen: Jack server not found or connection error!";
error(airtaudio::errorWarning);
return FAILURE;
}
}
else {
// The handle must have been created on an earlier pass.
client = handle->client;
}
const char **ports;
std::string port, previousPort, deviceName;
uint32_t nPorts = 0, nDevices = 0;
ports = jack_get_ports(client, NULL, NULL, 0);
if (ports) {
// Parse the port names up to the first colon (:).
size_t iColon = 0;
do {
port = (char *) ports[ nPorts ];
iColon = port.find(":");
if (iColon != std::string::npos) {
port = port.substr(0, iColon);
if (port != previousPort) {
if (nDevices == device) deviceName = port;
nDevices++;
previousPort = port;
}
}
} while (ports[++nPorts]);
free(ports);
}
if (device >= nDevices) {
m_errorText = "airtaudio::api::Jack::probeDeviceOpen: device ID is invalid!";
return FAILURE;
}
// Count the available ports containing the client name as device
// channels. Jack "input ports" equal RtAudio output channels.
uint32_t nChannels = 0;
uint64_t flag = JackPortIsInput;
if (mode == INPUT) flag = JackPortIsOutput;
ports = jack_get_ports(client, deviceName.c_str(), NULL, flag);
if (ports) {
while (ports[ nChannels ]) nChannels++;
free(ports);
}
// Compare the jack ports for specified client to the requested number of channels.
if (nChannels < (channels + firstChannel)) {
m_errorStream << "airtaudio::api::Jack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Check the jack server sample rate.
uint32_t jackRate = jack_get_sample_rate(client);
if (sampleRate != jackRate) {
jack_client_close(client);
m_errorStream << "airtaudio::api::Jack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
m_stream.sampleRate = jackRate;
// Get the latency of the JACK port.
ports = jack_get_ports(client, deviceName.c_str(), NULL, flag);
if (ports[ firstChannel ]) {
// Added by Ge Wang
jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
// the range (usually the min and max are equal)
jack_latency_range_t latrange; latrange.min = latrange.max = 0;
// get the latency range
jack_port_get_latency_range(jack_port_by_name(client, ports[firstChannel]), cbmode, &latrange);
// be optimistic, use the min!
m_stream.latency[mode] = latrange.min;
//m_stream.latency[mode] = jack_port_get_latency(jack_port_by_name(client, ports[ firstChannel ]));
}
free(ports);
// The jack server always uses 32-bit floating-point data.
m_stream.deviceFormat[mode] = FLOAT32;
m_stream.userFormat = format;
if (options && options->flags & NONINTERLEAVED) m_stream.userInterleaved = false;
else m_stream.userInterleaved = true;
// Jack always uses non-interleaved buffers.
m_stream.deviceInterleaved[mode] = false;
// Jack always provides host byte-ordered data.
m_stream.doByteSwap[mode] = false;
// Get the buffer size. The buffer size and number of buffers
// (periods) is set when the jack server is started.
m_stream.bufferSize = (int) jack_get_buffer_size(client);
*bufferSize = m_stream.bufferSize;
m_stream.nDeviceChannels[mode] = channels;
m_stream.nUserChannels[mode] = channels;
// Set flags for buffer conversion.
m_stream.doConvertBuffer[mode] = false;
if (m_stream.userFormat != m_stream.deviceFormat[mode])
m_stream.doConvertBuffer[mode] = true;
if (m_stream.userInterleaved != m_stream.deviceInterleaved[mode] &&
m_stream.nUserChannels[mode] > 1)
m_stream.doConvertBuffer[mode] = true;
// Allocate our JackHandle structure for the stream.
if (handle == 0) {
try {
handle = new JackHandle;
}
catch (std::bad_alloc&) {
m_errorText = "airtaudio::api::Jack::probeDeviceOpen: error allocating JackHandle memory.";
goto error;
}
m_stream.apiHandle = (void *) handle;
handle->client = client;
}
handle->deviceName[mode] = deviceName;
// Allocate necessary internal buffers.
uint64_t bufferBytes;
bufferBytes = m_stream.nUserChannels[mode] * *bufferSize * formatBytes(m_stream.userFormat);
m_stream.userBuffer[mode] = (char *) calloc(bufferBytes, 1);
if (m_stream.userBuffer[mode] == NULL) {
m_errorText = "airtaudio::api::Jack::probeDeviceOpen: error allocating user buffer memory.";
goto error;
}
if (m_stream.doConvertBuffer[mode]) {
bool makeBuffer = true;
if (mode == OUTPUT)
bufferBytes = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
else { // mode == INPUT
bufferBytes = m_stream.nDeviceChannels[1] * formatBytes(m_stream.deviceFormat[1]);
if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
if (bufferBytes < bytesOut) makeBuffer = false;
}
}
if (makeBuffer) {
bufferBytes *= *bufferSize;
if (m_stream.deviceBuffer) free(m_stream.deviceBuffer);
m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_stream.deviceBuffer == NULL) {
m_errorText = "airtaudio::api::Jack::probeDeviceOpen: error allocating device buffer memory.";
goto error;
}
}
}
// Allocate memory for the Jack ports (channels) identifiers.
handle->ports[mode] = (jack_port_t **) malloc (sizeof (jack_port_t *) * channels);
if (handle->ports[mode] == NULL) {
m_errorText = "airtaudio::api::Jack::probeDeviceOpen: error allocating port memory.";
goto error;
}
m_stream.device[mode] = device;
m_stream.channelOffset[mode] = firstChannel;
m_stream.state = STREAM_STOPPED;
m_stream.callbackInfo.object = (void *) this;
if (m_stream.mode == OUTPUT && mode == INPUT)
// We had already set up the stream for output.
m_stream.mode = DUPLEX;
else {
m_stream.mode = mode;
jack_set_process_callback(handle->client, jackCallbackHandler, (void *) &m_stream.callbackInfo);
jack_set_xrun_callback(handle->client, jackXrun, (void *) &handle);
jack_on_shutdown(handle->client, jackShutdown, (void *) &m_stream.callbackInfo);
}
// Register our ports.
char label[64];
if (mode == OUTPUT) {
for (uint32_t i=0; i<m_stream.nUserChannels[0]; i++) {
snprintf(label, 64, "outport %d", i);
handle->ports[0][i] = jack_port_register(handle->client,
(const char *)label,
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsOutput,
0);
}
} else {
for (uint32_t i=0; i<m_stream.nUserChannels[1]; i++) {
snprintf(label, 64, "inport %d", i);
handle->ports[1][i] = jack_port_register(handle->client,
(const char *)label,
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsInput,
0);
}
}
// Setup the buffer conversion information structure. We don't use
// buffers to do channel offsets, so we override that parameter
// here.
if (m_stream.doConvertBuffer[mode]) {
setConvertInfo(mode, 0);
}
return SUCCESS;
error:
if (handle) {
jack_client_close(handle->client);
if (handle->ports[0]) {
free(handle->ports[0]);
}
if (handle->ports[1]) {
free(handle->ports[1]);
}
delete handle;
m_stream.apiHandle = 0;
}
for (int32_t iii=0; iii<2; ++iii) {
if (m_stream.userBuffer[iii]) {
free(m_stream.userBuffer[iii]);
m_stream.userBuffer[iii] = 0;
}
}
if (m_stream.deviceBuffer) {
free(m_stream.deviceBuffer);
m_stream.deviceBuffer = 0;
}
return FAILURE;
}
// Close the stream: deactivate and close the Jack client, release the
// per-stream handle and all user/conversion buffers, and reset the stream
// bookkeeping to the closed state. Warns if no stream is open.
void airtaudio::api::Jack::closeStream(void)
{
	if (m_stream.state == STREAM_CLOSED) {
		m_errorText = "airtaudio::api::Jack::closeStream(): no open stream to close!";
		error(airtaudio::errorWarning);
		return;
	}
	JackHandle *handle = (JackHandle *) m_stream.apiHandle;
	if (handle != NULL) {
		// A running client must be deactivated before it can be closed.
		if (m_stream.state == STREAM_RUNNING) {
			jack_deactivate(handle->client);
		}
		jack_client_close(handle->client);
		for (int32_t dir=0; dir<2; ++dir) {
			if (handle->ports[dir]) {
				free(handle->ports[dir]);
			}
		}
		delete handle;
		m_stream.apiHandle = 0;
	}
	for (int32_t dir=0; dir<2; ++dir) {
		if (m_stream.userBuffer[dir]) {
			free(m_stream.userBuffer[dir]);
			m_stream.userBuffer[dir] = 0;
		}
	}
	if (m_stream.deviceBuffer) {
		free(m_stream.deviceBuffer);
		m_stream.deviceBuffer = 0;
	}
	m_stream.mode = UNINITIALIZED;
	m_stream.state = STREAM_CLOSED;
}
// Start the stream: activate the Jack client, then connect each of our
// registered ports to the device's physical ports (offset by the channel
// offset chosen at open time). On any connection failure the stream is NOT
// marked running and errorSystemError is raised.
void airtaudio::api::Jack::startStream(void)
{
	verifyStream();
	if (m_stream.state == STREAM_RUNNING) {
		m_errorText = "airtaudio::api::Jack::startStream(): the stream is already running!";
		error(airtaudio::errorWarning);
		return;
	}
	JackHandle *handle = (JackHandle *) m_stream.apiHandle;
	int32_t result = jack_activate(handle->client);
	if (result) {
		m_errorText = "airtaudio::api::Jack::startStream(): unable to activate JACK client!";
		goto unlock;
	}
	const char **ports;
	// Get the list of available ports.
	if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
		// 'result' is pre-set to failure so a missing port list or a failed
		// connect falls through to the error path at 'unlock'.
		result = 1;
		ports = jack_get_ports(handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
		if (ports == NULL) {
			m_errorText = "airtaudio::api::Jack::startStream(): error determining available JACK input ports!";
			goto unlock;
		}
		// Now make the port connections. Since RtAudio wasn't designed to
		// allow the user to select particular channels of a device, we'll
		// just open the first "nChannels" ports with offset.
		for (uint32_t i=0; i<m_stream.nUserChannels[0]; i++) {
			result = 1;
			if (ports[ m_stream.channelOffset[0] + i ])
				result = jack_connect(handle->client, jack_port_name(handle->ports[0][i]), ports[ m_stream.channelOffset[0] + i ]);
			if (result) {
				free(ports);
				m_errorText = "airtaudio::api::Jack::startStream(): error connecting output ports!";
				goto unlock;
			}
		}
		free(ports);
	}
	if (m_stream.mode == INPUT || m_stream.mode == DUPLEX) {
		result = 1;
		ports = jack_get_ports(handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput);
		if (ports == NULL) {
			m_errorText = "airtaudio::api::Jack::startStream(): error determining available JACK output ports!";
			goto unlock;
		}
		// Now make the port connections. See note above.
		for (uint32_t i=0; i<m_stream.nUserChannels[1]; i++) {
			result = 1;
			if (ports[ m_stream.channelOffset[1] + i ])
				result = jack_connect(handle->client, ports[ m_stream.channelOffset[1] + i ], jack_port_name(handle->ports[1][i]));
			if (result) {
				free(ports);
				m_errorText = "airtaudio::api::Jack::startStream(): error connecting input ports!";
				goto unlock;
			}
		}
		free(ports);
	}
	// Reset the drain state so the process callback runs normally.
	handle->drainCounter = 0;
	handle->internalDrain = false;
	m_stream.state = STREAM_RUNNING;
unlock:
	if (result == 0) return;
	error(airtaudio::errorSystemError);
}
// Stop the stream. For playback (OUTPUT/DUPLEX) modes, first request a
// drain (drainCounter = 2 makes callbackEvent() output silence and bump the
// counter) and block until the callback signals completion, then deactivate
// the Jack client.
void airtaudio::api::Jack::stopStream(void) {
	verifyStream();
	if (m_stream.state == STREAM_STOPPED) {
		m_errorText = "airtaudio::api::Jack::stopStream(): the stream is already stopped!";
		error(airtaudio::errorWarning);
		return;
	}
	JackHandle *handle = (JackHandle *) m_stream.apiHandle;
	if (    m_stream.mode == OUTPUT
	     || m_stream.mode == DUPLEX) {
		// drainCounter != 0 means a drain is already in progress (e.g. an
		// abort or a callback-initiated stop) — skip the wait in that case.
		if (handle->drainCounter == 0) {
			handle->drainCounter = 2;
			// Wait for callbackEvent() to notify that draining finished.
			// NOTE(review): wait() has no predicate, so a spurious wakeup
			// would end the wait early — confirm this is acceptable here.
			std::unique_lock<std::mutex> lck(m_stream.mutex);
			handle->condition.wait(lck);
		}
	}
	jack_deactivate(handle->client);
	m_stream.state = STREAM_STOPPED;
}
// Abort the stream: like stopStream() but without draining pending output.
// Pre-setting drainCounter to 2 makes stopStream() skip its drain wait and
// tells the process callback to emit silence until deactivation.
void airtaudio::api::Jack::abortStream(void)
{
	verifyStream();
	if (m_stream.state == STREAM_STOPPED) {
		m_errorText = "airtaudio::api::Jack::abortStream(): the stream is already stopped!";
		error(airtaudio::errorWarning);
		return;
	}
	JackHandle *handle = (JackHandle *) m_stream.apiHandle;
	handle->drainCounter = 2;
	stopStream();
}
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted. It is necessary to handle it this way because the
// callbackEvent() function must return before the jack_deactivate()
// function will return.
static void jackStopStream(void *_ptr) {
airtaudio::CallbackInfo *info = (airtaudio::CallbackInfo *) _ptr;
airtaudio::api::Jack *object = (airtaudio::api::Jack *) info->object;
object->stopStream();
}
// Per-cycle Jack process handler (called from jackCallbackHandler). Runs
// the user callback, moves audio between the user/conversion buffers and
// the per-channel Jack port buffers, and manages the stop/drain handshake
// with stopStream(). Returns SUCCESS to keep the client in the graph.
//
// Fixes vs. the original:
//  - the error messages said "RtApiCore::callbackEvent()" (copy-paste from
//    the CoreAudio backend) — corrected to name this backend.
//  - 'new std::thread(...)' leaked the (never joined/deleted) thread
//    object; the helper threads are now detached instead.
bool airtaudio::api::Jack::callbackEvent(uint64_t nframes)
{
	if (m_stream.state == STREAM_STOPPED || m_stream.state == STREAM_STOPPING) return SUCCESS;
	if (m_stream.state == STREAM_CLOSED) {
		m_errorText = "airtaudio::api::Jack::callbackEvent(): the stream is closed ... this shouldn't happen!";
		error(airtaudio::errorWarning);
		return FAILURE;
	}
	if (m_stream.bufferSize != nframes) {
		m_errorText = "airtaudio::api::Jack::callbackEvent(): the JACK buffer size has changed ... cannot process!";
		error(airtaudio::errorWarning);
		return FAILURE;
	}
	CallbackInfo *info = (CallbackInfo *) &m_stream.callbackInfo;
	JackHandle *handle = (JackHandle *) m_stream.apiHandle;
	// Check if we were draining the stream and signal that it is finished.
	if (handle->drainCounter > 3) {
		m_stream.state = STREAM_STOPPING;
		if (handle->internalDrain == true) {
			// The stop was requested from within the callback: hand the
			// actual stopStream() call to a detached helper thread, since
			// jack_deactivate() cannot complete until we return.
			std::thread(jackStopStream, info).detach();
		} else {
			// An external stopStream() call is blocked on this condition.
			handle->condition.notify_one();
		}
		return SUCCESS;
	}
	// Invoke user callback first, to get fresh output data.
	if (handle->drainCounter == 0) {
		airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) info->callback;
		double streamTime = getStreamTime();
		airtaudio::streamStatus status = 0;
		// Report and clear any xruns recorded by jackXrun().
		if (m_stream.mode != INPUT && handle->xrun[0] == true) {
			status |= OUTPUT_UNDERFLOW;
			handle->xrun[0] = false;
		}
		if (m_stream.mode != OUTPUT && handle->xrun[1] == true) {
			status |= INPUT_OVERFLOW;
			handle->xrun[1] = false;
		}
		int32_t cbReturnValue = callback(m_stream.userBuffer[0], m_stream.userBuffer[1],
		                                 m_stream.bufferSize, streamTime, status, info->userData);
		if (cbReturnValue == 2) {
			// User requested an immediate abort.
			m_stream.state = STREAM_STOPPING;
			handle->drainCounter = 2;
			std::thread(jackStopStream, info).detach();
			return SUCCESS;
		}
		else if (cbReturnValue == 1) {
			// User requested a stop after draining the output.
			handle->drainCounter = 1;
			handle->internalDrain = true;
		}
	}
	jack_default_audio_sample_t *jackbuffer;
	uint64_t bufferBytes = nframes * sizeof(jack_default_audio_sample_t);
	if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
		if (handle->drainCounter > 1) { // write zeros to the output stream
			for (uint32_t i=0; i<m_stream.nDeviceChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t) nframes);
				memset(jackbuffer, 0, bufferBytes);
			}
		}
		else if (m_stream.doConvertBuffer[0]) {
			// Convert the user buffer into the (non-interleaved FLOAT32)
			// device layout, then copy channel by channel to the ports.
			convertBuffer(m_stream.deviceBuffer, m_stream.userBuffer[0], m_stream.convertInfo[0]);
			for (uint32_t i=0; i<m_stream.nDeviceChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t) nframes);
				memcpy(jackbuffer, &m_stream.deviceBuffer[i*bufferBytes], bufferBytes);
			}
		}
		else { // no buffer conversion
			for (uint32_t i=0; i<m_stream.nUserChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t) nframes);
				memcpy(jackbuffer, &m_stream.userBuffer[0][i*bufferBytes], bufferBytes);
			}
		}
		if (handle->drainCounter) {
			// While draining, count silent cycles; skip the input side.
			handle->drainCounter++;
			goto unlock;
		}
	}
	if (    m_stream.mode == INPUT
	     || m_stream.mode == DUPLEX) {
		if (m_stream.doConvertBuffer[1]) {
			for (uint32_t i=0; i<m_stream.nDeviceChannels[1]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[1][i], (jack_nframes_t) nframes);
				memcpy(&m_stream.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes);
			}
			convertBuffer(m_stream.userBuffer[1], m_stream.deviceBuffer, m_stream.convertInfo[1]);
		} else {
			// no buffer conversion
			for (uint32_t i=0; i<m_stream.nUserChannels[1]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[1][i], (jack_nframes_t) nframes);
				memcpy(&m_stream.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes);
			}
		}
	}
unlock:
	airtaudio::Api::tickStreamTime();
	return SUCCESS;
}
//******************** End of __UNIX_JACK__ *********************//
#endif

46
airtaudio/api/Jack.h Normal file
View File

@ -0,0 +1,46 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_JACK_H__) && defined(__UNIX_JACK__)
#define __AIRTAUDIO_API_JACK_H__
namespace airtaudio {
	namespace api {
		// JACK backend: implements the airtaudio::Api interface on top of
		// the Jack audio connection kit (see airtaudio/api/Jack.cpp).
		class Jack: public airtaudio::Api {
			public:
				Jack(void);
				~Jack(void);
				// Identify this backend to the generic Interface layer.
				airtaudio::api::type getCurrentApi(void) {
					return airtaudio::api::UNIX_JACK;
				}
				uint32_t getDeviceCount(void);
				airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
				void closeStream(void);
				void startStream(void);
				void stopStream(void);
				void abortStream(void);
				long getStreamLatency(void);
				// This function is intended for internal use only. It must be
				// public because it is called by the internal callback handler,
				// which is not a member of RtAudio. External use of this function
				// will most likely produce highly undesireable results!
				bool callbackEvent(uint64_t _nframes);
			private:
				// Open/extend the stream for one direction; called by the
				// base-class openStream() logic.
				bool probeDeviceOpen(uint32_t _device,
				                     airtaudio::api::StreamMode _mode,
				                     uint32_t _channels,
				                     uint32_t _firstChannel,
				                     uint32_t _sampleRate,
				                     airtaudio::format _format,
				                     uint32_t *_bufferSize,
				                     airtaudio::StreamOptions *_options);
		};
	};
};
#endif

945
airtaudio/api/Oss.cpp Normal file
View File

@ -0,0 +1,945 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if defined(__LINUX_OSS__)
#include <airtaudio/Interface.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <fcntl.h>
#include "soundcard.h"
#include <errno.h>
#include <math.h>
static void *ossCallbackHandler(void * ptr);
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
	int32_t id[2]; // device ids (file descriptors: 0 = playback, 1 = capture)
	bool xrun[2]; // over/underflow flags per direction
	bool triggered; // stream trigger state — presumably set once the device(s) have been started; confirm against the OSS start logic
	pthread_cond_t runnable; // condition used to coordinate with the callback thread
	OssHandle()
	:triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
// Constructor: no OSS-specific state to set up; devices are opened later
// in probeDeviceOpen().
airtaudio::api::Oss::Oss(void) {
	// Nothing to do here.
}
// Destructor: make sure a still-open stream is shut down cleanly.
airtaudio::api::Oss::~Oss(void) {
	if (m_stream.state == STREAM_CLOSED) {
		return;
	}
	closeStream();
}
// Query the number of OSS audio devices via the mixer's SNDCTL_SYSINFO
// ioctl (requires OSS >= 4.0). Returns 0 and raises a warning on failure.
uint32_t airtaudio::api::Oss::getDeviceCount(void)
{
	int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
	if (mixerfd == -1) {
		m_errorText = "airtaudio::api::Oss::getDeviceCount: error opening '/dev/mixer'.";
		error(airtaudio::errorWarning);
		return 0;
	}
	oss_sysinfo sysinfo;
	int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
	// The mixer fd is only needed for the ioctl; close it in every case.
	close(mixerfd);
	if (result == -1) {
		m_errorText = "airtaudio::api::Oss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
		error(airtaudio::errorWarning);
		return 0;
	}
	return sysinfo.numaudios;
}
rtaudio::DeviceInfo airtaudio::api::Oss::getDeviceInfo(uint32_t device)
{
rtaudio::DeviceInfo info;
info.probed = false;
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
if (mixerfd == -1) {
m_errorText = "airtaudio::api::Oss::getDeviceInfo: error opening '/dev/mixer'.";
error(airtaudio::errorWarning);
return info;
}
oss_sysinfo sysinfo;
int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
if (result == -1) {
close(mixerfd);
m_errorText = "airtaudio::api::Oss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
error(airtaudio::errorWarning);
return info;
}
unsigned nDevices = sysinfo.numaudios;
if (nDevices == 0) {
close(mixerfd);
m_errorText = "airtaudio::api::Oss::getDeviceInfo: no devices found!";
error(airtaudio::errorInvalidUse);
return info;
}
if (device >= nDevices) {
close(mixerfd);
m_errorText = "airtaudio::api::Oss::getDeviceInfo: device ID is invalid!";
error(airtaudio::errorInvalidUse);
return info;
}
oss_audioinfo ainfo;
ainfo.dev = device;
result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
close(mixerfd);
if (result == -1) {
m_errorStream << "airtaudio::api::Oss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
m_errorText = m_errorStream.str();
error(airtaudio::errorWarning);
return info;
}
// Probe channels
if (ainfo.caps & PCM_CAP_OUTPUT) info.outputChannels = ainfo.max_channels;
if (ainfo.caps & PCM_CAP_INPUT) info.inputChannels = ainfo.max_channels;
if (ainfo.caps & PCM_CAP_DUPLEX) {
if (info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX)
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
}
// Probe data formats ... do for input
uint64_t mask = ainfo.iformats;
if (mask & AFMT_S16_LE || mask & AFMT_S16_BE)
info.nativeFormats |= RTAUDIO_SINT16;
if (mask & AFMT_S8)
info.nativeFormats |= RTAUDIO_SINT8;
if (mask & AFMT_S32_LE || mask & AFMT_S32_BE)
info.nativeFormats |= RTAUDIO_SINT32;
if (mask & AFMT_FLOAT)
info.nativeFormats |= RTAUDIO_FLOAT32;
if (mask & AFMT_S24_LE || mask & AFMT_S24_BE)
info.nativeFormats |= RTAUDIO_SINT24;
// Check that we have at least one supported format
if (info.nativeFormats == 0) {
m_errorStream << "airtaudio::api::Oss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
m_errorText = m_errorStream.str();
error(airtaudio::errorWarning);
return info;
}
// Probe the supported sample rates.
info.sampleRates.clear();
if (ainfo.nrates) {
for (uint32_t i=0; i<ainfo.nrates; i++) {
for (uint32_t k=0; k<MAX_SAMPLE_RATES; k++) {
if (ainfo.rates[i] == SAMPLE_RATES[k]) {
info.sampleRates.push_back(SAMPLE_RATES[k]);
break;
}
}
}
}
else {
// Check min and max rate values;
for (uint32_t k=0; k<MAX_SAMPLE_RATES; k++) {
if (ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k])
info.sampleRates.push_back(SAMPLE_RATES[k]);
}
}
if (info.sampleRates.size() == 0) {
m_errorStream << "airtaudio::api::Oss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
error(airtaudio::errorWarning);
}
else {
info.probed = true;
info.name = ainfo.name;
}
return info;
}
bool airtaudio::api::Oss::probeDeviceOpen(uint32_t device, StreamMode mode, uint32_t channels,
uint32_t firstChannel, uint32_t sampleRate,
rtaudio::format format, uint32_t *bufferSize,
rtaudio::StreamOptions *options)
{
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
if (mixerfd == -1) {
m_errorText = "airtaudio::api::Oss::probeDeviceOpen: error opening '/dev/mixer'.";
return FAILURE;
}
oss_sysinfo sysinfo;
int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
if (result == -1) {
close(mixerfd);
m_errorText = "airtaudio::api::Oss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
return FAILURE;
}
unsigned nDevices = sysinfo.numaudios;
if (nDevices == 0) {
// This should not happen because a check is made before this function is called.
close(mixerfd);
m_errorText = "airtaudio::api::Oss::probeDeviceOpen: no devices found!";
return FAILURE;
}
if (device >= nDevices) {
// This should not happen because a check is made before this function is called.
close(mixerfd);
m_errorText = "airtaudio::api::Oss::probeDeviceOpen: device ID is invalid!";
return FAILURE;
}
oss_audioinfo ainfo;
ainfo.dev = device;
result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
close(mixerfd);
if (result == -1) {
m_errorStream << "airtaudio::api::Oss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Check if device supports input or output
if ((mode == OUTPUT && !(ainfo.caps & PCM_CAP_OUTPUT)) ||
(mode == INPUT && !(ainfo.caps & PCM_CAP_INPUT))) {
if (mode == OUTPUT)
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
else
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
m_errorText = m_errorStream.str();
return FAILURE;
}
int32_t flags = 0;
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
if (mode == OUTPUT)
flags |= O_WRONLY;
else { // mode == INPUT
if (m_stream.mode == OUTPUT && m_stream.device[0] == device) {
// We just set the same device for playback ... close and reopen for duplex (OSS only).
close(handle->id[0]);
handle->id[0] = 0;
if (!(ainfo.caps & PCM_CAP_DUPLEX)) {
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Check that the number previously set channels is the same.
if (m_stream.nUserChannels[0] != channels) {
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
flags |= O_RDWR;
}
else
flags |= O_RDONLY;
}
// Set exclusive access if specified.
if (options && options->flags & RTAUDIO_HOG_DEVICE) flags |= O_EXCL;
// Try to open the device.
int32_t fd;
fd = open(ainfo.devnode, flags, 0);
if (fd == -1) {
if (errno == EBUSY)
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
else
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
// For duplex operation, specifically set this mode (this doesn't seem to work).
/*
if (flags | O_RDWR) {
result = ioctl(fd, SNDCTL_DSP_SETDUPLEX, NULL);
if (result == -1) {
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
}
*/
// Check the device channel support.
m_stream.nUserChannels[mode] = channels;
if (ainfo.max_channels < (int)(channels + firstChannel)) {
close(fd);
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Set the number of channels.
int32_t deviceChannels = channels + firstChannel;
result = ioctl(fd, SNDCTL_DSP_CHANNELS, &deviceChannels);
if (result == -1 || deviceChannels < (int)(channels + firstChannel)) {
close(fd);
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
m_stream.nDeviceChannels[mode] = deviceChannels;
// Get the data format mask
int32_t mask;
result = ioctl(fd, SNDCTL_DSP_GETFMTS, &mask);
if (result == -1) {
close(fd);
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Determine how to set the device format.
m_stream.userFormat = format;
int32_t deviceFormat = -1;
m_stream.doByteSwap[mode] = false;
if (format == RTAUDIO_SINT8) {
if (mask & AFMT_S8) {
deviceFormat = AFMT_S8;
m_stream.deviceFormat[mode] = RTAUDIO_SINT8;
}
}
else if (format == RTAUDIO_SINT16) {
if (mask & AFMT_S16_NE) {
deviceFormat = AFMT_S16_NE;
m_stream.deviceFormat[mode] = RTAUDIO_SINT16;
}
else if (mask & AFMT_S16_OE) {
deviceFormat = AFMT_S16_OE;
m_stream.deviceFormat[mode] = RTAUDIO_SINT16;
m_stream.doByteSwap[mode] = true;
}
}
else if (format == RTAUDIO_SINT24) {
if (mask & AFMT_S24_NE) {
deviceFormat = AFMT_S24_NE;
m_stream.deviceFormat[mode] = RTAUDIO_SINT24;
}
else if (mask & AFMT_S24_OE) {
deviceFormat = AFMT_S24_OE;
m_stream.deviceFormat[mode] = RTAUDIO_SINT24;
m_stream.doByteSwap[mode] = true;
}
}
else if (format == RTAUDIO_SINT32) {
if (mask & AFMT_S32_NE) {
deviceFormat = AFMT_S32_NE;
m_stream.deviceFormat[mode] = RTAUDIO_SINT32;
}
else if (mask & AFMT_S32_OE) {
deviceFormat = AFMT_S32_OE;
m_stream.deviceFormat[mode] = RTAUDIO_SINT32;
m_stream.doByteSwap[mode] = true;
}
}
if (deviceFormat == -1) {
// The user requested format is not natively supported by the device.
if (mask & AFMT_S16_NE) {
deviceFormat = AFMT_S16_NE;
m_stream.deviceFormat[mode] = RTAUDIO_SINT16;
}
else if (mask & AFMT_S32_NE) {
deviceFormat = AFMT_S32_NE;
m_stream.deviceFormat[mode] = RTAUDIO_SINT32;
}
else if (mask & AFMT_S24_NE) {
deviceFormat = AFMT_S24_NE;
m_stream.deviceFormat[mode] = RTAUDIO_SINT24;
}
else if (mask & AFMT_S16_OE) {
deviceFormat = AFMT_S16_OE;
m_stream.deviceFormat[mode] = RTAUDIO_SINT16;
m_stream.doByteSwap[mode] = true;
}
else if (mask & AFMT_S32_OE) {
deviceFormat = AFMT_S32_OE;
m_stream.deviceFormat[mode] = RTAUDIO_SINT32;
m_stream.doByteSwap[mode] = true;
}
else if (mask & AFMT_S24_OE) {
deviceFormat = AFMT_S24_OE;
m_stream.deviceFormat[mode] = RTAUDIO_SINT24;
m_stream.doByteSwap[mode] = true;
}
else if (mask & AFMT_S8) {
deviceFormat = AFMT_S8;
m_stream.deviceFormat[mode] = RTAUDIO_SINT8;
}
}
if (m_stream.deviceFormat[mode] == 0) {
// This really shouldn't happen ...
close(fd);
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Set the data format.
int32_t temp = deviceFormat;
result = ioctl(fd, SNDCTL_DSP_SETFMT, &deviceFormat);
if (result == -1 || deviceFormat != temp) {
close(fd);
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Attempt to set the buffer size. According to OSS, the minimum
// number of buffers is two. The supposed minimum buffer size is 16
// bytes, so that will be our lower bound. The argument to this
// call is in the form 0xMMMMSSSS (hex), where the buffer size (in
// bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
// We'll check the actual value used near the end of the setup
// procedure.
int32_t ossBufferBytes = *bufferSize * formatBytes(m_stream.deviceFormat[mode]) * deviceChannels;
if (ossBufferBytes < 16) ossBufferBytes = 16;
int32_t buffers = 0;
if (options) buffers = options->numberOfBuffers;
if (options && options->flags & RTAUDIO_MINIMIZE_LATENCY) buffers = 2;
if (buffers < 2) buffers = 3;
temp = ((int) buffers << 16) + (int)(log10((double)ossBufferBytes) / log10(2.0));
result = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &temp);
if (result == -1) {
close(fd);
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
m_stream.nBuffers = buffers;
// Save buffer size (in sample frames).
*bufferSize = ossBufferBytes / (formatBytes(m_stream.deviceFormat[mode]) * deviceChannels);
m_stream.bufferSize = *bufferSize;
// Set the sample rate.
int32_t srate = sampleRate;
result = ioctl(fd, SNDCTL_DSP_SPEED, &srate);
if (result == -1) {
close(fd);
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
// Verify the sample rate setup worked.
if (abs(srate - sampleRate) > 100) {
close(fd);
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
m_errorText = m_errorStream.str();
return FAILURE;
}
m_stream.sampleRate = sampleRate;
if (mode == INPUT && m_stream.mode == OUTPUT && m_stream.device[0] == device) {
// We're doing duplex setup here.
m_stream.deviceFormat[0] = m_stream.deviceFormat[1];
m_stream.nDeviceChannels[0] = deviceChannels;
}
// Set interleaving parameters.
m_stream.userInterleaved = true;
m_stream.deviceInterleaved[mode] = true;
if (options && options->flags & RTAUDIO_NONINTERLEAVED)
m_stream.userInterleaved = false;
// Set flags for buffer conversion
m_stream.doConvertBuffer[mode] = false;
if (m_stream.userFormat != m_stream.deviceFormat[mode])
m_stream.doConvertBuffer[mode] = true;
if (m_stream.nUserChannels[mode] < m_stream.nDeviceChannels[mode])
m_stream.doConvertBuffer[mode] = true;
if (m_stream.userInterleaved != m_stream.deviceInterleaved[mode] &&
m_stream.nUserChannels[mode] > 1)
m_stream.doConvertBuffer[mode] = true;
// Allocate the stream handles if necessary and then save.
if (m_stream.apiHandle == 0) {
try {
handle = new OssHandle;
}
catch (std::bad_alloc&) {
m_errorText = "airtaudio::api::Oss::probeDeviceOpen: error allocating OssHandle memory.";
goto error;
}
if (pthread_cond_init(&handle->runnable, NULL)) {
m_errorText = "airtaudio::api::Oss::probeDeviceOpen: error initializing pthread condition variable.";
goto error;
}
m_stream.apiHandle = (void *) handle;
}
else {
handle = (OssHandle *) m_stream.apiHandle;
}
handle->id[mode] = fd;
// Allocate necessary internal buffers.
uint64_t bufferBytes;
bufferBytes = m_stream.nUserChannels[mode] * *bufferSize * formatBytes(m_stream.userFormat);
m_stream.userBuffer[mode] = (char *) calloc(bufferBytes, 1);
if (m_stream.userBuffer[mode] == NULL) {
m_errorText = "airtaudio::api::Oss::probeDeviceOpen: error allocating user buffer memory.";
goto error;
}
if (m_stream.doConvertBuffer[mode]) {
bool makeBuffer = true;
bufferBytes = m_stream.nDeviceChannels[mode] * formatBytes(m_stream.deviceFormat[mode]);
if (mode == INPUT) {
if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
if (bufferBytes <= bytesOut) makeBuffer = false;
}
}
if (makeBuffer) {
bufferBytes *= *bufferSize;
if (m_stream.deviceBuffer) free(m_stream.deviceBuffer);
m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_stream.deviceBuffer == NULL) {
m_errorText = "airtaudio::api::Oss::probeDeviceOpen: error allocating device buffer memory.";
goto error;
}
}
}
m_stream.device[mode] = device;
m_stream.state = STREAM_STOPPED;
// Setup the buffer conversion information structure.
if (m_stream.doConvertBuffer[mode]) setConvertInfo(mode, firstChannel);
// Setup thread if necessary.
if (m_stream.mode == OUTPUT && mode == INPUT) {
// We had already set up an output stream.
m_stream.mode = DUPLEX;
if (m_stream.device[0] == device) handle->id[0] = fd;
}
else {
m_stream.mode = mode;
// Setup callback thread.
m_stream.callbackInfo.object = (void *) this;
// Set the thread attributes for joinable and realtime scheduling
// priority. The higher priority will only take affect if the
// program is run as root or suid.
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
#ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
if (options && options->flags & RTAUDIO_SCHEDULE_REALTIME) {
struct sched_param param;
int32_t priority = options->priority;
int32_t min = sched_get_priority_min(SCHED_RR);
int32_t max = sched_get_priority_max(SCHED_RR);
if (priority < min) priority = min;
else if (priority > max) priority = max;
param.sched_priority = priority;
pthread_attr_setschedparam(&attr, &param);
pthread_attr_setschedpolicy(&attr, SCHED_RR);
}
else
pthread_attr_setschedpolicy(&attr, SCHED_OTHER);
#else
pthread_attr_setschedpolicy(&attr, SCHED_OTHER);
#endif
m_stream.callbackInfo.isRunning = true;
result = pthread_create(&m_stream.callbackInfo.thread, &attr, ossCallbackHandler, &m_stream.callbackInfo);
pthread_attr_destroy(&attr);
if (result) {
m_stream.callbackInfo.isRunning = false;
m_errorText = "airtaudio::api::Oss::error creating callback thread!";
goto error;
}
}
return SUCCESS;
error:
if (handle) {
pthread_cond_destroy(&handle->runnable);
if (handle->id[0]) close(handle->id[0]);
if (handle->id[1]) close(handle->id[1]);
delete handle;
m_stream.apiHandle = 0;
}
for (int32_t i=0; i<2; i++) {
if (m_stream.userBuffer[i]) {
free(m_stream.userBuffer[i]);
m_stream.userBuffer[i] = 0;
}
}
if (m_stream.deviceBuffer) {
free(m_stream.deviceBuffer);
m_stream.deviceBuffer = 0;
}
return FAILURE;
}
// Close the currently open OSS stream: stop the callback thread, halt the
// device(s) if still running, release the OSS handle and free all internal
// buffers. Emits a warning (and returns) when no stream is open.
void airtaudio::api::Oss::closeStream()
{
	if (m_stream.state == STREAM_CLOSED) {
		m_errorText = "airtaudio::api::Oss::closeStream(): no open stream to close!";
		error(airtaudio::errorWarning);
		return;
	}
	OssHandle *handle = (OssHandle *) m_stream.apiHandle;
	// Ask the callback thread to exit, then wake it if it is parked on the
	// "runnable" condition (it waits there while the stream is stopped).
	m_stream.callbackInfo.isRunning = false;
	m_stream.mutex.lock();
	if (m_stream.state == STREAM_STOPPED)
		// NOTE(review): handle is dereferenced here but only null-checked
		// further below — assumes an open stream always has a handle; confirm.
		pthread_cond_signal(&handle->runnable);
	m_stream.mutex.unlock();
	pthread_join(m_stream.callbackInfo.thread, NULL);
	// If the stream was still running, halt the device driving it.
	if (m_stream.state == STREAM_RUNNING) {
		if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX)
			ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
		else
			ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
		m_stream.state = STREAM_STOPPED;
	}
	if (handle) {
		pthread_cond_destroy(&handle->runnable);
		// Close both device descriptors when they were opened.
		if (handle->id[0]) close(handle->id[0]);
		if (handle->id[1]) close(handle->id[1]);
		delete handle;
		m_stream.apiHandle = 0;
	}
	// Release the user-side staging buffers ...
	for (int32_t i=0; i<2; i++) {
		if (m_stream.userBuffer[i]) {
			free(m_stream.userBuffer[i]);
			m_stream.userBuffer[i] = 0;
		}
	}
	// ... and the device-format conversion buffer.
	if (m_stream.deviceBuffer) {
		free(m_stream.deviceBuffer);
		m_stream.deviceBuffer = 0;
	}
	m_stream.mode = UNINITIALIZED;
	m_stream.state = STREAM_CLOSED;
}
void airtaudio::api::Oss::startStream()
{
verifyStream();
if (m_stream.state == STREAM_RUNNING) {
m_errorText = "airtaudio::api::Oss::startStream(): the stream is already running!";
error(airtaudio::errorWarning);
return;
}
m_stream.mutex.lock();
m_stream.state = STREAM_RUNNING;
// No need to do anything else here ... OSS automatically starts
// when fed samples.
m_stream.mutex.unlock();
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
pthread_cond_signal(&handle->runnable);
}
// Stop a running OSS stream. Before halting the output device, a few
// buffers of silence are written so playback drains cleanly instead of
// cutting off mid-buffer. The input device (when distinct) is simply
// halted.
void airtaudio::api::Oss::stopStream()
{
	verifyStream();
	if (m_stream.state == STREAM_STOPPED) {
		m_errorText = "airtaudio::api::Oss::stopStream(): the stream is already stopped!";
		error(airtaudio::errorWarning);
		return;
	}
	m_stream.mutex.lock();
	// The state might change while waiting on a mutex.
	if (m_stream.state == STREAM_STOPPED) {
		m_stream.mutex.unlock();
		return;
	}
	int32_t result = 0;
	OssHandle *handle = (OssHandle *) m_stream.apiHandle;
	if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
		// Flush the output with zeros a few times.
		char *buffer;
		int32_t samples;
		// Fix: was "rtaudio::format" — this port lives in the airtaudio
		// namespace (cf. the Pulse backend), so qualify accordingly.
		airtaudio::format format;
		if (m_stream.doConvertBuffer[0]) {
			buffer = m_stream.deviceBuffer;
			samples = m_stream.bufferSize * m_stream.nDeviceChannels[0];
			format = m_stream.deviceFormat[0];
		} else {
			buffer = m_stream.userBuffer[0];
			samples = m_stream.bufferSize * m_stream.nUserChannels[0];
			format = m_stream.userFormat;
		}
		memset(buffer, 0, samples * formatBytes(format));
		for (uint32_t i=0; i<m_stream.nBuffers+1; i++) {
			result = write(handle->id[0], buffer, samples * formatBytes(format));
			if (result == -1) {
				m_errorText = "airtaudio::api::Oss::stopStream: audio write error.";
				error(airtaudio::errorWarning);
			}
		}
		result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			m_errorStream << "airtaudio::api::Oss::stopStream: system error stopping callback procedure on device (" << m_stream.device[0] << ").";
			m_errorText = m_errorStream.str();
			goto unlock;
		}
		handle->triggered = false;
	}
	if (m_stream.mode == INPUT || (m_stream.mode == DUPLEX && handle->id[0] != handle->id[1])) {
		result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			m_errorStream << "airtaudio::api::Oss::stopStream: system error stopping input callback procedure on device (" << m_stream.device[0] << ").";
			m_errorText = m_errorStream.str();
			goto unlock;
		}
	}
unlock:
	m_stream.state = STREAM_STOPPED;
	m_stream.mutex.unlock();
	if (result != -1) return;
	error(airtaudio::errorSystemError);
}
void airtaudio::api::Oss::abortStream()
{
verifyStream();
if (m_stream.state == STREAM_STOPPED) {
m_errorText = "airtaudio::api::Oss::abortStream(): the stream is already stopped!";
error(airtaudio::errorWarning);
return;
}
m_stream.mutex.lock();
// The state might change while waiting on a mutex.
if (m_stream.state == STREAM_STOPPED) {
m_stream.mutex.unlock();
return;
}
int32_t result = 0;
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
if (result == -1) {
m_errorStream << "airtaudio::api::Oss::abortStream: system error stopping callback procedure on device (" << m_stream.device[0] << ").";
m_errorText = m_errorStream.str();
goto unlock;
}
handle->triggered = false;
}
if (m_stream.mode == INPUT || (m_stream.mode == DUPLEX && handle->id[0] != handle->id[1])) {
result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
if (result == -1) {
m_errorStream << "airtaudio::api::Oss::abortStream: system error stopping input callback procedure on device (" << m_stream.device[0] << ").";
m_errorText = m_errorStream.str();
goto unlock;
}
}
unlock:
m_stream.state = STREAM_STOPPED;
m_stream.mutex.unlock();
if (result != -1) return;
*error(airtaudio::errorSystemError);
}
// One iteration of the OSS callback loop: wait while the stream is
// stopped, invoke the user callback, then push output samples to / pull
// input samples from the device, converting and byte-swapping as
// configured during probeDeviceOpen().
void airtaudio::api::Oss::callbackEvent()
{
	OssHandle *handle = (OssHandle *) m_stream.apiHandle;
	if (m_stream.state == STREAM_STOPPED) {
		// Park on the condition until startStream()/closeStream() wakes us.
		m_stream.mutex.lock();
		pthread_cond_wait(&handle->runnable, &m_stream.mutex);
		if (m_stream.state != STREAM_RUNNING) {
			m_stream.mutex.unlock();
			return;
		}
		m_stream.mutex.unlock();
	}
	if (m_stream.state == STREAM_CLOSED) {
		m_errorText = "airtaudio::api::Oss::callbackEvent(): the stream is closed ... this shouldn't happen!";
		error(airtaudio::errorWarning);
		return;
	}
	// Invoke user callback to get fresh output data.
	int32_t doStopStream = 0;
	airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) m_stream.callbackInfo.callback;
	double streamTime = getStreamTime();
	// Fix: was "rtaudio::streamStatus" — wrong namespace for this port.
	airtaudio::streamStatus status = 0;
	if (m_stream.mode != INPUT && handle->xrun[0] == true) {
		// NOTE(review): RTAUDIO_* status flags are inherited from RtAudio —
		// confirm they are defined in the airtaudio headers.
		status |= RTAUDIO_OUTPUT_UNDERFLOW;
		handle->xrun[0] = false;
	}
	if (m_stream.mode != OUTPUT && handle->xrun[1] == true) {
		status |= RTAUDIO_INPUT_OVERFLOW;
		handle->xrun[1] = false;
	}
	doStopStream = callback(m_stream.userBuffer[0], m_stream.userBuffer[1],
	                        m_stream.bufferSize, streamTime, status, m_stream.callbackInfo.userData);
	if (doStopStream == 2) {
		this->abortStream();
		return;
	}
	m_stream.mutex.lock();
	// The state might change while waiting on a mutex.
	if (m_stream.state == STREAM_STOPPED) goto unlock;
	int32_t result;
	char *buffer;
	int32_t samples;
	// Fix: was "rtaudio::format" — wrong namespace for this port.
	airtaudio::format format;
	if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
		// Setup parameters and do buffer conversion if necessary.
		if (m_stream.doConvertBuffer[0]) {
			buffer = m_stream.deviceBuffer;
			convertBuffer(buffer, m_stream.userBuffer[0], m_stream.convertInfo[0]);
			samples = m_stream.bufferSize * m_stream.nDeviceChannels[0];
			format = m_stream.deviceFormat[0];
		} else {
			buffer = m_stream.userBuffer[0];
			samples = m_stream.bufferSize * m_stream.nUserChannels[0];
			format = m_stream.userFormat;
		}
		// Do byte swapping if necessary.
		if (m_stream.doByteSwap[0])
			byteSwapBuffer(buffer, samples, format);
		if (m_stream.mode == DUPLEX && handle->triggered == false) {
			// First duplex write: prime the device, then enable input and
			// output simultaneously so both directions start in sync.
			int32_t trig = 0;
			ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
			result = write(handle->id[0], buffer, samples * formatBytes(format));
			trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
			ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
			handle->triggered = true;
		}
		else
			// Write samples to device.
			result = write(handle->id[0], buffer, samples * formatBytes(format));
		if (result == -1) {
			// We'll assume this is an underrun, though there isn't a
			// specific means for determining that.
			handle->xrun[0] = true;
			m_errorText = "airtaudio::api::Oss::callbackEvent: audio write error.";
			error(airtaudio::errorWarning);
			// Continue on to input section.
		}
	}
	if (m_stream.mode == INPUT || m_stream.mode == DUPLEX) {
		// Setup parameters.
		if (m_stream.doConvertBuffer[1]) {
			buffer = m_stream.deviceBuffer;
			samples = m_stream.bufferSize * m_stream.nDeviceChannels[1];
			format = m_stream.deviceFormat[1];
		} else {
			buffer = m_stream.userBuffer[1];
			samples = m_stream.bufferSize * m_stream.nUserChannels[1];
			format = m_stream.userFormat;
		}
		// Read samples from device.
		result = read(handle->id[1], buffer, samples * formatBytes(format));
		if (result == -1) {
			// We'll assume this is an overrun, though there isn't a
			// specific means for determining that.
			handle->xrun[1] = true;
			m_errorText = "airtaudio::api::Oss::callbackEvent: audio read error.";
			error(airtaudio::errorWarning);
			goto unlock;
		}
		// Do byte swapping if necessary.
		if (m_stream.doByteSwap[1])
			byteSwapBuffer(buffer, samples, format);
		// Do buffer conversion if necessary.
		if (m_stream.doConvertBuffer[1])
			convertBuffer(m_stream.userBuffer[1], m_stream.deviceBuffer, m_stream.convertInfo[1]);
	}
unlock:
	m_stream.mutex.unlock();
	// Fix: was "RtApi::tickStreamTime()" — the base class in this port is
	// airtaudio::Api (cf. the Pulse backend's callbackEvent).
	airtaudio::Api::tickStreamTime();
	if (doStopStream == 1) this->stopStream();
}
static void *ossCallbackHandler(void *ptr)
{
CallbackInfo *info = (CallbackInfo *) ptr;
RtApiOss *object = (RtApiOss *) info->object;
bool *isRunning = &info->isRunning;
while (*isRunning == true) {
pthread_testcancel();
object->callbackEvent();
}
pthread_exit(NULL);
}
//******************** End of __LINUX_OSS__ *********************//
#endif

45
airtaudio/api/Oss.h Normal file
View File

@ -0,0 +1,45 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_OSS_H__) && defined(__LINUX_OSS__)
#define __AIRTAUDIO_API_OSS_H__
namespace airtaudio {
	namespace api {
		// OSS (Open Sound System) backend of the airtaudio API.
		class Oss: public airtaudio::Api {
			public:
				Oss(void);
				~Oss(void);
				// Identify this backend to the generic Interface layer.
				airtaudio::api::type getCurrentApi(void) {
					return airtaudio::api::LINUX_OSS;
				}
				// Number of OSS devices detected on the system.
				uint32_t getDeviceCount(void);
				// Capability description of device _device.
				airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
				void closeStream(void);
				void startStream(void);
				void stopStream(void);
				void abortStream(void);
				// This function is intended for internal use only. It must be
				// public because it is called by the internal callback handler,
				// which is not a member of RtAudio. External use of this function
				// will most likely produce highly undesirable results!
				void callbackEvent(void);
			private:
				// Open one direction (_mode) of the stream on _device; called by
				// the generic openStream() machinery.
				bool probeDeviceOpen(uint32_t _device,
				                     airtaudio::api::StreamMode _mode,
				                     uint32_t _channels,
				                     uint32_t _firstChannel,
				                     uint32_t _sampleRate,
				                     airtaudio::format _format,
				                     uint32_t *_bufferSize,
				                     airtaudio::StreamOptions *_options);
		};
	};
};
#endif

446
airtaudio/api/Pulse.cpp Normal file
View File

@ -0,0 +1,446 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if defined(__LINUX_PULSE__)
#include <unistd.h>
#include <limits.h>
#include <airtaudio/Interface.h>
// Code written by Peter Meerwald, pmeerw@pmeerw.net
// and Tristan Matthews.
#include <pulse/error.h>
#include <pulse/simple.h>
#include <cstdio>
// Sample rates the PulseAudio backend advertises; the array is
// zero-terminated so it can be walked without a separate length constant.
static const uint32_t SUPPORTED_SAMPLERATES[] = {
	8000,
	16000,
	22050,
	32000,
	44100,
	48000,
	96000,
	0
};
// Pairs an airtaudio sample format with the matching PulseAudio sample
// format; the table below is terminated by a {0, PA_SAMPLE_INVALID} entry.
struct rtaudio_pa_format_mapping_t {
	airtaudio::format airtaudio_format;
	pa_sample_format_t pa_format;
};
// Formats the PulseAudio backend can pass straight through to the server.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
	{airtaudio::SINT16, PA_SAMPLE_S16LE},
	{airtaudio::SINT32, PA_SAMPLE_S32LE},
	{airtaudio::FLOAT32, PA_SAMPLE_FLOAT32LE},
	{0, PA_SAMPLE_INVALID}};
// Per-stream PulseAudio state: the pa_simple connections for playback and
// record, the heap-allocated callback thread, and the condition variable
// used to park that thread while the stream is stopped.
struct PulseAudioHandle {
	pa_simple *s_play;   // playback connection (0 when output is unused)
	pa_simple *s_rec;    // record connection (0 when input is unused)
	std::thread* thread; // callback thread, created in probeDeviceOpen()
	std::condition_variable runnable_cv;
	bool runnable;       // set true to release the callback thread from its wait
	// Fix: 'thread' was left uninitialized by the original constructor,
	// so any null test or join on a partially opened handle read garbage.
	PulseAudioHandle() : s_play(0), s_rec(0), thread(0), runnable(false) { }
};
// Destructor: ensure any open stream is torn down before the API object
// disappears.
airtaudio::api::Pulse::~Pulse()
{
	if (m_stream.state != STREAM_CLOSED) {
		closeStream();
	}
}
// PulseAudio is exposed as a single logical device (the server default),
// so the device count is always one.
uint32_t airtaudio::api::Pulse::getDeviceCount(void) {
	return 1;
}
// Describe the single PulseAudio pseudo-device: stereo in/out/duplex, the
// fixed list of supported rates, and the three natively handled formats.
airtaudio::DeviceInfo airtaudio::api::Pulse::getDeviceInfo(uint32_t _device) {
	airtaudio::DeviceInfo info;
	info.probed = true;
	info.name = "PulseAudio";
	info.isDefaultOutput = true;
	info.isDefaultInput = true;
	info.outputChannels = 2;
	info.inputChannels = 2;
	info.duplexChannels = 2;
	// Copy the zero-terminated rate table into the info structure.
	for (uint32_t idx = 0; SUPPORTED_SAMPLERATES[idx] != 0; ++idx) {
		info.sampleRates.push_back(SUPPORTED_SAMPLERATES[idx]);
	}
	info.nativeFormats = SINT16 | SINT32 | FLOAT32;
	return info;
}
static void pulseaudio_callback(void* _user) {
airtaudio::CallbackInfo *cbi = static_cast<airtaudio::CallbackInfo *>(_user);
airtaudio::api::Pulse *context = static_cast<airtaudio::api::Pulse*>(cbi->object);
volatile bool *isRunning = &cbi->isRunning;
while (*isRunning) {
context->callbackEvent();
}
}
// Close the PulseAudio stream: stop and join the callback thread, drop the
// server connections, and free the per-stream state and all buffers.
void airtaudio::api::Pulse::closeStream(void) {
	PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
	m_stream.callbackInfo.isRunning = false;
	if (pah) {
		m_stream.mutex.lock();
		if (m_stream.state == STREAM_STOPPED) {
			// Wake the callback thread parked in callbackEvent() so it can exit.
			pah->runnable = true;
			pah->runnable_cv.notify_one(); // fix: dropped a stray empty statement (";;")
		}
		m_stream.mutex.unlock();
		if (pah->thread) {
			pah->thread->join();
			// Fix: the std::thread object is heap-allocated in
			// probeDeviceOpen(); it must be deleted here or it leaks.
			delete pah->thread;
			pah->thread = NULL;
		}
		if (pah->s_play) {
			// Drain pending playback before dropping the connection.
			pa_simple_flush(pah->s_play, NULL);
			pa_simple_free(pah->s_play);
		}
		if (pah->s_rec) {
			pa_simple_free(pah->s_rec);
		}
		delete pah;
		m_stream.apiHandle = 0;
	}
	if (m_stream.userBuffer[0] != NULL) {
		free(m_stream.userBuffer[0]);
		m_stream.userBuffer[0] = NULL;
	}
	if (m_stream.userBuffer[1] != NULL) {
		free(m_stream.userBuffer[1]);
		m_stream.userBuffer[1] = NULL;
	}
	// Fix: the conversion buffer allocated in probeDeviceOpen() was never
	// released here (leak).
	if (m_stream.deviceBuffer != NULL) {
		free(m_stream.deviceBuffer);
		m_stream.deviceBuffer = NULL;
	}
	m_stream.state = STREAM_CLOSED;
	m_stream.mode = UNINITIALIZED;
}
// One iteration of the PulseAudio callback loop: wait while the stream is
// stopped, run the user callback, then write output to / read input from
// the server via the pa_simple API, converting formats when required.
void airtaudio::api::Pulse::callbackEvent(void) {
	PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
	if (m_stream.state == STREAM_STOPPED) {
		// Park until startStream() or closeStream() flips pah->runnable.
		std::unique_lock<std::mutex> lck(m_stream.mutex);
		while (!pah->runnable) {
			pah->runnable_cv.wait(lck);
		}
		if (m_stream.state != STREAM_RUNNING) {
			// Fix: the unique_lock releases the mutex on destruction; the
			// original additionally called m_stream.mutex.unlock() here,
			// unlocking the mutex twice (undefined behavior).
			return;
		}
	}
	if (m_stream.state == STREAM_CLOSED) {
		m_errorText = "airtaudio::api::Pulse::callbackEvent(): the stream is closed ... "
			"this shouldn't happen!";
		error(airtaudio::errorWarning);
		return;
	}
	// Invoke the user callback to produce/consume one buffer of audio.
	airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) m_stream.callbackInfo.callback;
	double streamTime = getStreamTime();
	airtaudio::streamStatus status = 0;
	int32_t doStopStream = callback(m_stream.userBuffer[OUTPUT],
	                                m_stream.userBuffer[INPUT],
	                                m_stream.bufferSize,
	                                streamTime,
	                                status,
	                                m_stream.callbackInfo.userData);
	if (doStopStream == 2) {
		abortStream();
		return;
	}
	m_stream.mutex.lock();
	// Pick the buffer the server actually sees: the conversion buffer when
	// format conversion is active, the user buffer otherwise.
	void *pulse_in = m_stream.doConvertBuffer[INPUT] ? m_stream.deviceBuffer : m_stream.userBuffer[INPUT];
	void *pulse_out = m_stream.doConvertBuffer[OUTPUT] ? m_stream.deviceBuffer : m_stream.userBuffer[OUTPUT];
	// The state might have changed while waiting on the mutex.
	if (m_stream.state != STREAM_RUNNING) {
		goto unlock;
	}
	int32_t pa_error;
	size_t bytes;
	if ( m_stream.mode == OUTPUT
	  || m_stream.mode == DUPLEX) {
		if (m_stream.doConvertBuffer[OUTPUT]) {
			convertBuffer(m_stream.deviceBuffer,
			              m_stream.userBuffer[OUTPUT],
			              m_stream.convertInfo[OUTPUT]);
			bytes = m_stream.nDeviceChannels[OUTPUT] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[OUTPUT]);
		} else {
			bytes = m_stream.nUserChannels[OUTPUT] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
		}
		if (pa_simple_write(pah->s_play, pulse_out, bytes, &pa_error) < 0) {
			m_errorStream << "airtaudio::api::Pulse::callbackEvent: audio write error, " << pa_strerror(pa_error) << ".";
			m_errorText = m_errorStream.str();
			error(airtaudio::errorWarning);
		}
	}
	if (m_stream.mode == INPUT || m_stream.mode == DUPLEX) {
		if (m_stream.doConvertBuffer[INPUT]) {
			bytes = m_stream.nDeviceChannels[INPUT] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[INPUT]);
		} else {
			bytes = m_stream.nUserChannels[INPUT] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
		}
		if (pa_simple_read(pah->s_rec, pulse_in, bytes, &pa_error) < 0) {
			m_errorStream << "airtaudio::api::Pulse::callbackEvent: audio read error, " << pa_strerror(pa_error) << ".";
			m_errorText = m_errorStream.str();
			error(airtaudio::errorWarning);
		}
		if (m_stream.doConvertBuffer[INPUT]) {
			convertBuffer(m_stream.userBuffer[INPUT],
			              m_stream.deviceBuffer,
			              m_stream.convertInfo[INPUT]);
		}
	}
unlock:
	m_stream.mutex.unlock();
	airtaudio::Api::tickStreamTime();
	if (doStopStream == 1) {
		stopStream();
	}
}
void airtaudio::api::Pulse::startStream(void) {
PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
if (m_stream.state == STREAM_CLOSED) {
m_errorText = "airtaudio::api::Pulse::startStream(): the stream is not open!";
error(airtaudio::errorInvalidUse);
return;
}
if (m_stream.state == STREAM_RUNNING) {
m_errorText = "airtaudio::api::Pulse::startStream(): the stream is already running!";
error(airtaudio::errorWarning);
return;
}
m_stream.mutex.lock();
m_stream.state = STREAM_RUNNING;
pah->runnable = true;
pah->runnable_cv.notify_one();
m_stream.mutex.unlock();
}
void airtaudio::api::Pulse::stopStream(void) {
PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
if (m_stream.state == STREAM_CLOSED) {
m_errorText = "airtaudio::api::Pulse::stopStream(): the stream is not open!";
error(airtaudio::errorInvalidUse);
return;
}
if (m_stream.state == STREAM_STOPPED) {
m_errorText = "airtaudio::api::Pulse::stopStream(): the stream is already stopped!";
error(airtaudio::errorWarning);
return;
}
m_stream.state = STREAM_STOPPED;
m_stream.mutex.lock();
if (pah && pah->s_play) {
int32_t pa_error;
if (pa_simple_drain(pah->s_play, &pa_error) < 0) {
m_errorStream << "airtaudio::api::Pulse::stopStream: error draining output device, " <<
pa_strerror(pa_error) << ".";
m_errorText = m_errorStream.str();
m_stream.mutex.unlock();
error(airtaudio::errorSystemError);
return;
}
}
m_stream.state = STREAM_STOPPED;
m_stream.mutex.unlock();
}
void airtaudio::api::Pulse::abortStream(void) {
PulseAudioHandle *pah = static_cast<PulseAudioHandle*>(m_stream.apiHandle);
if (m_stream.state == STREAM_CLOSED) {
m_errorText = "airtaudio::api::Pulse::abortStream(): the stream is not open!";
error(airtaudio::errorInvalidUse);
return;
}
if (m_stream.state == STREAM_STOPPED) {
m_errorText = "airtaudio::api::Pulse::abortStream(): the stream is already stopped!";
error(airtaudio::errorWarning);
return;
}
m_stream.state = STREAM_STOPPED;
m_stream.mutex.lock();
if (pah && pah->s_play) {
int32_t pa_error;
if (pa_simple_flush(pah->s_play, &pa_error) < 0) {
m_errorStream << "airtaudio::api::Pulse::abortStream: error flushing output device, " <<
pa_strerror(pa_error) << ".";
m_errorText = m_errorStream.str();
m_stream.mutex.unlock();
error(airtaudio::errorSystemError);
return;
}
}
m_stream.state = STREAM_STOPPED;
m_stream.mutex.unlock();
}
// Open one direction (INPUT or OUTPUT) of the single PulseAudio
// pseudo-device: validate the requested parameters against what the
// pa_simple API supports, allocate the user/device buffers, connect to the
// server, and (on the first open of a stream) spawn the callback thread.
// Returns true on success; on failure all partially allocated state is
// released and FAILURE is returned.
bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t device,
                                            airtaudio::api::StreamMode mode,
                                            uint32_t channels,
                                            uint32_t firstChannel,
                                            uint32_t sampleRate,
                                            airtaudio::format format,
                                            uint32_t *bufferSize,
                                            airtaudio::StreamOptions *options) {
	PulseAudioHandle *pah = 0;
	uint64_t bufferBytes = 0;
	pa_sample_spec ss;
	// Only device 0 exists (see getDeviceCount()).
	if (device != 0) {
		return false;
	}
	if (mode != INPUT && mode != OUTPUT) {
		return false;
	}
	// The pa_simple setup below handles mono or stereo only.
	if (channels != 1 && channels != 2) {
		m_errorText = "airtaudio::api::Pulse::probeDeviceOpen: unsupported number of channels.";
		return false;
	}
	ss.channels = channels;
	// Channel offsets are not supported by this backend.
	if (firstChannel != 0) {
		return false;
	}
	// The requested rate must be one of the fixed supported rates.
	bool sr_found = false;
	for (const uint32_t *sr = SUPPORTED_SAMPLERATES; *sr; ++sr) {
		if (sampleRate == *sr) {
			sr_found = true;
			m_stream.sampleRate = sampleRate;
			ss.rate = sampleRate;
			break;
		}
	}
	if (!sr_found) {
		m_errorText = "airtaudio::api::Pulse::probeDeviceOpen: unsupported sample rate.";
		return false;
	}
	// Map the airtaudio format onto a PulseAudio sample format.
	bool sf_found = 0;
	for (const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
	     sf->airtaudio_format && sf->pa_format != PA_SAMPLE_INVALID;
	     ++sf) {
		if (format == sf->airtaudio_format) {
			sf_found = true;
			m_stream.userFormat = sf->airtaudio_format;
			ss.format = sf->pa_format;
			break;
		}
	}
	if (!sf_found) {
		m_errorText = "airtaudio::api::Pulse::probeDeviceOpen: unsupported sample format.";
		return false;
	}
	// Set interleaving parameters.
	if (options && options->flags & NONINTERLEAVED) {
		m_stream.userInterleaved = false;
	} else {
		m_stream.userInterleaved = true;
	}
	m_stream.deviceInterleaved[mode] = true;
	m_stream.nBuffers = 1;
	m_stream.doByteSwap[mode] = false;
	// Conversion is only needed to interleave a multi-channel,
	// non-interleaved user buffer for the (interleaved) server stream.
	m_stream.doConvertBuffer[mode] = channels > 1 && !m_stream.userInterleaved;
	m_stream.deviceFormat[mode] = m_stream.userFormat;
	m_stream.nUserChannels[mode] = channels;
	m_stream.nDeviceChannels[mode] = channels + firstChannel;
	m_stream.channelOffset[mode] = 0;
	// Allocate necessary internal buffers.
	bufferBytes = m_stream.nUserChannels[mode] * *bufferSize * formatBytes(m_stream.userFormat);
	m_stream.userBuffer[mode] = (char *) calloc(bufferBytes, 1);
	if (m_stream.userBuffer[mode] == NULL) {
		m_errorText = "airtaudio::api::Pulse::probeDeviceOpen: error allocating user buffer memory.";
		goto error;
	}
	m_stream.bufferSize = *bufferSize;
	if (m_stream.doConvertBuffer[mode]) {
		bool makeBuffer = true;
		bufferBytes = m_stream.nDeviceChannels[mode] * formatBytes(m_stream.deviceFormat[mode]);
		if (mode == INPUT) {
			// In duplex, reuse the output-side device buffer when it is
			// already large enough for the input side.
			if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
				uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
				if (bufferBytes <= bytesOut) makeBuffer = false;
			}
		}
		if (makeBuffer) {
			bufferBytes *= *bufferSize;
			if (m_stream.deviceBuffer) free(m_stream.deviceBuffer);
			m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1);
			if (m_stream.deviceBuffer == NULL) {
				m_errorText = "airtaudio::api::Pulse::probeDeviceOpen: error allocating device buffer memory.";
				goto error;
			}
		}
	}
	m_stream.device[mode] = device;
	// Setup the buffer conversion information structure.
	if (m_stream.doConvertBuffer[mode]) {
		setConvertInfo(mode, firstChannel);
	}
	// Allocate the per-stream handle on the first open only (the second
	// direction of a duplex stream reuses it).
	if (!m_stream.apiHandle) {
		// NOTE(review): this inner 'pah' shadows the outer one; the outer is
		// re-read from apiHandle just below, so behavior is unaffected.
		PulseAudioHandle *pah = new PulseAudioHandle;
		// NOTE(review): dead check — operator new throws on failure rather
		// than returning null.
		if (!pah) {
			m_errorText = "airtaudio::api::Pulse::probeDeviceOpen: error allocating memory for handle.";
			goto error;
		}
		m_stream.apiHandle = pah;
	}
	pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
	// NOTE(review): this variable shadows the error() member function from
	// here down and shares its name with the 'error:' label; confirm no
	// later call to error() is needed in this scope.
	int32_t error;
	// Connect the requested direction to the PulseAudio server.
	switch (mode) {
	case INPUT:
		pah->s_rec = pa_simple_new(NULL, "RtAudio", PA_STREAM_RECORD, NULL, "Record", &ss, NULL, NULL, &error);
		if (!pah->s_rec) {
			m_errorText = "airtaudio::api::Pulse::probeDeviceOpen: error connecting input to PulseAudio server.";
			goto error;
		}
		break;
	case OUTPUT:
		pah->s_play = pa_simple_new(NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error);
		if (!pah->s_play) {
			m_errorText = "airtaudio::api::Pulse::probeDeviceOpen: error connecting output to PulseAudio server.";
			goto error;
		}
		break;
	default:
		goto error;
	}
	// The first direction sets the mode; a second, different direction
	// makes the stream duplex. Re-opening the same direction is an error.
	if (m_stream.mode == UNINITIALIZED) {
		m_stream.mode = mode;
	} else if (m_stream.mode == mode) {
		goto error;
	} else {
		m_stream.mode = DUPLEX;
	}
	// Spawn the callback thread once per stream.
	if (!m_stream.callbackInfo.isRunning) {
		m_stream.callbackInfo.object = this;
		m_stream.callbackInfo.isRunning = true;
		pah->thread = new std::thread(pulseaudio_callback, (void *)&m_stream.callbackInfo);
		if (pah->thread == NULL) {
			m_errorText = "airtaudio::api::Pulse::probeDeviceOpen: error creating thread.";
			goto error;
		}
	}
	m_stream.state = STREAM_STOPPED;
	return true;
error:
	// NOTE(review): the handle is only released when isRunning is set, which
	// is false for most failure paths above — confirm this is the intended
	// cleanup condition.
	if (pah && m_stream.callbackInfo.isRunning) {
		delete pah;
		m_stream.apiHandle = 0;
	}
	for (int32_t i=0; i<2; i++) {
		if (m_stream.userBuffer[i]) {
			free(m_stream.userBuffer[i]);
			m_stream.userBuffer[i] = 0;
		}
	}
	if (m_stream.deviceBuffer) {
		free(m_stream.deviceBuffer);
		m_stream.deviceBuffer = 0;
	}
	// NOTE(review): the function returns bool elsewhere; FAILURE is
	// presumably 0/false — confirm against the common definitions.
	return FAILURE;
}
#endif

46
airtaudio/api/Pulse.h Normal file
View File

@ -0,0 +1,46 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#if !defined(__AIRTAUDIO_API_PULSE_H__) && defined(__LINUX_PULSE__)
#define __AIRTAUDIO_API_PULSE_H__
namespace airtaudio {
	namespace api {
		// PulseAudio backend of the airtaudio API (pa_simple based).
		class Pulse: public airtaudio::Api {
			public:
				~Pulse(void);
				// Identify this backend to the generic Interface layer.
				airtaudio::api::type getCurrentApi(void) {
					return airtaudio::api::LINUX_PULSE;
				}
				// Always 1: PulseAudio is exposed as a single logical device.
				uint32_t getDeviceCount(void);
				// Capability description of device _device.
				airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
				void closeStream(void);
				void startStream(void);
				void stopStream(void);
				void abortStream(void);
				// This function is intended for internal use only. It must be
				// public because it is called by the internal callback handler,
				// which is not a member of RtAudio. External use of this function
				// will most likely produce highly undesirable results!
				void callbackEvent(void);
			private:
				// Cached device descriptions.
				// NOTE(review): m_devices/saveDeviceInfo are declared here but
				// not referenced by the visible Pulse.cpp — confirm they are used.
				std::vector<airtaudio::DeviceInfo> m_devices;
				void saveDeviceInfo(void);
				// Open one direction (_mode) of the stream on _device; called by
				// the generic openStream() machinery.
				bool probeDeviceOpen(uint32_t _device,
				                     airtaudio::api::StreamMode _mode,
				                     uint32_t _channels,
				                     uint32_t _firstChannel,
				                     uint32_t _sampleRate,
				                     airtaudio::format _format,
				                     uint32_t *_bufferSize,
				                     airtaudio::StreamOptions *_options);
		};
	};
};
#endif

189
airtaudio/base.h Normal file
View File

@ -0,0 +1,189 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_BASE_H__
#define __AIRTAUDIO_BASE_H__
// NOTE(review): the guard previously read __AIRTAUDIO_ERROR_H__ (apparent
// copy/paste from an error header) — renamed to match the file name so it
// cannot silently collide with a future Error.h using the same guard.
#include <string>
#include <thread>
#include <condition_variable>
#include <mutex>
// define fixed-size types: uintXX_t and intXX_t
#define __STDC_LIMIT_MACROS
// note: on Android this include can overwrite the min/max macros
#include <stdint.h>
#if defined(HAVE_GETTIMEOFDAY)
#include <sys/time.h>
#endif
namespace airtaudio {
	//! Defined error types reported through the error callback.
	enum errorType {
		errorWarning, //!< A non-critical error.
		errorInvalidUse, //!< The function was called incorrectly.
		errorSystemError //!< A system error occurred.
	};
	// airtaudio version
	// NOTE(review): a namespace-scope static in a header gives every
	// translation unit its own copy of this string — confirm this is intended.
	static const std::string VERSION("4.0.12");
	/**
	 * @typedef typedef uint64_t format;
	 * @brief airtaudio data format type.
	 *
	 * Support for signed integers and floats. Audio data fed to/from an
	 * airtaudio stream is assumed to ALWAYS be in host byte order. The
	 * internal routines will automatically take care of any necessary
	 * byte-swapping between the host format and the soundcard. Thus,
	 * endian-ness is not a concern in the following format definitions.
	 *
	 * - \e SINT8: 8-bit signed integer.
	 * - \e SINT16: 16-bit signed integer.
	 * - \e SINT24: 24-bit signed integer.
	 * - \e SINT32: 32-bit signed integer.
	 * - \e FLOAT32: Normalized between plus/minus 1.0.
	 * - \e FLOAT64: Normalized between plus/minus 1.0.
	 */
	typedef uint64_t format;
	static const format SINT8 = 0x1; // 8-bit signed integer.
	static const format SINT16 = 0x2; // 16-bit signed integer.
	static const format SINT24 = 0x4; // 24-bit signed integer.
	static const format SINT32 = 0x8; // 32-bit signed integer.
	static const format FLOAT32 = 0x10; // Normalized between plus/minus 1.0.
	static const format FLOAT64 = 0x20; // Normalized between plus/minus 1.0.
	/**
	 * @typedef typedef uint32_t streamFlags;
	 * @brief airtaudio stream option flags.
	 *
	 * The following flags can be OR'ed together to allow a client to
	 * make changes to the default stream behavior:
	 *
	 * - \e NONINTERLEAVED: Use non-interleaved buffers (default = interleaved).
	 * - \e MINIMIZE_LATENCY: Attempt to set stream parameters for lowest possible latency.
	 * - \e HOG_DEVICE: Attempt grab device for exclusive use.
	 * - \e ALSA_USE_DEFAULT: Use the "default" PCM device (ALSA only).
	 *
	 * By default, airtaudio streams pass and receive audio data from the
	 * client in an interleaved format. By passing the
	 * NONINTERLEAVED flag to the openStream() function, audio
	 * data will instead be presented in non-interleaved buffers. In
	 * this case, each buffer argument in the AirTAudioCallback function
	 * will point to a single array of data, with \c nFrames samples for
	 * each channel concatenated back-to-back. For example, the first
	 * sample of data for the second channel would be located at index \c
	 * nFrames (assuming the \c buffer pointer was recast to the correct
	 * data type for the stream).
	 *
	 * Certain audio APIs offer a number of parameters that influence the
	 * I/O latency of a stream. By default, airtaudio will attempt to set
	 * these parameters internally for robust (glitch-free) performance
	 * (though some APIs, like Windows Direct Sound, make this difficult).
	 * By passing the MINIMIZE_LATENCY flag to the openStream()
	 * function, internal stream settings will be influenced in an attempt
	 * to minimize stream latency, though possibly at the expense of stream
	 * performance.
	 *
	 * If the HOG_DEVICE flag is set, airtaudio will attempt to
	 * open the input and/or output stream device(s) for exclusive use.
	 * Note that this is not possible with all supported audio APIs.
	 *
	 * If the SCHEDULE_REALTIME flag is set, airtaudio will attempt
	 * to select realtime scheduling (round-robin) for the callback thread.
	 *
	 * If the ALSA_USE_DEFAULT flag is set, airtaudio will attempt to
	 * open the "default" PCM device when using the ALSA API. Note that this
	 * will override any specified input or output device id.
	 */
	typedef uint32_t streamFlags;
	static const streamFlags NONINTERLEAVED = 0x1; // Use non-interleaved buffers (default = interleaved).
	static const streamFlags MINIMIZE_LATENCY = 0x2; // Attempt to set stream parameters for lowest possible latency.
	static const streamFlags HOG_DEVICE = 0x4; // Attempt grab device and prevent use by others.
	static const streamFlags SCHEDULE_REALTIME = 0x8; // Try to select realtime scheduling for callback thread.
	static const streamFlags ALSA_USE_DEFAULT = 0x10; // Use the "default" PCM device (ALSA only).
	/**
	 * @typedef typedef uint32_t airtaudio::streamStatus;
	 * @brief airtaudio stream status (over- or underflow) flags.
	 *
	 * Notification of a stream over- or underflow is indicated by a
	 * non-zero stream \c status argument in the AirTAudioCallback function.
	 * The stream status can be one of the following two options,
	 * depending on whether the stream is open for output and/or input:
	 *
	 * - \e INPUT_OVERFLOW: Input data was discarded because of an overflow condition at the driver.
	 * - \e OUTPUT_UNDERFLOW: The output buffer ran low, likely producing a break in the output sound.
	 */
	typedef uint32_t streamStatus;
	static const streamStatus INPUT_OVERFLOW = 0x1; // Input data was discarded because of an overflow condition at the driver.
	static const streamStatus OUTPUT_UNDERFLOW = 0x2; // The output buffer ran low, likely causing a gap in the output sound.
	/**
	 * @brief airtaudio callback function prototype.
	 *
	 * All airtaudio clients must create a function of type AirTAudioCallback
	 * to read and/or write data from/to the audio stream. When the
	 * underlying audio system is ready for new input or output data, this
	 * function will be invoked.
	 *
	 * @param _outputBuffer For output (or duplex) streams, the client
	 *        should write \c nFrames of audio sample frames into this
	 *        buffer. This argument should be recast to the datatype
	 *        specified when the stream was opened. For input-only
	 *        streams, this argument will be NULL.
	 *
	 * @param _inputBuffer For input (or duplex) streams, this buffer will
	 *        hold \c nFrames of input audio sample frames. This
	 *        argument should be recast to the datatype specified when the
	 *        stream was opened. For output-only streams, this argument
	 *        will be NULL.
	 *
	 * @param _nFrames The number of sample frames of input or output
	 *        data in the buffers. The actual buffer size in bytes is
	 *        dependent on the data type and number of channels in use.
	 *
	 * @param _streamTime The number of seconds that have elapsed since the
	 *        stream was started.
	 *
	 * @param _status If non-zero, this argument indicates a data overflow
	 *        or underflow condition for the stream. The particular
	 *        condition can be determined by comparison with the
	 *        streamStatus flags.
	 *
	 * @param _userData A pointer to optional data provided by the client
	 *        when opening the stream (default = NULL).
	 *
	 * To continue normal stream operation, the AirTAudioCallback function
	 * should return a value of zero. To stop the stream and drain the
	 * output buffer, the function should return a value of one. To abort
	 * the stream immediately, the client should return a value of two.
	 */
	typedef int32_t (*AirTAudioCallback)(void *_outputBuffer,
	                                     void *_inputBuffer,
	                                     uint32_t _nFrames,
	                                     double _streamTime,
	                                     airtaudio::streamStatus _status,
	                                     void *_userData);
	/**
	 * @brief airtaudio error callback function prototype.
	 * @param _type Type of error.
	 * @param _errorText Error description.
	 */
	typedef void (*AirTAudioErrorCallback)(airtaudio::errorType _type,
	                                       const std::string &_errorText);
}
#include <airtaudio/DeviceInfo.h>
#include <airtaudio/StreamOptions.h>
#include <airtaudio/StreamParameters.h>
#endif

52
airtaudio/int24_t.h Normal file
View File

@ -0,0 +1,52 @@
/**
* @author Gary P. SCAVONE
*
* @copyright 2001-2013 Gary P. Scavone, all right reserved
*
* @license like MIT (see license file)
*/
#ifndef __AIRTAUDIO_INT24_T_H__
#define __AIRTAUDIO_INT24_T_H__
// Fix: the header used uint8_t/int32_t without including <stdint.h>, so it
// was not self-contained and compiled only by include-order luck.
#include <stdint.h>
#pragma pack(push, 1)
/**
 * @brief Packed 24-bit signed integer sample, stored as 3 bytes, LSB first
 * (host-order little-endian layout; sizeof(int24_t) == 3 thanks to pack(1)).
 */
class int24_t {
	protected:
		uint8_t c3[3]; //!< byte storage, least-significant byte first
	public:
		int24_t(void) {}
		//! @brief Store the low 24 bits of a 32-bit value (high byte discarded).
		int24_t& operator = (const int32_t& i) {
			c3[0] = (i & 0x000000ff);
			c3[1] = (i & 0x0000ff00) >> 8;
			c3[2] = (i & 0x00ff0000) >> 16;
			return *this;
		}
		int24_t(const int24_t& v) {
			*this = v; // implicit copy-assignment: memberwise copy of c3
		}
		//! @brief Construct from a 32-bit integer.
		//! Added: without it, int24_t x(5) was ambiguous between the
		//! double/float/int16_t/int8_t constructors.
		int24_t(const int32_t& i) {
			*this = i;
		}
		int24_t(const double& d) {
			*this = (int32_t)d; // truncates toward zero
		}
		int24_t(const float& f) {
			*this = (int32_t)f; // truncates toward zero
		}
		int24_t(const int16_t& s) {
			*this = (int32_t)s;
		}
		int24_t(const int8_t& c) {
			*this = (int32_t)c;
		}
		//! @brief Sign-extend the stored 24 bits back to a 32-bit integer.
		//! @return value in [-8388608, 8388607]. Now const-qualified so it is
		//! callable on const objects (backward compatible).
		int32_t asInt(void) const {
			int32_t i = c3[0] | (c3[1] << 8) | (c3[2] << 16);
			if (i & 0x800000) { // bit 23 set -> negative: extend the sign
				i |= ~0xffffff;
			}
			return i;
		}
};
#pragma pack(pop)
#endif

66
lutin_airtaudio.py Normal file
View File

@ -0,0 +1,66 @@
#!/usr/bin/python
import lutinModule as module
import lutinTools as tools
import lutinDebug as debug
def get_desc():
	"""Short human-readable description of this lutin module."""
	description = "airtaudio : Generic wrapper on all audio interface"
	return description
def create(target):
	# Build the lutin module description for the airtaudio library.
	# @param target: lutin target object; target.name selects the platform
	#                backends ("Windows", "Linux", "MacOs", ...)
	# @return: the configured module, registered by the lutin build system
	myModule = module.Module(__file__, 'airtaudio', 'LIBRARY')
	# Every backend source is always listed; each one compiles to an empty
	# translation unit unless its enabling macro (__LINUX_ALSA__,
	# __WINDOWS_ASIO__, ...) is exported for the selected target below.
	myModule.add_src_file([
		'airtaudio/Interface.cpp',
		'airtaudio/Api.cpp',
		'airtaudio/api/Alsa.cpp',
		'airtaudio/api/Asio.cpp',
		'airtaudio/api/Core.cpp',
		'airtaudio/api/Ds.cpp',
		'airtaudio/api/Dummy.cpp',
		'airtaudio/api/Jack.cpp',
		'airtaudio/api/Oss.cpp',
		'airtaudio/api/Pulse.cpp'
		])
	if target.name=="Windows":
		# ASIO API on Windows
		# NOTE(review): flag is passed without a "-D" prefix here, while the
		# Linux branch passes "-D__LINUX_ALSA__" — one of the two forms is
		# likely wrong; confirm what add_export_flag_CC expects.
		myModule.add_export_flag_CC(['__WINDOWS_ASIO__'])
		# Windows DirectSound API (disabled for now)
		#myModule.add_export_flag_CC(['__WINDOWS_DS__'])
	elif target.name=="Linux":
		# Linux Alsa API
		myModule.add_export_flag_CC(['-D__LINUX_ALSA__'])
		myModule.add_export_flag_LD("-lasound")
		# Linux Jack API
		myModule.add_export_flag_CC(['-D__UNIX_JACK__'])
		myModule.add_export_flag_LD("-ljack")
		# Linux PulseAudio API
		myModule.add_export_flag_CC(['-D__LINUX_PULSE__'])
		myModule.add_export_flag_LD("-lpulse-simple")
		myModule.add_export_flag_LD("-lpulse")
		# dependent libraries:
		myModule.add_export_flag_LD("-lpthread")
	elif target.name=="MacOs":
		# MacOsX CoreAudio API
		# NOTE(review): same missing "-D" prefix question as the Windows
		# branch above — verify against add_export_flag_CC's contract.
		myModule.add_export_flag_CC(['__MACOSX_CORE__'])
		myModule.add_export_flag_LD("-framework CoreAudio")
		myModule.add_export_flag_LD("-framework CoreMIDI")
	else:
		# unsupported target: only warn, still return a (headers-only) module
		debug.warning("unknow target for RTAudio : " + target.name);
	myModule.add_export_path(tools.get_current_path(__file__))
	# NOTE(review): these "rtaudio" sub-directories look like leftovers from
	# the original RtAudio tree — confirm they still exist after the rename.
	myModule.add_path(tools.get_current_path(__file__)+"/rtaudio/")
	myModule.add_path(tools.get_current_path(__file__)+"/rtaudio/include/")
	# register and return the current module to the build system
	return myModule