Compare commits
47 Commits
Author | SHA1 | Date | |
---|---|---|---|
17d59cf370 | |||
5992923c88 | |||
32c0784d3b | |||
488b44b03b | |||
24af15748d | |||
32858d6104 | |||
f5c3affccb | |||
d4c53a53bf | |||
38f51ec421 | |||
102a4e5ca5 | |||
e873a44615 | |||
40f2b25c5e | |||
c4b4c40931 | |||
12bd24d064 | |||
36e5f26cb4 | |||
e4c46dbd64 | |||
1bf633b346 | |||
0f5667bb67 | |||
8f3d17fdf8 | |||
a5dbe5a607 | |||
e6de495285 | |||
12342a4ffa | |||
4b30ecbfff | |||
1e8794e576 | |||
1d5ac5fecb | |||
3a9ce1ffd9 | |||
c685c176dd | |||
1baebd1029 | |||
24fef86124 | |||
b72d6f31df | |||
d9453e6e7a | |||
650f24c288 | |||
4fc9a3e05f | |||
1a24bb9254 | |||
5c9361c199 | |||
4febe7b119 | |||
7d91d12152 | |||
f4471f25e8 | |||
028279a74f | |||
7c6a495d86 | |||
3629886590 | |||
6de9bac0fc | |||
af22fafffa | |||
d8933f0989 | |||
6aa1746a27 | |||
dd604bc736 | |||
00af426a47 |
@@ -1,882 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
//#include <etk/types.h>
|
||||
#include <airtaudio/Interface.h>
|
||||
#include <airtaudio/debug.h>
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <climits>
|
||||
|
||||
|
||||
|
||||
std::ostream& operator <<(std::ostream& _os, const airtaudio::api::type& _obj){
	// Resolve the backend identifier to its printable name; anything we do
	// not recognize is reported as UNSPECIFIED (same as the enum default).
	const char* name = "UNSPECIFIED";
	switch (_obj) {
		case airtaudio::api::LINUX_ALSA:       name = "LINUX_ALSA";       break;
		case airtaudio::api::LINUX_PULSE:      name = "LINUX_PULSE";      break;
		case airtaudio::api::LINUX_OSS:        name = "LINUX_OSS";        break;
		case airtaudio::api::UNIX_JACK:        name = "UNIX_JACK";        break;
		case airtaudio::api::MACOSX_CORE:      name = "MACOSX_CORE";      break;
		case airtaudio::api::IOS_CORE:         name = "IOS_CORE";         break;
		case airtaudio::api::WINDOWS_ASIO:     name = "WINDOWS_ASIO";     break;
		case airtaudio::api::WINDOWS_DS:       name = "WINDOWS_DS";       break;
		case airtaudio::api::RTAUDIO_DUMMY:    name = "RTAUDIO_DUMMY";    break;
		case airtaudio::api::ANDROID_JAVA:     name = "ANDROID_JAVA";     break;
		case airtaudio::api::USER_INTERFACE_1: name = "USER_INTERFACE_1"; break;
		case airtaudio::api::USER_INTERFACE_2: name = "USER_INTERFACE_2"; break;
		case airtaudio::api::USER_INTERFACE_3: name = "USER_INTERFACE_3"; break;
		case airtaudio::api::USER_INTERFACE_4: name = "USER_INTERFACE_4"; break;
		case airtaudio::api::UNSPECIFIED:
		default:
			break;
	}
	_os << name;
	return _os;
}
|
||||
|
||||
// Static variable definitions.
|
||||
const uint32_t airtaudio::api::MAX_SAMPLE_RATES = 14;
|
||||
const uint32_t airtaudio::api::SAMPLE_RATES[] = {
|
||||
4000,
|
||||
5512,
|
||||
8000,
|
||||
9600,
|
||||
11025,
|
||||
16000,
|
||||
22050,
|
||||
32000,
|
||||
44100,
|
||||
48000,
|
||||
88200,
|
||||
96000,
|
||||
176400,
|
||||
192000
|
||||
};
|
||||
|
||||
|
||||
airtaudio::Api::Api() {
	// Initialize the WHOLE stream structure, not just a handful of fields.
	// The previous version set only state/mode/apiHandle/userBuffer, leaving
	// members such as deviceBuffer, streamTime, sampleRate and the per-channel
	// arrays uninitialized until openStream() was called — clearStreamInfo()
	// assigns all of them (state = STREAM_CLOSED, mode = UNINITIALIZED, ...).
	clearStreamInfo();
}
|
||||
|
||||
airtaudio::Api::~Api() {
	// Nothing to release at this level; concrete back-end subclasses are
	// responsible for closing their own streams/handles in their destructors.
}
|
||||
|
||||
/**
 * @brief Validate the user parameters and open an audio stream on this API.
 * @param oParams Output stream parameters (nullptr for input-only streams).
 * @param iParams Input stream parameters (nullptr for output-only streams).
 * @param format Sample format requested by the user.
 * @param sampleRate Requested sample rate in Hz.
 * @param bufferFrames In/out: requested/granted buffer size in frames.
 * @param callback User audio callback invoked for every processed buffer.
 * @param userData Opaque pointer handed back to the callback.
 * @param options Optional stream options; numberOfBuffers is updated on success.
 * @return errorNone on success, errorInvalidUse for bad parameters,
 *         errorSystemError when the back-end probe fails.
 */
enum airtaudio::errorType airtaudio::Api::openStream(airtaudio::StreamParameters *oParams,
                                                     airtaudio::StreamParameters *iParams,
                                                     airtaudio::format format,
                                                     uint32_t sampleRate,
                                                     uint32_t *bufferFrames,
                                                     airtaudio::AirTAudioCallback callback,
                                                     void *userData,
                                                     airtaudio::StreamOptions *options) {
	// Only one stream per Api instance.
	if (m_stream.state != airtaudio::api::STREAM_CLOSED) {
		ATA_ERROR("airtaudio::Api::openStream: a stream is already open!");
		return airtaudio::errorInvalidUse;
	}
	// A non-null parameter block must request at least one channel.
	if (oParams != nullptr && oParams->nChannels < 1) {
		ATA_ERROR("airtaudio::Api::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.");
		return airtaudio::errorInvalidUse;
	}
	if (iParams != nullptr && iParams->nChannels < 1) {
		ATA_ERROR("airtaudio::Api::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.");
		return airtaudio::errorInvalidUse;
	}
	// At least one direction must be requested.
	if (oParams == nullptr && iParams == nullptr) {
		ATA_ERROR("airtaudio::Api::openStream: input and output StreamParameters structures are both NULL!");
		return airtaudio::errorInvalidUse;
	}
	// formatBytes() returns 0 for unknown formats.
	if (formatBytes(format) == 0) {
		ATA_ERROR("airtaudio::Api::openStream: 'format' parameter value is undefined.");
		return airtaudio::errorInvalidUse;
	}
	// Validate the requested device ids against the number of known devices.
	uint32_t nDevices = getDeviceCount();
	uint32_t oChannels = 0;
	if (oParams != nullptr) {
		oChannels = oParams->nChannels;
		if (oParams->deviceId >= nDevices) {
			ATA_ERROR("airtaudio::Api::openStream: output device parameter value is invalid.");
			return airtaudio::errorInvalidUse;
		}
	}
	uint32_t iChannels = 0;
	if (iParams != nullptr) {
		iChannels = iParams->nChannels;
		if (iParams->deviceId >= nDevices) {
			ATA_ERROR("airtaudio::Api::openStream: input device parameter value is invalid.");
			return airtaudio::errorInvalidUse;
		}
	}
	// Start from a clean stream description, then probe the back-end for
	// each requested direction.
	clearStreamInfo();
	if (oChannels > 0) {
		if (probeDeviceOpen(oParams->deviceId,
		                    airtaudio::api::OUTPUT,
		                    oChannels,
		                    oParams->firstChannel,
		                    sampleRate,
		                    format,
		                    bufferFrames,
		                    options) == false) {
			ATA_ERROR("system ERROR");
			return airtaudio::errorSystemError;
		}
	}
	if (iChannels > 0) {
		if (probeDeviceOpen(iParams->deviceId,
		                    airtaudio::api::INPUT,
		                    iChannels,
		                    iParams->firstChannel,
		                    sampleRate,
		                    format,
		                    bufferFrames,
		                    options) == false) {
			// Roll back the already-opened output half before reporting.
			if (oChannels > 0) {
				closeStream();
			}
			ATA_ERROR("system error");
			return airtaudio::errorSystemError;
		}
	}
	// Record the user callback. The function pointer is stored as a void*
	// (a conditionally-supported conversion, kept from the original design);
	// use an explicit named cast instead of the former C-style cast.
	m_stream.callbackInfo.callback = reinterpret_cast<void*>(callback);
	m_stream.callbackInfo.userData = userData;
	// Report back the number of buffers actually selected by the back-end.
	if (options != nullptr) {
		options->numberOfBuffers = m_stream.nBuffers;
	}
	m_stream.state = airtaudio::api::STREAM_STOPPED;
	return airtaudio::errorNone;
}
|
||||
|
||||
// Default input device id; back-ends that can query the system default
// override this. Returning 0 selects the first enumerated device.
uint32_t airtaudio::Api::getDefaultInputDevice() {
	// Should be implemented in subclasses if possible.
	return 0;
}
|
||||
|
||||
// Default output device id; back-ends that can query the system default
// override this. Returning 0 selects the first enumerated device.
uint32_t airtaudio::Api::getDefaultOutputDevice() {
	// Should be implemented in subclasses if possible.
	return 0;
}
|
||||
|
||||
// Close the currently open stream. This base implementation is a no-op
// placeholder that always reports success; every real back-end overrides it.
enum airtaudio::errorType airtaudio::Api::closeStream() {
	// MUST be implemented in subclasses!
	return airtaudio::errorNone;
}
|
||||
|
||||
// Attempt to open the given device for one direction of the stream.
// This base implementation always fails (returns false); concrete back-ends
// override it and fill in m_stream on success. Parameters are intentionally
// unnamed here because they are unused.
bool airtaudio::Api::probeDeviceOpen(uint32_t /*device*/,
                                     airtaudio::api::StreamMode /*mode*/,
                                     uint32_t /*channels*/,
                                     uint32_t /*firstChannel*/,
                                     uint32_t /*sampleRate*/,
                                     airtaudio::format /*format*/,
                                     uint32_t * /*bufferSize*/,
                                     airtaudio::StreamOptions * /*options*/) {
	// MUST be implemented in subclasses!
	return false;
}
|
||||
|
||||
// Advance the stream-time accumulator by one buffer's duration (seconds).
void airtaudio::Api::tickStreamTime() {
	// Subclasses that do not provide their own implementation of
	// getStreamTime should call this function once per buffer I/O to
	// provide basic stream time support.
	m_stream.streamTime += (m_stream.bufferSize * 1.0 / m_stream.sampleRate);
#if defined(HAVE_GETTIMEOFDAY)
	// Remember the wall-clock instant of this tick so getStreamTime() can
	// interpolate between buffer boundaries.
	gettimeofday(&m_stream.lastTickTimestamp, NULL);
#endif
}
|
||||
|
||||
long airtaudio::Api::getStreamLatency() {
|
||||
if (verifyStream() != airtaudio::errorNone) {
|
||||
return 0;
|
||||
}
|
||||
long totalLatency = 0;
|
||||
if ( m_stream.mode == airtaudio::api::OUTPUT
|
||||
|| m_stream.mode == airtaudio::api::DUPLEX) {
|
||||
totalLatency = m_stream.latency[0];
|
||||
}
|
||||
if ( m_stream.mode == airtaudio::api::INPUT
|
||||
|| m_stream.mode == airtaudio::api::DUPLEX) {
|
||||
totalLatency += m_stream.latency[1];
|
||||
}
|
||||
return totalLatency;
|
||||
}
|
||||
|
||||
// Current stream time in seconds (0.0 if no stream is open).
// With HAVE_GETTIMEOFDAY the value is interpolated between buffer ticks;
// otherwise it is quantized to tickStreamTime() buffer boundaries.
double airtaudio::Api::getStreamTime() {
	if (verifyStream() != airtaudio::errorNone) {
		return 0.0f;
	}
#if defined(HAVE_GETTIMEOFDAY)
	// Return a very accurate estimate of the stream time by
	// adding in the elapsed time since the last tick.
	struct timeval then;
	struct timeval now;
	// No interpolation before the first buffer has been processed or when
	// the stream is not running.
	if (m_stream.state != airtaudio::api::STREAM_RUNNING || m_stream.streamTime == 0.0) {
		return m_stream.streamTime;
	}
	gettimeofday(&now, NULL);
	then = m_stream.lastTickTimestamp;
	return m_stream.streamTime
	       + ((now.tv_sec + 0.000001 * now.tv_usec)
	       - (then.tv_sec + 0.000001 * then.tv_usec));
#else
	return m_stream.streamTime;
#endif
}
|
||||
|
||||
uint32_t airtaudio::Api::getStreamSampleRate() {
|
||||
if (verifyStream() != airtaudio::errorNone) {
|
||||
return 0;
|
||||
}
|
||||
return m_stream.sampleRate;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::Api::verifyStream() {
|
||||
if (m_stream.state == airtaudio::api::STREAM_CLOSED) {
|
||||
ATA_ERROR("airtaudio::Api:: a stream is not open!");
|
||||
return airtaudio::errorInvalidUse;
|
||||
}
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
// Reset every field of the stream description to its "no stream open"
// value. Called before probing a new stream and from the constructor path.
void airtaudio::Api::clearStreamInfo() {
	m_stream.mode = airtaudio::api::UNINITIALIZED;
	m_stream.state = airtaudio::api::STREAM_CLOSED;
	m_stream.sampleRate = 0;
	m_stream.bufferSize = 0;
	m_stream.nBuffers = 0;
	m_stream.userFormat = 0;
	m_stream.userInterleaved = true;
	m_stream.streamTime = 0.0;
	m_stream.apiHandle = 0;
	m_stream.deviceBuffer = 0;
	m_stream.callbackInfo.callback = 0;
	m_stream.callbackInfo.userData = 0;
	m_stream.callbackInfo.isRunning = false;
	// Reset both directions (index 0 = output, 1 = input — TODO confirm
	// against the StreamMode indexing used by setConvertInfo()).
	for (int32_t iii=0; iii<2; ++iii) {
		// NOTE(review): 11111 looks like an intentional "obviously invalid
		// device id" sentinel — verify; a named constant would be clearer.
		m_stream.device[iii] = 11111;
		m_stream.doConvertBuffer[iii] = false;
		m_stream.deviceInterleaved[iii] = true;
		m_stream.doByteSwap[iii] = false;
		m_stream.nUserChannels[iii] = 0;
		m_stream.nDeviceChannels[iii] = 0;
		m_stream.channelOffset[iii] = 0;
		m_stream.deviceFormat[iii] = 0;
		m_stream.latency[iii] = 0;
		m_stream.userBuffer[iii] = 0;
		// Conversion descriptors are rebuilt by setConvertInfo() when a
		// stream is opened; clear them completely here.
		m_stream.convertInfo[iii].channels = 0;
		m_stream.convertInfo[iii].inJump = 0;
		m_stream.convertInfo[iii].outJump = 0;
		m_stream.convertInfo[iii].inFormat = 0;
		m_stream.convertInfo[iii].outFormat = 0;
		m_stream.convertInfo[iii].inOffset.clear();
		m_stream.convertInfo[iii].outOffset.clear();
	}
}
|
||||
|
||||
// Width in bytes of a single sample of the given format; 0 (with an error
// log) when the format is not one of the known airtaudio formats.
uint32_t airtaudio::Api::formatBytes(airtaudio::format _format)
{
	if (_format == airtaudio::SINT8) {
		return 1;
	}
	if (_format == airtaudio::SINT16) {
		return 2;
	}
	if (_format == airtaudio::SINT24) {
		return 3;
	}
	if (    _format == airtaudio::SINT32
	     || _format == airtaudio::FLOAT32) {
		return 4;
	}
	if (_format == airtaudio::FLOAT64) {
		return 8;
	}
	ATA_ERROR("airtaudio::Api::formatBytes: undefined format.");
	// TODO : airtaudio::errorWarning;
	return 0;
}
|
||||
|
||||
// Build the conversion descriptor (jumps, channel count, sample offsets)
// used by convertBuffer() for one stream direction. _mode also serves as
// the index into m_stream.convertInfo (INPUT and OUTPUT map to the two
// descriptor slots). _firstChannel shifts all offsets for streams that do
// not start at channel 0.
void airtaudio::Api::setConvertInfo(airtaudio::api::StreamMode _mode, uint32_t _firstChannel) {
	if (_mode == airtaudio::api::INPUT) { // convert device to user buffer
		m_stream.convertInfo[_mode].inJump = m_stream.nDeviceChannels[1];
		m_stream.convertInfo[_mode].outJump = m_stream.nUserChannels[1];
		m_stream.convertInfo[_mode].inFormat = m_stream.deviceFormat[1];
		m_stream.convertInfo[_mode].outFormat = m_stream.userFormat;
	} else { // convert user to device buffer
		m_stream.convertInfo[_mode].inJump = m_stream.nUserChannels[0];
		m_stream.convertInfo[_mode].outJump = m_stream.nDeviceChannels[0];
		m_stream.convertInfo[_mode].inFormat = m_stream.userFormat;
		m_stream.convertInfo[_mode].outFormat = m_stream.deviceFormat[0];
	}
	// Only the smaller of the two channel counts is actually converted
	// (channel compensation: extra channels are dropped or left untouched).
	if (m_stream.convertInfo[_mode].inJump < m_stream.convertInfo[_mode].outJump) {
		m_stream.convertInfo[_mode].channels = m_stream.convertInfo[_mode].inJump;
	} else {
		m_stream.convertInfo[_mode].channels = m_stream.convertInfo[_mode].outJump;
	}
	// Set up the interleave/deinterleave offsets.
	// Interleaved data advances by <channels> per frame with per-channel
	// offset kkk; non-interleaved data stores each channel as a contiguous
	// plane of bufferSize samples (offset kkk * bufferSize, jump 1).
	if (m_stream.deviceInterleaved[_mode] != m_stream.userInterleaved) {
		if (    (    _mode == airtaudio::api::OUTPUT
		          && m_stream.deviceInterleaved[_mode])
		     || (    _mode == airtaudio::api::INPUT
		          && m_stream.userInterleaved)) {
			// Source is planar, destination is interleaved.
			for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
				m_stream.convertInfo[_mode].inOffset.push_back(kkk * m_stream.bufferSize);
				m_stream.convertInfo[_mode].outOffset.push_back(kkk);
				m_stream.convertInfo[_mode].inJump = 1;
			}
		} else {
			// Source is interleaved, destination is planar.
			for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
				m_stream.convertInfo[_mode].inOffset.push_back(kkk);
				m_stream.convertInfo[_mode].outOffset.push_back(kkk * m_stream.bufferSize);
				m_stream.convertInfo[_mode].outJump = 1;
			}
		}
	} else { // no (de)interleaving
		if (m_stream.userInterleaved) {
			for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
				m_stream.convertInfo[_mode].inOffset.push_back(kkk);
				m_stream.convertInfo[_mode].outOffset.push_back(kkk);
			}
		} else {
			// Both planar: copy plane-to-plane, one sample per step.
			for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
				m_stream.convertInfo[_mode].inOffset.push_back(kkk * m_stream.bufferSize);
				m_stream.convertInfo[_mode].outOffset.push_back(kkk * m_stream.bufferSize);
				m_stream.convertInfo[_mode].inJump = 1;
				m_stream.convertInfo[_mode].outJump = 1;
			}
		}
	}
	// Add channel offset.
	// The shift is applied to whichever side addresses the device buffer:
	// output offsets for OUTPUT mode, input offsets for INPUT mode; the
	// step is 1 sample when interleaved, one plane (bufferSize) otherwise.
	if (_firstChannel > 0) {
		if (m_stream.deviceInterleaved[_mode]) {
			if (_mode == airtaudio::api::OUTPUT) {
				for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
					m_stream.convertInfo[_mode].outOffset[kkk] += _firstChannel;
				}
			} else {
				for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
					m_stream.convertInfo[_mode].inOffset[kkk] += _firstChannel;
				}
			}
		} else {
			if (_mode == airtaudio::api::OUTPUT) {
				for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
					m_stream.convertInfo[_mode].outOffset[kkk] += (_firstChannel * m_stream.bufferSize);
				}
			} else {
				for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
					m_stream.convertInfo[_mode].inOffset[kkk] += (_firstChannel * m_stream.bufferSize);
				}
			}
		}
	}
}
|
||||
|
||||
// Convert one buffer between the 6 supported sample formats, applying the
// channel-compensation and (de)interleave offsets prepared by
// setConvertInfo(). The outer if/else ladder selects the OUTPUT format,
// the inner one the INPUT format; each leaf is the same frame/channel
// double loop specialized for one (in, out) pair.
void airtaudio::Api::convertBuffer(char *_outBuffer, char *_inBuffer, airtaudio::api::ConvertInfo &_info) {
	// This function does format conversion, input/output channel compensation, and
	// data interleaving/deinterleaving. 24-bit integers are assumed to occupy
	// the lower three bytes of a 32-bit integer.
	
	// Clear our device buffer when in/out duplex device channels are different
	if (    _outBuffer == m_stream.deviceBuffer
	     && m_stream.mode == airtaudio::api::DUPLEX
	     && m_stream.nDeviceChannels[0] < m_stream.nDeviceChannels[1]) {
		memset(_outBuffer, 0, m_stream.bufferSize * _info.outJump * formatBytes(_info.outFormat));
	}
	int32_t jjj;
	// ---- Destination: FLOAT64 --------------------------------------------
	// Integer sources are recentered (+0.5) and scaled into [-1.0, 1.0].
	if (_info.outFormat == airtaudio::FLOAT64) {
		double scale;
		double *out = (double *)_outBuffer;
		
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			scale = 1.0 / 127.5;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			scale = 1.0 / 32767.5;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			scale = 1.0 / 8388607.5;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) (in[_info.inOffset[jjj]].asInt());
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			scale = 1.0 / 2147483647.5;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (double) in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::FLOAT64) {
			// Channel compensation and/or (de)interleaving only.
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	}
	// ---- Destination: FLOAT32 --------------------------------------------
	else if (_info.outFormat == airtaudio::FLOAT32) {
		float scale;
		float *out = (float *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			scale = (float) (1.0 / 127.5);
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			scale = (float) (1.0 / 32767.5);
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			scale = (float) (1.0 / 8388607.5);
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) (in[_info.inOffset[jjj]].asInt());
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			scale = (float) (1.0 / 2147483647.5);
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] += 0.5;
					out[_info.outOffset[jjj]] *= scale;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::FLOAT32) {
			// Channel compensation and/or (de)interleaving only.
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (float) in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	}
	// ---- Destination: SINT32 ---------------------------------------------
	// Narrower integer sources are shifted up into the high bits; float
	// sources are scaled by 2^31 - 0.5 and recentered (-0.5).
	else if (_info.outFormat == airtaudio::SINT32) {
		int32_t *out = (int32_t *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] <<= 24;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] <<= 16;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) in[_info.inOffset[jjj]].asInt();
					out[_info.outOffset[jjj]] <<= 8;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT32) {
			// Channel compensation and/or (de)interleaving only.
			int32_t *in = (int32_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] * 2147483647.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] * 2147483647.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	}
	// ---- Destination: SINT24 ---------------------------------------------
	else if (_info.outFormat == airtaudio::SINT24) {
		int24_t *out = (int24_t *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] << 16);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] << 8);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT24) {
			// Channel compensation and/or (de)interleaving only.
			int24_t *in = (int24_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] >> 8);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] * 8388607.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int32_t) (in[_info.inOffset[jjj]] * 8388607.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	}
	// ---- Destination: SINT16 ---------------------------------------------
	else if (_info.outFormat == airtaudio::SINT16) {
		int16_t *out = (int16_t *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			signed char *in = (signed char *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) in[_info.inOffset[jjj]];
					out[_info.outOffset[jjj]] <<= 8;
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT16) {
			// Channel compensation and/or (de)interleaving only.
			int16_t *in = (int16_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) (in[_info.inOffset[jjj]].asInt() >> 8);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) ((in[_info.inOffset[jjj]] >> 16) & 0x0000ffff);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) (in[_info.inOffset[jjj]] * 32767.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (int16_t) (in[_info.inOffset[jjj]] * 32767.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	}
	// ---- Destination: SINT8 ----------------------------------------------
	else if (_info.outFormat == airtaudio::SINT8) {
		signed char *out = (signed char *)_outBuffer;
		if (_info.inFormat == airtaudio::SINT8) {
			// Channel compensation and/or (de)interleaving only.
			signed char *in = (signed char *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		// NOTE(review): this is a plain `if`, unlike the `else if` ladder used
		// everywhere else in this function. Harmless because the format values
		// are mutually exclusive, but inconsistent — worth normalizing.
		if (_info.inFormat == airtaudio::SINT16) {
			int16_t *in = (int16_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) ((in[_info.inOffset[jjj]] >> 8) & 0x00ff);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT24) {
			int24_t *in = (int24_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) (in[_info.inOffset[jjj]].asInt() >> 16);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::SINT32) {
			int32_t *in = (int32_t *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) ((in[_info.inOffset[jjj]] >> 24) & 0x000000ff);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::FLOAT32) {
			float *in = (float *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) (in[_info.inOffset[jjj]] * 127.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
		else if (_info.inFormat == airtaudio::FLOAT64) {
			double *in = (double *)_inBuffer;
			for (uint32_t iii=0; iii<m_stream.bufferSize; ++iii) {
				for (jjj=0; jjj<_info.channels; ++jjj) {
					out[_info.outOffset[jjj]] = (signed char) (in[_info.inOffset[jjj]] * 127.5 - 0.5);
				}
				in += _info.inJump;
				out += _info.outJump;
			}
		}
	}
}
|
||||
|
||||
/**
 * @brief Reverse the byte order of every sample in _buffer, in place.
 * @param _buffer Raw sample data to swap.
 * @param _samples Number of samples (not bytes) in the buffer.
 * @param _format Sample format; determines the per-sample byte width
 *        (24-bit samples occupy 3 bytes here, matching convertBuffer()).
 * @note Formats with no multi-byte representation to swap (SINT8) and
 *       unknown formats are left untouched, as before. The previous
 *       implementation duplicated the swap loop four times and used the
 *       `register` keyword, which is deprecated since C++11 and removed
 *       in C++17; this version drives a single reversal loop by size.
 */
void airtaudio::Api::byteSwapBuffer(char *_buffer, uint32_t _samples, airtaudio::format _format) {
	uint32_t sampleBytes;
	if (_format == airtaudio::SINT16) {
		sampleBytes = 2;
	} else if (    _format == airtaudio::SINT32
	            || _format == airtaudio::FLOAT32) {
		sampleBytes = 4;
	} else if (_format == airtaudio::SINT24) {
		sampleBytes = 3;
	} else if (_format == airtaudio::FLOAT64) {
		sampleBytes = 8;
	} else {
		// SINT8 or unrecognized format: nothing to swap (same as before).
		return;
	}
	char *ptr = _buffer;
	for (uint32_t iii=0; iii<_samples; ++iii) {
		// Reverse bytes [0, sampleBytes) of the current sample.
		for (uint32_t kkk=0; kkk<sampleBytes/2; ++kkk) {
			char val = ptr[kkk];
			ptr[kkk] = ptr[sampleBytes - 1 - kkk];
			ptr[sampleBytes - 1 - kkk] = val;
		}
		ptr += sampleBytes;
	}
}
|
||||
|
||||
|
196
airtaudio/Api.h
196
airtaudio/Api.h
@@ -1,196 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#ifndef __AIRTAUDIO_API_H__
|
||||
#define __AIRTAUDIO_API_H__
|
||||
|
||||
#include <sstream>
|
||||
#include <airtaudio/debug.h>
|
||||
|
||||
namespace airtaudio {
|
||||
namespace api {
|
||||
/**
|
||||
* @brief Audio API specifier arguments.
|
||||
*/
|
||||
enum type {
|
||||
UNSPECIFIED, //!< Search for a working compiled API.
|
||||
LINUX_ALSA, //!< The Advanced Linux Sound Architecture API.
|
||||
LINUX_PULSE, //!< The Linux PulseAudio API.
|
||||
LINUX_OSS, //!< The Linux Open Sound System API.
|
||||
UNIX_JACK, //!< The Jack Low-Latency Audio Server API.
|
||||
MACOSX_CORE, //!< Macintosh OS-X Core Audio API.
|
||||
IOS_CORE, //!< Macintosh OS-X Core Audio API.
|
||||
WINDOWS_ASIO, //!< The Steinberg Audio Stream I/O API.
|
||||
WINDOWS_DS, //!< The Microsoft Direct Sound API.
|
||||
RTAUDIO_DUMMY, //!< A compilable but non-functional API.
|
||||
ANDROID_JAVA, //!< Android Interface.
|
||||
USER_INTERFACE_1, //!< User interface 1.
|
||||
USER_INTERFACE_2, //!< User interface 2.
|
||||
USER_INTERFACE_3, //!< User interface 3.
|
||||
USER_INTERFACE_4, //!< User interface 4.
|
||||
};
|
||||
|
||||
extern const uint32_t MAX_SAMPLE_RATES;
|
||||
extern const uint32_t SAMPLE_RATES[];
|
||||
|
||||
enum StreamState {
|
||||
STREAM_STOPPED,
|
||||
STREAM_STOPPING,
|
||||
STREAM_RUNNING,
|
||||
STREAM_CLOSED = -50
|
||||
};
|
||||
|
||||
enum StreamMode {
|
||||
OUTPUT,
|
||||
INPUT,
|
||||
DUPLEX,
|
||||
UNINITIALIZED = -75
|
||||
};
|
||||
|
||||
// A protected structure used for buffer conversion.
|
||||
struct ConvertInfo {
|
||||
int32_t channels;
|
||||
int32_t inJump, outJump;
|
||||
airtaudio::format inFormat, outFormat;
|
||||
std::vector<int> inOffset;
|
||||
std::vector<int> outOffset;
|
||||
};
|
||||
|
||||
// A protected structure for audio streams.
|
||||
class Stream {
|
||||
public:
|
||||
uint32_t device[2]; // Playback and record, respectively.
|
||||
void *apiHandle; // void pointer for API specific stream handle information
|
||||
airtaudio::api::StreamMode mode; // OUTPUT, INPUT, or DUPLEX.
|
||||
airtaudio::api::StreamState state; // STOPPED, RUNNING, or CLOSED
|
||||
char *userBuffer[2]; // Playback and record, respectively.
|
||||
char *deviceBuffer;
|
||||
bool doConvertBuffer[2]; // Playback and record, respectively.
|
||||
bool userInterleaved;
|
||||
bool deviceInterleaved[2]; // Playback and record, respectively.
|
||||
bool doByteSwap[2]; // Playback and record, respectively.
|
||||
uint32_t sampleRate;
|
||||
uint32_t bufferSize;
|
||||
uint32_t nBuffers;
|
||||
uint32_t nUserChannels[2]; // Playback and record, respectively.
|
||||
uint32_t nDeviceChannels[2]; // Playback and record channels, respectively.
|
||||
uint32_t channelOffset[2]; // Playback and record, respectively.
|
||||
uint64_t latency[2]; // Playback and record, respectively.
|
||||
airtaudio::format userFormat;
|
||||
airtaudio::format deviceFormat[2]; // Playback and record, respectively.
|
||||
std::mutex mutex;
|
||||
airtaudio::CallbackInfo callbackInfo;
|
||||
airtaudio::api::ConvertInfo convertInfo[2];
|
||||
double streamTime; // Number of elapsed seconds since the stream started.
|
||||
|
||||
#if defined(HAVE_GETTIMEOFDAY)
|
||||
struct timeval lastTickTimestamp;
|
||||
#endif
|
||||
|
||||
Stream() :
|
||||
apiHandle(0),
|
||||
deviceBuffer(0) {
|
||||
device[0] = 11111;
|
||||
device[1] = 11111;
|
||||
}
|
||||
};
|
||||
};
|
||||
/**
|
||||
* RtApi class declaration.
|
||||
*
|
||||
* Subclasses of RtApi contain all API- and OS-specific code necessary
|
||||
* to fully implement the RtAudio API.
|
||||
*
|
||||
* Note that RtApi is an abstract base class and cannot be
|
||||
* explicitly instantiated. The class RtAudio will create an
|
||||
* instance of an RtApi subclass (RtApiOss, RtApiAlsa,
|
||||
* RtApiJack, RtApiCore, RtApiDs, or RtApiAsio).
|
||||
*/
|
||||
class Api {
|
||||
public:
|
||||
Api();
|
||||
virtual ~Api();
|
||||
virtual airtaudio::api::type getCurrentApi() = 0;
|
||||
virtual uint32_t getDeviceCount() = 0;
|
||||
virtual airtaudio::DeviceInfo getDeviceInfo(uint32_t _device) = 0;
|
||||
virtual uint32_t getDefaultInputDevice();
|
||||
virtual uint32_t getDefaultOutputDevice();
|
||||
enum airtaudio::errorType openStream(airtaudio::StreamParameters *_outputParameters,
|
||||
airtaudio::StreamParameters *_inputParameters,
|
||||
airtaudio::format _format,
|
||||
uint32_t _sampleRate,
|
||||
uint32_t *_bufferFrames,
|
||||
airtaudio::AirTAudioCallback _callback,
|
||||
void *_userData,
|
||||
airtaudio::StreamOptions *_options);
|
||||
virtual enum airtaudio::errorType closeStream();
|
||||
virtual enum airtaudio::errorType startStream() = 0;
|
||||
virtual enum airtaudio::errorType stopStream() = 0;
|
||||
virtual enum airtaudio::errorType abortStream() = 0;
|
||||
long getStreamLatency();
|
||||
uint32_t getStreamSampleRate();
|
||||
virtual double getStreamTime();
|
||||
bool isStreamOpen() const {
|
||||
return m_stream.state != airtaudio::api::STREAM_CLOSED;
|
||||
}
|
||||
bool isStreamRunning() const {
|
||||
return m_stream.state == airtaudio::api::STREAM_RUNNING;
|
||||
}
|
||||
|
||||
protected:
|
||||
airtaudio::api::Stream m_stream;
|
||||
|
||||
/*!
|
||||
Protected, api-specific method that attempts to open a device
|
||||
with the given parameters. This function MUST be implemented by
|
||||
all subclasses. If an error is encountered during the probe, a
|
||||
"warning" message is reported and false is returned. A
|
||||
successful probe is indicated by a return value of true.
|
||||
*/
|
||||
virtual bool probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options);
|
||||
|
||||
//! A protected function used to increment the stream time.
|
||||
void tickStreamTime();
|
||||
|
||||
//! Protected common method to clear an RtApiStream structure.
|
||||
void clearStreamInfo();
|
||||
|
||||
/*!
|
||||
Protected common method that throws an RtError (type =
|
||||
INVALID_USE) if a stream is not open.
|
||||
*/
|
||||
enum airtaudio::errorType verifyStream();
|
||||
/**
|
||||
* @brief Protected method used to perform format, channel number, and/or interleaving
|
||||
* conversions between the user and device buffers.
|
||||
*/
|
||||
void convertBuffer(char *_outBuffer, char *_inBuffer, airtaudio::api::ConvertInfo& _info);
|
||||
|
||||
//! Protected common method used to perform byte-swapping on buffers.
|
||||
void byteSwapBuffer(char *_buffer, uint32_t _samples, airtaudio::format _format);
|
||||
|
||||
//! Protected common method that returns the number of bytes for a given format.
|
||||
uint32_t formatBytes(airtaudio::format _format);
|
||||
|
||||
//! Protected common method that sets up the parameters for buffer conversion.
|
||||
void setConvertInfo(airtaudio::api::StreamMode _mode, uint32_t _firstChannel);
|
||||
};
|
||||
};
|
||||
/**
|
||||
* @brief Debug operator To display the curent element in a Human redeable information
|
||||
*/
|
||||
std::ostream& operator <<(std::ostream& _os, const airtaudio::api::type& _obj);
|
||||
|
||||
#endif
|
@@ -1,44 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#ifndef __AIRTAUDIO_CALLBACK_INFO_H__
|
||||
#define __AIRTAUDIO_CALLBACK_INFO_H__
|
||||
|
||||
#include <thread>
|
||||
|
||||
namespace airtaudio {
|
||||
// This global structure type is used to pass callback information
|
||||
// between the private RtAudio stream structure and global callback
|
||||
// handling functions.
|
||||
class CallbackInfo {
|
||||
public:
|
||||
void* object; // Used as a "this" pointer.
|
||||
std::thread* thread;
|
||||
void* callback;
|
||||
void* userData;
|
||||
void* apiInfo; // void pointer for API specific callback information
|
||||
bool isRunning;
|
||||
bool doRealtime;
|
||||
int32_t priority;
|
||||
|
||||
// Default constructor.
|
||||
CallbackInfo() :
|
||||
object(0),
|
||||
callback(0),
|
||||
userData(0),
|
||||
apiInfo(0),
|
||||
isRunning(false),
|
||||
doRealtime(false) {
|
||||
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
#endif
|
||||
|
@@ -1,40 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#ifndef __AIRTAUDIO_DEVICE_INFO_H__
|
||||
#define __AIRTAUDIO_DEVICE_INFO_H__
|
||||
|
||||
namespace airtaudio {
|
||||
/**
|
||||
* @brief The public device information structure for returning queried values.
|
||||
*/
|
||||
class DeviceInfo {
|
||||
public:
|
||||
bool probed; //!< true if the device capabilities were successfully probed.
|
||||
std::string name; //!< Character string device identifier.
|
||||
uint32_t outputChannels; //!< Maximum output channels supported by device.
|
||||
uint32_t inputChannels; //!< Maximum input channels supported by device.
|
||||
uint32_t duplexChannels; //!< Maximum simultaneous input/output channels supported by device.
|
||||
bool isDefaultOutput; //!< true if this is the default output device.
|
||||
bool isDefaultInput; //!< true if this is the default input device.
|
||||
std::vector<uint32_t> sampleRates; //!< Supported sample rates (queried from list of standard rates).
|
||||
airtaudio::format nativeFormats; //!< Bit mask of supported data formats.
|
||||
// Default constructor.
|
||||
DeviceInfo() :
|
||||
probed(false),
|
||||
outputChannels(0),
|
||||
inputChannels(0),
|
||||
duplexChannels(0),
|
||||
isDefaultOutput(false),
|
||||
isDefaultInput(false),
|
||||
nativeFormats(0) {}
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@@ -1,158 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
//#include <etk/types.h>
|
||||
#include <airtaudio/Interface.h>
|
||||
#include <airtaudio/debug.h>
|
||||
#include <iostream>
|
||||
|
||||
std::vector<airtaudio::api::type> airtaudio::Interface::getCompiledApi() {
|
||||
std::vector<airtaudio::api::type> apis;
|
||||
// The order here will control the order of RtAudio's API search in
|
||||
// the constructor.
|
||||
for (auto &it : m_apiAvaillable) {
|
||||
apis.push_back(it.first);
|
||||
}
|
||||
return apis;
|
||||
}
|
||||
|
||||
|
||||
|
||||
void airtaudio::Interface::openRtApi(airtaudio::api::type _api) {
|
||||
if (m_rtapi != NULL) {
|
||||
delete m_rtapi;
|
||||
m_rtapi = NULL;
|
||||
}
|
||||
for (auto &it :m_apiAvaillable) {
|
||||
ATA_ERROR("try open " << it.first);
|
||||
if (_api == it.first) {
|
||||
ATA_ERROR(" ==> call it");
|
||||
m_rtapi = it.second();
|
||||
if (m_rtapi != NULL) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
// TODO : An eror occured ...
|
||||
ATA_ERROR("Error in open API ...");
|
||||
}
|
||||
|
||||
|
||||
airtaudio::Interface::Interface() :
|
||||
m_rtapi(NULL) {
|
||||
#if defined(__UNIX_JACK__)
|
||||
addInterface(airtaudio::api::UNIX_JACK, airtaudio::api::Jack::Create);
|
||||
#endif
|
||||
#if defined(__LINUX_ALSA__)
|
||||
addInterface(airtaudio::api::LINUX_ALSA, airtaudio::api::Alsa::Create);
|
||||
#endif
|
||||
#if defined(__LINUX_PULSE__)
|
||||
addInterface(airtaudio::api::LINUX_PULSE, airtaudio::api::Pulse::Create);
|
||||
#endif
|
||||
#if defined(__LINUX_OSS__)
|
||||
addInterface(airtaudio::api::LINUX_OSS, airtaudio::api::Oss::Create);
|
||||
#endif
|
||||
#if defined(__WINDOWS_ASIO__)
|
||||
addInterface(airtaudio::api::WINDOWS_ASIO, airtaudio::api::Asio::Create);
|
||||
#endif
|
||||
#if defined(__WINDOWS_DS__)
|
||||
addInterface(airtaudio::api::WINDOWS_DS, airtaudio::api::Ds::Create);
|
||||
#endif
|
||||
#if defined(__MACOSX_CORE__)
|
||||
addInterface(airtaudio::api::MACOSX_CORE, airtaudio::api::Core::Create);
|
||||
#endif
|
||||
#if defined(__IOS_CORE__)
|
||||
addInterface(airtaudio::api::IOS_CORE, airtaudio::api::CoreIos::Create);
|
||||
#endif
|
||||
#if defined(__ANDROID_JAVA__)
|
||||
addInterface(airtaudio::api::ANDROID_JAVA, airtaudio::api::Android::Create);
|
||||
#endif
|
||||
#if defined(__AIRTAUDIO_DUMMY__)
|
||||
addInterface(airtaudio::api::RTAUDIO_DUMMY, airtaudio::api::Dummy::Create);
|
||||
#endif
|
||||
}
|
||||
|
||||
void airtaudio::Interface::addInterface(airtaudio::api::type _api, Api* (*_callbackCreate)()) {
|
||||
m_apiAvaillable.push_back(std::pair<airtaudio::api::type, Api* (*)()>(_api, _callbackCreate));
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::Interface::instanciate(airtaudio::api::type _api) {
|
||||
ATA_INFO("Instanciate API ...");
|
||||
if (m_rtapi != NULL) {
|
||||
ATA_WARNING("Interface already started ...!");
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
if (_api != airtaudio::api::UNSPECIFIED) {
|
||||
ATA_ERROR("API specified ...");
|
||||
// Attempt to open the specified API.
|
||||
openRtApi(_api);
|
||||
if (m_rtapi != NULL) {
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
// No compiled support for specified API value. Issue a debug
|
||||
// warning and continue as if no API was specified.
|
||||
ATA_ERROR("RtAudio: no compiled support for specified API argument!");
|
||||
return airtaudio::errorFail;
|
||||
}
|
||||
ATA_INFO("Auto choice API :");
|
||||
// Iterate through the compiled APIs and return as soon as we find
|
||||
// one with at least one device or we reach the end of the list.
|
||||
std::vector<airtaudio::api::type> apis = getCompiledApi();
|
||||
ATA_INFO(" find : " << apis.size() << " apis.");
|
||||
for (auto &it : apis) {
|
||||
ATA_INFO("try open ...");
|
||||
openRtApi(it);
|
||||
if(m_rtapi == NULL) {
|
||||
ATA_ERROR(" ==> can not create ...");
|
||||
continue;
|
||||
}
|
||||
if (m_rtapi->getDeviceCount() != 0) {
|
||||
ATA_INFO(" ==> api open");
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (m_rtapi != NULL) {
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
ATA_ERROR("RtAudio: no compiled API support found ... critical error!!");
|
||||
return airtaudio::errorFail;
|
||||
}
|
||||
|
||||
airtaudio::Interface::~Interface() {
|
||||
ATA_INFO("Remove interface");
|
||||
if (m_rtapi != NULL) {
|
||||
delete m_rtapi;
|
||||
m_rtapi = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::Interface::openStream(
|
||||
airtaudio::StreamParameters* _outputParameters,
|
||||
airtaudio::StreamParameters* _inputParameters,
|
||||
airtaudio::format _format,
|
||||
uint32_t _sampleRate,
|
||||
uint32_t* _bufferFrames,
|
||||
airtaudio::AirTAudioCallback _callback,
|
||||
void* _userData,
|
||||
airtaudio::StreamOptions* _options) {
|
||||
if (m_rtapi == NULL) {
|
||||
return airtaudio::errorInputNull;
|
||||
}
|
||||
return m_rtapi->openStream(_outputParameters,
|
||||
_inputParameters,
|
||||
_format,
|
||||
_sampleRate,
|
||||
_bufferFrames,
|
||||
_callback,
|
||||
_userData,
|
||||
_options);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
@@ -1,312 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#ifndef __AIRTAUDIO_RTAUDIO_H__
|
||||
#define __AIRTAUDIO_RTAUDIO_H__
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <airtaudio/base.h>
|
||||
#include <airtaudio/int24_t.h>
|
||||
#include <airtaudio/CallbackInfo.h>
|
||||
#include <airtaudio/Api.h>
|
||||
#include <airtaudio/api/Alsa.h>
|
||||
#include <airtaudio/api/Android.h>
|
||||
#include <airtaudio/api/Asio.h>
|
||||
#include <airtaudio/api/Core.h>
|
||||
#include <airtaudio/api/CoreIos.h>
|
||||
#include <airtaudio/api/Ds.h>
|
||||
#include <airtaudio/api/Dummy.h>
|
||||
#include <airtaudio/api/Jack.h>
|
||||
#include <airtaudio/api/Oss.h>
|
||||
#include <airtaudio/api/Pulse.h>
|
||||
namespace airtaudio {
|
||||
/**
|
||||
* @brief airtaudio::Interface class declaration.
|
||||
*
|
||||
* airtaudio::Interface is a "controller" used to select an available audio i/o
|
||||
* interface. It presents a common API for the user to call but all
|
||||
* functionality is implemented by the class RtApi and its
|
||||
* subclasses. RtAudio creates an instance of an RtApi subclass
|
||||
* based on the user's API choice. If no choice is made, RtAudio
|
||||
* attempts to make a "logical" API selection.
|
||||
*/
|
||||
class Interface {
|
||||
protected:
|
||||
std::vector<std::pair<airtaudio::api::type, Api* (*)()>> m_apiAvaillable;
|
||||
protected:
|
||||
airtaudio::Api *m_rtapi;
|
||||
public:
|
||||
/**
|
||||
* @brief A static function to determine the current airtaudio version.
|
||||
*/
|
||||
static std::string getVersion() {
|
||||
return airtaudio::VERSION;
|
||||
}
|
||||
/**
|
||||
* @brief A static function to determine the available compiled audio APIs.
|
||||
*
|
||||
* The values returned in the std::vector can be compared against
|
||||
* the enumerated list values. Note that there can be more than one
|
||||
* API compiled for certain operating systems.
|
||||
*/
|
||||
std::vector<airtaudio::api::type> getCompiledApi();
|
||||
/**
|
||||
* @brief The class constructor.
|
||||
* @note the creating of the basic instance is done by Instanciate
|
||||
*/
|
||||
Interface();
|
||||
/**
|
||||
* @brief The destructor.
|
||||
*
|
||||
* If a stream is running or open, it will be stopped and closed
|
||||
* automatically.
|
||||
*/
|
||||
virtual ~Interface();
|
||||
/**
|
||||
* @brief Add an interface of the Possible List.
|
||||
* @param[in] _api Type of the interface.
|
||||
* @param[in] _callbackCreate API creation callback.
|
||||
*/
|
||||
void addInterface(airtaudio::api::type _api, Api* (*_callbackCreate)());
|
||||
/**
|
||||
* @brief Create an interface instance
|
||||
*/
|
||||
enum airtaudio::errorType instanciate(airtaudio::api::type _api = airtaudio::api::UNSPECIFIED);
|
||||
/**
|
||||
* @return the audio API specifier for the current instance of airtaudio.
|
||||
*/
|
||||
airtaudio::api::type getCurrentApi() {
|
||||
if (m_rtapi == NULL) {
|
||||
return airtaudio::api::UNSPECIFIED;
|
||||
}
|
||||
return m_rtapi->getCurrentApi();
|
||||
}
|
||||
/**
|
||||
* @brief A public function that queries for the number of audio devices available.
|
||||
*
|
||||
* This function performs a system query of available devices each time it
|
||||
* is called, thus supporting devices connected \e after instantiation. If
|
||||
* a system error occurs during processing, a warning will be issued.
|
||||
*/
|
||||
uint32_t getDeviceCount() {
|
||||
if (m_rtapi == NULL) {
|
||||
return 0;
|
||||
}
|
||||
return m_rtapi->getDeviceCount();
|
||||
}
|
||||
/**
|
||||
* @brief Any device integer between 0 and getDeviceCount() - 1 is valid.
|
||||
* If an invalid argument is provided, an RtError (type = INVALID_USE)
|
||||
* will be thrown. If a device is busy or otherwise unavailable, the
|
||||
* structure member "probed" will have a value of "false" and all
|
||||
* other members are undefined. If the specified device is the
|
||||
* current default input or output device, the corresponding
|
||||
* "isDefault" member will have a value of "true".
|
||||
*
|
||||
* @return An airtaudio::DeviceInfo structure for a specified device number.
|
||||
*/
|
||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device) {
|
||||
if (m_rtapi == NULL) {
|
||||
return airtaudio::DeviceInfo();
|
||||
}
|
||||
return m_rtapi->getDeviceInfo(_device);
|
||||
}
|
||||
/**
|
||||
* @brief A function that returns the index of the default output device.
|
||||
*
|
||||
* If the underlying audio API does not provide a "default
|
||||
* device", or if no devices are available, the return value will be
|
||||
* 0. Note that this is a valid device identifier and it is the
|
||||
* client's responsibility to verify that a device is available
|
||||
* before attempting to open a stream.
|
||||
*/
|
||||
uint32_t getDefaultOutputDevice() {
|
||||
if (m_rtapi == NULL) {
|
||||
return 0;
|
||||
}
|
||||
return m_rtapi->getDefaultOutputDevice();
|
||||
}
|
||||
/**
|
||||
* @brief A function that returns the index of the default input device.
|
||||
*
|
||||
* If the underlying audio API does not provide a "default
|
||||
* device", or if no devices are available, the return value will be
|
||||
* 0. Note that this is a valid device identifier and it is the
|
||||
* client's responsibility to verify that a device is available
|
||||
* before attempting to open a stream.
|
||||
*/
|
||||
uint32_t getDefaultInputDevice() {
|
||||
if (m_rtapi == NULL) {
|
||||
return 0;
|
||||
}
|
||||
return m_rtapi->getDefaultInputDevice();
|
||||
}
|
||||
/**
|
||||
* @brief A public function for opening a stream with the specified parameters.
|
||||
*
|
||||
* An RtError (type = SYSTEM_ERROR) is thrown if a stream cannot be
|
||||
* opened with the specified parameters or an error occurs during
|
||||
* processing. An RtError (type = INVALID_USE) is thrown if any
|
||||
* invalid device ID or channel number parameters are specified.
|
||||
* @param _outputParameters Specifies output stream parameters to use
|
||||
* when opening a stream, including a device ID, number of channels,
|
||||
* and starting channel number. For input-only streams, this
|
||||
* argument should be NULL. The device ID is an index value between
|
||||
* 0 and getDeviceCount() - 1.
|
||||
* @param _inputParameters Specifies input stream parameters to use
|
||||
* when opening a stream, including a device ID, number of channels,
|
||||
* and starting channel number. For output-only streams, this
|
||||
* argument should be NULL. The device ID is an index value between
|
||||
* 0 and getDeviceCount() - 1.
|
||||
* @param _format An airtaudio::format specifying the desired sample data format.
|
||||
* @param _sampleRate The desired sample rate (sample frames per second).
|
||||
* @param *_bufferFrames A pointer to a value indicating the desired
|
||||
* internal buffer size in sample frames. The actual value
|
||||
* used by the device is returned via the same pointer. A
|
||||
* value of zero can be specified, in which case the lowest
|
||||
* allowable value is determined.
|
||||
* @param _callback A client-defined function that will be invoked
|
||||
* when input data is available and/or output data is needed.
|
||||
* @param _userData An optional pointer to data that can be accessed
|
||||
* from within the callback function.
|
||||
* @param _options An optional pointer to a structure containing various
|
||||
* global stream options, including a list of OR'ed airtaudio::streamFlags
|
||||
* and a suggested number of stream buffers that can be used to
|
||||
* control stream latency. More buffers typically result in more
|
||||
* robust performance, though at a cost of greater latency. If a
|
||||
* value of zero is specified, a system-specific median value is
|
||||
* chosen. If the airtaudio_MINIMIZE_LATENCY flag bit is set, the
|
||||
* lowest allowable value is used. The actual value used is
|
||||
* returned via the structure argument. The parameter is API dependent.
|
||||
* @param _errorCallback A client-defined function that will be invoked
|
||||
* when an error has occured.
|
||||
*/
|
||||
enum airtaudio::errorType openStream(airtaudio::StreamParameters *_outputParameters,
|
||||
airtaudio::StreamParameters *_inputParameters,
|
||||
airtaudio::format _format,
|
||||
uint32_t _sampleRate,
|
||||
uint32_t *_bufferFrames,
|
||||
airtaudio::AirTAudioCallback _callback,
|
||||
void *_userData = NULL,
|
||||
airtaudio::StreamOptions *_options = NULL);
|
||||
|
||||
/**
|
||||
* @brief A function that closes a stream and frees any associated stream memory.
|
||||
*
|
||||
* If a stream is not open, this function issues a warning and
|
||||
* returns (no exception is thrown).
|
||||
*/
|
||||
enum airtaudio::errorType closeStream() {
|
||||
if (m_rtapi == NULL) {
|
||||
return airtaudio::errorInputNull;
|
||||
}
|
||||
return m_rtapi->closeStream();
|
||||
}
|
||||
/**
|
||||
* @brief A function that starts a stream.
|
||||
*
|
||||
* An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
|
||||
* during processing. An RtError (type = INVALID_USE) is thrown if a
|
||||
* stream is not open. A warning is issued if the stream is already
|
||||
* running.
|
||||
*/
|
||||
enum airtaudio::errorType startStream() {
|
||||
if (m_rtapi == NULL) {
|
||||
return airtaudio::errorInputNull;
|
||||
}
|
||||
return m_rtapi->startStream();
|
||||
}
|
||||
/**
|
||||
* @brief Stop a stream, allowing any samples remaining in the output queue to be played.
|
||||
*
|
||||
* An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
|
||||
* during processing. An RtError (type = INVALID_USE) is thrown if a
|
||||
* stream is not open. A warning is issued if the stream is already
|
||||
* stopped.
|
||||
*/
|
||||
enum airtaudio::errorType stopStream() {
|
||||
if (m_rtapi == NULL) {
|
||||
return airtaudio::errorInputNull;
|
||||
}
|
||||
return m_rtapi->stopStream();
|
||||
}
|
||||
/**
|
||||
* @brief Stop a stream, discarding any samples remaining in the input/output queue.
|
||||
* An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
|
||||
* during processing. An RtError (type = INVALID_USE) is thrown if a
|
||||
* stream is not open. A warning is issued if the stream is already
|
||||
* stopped.
|
||||
*/
|
||||
enum airtaudio::errorType abortStream() {
|
||||
if (m_rtapi == NULL) {
|
||||
return airtaudio::errorInputNull;
|
||||
}
|
||||
return m_rtapi->abortStream();
|
||||
}
|
||||
/**
|
||||
* @return true if a stream is open and false if not.
|
||||
*/
|
||||
bool isStreamOpen() const {
|
||||
if (m_rtapi == NULL) {
|
||||
return false;
|
||||
}
|
||||
return m_rtapi->isStreamOpen();
|
||||
}
|
||||
/**
|
||||
* @return true if the stream is running and false if it is stopped or not open.
|
||||
*/
|
||||
bool isStreamRunning() const {
|
||||
if (m_rtapi == NULL) {
|
||||
return false;
|
||||
}
|
||||
return m_rtapi->isStreamRunning();
|
||||
}
|
||||
/**
|
||||
* @brief If a stream is not open, an RtError (type = INVALID_USE) will be thrown.
|
||||
* @return the number of elapsed seconds since the stream was started.
|
||||
*/
|
||||
double getStreamTime() {
|
||||
if (m_rtapi == NULL) {
|
||||
return 0.0;
|
||||
}
|
||||
return m_rtapi->getStreamTime();
|
||||
}
|
||||
/**
|
||||
* @brief The stream latency refers to delay in audio input and/or output
|
||||
* caused by internal buffering by the audio system and/or hardware.
|
||||
* For duplex streams, the returned value will represent the sum of
|
||||
* the input and output latencies. If a stream is not open, an
|
||||
* RtError (type = INVALID_USE) will be thrown. If the API does not
|
||||
* report latency, the return value will be zero.
|
||||
* @return The internal stream latency in sample frames.
|
||||
*/
|
||||
long getStreamLatency() {
|
||||
if (m_rtapi == NULL) {
|
||||
return 0;
|
||||
}
|
||||
return m_rtapi->getStreamLatency();
|
||||
}
|
||||
/**
|
||||
* @brief On some systems, the sample rate used may be slightly different
|
||||
* than that specified in the stream parameters. If a stream is not
|
||||
* open, an RtError (type = INVALID_USE) will be thrown.
|
||||
* @return Returns actual sample rate in use by the stream.
|
||||
*/
|
||||
uint32_t getStreamSampleRate() {
|
||||
if (m_rtapi == NULL) {
|
||||
return 0;
|
||||
}
|
||||
return m_rtapi->getStreamSampleRate();
|
||||
}
|
||||
protected:
|
||||
void openRtApi(airtaudio::api::type _api);
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
@@ -1,87 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#ifndef __AIRTAUDIO_STREAM_OPTION_H__
|
||||
#define __AIRTAUDIO_STREAM_OPTION_H__
|
||||
|
||||
namespace airtaudio {
|
||||
|
||||
/**
|
||||
* @brief The structure for specifying stream options.
|
||||
*
|
||||
* The following flags can be OR'ed together to allow a client to
|
||||
* make changes to the default stream behavior:
|
||||
*
|
||||
* - \e RTAUDIO_NONINTERLEAVED: Use non-interleaved buffers (default = interleaved).
|
||||
* - \e RTAUDIO_MINIMIZE_LATENCY: Attempt to set stream parameters for lowest possible latency.
|
||||
* - \e RTAUDIO_HOG_DEVICE: Attempt grab device for exclusive use.
|
||||
* - \e RTAUDIO_SCHEDULE_REALTIME: Attempt to select realtime scheduling for callback thread.
|
||||
* - \e RTAUDIO_ALSA_USE_DEFAULT: Use the "default" PCM device (ALSA only).
|
||||
*
|
||||
* By default, RtAudio streams pass and receive audio data from the
|
||||
* client in an interleaved format. By passing the
|
||||
* RTAUDIO_NONINTERLEAVED flag to the openStream() function, audio
|
||||
* data will instead be presented in non-interleaved buffers. In
|
||||
* this case, each buffer argument in the RtAudioCallback function
|
||||
* will point to a single array of data, with \c nFrames samples for
|
||||
* each channel concatenated back-to-back. For example, the first
|
||||
* sample of data for the second channel would be located at index \c
|
||||
* nFrames (assuming the \c buffer pointer was recast to the correct
|
||||
* data type for the stream).
|
||||
*
|
||||
* Certain audio APIs offer a number of parameters that influence the
|
||||
* I/O latency of a stream. By default, RtAudio will attempt to set
|
||||
* these parameters internally for robust (glitch-free) performance
|
||||
* (though some APIs, like Windows Direct Sound, make this difficult).
|
||||
* By passing the RTAUDIO_MINIMIZE_LATENCY flag to the openStream()
|
||||
* function, internal stream settings will be influenced in an attempt
|
||||
* to minimize stream latency, though possibly at the expense of stream
|
||||
* performance.
|
||||
*
|
||||
* If the RTAUDIO_HOG_DEVICE flag is set, RtAudio will attempt to
|
||||
* open the input and/or output stream device(s) for exclusive use.
|
||||
* Note that this is not possible with all supported audio APIs.
|
||||
*
|
||||
* If the RTAUDIO_SCHEDULE_REALTIME flag is set, RtAudio will attempt
|
||||
* to select realtime scheduling (round-robin) for the callback thread.
|
||||
* The \c priority parameter will only be used if the RTAUDIO_SCHEDULE_REALTIME
|
||||
* flag is set. It defines the thread's realtime priority.
|
||||
*
|
||||
* If the RTAUDIO_ALSA_USE_DEFAULT flag is set, RtAudio will attempt to
|
||||
* open the "default" PCM device when using the ALSA API. Note that this
|
||||
* will override any specified input or output device id.
|
||||
*
|
||||
* The \c numberOfBuffers parameter can be used to control stream
|
||||
* latency in the Windows DirectSound, Linux OSS, and Linux Alsa APIs
|
||||
* only. A value of two is usually the smallest allowed. Larger
|
||||
* numbers can potentially result in more robust stream performance,
|
||||
* though likely at the cost of stream latency. The value set by the
|
||||
* user is replaced during execution of the RtAudio::openStream()
|
||||
* function by the value actually used by the system.
|
||||
*
|
||||
* The \c streamName parameter can be used to set the client name
|
||||
* when using the Jack API. By default, the client name is set to
|
||||
* RtApiJack. However, if you wish to create multiple instances of
|
||||
* RtAudio with Jack, each instance must have a unique client name.
|
||||
*/
|
||||
class StreamOptions {
|
||||
public:
|
||||
airtaudio::streamFlags flags; //!< A bit-mask of stream flags (RTAUDIO_NONINTERLEAVED, RTAUDIO_MINIMIZE_LATENCY, RTAUDIO_HOG_DEVICE, RTAUDIO_ALSA_USE_DEFAULT).
|
||||
uint32_t numberOfBuffers; //!< Number of stream buffers.
|
||||
std::string streamName; //!< A stream name (currently used only in Jack).
|
||||
int32_t priority; //!< Scheduling priority of callback thread (only used with flag RTAUDIO_SCHEDULE_REALTIME).
|
||||
// Default constructor.
|
||||
StreamOptions() :
|
||||
flags(0),
|
||||
numberOfBuffers(0),
|
||||
priority(0) {}
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@@ -1,30 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#ifndef __AIRTAUDIO_STREAM_PARAMETER_H__
|
||||
#define __AIRTAUDIO_STREAM_PARAMETER_H__
|
||||
|
||||
namespace airtaudio {
|
||||
/**
|
||||
* @brief The structure for specifying input or ouput stream parameters.
|
||||
*/
|
||||
class StreamParameters {
|
||||
public:
|
||||
uint32_t deviceId; //!< Device index (0 to getDeviceCount() - 1).
|
||||
uint32_t nChannels; //!< Number of channels.
|
||||
uint32_t firstChannel; //!< First channel index on device (default = 0).
|
||||
// Default constructor.
|
||||
StreamParameters() :
|
||||
deviceId(0),
|
||||
nChannels(0),
|
||||
firstChannel(0) { }
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
||||
|
File diff suppressed because it is too large
Load Diff
@@ -1,49 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#if !defined(__AIRTAUDIO_API_ALSA_H__) && defined(__LINUX_ALSA__)
|
||||
#define __AIRTAUDIO_API_ALSA_H__
|
||||
|
||||
namespace airtaudio {
|
||||
namespace api {
|
||||
class Alsa: public airtaudio::Api {
|
||||
public:
|
||||
static airtaudio::Api* Create();
|
||||
public:
|
||||
Alsa();
|
||||
virtual ~Alsa();
|
||||
airtaudio::api::type getCurrentApi() {
|
||||
return airtaudio::api::LINUX_ALSA;
|
||||
}
|
||||
uint32_t getDeviceCount();
|
||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||
enum airtaudio::errorType closeStream();
|
||||
enum airtaudio::errorType startStream();
|
||||
enum airtaudio::errorType stopStream();
|
||||
enum airtaudio::errorType abortStream();
|
||||
// This function is intended for internal use only. It must be
|
||||
// public because it is called by the internal callback handler,
|
||||
// which is not a member of RtAudio. External use of this function
|
||||
// will most likely produce highly undesireable results!
|
||||
void callbackEvent();
|
||||
private:
|
||||
std::vector<airtaudio::DeviceInfo> m_devices;
|
||||
void saveDeviceInfo();
|
||||
bool probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options);
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
@@ -1,221 +0,0 @@
|
||||
/**
|
||||
* @author Edouard DUPIN
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#ifdef __ANDROID_JAVA__
|
||||
|
||||
#include <ewol/context/Context.h>
|
||||
#include <unistd.h>
|
||||
#include <airtaudio/Interface.h>
|
||||
#include <airtaudio/debug.h>
|
||||
#include <limits.h>
|
||||
|
||||
airtaudio::Api* airtaudio::api::Android::Create() {
|
||||
ATA_INFO("Create Android device ... ");
|
||||
return new airtaudio::api::Android();
|
||||
}
|
||||
|
||||
|
||||
airtaudio::api::Android::Android() {
|
||||
ATA_INFO("new Android");
|
||||
// On android, we set a static device ...
|
||||
ATA_INFO("get context");
|
||||
ewol::Context& tmpContext = ewol::getContext();
|
||||
ATA_INFO("done p=" << (int64_t)&tmpContext);
|
||||
int32_t deviceCount = tmpContext.audioGetDeviceCount();
|
||||
ATA_ERROR("Get count devices : " << deviceCount);
|
||||
for (int32_t iii=0; iii<deviceCount; ++iii) {
|
||||
std::string property = tmpContext.audioGetDeviceProperty(iii);
|
||||
ATA_ERROR("Get devices property : " << property);
|
||||
std::vector<std::string> listProperty = etk::split(property, ':');
|
||||
airtaudio::DeviceInfo tmp;
|
||||
tmp.name = listProperty[0];
|
||||
std::vector<std::string> listFreq = etk::split(listProperty[2], ',');
|
||||
for(size_t fff=0; fff<listFreq.size(); ++fff) {
|
||||
tmp.sampleRates.push_back(etk::string_to_int32_t(listFreq[fff]));
|
||||
}
|
||||
tmp.outputChannels = 0;
|
||||
tmp.inputChannels = 0;
|
||||
tmp.duplexChannels = 0;
|
||||
if (listProperty[1] == "out") {
|
||||
tmp.isDefaultOutput = true;
|
||||
tmp.isDefaultInput = false;
|
||||
tmp.outputChannels = etk::string_to_int32_t(listProperty[3]);
|
||||
} else if (listProperty[1] == "in") {
|
||||
tmp.isDefaultOutput = false;
|
||||
tmp.isDefaultInput = true;
|
||||
tmp.inputChannels = etk::string_to_int32_t(listProperty[3]);
|
||||
} else {
|
||||
/* duplex */
|
||||
tmp.isDefaultOutput = true;
|
||||
tmp.isDefaultInput = true;
|
||||
tmp.duplexChannels = etk::string_to_int32_t(listProperty[3]);
|
||||
}
|
||||
std::vector<std::string> listFormat = etk::split(listProperty[4], ',');
|
||||
tmp.nativeFormats = 0;
|
||||
for(size_t fff=0; fff<listFormat.size(); ++fff) {
|
||||
if (listFormat[fff] == "float") {
|
||||
tmp.nativeFormats |= FLOAT32;
|
||||
} else if (listFormat[fff] == "double") {
|
||||
tmp.nativeFormats |= FLOAT64;
|
||||
} else if (listFormat[fff] == "s32") {
|
||||
tmp.nativeFormats |= SINT32;
|
||||
} else if (listFormat[fff] == "s24") {
|
||||
tmp.nativeFormats |= SINT24;
|
||||
} else if (listFormat[fff] == "s16") {
|
||||
tmp.nativeFormats |= SINT16;
|
||||
} else if (listFormat[fff] == "s8") {
|
||||
tmp.nativeFormats |= SINT8;
|
||||
}
|
||||
}
|
||||
m_devices.push_back(tmp);
|
||||
}
|
||||
ATA_INFO("Create Android interface (end)");
|
||||
}
|
||||
|
||||
airtaudio::api::Android::~Android() {
|
||||
ATA_INFO("Destroy Android interface");
|
||||
}
|
||||
|
||||
uint32_t airtaudio::api::Android::getDeviceCount() {
|
||||
//ATA_INFO("Get device count:"<< m_devices.size());
|
||||
return m_devices.size();
|
||||
}
|
||||
|
||||
airtaudio::DeviceInfo airtaudio::api::Android::getDeviceInfo(uint32_t _device) {
|
||||
//ATA_INFO("Get device info ...");
|
||||
return m_devices[_device];
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Android::closeStream() {
|
||||
ATA_INFO("Clese Stream");
|
||||
// Can not close the stream now...
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Android::startStream() {
|
||||
ATA_INFO("Start Stream");
|
||||
// Can not close the stream now...
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Android::stopStream() {
|
||||
ATA_INFO("Stop stream");
|
||||
ewol::Context& tmpContext = ewol::getContext();
|
||||
tmpContext.audioCloseDevice(0);
|
||||
// Can not close the stream now...
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Android::abortStream() {
|
||||
ATA_INFO("Abort Stream");
|
||||
ewol::Context& tmpContext = ewol::getContext();
|
||||
tmpContext.audioCloseDevice(0);
|
||||
// Can not close the stream now...
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
void airtaudio::api::Android::callBackEvent(void* _data,
|
||||
int32_t _frameRate) {
|
||||
int32_t doStopStream = 0;
|
||||
airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) m_stream.callbackInfo.callback;
|
||||
double streamTime = getStreamTime();
|
||||
airtaudio::streamStatus status = 0;
|
||||
if (m_stream.doConvertBuffer[OUTPUT] == true) {
|
||||
doStopStream = callback(m_stream.userBuffer[OUTPUT],
|
||||
NULL,
|
||||
_frameRate,
|
||||
streamTime,
|
||||
status,
|
||||
m_stream.callbackInfo.userData);
|
||||
convertBuffer((char*)_data, (char*)m_stream.userBuffer[OUTPUT], m_stream.convertInfo[OUTPUT]);
|
||||
} else {
|
||||
doStopStream = callback(_data,
|
||||
NULL,
|
||||
_frameRate,
|
||||
streamTime,
|
||||
status,
|
||||
m_stream.callbackInfo.userData);
|
||||
}
|
||||
if (doStopStream == 2) {
|
||||
abortStream();
|
||||
return;
|
||||
}
|
||||
airtaudio::Api::tickStreamTime();
|
||||
}
|
||||
|
||||
void airtaudio::api::Android::androidCallBackEvent(void* _data,
|
||||
int32_t _frameRate,
|
||||
void* _userData) {
|
||||
if (_userData == NULL) {
|
||||
ATA_INFO("callback event ... NULL pointer");
|
||||
return;
|
||||
}
|
||||
airtaudio::api::Android* myClass = static_cast<airtaudio::api::Android*>(_userData);
|
||||
myClass->callBackEvent(_data, _frameRate/2);
|
||||
}
|
||||
|
||||
bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options) {
|
||||
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
|
||||
if (_mode != OUTPUT) {
|
||||
ATA_ERROR("Can not start a device input or duplex for Android ...");
|
||||
return false;
|
||||
}
|
||||
m_stream.userFormat = _format;
|
||||
m_stream.nUserChannels[_mode] = _channels;
|
||||
ewol::Context& tmpContext = ewol::getContext();
|
||||
bool ret = false;
|
||||
if (_format == SINT8) {
|
||||
ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 0, androidCallBackEvent, this);
|
||||
} else {
|
||||
ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 1, androidCallBackEvent, this);
|
||||
}
|
||||
m_stream.bufferSize = 256;
|
||||
m_stream.sampleRate = _sampleRate;
|
||||
m_stream.doByteSwap[_mode] = false; // for endienness ...
|
||||
|
||||
// TODO : For now, we write it in hard ==> to bu update later ...
|
||||
m_stream.deviceFormat[_mode] = SINT16;
|
||||
m_stream.nDeviceChannels[_mode] = 2;
|
||||
m_stream.deviceInterleaved[_mode] = true;
|
||||
|
||||
m_stream.doConvertBuffer[_mode] = false;
|
||||
if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
|
||||
m_stream.doConvertBuffer[_mode] = true;
|
||||
}
|
||||
if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) {
|
||||
m_stream.doConvertBuffer[_mode] = true;
|
||||
}
|
||||
if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
|
||||
&& m_stream.nUserChannels[_mode] > 1) {
|
||||
m_stream.doConvertBuffer[_mode] = true;
|
||||
}
|
||||
if (m_stream.doConvertBuffer[_mode] == true) {
|
||||
// Allocate necessary internal buffers.
|
||||
uint64_t bufferBytes = m_stream.nUserChannels[_mode] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
|
||||
m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
|
||||
if (m_stream.userBuffer[_mode] == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Android::probeDeviceOpen: error allocating user buffer memory.");
|
||||
}
|
||||
setConvertInfo(_mode, _firstChannel);
|
||||
}
|
||||
ATA_INFO("device format : " << m_stream.deviceFormat[_mode] << " user format : " << m_stream.userFormat);
|
||||
ATA_INFO("device channels : " << m_stream.nDeviceChannels[_mode] << " user channels : " << m_stream.nUserChannels[_mode]);
|
||||
ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[_mode]);
|
||||
if (ret == false) {
|
||||
ATA_ERROR("Can not open device.");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -1,53 +0,0 @@
|
||||
/**
|
||||
* @author Edouard DUPIN
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#if !defined(__AIRTAUDIO_API_ANDROID_H__) && defined(__ANDROID_JAVA__)
|
||||
#define __AIRTAUDIO_API_ANDROID_H__
|
||||
|
||||
namespace airtaudio {
|
||||
namespace api {
|
||||
class Android: public airtaudio::Api {
|
||||
public:
|
||||
static airtaudio::Api* Create();
|
||||
public:
|
||||
Android();
|
||||
virtual ~Android();
|
||||
airtaudio::api::type getCurrentApi() {
|
||||
return airtaudio::api::ANDROID_JAVA;
|
||||
}
|
||||
uint32_t getDeviceCount();
|
||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||
enum airtaudio::errorType closeStream();
|
||||
enum airtaudio::errorType startStream();
|
||||
enum airtaudio::errorType stopStream();
|
||||
enum airtaudio::errorType abortStream();
|
||||
// This function is intended for internal use only. It must be
|
||||
// public because it is called by the internal callback handler,
|
||||
// which is not a member of RtAudio. External use of this function
|
||||
// will most likely produce highly undesireable results!
|
||||
void callbackEvent();
|
||||
private:
|
||||
std::vector<airtaudio::DeviceInfo> m_devices;
|
||||
void saveDeviceInfo();
|
||||
bool probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options);
|
||||
private:
|
||||
void callBackEvent(void* _data,
|
||||
int32_t _frameRate);
|
||||
static void androidCallBackEvent(void* _data,
|
||||
int32_t _frameRate,
|
||||
void* _userData);
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
@@ -1,51 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#if !defined(__AIRTAUDIO_API_ASIO_H__) && defined(__WINDOWS_ASIO__)
|
||||
#define __AIRTAUDIO_API_ASIO_H__
|
||||
|
||||
namespace airtaudio {
|
||||
namespace api {
|
||||
class Asio: public airtaudio::Api {
|
||||
public:
|
||||
static airtaudio::Api* Create();
|
||||
public:
|
||||
Asio();
|
||||
virtual ~Asio();
|
||||
airtaudio::api::type getCurrentApi() {
|
||||
return airtaudio::WINDOWS_ASIO;
|
||||
}
|
||||
uint32_t getDeviceCount();
|
||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||
enum airtaudio::errorType closeStream();
|
||||
enum airtaudio::errorType startStream();
|
||||
enum airtaudio::errorType stopStream();
|
||||
enum airtaudio::errorType abortStream();
|
||||
long getStreamLatency();
|
||||
// This function is intended for internal use only. It must be
|
||||
// public because it is called by the internal callback handler,
|
||||
// which is not a member of RtAudio. External use of this function
|
||||
// will most likely produce highly undesireable results!
|
||||
bool callbackEvent(long _bufferIndex);
|
||||
private:
|
||||
std::vector<airtaudio::DeviceInfo> m_devices;
|
||||
void saveDeviceInfo();
|
||||
bool m_coInitialized;
|
||||
bool probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options);
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
File diff suppressed because it is too large
Load Diff
@@ -1,56 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#if !defined(__AIRTAUDIO_API_CORE_H__) && defined(__MACOSX_CORE__)
|
||||
#define __AIRTAUDIO_API_CORE_H__
|
||||
|
||||
#include <CoreAudio/AudioHardware.h>
|
||||
|
||||
namespace airtaudio {
|
||||
namespace api {
|
||||
class Core: public airtaudio::Api {
|
||||
public:
|
||||
static airtaudio::Api* Create();
|
||||
public:
|
||||
Core();
|
||||
virtual ~Core();
|
||||
airtaudio::api::type getCurrentApi() {
|
||||
return airtaudio::api::MACOSX_CORE;
|
||||
}
|
||||
uint32_t getDeviceCount();
|
||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||
uint32_t getDefaultOutputDevice();
|
||||
uint32_t getDefaultInputDevice();
|
||||
enum airtaudio::errorType closeStream();
|
||||
enum airtaudio::errorType startStream();
|
||||
enum airtaudio::errorType stopStream();
|
||||
enum airtaudio::errorType abortStream();
|
||||
long getStreamLatency();
|
||||
// This function is intended for internal use only. It must be
|
||||
// public because it is called by the internal callback handler,
|
||||
// which is not a member of RtAudio. External use of this function
|
||||
// will most likely produce highly undesireable results!
|
||||
bool callbackEvent(AudioDeviceID _deviceId,
|
||||
const AudioBufferList *_inBufferList,
|
||||
const AudioBufferList *_outBufferList);
|
||||
|
||||
private:
|
||||
bool probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options);
|
||||
static const char* getErrorCode(OSStatus _code);
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
@@ -1,53 +0,0 @@
|
||||
/**
|
||||
* @author Edouard DUPIN
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#if !defined(__AIRTAUDIO_API_CORE_IOS_H__) && defined(__IOS_CORE__)
|
||||
#define __AIRTAUDIO_API_CORE_IOS_H__
|
||||
|
||||
namespace airtaudio {
|
||||
namespace api {
|
||||
class CoreIosPrivate;
|
||||
class CoreIos: public airtaudio::Api {
|
||||
public:
|
||||
static airtaudio::Api* Create();
|
||||
public:
|
||||
CoreIos();
|
||||
virtual ~CoreIos();
|
||||
airtaudio::api::type getCurrentApi() {
|
||||
return airtaudio::api::IOS_CORE;
|
||||
}
|
||||
uint32_t getDeviceCount();
|
||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||
enum airtaudio::errorType closeStream();
|
||||
enum airtaudio::errorType startStream();
|
||||
enum airtaudio::errorType stopStream();
|
||||
enum airtaudio::errorType abortStream();
|
||||
// This function is intended for internal use only. It must be
|
||||
// public because it is called by the internal callback handler,
|
||||
// which is not a member of RtAudio. External use of this function
|
||||
// will most likely produce highly undesireable results!
|
||||
void callbackEvent();
|
||||
private:
|
||||
std::vector<airtaudio::DeviceInfo> m_devices;
|
||||
void saveDeviceInfo();
|
||||
bool probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options);
|
||||
public:
|
||||
void callBackEvent(void* _data,
|
||||
int32_t _frameRate);
|
||||
private:
|
||||
CoreIosPrivate* m_private;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
@@ -1,307 +0,0 @@
|
||||
/**
|
||||
* @author Edouard DUPIN
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#ifdef __IOS_CORE__
|
||||
|
||||
#import <Foundation/Foundation.h>
|
||||
#import <AudioToolbox/AudioToolbox.h>
|
||||
|
||||
#include <unistd.h>
|
||||
#include <airtaudio/Interface.h>
|
||||
#include <airtaudio/debug.h>
|
||||
#include <limits.h>
|
||||
|
||||
airtaudio::Api* airtaudio::api::CoreIos::Create(void) {
|
||||
ATA_INFO("Create CoreIos device ... ");
|
||||
return new airtaudio::api::CoreIos();
|
||||
}
|
||||
|
||||
#define kOutputBus 0
|
||||
#define kInputBus 1
|
||||
|
||||
namespace airtaudio {
|
||||
namespace api {
|
||||
class CoreIosPrivate {
|
||||
public:
|
||||
AudioComponentInstance audioUnit;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
|
||||
airtaudio::api::CoreIos::CoreIos(void) :
|
||||
m_private(new airtaudio::api::CoreIosPrivate) {
|
||||
ATA_INFO("new CoreIos");
|
||||
int32_t deviceCount = 2;
|
||||
ATA_ERROR("Get count devices : " << 2);
|
||||
airtaudio::DeviceInfo tmp;
|
||||
// Add default output format :
|
||||
tmp.name = "out";
|
||||
tmp.sampleRates.push_back(48000);
|
||||
tmp.outputChannels = 2;
|
||||
tmp.inputChannels = 0;
|
||||
tmp.duplexChannels = 0;
|
||||
tmp.isDefaultOutput = true;
|
||||
tmp.isDefaultInput = false;
|
||||
tmp.nativeFormats = SINT16;
|
||||
m_devices.push_back(tmp);
|
||||
// add default input format:
|
||||
tmp.name = "in";
|
||||
tmp.sampleRates.push_back(48000);
|
||||
tmp.outputChannels = 0;
|
||||
tmp.inputChannels = 2;
|
||||
tmp.duplexChannels = 0;
|
||||
tmp.isDefaultOutput = false;
|
||||
tmp.isDefaultInput = true;
|
||||
tmp.nativeFormats = SINT16;
|
||||
m_devices.push_back(tmp);
|
||||
|
||||
ATA_INFO("Create CoreIOs interface (end)");
|
||||
}
|
||||
|
||||
airtaudio::api::CoreIos::~CoreIos(void) {
|
||||
ATA_INFO("Destroy CoreIOs interface");
|
||||
AudioUnitUninitialize(m_private->audioUnit);
|
||||
delete m_private;
|
||||
m_private = NULL;
|
||||
}
|
||||
|
||||
uint32_t airtaudio::api::CoreIos::getDeviceCount(void) {
|
||||
//ATA_INFO("Get device count:"<< m_devices.size());
|
||||
return m_devices.size();
|
||||
}
|
||||
|
||||
airtaudio::DeviceInfo airtaudio::api::CoreIos::getDeviceInfo(uint32_t _device) {
|
||||
//ATA_INFO("Get device info ...");
|
||||
return m_devices[_device];
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::CoreIos::closeStream(void) {
|
||||
ATA_INFO("Close Stream");
|
||||
// Can not close the stream now...
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::CoreIos::startStream(void) {
|
||||
ATA_INFO("Start Stream");
|
||||
OSStatus status = AudioOutputUnitStart(m_private->audioUnit);
|
||||
// Can not close the stream now...
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::CoreIos::stopStream(void) {
|
||||
ATA_INFO("Stop stream");
|
||||
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
|
||||
// Can not close the stream now...
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::CoreIos::abortStream(void) {
|
||||
ATA_INFO("Abort Stream");
|
||||
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
|
||||
// Can not close the stream now...
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
void airtaudio::api::CoreIos::callBackEvent(void* _data,
|
||||
int32_t _frameRate) {
|
||||
|
||||
#if 0
|
||||
static double value=0;
|
||||
int16_t* vals = (int16_t*)_data;
|
||||
for (int32_t iii=0; iii<_frameRate; ++iii) {
|
||||
*vals++ = (int16_t)(sin(value) * 32760.0);
|
||||
*vals++ = (int16_t)(sin(value) * 32760.0);
|
||||
value += 0.09;
|
||||
if (value >= M_PI*2.0) {
|
||||
value -= M_PI*2.0;
|
||||
}
|
||||
}
|
||||
return;
|
||||
#endif
|
||||
int32_t doStopStream = 0;
|
||||
airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) m_stream.callbackInfo.callback;
|
||||
double streamTime = getStreamTime();
|
||||
airtaudio::streamStatus status = 0;
|
||||
if (m_stream.doConvertBuffer[OUTPUT] == true) {
|
||||
doStopStream = callback(m_stream.userBuffer[OUTPUT],
|
||||
NULL,
|
||||
_frameRate,
|
||||
streamTime,
|
||||
status,
|
||||
m_stream.callbackInfo.userData);
|
||||
convertBuffer((char*)_data, (char*)m_stream.userBuffer[OUTPUT], m_stream.convertInfo[OUTPUT]);
|
||||
} else {
|
||||
doStopStream = callback(_data,
|
||||
NULL,
|
||||
_frameRate,
|
||||
streamTime,
|
||||
status,
|
||||
m_stream.callbackInfo.userData);
|
||||
}
|
||||
if (doStopStream == 2) {
|
||||
abortStream();
|
||||
return;
|
||||
}
|
||||
airtaudio::Api::tickStreamTime();
|
||||
}
|
||||
|
||||
static OSStatus playbackCallback(void *_userData,
|
||||
AudioUnitRenderActionFlags *ioActionFlags,
|
||||
const AudioTimeStamp *inTimeStamp,
|
||||
uint32_t inBusNumber,
|
||||
uint32_t inNumberFrames,
|
||||
AudioBufferList *ioData) {
|
||||
if (_userData == NULL) {
|
||||
ATA_ERROR("callback event ... NULL pointer");
|
||||
return -1;
|
||||
}
|
||||
airtaudio::api::CoreIos* myClass = static_cast<airtaudio::api::CoreIos*>(_userData);
|
||||
// get all requested buffer :
|
||||
for (int32_t iii=0; iii < ioData->mNumberBuffers; iii++) {
|
||||
AudioBuffer buffer = ioData->mBuffers[iii];
|
||||
int32_t numberFrame = buffer.mDataByteSize/2/*stereo*/ /sizeof(int16_t);
|
||||
ATA_VERBOSE("request data size: " << numberFrame << " busNumber=" << inBusNumber);
|
||||
myClass->callBackEvent(buffer.mData, numberFrame);
|
||||
}
|
||||
return noErr;
|
||||
}
|
||||
|
||||
|
||||
bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options) {
|
||||
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
|
||||
if (_mode != OUTPUT) {
|
||||
ATA_ERROR("Can not start a device input or duplex for CoreIos ...");
|
||||
return false;
|
||||
}
|
||||
bool ret = true;
|
||||
|
||||
// configure Airtaudio internal configuration:
|
||||
m_stream.userFormat = _format;
|
||||
m_stream.nUserChannels[_mode] = _channels;
|
||||
m_stream.bufferSize = 8192;
|
||||
m_stream.sampleRate = _sampleRate;
|
||||
m_stream.doByteSwap[_mode] = false; // for endienness ...
|
||||
|
||||
// TODO : For now, we write it in hard ==> to be update later ...
|
||||
m_stream.deviceFormat[_mode] = SINT16;
|
||||
m_stream.nDeviceChannels[_mode] = 2;
|
||||
m_stream.deviceInterleaved[_mode] = true;
|
||||
|
||||
m_stream.doConvertBuffer[_mode] = false;
|
||||
if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
|
||||
m_stream.doConvertBuffer[_mode] = true;
|
||||
}
|
||||
if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) {
|
||||
m_stream.doConvertBuffer[_mode] = true;
|
||||
}
|
||||
if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
|
||||
&& m_stream.nUserChannels[_mode] > 1) {
|
||||
m_stream.doConvertBuffer[_mode] = true;
|
||||
}
|
||||
if (m_stream.doConvertBuffer[_mode] == true) {
|
||||
// Allocate necessary internal buffers.
|
||||
uint64_t bufferBytes = m_stream.nUserChannels[_mode] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
|
||||
m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
|
||||
if (m_stream.userBuffer[_mode] == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Android::probeDeviceOpen: error allocating user buffer memory.");
|
||||
}
|
||||
setConvertInfo(_mode, _firstChannel);
|
||||
}
|
||||
ATA_INFO("device format : " << m_stream.deviceFormat[_mode] << " user format : " << m_stream.userFormat);
|
||||
ATA_INFO("device channels : " << m_stream.nDeviceChannels[_mode] << " user channels : " << m_stream.nUserChannels[_mode]);
|
||||
ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[_mode]);
|
||||
if (ret == false) {
|
||||
ATA_ERROR("Can not open device.");
|
||||
}
|
||||
|
||||
// Configure IOs interface:
|
||||
OSStatus status;
|
||||
|
||||
// Describe audio component
|
||||
AudioComponentDescription desc;
|
||||
desc.componentType = kAudioUnitType_Output;
|
||||
desc.componentSubType = kAudioUnitSubType_RemoteIO;
|
||||
desc.componentFlags = 0;
|
||||
desc.componentFlagsMask = 0;
|
||||
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
|
||||
|
||||
// Get component
|
||||
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
|
||||
|
||||
// Get audio units
|
||||
status = AudioComponentInstanceNew(inputComponent, &m_private->audioUnit);
|
||||
if (status != 0) {
|
||||
ATA_ERROR("can not create an audio intance...");
|
||||
}
|
||||
|
||||
uint32_t flag = 1;
|
||||
// Enable IO for playback
|
||||
status = AudioUnitSetProperty(m_private->audioUnit,
|
||||
kAudioOutputUnitProperty_EnableIO,
|
||||
kAudioUnitScope_Output,
|
||||
kOutputBus,
|
||||
&flag,
|
||||
sizeof(flag));
|
||||
if (status != 0) {
|
||||
ATA_ERROR("can not request audio autorisation...");
|
||||
}
|
||||
|
||||
// Describe format
|
||||
AudioStreamBasicDescription audioFormat;
|
||||
audioFormat.mSampleRate = 48000.00;
|
||||
audioFormat.mFormatID = kAudioFormatLinearPCM;
|
||||
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
|
||||
audioFormat.mFramesPerPacket = 1; //
|
||||
audioFormat.mChannelsPerFrame = 2; // stereo
|
||||
audioFormat.mBitsPerChannel = sizeof(short) * 8;
|
||||
audioFormat.mBytesPerPacket = sizeof(short) * audioFormat.mChannelsPerFrame;
|
||||
audioFormat.mBytesPerFrame = sizeof(short) * audioFormat.mChannelsPerFrame;
|
||||
audioFormat.mReserved = 0;
|
||||
// Apply format
|
||||
status = AudioUnitSetProperty(m_private->audioUnit,
|
||||
kAudioUnitProperty_StreamFormat,
|
||||
kAudioUnitScope_Input,
|
||||
kOutputBus,
|
||||
&audioFormat,
|
||||
sizeof(audioFormat));
|
||||
if (status != 0) {
|
||||
ATA_ERROR("can not set stream properties...");
|
||||
}
|
||||
|
||||
|
||||
// Set output callback
|
||||
AURenderCallbackStruct callbackStruct;
|
||||
callbackStruct.inputProc = playbackCallback;
|
||||
callbackStruct.inputProcRefCon = this;
|
||||
status = AudioUnitSetProperty(m_private->audioUnit,
|
||||
kAudioUnitProperty_SetRenderCallback,
|
||||
kAudioUnitScope_Global,
|
||||
kOutputBus,
|
||||
&callbackStruct,
|
||||
sizeof(callbackStruct));
|
||||
if (status != 0) {
|
||||
ATA_ERROR("can not set Callback...");
|
||||
}
|
||||
|
||||
// Initialise
|
||||
status = AudioUnitInitialize(m_private->audioUnit);
|
||||
if (status != 0) {
|
||||
ATA_ERROR("can not initialize...");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
1548
airtaudio/api/Ds.cpp
1548
airtaudio/api/Ds.cpp
File diff suppressed because it is too large
Load Diff
@@ -1,54 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#if !defined(__AIRTAUDIO_API_DS_H__) && defined(__WINDOWS_DS__)
|
||||
#define __AIRTAUDIO_API_DS_H__
|
||||
|
||||
namespace airtaudio {
|
||||
namespace api {
|
||||
class Ds: public airtaudio::Api {
|
||||
public:
|
||||
static airtaudio::Api* Create();
|
||||
public:
|
||||
Ds();
|
||||
virtual ~Ds();
|
||||
airtaudio::api::type getCurrentApi() {
|
||||
return airtaudio::api::WINDOWS_DS;
|
||||
}
|
||||
uint32_t getDeviceCount();
|
||||
uint32_t getDefaultOutputDevice();
|
||||
uint32_t getDefaultInputDevice();
|
||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||
enum airtaudio::errorType closeStream();
|
||||
enum airtaudio::errorType startStream();
|
||||
enum airtaudio::errorType stopStream();
|
||||
enum airtaudio::errorType abortStream();
|
||||
long getStreamLatency();
|
||||
// This function is intended for internal use only. It must be
|
||||
// public because it is called by the internal callback handler,
|
||||
// which is not a member of RtAudio. External use of this function
|
||||
// will most likely produce highly undesireable results!
|
||||
void callbackEvent();
|
||||
private:
|
||||
bool m_coInitialized;
|
||||
bool m_buffersRolling;
|
||||
long m_duplexPrerollBytes;
|
||||
std::vector<struct DsDevice> dsDevices;
|
||||
bool probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options);
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
@@ -1,61 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#if defined(__AIRTAUDIO_DUMMY__)
|
||||
#include <airtaudio/api/Dummy.h>
|
||||
#include <airtaudio/debug.h>
|
||||
|
||||
airtaudio::Api* airtaudio::api::Dummy::Create() {
|
||||
return new airtaudio::api::Dummy();
|
||||
}
|
||||
|
||||
|
||||
airtaudio::api::Dummy::Dummy() {
|
||||
m_errorText = "airtaudio::api::Dummy: This class provides no functionality.";
|
||||
error(airtaudio::errorWarning);
|
||||
}
|
||||
|
||||
uint32_t airtaudio::api::Dummy::getDeviceCount() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
rtaudio::DeviceInfo airtaudio::api::Dummy::getDeviceInfo(uint32_t _device) {
|
||||
()_device;
|
||||
rtaudio::DeviceInfo info;
|
||||
return info;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Dummy::closeStream() {
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Dummy::startStream() {
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Dummy::stopStream() {
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Dummy::abortStream() {
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
bool airtaudio::api::Dummy::probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options) {
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -1,43 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#if !defined(__AIRTAUDIO_API_DUMMY_H__) && defined(__AIRTAUDIO_DUMMY__)
|
||||
#define __AIRTAUDIO_API_DUMMY_H__
|
||||
|
||||
#include <airtaudio/Interface.h>
|
||||
|
||||
namespace airtaudio {
|
||||
namespace api {
|
||||
class Dummy: public airtaudio::Api {
|
||||
public:
|
||||
static airtaudio::Api* Create();
|
||||
public:
|
||||
Dummy();
|
||||
airtaudio::api::type getCurrentApi() {
|
||||
return airtaudio::api::RTAUDIO_DUMMY;
|
||||
}
|
||||
uint32_t getDeviceCount();
|
||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||
enum airtaudio::errorType closeStream();
|
||||
enum airtaudio::errorType startStream();
|
||||
enum airtaudio::errorType stopStream();
|
||||
enum airtaudio::errorType abortStream();
|
||||
private:
|
||||
bool probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options);
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
@@ -1,748 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
|
||||
#if defined(__UNIX_JACK__)
|
||||
#include <unistd.h>
|
||||
#include <limits.h>
|
||||
#include <iostream>
|
||||
#include <airtaudio/Interface.h>
|
||||
#include <airtaudio/debug.h>
|
||||
#include <string.h>
|
||||
|
||||
airtaudio::Api* airtaudio::api::Jack::Create() {
|
||||
return new airtaudio::api::Jack();
|
||||
}
|
||||
|
||||
|
||||
// JACK is a low-latency audio server, originally written for the
|
||||
// GNU/Linux operating system and now also ported to OS-X. It can
|
||||
// connect a number of different applications to an audio device, as
|
||||
// well as allowing them to share audio between themselves.
|
||||
//
|
||||
// When using JACK with RtAudio, "devices" refer to JACK clients that
|
||||
// have ports connected to the server. The JACK server is typically
|
||||
// started in a terminal as follows:
|
||||
//
|
||||
// .jackd -d alsa -d hw:0
|
||||
//
|
||||
// or through an interface program such as qjackctl. Many of the
|
||||
// parameters normally set for a stream are fixed by the JACK server
|
||||
// and can be specified when the JACK server is started. In
|
||||
// particular,
|
||||
//
|
||||
// .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
|
||||
//
|
||||
// specifies a sample rate of 44100 Hz, a buffer size of 512 sample
|
||||
// frames, and number of buffers = 4. Once the server is running, it
|
||||
// is not possible to override these values. If the values are not
|
||||
// specified in the command-line, the JACK server uses default values.
|
||||
//
|
||||
// The JACK server does not have to be running when an instance of
|
||||
// RtApiJack is created, though the function getDeviceCount() will
|
||||
// report 0 devices found until JACK has been started. When no
|
||||
// devices are available (i.e., the JACK server is not running), a
|
||||
// stream cannot be opened.
|
||||
|
||||
#include <jack/jack.h>
|
||||
#include <unistd.h>
|
||||
#include <cstdio>
|
||||
|
||||
// A structure to hold various information related to the Jack API
|
||||
// implementation.
|
||||
struct JackHandle {
|
||||
jack_client_t *client;
|
||||
jack_port_t **ports[2];
|
||||
std::string deviceName[2];
|
||||
bool xrun[2];
|
||||
std::condition_variable condition;
|
||||
int32_t drainCounter; // Tracks callback counts when draining
|
||||
bool internalDrain; // Indicates if stop is initiated from callback or not.
|
||||
|
||||
JackHandle() :
|
||||
client(0),
|
||||
drainCounter(0),
|
||||
internalDrain(false) {
|
||||
ports[0] = 0;
|
||||
ports[1] = 0;
|
||||
xrun[0] = false;
|
||||
xrun[1] = false;
|
||||
}
|
||||
};
|
||||
|
||||
airtaudio::api::Jack::Jack() {
|
||||
// Nothing to do here.
|
||||
}
|
||||
|
||||
airtaudio::api::Jack::~Jack() {
|
||||
if (m_stream.state != STREAM_CLOSED) {
|
||||
closeStream();
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t airtaudio::api::Jack::getDeviceCount() {
|
||||
// See if we can become a jack client.
|
||||
jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption;
|
||||
jack_status_t *status = NULL;
|
||||
jack_client_t *client = jack_client_open("RtApiJackCount", options, status);
|
||||
if (client == NULL) {
|
||||
return 0;
|
||||
}
|
||||
const char **ports;
|
||||
std::string port, previousPort;
|
||||
uint32_t nChannels = 0, nDevices = 0;
|
||||
ports = jack_get_ports(client, NULL, NULL, 0);
|
||||
if (ports) {
|
||||
// Parse the port names up to the first colon (:).
|
||||
size_t iColon = 0;
|
||||
do {
|
||||
port = (char *) ports[ nChannels ];
|
||||
iColon = port.find(":");
|
||||
if (iColon != std::string::npos) {
|
||||
port = port.substr(0, iColon + 1);
|
||||
if (port != previousPort) {
|
||||
nDevices++;
|
||||
previousPort = port;
|
||||
}
|
||||
}
|
||||
} while (ports[++nChannels]);
|
||||
free(ports);
|
||||
}
|
||||
jack_client_close(client);
|
||||
return nDevices;
|
||||
}
|
||||
|
||||
airtaudio::DeviceInfo airtaudio::api::Jack::getDeviceInfo(uint32_t _device) {
|
||||
airtaudio::DeviceInfo info;
|
||||
info.probed = false;
|
||||
jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption
|
||||
jack_status_t *status = NULL;
|
||||
jack_client_t *client = jack_client_open("RtApiJackInfo", options, status);
|
||||
if (client == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Jack::getDeviceInfo: Jack server not found or connection error!");
|
||||
// TODO : airtaudio::errorWarning;
|
||||
return info;
|
||||
}
|
||||
const char **ports;
|
||||
std::string port, previousPort;
|
||||
uint32_t nPorts = 0, nDevices = 0;
|
||||
ports = jack_get_ports(client, NULL, NULL, 0);
|
||||
if (ports) {
|
||||
// Parse the port names up to the first colon (:).
|
||||
size_t iColon = 0;
|
||||
do {
|
||||
port = (char *) ports[ nPorts ];
|
||||
iColon = port.find(":");
|
||||
if (iColon != std::string::npos) {
|
||||
port = port.substr(0, iColon);
|
||||
if (port != previousPort) {
|
||||
if (nDevices == _device) {
|
||||
info.name = port;
|
||||
}
|
||||
nDevices++;
|
||||
previousPort = port;
|
||||
}
|
||||
}
|
||||
} while (ports[++nPorts]);
|
||||
free(ports);
|
||||
}
|
||||
if (_device >= nDevices) {
|
||||
jack_client_close(client);
|
||||
ATA_ERROR("airtaudio::api::Jack::getDeviceInfo: device ID is invalid!");
|
||||
// TODO : airtaudio::errorInvalidUse;
|
||||
return info;
|
||||
}
|
||||
// Get the current jack server sample rate.
|
||||
info.sampleRates.clear();
|
||||
info.sampleRates.push_back(jack_get_sample_rate(client));
|
||||
// Count the available ports containing the client name as device
|
||||
// channels. Jack "input ports" equal RtAudio output channels.
|
||||
uint32_t nChannels = 0;
|
||||
ports = jack_get_ports(client, info.name.c_str(), NULL, JackPortIsInput);
|
||||
if (ports) {
|
||||
while (ports[ nChannels ]) {
|
||||
nChannels++;
|
||||
}
|
||||
free(ports);
|
||||
info.outputChannels = nChannels;
|
||||
}
|
||||
// Jack "output ports" equal RtAudio input channels.
|
||||
nChannels = 0;
|
||||
ports = jack_get_ports(client, info.name.c_str(), NULL, JackPortIsOutput);
|
||||
if (ports) {
|
||||
while (ports[ nChannels ]) {
|
||||
nChannels++;
|
||||
}
|
||||
free(ports);
|
||||
info.inputChannels = nChannels;
|
||||
}
|
||||
if (info.outputChannels == 0 && info.inputChannels == 0) {
|
||||
jack_client_close(client);
|
||||
ATA_ERROR("airtaudio::api::Jack::getDeviceInfo: error determining Jack input/output channels!");
|
||||
// TODO : airtaudio::errorWarning;
|
||||
return info;
|
||||
}
|
||||
// If device opens for both playback and capture, we determine the channels.
|
||||
if (info.outputChannels > 0 && info.inputChannels > 0) {
|
||||
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
|
||||
}
|
||||
// Jack always uses 32-bit floats.
|
||||
info.nativeFormats = airtaudio::FLOAT32;
|
||||
// Jack doesn't provide default devices so we'll use the first available one.
|
||||
if ( _device == 0
|
||||
&& info.outputChannels > 0) {
|
||||
info.isDefaultOutput = true;
|
||||
}
|
||||
if ( _device == 0
|
||||
&& info.inputChannels > 0) {
|
||||
info.isDefaultInput = true;
|
||||
}
|
||||
jack_client_close(client);
|
||||
info.probed = true;
|
||||
return info;
|
||||
}
|
||||
|
||||
static int32_t jackCallbackHandler(jack_nframes_t _nframes, void *_infoPointer) {
|
||||
airtaudio::CallbackInfo* info = (airtaudio::CallbackInfo*)_infoPointer;
|
||||
airtaudio::api::Jack* object = (airtaudio::api::Jack*)info->object;
|
||||
if (object->callbackEvent((uint64_t)_nframes) == false) {
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
// This function will be called by a spawned thread when the Jack
|
||||
// server signals that it is shutting down. It is necessary to handle
|
||||
// it this way because the jackShutdown() function must return before
|
||||
// the jack_deactivate() function (in closeStream()) will return.
|
||||
static void jackCloseStream(void *_ptr) {
|
||||
airtaudio::CallbackInfo* info = (airtaudio::CallbackInfo*)_ptr;
|
||||
airtaudio::api::Jack* object = (airtaudio::api::Jack*)info->object;
|
||||
object->closeStream();
|
||||
}
|
||||
|
||||
static void jackShutdown(void* _infoPointer) {
|
||||
airtaudio::CallbackInfo* info = (airtaudio::CallbackInfo*)_infoPointer;
|
||||
airtaudio::api::Jack* object = (airtaudio::api::Jack*)info->object;
|
||||
// Check current stream state. If stopped, then we'll assume this
|
||||
// was called as a result of a call to airtaudio::api::Jack::stopStream (the
|
||||
// deactivation of a client handle causes this function to be called).
|
||||
// If not, we'll assume the Jack server is shutting down or some
|
||||
// other problem occurred and we should close the stream.
|
||||
if (object->isStreamRunning() == false) {
|
||||
return;
|
||||
}
|
||||
new std::thread(jackCloseStream, info);
|
||||
ATA_ERROR("RtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!");
|
||||
}
|
||||
|
||||
static int32_t jackXrun(void* _infoPointer) {
|
||||
JackHandle* handle = (JackHandle*)_infoPointer;
|
||||
if (handle->ports[0]) {
|
||||
handle->xrun[0] = true;
|
||||
}
|
||||
if (handle->ports[1]) {
|
||||
handle->xrun[1] = true;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t* _bufferSize,
|
||||
airtaudio::StreamOptions* _options) {
|
||||
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
|
||||
// Look for jack server and try to become a client (only do once per stream).
|
||||
jack_client_t *client = 0;
|
||||
if ( _mode == OUTPUT
|
||||
|| ( _mode == INPUT
|
||||
&& m_stream.mode != OUTPUT)) {
|
||||
jack_options_t jackoptions = (jack_options_t) (JackNoStartServer); //JackNullOption;
|
||||
jack_status_t *status = NULL;
|
||||
if (_options && !_options->streamName.empty()) {
|
||||
client = jack_client_open(_options->streamName.c_str(), jackoptions, status);
|
||||
} else {
|
||||
client = jack_client_open("RtApiJack", jackoptions, status);
|
||||
}
|
||||
if (client == 0) {
|
||||
ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: Jack server not found or connection error!");
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
// The handle must have been created on an earlier pass.
|
||||
client = handle->client;
|
||||
}
|
||||
const char **ports;
|
||||
std::string port, previousPort, deviceName;
|
||||
uint32_t nPorts = 0, nDevices = 0;
|
||||
ports = jack_get_ports(client, NULL, NULL, 0);
|
||||
if (ports) {
|
||||
// Parse the port names up to the first colon (:).
|
||||
size_t iColon = 0;
|
||||
do {
|
||||
port = (char *) ports[ nPorts ];
|
||||
iColon = port.find(":");
|
||||
if (iColon != std::string::npos) {
|
||||
port = port.substr(0, iColon);
|
||||
if (port != previousPort) {
|
||||
if (nDevices == _device) {
|
||||
deviceName = port;
|
||||
}
|
||||
nDevices++;
|
||||
previousPort = port;
|
||||
}
|
||||
}
|
||||
} while (ports[++nPorts]);
|
||||
free(ports);
|
||||
}
|
||||
if (_device >= nDevices) {
|
||||
ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: device ID is invalid!");
|
||||
return false;
|
||||
}
|
||||
// Count the available ports containing the client name as device
|
||||
// channels. Jack "input ports" equal RtAudio output channels.
|
||||
uint32_t nChannels = 0;
|
||||
uint64_t flag = JackPortIsInput;
|
||||
if (_mode == INPUT) flag = JackPortIsOutput;
|
||||
ports = jack_get_ports(client, deviceName.c_str(), NULL, flag);
|
||||
if (ports) {
|
||||
while (ports[ nChannels ]) {
|
||||
nChannels++;
|
||||
}
|
||||
free(ports);
|
||||
}
|
||||
// Compare the jack ports for specified client to the requested number of channels.
|
||||
if (nChannels < (_channels + _firstChannel)) {
|
||||
ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: requested number of channels (" << _channels << ") + offset (" << _firstChannel << ") not found for specified device (" << _device << ":" << deviceName << ").");
|
||||
return false;
|
||||
}
|
||||
// Check the jack server sample rate.
|
||||
uint32_t jackRate = jack_get_sample_rate(client);
|
||||
if (_sampleRate != jackRate) {
|
||||
jack_client_close(client);
|
||||
ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: the requested sample rate (" << _sampleRate << ") is different than the JACK server rate (" << jackRate << ").");
|
||||
return false;
|
||||
}
|
||||
m_stream.sampleRate = jackRate;
|
||||
// Get the latency of the JACK port.
|
||||
ports = jack_get_ports(client, deviceName.c_str(), NULL, flag);
|
||||
if (ports[ _firstChannel ]) {
|
||||
// Added by Ge Wang
|
||||
jack_latency_callback_mode_t cbmode = (_mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
|
||||
// the range (usually the min and max are equal)
|
||||
jack_latency_range_t latrange; latrange.min = latrange.max = 0;
|
||||
// get the latency range
|
||||
jack_port_get_latency_range(jack_port_by_name(client, ports[_firstChannel]), cbmode, &latrange);
|
||||
// be optimistic, use the min!
|
||||
m_stream.latency[_mode] = latrange.min;
|
||||
//m_stream.latency[_mode] = jack_port_get_latency(jack_port_by_name(client, ports[ _firstChannel ]));
|
||||
}
|
||||
free(ports);
|
||||
// The jack server always uses 32-bit floating-point data.
|
||||
m_stream.deviceFormat[_mode] = FLOAT32;
|
||||
m_stream.userFormat = _format;
|
||||
if (_options && _options->flags & NONINTERLEAVED) {
|
||||
m_stream.userInterleaved = false;
|
||||
} else {
|
||||
m_stream.userInterleaved = true;
|
||||
}
|
||||
// Jack always uses non-interleaved buffers.
|
||||
m_stream.deviceInterleaved[_mode] = false;
|
||||
// Jack always provides host byte-ordered data.
|
||||
m_stream.doByteSwap[_mode] = false;
|
||||
// Get the buffer size. The buffer size and number of buffers
|
||||
// (periods) is set when the jack server is started.
|
||||
m_stream.bufferSize = (int) jack_get_buffer_size(client);
|
||||
*_bufferSize = m_stream.bufferSize;
|
||||
m_stream.nDeviceChannels[_mode] = _channels;
|
||||
m_stream.nUserChannels[_mode] = _channels;
|
||||
// Set flags for buffer conversion.
|
||||
m_stream.doConvertBuffer[_mode] = false;
|
||||
if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
|
||||
m_stream.doConvertBuffer[_mode] = true;
|
||||
}
|
||||
if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
|
||||
&& m_stream.nUserChannels[_mode] > 1) {
|
||||
m_stream.doConvertBuffer[_mode] = true;
|
||||
}
|
||||
// Allocate our JackHandle structure for the stream.
|
||||
if (handle == 0) {
|
||||
handle = new JackHandle;
|
||||
if (handle == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: error allocating JackHandle memory.");
|
||||
goto error;
|
||||
}
|
||||
m_stream.apiHandle = (void *) handle;
|
||||
handle->client = client;
|
||||
}
|
||||
handle->deviceName[_mode] = deviceName;
|
||||
// Allocate necessary internal buffers.
|
||||
uint64_t bufferBytes;
|
||||
bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat);
|
||||
m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
|
||||
if (m_stream.userBuffer[_mode] == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: error allocating user buffer memory.");
|
||||
goto error;
|
||||
}
|
||||
if (m_stream.doConvertBuffer[_mode]) {
|
||||
bool makeBuffer = true;
|
||||
if (_mode == OUTPUT) {
|
||||
bufferBytes = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
|
||||
} else { // _mode == INPUT
|
||||
bufferBytes = m_stream.nDeviceChannels[1] * formatBytes(m_stream.deviceFormat[1]);
|
||||
if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
|
||||
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
|
||||
if (bufferBytes < bytesOut) {
|
||||
makeBuffer = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (makeBuffer) {
|
||||
bufferBytes *= *_bufferSize;
|
||||
if (m_stream.deviceBuffer) free(m_stream.deviceBuffer);
|
||||
m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1);
|
||||
if (m_stream.deviceBuffer == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: error allocating device buffer memory.");
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Allocate memory for the Jack ports (channels) identifiers.
|
||||
handle->ports[_mode] = (jack_port_t **) malloc (sizeof (jack_port_t *) * _channels);
|
||||
if (handle->ports[_mode] == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Jack::probeDeviceOpen: error allocating port memory.");
|
||||
goto error;
|
||||
}
|
||||
m_stream.device[_mode] = _device;
|
||||
m_stream.channelOffset[_mode] = _firstChannel;
|
||||
m_stream.state = STREAM_STOPPED;
|
||||
m_stream.callbackInfo.object = (void *) this;
|
||||
if ( m_stream.mode == OUTPUT
|
||||
&& _mode == INPUT) {
|
||||
// We had already set up the stream for output.
|
||||
m_stream.mode = DUPLEX;
|
||||
} else {
|
||||
m_stream.mode = _mode;
|
||||
jack_set_process_callback(handle->client, jackCallbackHandler, (void *) &m_stream.callbackInfo);
|
||||
jack_set_xrun_callback(handle->client, jackXrun, (void *) &handle);
|
||||
jack_on_shutdown(handle->client, jackShutdown, (void *) &m_stream.callbackInfo);
|
||||
}
|
||||
// Register our ports.
|
||||
char label[64];
|
||||
if (_mode == OUTPUT) {
|
||||
for (uint32_t i=0; i<m_stream.nUserChannels[0]; i++) {
|
||||
snprintf(label, 64, "outport %d", i);
|
||||
handle->ports[0][i] = jack_port_register(handle->client,
|
||||
(const char *)label,
|
||||
JACK_DEFAULT_AUDIO_TYPE,
|
||||
JackPortIsOutput,
|
||||
0);
|
||||
}
|
||||
} else {
|
||||
for (uint32_t i=0; i<m_stream.nUserChannels[1]; i++) {
|
||||
snprintf(label, 64, "inport %d", i);
|
||||
handle->ports[1][i] = jack_port_register(handle->client,
|
||||
(const char *)label,
|
||||
JACK_DEFAULT_AUDIO_TYPE,
|
||||
JackPortIsInput,
|
||||
0);
|
||||
}
|
||||
}
|
||||
// Setup the buffer conversion information structure. We don't use
|
||||
// buffers to do channel offsets, so we override that parameter
|
||||
// here.
|
||||
if (m_stream.doConvertBuffer[_mode]) {
|
||||
setConvertInfo(_mode, 0);
|
||||
}
|
||||
return true;
|
||||
error:
|
||||
if (handle) {
|
||||
jack_client_close(handle->client);
|
||||
if (handle->ports[0]) {
|
||||
free(handle->ports[0]);
|
||||
}
|
||||
if (handle->ports[1]) {
|
||||
free(handle->ports[1]);
|
||||
}
|
||||
delete handle;
|
||||
m_stream.apiHandle = NULL;
|
||||
}
|
||||
for (int32_t iii=0; iii<2; ++iii) {
|
||||
if (m_stream.userBuffer[iii]) {
|
||||
free(m_stream.userBuffer[iii]);
|
||||
m_stream.userBuffer[iii] = NULL;
|
||||
}
|
||||
}
|
||||
if (m_stream.deviceBuffer) {
|
||||
free(m_stream.deviceBuffer);
|
||||
m_stream.deviceBuffer = NULL;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Jack::closeStream() {
|
||||
if (m_stream.state == STREAM_CLOSED) {
|
||||
ATA_ERROR("airtaudio::api::Jack::closeStream(): no open stream to close!");
|
||||
return airtaudio::errorWarning;
|
||||
}
|
||||
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
|
||||
if (handle != NULL) {
|
||||
if (m_stream.state == STREAM_RUNNING) {
|
||||
jack_deactivate(handle->client);
|
||||
}
|
||||
jack_client_close(handle->client);
|
||||
}
|
||||
if (handle != NULL) {
|
||||
if (handle->ports[0]) {
|
||||
free(handle->ports[0]);
|
||||
}
|
||||
if (handle->ports[1]) {
|
||||
free(handle->ports[1]);
|
||||
}
|
||||
delete handle;
|
||||
m_stream.apiHandle = NULL;
|
||||
}
|
||||
for (int32_t i=0; i<2; i++) {
|
||||
if (m_stream.userBuffer[i]) {
|
||||
free(m_stream.userBuffer[i]);
|
||||
m_stream.userBuffer[i] = NULL;
|
||||
}
|
||||
}
|
||||
if (m_stream.deviceBuffer) {
|
||||
free(m_stream.deviceBuffer);
|
||||
m_stream.deviceBuffer = NULL;
|
||||
}
|
||||
m_stream.mode = UNINITIALIZED;
|
||||
m_stream.state = STREAM_CLOSED;
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Jack::startStream() {
|
||||
if (verifyStream() != airtaudio::errorNone) {
|
||||
return airtaudio::errorFail;
|
||||
}
|
||||
if (m_stream.state == STREAM_RUNNING) {
|
||||
ATA_ERROR("airtaudio::api::Jack::startStream(): the stream is already running!");
|
||||
return airtaudio::errorWarning;
|
||||
}
|
||||
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
|
||||
int32_t result = jack_activate(handle->client);
|
||||
if (result) {
|
||||
ATA_ERROR("airtaudio::api::Jack::startStream(): unable to activate JACK client!");
|
||||
goto unlock;
|
||||
}
|
||||
const char **ports;
|
||||
// Get the list of available ports.
|
||||
if ( m_stream.mode == OUTPUT
|
||||
|| m_stream.mode == DUPLEX) {
|
||||
result = 1;
|
||||
ports = jack_get_ports(handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
|
||||
if (ports == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Jack::startStream(): error determining available JACK input ports!");
|
||||
goto unlock;
|
||||
}
|
||||
// Now make the port connections. Since RtAudio wasn't designed to
|
||||
// allow the user to select particular channels of a device, we'll
|
||||
// just open the first "nChannels" ports with offset.
|
||||
for (uint32_t i=0; i<m_stream.nUserChannels[0]; i++) {
|
||||
result = 1;
|
||||
if (ports[ m_stream.channelOffset[0] + i ])
|
||||
result = jack_connect(handle->client, jack_port_name(handle->ports[0][i]), ports[ m_stream.channelOffset[0] + i ]);
|
||||
if (result) {
|
||||
free(ports);
|
||||
ATA_ERROR("airtaudio::api::Jack::startStream(): error connecting output ports!");
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
free(ports);
|
||||
}
|
||||
if ( m_stream.mode == INPUT
|
||||
|| m_stream.mode == DUPLEX) {
|
||||
result = 1;
|
||||
ports = jack_get_ports(handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput);
|
||||
if (ports == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Jack::startStream(): error determining available JACK output ports!");
|
||||
goto unlock;
|
||||
}
|
||||
// Now make the port connections. See note above.
|
||||
for (uint32_t i=0; i<m_stream.nUserChannels[1]; i++) {
|
||||
result = 1;
|
||||
if (ports[ m_stream.channelOffset[1] + i ]) {
|
||||
result = jack_connect(handle->client, ports[ m_stream.channelOffset[1] + i ], jack_port_name(handle->ports[1][i]));
|
||||
}
|
||||
if (result) {
|
||||
free(ports);
|
||||
ATA_ERROR("airtaudio::api::Jack::startStream(): error connecting input ports!");
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
free(ports);
|
||||
}
|
||||
handle->drainCounter = 0;
|
||||
handle->internalDrain = false;
|
||||
m_stream.state = STREAM_RUNNING;
|
||||
unlock:
|
||||
if (result == 0) {
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
return airtaudio::errorSystemError;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Jack::stopStream() {
|
||||
if (verifyStream() != airtaudio::errorNone) {
|
||||
return airtaudio::errorFail;
|
||||
}
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
ATA_ERROR("airtaudio::api::Jack::stopStream(): the stream is already stopped!");
|
||||
return airtaudio::errorWarning;
|
||||
}
|
||||
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
|
||||
if ( m_stream.mode == OUTPUT
|
||||
|| m_stream.mode == DUPLEX) {
|
||||
if (handle->drainCounter == 0) {
|
||||
handle->drainCounter = 2;
|
||||
std::unique_lock<std::mutex> lck(m_stream.mutex);
|
||||
handle->condition.wait(lck);
|
||||
}
|
||||
}
|
||||
jack_deactivate(handle->client);
|
||||
m_stream.state = STREAM_STOPPED;
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Jack::abortStream() {
|
||||
if (verifyStream() != airtaudio::errorNone) {
|
||||
return airtaudio::errorFail;
|
||||
}
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
ATA_ERROR("airtaudio::api::Jack::abortStream(): the stream is already stopped!");
|
||||
return airtaudio::errorWarning;
|
||||
}
|
||||
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
|
||||
handle->drainCounter = 2;
|
||||
return stopStream();
|
||||
}
|
||||
|
||||
// This function will be called by a spawned thread when the user
|
||||
// callback function signals that the stream should be stopped or
|
||||
// aborted. It is necessary to handle it this way because the
|
||||
// callbackEvent() function must return before the jack_deactivate()
|
||||
// function will return.
|
||||
static void jackStopStream(void *_ptr) {
|
||||
airtaudio::CallbackInfo *info = (airtaudio::CallbackInfo *) _ptr;
|
||||
airtaudio::api::Jack *object = (airtaudio::api::Jack *) info->object;
|
||||
object->stopStream();
|
||||
}
|
||||
|
||||
bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) {
|
||||
if ( m_stream.state == STREAM_STOPPED
|
||||
|| m_stream.state == STREAM_STOPPING) {
|
||||
return true;
|
||||
}
|
||||
if (m_stream.state == STREAM_CLOSED) {
|
||||
ATA_ERROR("RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!");
|
||||
return false;
|
||||
}
|
||||
if (m_stream.bufferSize != _nframes) {
|
||||
ATA_ERROR("RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!");
|
||||
return false;
|
||||
}
|
||||
CallbackInfo *info = (CallbackInfo *) &m_stream.callbackInfo;
|
||||
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
|
||||
// Check if we were draining the stream and signal is finished.
|
||||
if (handle->drainCounter > 3) {
|
||||
m_stream.state = STREAM_STOPPING;
|
||||
if (handle->internalDrain == true) {
|
||||
new std::thread(jackStopStream, info);
|
||||
} else {
|
||||
handle->condition.notify_one();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
// Invoke user callback first, to get fresh output data.
|
||||
if (handle->drainCounter == 0) {
|
||||
airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) info->callback;
|
||||
double streamTime = getStreamTime();
|
||||
airtaudio::streamStatus status = 0;
|
||||
if (m_stream.mode != INPUT && handle->xrun[0] == true) {
|
||||
status |= OUTPUT_UNDERFLOW;
|
||||
handle->xrun[0] = false;
|
||||
}
|
||||
if (m_stream.mode != OUTPUT && handle->xrun[1] == true) {
|
||||
status |= INPUT_OVERFLOW;
|
||||
handle->xrun[1] = false;
|
||||
}
|
||||
int32_t cbReturnValue = callback(m_stream.userBuffer[0],
|
||||
m_stream.userBuffer[1],
|
||||
m_stream.bufferSize,
|
||||
streamTime,
|
||||
status,
|
||||
info->userData);
|
||||
if (cbReturnValue == 2) {
|
||||
m_stream.state = STREAM_STOPPING;
|
||||
handle->drainCounter = 2;
|
||||
new std::thread(jackStopStream, info);
|
||||
return true;
|
||||
}
|
||||
else if (cbReturnValue == 1) {
|
||||
handle->drainCounter = 1;
|
||||
handle->internalDrain = true;
|
||||
}
|
||||
}
|
||||
jack_default_audio_sample_t *jackbuffer;
|
||||
uint64_t bufferBytes = _nframes * sizeof(jack_default_audio_sample_t);
|
||||
if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
|
||||
if (handle->drainCounter > 1) { // write zeros to the output stream
|
||||
for (uint32_t i=0; i<m_stream.nDeviceChannels[0]; i++) {
|
||||
jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t) _nframes);
|
||||
memset(jackbuffer, 0, bufferBytes);
|
||||
}
|
||||
} else if (m_stream.doConvertBuffer[0]) {
|
||||
convertBuffer(m_stream.deviceBuffer, m_stream.userBuffer[0], m_stream.convertInfo[0]);
|
||||
for (uint32_t i=0; i<m_stream.nDeviceChannels[0]; i++) {
|
||||
jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t) _nframes);
|
||||
memcpy(jackbuffer, &m_stream.deviceBuffer[i*bufferBytes], bufferBytes);
|
||||
}
|
||||
} else { // no buffer conversion
|
||||
for (uint32_t i=0; i<m_stream.nUserChannels[0]; i++) {
|
||||
jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t) _nframes);
|
||||
memcpy(jackbuffer, &m_stream.userBuffer[0][i*bufferBytes], bufferBytes);
|
||||
}
|
||||
}
|
||||
if (handle->drainCounter) {
|
||||
handle->drainCounter++;
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
if ( m_stream.mode == INPUT
|
||||
|| m_stream.mode == DUPLEX) {
|
||||
if (m_stream.doConvertBuffer[1]) {
|
||||
for (uint32_t i=0; i<m_stream.nDeviceChannels[1]; i++) {
|
||||
jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[1][i], (jack_nframes_t) _nframes);
|
||||
memcpy(&m_stream.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes);
|
||||
}
|
||||
convertBuffer(m_stream.userBuffer[1], m_stream.deviceBuffer, m_stream.convertInfo[1]);
|
||||
} else {
|
||||
// no buffer conversion
|
||||
for (uint32_t i=0; i<m_stream.nUserChannels[1]; i++) {
|
||||
jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[1][i], (jack_nframes_t) _nframes);
|
||||
memcpy(&m_stream.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
unlock:
|
||||
airtaudio::Api::tickStreamTime();
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -1,48 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#if !defined(__AIRTAUDIO_API_JACK_H__) && defined(__UNIX_JACK__)
|
||||
#define __AIRTAUDIO_API_JACK_H__
|
||||
|
||||
namespace airtaudio {
|
||||
namespace api {
|
||||
class Jack: public airtaudio::Api {
|
||||
public:
|
||||
static airtaudio::Api* Create();
|
||||
public:
|
||||
Jack();
|
||||
virtual ~Jack();
|
||||
airtaudio::api::type getCurrentApi() {
|
||||
return airtaudio::api::UNIX_JACK;
|
||||
}
|
||||
uint32_t getDeviceCount();
|
||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||
enum airtaudio::errorType closeStream();
|
||||
enum airtaudio::errorType startStream();
|
||||
enum airtaudio::errorType stopStream();
|
||||
enum airtaudio::errorType abortStream();
|
||||
long getStreamLatency();
|
||||
// This function is intended for internal use only. It must be
|
||||
// public because it is called by the internal callback handler,
|
||||
// which is not a member of RtAudio. External use of this function
|
||||
// will most likely produce highly undesireable results!
|
||||
bool callbackEvent(uint64_t _nframes);
|
||||
private:
|
||||
bool probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options);
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
@@ -1,848 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
|
||||
#if defined(__LINUX_OSS__)
|
||||
#include <airtaudio/Interface.h>
|
||||
#include <airtaudio/debug.h>
|
||||
|
||||
#include <unistd.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
#include "soundcard.h"
|
||||
#include <errno.h>
|
||||
#include <math.h>
|
||||
|
||||
airtaudio::Api* airtaudio::api::Oss::Create() {
|
||||
return new airtaudio::api::Oss();
|
||||
}
|
||||
|
||||
static void *ossCallbackHandler(void* _ptr);
|
||||
|
||||
// A structure to hold various information related to the OSS API
|
||||
// implementation.
|
||||
struct OssHandle {
|
||||
int32_t id[2]; // device ids
|
||||
bool xrun[2];
|
||||
bool triggered;
|
||||
std::condition_variable runnable;
|
||||
OssHandle():
|
||||
triggered(false) {
|
||||
id[0] = 0;
|
||||
id[1] = 0;
|
||||
xrun[0] = false;
|
||||
xrun[1] = false;
|
||||
}
|
||||
};
|
||||
|
||||
airtaudio::api::Oss::Oss() {
|
||||
// Nothing to do here.
|
||||
}
|
||||
|
||||
airtaudio::api::Oss::~Oss() {
|
||||
if (m_stream.state != STREAM_CLOSED) {
|
||||
closeStream();
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t airtaudio::api::Oss::getDeviceCount() {
|
||||
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
|
||||
if (mixerfd == -1) {
|
||||
ATA_ERROR("airtaudio::api::Oss::getDeviceCount: error opening '/dev/mixer'.");
|
||||
return 0;
|
||||
}
|
||||
oss_sysinfo sysinfo;
|
||||
if (ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo) == -1) {
|
||||
close(mixerfd);
|
||||
ATA_ERROR("airtaudio::api::Oss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.");
|
||||
return 0;
|
||||
}
|
||||
close(mixerfd);
|
||||
return sysinfo.numaudios;
|
||||
}
|
||||
|
||||
airtaudio::DeviceInfo airtaudio::api::Oss::getDeviceInfo(uint32_t _device) {
|
||||
rtaudio::DeviceInfo info;
|
||||
info.probed = false;
|
||||
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
|
||||
if (mixerfd == -1) {
|
||||
ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: error opening '/dev/mixer'.");
|
||||
return info;
|
||||
}
|
||||
oss_sysinfo sysinfo;
|
||||
int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
|
||||
if (result == -1) {
|
||||
close(mixerfd);
|
||||
ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.");
|
||||
return info;
|
||||
}
|
||||
unsigned nDevices = sysinfo.numaudios;
|
||||
if (nDevices == 0) {
|
||||
close(mixerfd);
|
||||
ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: no devices found!");
|
||||
return info;
|
||||
}
|
||||
if (_device >= nDevices) {
|
||||
close(mixerfd);
|
||||
ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: device ID is invalid!");
|
||||
return info;
|
||||
}
|
||||
oss_audioinfo ainfo;
|
||||
ainfo.dev = _device;
|
||||
result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
|
||||
close(mixerfd);
|
||||
if (result == -1) {
|
||||
ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: error getting device (" << ainfo.name << ") info.");
|
||||
error(airtaudio::errorWarning);
|
||||
return info;
|
||||
}
|
||||
// Probe channels
|
||||
if (ainfo.caps & PCM_CAP_OUTPUT) {
|
||||
info.outputChannels = ainfo.max_channels;
|
||||
}
|
||||
if (ainfo.caps & PCM_CAP_INPUT) {
|
||||
info.inputChannels = ainfo.max_channels;
|
||||
}
|
||||
if (ainfo.caps & PCM_CAP_DUPLEX) {
|
||||
if ( info.outputChannels > 0
|
||||
&& info.inputChannels > 0
|
||||
&& ainfo.caps & PCM_CAP_DUPLEX) {
|
||||
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
|
||||
}
|
||||
}
|
||||
// Probe data formats ... do for input
|
||||
uint64_t mask = ainfo.iformats;
|
||||
if ( mask & AFMT_S16_LE
|
||||
|| mask & AFMT_S16_BE) {
|
||||
info.nativeFormats |= RTAUDIO_SINT16;
|
||||
}
|
||||
if (mask & AFMT_S8) {
|
||||
info.nativeFormats |= RTAUDIO_SINT8;
|
||||
}
|
||||
if ( mask & AFMT_S32_LE
|
||||
|| mask & AFMT_S32_BE) {
|
||||
info.nativeFormats |= RTAUDIO_SINT32;
|
||||
}
|
||||
if (mask & AFMT_FLOAT) {
|
||||
info.nativeFormats |= RTAUDIO_FLOAT32;
|
||||
}
|
||||
if ( mask & AFMT_S24_LE
|
||||
|| mask & AFMT_S24_BE) {
|
||||
info.nativeFormats |= RTAUDIO_SINT24;
|
||||
}
|
||||
// Check that we have at least one supported format
|
||||
if (info.nativeFormats == 0) {
|
||||
ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.");
|
||||
return info;
|
||||
}
|
||||
// Probe the supported sample rates.
|
||||
info.sampleRates.clear();
|
||||
if (ainfo.nrates) {
|
||||
for (uint32_t i=0; i<ainfo.nrates; i++) {
|
||||
for (uint32_t k=0; k<MAX_SAMPLE_RATES; k++) {
|
||||
if (ainfo.rates[i] == SAMPLE_RATES[k]) {
|
||||
info.sampleRates.push_back(SAMPLE_RATES[k]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Check min and max rate values;
|
||||
for (uint32_t k=0; k<MAX_SAMPLE_RATES; k++) {
|
||||
if ( ainfo.min_rate <= (int) SAMPLE_RATES[k]
|
||||
&& ainfo.max_rate >= (int) SAMPLE_RATES[k]) {
|
||||
info.sampleRates.push_back(SAMPLE_RATES[k]);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (info.sampleRates.size() == 0) {
|
||||
ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").");
|
||||
} else {
|
||||
info.probed = true;
|
||||
info.name = ainfo.name;
|
||||
}
|
||||
return info;
|
||||
}
|
||||
|
||||
bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
|
||||
StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
rtaudio::format _format,
|
||||
uint32_t* _bufferSize,
|
||||
rtaudio::StreamOptions* _options) {
|
||||
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
|
||||
if (mixerfd == -1) {
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error opening '/dev/mixer'.");
|
||||
return false;
|
||||
}
|
||||
oss_sysinfo sysinfo;
|
||||
int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
|
||||
if (result == -1) {
|
||||
close(mixerfd);
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.");
|
||||
return false;
|
||||
}
|
||||
unsigned nDevices = sysinfo.numaudios;
|
||||
if (nDevices == 0) {
|
||||
// This should not happen because a check is made before this function is called.
|
||||
close(mixerfd);
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: no devices found!");
|
||||
return false;
|
||||
}
|
||||
if (_device >= nDevices) {
|
||||
// This should not happen because a check is made before this function is called.
|
||||
close(mixerfd);
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device ID is invalid!");
|
||||
return false;
|
||||
}
|
||||
oss_audioinfo ainfo;
|
||||
ainfo.dev = _device;
|
||||
result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
|
||||
close(mixerfd);
|
||||
if (result == -1) {
|
||||
ATA_ERROR("airtaudio::api::Oss::getDeviceInfo: error getting device (" << ainfo.name << ") info.");
|
||||
return false;
|
||||
}
|
||||
// Check if device supports input or output
|
||||
if ( ( _mode == OUTPUT
|
||||
&& !(ainfo.caps & PCM_CAP_OUTPUT))
|
||||
|| ( _mode == INPUT
|
||||
&& !(ainfo.caps & PCM_CAP_INPUT))) {
|
||||
if (_mode == OUTPUT) {
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.");
|
||||
} else {
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.");
|
||||
}
|
||||
return false;
|
||||
}
|
||||
int32_t flags = 0;
|
||||
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
|
||||
if (_mode == OUTPUT) {
|
||||
flags |= O_WRONLY;
|
||||
} else { // _mode == INPUT
|
||||
if ( m_stream.mode == OUTPUT
|
||||
&& m_stream.device[0] == _device) {
|
||||
// We just set the same device for playback ... close and reopen for duplex (OSS only).
|
||||
close(handle->id[0]);
|
||||
handle->id[0] = 0;
|
||||
if (!(ainfo.caps & PCM_CAP_DUPLEX)) {
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.");
|
||||
return false;
|
||||
}
|
||||
// Check that the number previously set channels is the same.
|
||||
if (m_stream.nUserChannels[0] != _channels) {
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").");
|
||||
return false;
|
||||
}
|
||||
flags |= O_RDWR;
|
||||
} else {
|
||||
flags |= O_RDONLY;
|
||||
}
|
||||
}
|
||||
// Set exclusive access if specified.
|
||||
if ( _options != NULL
|
||||
&& _options->flags & RTAUDIO_HOG_DEVICE) {
|
||||
flags |= O_EXCL;
|
||||
}
|
||||
// Try to open the device.
|
||||
int32_t fd;
|
||||
fd = open(ainfo.devnode, flags, 0);
|
||||
if (fd == -1) {
|
||||
if (errno == EBUSY) {
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") is busy.");
|
||||
} else {
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error opening device (" << ainfo.name << ").");
|
||||
}
|
||||
return false;
|
||||
}
|
||||
// For duplex operation, specifically set this mode (this doesn't seem to work).
|
||||
/*
|
||||
if (flags | O_RDWR) {
|
||||
result = ioctl(fd, SNDCTL_DSP_SETDUPLEX, NULL);
|
||||
if (result == -1) {
|
||||
m_errorStream << "airtaudio::api::Oss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
|
||||
m_errorText = m_errorStream.str();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
*/
|
||||
// Check the device channel support.
|
||||
m_stream.nUserChannels[_mode] = _channels;
|
||||
if (ainfo.max_channels < (int)(_channels + _firstChannel)) {
|
||||
close(fd);
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.");
|
||||
return false;
|
||||
}
|
||||
// Set the number of channels.
|
||||
int32_t deviceChannels = _channels + _firstChannel;
|
||||
result = ioctl(fd, SNDCTL_DSP_CHANNELS, &deviceChannels);
|
||||
if ( result == -1
|
||||
|| deviceChannels < (int)(_channels + _firstChannel)) {
|
||||
close(fd);
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").");
|
||||
return false;
|
||||
}
|
||||
m_stream.nDeviceChannels[_mode] = deviceChannels;
|
||||
// Get the data format mask
|
||||
int32_t mask;
|
||||
result = ioctl(fd, SNDCTL_DSP_GETFMTS, &mask);
|
||||
if (result == -1) {
|
||||
close(fd);
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.");
|
||||
return false;
|
||||
}
|
||||
// Determine how to set the device format.
|
||||
m_stream.userFormat = _format;
|
||||
int32_t deviceFormat = -1;
|
||||
m_stream.doByteSwap[_mode] = false;
|
||||
if (_format == RTAUDIO_SINT8) {
|
||||
if (mask & AFMT_S8) {
|
||||
deviceFormat = AFMT_S8;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT8;
|
||||
}
|
||||
} else if (_format == RTAUDIO_SINT16) {
|
||||
if (mask & AFMT_S16_NE) {
|
||||
deviceFormat = AFMT_S16_NE;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
|
||||
} else if (mask & AFMT_S16_OE) {
|
||||
deviceFormat = AFMT_S16_OE;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
|
||||
m_stream.doByteSwap[_mode] = true;
|
||||
}
|
||||
} else if (_format == RTAUDIO_SINT24) {
|
||||
if (mask & AFMT_S24_NE) {
|
||||
deviceFormat = AFMT_S24_NE;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
|
||||
} else if (mask & AFMT_S24_OE) {
|
||||
deviceFormat = AFMT_S24_OE;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
|
||||
m_stream.doByteSwap[_mode] = true;
|
||||
}
|
||||
} else if (_format == RTAUDIO_SINT32) {
|
||||
if (mask & AFMT_S32_NE) {
|
||||
deviceFormat = AFMT_S32_NE;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
|
||||
} else if (mask & AFMT_S32_OE) {
|
||||
deviceFormat = AFMT_S32_OE;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
|
||||
m_stream.doByteSwap[_mode] = true;
|
||||
}
|
||||
}
|
||||
if (deviceFormat == -1) {
|
||||
// The user requested format is not natively supported by the device.
|
||||
if (mask & AFMT_S16_NE) {
|
||||
deviceFormat = AFMT_S16_NE;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
|
||||
} else if (mask & AFMT_S32_NE) {
|
||||
deviceFormat = AFMT_S32_NE;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
|
||||
} else if (mask & AFMT_S24_NE) {
|
||||
deviceFormat = AFMT_S24_NE;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
|
||||
} else if (mask & AFMT_S16_OE) {
|
||||
deviceFormat = AFMT_S16_OE;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
|
||||
m_stream.doByteSwap[_mode] = true;
|
||||
} else if (mask & AFMT_S32_OE) {
|
||||
deviceFormat = AFMT_S32_OE;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
|
||||
m_stream.doByteSwap[_mode] = true;
|
||||
} else if (mask & AFMT_S24_OE) {
|
||||
deviceFormat = AFMT_S24_OE;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
|
||||
m_stream.doByteSwap[_mode] = true;
|
||||
} else if (mask & AFMT_S8) {
|
||||
deviceFormat = AFMT_S8;
|
||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT8;
|
||||
}
|
||||
}
|
||||
if (m_stream.deviceFormat[_mode] == 0) {
|
||||
// This really shouldn't happen ...
|
||||
close(fd);
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.");
|
||||
return false;
|
||||
}
|
||||
// Set the data format.
|
||||
int32_t temp = deviceFormat;
|
||||
result = ioctl(fd, SNDCTL_DSP_SETFMT, &deviceFormat);
|
||||
if ( result == -1
|
||||
|| deviceFormat != temp) {
|
||||
close(fd);
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").");
|
||||
return false;
|
||||
}
|
||||
// Attempt to set the buffer size. According to OSS, the minimum
|
||||
// number of buffers is two. The supposed minimum buffer size is 16
|
||||
// bytes, so that will be our lower bound. The argument to this
|
||||
// call is in the form 0xMMMMSSSS (hex), where the buffer size (in
|
||||
// bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
|
||||
// We'll check the actual value used near the end of the setup
|
||||
// procedure.
|
||||
int32_t ossBufferBytes = *_bufferSize * formatBytes(m_stream.deviceFormat[_mode]) * deviceChannels;
|
||||
if (ossBufferBytes < 16) {
|
||||
ossBufferBytes = 16;
|
||||
}
|
||||
int32_t buffers = 0;
|
||||
if (_options != NULL) {
|
||||
buffers = _options->numberOfBuffers;
|
||||
}
|
||||
if ( _options != NULL
|
||||
&& _options->flags & RTAUDIO_MINIMIZE_LATENCY) {
|
||||
buffers = 2;
|
||||
}
|
||||
if (buffers < 2) {
|
||||
buffers = 3;
|
||||
}
|
||||
temp = ((int) buffers << 16) + (int)(log10((double)ossBufferBytes) / log10(2.0));
|
||||
result = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &temp);
|
||||
if (result == -1) {
|
||||
close(fd);
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").");
|
||||
return false;
|
||||
}
|
||||
m_stream.nBuffers = buffers;
|
||||
// Save buffer size (in sample frames).
|
||||
*_bufferSize = ossBufferBytes / (formatBytes(m_stream.deviceFormat[_mode]) * deviceChannels);
|
||||
m_stream.bufferSize = *_bufferSize;
|
||||
// Set the sample rate.
|
||||
int32_t srate = _sampleRate;
|
||||
result = ioctl(fd, SNDCTL_DSP_SPEED, &srate);
|
||||
if (result == -1) {
|
||||
close(fd);
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error setting sample rate (" << _sampleRate << ") on device (" << ainfo.name << ").");
|
||||
return false;
|
||||
}
|
||||
// Verify the sample rate setup worked.
|
||||
if (abs(srate - _sampleRate) > 100) {
|
||||
close(fd);
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << _sampleRate << ").");
|
||||
return false;
|
||||
}
|
||||
m_stream.sampleRate = _sampleRate;
|
||||
if ( _mode == INPUT
|
||||
&& m_stream._mode == OUTPUT
|
||||
&& m_stream.device[0] == _device) {
|
||||
// We're doing duplex setup here.
|
||||
m_stream.deviceFormat[0] = m_stream.deviceFormat[1];
|
||||
m_stream.nDeviceChannels[0] = deviceChannels;
|
||||
}
|
||||
// Set interleaving parameters.
|
||||
m_stream.userInterleaved = true;
|
||||
m_stream.deviceInterleaved[_mode] = true;
|
||||
if (_options && _options->flags & RTAUDIO_NONINTERLEAVED) {
|
||||
m_stream.userInterleaved = false;
|
||||
}
|
||||
// Set flags for buffer conversion
|
||||
m_stream.doConvertBuffer[_mode] = false;
|
||||
if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
|
||||
m_stream.doConvertBuffer[_mode] = true;
|
||||
}
|
||||
if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) {
|
||||
m_stream.doConvertBuffer[_mode] = true;
|
||||
}
|
||||
if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
|
||||
&& m_stream.nUserChannels[_mode] > 1) {
|
||||
m_stream.doConvertBuffer[_mode] = true;
|
||||
}
|
||||
// Allocate the stream handles if necessary and then save.
|
||||
if (m_stream.apiHandle == 0) {
|
||||
handle = new OssHandle;
|
||||
if handle == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error allocating OssHandle memory.");
|
||||
goto error;
|
||||
}
|
||||
m_stream.apiHandle = (void *) handle;
|
||||
} else {
|
||||
handle = (OssHandle *) m_stream.apiHandle;
|
||||
}
|
||||
handle->id[_mode] = fd;
|
||||
// Allocate necessary internal buffers.
|
||||
uint64_t bufferBytes;
|
||||
bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat);
|
||||
m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
|
||||
if (m_stream.userBuffer[_mode] == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error allocating user buffer memory.");
|
||||
goto error;
|
||||
}
|
||||
if (m_stream.doConvertBuffer[_mode]) {
|
||||
bool makeBuffer = true;
|
||||
bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]);
|
||||
if (_mode == INPUT) {
|
||||
if ( m_stream._mode == OUTPUT
|
||||
&& m_stream.deviceBuffer) {
|
||||
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
|
||||
if (bufferBytes <= bytesOut) {
|
||||
makeBuffer = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (makeBuffer) {
|
||||
bufferBytes *= *_bufferSize;
|
||||
if (m_stream.deviceBuffer) {
|
||||
free(m_stream.deviceBuffer);
|
||||
}
|
||||
m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1);
|
||||
if (m_stream.deviceBuffer == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Oss::probeDeviceOpen: error allocating device buffer memory.");
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
}
|
||||
m_stream.device[_mode] = _device;
|
||||
m_stream.state = STREAM_STOPPED;
|
||||
// Setup the buffer conversion information structure.
|
||||
if (m_stream.doConvertBuffer[_mode]) {
|
||||
setConvertInfo(_mode, _firstChannel);
|
||||
}
|
||||
// Setup thread if necessary.
|
||||
if (m_stream.mode == OUTPUT && _mode == INPUT) {
|
||||
// We had already set up an output stream.
|
||||
m_stream.mode = DUPLEX;
|
||||
if (m_stream.device[0] == _device) {
|
||||
handle->id[0] = fd;
|
||||
}
|
||||
} else {
|
||||
m_stream.mode = _mode;
|
||||
// Setup callback thread.
|
||||
m_stream.callbackInfo.object = (void *) this;
|
||||
m_stream.callbackInfo.isRunning = true;
|
||||
m_stream.callbackInfo.thread = new std::thread(ossCallbackHandler, &m_stream.callbackInfo);
|
||||
if (m_stream.callbackInfo.thread == NULL) {
|
||||
m_stream.callbackInfo.isRunning = false;
|
||||
ATA_ERROR("airtaudio::api::Oss::error creating callback thread!");
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
error:
|
||||
if (handle) {
|
||||
if (handle->id[0]) {
|
||||
close(handle->id[0]);
|
||||
}
|
||||
if (handle->id[1]) {
|
||||
close(handle->id[1]);
|
||||
}
|
||||
delete handle;
|
||||
m_stream.apiHandle = 0;
|
||||
}
|
||||
for (int32_t i=0; i<2; i++) {
|
||||
if (m_stream.userBuffer[i]) {
|
||||
free(m_stream.userBuffer[i]);
|
||||
m_stream.userBuffer[i] = 0;
|
||||
}
|
||||
}
|
||||
if (m_stream.deviceBuffer) {
|
||||
free(m_stream.deviceBuffer);
|
||||
m_stream.deviceBuffer = 0;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Oss::closeStream() {
|
||||
if (m_stream.state == STREAM_CLOSED) {
|
||||
ATA_ERROR("airtaudio::api::Oss::closeStream(): no open stream to close!");
|
||||
return airtaudio::errorWarning;
|
||||
}
|
||||
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
|
||||
m_stream.callbackInfo.isRunning = false;
|
||||
m_stream.mutex.lock();
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
handle->runnable.notify_one();
|
||||
}
|
||||
m_stream.mutex.unlock();
|
||||
m_stream.callbackInfo.thread->join();
|
||||
if (m_stream.state == STREAM_RUNNING) {
|
||||
if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
|
||||
ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
|
||||
} else {
|
||||
ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
|
||||
}
|
||||
m_stream.state = STREAM_STOPPED;
|
||||
}
|
||||
if (handle) {
|
||||
if (handle->id[0]) {
|
||||
close(handle->id[0]);
|
||||
}
|
||||
if (handle->id[1]) {
|
||||
close(handle->id[1]);
|
||||
}
|
||||
delete handle;
|
||||
m_stream.apiHandle = 0;
|
||||
}
|
||||
for (int32_t i=0; i<2; i++) {
|
||||
if (m_stream.userBuffer[i]) {
|
||||
free(m_stream.userBuffer[i]);
|
||||
m_stream.userBuffer[i] = 0;
|
||||
}
|
||||
}
|
||||
if (m_stream.deviceBuffer) {
|
||||
free(m_stream.deviceBuffer);
|
||||
m_stream.deviceBuffer = 0;
|
||||
}
|
||||
m_stream.mode = UNINITIALIZED;
|
||||
m_stream.state = STREAM_CLOSED;
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Oss::startStream() {
|
||||
if (verifyStream() != airtaudio::errorNone) {
|
||||
return airtaudio::errorFail;
|
||||
}
|
||||
if (m_stream.state == STREAM_RUNNING) {
|
||||
ATA_ERROR("airtaudio::api::Oss::startStream(): the stream is already running!");
|
||||
return airtaudio::errorWarning;
|
||||
}
|
||||
m_stream.mutex.lock();
|
||||
m_stream.state = STREAM_RUNNING;
|
||||
// No need to do anything else here ... OSS automatically starts
|
||||
// when fed samples.
|
||||
m_stream.mutex.unlock();
|
||||
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
|
||||
handle->runnable.notify_one();
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Oss::stopStream() {
|
||||
if (verifyStream() != airtaudio::errorNone) {
|
||||
return airtaudio::errorFail;
|
||||
}
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
ATA_ERROR("airtaudio::api::Oss::stopStream(): the stream is already stopped!");
|
||||
return;
|
||||
}
|
||||
m_stream.mutex.lock();
|
||||
// The state might change while waiting on a mutex.
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
m_stream.mutex.unlock();
|
||||
return;
|
||||
}
|
||||
int32_t result = 0;
|
||||
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
|
||||
if ( m_stream.mode == OUTPUT
|
||||
|| m_stream.mode == DUPLEX) {
|
||||
// Flush the output with zeros a few times.
|
||||
char *buffer;
|
||||
int32_t samples;
|
||||
airtaudio::format format;
|
||||
if (m_stream.doConvertBuffer[0]) {
|
||||
buffer = m_stream.deviceBuffer;
|
||||
samples = m_stream.bufferSize * m_stream.nDeviceChannels[0];
|
||||
format = m_stream.deviceFormat[0];
|
||||
} else {
|
||||
buffer = m_stream.userBuffer[0];
|
||||
samples = m_stream.bufferSize * m_stream.nUserChannels[0];
|
||||
format = m_stream.userFormat;
|
||||
}
|
||||
memset(buffer, 0, samples * formatBytes(format));
|
||||
for (uint32_t i=0; i<m_stream.nBuffers+1; i++) {
|
||||
result = write(handle->id[0], buffer, samples * formatBytes(format));
|
||||
if (result == -1) {
|
||||
ATA_ERROR("airtaudio::api::Oss::stopStream: audio write error.");
|
||||
return airtaudio::errorWarning;
|
||||
}
|
||||
}
|
||||
result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
|
||||
if (result == -1) {
|
||||
ATA_ERROR("airtaudio::api::Oss::stopStream: system error stopping callback procedure on device (" << m_stream.device[0] << ").");
|
||||
goto unlock;
|
||||
}
|
||||
handle->triggered = false;
|
||||
}
|
||||
if ( m_stream.mode == INPUT
|
||||
|| ( m_stream.mode == DUPLEX
|
||||
&& handle->id[0] != handle->id[1])) {
|
||||
result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
|
||||
if (result == -1) {
|
||||
ATA_ERROR("airtaudio::api::Oss::stopStream: system error stopping input callback procedure on device (" << m_stream.device[0] << ").");
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
unlock:
|
||||
m_stream.state = STREAM_STOPPED;
|
||||
m_stream.mutex.unlock();
|
||||
if (result != -1) {
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
return airtaudio::errorSystemError;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Oss::abortStream() {
|
||||
if (verifyStream() != airtaudio::errorNone) {
|
||||
return airtaudio::errorFail;
|
||||
}
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
ATA_ERROR("airtaudio::api::Oss::abortStream(): the stream is already stopped!");
|
||||
return airtaudio::errorWarning;
|
||||
}
|
||||
m_stream.mutex.lock();
|
||||
// The state might change while waiting on a mutex.
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
m_stream.mutex.unlock();
|
||||
return;
|
||||
}
|
||||
int32_t result = 0;
|
||||
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
|
||||
if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
|
||||
result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
|
||||
if (result == -1) {
|
||||
ATA_ERROR("airtaudio::api::Oss::abortStream: system error stopping callback procedure on device (" << m_stream.device[0] << ").");
|
||||
goto unlock;
|
||||
}
|
||||
handle->triggered = false;
|
||||
}
|
||||
if (m_stream.mode == INPUT || (m_stream.mode == DUPLEX && handle->id[0] != handle->id[1])) {
|
||||
result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
|
||||
if (result == -1) {
|
||||
ATA_ERROR("airtaudio::api::Oss::abortStream: system error stopping input callback procedure on device (" << m_stream.device[0] << ").");
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
unlock:
|
||||
m_stream.state = STREAM_STOPPED;
|
||||
m_stream.mutex.unlock();
|
||||
if (result != -1) {
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
return airtaudio::errorSystemError;
|
||||
}
|
||||
|
||||
void airtaudio::api::Oss::callbackEvent() {
|
||||
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
std::unique_lock<std::mutex> lck(m_stream.mutex);
|
||||
handle->runnable.wait(lck);
|
||||
if (m_stream.state != STREAM_RUNNING) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (m_stream.state == STREAM_CLOSED) {
|
||||
ATA_ERROR("airtaudio::api::Oss::callbackEvent(): the stream is closed ... this shouldn't happen!");
|
||||
return airtaudio::errorWarning;
|
||||
}
|
||||
// Invoke user callback to get fresh output data.
|
||||
int32_t doStopStream = 0;
|
||||
airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) m_stream.callbackInfo.callback;
|
||||
double streamTime = getStreamTime();
|
||||
rtaudio::streamStatus status = 0;
|
||||
if ( m_stream.mode != INPUT
|
||||
&& handle->xrun[0] == true) {
|
||||
status |= RTAUDIO_OUTPUT_UNDERFLOW;
|
||||
handle->xrun[0] = false;
|
||||
}
|
||||
if ( m_stream.mode != OUTPUT
|
||||
&& handle->xrun[1] == true) {
|
||||
status |= RTAUDIO_INPUT_OVERFLOW;
|
||||
handle->xrun[1] = false;
|
||||
}
|
||||
doStopStream = callback(m_stream.userBuffer[0],
|
||||
m_stream.userBuffer[1],
|
||||
m_stream.bufferSize,
|
||||
streamTime,
|
||||
status,
|
||||
m_stream.callbackInfo.userData);
|
||||
if (doStopStream == 2) {
|
||||
this->abortStream();
|
||||
return;
|
||||
}
|
||||
m_stream.mutex.lock();
|
||||
// The state might change while waiting on a mutex.
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
goto unlock;
|
||||
}
|
||||
int32_t result;
|
||||
char *buffer;
|
||||
int32_t samples;
|
||||
airtaudio::format format;
|
||||
if ( m_stream.mode == OUTPUT
|
||||
|| m_stream.mode == DUPLEX) {
|
||||
// Setup parameters and do buffer conversion if necessary.
|
||||
if (m_stream.doConvertBuffer[0]) {
|
||||
buffer = m_stream.deviceBuffer;
|
||||
convertBuffer(buffer, m_stream.userBuffer[0], m_stream.convertInfo[0]);
|
||||
samples = m_stream.bufferSize * m_stream.nDeviceChannels[0];
|
||||
format = m_stream.deviceFormat[0];
|
||||
} else {
|
||||
buffer = m_stream.userBuffer[0];
|
||||
samples = m_stream.bufferSize * m_stream.nUserChannels[0];
|
||||
format = m_stream.userFormat;
|
||||
}
|
||||
// Do byte swapping if necessary.
|
||||
if (m_stream.doByteSwap[0]) {
|
||||
byteSwapBuffer(buffer, samples, format);
|
||||
}
|
||||
if ( m_stream.mode == DUPLEX
|
||||
&& handle->triggered == false) {
|
||||
int32_t trig = 0;
|
||||
ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
|
||||
result = write(handle->id[0], buffer, samples * formatBytes(format));
|
||||
trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
|
||||
ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
|
||||
handle->triggered = true;
|
||||
} else {
|
||||
// Write samples to device.
|
||||
result = write(handle->id[0], buffer, samples * formatBytes(format));
|
||||
}
|
||||
if (result == -1) {
|
||||
// We'll assume this is an underrun, though there isn't a
|
||||
// specific means for determining that.
|
||||
handle->xrun[0] = true;
|
||||
ATA_ERROR("airtaudio::api::Oss::callbackEvent: audio write error.");
|
||||
//error(airtaudio::errorWarning);
|
||||
// Continue on to input section.
|
||||
}
|
||||
}
|
||||
if ( m_stream.mode == INPUT
|
||||
|| m_stream.mode == DUPLEX) {
|
||||
// Setup parameters.
|
||||
if (m_stream.doConvertBuffer[1]) {
|
||||
buffer = m_stream.deviceBuffer;
|
||||
samples = m_stream.bufferSize * m_stream.nDeviceChannels[1];
|
||||
format = m_stream.deviceFormat[1];
|
||||
} else {
|
||||
buffer = m_stream.userBuffer[1];
|
||||
samples = m_stream.bufferSize * m_stream.nUserChannels[1];
|
||||
format = m_stream.userFormat;
|
||||
}
|
||||
// Read samples from device.
|
||||
result = read(handle->id[1], buffer, samples * formatBytes(format));
|
||||
if (result == -1) {
|
||||
// We'll assume this is an overrun, though there isn't a
|
||||
// specific means for determining that.
|
||||
handle->xrun[1] = true;
|
||||
ATA_ERROR("airtaudio::api::Oss::callbackEvent: audio read error.");
|
||||
goto unlock;
|
||||
}
|
||||
// Do byte swapping if necessary.
|
||||
if (m_stream.doByteSwap[1]) {
|
||||
byteSwapBuffer(buffer, samples, format);
|
||||
}
|
||||
// Do buffer conversion if necessary.
|
||||
if (m_stream.doConvertBuffer[1]) {
|
||||
convertBuffer(m_stream.userBuffer[1], m_stream.deviceBuffer, m_stream.convertInfo[1]);
|
||||
}
|
||||
}
|
||||
unlock:
|
||||
m_stream.mutex.unlock();
|
||||
airtaudio::Api::tickStreamTime();
|
||||
if (doStopStream == 1) {
|
||||
this->stopStream();
|
||||
}
|
||||
}
|
||||
|
||||
static void ossCallbackHandler(void* _ptr) {
|
||||
CallbackInfo* info = (CallbackInfo*)_ptr;
|
||||
RtApiOss* object = (RtApiOss*)info->object;
|
||||
bool *isRunning = &info->isRunning;
|
||||
while (*isRunning == true) {
|
||||
object->callbackEvent();
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
@@ -1,47 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#if !defined(__AIRTAUDIO_API_OSS_H__) && defined(__LINUX_OSS__)
|
||||
#define __AIRTAUDIO_API_OSS_H__
|
||||
|
||||
namespace airtaudio {
|
||||
namespace api {
|
||||
class Oss: public airtaudio::Api {
|
||||
public:
|
||||
static airtaudio::Api* Create();
|
||||
public:
|
||||
Oss();
|
||||
virtual ~Oss();
|
||||
airtaudio::api::type getCurrentApi() {
|
||||
return airtaudio::api::LINUX_OSS;
|
||||
}
|
||||
uint32_t getDeviceCount();
|
||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||
enum airtaudio::errorType closeStream();
|
||||
enum airtaudio::errorType startStream();
|
||||
enum airtaudio::errorType stopStream();
|
||||
enum airtaudio::errorType abortStream();
|
||||
// This function is intended for internal use only. It must be
|
||||
// public because it is called by the internal callback handler,
|
||||
// which is not a member of RtAudio. External use of this function
|
||||
// will most likely produce highly undesireable results!
|
||||
void callbackEvent();
|
||||
private:
|
||||
bool probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options);
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
@@ -1,442 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
|
||||
#if defined(__LINUX_PULSE__)
|
||||
|
||||
#include <unistd.h>
|
||||
#include <limits.h>
|
||||
#include <airtaudio/Interface.h>
|
||||
#include <airtaudio/debug.h>
|
||||
// Code written by Peter Meerwald, pmeerw@pmeerw.net
|
||||
// and Tristan Matthews.
|
||||
|
||||
#include <pulse/error.h>
|
||||
#include <pulse/simple.h>
|
||||
#include <cstdio>
|
||||
|
||||
airtaudio::Api* airtaudio::api::Pulse::Create() {
|
||||
return new airtaudio::api::Pulse();
|
||||
}
|
||||
|
||||
|
||||
static const uint32_t SUPPORTED_SAMPLERATES[] = {
|
||||
8000,
|
||||
16000,
|
||||
22050,
|
||||
32000,
|
||||
44100,
|
||||
48000,
|
||||
96000,
|
||||
0
|
||||
};
|
||||
|
||||
struct rtaudio_pa_format_mapping_t {
|
||||
airtaudio::format airtaudio_format;
|
||||
pa_sample_format_t pa_format;
|
||||
};
|
||||
|
||||
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
|
||||
{airtaudio::SINT16, PA_SAMPLE_S16LE},
|
||||
{airtaudio::SINT32, PA_SAMPLE_S32LE},
|
||||
{airtaudio::FLOAT32, PA_SAMPLE_FLOAT32LE},
|
||||
{0, PA_SAMPLE_INVALID}};
|
||||
|
||||
struct PulseAudioHandle {
|
||||
pa_simple *s_play;
|
||||
pa_simple *s_rec;
|
||||
std::thread* thread;
|
||||
std::condition_variable runnable_cv;
|
||||
bool runnable;
|
||||
PulseAudioHandle() :
|
||||
s_play(0),
|
||||
s_rec(0),
|
||||
runnable(false) {
|
||||
|
||||
}
|
||||
};
|
||||
|
||||
airtaudio::api::Pulse::~Pulse() {
|
||||
if (m_stream.state != STREAM_CLOSED) {
|
||||
closeStream();
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t airtaudio::api::Pulse::getDeviceCount() {
|
||||
return 1;
|
||||
}
|
||||
|
||||
airtaudio::DeviceInfo airtaudio::api::Pulse::getDeviceInfo(uint32_t _device) {
|
||||
airtaudio::DeviceInfo info;
|
||||
info.probed = true;
|
||||
info.name = "PulseAudio";
|
||||
info.outputChannels = 2;
|
||||
info.inputChannels = 2;
|
||||
info.duplexChannels = 2;
|
||||
info.isDefaultOutput = true;
|
||||
info.isDefaultInput = true;
|
||||
for (const uint32_t *sr = SUPPORTED_SAMPLERATES; *sr; ++sr) {
|
||||
info.sampleRates.push_back(*sr);
|
||||
}
|
||||
info.nativeFormats = SINT16 | SINT32 | FLOAT32;
|
||||
return info;
|
||||
}
|
||||
|
||||
static void pulseaudio_callback(void* _user) {
|
||||
airtaudio::CallbackInfo *cbi = static_cast<airtaudio::CallbackInfo *>(_user);
|
||||
airtaudio::api::Pulse *context = static_cast<airtaudio::api::Pulse*>(cbi->object);
|
||||
volatile bool *isRunning = &cbi->isRunning;
|
||||
while (*isRunning) {
|
||||
context->callbackEvent();
|
||||
}
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Pulse::closeStream() {
|
||||
PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
|
||||
m_stream.callbackInfo.isRunning = false;
|
||||
if (pah) {
|
||||
m_stream.mutex.lock();
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
pah->runnable = true;
|
||||
pah->runnable_cv.notify_one();;
|
||||
}
|
||||
m_stream.mutex.unlock();
|
||||
pah->thread->join();
|
||||
if (pah->s_play) {
|
||||
pa_simple_flush(pah->s_play, NULL);
|
||||
pa_simple_free(pah->s_play);
|
||||
}
|
||||
if (pah->s_rec) {
|
||||
pa_simple_free(pah->s_rec);
|
||||
}
|
||||
delete pah;
|
||||
m_stream.apiHandle = NULL;
|
||||
}
|
||||
if (m_stream.userBuffer[0] != NULL) {
|
||||
free(m_stream.userBuffer[0]);
|
||||
m_stream.userBuffer[0] = NULL;
|
||||
}
|
||||
if (m_stream.userBuffer[1] != NULL) {
|
||||
free(m_stream.userBuffer[1]);
|
||||
m_stream.userBuffer[1] = NULL;
|
||||
}
|
||||
m_stream.state = STREAM_CLOSED;
|
||||
m_stream.mode = UNINITIALIZED;
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
void airtaudio::api::Pulse::callbackEvent() {
|
||||
PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
std::unique_lock<std::mutex> lck(m_stream.mutex);
|
||||
while (!pah->runnable) {
|
||||
pah->runnable_cv.wait(lck);
|
||||
}
|
||||
if (m_stream.state != STREAM_RUNNING) {
|
||||
m_stream.mutex.unlock();
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (m_stream.state == STREAM_CLOSED) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::callbackEvent(): the stream is closed ... this shouldn't happen!");
|
||||
return;
|
||||
}
|
||||
airtaudio::AirTAudioCallback callback = (airtaudio::AirTAudioCallback) m_stream.callbackInfo.callback;
|
||||
double streamTime = getStreamTime();
|
||||
airtaudio::streamStatus status = 0;
|
||||
int32_t doStopStream = callback(m_stream.userBuffer[OUTPUT],
|
||||
m_stream.userBuffer[INPUT],
|
||||
m_stream.bufferSize,
|
||||
streamTime,
|
||||
status,
|
||||
m_stream.callbackInfo.userData);
|
||||
if (doStopStream == 2) {
|
||||
abortStream();
|
||||
return;
|
||||
}
|
||||
m_stream.mutex.lock();
|
||||
void *pulse_in = m_stream.doConvertBuffer[INPUT] ? m_stream.deviceBuffer : m_stream.userBuffer[INPUT];
|
||||
void *pulse_out = m_stream.doConvertBuffer[OUTPUT] ? m_stream.deviceBuffer : m_stream.userBuffer[OUTPUT];
|
||||
if (m_stream.state != STREAM_RUNNING) {
|
||||
goto unlock;
|
||||
}
|
||||
int32_t pa_error;
|
||||
size_t bytes;
|
||||
if ( m_stream.mode == OUTPUT
|
||||
|| m_stream.mode == DUPLEX) {
|
||||
if (m_stream.doConvertBuffer[OUTPUT]) {
|
||||
convertBuffer(m_stream.deviceBuffer,
|
||||
m_stream.userBuffer[OUTPUT],
|
||||
m_stream.convertInfo[OUTPUT]);
|
||||
bytes = m_stream.nDeviceChannels[OUTPUT] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[OUTPUT]);
|
||||
} else {
|
||||
bytes = m_stream.nUserChannels[OUTPUT] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
|
||||
}
|
||||
if (pa_simple_write(pah->s_play, pulse_out, bytes, &pa_error) < 0) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::callbackEvent: audio write error, " << pa_strerror(pa_error) << ".");
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (m_stream.mode == INPUT || m_stream.mode == DUPLEX) {
|
||||
if (m_stream.doConvertBuffer[INPUT]) {
|
||||
bytes = m_stream.nDeviceChannels[INPUT] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[INPUT]);
|
||||
} else {
|
||||
bytes = m_stream.nUserChannels[INPUT] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
|
||||
}
|
||||
if (pa_simple_read(pah->s_rec, pulse_in, bytes, &pa_error) < 0) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::callbackEvent: audio read error, " << pa_strerror(pa_error) << ".");
|
||||
return;
|
||||
}
|
||||
if (m_stream.doConvertBuffer[INPUT]) {
|
||||
convertBuffer(m_stream.userBuffer[INPUT],
|
||||
m_stream.deviceBuffer,
|
||||
m_stream.convertInfo[INPUT]);
|
||||
}
|
||||
}
|
||||
unlock:
|
||||
m_stream.mutex.unlock();
|
||||
airtaudio::Api::tickStreamTime();
|
||||
if (doStopStream == 1) {
|
||||
stopStream();
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Pulse::startStream() {
|
||||
PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
|
||||
if (m_stream.state == STREAM_CLOSED) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::startStream(): the stream is not open!");
|
||||
return airtaudio::errorInvalidUse;
|
||||
}
|
||||
if (m_stream.state == STREAM_RUNNING) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::startStream(): the stream is already running!");
|
||||
return airtaudio::errorWarning;
|
||||
}
|
||||
m_stream.mutex.lock();
|
||||
m_stream.state = STREAM_RUNNING;
|
||||
pah->runnable = true;
|
||||
pah->runnable_cv.notify_one();
|
||||
m_stream.mutex.unlock();
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Pulse::stopStream() {
|
||||
PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
|
||||
if (m_stream.state == STREAM_CLOSED) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::stopStream(): the stream is not open!");
|
||||
return airtaudio::errorInvalidUse;
|
||||
}
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::stopStream(): the stream is already stopped!");
|
||||
return airtaudio::errorWarning;
|
||||
}
|
||||
m_stream.state = STREAM_STOPPED;
|
||||
m_stream.mutex.lock();
|
||||
if (pah && pah->s_play) {
|
||||
int32_t pa_error;
|
||||
if (pa_simple_drain(pah->s_play, &pa_error) < 0) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::stopStream: error draining output device, " << pa_strerror(pa_error) << ".");
|
||||
m_stream.mutex.unlock();
|
||||
return airtaudio::errorSystemError;
|
||||
}
|
||||
}
|
||||
m_stream.state = STREAM_STOPPED;
|
||||
m_stream.mutex.unlock();
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
enum airtaudio::errorType airtaudio::api::Pulse::abortStream() {
|
||||
PulseAudioHandle *pah = static_cast<PulseAudioHandle*>(m_stream.apiHandle);
|
||||
if (m_stream.state == STREAM_CLOSED) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::abortStream(): the stream is not open!");
|
||||
return airtaudio::errorInvalidUse;
|
||||
}
|
||||
if (m_stream.state == STREAM_STOPPED) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::abortStream(): the stream is already stopped!");
|
||||
return airtaudio::errorWarning;
|
||||
}
|
||||
m_stream.state = STREAM_STOPPED;
|
||||
m_stream.mutex.lock();
|
||||
if (pah && pah->s_play) {
|
||||
int32_t pa_error;
|
||||
if (pa_simple_flush(pah->s_play, &pa_error) < 0) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::abortStream: error flushing output device, " << pa_strerror(pa_error) << ".");
|
||||
m_stream.mutex.unlock();
|
||||
return airtaudio::errorSystemError;
|
||||
}
|
||||
}
|
||||
m_stream.state = STREAM_STOPPED;
|
||||
m_stream.mutex.unlock();
|
||||
return airtaudio::errorNone;
|
||||
}
|
||||
|
||||
bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options) {
|
||||
PulseAudioHandle *pah = 0;
|
||||
uint64_t bufferBytes = 0;
|
||||
pa_sample_spec ss;
|
||||
if (_device != 0) {
|
||||
return false;
|
||||
}
|
||||
if (_mode != INPUT && _mode != OUTPUT) {
|
||||
return false;
|
||||
}
|
||||
if (_channels != 1 && _channels != 2) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: unsupported number of channels.");
|
||||
return false;
|
||||
}
|
||||
ss.channels = _channels;
|
||||
if (_firstChannel != 0) {
|
||||
return false;
|
||||
}
|
||||
bool sr_found = false;
|
||||
for (const uint32_t *sr = SUPPORTED_SAMPLERATES; *sr; ++sr) {
|
||||
if (_sampleRate == *sr) {
|
||||
sr_found = true;
|
||||
m_stream.sampleRate = _sampleRate;
|
||||
ss.rate = _sampleRate;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!sr_found) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: unsupported sample rate.");
|
||||
return false;
|
||||
}
|
||||
bool sf_found = 0;
|
||||
for (const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
|
||||
sf->airtaudio_format && sf->pa_format != PA_SAMPLE_INVALID;
|
||||
++sf) {
|
||||
if (_format == sf->airtaudio_format) {
|
||||
sf_found = true;
|
||||
m_stream.userFormat = sf->airtaudio_format;
|
||||
ss.format = sf->pa_format;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!sf_found) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: unsupported sample format.");
|
||||
return false;
|
||||
}
|
||||
// Set interleaving parameters.
|
||||
if (_options && _options->flags & NONINTERLEAVED) {
|
||||
m_stream.userInterleaved = false;
|
||||
} else {
|
||||
m_stream.userInterleaved = true;
|
||||
}
|
||||
m_stream.deviceInterleaved[_mode] = true;
|
||||
m_stream.nBuffers = 1;
|
||||
m_stream.doByteSwap[_mode] = false;
|
||||
m_stream.doConvertBuffer[_mode] = _channels > 1 && !m_stream.userInterleaved;
|
||||
m_stream.deviceFormat[_mode] = m_stream.userFormat;
|
||||
m_stream.nUserChannels[_mode] = _channels;
|
||||
m_stream.nDeviceChannels[_mode] = _channels + _firstChannel;
|
||||
m_stream.channelOffset[_mode] = 0;
|
||||
// Allocate necessary internal buffers.
|
||||
bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat);
|
||||
m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
|
||||
if (m_stream.userBuffer[_mode] == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: error allocating user buffer memory.");
|
||||
goto error;
|
||||
}
|
||||
m_stream.bufferSize = *_bufferSize;
|
||||
if (m_stream.doConvertBuffer[_mode]) {
|
||||
bool makeBuffer = true;
|
||||
bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]);
|
||||
if (_mode == INPUT) {
|
||||
if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
|
||||
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
|
||||
if (bufferBytes <= bytesOut) makeBuffer = false;
|
||||
}
|
||||
}
|
||||
if (makeBuffer) {
|
||||
bufferBytes *= *_bufferSize;
|
||||
if (m_stream.deviceBuffer) free(m_stream.deviceBuffer);
|
||||
m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1);
|
||||
if (m_stream.deviceBuffer == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: error allocating device buffer memory.");
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
}
|
||||
m_stream.device[_mode] = _device;
|
||||
// Setup the buffer conversion information structure.
|
||||
if (m_stream.doConvertBuffer[_mode]) {
|
||||
setConvertInfo(_mode, _firstChannel);
|
||||
}
|
||||
if (!m_stream.apiHandle) {
|
||||
PulseAudioHandle *pah = new PulseAudioHandle;
|
||||
if (!pah) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: error allocating memory for handle.");
|
||||
goto error;
|
||||
}
|
||||
m_stream.apiHandle = pah;
|
||||
}
|
||||
pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
|
||||
int32_t error;
|
||||
switch (_mode) {
|
||||
case INPUT:
|
||||
pah->s_rec = pa_simple_new(NULL, "RtAudio", PA_STREAM_RECORD, NULL, "Record", &ss, NULL, NULL, &error);
|
||||
if (!pah->s_rec) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: error connecting input to PulseAudio server.");
|
||||
goto error;
|
||||
}
|
||||
break;
|
||||
case OUTPUT:
|
||||
pah->s_play = pa_simple_new(NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error);
|
||||
if (!pah->s_play) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: error connecting output to PulseAudio server.");
|
||||
goto error;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
goto error;
|
||||
}
|
||||
if (m_stream.mode == UNINITIALIZED) {
|
||||
m_stream.mode = _mode;
|
||||
} else if (m_stream.mode == _mode) {
|
||||
goto error;
|
||||
}else {
|
||||
m_stream.mode = DUPLEX;
|
||||
}
|
||||
if (!m_stream.callbackInfo.isRunning) {
|
||||
m_stream.callbackInfo.object = this;
|
||||
m_stream.callbackInfo.isRunning = true;
|
||||
pah->thread = new std::thread(pulseaudio_callback, (void *)&m_stream.callbackInfo);
|
||||
if (pah->thread == NULL) {
|
||||
ATA_ERROR("airtaudio::api::Pulse::probeDeviceOpen: error creating thread.");
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
m_stream.state = STREAM_STOPPED;
|
||||
return true;
|
||||
error:
|
||||
if (pah && m_stream.callbackInfo.isRunning) {
|
||||
delete pah;
|
||||
m_stream.apiHandle = 0;
|
||||
}
|
||||
for (int32_t i=0; i<2; i++) {
|
||||
if (m_stream.userBuffer[i]) {
|
||||
free(m_stream.userBuffer[i]);
|
||||
m_stream.userBuffer[i] = 0;
|
||||
}
|
||||
}
|
||||
if (m_stream.deviceBuffer) {
|
||||
free(m_stream.deviceBuffer);
|
||||
m_stream.deviceBuffer = 0;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif
|
@@ -1,48 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#if !defined(__AIRTAUDIO_API_PULSE_H__) && defined(__LINUX_PULSE__)
|
||||
#define __AIRTAUDIO_API_PULSE_H__
|
||||
|
||||
namespace airtaudio {
|
||||
namespace api {
|
||||
class Pulse: public airtaudio::Api {
|
||||
public:
|
||||
static airtaudio::Api* Create();
|
||||
public:
|
||||
virtual ~Pulse();
|
||||
airtaudio::api::type getCurrentApi() {
|
||||
return airtaudio::api::LINUX_PULSE;
|
||||
}
|
||||
uint32_t getDeviceCount();
|
||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||
enum airtaudio::errorType closeStream();
|
||||
enum airtaudio::errorType startStream();
|
||||
enum airtaudio::errorType stopStream();
|
||||
enum airtaudio::errorType abortStream();
|
||||
// This function is intended for internal use only. It must be
|
||||
// public because it is called by the internal callback handler,
|
||||
// which is not a member of RtAudio. External use of this function
|
||||
// will most likely produce highly undesireable results!
|
||||
void callbackEvent();
|
||||
private:
|
||||
std::vector<airtaudio::DeviceInfo> m_devices;
|
||||
void saveDeviceInfo();
|
||||
bool probeDeviceOpen(uint32_t _device,
|
||||
airtaudio::api::StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
airtaudio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
airtaudio::StreamOptions *_options);
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
#endif
|
@@ -1,105 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
#if 0
|
||||
#include <airtaudio/base.h>
|
||||
|
||||
std::ostream& airtaudio::operator <<(std::ostream& _os, enum errorType _obj) {
|
||||
switch(_obj) {
|
||||
case errorNone:
|
||||
_os << "errorNone";
|
||||
break;
|
||||
case errorFail:
|
||||
_os << "errorFail";
|
||||
break;
|
||||
case errorWarning:
|
||||
_os << "errorWarning";
|
||||
break;
|
||||
case errorInputNull:
|
||||
_os << "errorInputNull";
|
||||
break;
|
||||
case errorInvalidUse:
|
||||
_os << "errorInvalidUse";
|
||||
break;
|
||||
case errorSystemError:
|
||||
_os << "errorSystemError";
|
||||
break;
|
||||
default:
|
||||
_os << "UNKNOW...";
|
||||
break;
|
||||
}
|
||||
return _os;
|
||||
}
|
||||
|
||||
std::ostream& airtaudio::operator <<(std::ostream& _os, const airtaudio::format& _obj) {
|
||||
switch(_obj) {
|
||||
case SINT8:
|
||||
_os << "SINT8";
|
||||
break;
|
||||
case SINT16:
|
||||
_os << "SINT16";
|
||||
break;
|
||||
case SINT24:
|
||||
_os << "SINT24";
|
||||
break;
|
||||
case SINT32:
|
||||
_os << "SINT32";
|
||||
break;
|
||||
case FLOAT32:
|
||||
_os << "FLOAT32";
|
||||
break;
|
||||
case FLOAT64:
|
||||
_os << "FLOAT64";
|
||||
break;
|
||||
default:
|
||||
_os << "UNKNOW...";
|
||||
break;
|
||||
}
|
||||
return _os;
|
||||
}
|
||||
|
||||
std::ostream& airtaudio::operator <<(std::ostream& _os, const airtaudio::streamFlags& _obj) {
|
||||
switch(_obj) {
|
||||
case NONINTERLEAVED:
|
||||
_os << "NONINTERLEAVED";
|
||||
break;
|
||||
case MINIMIZE_LATENCY:
|
||||
_os << "MINIMIZE_LATENCY";
|
||||
break;
|
||||
case HOG_DEVICE:
|
||||
_os << "HOG_DEVICE";
|
||||
break;
|
||||
case SCHEDULE_REALTIME:
|
||||
_os << "SCHEDULE_REALTIME";
|
||||
break;
|
||||
case ALSA_USE_DEFAULT:
|
||||
_os << "ALSA_USE_DEFAULT";
|
||||
break;
|
||||
default:
|
||||
_os << "UNKNOW...";
|
||||
break;
|
||||
}
|
||||
return _os;
|
||||
}
|
||||
|
||||
std::ostream& airtaudio::operator <<(std::ostream& _os, const airtaudio::streamStatus& _obj) {
|
||||
switch(_obj) {
|
||||
case INPUT_OVERFLOW:
|
||||
_os << "INPUT_OVERFLOW";
|
||||
break;
|
||||
case OUTPUT_UNDERFLOW:
|
||||
_os << "OUTPUT_UNDERFLOW";
|
||||
break;
|
||||
default:
|
||||
_os << "UNKNOW...";
|
||||
break;
|
||||
}
|
||||
return _os;
|
||||
}
|
||||
|
||||
|
||||
#endif
|
204
airtaudio/base.h
204
airtaudio/base.h
@@ -1,204 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#ifndef __AIRTAUDIO_ERROR_H__
|
||||
#define __AIRTAUDIO_ERROR_H__
|
||||
|
||||
#include <thread>
|
||||
#include <condition_variable>
|
||||
#include <mutex>
|
||||
|
||||
// defien type : uintXX_t and intXX_t
|
||||
#define __STDC_LIMIT_MACROS
|
||||
// note in android include the macro of min max are overwitten
|
||||
#include <stdint.h>
|
||||
|
||||
#if defined(HAVE_GETTIMEOFDAY)
|
||||
#include <sys/time.h>
|
||||
#endif
|
||||
//#include <etk/Stream.h>
|
||||
|
||||
namespace airtaudio {
|
||||
//! Defined RtError types.
|
||||
enum errorType {
|
||||
errorNone, //!< No error
|
||||
errorFail, //!< An error occure in the operation
|
||||
errorWarning, //!< A non-critical error.
|
||||
errorInputNull, //!< null input or internal errror
|
||||
errorInvalidUse, //!< The function was called incorrectly.
|
||||
errorSystemError //!< A system error occured.
|
||||
};
|
||||
// airtaudio version
|
||||
static const std::string VERSION("4.0.12");
|
||||
|
||||
/**
|
||||
* @brief Debug operator To display the curent element in a Human redeable information
|
||||
*/
|
||||
//std::ostream& operator <<(std::ostream& _os, enum errorType _obj);
|
||||
/**
|
||||
* @typedef typedef uint64_t format;
|
||||
* @brief airtaudio data format type.
|
||||
*
|
||||
* Support for signed integers and floats. Audio data fed to/from an
|
||||
* airtaudio stream is assumed to ALWAYS be in host byte order. The
|
||||
* internal routines will automatically take care of any necessary
|
||||
* byte-swapping between the host format and the soundcard. Thus,
|
||||
* endian-ness is not a concern in the following format definitions.
|
||||
*
|
||||
* - \e SINT8: 8-bit signed integer.
|
||||
* - \e SINT16: 16-bit signed integer.
|
||||
* - \e SINT24: 24-bit signed integer.
|
||||
* - \e SINT32: 32-bit signed integer.
|
||||
* - \e FLOAT32: Normalized between plus/minus 1.0.
|
||||
* - \e FLOAT64: Normalized between plus/minus 1.0.
|
||||
*/
|
||||
typedef uint64_t format;
|
||||
static const format SINT8 = 0x1; // 8-bit signed integer.
|
||||
static const format SINT16 = 0x2; // 16-bit signed integer.
|
||||
static const format SINT24 = 0x4; // 24-bit signed integer.
|
||||
static const format SINT32 = 0x8; // 32-bit signed integer.
|
||||
static const format FLOAT32 = 0x10; // Normalized between plus/minus 1.0.
|
||||
static const format FLOAT64 = 0x20; // Normalized between plus/minus 1.0.
|
||||
|
||||
/**
|
||||
* @brief Debug operator To display the curent element in a Human redeable information
|
||||
*/
|
||||
//std::ostream& operator <<(std::ostream& _os, const airtaudio::format& _obj);
|
||||
|
||||
/**
|
||||
* @typedef typedef uint64_t streamFlags;
|
||||
* @brief RtAudio stream option flags.
|
||||
*
|
||||
* The following flags can be OR'ed together to allow a client to
|
||||
* make changes to the default stream behavior:
|
||||
*
|
||||
* - \e NONINTERLEAVED: Use non-interleaved buffers (default = interleaved).
|
||||
* - \e MINIMIZE_LATENCY: Attempt to set stream parameters for lowest possible latency.
|
||||
* - \e HOG_DEVICE: Attempt grab device for exclusive use.
|
||||
* - \e ALSA_USE_DEFAULT: Use the "default" PCM device (ALSA only).
|
||||
*
|
||||
* By default, RtAudio streams pass and receive audio data from the
|
||||
* client in an interleaved format. By passing the
|
||||
* RTAUDIO_NONINTERLEAVED flag to the openStream() function, audio
|
||||
* data will instead be presented in non-interleaved buffers. In
|
||||
* this case, each buffer argument in the RtAudioCallback function
|
||||
* will point to a single array of data, with \c nFrames samples for
|
||||
* each channel concatenated back-to-back. For example, the first
|
||||
* sample of data for the second channel would be located at index \c
|
||||
* nFrames (assuming the \c buffer pointer was recast to the correct
|
||||
* data type for the stream).
|
||||
*
|
||||
* Certain audio APIs offer a number of parameters that influence the
|
||||
* I/O latency of a stream. By default, RtAudio will attempt to set
|
||||
* these parameters internally for robust (glitch-free) performance
|
||||
* (though some APIs, like Windows Direct Sound, make this difficult).
|
||||
* By passing the RTAUDIO_MINIMIZE_LATENCY flag to the openStream()
|
||||
* function, internal stream settings will be influenced in an attempt
|
||||
* to minimize stream latency, though possibly at the expense of stream
|
||||
* performance.
|
||||
*
|
||||
* If the RTAUDIO_HOG_DEVICE flag is set, RtAudio will attempt to
|
||||
* open the input and/or output stream device(s) for exclusive use.
|
||||
* Note that this is not possible with all supported audio APIs.
|
||||
*
|
||||
* If the RTAUDIO_SCHEDULE_REALTIME flag is set, RtAudio will attempt
|
||||
* to select realtime scheduling (round-robin) for the callback thread.
|
||||
*
|
||||
* If the RTAUDIO_ALSA_USE_DEFAULT flag is set, RtAudio will attempt to
|
||||
* open the "default" PCM device when using the ALSA API. Note that this
|
||||
* will override any specified input or output device id.
|
||||
*/
|
||||
typedef uint32_t streamFlags;
|
||||
static const streamFlags NONINTERLEAVED = 0x1; // Use non-interleaved buffers (default = interleaved).
|
||||
static const streamFlags MINIMIZE_LATENCY = 0x2; // Attempt to set stream parameters for lowest possible latency.
|
||||
static const streamFlags HOG_DEVICE = 0x4; // Attempt grab device and prevent use by others.
|
||||
static const streamFlags SCHEDULE_REALTIME = 0x8; // Try to select realtime scheduling for callback thread.
|
||||
static const streamFlags ALSA_USE_DEFAULT = 0x10; // Use the "default" PCM device (ALSA only).
|
||||
|
||||
/**
|
||||
* @brief Debug operator To display the curent element in a Human redeable information
|
||||
*/
|
||||
//std::ostream& operator <<(std::ostream& _os, const airtaudio::streamFlags& _obj);
|
||||
|
||||
/**
|
||||
* @typedef typedef uint64_t rtaudio::streamStatus;
|
||||
* @brief RtAudio stream status (over- or underflow) flags.
|
||||
*
|
||||
* Notification of a stream over- or underflow is indicated by a
|
||||
* non-zero stream \c status argument in the RtAudioCallback function.
|
||||
* The stream status can be one of the following two options,
|
||||
* depending on whether the stream is open for output and/or input:
|
||||
*
|
||||
* - \e RTAUDIO_INPUT_OVERFLOW: Input data was discarded because of an overflow condition at the driver.
|
||||
* - \e RTAUDIO_OUTPUT_UNDERFLOW: The output buffer ran low, likely producing a break in the output sound.
|
||||
*/
|
||||
typedef uint32_t streamStatus;
|
||||
static const streamStatus INPUT_OVERFLOW = 0x1; // Input data was discarded because of an overflow condition at the driver.
|
||||
static const streamStatus OUTPUT_UNDERFLOW = 0x2; // The output buffer ran low, likely causing a gap in the output sound.
|
||||
|
||||
/**
|
||||
* @brief Debug operator To display the curent element in a Human redeable information
|
||||
*/
|
||||
//std::ostream& operator <<(std::ostream& _os, const airtaudio::streamStatus& _obj);
|
||||
|
||||
/**
|
||||
* @brief RtAudio callback function prototype.
|
||||
*
|
||||
* All RtAudio clients must create a function of type RtAudioCallback
|
||||
* to read and/or write data from/to the audio stream. When the
|
||||
* underlying audio system is ready for new input or output data, this
|
||||
* function will be invoked.
|
||||
*
|
||||
* @param _outputBuffer For output (or duplex) streams, the client
|
||||
* should write \c nFrames of audio sample frames into this
|
||||
* buffer. This argument should be recast to the datatype
|
||||
* specified when the stream was opened. For input-only
|
||||
* streams, this argument will be NULL.
|
||||
*
|
||||
* @param _inputBuffer For input (or duplex) streams, this buffer will
|
||||
* hold \c nFrames of input audio sample frames. This
|
||||
* argument should be recast to the datatype specified when the
|
||||
* stream was opened. For output-only streams, this argument
|
||||
* will be NULL.
|
||||
*
|
||||
* @param _nFrames The number of sample frames of input or output
|
||||
* data in the buffers. The actual buffer size in bytes is
|
||||
* dependent on the data type and number of channels in use.
|
||||
*
|
||||
* @param _streamTime The number of seconds that have elapsed since the
|
||||
* stream was started.
|
||||
*
|
||||
* @param _status If non-zero, this argument indicates a data overflow
|
||||
* or underflow condition for the stream. The particular
|
||||
* condition can be determined by comparison with the
|
||||
* streamStatus flags.
|
||||
*
|
||||
* @param _userData A pointer to optional data provided by the client
|
||||
* when opening the stream (default = NULL).
|
||||
*
|
||||
* To continue normal stream operation, the RtAudioCallback function
|
||||
* should return a value of zero. To stop the stream and drain the
|
||||
* output buffer, the function should return a value of one. To abort
|
||||
* the stream immediately, the client should return a value of two.
|
||||
*/
|
||||
typedef int32_t (*AirTAudioCallback)(void *_outputBuffer,
|
||||
void *_inputBuffer,
|
||||
uint32_t _nFrames,
|
||||
double _streamTime,
|
||||
airtaudio::streamStatus _status,
|
||||
void *_userData);
|
||||
}
|
||||
|
||||
#include <airtaudio/DeviceInfo.h>
|
||||
#include <airtaudio/StreamOptions.h>
|
||||
#include <airtaudio/StreamParameters.h>
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
@@ -1,14 +0,0 @@
|
||||
/**
|
||||
* @author Edouard DUPIN
|
||||
*
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
*
|
||||
* @license BSD 3 clauses (see license file)
|
||||
*/
|
||||
|
||||
#include <airtaudio/debug.h>
|
||||
|
||||
int32_t airtaudio::getLogId() {
|
||||
static int32_t g_val = etk::log::registerInstance("airtaudio");
|
||||
return g_val;
|
||||
}
|
@@ -1,52 +0,0 @@
|
||||
/**
|
||||
* @author Gary P. SCAVONE
|
||||
*
|
||||
* @copyright 2001-2013 Gary P. Scavone, all right reserved
|
||||
*
|
||||
* @license like MIT (see license file)
|
||||
*/
|
||||
|
||||
#ifndef __AIRTAUDIO_INT24_T_H__
|
||||
#define __AIRTAUDIO_INT24_T_H__
|
||||
|
||||
#pragma pack(push, 1)
|
||||
class int24_t {
|
||||
protected:
|
||||
uint8_t c3[3];
|
||||
public:
|
||||
int24_t() {}
|
||||
int24_t& operator = (const int32_t& i) {
|
||||
c3[0] = (i & 0x000000ff);
|
||||
c3[1] = (i & 0x0000ff00) >> 8;
|
||||
c3[2] = (i & 0x00ff0000) >> 16;
|
||||
return *this;
|
||||
}
|
||||
|
||||
int24_t(const int24_t& v) {
|
||||
*this = v;
|
||||
}
|
||||
int24_t(const double& d) {
|
||||
*this = (int32_t)d;
|
||||
}
|
||||
int24_t(const float& f) {
|
||||
*this = (int32_t)f;
|
||||
}
|
||||
int24_t(const int16_t& s) {
|
||||
*this = (int32_t)s;
|
||||
}
|
||||
int24_t(const int8_t& c) {
|
||||
*this = (int32_t)c;
|
||||
}
|
||||
|
||||
int32_t asInt() {
|
||||
int32_t i = c3[0] | (c3[1] << 8) | (c3[2] << 16);
|
||||
if (i & 0x800000) {
|
||||
i |= ~0xffffff;
|
||||
}
|
||||
return i;
|
||||
}
|
||||
};
|
||||
#pragma pack(pop)
|
||||
|
||||
|
||||
#endif
|
483
audio/orchestra/Api.cpp
Normal file
483
audio/orchestra/Api.cpp
Normal file
@@ -0,0 +1,483 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
//#include <etk/types.h>
|
||||
#include <audio/orchestra/Interface.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <climits>
|
||||
|
||||
#undef __class__
|
||||
#define __class__ "api"
|
||||
|
||||
// Static variable definitions.
|
||||
const std::vector<uint32_t>& audio::orchestra::genericSampleRate() {
|
||||
static std::vector<uint32_t> list;
|
||||
if (list.size() == 0) {
|
||||
list.push_back(4000);
|
||||
list.push_back(5512);
|
||||
list.push_back(8000);
|
||||
list.push_back(9600);
|
||||
list.push_back(11025);
|
||||
list.push_back(16000);
|
||||
list.push_back(22050);
|
||||
list.push_back(32000);
|
||||
list.push_back(44100);
|
||||
list.push_back(48000);
|
||||
list.push_back(64000);
|
||||
list.push_back(88200);
|
||||
list.push_back(96000);
|
||||
list.push_back(128000);
|
||||
list.push_back(176400);
|
||||
list.push_back(192000);
|
||||
}
|
||||
return list;
|
||||
};
|
||||
|
||||
|
||||
audio::orchestra::Api::Api() :
|
||||
m_callback(nullptr),
|
||||
m_deviceBuffer(nullptr) {
|
||||
m_device[0] = 11111;
|
||||
m_device[1] = 11111;
|
||||
m_state = audio::orchestra::state_closed;
|
||||
m_mode = audio::orchestra::mode_unknow;
|
||||
}
|
||||
|
||||
audio::orchestra::Api::~Api() {
|
||||
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::Api::startStream() {
|
||||
ATA_VERBOSE("Start Stream");
|
||||
m_startTime = audio::Time::now();
|
||||
m_duration = std11::chrono::microseconds(0);
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::Api::openStream(audio::orchestra::StreamParameters* _oParams,
|
||||
audio::orchestra::StreamParameters* _iParams,
|
||||
enum audio::format _format,
|
||||
uint32_t _sampleRate,
|
||||
uint32_t* _bufferFrames,
|
||||
audio::orchestra::AirTAudioCallback _callback,
|
||||
const audio::orchestra::StreamOptions& _options) {
|
||||
if (m_state != audio::orchestra::state_closed) {
|
||||
ATA_ERROR("a stream is already open!");
|
||||
return audio::orchestra::error_invalidUse;
|
||||
}
|
||||
if ( _oParams != nullptr
|
||||
&& _oParams->nChannels < 1) {
|
||||
ATA_ERROR("a non-nullptr output StreamParameters structure cannot have an nChannels value less than one.");
|
||||
return audio::orchestra::error_invalidUse;
|
||||
}
|
||||
if ( _iParams != nullptr
|
||||
&& _iParams->nChannels < 1) {
|
||||
ATA_ERROR("a non-nullptr input StreamParameters structure cannot have an nChannels value less than one.");
|
||||
return audio::orchestra::error_invalidUse;
|
||||
}
|
||||
if ( _oParams == nullptr
|
||||
&& _iParams == nullptr) {
|
||||
ATA_ERROR("input and output StreamParameters structures are both nullptr!");
|
||||
return audio::orchestra::error_invalidUse;
|
||||
}
|
||||
if (audio::getFormatBytes(_format) == 0) {
|
||||
ATA_ERROR("'format' parameter value is undefined.");
|
||||
return audio::orchestra::error_invalidUse;
|
||||
}
|
||||
uint32_t nDevices = getDeviceCount();
|
||||
uint32_t oChannels = 0;
|
||||
if (_oParams != nullptr) {
|
||||
oChannels = _oParams->nChannels;
|
||||
if ( _oParams->deviceId >= nDevices
|
||||
&& _oParams->deviceName == "") {
|
||||
ATA_ERROR("output device parameter value is invalid.");
|
||||
return audio::orchestra::error_invalidUse;
|
||||
}
|
||||
}
|
||||
uint32_t iChannels = 0;
|
||||
if (_iParams != nullptr) {
|
||||
iChannels = _iParams->nChannels;
|
||||
if ( _iParams->deviceId >= nDevices
|
||||
&& _iParams->deviceName == "") {
|
||||
ATA_ERROR("input device parameter value is invalid.");
|
||||
return audio::orchestra::error_invalidUse;
|
||||
}
|
||||
}
|
||||
clearStreamInfo();
|
||||
bool result;
|
||||
if (oChannels > 0) {
|
||||
if (_oParams->deviceId == -1) {
|
||||
result = probeDeviceOpenName(_oParams->deviceName,
|
||||
audio::orchestra::mode_output,
|
||||
oChannels,
|
||||
_oParams->firstChannel,
|
||||
_sampleRate,
|
||||
_format,
|
||||
_bufferFrames,
|
||||
_options);
|
||||
} else {
|
||||
result = probeDeviceOpen(_oParams->deviceId,
|
||||
audio::orchestra::mode_output,
|
||||
oChannels,
|
||||
_oParams->firstChannel,
|
||||
_sampleRate,
|
||||
_format,
|
||||
_bufferFrames,
|
||||
_options);
|
||||
}
|
||||
if (result == false) {
|
||||
ATA_ERROR("system ERROR");
|
||||
return audio::orchestra::error_systemError;
|
||||
}
|
||||
}
|
||||
if (iChannels > 0) {
|
||||
if (_iParams->deviceId == -1) {
|
||||
result = probeDeviceOpenName(_iParams->deviceName,
|
||||
audio::orchestra::mode_input,
|
||||
iChannels,
|
||||
_iParams->firstChannel,
|
||||
_sampleRate,
|
||||
_format,
|
||||
_bufferFrames,
|
||||
_options);
|
||||
} else {
|
||||
result = probeDeviceOpen(_iParams->deviceId,
|
||||
audio::orchestra::mode_input,
|
||||
iChannels,
|
||||
_iParams->firstChannel,
|
||||
_sampleRate,
|
||||
_format,
|
||||
_bufferFrames,
|
||||
_options);
|
||||
}
|
||||
if (result == false) {
|
||||
if (oChannels > 0) {
|
||||
closeStream();
|
||||
}
|
||||
ATA_ERROR("system error");
|
||||
return audio::orchestra::error_systemError;
|
||||
}
|
||||
}
|
||||
m_callback = _callback;
|
||||
//_options.numberOfBuffers = m_nBuffers;
|
||||
m_state = audio::orchestra::state_stopped;
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
uint32_t audio::orchestra::Api::getDefaultInputDevice() {
|
||||
// Should be implemented in subclasses if possible.
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t audio::orchestra::Api::getDefaultOutputDevice() {
|
||||
// Should be implemented in subclasses if possible.
|
||||
return 0;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::Api::closeStream() {
|
||||
ATA_VERBOSE("Close Stream");
|
||||
// MUST be implemented in subclasses!
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
bool audio::orchestra::Api::probeDeviceOpen(uint32_t /*device*/,
|
||||
audio::orchestra::mode /*mode*/,
|
||||
uint32_t /*channels*/,
|
||||
uint32_t /*firstChannel*/,
|
||||
uint32_t /*sampleRate*/,
|
||||
audio::format /*format*/,
|
||||
uint32_t * /*bufferSize*/,
|
||||
const audio::orchestra::StreamOptions& /*options*/) {
|
||||
// MUST be implemented in subclasses!
|
||||
return false;
|
||||
}
|
||||
|
||||
void audio::orchestra::Api::tickStreamTime() {
|
||||
//ATA_WARNING("tick : size=" << m_bufferSize << " rate=" << m_sampleRate << " time=" << audio::Duration((int64_t(m_bufferSize) * int64_t(1000000000)) / int64_t(m_sampleRate)).count());
|
||||
//ATA_WARNING(" one element=" << audio::Duration((int64_t(1000000000)) / int64_t(m_sampleRate)).count());
|
||||
m_duration += audio::Duration((int64_t(m_bufferSize) * int64_t(1000000000)) / int64_t(m_sampleRate));
|
||||
}
|
||||
|
||||
long audio::orchestra::Api::getStreamLatency() {
|
||||
if (verifyStream() != audio::orchestra::error_none) {
|
||||
return 0;
|
||||
}
|
||||
long totalLatency = 0;
|
||||
if ( m_mode == audio::orchestra::mode_output
|
||||
|| m_mode == audio::orchestra::mode_duplex) {
|
||||
totalLatency = m_latency[0];
|
||||
}
|
||||
if ( m_mode == audio::orchestra::mode_input
|
||||
|| m_mode == audio::orchestra::mode_duplex) {
|
||||
totalLatency += m_latency[1];
|
||||
}
|
||||
return totalLatency;
|
||||
}
|
||||
|
||||
audio::Time audio::orchestra::Api::getStreamTime() {
|
||||
if (verifyStream() != audio::orchestra::error_none) {
|
||||
return audio::Time();
|
||||
}
|
||||
return m_startTime + m_duration;
|
||||
}
|
||||
|
||||
uint32_t audio::orchestra::Api::getStreamSampleRate() {
|
||||
if (verifyStream() != audio::orchestra::error_none) {
|
||||
return 0;
|
||||
}
|
||||
return m_sampleRate;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::Api::verifyStream() {
|
||||
if (m_state == audio::orchestra::state_closed) {
|
||||
ATA_ERROR("a stream is not open!");
|
||||
return audio::orchestra::error_invalidUse;
|
||||
}
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
void audio::orchestra::Api::clearStreamInfo() {
|
||||
m_mode = audio::orchestra::mode_unknow;
|
||||
m_state = audio::orchestra::state_closed;
|
||||
m_sampleRate = 0;
|
||||
m_bufferSize = 0;
|
||||
m_nBuffers = 0;
|
||||
m_userFormat = audio::format_unknow;
|
||||
m_startTime = audio::Time();
|
||||
m_duration = audio::Duration(0);
|
||||
m_deviceBuffer = nullptr;
|
||||
m_callback = nullptr;
|
||||
for (int32_t iii=0; iii<2; ++iii) {
|
||||
m_device[iii] = 11111;
|
||||
m_doConvertBuffer[iii] = false;
|
||||
m_deviceInterleaved[iii] = true;
|
||||
m_doByteSwap[iii] = false;
|
||||
m_nUserChannels[iii] = 0;
|
||||
m_nDeviceChannels[iii] = 0;
|
||||
m_channelOffset[iii] = 0;
|
||||
m_deviceFormat[iii] = audio::format_unknow;
|
||||
m_latency[iii] = 0;
|
||||
m_userBuffer[iii].clear();
|
||||
m_convertInfo[iii].channels = 0;
|
||||
m_convertInfo[iii].inJump = 0;
|
||||
m_convertInfo[iii].outJump = 0;
|
||||
m_convertInfo[iii].inFormat = audio::format_unknow;
|
||||
m_convertInfo[iii].outFormat = audio::format_unknow;
|
||||
m_convertInfo[iii].inOffset.clear();
|
||||
m_convertInfo[iii].outOffset.clear();
|
||||
}
|
||||
}
|
||||
|
||||
void audio::orchestra::Api::setConvertInfo(audio::orchestra::mode _mode, uint32_t _firstChannel) {
|
||||
int32_t idTable = audio::orchestra::modeToIdTable(_mode);
|
||||
if (_mode == audio::orchestra::mode_input) { // convert device to user buffer
|
||||
m_convertInfo[idTable].inJump = m_nDeviceChannels[1];
|
||||
m_convertInfo[idTable].outJump = m_nUserChannels[1];
|
||||
m_convertInfo[idTable].inFormat = m_deviceFormat[1];
|
||||
m_convertInfo[idTable].outFormat = m_userFormat;
|
||||
} else { // convert user to device buffer
|
||||
m_convertInfo[idTable].inJump = m_nUserChannels[0];
|
||||
m_convertInfo[idTable].outJump = m_nDeviceChannels[0];
|
||||
m_convertInfo[idTable].inFormat = m_userFormat;
|
||||
m_convertInfo[idTable].outFormat = m_deviceFormat[0];
|
||||
}
|
||||
if (m_convertInfo[idTable].inJump < m_convertInfo[idTable].outJump) {
|
||||
m_convertInfo[idTable].channels = m_convertInfo[idTable].inJump;
|
||||
} else {
|
||||
m_convertInfo[idTable].channels = m_convertInfo[idTable].outJump;
|
||||
}
|
||||
// Set up the interleave/deinterleave offsets.
|
||||
if (m_deviceInterleaved[idTable] == false) {
|
||||
if (_mode == audio::orchestra::mode_input) {
|
||||
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
|
||||
m_convertInfo[idTable].inOffset.push_back(kkk * m_bufferSize);
|
||||
m_convertInfo[idTable].outOffset.push_back(kkk);
|
||||
m_convertInfo[idTable].inJump = 1;
|
||||
}
|
||||
} else {
|
||||
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
|
||||
m_convertInfo[idTable].inOffset.push_back(kkk);
|
||||
m_convertInfo[idTable].outOffset.push_back(kkk * m_bufferSize);
|
||||
m_convertInfo[idTable].outJump = 1;
|
||||
}
|
||||
}
|
||||
} else { // no (de)interleaving
|
||||
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
|
||||
m_convertInfo[idTable].inOffset.push_back(kkk);
|
||||
m_convertInfo[idTable].outOffset.push_back(kkk);
|
||||
}
|
||||
}
|
||||
|
||||
// Add channel offset.
|
||||
if (_firstChannel > 0) {
|
||||
if (m_deviceInterleaved[idTable]) {
|
||||
if (_mode == audio::orchestra::mode_output) {
|
||||
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
|
||||
m_convertInfo[idTable].outOffset[kkk] += _firstChannel;
|
||||
}
|
||||
} else {
|
||||
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
|
||||
m_convertInfo[idTable].inOffset[kkk] += _firstChannel;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (_mode == audio::orchestra::mode_output) {
|
||||
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
|
||||
m_convertInfo[idTable].outOffset[kkk] += (_firstChannel * m_bufferSize);
|
||||
}
|
||||
} else {
|
||||
for (int32_t kkk=0; kkk<m_convertInfo[idTable].channels; ++kkk) {
|
||||
m_convertInfo[idTable].inOffset[kkk] += (_firstChannel * m_bufferSize);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void audio::orchestra::Api::convertBuffer(char *_outBuffer, char *_inBuffer, audio::orchestra::ConvertInfo &_info) {
|
||||
// This function does format conversion, input/output channel compensation, and
|
||||
// data interleaving/deinterleaving. 24-bit integers are assumed to occupy
|
||||
// the lower three bytes of a 32-bit integer.
|
||||
|
||||
// Clear our device buffer when in/out duplex device channels are different
|
||||
if ( _outBuffer == m_deviceBuffer
|
||||
&& m_mode == audio::orchestra::mode_duplex
|
||||
&& m_nDeviceChannels[0] < m_nDeviceChannels[1]) {
|
||||
memset(_outBuffer, 0, m_bufferSize * _info.outJump * audio::getFormatBytes(_info.outFormat));
|
||||
}
|
||||
switch (audio::getFormatBytes(_info.outFormat)) {
|
||||
case 1:
|
||||
{
|
||||
uint8_t *out = reinterpret_cast<uint8_t*>(_outBuffer);
|
||||
uint8_t *in = reinterpret_cast<uint8_t*>(_inBuffer);
|
||||
for (size_t iii=0; iii<m_bufferSize; ++iii) {
|
||||
for (size_t jjj=0; jjj<_info.channels; jjj++) {
|
||||
out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
|
||||
}
|
||||
in += _info.inJump;
|
||||
out += _info.outJump;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
{
|
||||
uint16_t *out = reinterpret_cast<uint16_t*>(_outBuffer);
|
||||
uint16_t *in = reinterpret_cast<uint16_t*>(_inBuffer);
|
||||
for (size_t iii=0; iii<m_bufferSize; ++iii) {
|
||||
for (size_t jjj=0; jjj<_info.channels; jjj++) {
|
||||
out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
|
||||
}
|
||||
in += _info.inJump;
|
||||
out += _info.outJump;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case 4:
|
||||
{
|
||||
uint32_t *out = reinterpret_cast<uint32_t*>(_outBuffer);
|
||||
uint32_t *in = reinterpret_cast<uint32_t*>(_inBuffer);
|
||||
for (size_t iii=0; iii<m_bufferSize; ++iii) {
|
||||
for (size_t jjj=0; jjj<_info.channels; jjj++) {
|
||||
out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
|
||||
}
|
||||
in += _info.inJump;
|
||||
out += _info.outJump;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case 8:
|
||||
{
|
||||
uint64_t *out = reinterpret_cast<uint64_t*>(_outBuffer);
|
||||
uint64_t *in = reinterpret_cast<uint64_t*>(_inBuffer);
|
||||
for (size_t iii=0; iii<m_bufferSize; ++iii) {
|
||||
for (size_t jjj=0; jjj<_info.channels; jjj++) {
|
||||
out[_info.outOffset[jjj]] = in[_info.inOffset[jjj]];
|
||||
}
|
||||
in += _info.inJump;
|
||||
out += _info.outJump;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void audio::orchestra::Api::byteSwapBuffer(char *_buffer, uint32_t _samples, audio::format _format) {
|
||||
char val;
|
||||
char *ptr;
|
||||
ptr = _buffer;
|
||||
if (_format == audio::format_int16) {
|
||||
for (uint32_t iii=0; iii<_samples; ++iii) {
|
||||
// Swap 1st and 2nd bytes.
|
||||
val = *(ptr);
|
||||
*(ptr) = *(ptr+1);
|
||||
*(ptr+1) = val;
|
||||
|
||||
// Increment 2 bytes.
|
||||
ptr += 2;
|
||||
}
|
||||
} else if ( _format == audio::format_int32
|
||||
|| _format == audio::format_float) {
|
||||
for (uint32_t iii=0; iii<_samples; ++iii) {
|
||||
// Swap 1st and 4th bytes.
|
||||
val = *(ptr);
|
||||
*(ptr) = *(ptr+3);
|
||||
*(ptr+3) = val;
|
||||
|
||||
// Swap 2nd and 3rd bytes.
|
||||
ptr += 1;
|
||||
val = *(ptr);
|
||||
*(ptr) = *(ptr+1);
|
||||
*(ptr+1) = val;
|
||||
|
||||
// Increment 3 more bytes.
|
||||
ptr += 3;
|
||||
}
|
||||
} else if (_format == audio::format_int24) {
|
||||
for (uint32_t iii=0; iii<_samples; ++iii) {
|
||||
// Swap 1st and 3rd bytes.
|
||||
val = *(ptr);
|
||||
*(ptr) = *(ptr+2);
|
||||
*(ptr+2) = val;
|
||||
|
||||
// Increment 2 more bytes.
|
||||
ptr += 2;
|
||||
}
|
||||
} else if (_format == audio::format_double) {
|
||||
for (uint32_t iii=0; iii<_samples; ++iii) {
|
||||
// Swap 1st and 8th bytes
|
||||
val = *(ptr);
|
||||
*(ptr) = *(ptr+7);
|
||||
*(ptr+7) = val;
|
||||
|
||||
// Swap 2nd and 7th bytes
|
||||
ptr += 1;
|
||||
val = *(ptr);
|
||||
*(ptr) = *(ptr+5);
|
||||
*(ptr+5) = val;
|
||||
|
||||
// Swap 3rd and 6th bytes
|
||||
ptr += 1;
|
||||
val = *(ptr);
|
||||
*(ptr) = *(ptr+3);
|
||||
*(ptr+3) = val;
|
||||
|
||||
// Swap 4th and 5th bytes
|
||||
ptr += 1;
|
||||
val = *(ptr);
|
||||
*(ptr) = *(ptr+1);
|
||||
*(ptr+1) = val;
|
||||
|
||||
// Increment 5 more bytes.
|
||||
ptr += 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
182
audio/orchestra/Api.h
Normal file
182
audio/orchestra/Api.h
Normal file
@@ -0,0 +1,182 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __AUDIO_ORCHESTRA_API_H__
|
||||
#define __AUDIO_ORCHESTRA_API_H__
|
||||
|
||||
#include <sstream>
|
||||
#include <audio/orchestra/debug.h>
|
||||
#include <audio/orchestra/type.h>
|
||||
#include <audio/orchestra/state.h>
|
||||
#include <audio/orchestra/mode.h>
|
||||
#include <audio/Time.h>
|
||||
#include <audio/Duration.h>
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
const std::vector<uint32_t>& genericSampleRate();
|
||||
|
||||
/**
|
||||
* @brief airtaudio callback function prototype.
|
||||
* @param _inputBuffer For input (or duplex) streams, this buffer will hold _nbChunk of input audio chunk (nullptr if no data).
|
||||
* @param _timeInput Timestamp of the first buffer sample (recording time).
|
||||
* @param _outputBuffer For output (or duplex) streams, the client should write _nbChunk of audio chunk into this buffer (nullptr if no data).
|
||||
* @param _timeOutput Timestamp of the first buffer sample (playing time).
|
||||
* @param _nbChunk The number of chunk of input or output chunk in the buffer (same size).
|
||||
* @param _status List of error that occured in the laps of time.
|
||||
*/
|
||||
typedef std11::function<int32_t (const void* _inputBuffer,
|
||||
const audio::Time& _timeInput,
|
||||
void* _outputBuffer,
|
||||
const audio::Time& _timeOutput,
|
||||
uint32_t _nbChunk,
|
||||
const std::vector<audio::orchestra::status>& _status)> AirTAudioCallback;
|
||||
// A protected structure used for buffer conversion.
|
||||
class ConvertInfo {
|
||||
public:
|
||||
int32_t channels;
|
||||
int32_t inJump;
|
||||
int32_t outJump;
|
||||
enum audio::format inFormat;
|
||||
enum audio::format outFormat;
|
||||
std::vector<int> inOffset;
|
||||
std::vector<int> outOffset;
|
||||
};
|
||||
|
||||
class Api {
|
||||
protected:
|
||||
std::string m_name;
|
||||
public:
|
||||
Api();
|
||||
virtual ~Api();
|
||||
void setName(const std::string& _name) {
|
||||
m_name = _name;
|
||||
}
|
||||
virtual audio::orchestra::type getCurrentApi() = 0;
|
||||
virtual uint32_t getDeviceCount() = 0;
|
||||
virtual audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device) = 0;
|
||||
// TODO : Check API ...
|
||||
virtual bool getNamedDeviceInfo(const std::string& _deviceName, audio::orchestra::DeviceInfo& _info) {
|
||||
return false;
|
||||
}
|
||||
virtual uint32_t getDefaultInputDevice();
|
||||
virtual uint32_t getDefaultOutputDevice();
|
||||
enum audio::orchestra::error openStream(audio::orchestra::StreamParameters* _outputParameters,
|
||||
audio::orchestra::StreamParameters* _inputParameters,
|
||||
audio::format _format,
|
||||
uint32_t _sampleRate,
|
||||
uint32_t* _nbChunk,
|
||||
audio::orchestra::AirTAudioCallback _callback,
|
||||
const audio::orchestra::StreamOptions& _options);
|
||||
virtual enum audio::orchestra::error closeStream();
|
||||
virtual enum audio::orchestra::error startStream();
|
||||
virtual enum audio::orchestra::error stopStream() = 0;
|
||||
virtual enum audio::orchestra::error abortStream() = 0;
|
||||
long getStreamLatency();
|
||||
uint32_t getStreamSampleRate();
|
||||
virtual audio::Time getStreamTime();
|
||||
bool isStreamOpen() const {
|
||||
return m_state != audio::orchestra::state_closed;
|
||||
}
|
||||
bool isStreamRunning() const {
|
||||
return m_state == audio::orchestra::state_running;
|
||||
}
|
||||
|
||||
protected:
|
||||
mutable std11::mutex m_mutex;
|
||||
audio::orchestra::AirTAudioCallback m_callback;
|
||||
uint32_t m_device[2]; // Playback and record, respectively.
|
||||
enum audio::orchestra::mode m_mode; // audio::orchestra::mode_output, audio::orchestra::mode_input, or audio::orchestra::mode_duplex.
|
||||
enum audio::orchestra::state m_state; // STOPPED, RUNNING, or CLOSED
|
||||
std::vector<char> m_userBuffer[2]; // Playback and record, respectively.
|
||||
char *m_deviceBuffer;
|
||||
bool m_doConvertBuffer[2]; // Playback and record, respectively.
|
||||
bool m_deviceInterleaved[2]; // Playback and record, respectively.
|
||||
bool m_doByteSwap[2]; // Playback and record, respectively.
|
||||
uint32_t m_sampleRate; // TODO : Rename frequency
|
||||
uint32_t m_bufferSize;
|
||||
uint32_t m_nBuffers;
|
||||
uint32_t m_nUserChannels[2]; // Playback and record, respectively. // TODO : set only one config (open inout with the same number of channels (limitation)
|
||||
uint32_t m_nDeviceChannels[2]; // Playback and record channels, respectively.
|
||||
uint32_t m_channelOffset[2]; // Playback and record, respectively.
|
||||
uint64_t m_latency[2]; // Playback and record, respectively.
|
||||
enum audio::format m_userFormat; // TODO : Remove this ==> use can only open in the Harware format ...
|
||||
enum audio::format m_deviceFormat[2]; // Playback and record, respectively.
|
||||
audio::orchestra::ConvertInfo m_convertInfo[2];
|
||||
|
||||
//audio::Time
|
||||
audio::Time m_startTime; //!< start time of the stream (restart at every stop, pause ...)
|
||||
audio::Duration m_duration; //!< duration from wich the stream is started
|
||||
|
||||
/**
|
||||
* @brief api-specific method that attempts to open a device
|
||||
* with the given parameters. This function MUST be implemented by
|
||||
* all subclasses. If an error is encountered during the probe, a
|
||||
* "warning" message is reported and false is returned. A
|
||||
* successful probe is indicated by a return value of true.
|
||||
*/
|
||||
virtual bool probeDeviceOpen(uint32_t _device,
|
||||
enum audio::orchestra::mode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
enum audio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
const audio::orchestra::StreamOptions& _options);
|
||||
virtual bool probeDeviceOpenName(const std::string& _deviceName,
|
||||
audio::orchestra::mode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
audio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
const audio::orchestra::StreamOptions& _options) { return false; }
|
||||
/**
|
||||
* @brief Increment the stream time.
|
||||
*/
|
||||
void tickStreamTime();
|
||||
/**
|
||||
* @brief Clear an RtApiStream structure.
|
||||
*/
|
||||
void clearStreamInfo();
|
||||
/**
|
||||
* @brief Check the current stream status
|
||||
*/
|
||||
enum audio::orchestra::error verifyStream();
|
||||
/**
|
||||
* @brief Protected method used to perform format, channel number, and/or interleaving
|
||||
* conversions between the user and device buffers.
|
||||
*/
|
||||
void convertBuffer(char *_outBuffer,
|
||||
char *_inBuffer,
|
||||
audio::orchestra::ConvertInfo& _info);
|
||||
|
||||
/**
|
||||
* @brief Perform byte-swapping on buffers.
|
||||
*/
|
||||
void byteSwapBuffer(char *_buffer,
|
||||
uint32_t _samples,
|
||||
enum audio::format _format);
|
||||
/**
|
||||
* @brief Sets up the parameters for buffer conversion.
|
||||
*/
|
||||
void setConvertInfo(enum audio::orchestra::mode _mode,
|
||||
uint32_t _firstChannel);
|
||||
|
||||
public:
|
||||
virtual bool isMasterOf(audio::orchestra::Api* _api) {
|
||||
return false;
|
||||
};
|
||||
};
|
||||
}
|
||||
}
|
||||
/**
|
||||
* @brief Debug operator To display the curent element in a Human redeable information
|
||||
*/
|
||||
std::ostream& operator <<(std::ostream& _os, const audio::orchestra::type& _obj);
|
||||
|
||||
#endif
|
0
audio/orchestra/CallbackInfo.h
Normal file
0
audio/orchestra/CallbackInfo.h
Normal file
47
audio/orchestra/DeviceInfo.cpp
Normal file
47
audio/orchestra/DeviceInfo.cpp
Normal file
@@ -0,0 +1,47 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
//#include <etk/types.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
#include <audio/orchestra/DeviceInfo.h>
|
||||
#include <etk/stdTools.h>
|
||||
#include <iostream>
|
||||
|
||||
#undef __class__
|
||||
#define __class__ "DeviceInfo"
|
||||
|
||||
void audio::orchestra::DeviceInfo::display(int32_t _tabNumber) const {
|
||||
std::string space;
|
||||
for (int32_t iii=0; iii<_tabNumber; ++iii) {
|
||||
space += " ";
|
||||
}
|
||||
ATA_INFO(space + "probe=" << probed);
|
||||
ATA_INFO(space + "name=" << name);
|
||||
ATA_INFO(space + "outputChannels=" << outputChannels);
|
||||
ATA_INFO(space + "inputChannels=" << inputChannels);
|
||||
ATA_INFO(space + "duplexChannels=" << duplexChannels);
|
||||
ATA_INFO(space + "isDefaultOutput=" << (isDefaultOutput==true?"true":"false"));
|
||||
ATA_INFO(space + "isDefaultInput=" << (isDefaultInput==true?"true":"false"));
|
||||
ATA_INFO(space + "rates=" << sampleRates);
|
||||
ATA_INFO(space + "native Format: " << nativeFormats);
|
||||
}
|
||||
|
||||
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const audio::orchestra::DeviceInfo& _obj) {
|
||||
_os << "{";
|
||||
_os << "probe=" << _obj.probed << ", ";
|
||||
_os << "name=" << _obj.name << ", ";
|
||||
_os << "outputChannels=" << _obj.outputChannels << ", ";
|
||||
_os << "inputChannels=" << _obj.inputChannels << ", ";
|
||||
_os << "duplexChannels=" << _obj.duplexChannels << ", ";
|
||||
_os << "isDefaultOutput=" << _obj.isDefaultOutput << ", ";
|
||||
_os << "isDefaultInput=" << _obj.isDefaultInput << ", ";
|
||||
_os << "rates=" << _obj.sampleRates << ", ";
|
||||
_os << "native Format: " << _obj.nativeFormats;
|
||||
_os << "}";
|
||||
return _os;
|
||||
}
|
||||
|
46
audio/orchestra/DeviceInfo.h
Normal file
46
audio/orchestra/DeviceInfo.h
Normal file
@@ -0,0 +1,46 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __AUDIO_ORCHESTRA_DEVICE_INFO_H__
|
||||
#define __AUDIO_ORCHESTRA_DEVICE_INFO_H__
|
||||
|
||||
#include <audio/format.h>
|
||||
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
/**
|
||||
* @brief The public device information structure for returning queried values.
|
||||
*/
|
||||
class DeviceInfo {
|
||||
public:
|
||||
bool probed; //!< true if the device capabilities were successfully probed.
|
||||
std::string name; //!< Character string device identifier.
|
||||
uint32_t outputChannels; //!< Maximum output channels supported by device.
|
||||
uint32_t inputChannels; //!< Maximum input channels supported by device.
|
||||
uint32_t duplexChannels; //!< Maximum simultaneous input/output channels supported by device.
|
||||
bool isDefaultOutput; //!< true if this is the default output device.
|
||||
bool isDefaultInput; //!< true if this is the default input device.
|
||||
std::vector<uint32_t> sampleRates; //!< Supported sample rates (queried from list of standard rates).
|
||||
std::vector<audio::format> nativeFormats; //!< Bit mask of supported data formats.
|
||||
// Default constructor.
|
||||
DeviceInfo() :
|
||||
probed(false),
|
||||
outputChannels(0),
|
||||
inputChannels(0),
|
||||
duplexChannels(0),
|
||||
isDefaultOutput(false),
|
||||
isDefaultInput(false),
|
||||
nativeFormats() {}
|
||||
void display(int32_t _tabNumber = 1) const;
|
||||
};
|
||||
std::ostream& operator <<(std::ostream& _os, const audio::orchestra::DeviceInfo& _obj);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
9
audio/orchestra/Flags.cpp
Normal file
9
audio/orchestra/Flags.cpp
Normal file
@@ -0,0 +1,9 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#include <audio/orchestra/Flags.h>
|
||||
#include <audio/orchestra/debug.h>
|
27
audio/orchestra/Flags.h
Normal file
27
audio/orchestra/Flags.h
Normal file
@@ -0,0 +1,27 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __AUDIO_ORCHESTRA_FLAGS_H__
|
||||
#define __AUDIO_ORCHESTRA_FLAGS_H__
|
||||
|
||||
#include <etk/types.h>
|
||||
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
class Flags {
|
||||
public:
|
||||
bool m_minimizeLatency; // Simple example ==> TODO ...
|
||||
Flags() :
|
||||
m_minimizeLatency(false) {
|
||||
// nothing to do ...
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
184
audio/orchestra/Interface.cpp
Normal file
184
audio/orchestra/Interface.cpp
Normal file
@@ -0,0 +1,184 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
//#include <etk/types.h>
|
||||
#include <audio/orchestra/Interface.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
#include <iostream>
|
||||
|
||||
#undef __class__
|
||||
#define __class__ "Interface"
|
||||
|
||||
std::vector<enum audio::orchestra::type> audio::orchestra::Interface::getCompiledApi() {
|
||||
std::vector<enum audio::orchestra::type> apis;
|
||||
// The order here will control the order of RtAudio's API search in
|
||||
// the constructor.
|
||||
for (size_t iii=0; iii<m_apiAvaillable.size(); ++iii) {
|
||||
apis.push_back(m_apiAvaillable[iii].first);
|
||||
}
|
||||
return apis;
|
||||
}
|
||||
|
||||
|
||||
|
||||
void audio::orchestra::Interface::openRtApi(enum audio::orchestra::type _api) {
|
||||
delete m_rtapi;
|
||||
m_rtapi = nullptr;
|
||||
for (size_t iii=0; iii<m_apiAvaillable.size(); ++iii) {
|
||||
ATA_INFO("try open " << m_apiAvaillable[iii].first);
|
||||
if (_api == m_apiAvaillable[iii].first) {
|
||||
ATA_INFO(" ==> call it");
|
||||
m_rtapi = m_apiAvaillable[iii].second();
|
||||
if (m_rtapi != nullptr) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
// TODO : An error occured ...
|
||||
ATA_ERROR("Error in open API ...");
|
||||
}
|
||||
|
||||
|
||||
audio::orchestra::Interface::Interface() :
|
||||
m_rtapi(nullptr) {
|
||||
ATA_DEBUG("Add interface:");
|
||||
#if defined(ORCHESTRA_BUILD_JACK)
|
||||
ATA_DEBUG(" JACK");
|
||||
addInterface(audio::orchestra::type_jack, audio::orchestra::api::Jack::create);
|
||||
#endif
|
||||
#if defined(ORCHESTRA_BUILD_ALSA)
|
||||
ATA_DEBUG(" ALSA");
|
||||
addInterface(audio::orchestra::type_alsa, audio::orchestra::api::Alsa::create);
|
||||
#endif
|
||||
#if defined(ORCHESTRA_BUILD_PULSE)
|
||||
ATA_DEBUG(" PULSE");
|
||||
addInterface(audio::orchestra::type_pulse, audio::orchestra::api::Pulse::create);
|
||||
#endif
|
||||
#if defined(ORCHESTRA_BUILD_OSS)
|
||||
ATA_DEBUG(" OSS");
|
||||
addInterface(audio::orchestra::type_oss, audio::orchestra::api::Oss::create);
|
||||
#endif
|
||||
#if defined(ORCHESTRA_BUILD_ASIO)
|
||||
ATA_DEBUG(" ASIO");
|
||||
addInterface(audio::orchestra::type_asio, audio::orchestra::api::Asio::create);
|
||||
#endif
|
||||
#if defined(ORCHESTRA_BUILD_DS)
|
||||
ATA_DEBUG(" DS");
|
||||
addInterface(audio::orchestra::type_ds, audio::orchestra::api::Ds::create);
|
||||
#endif
|
||||
#if defined(ORCHESTRA_BUILD_MACOSX_CORE)
|
||||
ATA_DEBUG(" CORE OSX");
|
||||
addInterface(audio::orchestra::type_coreOSX, audio::orchestra::api::Core::create);
|
||||
#endif
|
||||
#if defined(ORCHESTRA_BUILD_IOS_CORE)
|
||||
ATA_DEBUG(" CORE IOS");
|
||||
addInterface(audio::orchestra::type_coreIOS, audio::orchestra::api::CoreIos::create);
|
||||
#endif
|
||||
#if defined(ORCHESTRA_BUILD_JAVA)
|
||||
ATA_DEBUG(" JAVA");
|
||||
addInterface(audio::orchestra::type_java, audio::orchestra::api::Android::create);
|
||||
#endif
|
||||
#if defined(ORCHESTRA_BUILD_DUMMY)
|
||||
ATA_DEBUG(" DUMMY");
|
||||
addInterface(audio::orchestra::type_dummy, audio::orchestra::api::Dummy::create);
|
||||
#endif
|
||||
}
|
||||
|
||||
void audio::orchestra::Interface::addInterface(enum audio::orchestra::type _api, Api* (*_callbackCreate)()) {
|
||||
m_apiAvaillable.push_back(std::pair<enum audio::orchestra::type, Api* (*)()>(_api, _callbackCreate));
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::Interface::instanciate(enum audio::orchestra::type _api) {
|
||||
ATA_INFO("Instanciate API ...");
|
||||
if (m_rtapi != nullptr) {
|
||||
ATA_WARNING("Interface already started ...!");
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
if (_api != audio::orchestra::type_undefined) {
|
||||
ATA_INFO("API specified : " << _api);
|
||||
// Attempt to open the specified API.
|
||||
openRtApi(_api);
|
||||
if (m_rtapi != nullptr) {
|
||||
if (m_rtapi->getDeviceCount() != 0) {
|
||||
ATA_INFO(" ==> api open");
|
||||
}
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
// No compiled support for specified API value. Issue a debug
|
||||
// warning and continue as if no API was specified.
|
||||
ATA_ERROR("RtAudio: no compiled support for specified API argument!");
|
||||
return audio::orchestra::error_fail;
|
||||
}
|
||||
ATA_INFO("Auto choice API :");
|
||||
// Iterate through the compiled APIs and return as soon as we find
|
||||
// one with at least one device or we reach the end of the list.
|
||||
std::vector<enum audio::orchestra::type> apis = getCompiledApi();
|
||||
ATA_INFO(" find : " << apis.size() << " apis.");
|
||||
for (size_t iii=0; iii<apis.size(); ++iii) {
|
||||
ATA_INFO("try open ...");
|
||||
openRtApi(apis[iii]);
|
||||
if(m_rtapi == nullptr) {
|
||||
ATA_ERROR(" ==> can not create ...");
|
||||
continue;
|
||||
}
|
||||
if (m_rtapi->getDeviceCount() != 0) {
|
||||
ATA_INFO(" ==> api open");
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (m_rtapi != nullptr) {
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
ATA_ERROR("RtAudio: no compiled API support found ... critical error!!");
|
||||
return audio::orchestra::error_fail;
|
||||
}
|
||||
|
||||
audio::orchestra::Interface::~Interface() {
|
||||
ATA_INFO("Remove interface");
|
||||
delete m_rtapi;
|
||||
m_rtapi = nullptr;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::Interface::openStream(audio::orchestra::StreamParameters* _outputParameters,
|
||||
audio::orchestra::StreamParameters* _inputParameters,
|
||||
audio::format _format,
|
||||
uint32_t _sampleRate,
|
||||
uint32_t* _bufferFrames,
|
||||
audio::orchestra::AirTAudioCallback _callback,
|
||||
const audio::orchestra::StreamOptions& _options) {
|
||||
if (m_rtapi == nullptr) {
|
||||
return audio::orchestra::error_inputNull;
|
||||
}
|
||||
return m_rtapi->openStream(_outputParameters,
|
||||
_inputParameters,
|
||||
_format,
|
||||
_sampleRate,
|
||||
_bufferFrames,
|
||||
_callback,
|
||||
_options);
|
||||
}
|
||||
|
||||
bool audio::orchestra::Interface::isMasterOf(audio::orchestra::Interface& _interface) {
|
||||
if (m_rtapi == nullptr) {
|
||||
ATA_ERROR("Current Master API is nullptr ...");
|
||||
return false;
|
||||
}
|
||||
if (_interface.m_rtapi == nullptr) {
|
||||
ATA_ERROR("Current Slave API is nullptr ...");
|
||||
return false;
|
||||
}
|
||||
if (m_rtapi->getCurrentApi() != _interface.m_rtapi->getCurrentApi()) {
|
||||
ATA_ERROR("Can not link 2 Interface with not the same Low level type (?)");//" << _interface.m_adac->getCurrentApi() << " != " << m_adac->getCurrentApi() << ")");
|
||||
return false;
|
||||
}
|
||||
if (m_rtapi->getCurrentApi() != audio::orchestra::type_alsa) {
|
||||
ATA_ERROR("Link 2 device together work only if the interafec is ?");// << audio::orchestra::type_alsa << " not for " << m_rtapi->getCurrentApi());
|
||||
return false;
|
||||
}
|
||||
return m_rtapi->isMasterOf(_interface.m_rtapi);
|
||||
}
|
||||
|
320
audio/orchestra/Interface.h
Normal file
320
audio/orchestra/Interface.h
Normal file
@@ -0,0 +1,320 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __AUDIO_ORCHESTRA_INTERFACE_H__
|
||||
#define __AUDIO_ORCHESTRA_INTERFACE_H__
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <audio/orchestra/base.h>
|
||||
#include <audio/orchestra/CallbackInfo.h>
|
||||
#include <audio/orchestra/Api.h>
|
||||
#include <audio/orchestra/api/Alsa.h>
|
||||
#include <audio/orchestra/api/Android.h>
|
||||
#include <audio/orchestra/api/Asio.h>
|
||||
#include <audio/orchestra/api/Core.h>
|
||||
#include <audio/orchestra/api/CoreIos.h>
|
||||
#include <audio/orchestra/api/Ds.h>
|
||||
#include <audio/orchestra/api/Dummy.h>
|
||||
#include <audio/orchestra/api/Jack.h>
|
||||
#include <audio/orchestra/api/Oss.h>
|
||||
#include <audio/orchestra/api/Pulse.h>
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
/**
|
||||
* @brief audio::orchestra::Interface class declaration.
|
||||
*
|
||||
* audio::orchestra::Interface is a "controller" used to select an available audio i/o
|
||||
* interface. It presents a common API for the user to call but all
|
||||
* functionality is implemented by the class RtApi and its
|
||||
* subclasses. RtAudio creates an instance of an RtApi subclass
|
||||
* based on the user's API choice. If no choice is made, RtAudio
|
||||
* attempts to make a "logical" API selection.
|
||||
*/
|
||||
class Interface {
|
||||
protected:
|
||||
std::vector<std::pair<enum audio::orchestra::type, Api* (*)()> > m_apiAvaillable;
|
||||
protected:
|
||||
audio::orchestra::Api *m_rtapi;
|
||||
public:
|
||||
void setName(const std::string& _name) {
|
||||
if (m_rtapi == nullptr) {
|
||||
|
||||
return;
|
||||
}
|
||||
m_rtapi->setName(_name);
|
||||
}
|
||||
/**
|
||||
* @brief A static function to determine the available compiled audio APIs.
|
||||
*
|
||||
* The values returned in the std::vector can be compared against
|
||||
* the enumerated list values. Note that there can be more than one
|
||||
* API compiled for certain operating systems.
|
||||
*/
|
||||
std::vector<enum audio::orchestra::type> getCompiledApi();
|
||||
/**
|
||||
* @brief The class constructor.
|
||||
* @note the creating of the basic instance is done by Instanciate
|
||||
*/
|
||||
Interface();
|
||||
/**
|
||||
* @brief The destructor.
|
||||
*
|
||||
* If a stream is running or open, it will be stopped and closed
|
||||
* automatically.
|
||||
*/
|
||||
virtual ~Interface();
|
||||
/**
|
||||
* @brief Add an interface of the Possible List.
|
||||
* @param[in] _api Type of the interface.
|
||||
* @param[in] _callbackCreate API creation callback.
|
||||
*/
|
||||
void addInterface(enum audio::orchestra::type _api, Api* (*_callbackCreate)());
|
||||
/**
|
||||
* @brief Create an interface instance
|
||||
*/
|
||||
enum audio::orchestra::error instanciate(enum audio::orchestra::type _api = audio::orchestra::type_undefined);
|
||||
/**
|
||||
* @return the audio API specifier for the current instance of airtaudio.
|
||||
*/
|
||||
enum audio::orchestra::type getCurrentApi() {
|
||||
if (m_rtapi == nullptr) {
|
||||
return audio::orchestra::type_undefined;
|
||||
}
|
||||
return m_rtapi->getCurrentApi();
|
||||
}
|
||||
/**
|
||||
* @brief A public function that queries for the number of audio devices available.
|
||||
*
|
||||
* This function performs a system query of available devices each time it
|
||||
* is called, thus supporting devices connected \e after instantiation. If
|
||||
* a system error occurs during processing, a warning will be issued.
|
||||
*/
|
||||
uint32_t getDeviceCount() {
|
||||
if (m_rtapi == nullptr) {
|
||||
return 0;
|
||||
}
|
||||
return m_rtapi->getDeviceCount();
|
||||
}
|
||||
/**
|
||||
* @brief Any device integer between 0 and getDeviceCount() - 1 is valid.
|
||||
* If an invalid argument is provided, an RtError (type = INVALID_USE)
|
||||
* will be thrown. If a device is busy or otherwise unavailable, the
|
||||
* structure member "probed" will have a value of "false" and all
|
||||
* other members are undefined. If the specified device is the
|
||||
* current default input or output device, the corresponding
|
||||
* "isDefault" member will have a value of "true".
|
||||
*
|
||||
* @return An audio::orchestra::DeviceInfo structure for a specified device number.
|
||||
*/
|
||||
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device) {
|
||||
if (m_rtapi == nullptr) {
|
||||
return audio::orchestra::DeviceInfo();
|
||||
}
|
||||
return m_rtapi->getDeviceInfo(_device);
|
||||
}
|
||||
audio::orchestra::DeviceInfo getDeviceInfo(const std::string& _deviceName) {
|
||||
if (m_rtapi == nullptr) {
|
||||
return audio::orchestra::DeviceInfo();
|
||||
}
|
||||
audio::orchestra::DeviceInfo info;
|
||||
m_rtapi->getNamedDeviceInfo(_deviceName, info);
|
||||
return info;
|
||||
}
|
||||
/**
|
||||
* @brief A function that returns the index of the default output device.
|
||||
*
|
||||
* If the underlying audio API does not provide a "default
|
||||
* device", or if no devices are available, the return value will be
|
||||
* 0. Note that this is a valid device identifier and it is the
|
||||
* client's responsibility to verify that a device is available
|
||||
* before attempting to open a stream.
|
||||
*/
|
||||
uint32_t getDefaultOutputDevice() {
|
||||
if (m_rtapi == nullptr) {
|
||||
return 0;
|
||||
}
|
||||
return m_rtapi->getDefaultOutputDevice();
|
||||
}
|
||||
/**
|
||||
* @brief A function that returns the index of the default input device.
|
||||
*
|
||||
* If the underlying audio API does not provide a "default
|
||||
* device", or if no devices are available, the return value will be
|
||||
* 0. Note that this is a valid device identifier and it is the
|
||||
* client's responsibility to verify that a device is available
|
||||
* before attempting to open a stream.
|
||||
*/
|
||||
uint32_t getDefaultInputDevice() {
|
||||
if (m_rtapi == nullptr) {
|
||||
return 0;
|
||||
}
|
||||
return m_rtapi->getDefaultInputDevice();
|
||||
}
|
||||
/**
|
||||
* @brief A public function for opening a stream with the specified parameters.
|
||||
*
|
||||
* An RtError (type = SYSTEM_ERROR) is thrown if a stream cannot be
|
||||
* opened with the specified parameters or an error occurs during
|
||||
* processing. An RtError (type = INVALID_USE) is thrown if any
|
||||
* invalid device ID or channel number parameters are specified.
|
||||
* @param _outputParameters Specifies output stream parameters to use
|
||||
* when opening a stream, including a device ID, number of channels,
|
||||
* and starting channel number. For input-only streams, this
|
||||
* argument should be nullptr. The device ID is an index value between
|
||||
* 0 and getDeviceCount() - 1.
|
||||
* @param _inputParameters Specifies input stream parameters to use
|
||||
* when opening a stream, including a device ID, number of channels,
|
||||
* and starting channel number. For output-only streams, this
|
||||
* argument should be nullptr. The device ID is an index value between
|
||||
* 0 and getDeviceCount() - 1.
|
||||
* @param _format An audio::format specifying the desired sample data format.
|
||||
* @param _sampleRate The desired sample rate (sample frames per second).
|
||||
* @param _bufferFrames A pointer to a value indicating the desired
|
||||
* internal buffer size in sample frames. The actual value
|
||||
* used by the device is returned via the same pointer. A
|
||||
* value of zero can be specified, in which case the lowest
|
||||
* allowable value is determined.
|
||||
* @param _callback A client-defined function that will be invoked
|
||||
* when input data is available and/or output data is needed.
|
||||
* @param _options An optional pointer to a structure containing various
|
||||
* global stream options, including a list of OR'ed audio::orchestra::streamFlags
|
||||
* and a suggested number of stream buffers that can be used to
|
||||
* control stream latency. More buffers typically result in more
|
||||
* robust performance, though at a cost of greater latency. If a
|
||||
* value of zero is specified, a system-specific median value is
|
||||
* chosen. If the airtaudio_MINIMIZE_LATENCY flag bit is set, the
|
||||
* lowest allowable value is used. The actual value used is
|
||||
* returned via the structure argument. The parameter is API dependent.
|
||||
* @param _errorCallback A client-defined function that will be invoked
|
||||
* when an error has occured.
|
||||
*/
|
||||
enum audio::orchestra::error openStream(audio::orchestra::StreamParameters *_outputParameters,
|
||||
audio::orchestra::StreamParameters *_inputParameters,
|
||||
enum audio::format _format,
|
||||
uint32_t _sampleRate,
|
||||
uint32_t* _bufferFrames,
|
||||
audio::orchestra::AirTAudioCallback _callback,
|
||||
const audio::orchestra::StreamOptions& _options = audio::orchestra::StreamOptions());
|
||||
|
||||
/**
|
||||
* @brief A function that closes a stream and frees any associated stream memory.
|
||||
*
|
||||
* If a stream is not open, this function issues a warning and
|
||||
* returns (no exception is thrown).
|
||||
*/
|
||||
enum audio::orchestra::error closeStream() {
|
||||
if (m_rtapi == nullptr) {
|
||||
return audio::orchestra::error_inputNull;
|
||||
}
|
||||
return m_rtapi->closeStream();
|
||||
}
|
||||
/**
|
||||
* @brief A function that starts a stream.
|
||||
*
|
||||
* An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
|
||||
* during processing. An RtError (type = INVALID_USE) is thrown if a
|
||||
* stream is not open. A warning is issued if the stream is already
|
||||
* running.
|
||||
*/
|
||||
enum audio::orchestra::error startStream() {
|
||||
if (m_rtapi == nullptr) {
|
||||
return audio::orchestra::error_inputNull;
|
||||
}
|
||||
return m_rtapi->startStream();
|
||||
}
|
||||
/**
|
||||
* @brief Stop a stream, allowing any samples remaining in the output queue to be played.
|
||||
*
|
||||
* An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
|
||||
* during processing. An RtError (type = INVALID_USE) is thrown if a
|
||||
* stream is not open. A warning is issued if the stream is already
|
||||
* stopped.
|
||||
*/
|
||||
enum audio::orchestra::error stopStream() {
|
||||
if (m_rtapi == nullptr) {
|
||||
return audio::orchestra::error_inputNull;
|
||||
}
|
||||
return m_rtapi->stopStream();
|
||||
}
|
||||
/**
|
||||
* @brief Stop a stream, discarding any samples remaining in the input/output queue.
|
||||
* An RtError (type = SYSTEM_ERROR) is thrown if an error occurs
|
||||
* during processing. An RtError (type = INVALID_USE) is thrown if a
|
||||
* stream is not open. A warning is issued if the stream is already
|
||||
* stopped.
|
||||
*/
|
||||
enum audio::orchestra::error abortStream() {
|
||||
if (m_rtapi == nullptr) {
|
||||
return audio::orchestra::error_inputNull;
|
||||
}
|
||||
return m_rtapi->abortStream();
|
||||
}
|
||||
/**
|
||||
* @return true if a stream is open and false if not.
|
||||
*/
|
||||
bool isStreamOpen() const {
|
||||
if (m_rtapi == nullptr) {
|
||||
return false;
|
||||
}
|
||||
return m_rtapi->isStreamOpen();
|
||||
}
|
||||
/**
|
||||
* @return true if the stream is running and false if it is stopped or not open.
|
||||
*/
|
||||
bool isStreamRunning() const {
|
||||
if (m_rtapi == nullptr) {
|
||||
return false;
|
||||
}
|
||||
return m_rtapi->isStreamRunning();
|
||||
}
|
||||
/**
|
||||
* @brief If a stream is not open, an RtError (type = INVALID_USE) will be thrown.
|
||||
* @return the number of elapsed seconds since the stream was started.
|
||||
*/
|
||||
audio::Time getStreamTime() {
|
||||
if (m_rtapi == nullptr) {
|
||||
return audio::Time();
|
||||
}
|
||||
return m_rtapi->getStreamTime();
|
||||
}
|
||||
/**
|
||||
* @brief The stream latency refers to delay in audio input and/or output
|
||||
* caused by internal buffering by the audio system and/or hardware.
|
||||
* For duplex streams, the returned value will represent the sum of
|
||||
* the input and output latencies. If a stream is not open, an
|
||||
* RtError (type = INVALID_USE) will be thrown. If the API does not
|
||||
* report latency, the return value will be zero.
|
||||
* @return The internal stream latency in sample frames.
|
||||
*/
|
||||
long getStreamLatency() {
|
||||
if (m_rtapi == nullptr) {
|
||||
return 0;
|
||||
}
|
||||
return m_rtapi->getStreamLatency();
|
||||
}
|
||||
/**
|
||||
* @brief On some systems, the sample rate used may be slightly different
|
||||
* than that specified in the stream parameters. If a stream is not
|
||||
* open, an RtError (type = INVALID_USE) will be thrown.
|
||||
* @return Returns actual sample rate in use by the stream.
|
||||
*/
|
||||
uint32_t getStreamSampleRate() {
|
||||
if (m_rtapi == nullptr) {
|
||||
return 0;
|
||||
}
|
||||
return m_rtapi->getStreamSampleRate();
|
||||
}
|
||||
bool isMasterOf(audio::orchestra::Interface& _interface);
|
||||
protected:
|
||||
void openRtApi(enum audio::orchestra::type _api);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
45
audio/orchestra/StreamOptions.cpp
Normal file
45
audio/orchestra/StreamOptions.cpp
Normal file
@@ -0,0 +1,45 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#include <audio/orchestra/StreamOptions.h>
|
||||
#include <etk/stdTools.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
|
||||
static const char* listValue[] = {
|
||||
"hardware",
|
||||
"trigered",
|
||||
"soft"
|
||||
};
|
||||
|
||||
std::ostream& audio::orchestra::operator <<(std::ostream& _os, enum audio::orchestra::timestampMode _obj) {
|
||||
_os << listValue[_obj];
|
||||
return _os;
|
||||
}
|
||||
|
||||
namespace etk {
|
||||
template <> bool from_string<enum audio::orchestra::timestampMode>(enum audio::orchestra::timestampMode& _variableRet, const std::string& _value) {
|
||||
if (_value == "hardware") {
|
||||
_variableRet = audio::orchestra::timestampMode_Hardware;
|
||||
return true;
|
||||
}
|
||||
if (_value == "trigered") {
|
||||
_variableRet = audio::orchestra::timestampMode_trigered;
|
||||
return true;
|
||||
}
|
||||
if (_value == "soft") {
|
||||
_variableRet = audio::orchestra::timestampMode_soft;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
template <enum audio::orchestra::timestampMode> std::string to_string(const enum audio::orchestra::timestampMode& _variable) {
|
||||
return listValue[_variable];
|
||||
}
|
||||
}
|
||||
|
||||
|
39
audio/orchestra/StreamOptions.h
Normal file
39
audio/orchestra/StreamOptions.h
Normal file
@@ -0,0 +1,39 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __AUDIO_ORCHESTRA_STREAM_OPTION_H__
|
||||
#define __AUDIO_ORCHESTRA_STREAM_OPTION_H__
|
||||
|
||||
#include <audio/orchestra/Flags.h>
|
||||
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
enum timestampMode {
|
||||
timestampMode_Hardware, //!< enable harware timestamp
|
||||
timestampMode_trigered, //!< get harware triger time stamp and ingrement with duration
|
||||
timestampMode_soft, //!< Simulate all timestamp.
|
||||
};
|
||||
std::ostream& operator <<(std::ostream& _os, enum audio::orchestra::timestampMode _obj);
|
||||
|
||||
class StreamOptions {
|
||||
public:
|
||||
audio::orchestra::Flags flags; //!< A bit-mask of stream flags
|
||||
uint32_t numberOfBuffers; //!< Number of stream buffers.
|
||||
std::string streamName; //!< A stream name (currently used only in Jack).
|
||||
enum timestampMode mode; //!< mode of timestamping data...
|
||||
// Default constructor.
|
||||
StreamOptions() :
|
||||
flags(),
|
||||
numberOfBuffers(0),
|
||||
mode(timestampMode_Hardware) {}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
35
audio/orchestra/StreamParameters.h
Normal file
35
audio/orchestra/StreamParameters.h
Normal file
@@ -0,0 +1,35 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __AUDIO_ORCHESTRA_STREAM_PARAMETER_H__
|
||||
#define __AUDIO_ORCHESTRA_STREAM_PARAMETER_H__
|
||||
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
/**
|
||||
* @brief The structure for specifying input or ouput stream parameters.
|
||||
*/
|
||||
class StreamParameters {
|
||||
public:
|
||||
int32_t deviceId; //!< Device index (-1 to getDeviceCount() - 1).
|
||||
std::string deviceName; //!< name of the device (if deviceId==-1 this must not be == "", and the oposite ...)
|
||||
uint32_t nChannels; //!< Number of channels.
|
||||
uint32_t firstChannel; //!< First channel index on device (default = 0).
|
||||
// Default constructor.
|
||||
StreamParameters() :
|
||||
deviceId(-1),
|
||||
nChannels(0),
|
||||
firstChannel(0) {
|
||||
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
1389
audio/orchestra/api/Alsa.cpp
Normal file
1389
audio/orchestra/api/Alsa.cpp
Normal file
File diff suppressed because it is too large
Load Diff
77
audio/orchestra/api/Alsa.h
Normal file
77
audio/orchestra/api/Alsa.h
Normal file
@@ -0,0 +1,77 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#if !defined(__AUDIO_ORCHESTRA_API_ALSA_H__) && defined(ORCHESTRA_BUILD_ALSA)
|
||||
#define __AUDIO_ORCHESTRA_API_ALSA_H__
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
namespace api {
|
||||
class AlsaPrivate;
|
||||
class Alsa: public audio::orchestra::Api {
|
||||
public:
|
||||
static audio::orchestra::Api* create();
|
||||
public:
|
||||
Alsa();
|
||||
virtual ~Alsa();
|
||||
enum audio::orchestra::type getCurrentApi() {
|
||||
return audio::orchestra::type_alsa;
|
||||
}
|
||||
uint32_t getDeviceCount();
|
||||
private:
|
||||
bool getNamedDeviceInfoLocal(const std::string& _deviceName,
|
||||
audio::orchestra::DeviceInfo& _info,
|
||||
int32_t _cardId=-1, // Alsa card ID
|
||||
int32_t _subdevice=-1, // alsa subdevice ID
|
||||
int32_t _localDeviceId=-1); // local ID of device fined
|
||||
public:
|
||||
bool getNamedDeviceInfo(const std::string& _deviceName, audio::orchestra::DeviceInfo& _info) {
|
||||
return getNamedDeviceInfoLocal(_deviceName, _info);
|
||||
}
|
||||
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||
enum audio::orchestra::error closeStream();
|
||||
enum audio::orchestra::error startStream();
|
||||
enum audio::orchestra::error stopStream();
|
||||
enum audio::orchestra::error abortStream();
|
||||
// This function is intended for internal use only. It must be
|
||||
// public because it is called by the internal callback handler,
|
||||
// which is not a member of RtAudio. External use of this function
|
||||
// will most likely produce highly undesireable results!
|
||||
void callbackEvent();
|
||||
void callbackEventOneCycle();
|
||||
private:
|
||||
static void alsaCallbackEvent(void* _userData);
|
||||
private:
|
||||
std11::shared_ptr<AlsaPrivate> m_private;
|
||||
std::vector<audio::orchestra::DeviceInfo> m_devices;
|
||||
void saveDeviceInfo();
|
||||
bool probeDeviceOpen(uint32_t _device,
|
||||
enum audio::orchestra::mode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
enum audio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
const audio::orchestra::StreamOptions& _options);
|
||||
|
||||
virtual bool probeDeviceOpenName(const std::string& _deviceName,
|
||||
audio::orchestra::mode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
audio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
const audio::orchestra::StreamOptions& _options);
|
||||
virtual audio::Time getStreamTime();
|
||||
public:
|
||||
bool isMasterOf(audio::orchestra::Api* _api);
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
210
audio/orchestra/api/Android.cpp
Normal file
210
audio/orchestra/api/Android.cpp
Normal file
@@ -0,0 +1,210 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifdef ORCHESTRA_BUILD_JAVA
|
||||
|
||||
#include <ewol/context/Context.h>
|
||||
#include <unistd.h>
|
||||
#include <audio/orchestra/Interface.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
#include <limits.h>
|
||||
|
||||
#undef __class__
|
||||
#define __class__ "api::Android"
|
||||
|
||||
audio::orchestra::Api* audio::orchestra::api::Android::create() {
|
||||
ATA_INFO("Create Android device ... ");
|
||||
return new audio::orchestra::api::Android();
|
||||
}
|
||||
|
||||
|
||||
audio::orchestra::api::Android::Android() {
|
||||
ATA_INFO("new Android");
|
||||
// On android, we set a static device ...
|
||||
ATA_INFO("get context");
|
||||
ewol::Context& tmpContext = ewol::getContext();
|
||||
ATA_INFO("done p=" << (int64_t)&tmpContext);
|
||||
int32_t deviceCount = tmpContext.audioGetDeviceCount();
|
||||
ATA_ERROR("Get count devices : " << deviceCount);
|
||||
for (int32_t iii=0; iii<deviceCount; ++iii) {
|
||||
std::string property = tmpContext.audioGetDeviceProperty(iii);
|
||||
ATA_ERROR("Get devices property : " << property);
|
||||
std::vector<std::string> listProperty = etk::split(property, ':');
|
||||
audio::orchestra::DeviceInfo tmp;
|
||||
tmp.name = listProperty[0];
|
||||
std::vector<std::string> listFreq = etk::split(listProperty[2], ',');
|
||||
for(size_t fff=0; fff<listFreq.size(); ++fff) {
|
||||
tmp.sampleRates.push_back(etk::string_to_int32_t(listFreq[fff]));
|
||||
}
|
||||
tmp.outputChannels = 0;
|
||||
tmp.inputChannels = 0;
|
||||
tmp.duplexChannels = 0;
|
||||
if (listProperty[1] == "out") {
|
||||
tmp.isDefaultOutput = true;
|
||||
tmp.isDefaultInput = false;
|
||||
tmp.outputChannels = etk::string_to_int32_t(listProperty[3]);
|
||||
} else if (listProperty[1] == "in") {
|
||||
tmp.isDefaultOutput = false;
|
||||
tmp.isDefaultInput = true;
|
||||
tmp.inputChannels = etk::string_to_int32_t(listProperty[3]);
|
||||
} else {
|
||||
/* duplex */
|
||||
tmp.isDefaultOutput = true;
|
||||
tmp.isDefaultInput = true;
|
||||
tmp.duplexChannels = etk::string_to_int32_t(listProperty[3]);
|
||||
}
|
||||
tmp.nativeFormats = audio::getListFormatFromString(listProperty[4]);
|
||||
m_devices.push_back(tmp);
|
||||
}
|
||||
ATA_INFO("Create Android interface (end)");
|
||||
}
|
||||
|
||||
audio::orchestra::api::Android::~Android() {
|
||||
ATA_INFO("Destroy Android interface");
|
||||
}
|
||||
|
||||
uint32_t audio::orchestra::api::Android::getDeviceCount() {
|
||||
//ATA_INFO("Get device count:"<< m_devices.size());
|
||||
return m_devices.size();
|
||||
}
|
||||
|
||||
audio::orchestra::DeviceInfo audio::orchestra::api::Android::getDeviceInfo(uint32_t _device) {
|
||||
//ATA_INFO("Get device info ...");
|
||||
return m_devices[_device];
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Android::closeStream() {
|
||||
ATA_INFO("Clese Stream");
|
||||
// Can not close the stream now...
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Android::startStream() {
|
||||
ATA_INFO("Start Stream");
|
||||
// TODO : Check return ...
|
||||
audio::orchestra::Api::startStream();
|
||||
// Can not close the stream now...
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Android::stopStream() {
|
||||
ATA_INFO("Stop stream");
|
||||
ewol::Context& tmpContext = ewol::getContext();
|
||||
tmpContext.audioCloseDevice(0);
|
||||
// Can not close the stream now...
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Android::abortStream() {
|
||||
ATA_INFO("Abort Stream");
|
||||
ewol::Context& tmpContext = ewol::getContext();
|
||||
tmpContext.audioCloseDevice(0);
|
||||
// Can not close the stream now...
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
void audio::orchestra::api::Android::callBackEvent(void* _data,
|
||||
int32_t _frameRate) {
|
||||
int32_t doStopStream = 0;
|
||||
audio::Time streamTime = getStreamTime();
|
||||
std::vector<enum audio::orchestra::status> status;
|
||||
if (m_doConvertBuffer[audio::orchestra::mode_output] == true) {
|
||||
doStopStream = m_callback(nullptr,
|
||||
audio::Time(),
|
||||
m_userBuffer[audio::orchestra::mode_output],
|
||||
streamTime,
|
||||
_frameRate,
|
||||
status);
|
||||
convertBuffer((char*)_data, (char*)m_userBuffer[audio::orchestra::mode_output], m_convertInfo[audio::orchestra::mode_output]);
|
||||
} else {
|
||||
doStopStream = m_callback(_data,
|
||||
streamTime,
|
||||
nullptr,
|
||||
audio::Time(),
|
||||
_frameRate,
|
||||
status);
|
||||
}
|
||||
if (doStopStream == 2) {
|
||||
abortStream();
|
||||
return;
|
||||
}
|
||||
audio::orchestra::Api::tickStreamTime();
|
||||
}
|
||||
|
||||
/**
 * Static trampoline called by the Java audio layer; dispatches to the
 * Android instance stored in @p _userData.
 * @param _data Output buffer to fill.
 * @param _frameRate Count reported by the Java layer.
 * @param _userData Opaque pointer back to the Android instance.
 */
void audio::orchestra::api::Android::androidCallBackEvent(void* _data,
                                                          int32_t _frameRate,
                                                          void* _userData) {
	if (_userData == nullptr) {
		ATA_INFO("callback event ... nullptr pointer");
		return;
	}
	audio::orchestra::api::Android* myClass = static_cast<audio::orchestra::api::Android*>(_userData);
	// NOTE(review): the count is halved here — presumably the Java layer
	// reports interleaved stereo samples rather than frames; confirm with
	// the caller before relying on this.
	myClass->callBackEvent(_data, _frameRate/2);
}
|
||||
|
||||
bool audio::orchestra::api::Android::probeDeviceOpen(uint32_t _device,
|
||||
audio::orchestra::mode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
audio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
const audio::orchestra::StreamOptions& _options) {
|
||||
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
|
||||
if (_mode != audio::orchestra::mode_output) {
|
||||
ATA_ERROR("Can not start a device input or duplex for Android ...");
|
||||
return false;
|
||||
}
|
||||
m_userFormat = _format;
|
||||
m_nUserChannels[modeToIdTable(_mode)] = _channels;
|
||||
ewol::Context& tmpContext = ewol::getContext();
|
||||
bool ret = false;
|
||||
if (_format == SINT8) {
|
||||
ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 0, androidCallBackEvent, this);
|
||||
} else {
|
||||
ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 1, androidCallBackEvent, this);
|
||||
}
|
||||
m_bufferSize = 256;
|
||||
m_sampleRate = _sampleRate;
|
||||
m_doByteSwap[modeToIdTable(_mode)] = false; // for endienness ...
|
||||
|
||||
// TODO : For now, we write it in hard ==> to bu update later ...
|
||||
m_deviceFormat[modeToIdTable(_mode)] = SINT16;
|
||||
m_nDeviceChannels[modeToIdTable(_mode)] = 2;
|
||||
m_deviceInterleaved[modeToIdTable(_mode)] = true;
|
||||
|
||||
m_doConvertBuffer[modeToIdTable(_mode)] = false;
|
||||
if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
|
||||
m_doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||
}
|
||||
if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
|
||||
m_doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||
}
|
||||
if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
|
||||
&& m_nUserChannels[modeToIdTable(_mode)] > 1) {
|
||||
m_doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||
}
|
||||
if (m_doConvertBuffer[modeToIdTable(_mode)] == true) {
|
||||
// Allocate necessary internal buffers.
|
||||
uint64_t bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
|
||||
m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
|
||||
if (m_userBuffer[modeToIdTable(_mode)] == nullptr) {
|
||||
ATA_ERROR("audio::orchestra::api::Android::probeDeviceOpen: error allocating user buffer memory.");
|
||||
}
|
||||
setConvertInfo(_mode, _firstChannel);
|
||||
}
|
||||
ATA_INFO("device format : " << m_deviceFormat[modeToIdTable(_mode)] << " user format : " << m_userFormat);
|
||||
ATA_INFO("device channels : " << m_nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_nUserChannels[modeToIdTable(_mode)]);
|
||||
ATA_INFO("do convert buffer : " << m_doConvertBuffer[modeToIdTable(_mode)]);
|
||||
if (ret == false) {
|
||||
ATA_ERROR("Can not open device.");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
56
audio/orchestra/api/Android.h
Normal file
56
audio/orchestra/api/Android.h
Normal file
@@ -0,0 +1,56 @@
|
||||
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */

#if !defined(__AUDIO_ORCHESTRA_API_ANDROID_H__) && defined(ORCHESTRA_BUILD_JAVA)
#define __AUDIO_ORCHESTRA_API_ANDROID_H__

namespace audio {
	namespace orchestra {
		namespace api {
			/**
			 * Backend that routes audio through the Android Java layer
			 * (output only in the current implementation).
			 */
			class Android: public audio::orchestra::Api {
				public:
					/// Factory used by the backend registry.
					static audio::orchestra::Api* create();
				public:
					Android();
					virtual ~Android();
					/// @return the identifier of this backend (Java bridge).
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_java;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					void callbackEvent();
				private:
					/// Device list exposed by this backend.
					std::vector<audio::orchestra::DeviceInfo> m_devices;
					void saveDeviceInfo();
					/// Open the device and configure the conversion pipeline.
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
				private:
					/// Instance-side callback: fill _data for _frameRate frames.
					void callBackEvent(void* _data,
					                   int32_t _frameRate);
					/// Static trampoline registered with the Java layer.
					static void androidCallBackEvent(void* _data,
					                                 int32_t _frameRate,
					                                 void* _userData);
			};
		}
	}
}

#endif
|
File diff suppressed because it is too large
Load Diff
54
audio/orchestra/api/Asio.h
Normal file
54
audio/orchestra/api/Asio.h
Normal file
@@ -0,0 +1,54 @@
|
||||
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */

#if !defined(__AUDIO_ORCHESTRA_API_ASIO_H__) && defined(ORCHESTRA_BUILD_ASIO)
#define __AUDIO_ORCHESTRA_API_ASIO_H__

namespace audio {
	namespace orchestra {
		namespace api {
			// Fix: forward declaration of the pimpl class — the stray ':' in
			// "class AsioPrivate:" was a syntax error (compare Core.h / Ds.h).
			class AsioPrivate;
			/// Windows ASIO backend.
			class Asio: public audio::orchestra::Api {
				public:
					/// Factory used by the backend registry.
					static audio::orchestra::Api* create();
				public:
					Asio();
					virtual ~Asio();
					/// @return the identifier of this backend.
					enum audio::orchestra::type getCurrentApi() {
						// Fix: every other backend returns an audio::orchestra::type_*
						// value; WINDOWS_ASIO belongs to the legacy airtaudio enum.
						return audio::orchestra::type_asio;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					bool callbackEvent(long _bufferIndex);
				private:
					/// Pimpl holding the ASIO-specific state.
					std::shared_ptr<AsioPrivate> m_private;
					std::vector<audio::orchestra::DeviceInfo> m_devices;
					void saveDeviceInfo();
					bool m_coInitialized;
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}

#endif
|
1302
audio/orchestra/api/Core.cpp
Normal file
1302
audio/orchestra/api/Core.cpp
Normal file
File diff suppressed because it is too large
Load Diff
69
audio/orchestra/api/Core.h
Normal file
69
audio/orchestra/api/Core.h
Normal file
@@ -0,0 +1,69 @@
|
||||
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */

#if !defined(__AUDIO_ORCHESTRA_API_CORE_H__) && defined(ORCHESTRA_BUILD_MACOSX_CORE)
#define __AUDIO_ORCHESTRA_API_CORE_H__

#include <CoreAudio/AudioHardware.h>

namespace audio {
	namespace orchestra {
		namespace api {
			// Forward declaration of the pimpl class.
			class CorePrivate;
			/// macOS CoreAudio backend.
			class Core: public audio::orchestra::Api {
				public:
					/// Factory used by the backend registry.
					static audio::orchestra::Api* create();
				public:
					Core();
					virtual ~Core();
					/// @return the identifier of this backend (macOS CoreAudio).
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_coreOSX;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					uint32_t getDefaultOutputDevice();
					uint32_t getDefaultInputDevice();
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					/// Instance-side render handler (invoked by the static trampoline).
					bool callbackEvent(AudioDeviceID _deviceId,
					                   const AudioBufferList *_inBufferList,
					                   const audio::Time& _inTime,
					                   const AudioBufferList *_outBufferList,
					                   const audio::Time& _outTime);
					/// Static CoreAudio IOProc trampoline; _infoPointer carries `this`.
					static OSStatus callbackEvent(AudioDeviceID _inDevice,
					                              const AudioTimeStamp* _inNow,
					                              const AudioBufferList* _inInputData,
					                              const AudioTimeStamp* _inInputTime,
					                              AudioBufferList* _outOutputData,
					                              const AudioTimeStamp* _inOutputTime,
					                              void* _infoPointer);
					static void coreStopStream(void *_userData);
				private:
					/// Pimpl holding the CoreAudio-specific state.
					std::shared_ptr<CorePrivate> m_private;
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
					/// Translate an OSStatus into a printable error string.
					static const char* getErrorCode(OSStatus _code);
					/// Property listener used to detect xruns (overloads).
					static OSStatus xrunListener(AudioObjectID _inDevice,
					                             uint32_t _nAddresses,
					                             const AudioObjectPropertyAddress _properties[],
					                             void* _userData);
			};
		}
	}
}

#endif
|
58
audio/orchestra/api/CoreIos.h
Normal file
58
audio/orchestra/api/CoreIos.h
Normal file
@@ -0,0 +1,58 @@
|
||||
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */

#if !defined(__AUDIO_ORCHESTRA_API_CORE_IOS_H__) && defined(ORCHESTRA_BUILD_IOS_CORE)
#define __AUDIO_ORCHESTRA_API_CORE_IOS_H__

namespace audio {
	namespace orchestra {
		namespace api {
			// Forward declaration of the pimpl class.
			class CoreIosPrivate;
			/// iOS CoreAudio (RemoteIO) backend.
			class CoreIos: public audio::orchestra::Api {
				public:
					/// Factory used by the backend registry.
					static audio::orchestra::Api* create();
				public:
					CoreIos();
					virtual ~CoreIos();
					/// @return the identifier of this backend (iOS CoreAudio).
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_coreIOS;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					void callbackEvent();
				private:
					/// Fixed device list built in the constructor.
					std::vector<audio::orchestra::DeviceInfo> m_devices;
					void saveDeviceInfo();
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
				public:
					/// Instance-side render callback invoked by the RemoteIO trampoline.
					void callBackEvent(void* _data,
					                   int32_t _nbChunk,
					                   const audio::Time& _time);
				public:
					// Fix: the sibling headers (Core.h, Asio.h) use std::shared_ptr;
					// std11:: is the legacy compatibility alias.
					std::shared_ptr<CoreIosPrivate> m_private;
			};
		}
	}
}

#endif
|
302
audio/orchestra/api/CoreIos.mm
Normal file
302
audio/orchestra/api/CoreIos.mm
Normal file
@@ -0,0 +1,302 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifdef ORCHESTRA_BUILD_IOS_CORE
|
||||
|
||||
#import <Foundation/Foundation.h>
|
||||
#import <AudioToolbox/AudioToolbox.h>
|
||||
|
||||
#include <unistd.h>
|
||||
#include <audio/orchestra/Interface.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
#include <limits.h>
|
||||
|
||||
#undef __class__
|
||||
#define __class__ "api::CoreIos"
|
||||
|
||||
/// Factory entry point for the backend registry: builds a CoreIos instance.
audio::orchestra::Api* audio::orchestra::api::CoreIos::create() {
	ATA_INFO("Create CoreIos device ... ");
	audio::orchestra::api::CoreIos* backend = new audio::orchestra::api::CoreIos();
	return backend;
}
|
||||
|
||||
// RemoteIO bus numbers: bus 0 is the output (speaker) element,
// bus 1 is the input (microphone) element.
#define kOutputBus 0
#define kInputBus 1

namespace audio {
	namespace orchestra {
		namespace api {
			/// Private (pimpl) state for the CoreIos backend.
			class CoreIosPrivate {
				public:
					// RemoteIO audio unit; created/initialized in probeDeviceOpen().
					AudioComponentInstance audioUnit;
			};
		}
	}
}
||||
|
||||
|
||||
|
||||
/**
 * Build the backend and its fixed two-entry device list
 * (one stereo output, one stereo input, both 48 kHz / int16).
 */
audio::orchestra::api::CoreIos::CoreIos(void) :
  m_private(new audio::orchestra::api::CoreIosPrivate()) {
	ATA_INFO("new CoreIos");
	// Fix: dropped the unused local `deviceCount` and log this informational
	// message at INFO level instead of ERROR.
	ATA_INFO("Get count devices : " << 2);
	audio::orchestra::DeviceInfo tmp;
	// Add default output format :
	tmp.name = "out";
	tmp.sampleRates.push_back(48000);
	tmp.outputChannels = 2;
	tmp.inputChannels = 0;
	tmp.duplexChannels = 0;
	tmp.isDefaultOutput = true;
	tmp.isDefaultInput = false;
	tmp.nativeFormats.push_back(audio::format_int16);
	m_devices.push_back(tmp);
	// add default input format:
	tmp.name = "in";
	tmp.sampleRates.push_back(48000);
	tmp.outputChannels = 0;
	tmp.inputChannels = 2;
	tmp.duplexChannels = 0;
	tmp.isDefaultOutput = false;
	tmp.isDefaultInput = true;
	tmp.nativeFormats.push_back(audio::format_int16);
	m_devices.push_back(tmp);
	ATA_INFO("Create CoreIOs interface (end)");
}
|
||||
|
||||
/**
 * Tear down the backend and release the RemoteIO audio unit.
 * NOTE(review): m_private->audioUnit is only assigned in probeDeviceOpen();
 * if no stream was ever opened this uninitializes an indeterminate handle —
 * confirm and guard if needed.
 */
audio::orchestra::api::CoreIos::~CoreIos(void) {
	ATA_INFO("Destroy CoreIOs interface");
	AudioUnitUninitialize(m_private->audioUnit);
}
|
||||
|
||||
uint32_t audio::orchestra::api::CoreIos::getDeviceCount(void) {
|
||||
//ATA_INFO("Get device count:"<< m_devices.size());
|
||||
return m_devices.size();
|
||||
}
|
||||
|
||||
/**
 * Get the description of one device.
 * @param _device Index into the fixed device list.
 * @return the device description, or a default-constructed DeviceInfo
 *         when the index is out of range.
 */
audio::orchestra::DeviceInfo audio::orchestra::api::CoreIos::getDeviceInfo(uint32_t _device) {
	// Fix: guard against out-of-range ids instead of indexing past the vector.
	if (_device >= m_devices.size()) {
		ATA_ERROR("Request device info out of range: " << _device << " >= " << m_devices.size());
		return audio::orchestra::DeviceInfo();
	}
	return m_devices[_device];
}
|
||||
|
||||
/**
 * Close the current stream.
 * @return always audio::orchestra::error_none (closing is not implemented yet).
 */
enum audio::orchestra::error audio::orchestra::api::CoreIos::closeStream(void) {
	ATA_INFO("Close Stream");
	// Can not close the stream now...
	return audio::orchestra::error_none;
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::CoreIos::startStream(void) {
|
||||
ATA_INFO("Start Stream");
|
||||
// TODO : Check return ...
|
||||
audio::orchestra::Api::startStream();
|
||||
OSStatus status = AudioOutputUnitStart(m_private->audioUnit);
|
||||
// Can not close the stream now...
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::CoreIos::stopStream(void) {
|
||||
ATA_INFO("Stop stream");
|
||||
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
|
||||
// Can not close the stream now...
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::CoreIos::abortStream(void) {
|
||||
ATA_INFO("Abort Stream");
|
||||
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
|
||||
// Can not close the stream now...
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
/**
 * Fill one output chunk by invoking the user callback.
 * @param _data Device output buffer to fill.
 * @param _nbChunk Number of frames requested for this pass.
 * @param _time Timestamp supplied by the RemoteIO callback.
 */
void audio::orchestra::api::CoreIos::callBackEvent(void* _data,
                                                   int32_t _nbChunk,
                                                   const audio::Time& _time) {
	int32_t doStopStream = 0;
	std::vector<enum audio::orchestra::status> status;
	if (m_doConvertBuffer[modeToIdTable(audio::orchestra::mode_output)] == true) {
		// Conversion path: user renders into the intermediate buffer, which is
		// then converted (format/channels) into the device buffer.
		doStopStream = m_callback(nullptr,
		                          audio::Time(),
		                          &m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0],
		                          _time,
		                          _nbChunk,
		                          status);
		convertBuffer((char*)_data, &m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0], m_convertInfo[modeToIdTable(audio::orchestra::mode_output)]);
	} else {
		// Direct path: the device buffer is handed to the callback itself.
		doStopStream = m_callback(_data,
		                          _time,
		                          nullptr,
		                          audio::Time(),
		                          _nbChunk,
		                          status);
	}
	if (doStopStream == 2) {
		// A return value of 2 from the user callback requests an abort.
		abortStream();
		return;
	}
	audio::orchestra::Api::tickStreamTime();
}
|
||||
|
||||
|
||||
/**
 * RemoteIO render callback: forwards each requested buffer to the CoreIos
 * instance carried in @p _userData.
 * @return noErr on success, -1 when no instance is available.
 */
static OSStatus playbackCallback(void *_userData,
                                 AudioUnitRenderActionFlags* _ioActionFlags,
                                 const AudioTimeStamp* _inTime,
                                 uint32_t _inBusNumber,
                                 uint32_t _inNumberFrames,
                                 AudioBufferList* _ioData) {
	if (_userData == nullptr) {
		ATA_ERROR("callback event ... nullptr pointer");
		return -1;
	}
	// Fix: renamed the typo'd local `tmpTimeime`.
	audio::Time tmpTime;
	if (_inTime != nullptr) {
		// NOTE(review): mHostTime is in mach host ticks, not necessarily
		// nanoseconds — confirm the intended conversion.
		tmpTime = audio::Time(_inTime->mHostTime/1000000000LL, _inTime->mHostTime%1000000000LL);
	}
	audio::orchestra::api::CoreIos* myClass = static_cast<audio::orchestra::api::CoreIos*>(_userData);
	// get all requested buffer :
	// Fix: unsigned loop index — mNumberBuffers is unsigned, the previous
	// int32_t counter mixed signed/unsigned in the comparison.
	for (uint32_t iii=0; iii < _ioData->mNumberBuffers; ++iii) {
		AudioBuffer buffer = _ioData->mBuffers[iii];
		// Bytes -> frames: 2 bytes per int16 sample, 2 channels (stereo).
		int32_t numberFrame = buffer.mDataByteSize/2/*stereo*/ /sizeof(int16_t);
		ATA_VERBOSE("request data size: " << numberFrame << " busNumber=" << _inBusNumber);
		myClass->callBackEvent(buffer.mData, numberFrame, tmpTime);
	}
	return noErr;
}
|
||||
|
||||
|
||||
/**
 * Open the RemoteIO output unit and configure the conversion pipeline.
 * Only output mode is supported. The function first configures the internal
 * conversion state, then creates and initializes the RemoteIO audio unit.
 * NOTE(review): AudioUnit failures below are only logged; `ret` stays true,
 * so the open "succeeds" even if the unit could not be configured — confirm
 * whether that is intentional best-effort behavior.
 * @return true when the internal configuration succeeded.
 */
bool audio::orchestra::api::CoreIos::probeDeviceOpen(uint32_t _device,
                                                     audio::orchestra::mode _mode,
                                                     uint32_t _channels,
                                                     uint32_t _firstChannel,
                                                     uint32_t _sampleRate,
                                                     audio::format _format,
                                                     uint32_t *_bufferSize,
                                                     const audio::orchestra::StreamOptions& _options) {
	ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
	if (_mode != audio::orchestra::mode_output) {
		ATA_ERROR("Can not start a device input or duplex for CoreIos ...");
		return false;
	}
	bool ret = true;

	// configure Airtaudio internal configuration:
	m_userFormat = _format;
	m_nUserChannels[modeToIdTable(_mode)] = _channels;
	m_bufferSize = 8192;
	m_sampleRate = _sampleRate;
	m_doByteSwap[modeToIdTable(_mode)] = false; // for endienness ...

	// TODO : For now, we write it in hard ==> to be update later ...
	// Device side is fixed: interleaved stereo int16.
	m_deviceFormat[modeToIdTable(_mode)] = audio::format_int16;
	m_nDeviceChannels[modeToIdTable(_mode)] = 2;
	m_deviceInterleaved[modeToIdTable(_mode)] = true;

	// A conversion stage is needed whenever user format, channel count or
	// interleaving differs from the device configuration above.
	m_doConvertBuffer[modeToIdTable(_mode)] = false;
	if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (    m_deviceInterleaved[modeToIdTable(_mode)] == false
	     && m_nUserChannels[modeToIdTable(_mode)] > 1) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (m_doConvertBuffer[modeToIdTable(_mode)] == true) {
		// Allocate necessary internal buffers.
		uint64_t bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
		m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
		if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
			ATA_ERROR("error allocating user buffer memory.");
		}
		setConvertInfo(_mode, _firstChannel);
	}
	ATA_INFO("device format : " << m_deviceFormat[modeToIdTable(_mode)] << " user format : " << m_userFormat);
	ATA_INFO("device channels : " << m_nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_nUserChannels[modeToIdTable(_mode)]);
	ATA_INFO("do convert buffer : " << m_doConvertBuffer[modeToIdTable(_mode)]);
	if (ret == false) {
		ATA_ERROR("Can not open device.");
	}

	// Configure IOs interface:
	OSStatus status;

	// Describe audio component
	AudioComponentDescription desc;
	desc.componentType = kAudioUnitType_Output;
	desc.componentSubType = kAudioUnitSubType_RemoteIO;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;

	// Get component
	AudioComponent inputComponent = AudioComponentFindNext(nullptr, &desc);

	// Get audio units
	status = AudioComponentInstanceNew(inputComponent, &m_private->audioUnit);
	if (status != 0) {
		ATA_ERROR("can not create an audio intance...");
	}

	uint32_t flag = 1;
	// Enable IO for playback
	status = AudioUnitSetProperty(m_private->audioUnit,
	                              kAudioOutputUnitProperty_EnableIO,
	                              kAudioUnitScope_Output,
	                              kOutputBus,
	                              &flag,
	                              sizeof(flag));
	if (status != 0) {
		ATA_ERROR("can not request audio autorisation...");
	}

	// Describe format : interleaved stereo 16-bit PCM at 48 kHz
	// (matches the hard-coded device config above).
	AudioStreamBasicDescription audioFormat;
	audioFormat.mSampleRate = 48000.00;
	audioFormat.mFormatID = kAudioFormatLinearPCM;
	audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	audioFormat.mFramesPerPacket = 1; //
	audioFormat.mChannelsPerFrame = 2; // stereo
	audioFormat.mBitsPerChannel = sizeof(short) * 8;
	audioFormat.mBytesPerPacket = sizeof(short) * audioFormat.mChannelsPerFrame;
	audioFormat.mBytesPerFrame = sizeof(short) * audioFormat.mChannelsPerFrame;
	audioFormat.mReserved = 0;
	// Apply format
	status = AudioUnitSetProperty(m_private->audioUnit,
	                              kAudioUnitProperty_StreamFormat,
	                              kAudioUnitScope_Input,
	                              kOutputBus,
	                              &audioFormat,
	                              sizeof(audioFormat));
	if (status != 0) {
		ATA_ERROR("can not set stream properties...");
	}

	// Set output callback
	AURenderCallbackStruct callbackStruct;
	callbackStruct.inputProc = &playbackCallback;
	callbackStruct.inputProcRefCon = this;
	status = AudioUnitSetProperty(m_private->audioUnit,
	                              kAudioUnitProperty_SetRenderCallback,
	                              kAudioUnitScope_Global,
	                              kOutputBus,
	                              &callbackStruct,
	                              sizeof(callbackStruct));
	if (status != 0) {
		ATA_ERROR("can not set Callback...");
	}

	// Initialise
	status = AudioUnitInitialize(m_private->audioUnit);
	if (status != 0) {
		ATA_ERROR("can not initialize...");
	}
	return ret;
}
|
||||
|
||||
#endif
|
||||
|
1464
audio/orchestra/api/Ds.cpp
Normal file
1464
audio/orchestra/api/Ds.cpp
Normal file
File diff suppressed because it is too large
Load Diff
58
audio/orchestra/api/Ds.h
Normal file
58
audio/orchestra/api/Ds.h
Normal file
@@ -0,0 +1,58 @@
|
||||
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */

#if !defined(__AUDIO_ORCHESTRA_API_DS_H__) && defined(ORCHESTRA_BUILD_DS)
#define __AUDIO_ORCHESTRA_API_DS_H__

namespace audio {
	namespace orchestra {
		namespace api {
			// Forward declaration of the pimpl class.
			class DsPrivate;
			/// Windows DirectSound backend.
			class Ds: public audio::orchestra::Api {
				public:
					/// Factory used by the backend registry.
					static audio::orchestra::Api* create();
				public:
					Ds();
					virtual ~Ds();
					/// @return the identifier of this backend (DirectSound).
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_ds;
					}
					uint32_t getDeviceCount();
					uint32_t getDefaultOutputDevice();
					uint32_t getDefaultInputDevice();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					void callbackEvent();
				private:
					/// Static trampoline used as the DirectSound worker entry point.
					static void dsCallbackEvent(void *_userData);
					// NOTE(review): sibling headers (Core.h, Asio.h) use
					// std::shared_ptr — confirm whether std11 is still required here.
					std11::shared_ptr<DsPrivate> m_private;
					bool m_coInitialized;
					bool m_buffersRolling;
					long m_duplexPrerollBytes;
					bool probeDeviceOpen(uint32_t _device,
					                     enum audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     enum audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}

#endif
|
63
audio/orchestra/api/Dummy.cpp
Normal file
63
audio/orchestra/api/Dummy.cpp
Normal file
@@ -0,0 +1,63 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#if defined(ORCHESTRA_BUILD_DUMMY)
|
||||
#include <audio/orchestra/api/Dummy.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
|
||||
#undef __class__
|
||||
#define __class__ "api::Dummy"
|
||||
|
||||
audio::orchestra::Api* audio::orchestra::api::Dummy::create() {
|
||||
return new audio::orchestra::api::Dummy();
|
||||
}
|
||||
|
||||
|
||||
audio::orchestra::api::Dummy::Dummy() {
|
||||
ATA_WARNING("This class provides no functionality.");
|
||||
}
|
||||
|
||||
uint32_t audio::orchestra::api::Dummy::getDeviceCount() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
audio::orchestra::DeviceInfo audio::orchestra::api::Dummy::getDeviceInfo(uint32_t _device) {
|
||||
(void)_device;
|
||||
return audio::orchestra::DeviceInfo();
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Dummy::closeStream() {
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Dummy::startStream() {
|
||||
// TODO : Check return ...
|
||||
audio::orchestra::Api::startStream();
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Dummy::stopStream() {
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Dummy::abortStream() {
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
bool audio::orchestra::api::Dummy::probeDeviceOpen(uint32_t _device,
|
||||
audio::orchestra::mode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
audio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
const audio::orchestra::StreamOptions& _options) {
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
45
audio/orchestra/api/Dummy.h
Normal file
45
audio/orchestra/api/Dummy.h
Normal file
@@ -0,0 +1,45 @@
|
||||
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */

#if !defined(__AUDIO_ORCHESTRA_DUMMY__) && defined(ORCHESTRA_BUILD_DUMMY)
#define __AUDIO_ORCHESTRA_DUMMY__

#include <audio/orchestra/Interface.h>

namespace audio {
	namespace orchestra {
		namespace api {
			/// Placeholder backend exposing no devices; used when no real API is built.
			class Dummy: public audio::orchestra::Api {
				public:
					/// Factory used by the backend registry.
					static audio::orchestra::Api* create();
				public:
					Dummy();
					/// @return the identifier of this backend (dummy).
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_dummy;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
				private:
					/// Always fails: no device can be opened on the dummy backend.
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}

#endif
|
734
audio/orchestra/api/Jack.cpp
Normal file
734
audio/orchestra/api/Jack.cpp
Normal file
@@ -0,0 +1,734 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
// must run before :
|
||||
#if defined(ORCHESTRA_BUILD_JACK)
|
||||
#include <unistd.h>
|
||||
#include <limits.h>
|
||||
#include <iostream>
|
||||
#include <audio/orchestra/Interface.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
#include <string.h>
|
||||
#include <etk/thread/tools.h>
|
||||
|
||||
#undef __class__
|
||||
#define __class__ "api::Jack"
|
||||
|
||||
/// Factory used by the backend registry: returns a fresh Jack instance.
audio::orchestra::Api* audio::orchestra::api::Jack::create() {
	return new audio::orchestra::api::Jack();
}
|
||||
|
||||
|
||||
// JACK is a low-latency audio server, originally written for the
|
||||
// GNU/Linux operating system and now also ported to OS-X. It can
|
||||
// connect a number of different applications to an audio device, as
|
||||
// well as allowing them to share audio between themselves.
|
||||
//
|
||||
// When using JACK with RtAudio, "devices" refer to JACK clients that
|
||||
// have ports connected to the server. The JACK server is typically
|
||||
// started in a terminal as follows:
|
||||
//
|
||||
// .jackd -d alsa -d hw:0
|
||||
//
|
||||
// or through an interface program such as qjackctl. Many of the
|
||||
// parameters normally set for a stream are fixed by the JACK server
|
||||
// and can be specified when the JACK server is started. In
|
||||
// particular,
|
||||
//
|
||||
// jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
|
||||
// jackd -r -d alsa -r 48000
|
||||
//
|
||||
// specifies a sample rate of 44100 Hz, a buffer size of 512 sample
|
||||
// frames, and number of buffers = 4. Once the server is running, it
|
||||
// is not possible to override these values. If the values are not
|
||||
// specified in the command-line, the JACK server uses default values.
|
||||
//
|
||||
// The JACK server does not have to be running when an instance of
|
||||
// audio::orchestra::Jack is created, though the function getDeviceCount() will
|
||||
// report 0 devices found until JACK has been started. When no
|
||||
// devices are available (i.e., the JACK server is not running), a
|
||||
// stream cannot be opened.
|
||||
|
||||
#include <jack/jack.h>
|
||||
#include <unistd.h>
|
||||
#include <cstdio>
|
||||
|
||||
|
||||
namespace audio {
	namespace orchestra {
		namespace api {
			/// Private (pimpl) state for the JACK backend.
			class JackPrivate {
				public:
					// Connection to the JACK server (null until opened).
					jack_client_t *client;
					// Per-direction port arrays; index 0/1 follow the mode table.
					jack_port_t **ports[2];
					std::string deviceName[2];
					// Per-direction xrun flags set by the server callbacks.
					bool xrun[2];
					// NOTE(review): std11 compatibility alias — sibling code uses
					// std::; confirm whether this is still required.
					std11::condition_variable condition;
					int32_t drainCounter; // Tracks callback counts when draining
					bool internalDrain; // Indicates if stop is initiated from callback or not.

					JackPrivate() :
					  client(0),
					  drainCounter(0),
					  internalDrain(false) {
						ports[0] = 0;
						ports[1] = 0;
						xrun[0] = false;
						xrun[1] = false;
					}
			};
		}
	}
}
|
||||
|
||||
// Construct the backend with a fresh private-state object; no JACK
// connection is made until probeDeviceOpen() is called.
audio::orchestra::api::Jack::Jack() :
  m_private(new audio::orchestra::api::JackPrivate()) {
	// Nothing to do here.
}
|
||||
|
||||
// Destructor: make sure the stream is released if the user forgot to
// close it explicitly.
audio::orchestra::api::Jack::~Jack() {
	if (m_state == audio::orchestra::state_closed) {
		return;
	}
	closeStream();
}
|
||||
|
||||
/**
 * @brief Count the JACK "devices" (distinct client-name prefixes of the
 * ports currently published on the server).
 * @return Number of distinct clients, or 0 if no JACK server is reachable.
 */
uint32_t audio::orchestra::api::Jack::getDeviceCount() {
	// See if we can become a jack client.
	jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption;
	jack_status_t *status = nullptr;
	jack_client_t *client = jack_client_open("orchestraJackCount", options, status);
	if (client == nullptr) {
		// No JACK server running (or connection refused): report no devices.
		return 0;
	}
	const char **ports;
	std::string port, previousPort;
	uint32_t nChannels = 0, nDevices = 0;
	ports = jack_get_ports(client, nullptr, nullptr, 0);
	if (ports) {
		// Parse the port names up to the first colon (:). Each distinct
		// "client" prefix counts as one device.
		size_t iColon = 0;
		do {
			port = (char *) ports[ nChannels ];
			iColon = port.find(":");
			if (iColon != std::string::npos) {
				// Fix: truncate BEFORE the colon, consistent with
				// getDeviceInfo()/probeDeviceOpen() (the original kept the
				// ':' here; the count is unchanged, only the prefix form).
				port = port.substr(0, iColon);
				if (port != previousPort) {
					nDevices++;
					previousPort = port;
				}
			}
		} while (ports[++nChannels]);
		free(ports);
	}
	jack_client_close(client);
	return nDevices;
}
|
||||
|
||||
/**
 * @brief Probe one JACK "device" (client-name prefix) and fill a DeviceInfo.
 *
 * Opens a throw-away JACK client ("orchestraJackInfo"), enumerates all
 * ports, identifies the _device-th distinct client prefix, and reports
 * its channel counts, the server sample rate, and the native format.
 *
 * @param _device Zero-based device index (ordering matches getDeviceCount()).
 * @return DeviceInfo with probed==true on success; probed==false when the
 *         server is unreachable, the index is invalid, or no channels exist.
 */
audio::orchestra::DeviceInfo audio::orchestra::api::Jack::getDeviceInfo(uint32_t _device) {
	audio::orchestra::DeviceInfo info;
	info.probed = false;
	jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption
	jack_status_t *status = nullptr;
	jack_client_t *client = jack_client_open("orchestraJackInfo", options, status);
	if (client == nullptr) {
		ATA_ERROR("Jack server not found or connection error!");
		// TODO : audio::orchestra::error_warning;
		return info;
	}
	const char **ports;
	std::string port, previousPort;
	uint32_t nPorts = 0, nDevices = 0;
	ports = jack_get_ports(client, nullptr, nullptr, 0);
	if (ports) {
		// Parse the port names up to the first colon (:).
		size_t iColon = 0;
		do {
			port = (char *) ports[ nPorts ];
			iColon = port.find(":");
			if (iColon != std::string::npos) {
				// Keep only the client-name prefix; remember it when it is
				// the requested device index.
				port = port.substr(0, iColon);
				if (port != previousPort) {
					if (nDevices == _device) {
						info.name = port;
					}
					nDevices++;
					previousPort = port;
				}
			}
		} while (ports[++nPorts]);
		// NOTE(review): JACK documentation recommends jack_free() for the
		// result of jack_get_ports(); free() matches the original code.
		free(ports);
	}
	if (_device >= nDevices) {
		jack_client_close(client);
		ATA_ERROR("device ID is invalid!");
		// TODO : audio::orchestra::error_invalidUse;
		return info;
	}
	// Get the current jack server sample rate.
	info.sampleRates.clear();
	info.sampleRates.push_back(jack_get_sample_rate(client));
	// Count the available ports containing the client name as device
	// channels. Jack "input ports" equal RtAudio output channels.
	uint32_t nChannels = 0;
	// NOTE(review): info.name is passed as a regex pattern here; names
	// containing regex metacharacters may mismatch — inherited behavior.
	ports = jack_get_ports(client, info.name.c_str(), nullptr, JackPortIsInput);
	if (ports) {
		while (ports[ nChannels ]) {
			nChannels++;
		}
		free(ports);
		info.outputChannels = nChannels;
	}
	// Jack "output ports" equal RtAudio input channels.
	nChannels = 0;
	ports = jack_get_ports(client, info.name.c_str(), nullptr, JackPortIsOutput);
	if (ports) {
		while (ports[ nChannels ]) {
			nChannels++;
		}
		free(ports);
		info.inputChannels = nChannels;
	}
	if (info.outputChannels == 0 && info.inputChannels == 0) {
		jack_client_close(client);
		ATA_ERROR("error determining Jack input/output channels!");
		// TODO : audio::orchestra::error_warning;
		return info;
	}
	// If device opens for both playback and capture, we determine the channels.
	if (info.outputChannels > 0 && info.inputChannels > 0) {
		info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
	}
	// Jack always uses 32-bit floats.
	info.nativeFormats.push_back(audio::format_float);
	// Jack doesn't provide default devices so we'll use the first available one.
	if (    _device == 0
	     && info.outputChannels > 0) {
		info.isDefaultOutput = true;
	}
	if (    _device == 0
	     && info.inputChannels > 0) {
		info.isDefaultInput = true;
	}
	jack_client_close(client);
	info.probed = true;
	return info;
}
|
||||
|
||||
// Trampoline installed via jack_set_process_callback(): forwards the
// real-time process tick to the Jack instance carried in _userData.
// Returns 0 to keep the client active, non-zero to remove it.
int32_t audio::orchestra::api::Jack::jackCallbackHandler(jack_nframes_t _nframes, void* _userData) {
	ATA_VERBOSE("Jack callback: [BEGIN] " << uint64_t(_userData));
	audio::orchestra::api::Jack* self = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
	if (self->callbackEvent((uint64_t)_nframes) == true) {
		ATA_VERBOSE("Jack callback: [END] 0");
		return 0;
	}
	ATA_VERBOSE("Jack callback: [END] 1");
	return 1;
}
|
||||
|
||||
// This function will be called by a spawned thread when the Jack
|
||||
// server signals that it is shutting down. It is necessary to handle
|
||||
// it this way because the jackShutdown() function must return before
|
||||
// the jack_deactivate() function (in closeStream()) will return.
|
||||
void audio::orchestra::api::Jack::jackCloseStream(void* _userData) {
|
||||
etk::thread::setName("Jack_closeStream");
|
||||
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
|
||||
myClass->closeStream();
|
||||
}
|
||||
|
||||
void audio::orchestra::api::Jack::jackShutdown(void* _userData) {
|
||||
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
|
||||
// Check current stream state. If stopped, then we'll assume this
|
||||
// was called as a result of a call to audio::orchestra::api::Jack::stopStream (the
|
||||
// deactivation of a client handle causes this function to be called).
|
||||
// If not, we'll assume the Jack server is shutting down or some
|
||||
// other problem occurred and we should close the stream.
|
||||
if (myClass->isStreamRunning() == false) {
|
||||
return;
|
||||
}
|
||||
new std11::thread(&audio::orchestra::api::Jack::jackCloseStream, _userData);
|
||||
ATA_ERROR("The Jack server is shutting down this client ... stream stopped and closed!!");
|
||||
}
|
||||
|
||||
int32_t audio::orchestra::api::Jack::jackXrun(void* _userData) {
|
||||
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
|
||||
if (myClass->m_private->ports[0]) {
|
||||
myClass->m_private->xrun[0] = true;
|
||||
}
|
||||
if (myClass->m_private->ports[1]) {
|
||||
myClass->m_private->xrun[1] = true;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * @brief Open (or extend to duplex) a stream on the given JACK device.
 *
 * Connects a JACK client (first call only), validates the device index,
 * channel count/offset and sample rate against the server, allocates the
 * user/device buffers and port arrays, installs the process/xrun/shutdown
 * callbacks (first call only), and registers one port per channel.
 *
 * @param _device Device index as enumerated by getDeviceCount().
 * @param _mode mode_output or mode_input; calling first with output and
 *        then with input upgrades the stream to mode_duplex.
 * @param _channels Number of channels to open.
 * @param _firstChannel Channel offset within the device's ports.
 * @param _sampleRate Must equal the running server's rate (JACK fixes it).
 * @param _format User sample format; anything but float triggers ATA_CRITICAL.
 * @param _bufferSize In/out: overwritten with the server's fixed buffer size.
 * @param _options Optional stream name used for jack_client_open().
 * @return true on success; false on any failure (resources released via `error:`).
 */
bool audio::orchestra::api::Jack::probeDeviceOpen(uint32_t _device,
                                                  audio::orchestra::mode _mode,
                                                  uint32_t _channels,
                                                  uint32_t _firstChannel,
                                                  uint32_t _sampleRate,
                                                  audio::format _format,
                                                  uint32_t* _bufferSize,
                                                  const audio::orchestra::StreamOptions& _options) {
	// Look for jack server and try to become a client (only do once per stream).
	jack_client_t *client = 0;
	if (    _mode == audio::orchestra::mode_output
	     || (    _mode == audio::orchestra::mode_input
	          && m_mode != audio::orchestra::mode_output)) {
		jack_options_t jackoptions = (jack_options_t) (JackNoStartServer); //JackNullOption;
		jack_status_t *status = nullptr;
		if (!_options.streamName.empty()) {
			client = jack_client_open(_options.streamName.c_str(), jackoptions, status);
		} else {
			client = jack_client_open("orchestraJack", jackoptions, status);
		}
		if (client == 0) {
			ATA_ERROR("Jack server not found or connection error!");
			return false;
		}
	} else {
		// The handle must have been created on an earlier pass.
		client = m_private->client;
	}
	// Enumerate all ports and locate the client-name prefix matching _device
	// (same parsing scheme as getDeviceCount()/getDeviceInfo()).
	const char **ports;
	std::string port, previousPort, deviceName;
	uint32_t nPorts = 0, nDevices = 0;
	ports = jack_get_ports(client, nullptr, nullptr, 0);
	if (ports) {
		// Parse the port names up to the first colon (:).
		size_t iColon = 0;
		do {
			port = (char *) ports[ nPorts ];
			iColon = port.find(":");
			if (iColon != std::string::npos) {
				port = port.substr(0, iColon);
				if (port != previousPort) {
					if (nDevices == _device) {
						deviceName = port;
					}
					nDevices++;
					previousPort = port;
				}
			}
		} while (ports[++nPorts]);
		free(ports);
	}
	if (_device >= nDevices) {
		// NOTE(review): the freshly opened client is not closed on this
		// early return — possible handle leak; confirm before changing.
		ATA_ERROR("device ID is invalid!");
		return false;
	}
	// Count the available ports containing the client name as device
	// channels. Jack "input ports" equal RtAudio output channels.
	uint32_t nChannels = 0;
	uint64_t flag = JackPortIsInput;
	if (_mode == audio::orchestra::mode_input) flag = JackPortIsOutput;
	ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag);
	if (ports) {
		while (ports[ nChannels ]) {
			nChannels++;
		}
		free(ports);
	}
	// Compare the jack ports for specified client to the requested number of channels.
	if (nChannels < (_channels + _firstChannel)) {
		ATA_ERROR("requested number of channels (" << _channels << ") + offset (" << _firstChannel << ") not found for specified device (" << _device << ":" << deviceName << ").");
		return false;
	}
	// Check the jack server sample rate.
	uint32_t jackRate = jack_get_sample_rate(client);
	if (_sampleRate != jackRate) {
		jack_client_close(client);
		ATA_ERROR("the requested sample rate (" << _sampleRate << ") is different than the JACK server rate (" << jackRate << ").");
		return false;
	}
	m_sampleRate = jackRate;
	// Get the latency of the JACK port.
	// NOTE(review): ports is dereferenced below without a null check, unlike
	// every other jack_get_ports() call in this file — confirm/harden.
	ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag);
	if (ports[ _firstChannel ]) {
		// Added by Ge Wang
		jack_latency_callback_mode_t cbmode = (_mode == audio::orchestra::mode_input ? JackCaptureLatency : JackPlaybackLatency);
		// the range (usually the min and max are equal)
		jack_latency_range_t latrange; latrange.min = latrange.max = 0;
		// get the latency range
		jack_port_get_latency_range(jack_port_by_name(client, ports[_firstChannel]), cbmode, &latrange);
		// be optimistic, use the min!
		m_latency[modeToIdTable(_mode)] = latrange.min;
		//m_latency[modeToIdTable(_mode)] = jack_port_get_latency(jack_port_by_name(client, ports[ _firstChannel ]));
	}
	free(ports);
	// The jack server always uses 32-bit floating-point data.
	m_deviceFormat[modeToIdTable(_mode)] = audio::format_float;
	m_userFormat = _format;
	// Jack always uses non-interleaved buffers.
	m_deviceInterleaved[modeToIdTable(_mode)] = false;
	// Jack always provides host byte-ordered data.
	m_doByteSwap[modeToIdTable(_mode)] = false;
	// Get the buffer size. The buffer size and number of buffers
	// (periods) is set when the jack server is started.
	m_bufferSize = (int) jack_get_buffer_size(client);
	*_bufferSize = m_bufferSize;
	m_nDeviceChannels[modeToIdTable(_mode)] = _channels;
	m_nUserChannels[modeToIdTable(_mode)] = _channels;
	// Set flags for buffer conversion.
	m_doConvertBuffer[modeToIdTable(_mode)] = false;
	if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
		ATA_CRITICAL("Can not update format ==> use RIVER lib for this ...");
	}
	if (    m_deviceInterleaved[modeToIdTable(_mode)] == false
	     && m_nUserChannels[modeToIdTable(_mode)] > 1) {
		ATA_ERROR("Reorder channel for the interleaving properties ...");
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	// Allocate our JackHandle structure for the stream.
	m_private->client = client;
	m_private->deviceName[modeToIdTable(_mode)] = deviceName;
	// Allocate necessary internal buffers.
	uint64_t bufferBytes;
	bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
	ATA_VERBOSE("allocate : nbChannel=" << m_nUserChannels[modeToIdTable(_mode)] << " bufferSize=" << *_bufferSize << " format=" << m_deviceFormat[modeToIdTable(_mode)] << "=" << audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]));
	m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
	if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
		ATA_ERROR("error allocating user buffer memory.");
		goto error;
	}
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		// Size the shared device-side staging buffer; for duplex, reuse the
		// existing buffer when it is already large enough.
		bool makeBuffer = true;
		if (_mode == audio::orchestra::mode_output) {
			bufferBytes = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
		} else { // _mode == audio::orchestra::mode_input
			bufferBytes = m_nDeviceChannels[1] * audio::getFormatBytes(m_deviceFormat[1]);
			if (m_mode == audio::orchestra::mode_output && m_deviceBuffer) {
				uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
				if (bufferBytes < bytesOut) {
					makeBuffer = false;
				}
			}
		}
		if (makeBuffer) {
			bufferBytes *= *_bufferSize;
			if (m_deviceBuffer) free(m_deviceBuffer);
			m_deviceBuffer = (char *) calloc(bufferBytes, 1);
			if (m_deviceBuffer == nullptr) {
				ATA_ERROR("error allocating device buffer memory.");
				goto error;
			}
		}
	}
	// Allocate memory for the Jack ports (channels) identifiers.
	m_private->ports[modeToIdTable(_mode)] = (jack_port_t **) malloc (sizeof (jack_port_t *) * _channels);
	if (m_private->ports[modeToIdTable(_mode)] == nullptr) {
		ATA_ERROR("error allocating port memory.");
		goto error;
	}
	m_device[modeToIdTable(_mode)] = _device;
	m_channelOffset[modeToIdTable(_mode)] = _firstChannel;
	m_state = audio::orchestra::state_stopped;
	if (    m_mode == audio::orchestra::mode_output
	     && _mode == audio::orchestra::mode_input) {
		// We had already set up the stream for output.
		m_mode = audio::orchestra::mode_duplex;
	} else {
		// First pass: record the mode and install the JACK callbacks once.
		m_mode = _mode;
		jack_set_process_callback(m_private->client, &audio::orchestra::api::Jack::jackCallbackHandler, this);
		jack_set_xrun_callback(m_private->client, &audio::orchestra::api::Jack::jackXrun, this);
		jack_on_shutdown(m_private->client, &audio::orchestra::api::Jack::jackShutdown, this);
	}
	// Register our ports.
	char label[64];
	if (_mode == audio::orchestra::mode_output) {
		for (uint32_t i=0; i<m_nUserChannels[0]; i++) {
			snprintf(label, 64, "outport %d", i);
			m_private->ports[0][i] = jack_port_register(m_private->client,
			                                            (const char *)label,
			                                            JACK_DEFAULT_AUDIO_TYPE,
			                                            JackPortIsOutput,
			                                            0);
		}
	} else {
		for (uint32_t i=0; i<m_nUserChannels[1]; i++) {
			snprintf(label, 64, "inport %d", i);
			m_private->ports[1][i] = jack_port_register(m_private->client,
			                                            (const char *)label,
			                                            JACK_DEFAULT_AUDIO_TYPE,
			                                            JackPortIsInput,
			                                            0);
		}
	}
	// Setup the buffer conversion information structure. We don't use
	// buffers to do channel offsets, so we override that parameter
	// here.
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		setConvertInfo(_mode, 0);
	}
	return true;
error:
	// Unified failure path: release everything acquired above.
	jack_client_close(m_private->client);
	if (m_private->ports[0] != nullptr) {
		free(m_private->ports[0]);
		m_private->ports[0] = nullptr;
	}
	if (m_private->ports[1] != nullptr) {
		free(m_private->ports[1]);
		m_private->ports[1] = nullptr;
	}
	for (int32_t iii=0; iii<2; ++iii) {
		m_userBuffer[iii].clear();
	}
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = nullptr;
	}
	return false;
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Jack::closeStream() {
|
||||
if (m_state == audio::orchestra::state_closed) {
|
||||
ATA_ERROR("no open stream to close!");
|
||||
return audio::orchestra::error_warning;
|
||||
}
|
||||
if (m_private != nullptr) {
|
||||
if (m_state == audio::orchestra::state_running) {
|
||||
jack_deactivate(m_private->client);
|
||||
}
|
||||
jack_client_close(m_private->client);
|
||||
}
|
||||
if (m_private->ports[0] != nullptr) {
|
||||
free(m_private->ports[0]);
|
||||
m_private->ports[0] = nullptr;
|
||||
}
|
||||
if (m_private->ports[1] != nullptr) {
|
||||
free(m_private->ports[1]);
|
||||
m_private->ports[1] = nullptr;
|
||||
}
|
||||
for (int32_t i=0; i<2; i++) {
|
||||
m_userBuffer[i].clear();
|
||||
}
|
||||
if (m_deviceBuffer) {
|
||||
free(m_deviceBuffer);
|
||||
m_deviceBuffer = nullptr;
|
||||
}
|
||||
m_mode = audio::orchestra::mode_unknow;
|
||||
m_state = audio::orchestra::state_closed;
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
/**
 * @brief Activate the JACK client and wire our registered ports to the
 * selected device's ports (first nChannels ports, honoring the offset).
 * @return error_none on success, error_fail if the stream is invalid,
 *         error_warning if already running, error_systemError on any
 *         activation/connection failure (routed through `unlock:`).
 */
enum audio::orchestra::error audio::orchestra::api::Jack::startStream() {
	// TODO : Check return ...
	audio::orchestra::Api::startStream();
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_running) {
		ATA_ERROR("the stream is already running!");
		return audio::orchestra::error_warning;
	}
	// `result` doubles as the error flag for the unlock: epilogue:
	// 0 == success, anything else == systemError.
	int32_t result = jack_activate(m_private->client);
	if (result) {
		ATA_ERROR("unable to activate JACK client!");
		goto unlock;
	}
	const char **ports;
	// Get the list of available ports.
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		result = 1;
		// Playback: our output ports connect to the device's *input* ports.
		ports = jack_get_ports(m_private->client, m_private->deviceName[0].c_str(), nullptr, JackPortIsInput);
		if (ports == nullptr) {
			ATA_ERROR("error determining available JACK input ports!");
			goto unlock;
		}
		// Now make the port connections. Since RtAudio wasn't designed to
		// allow the user to select particular channels of a device, we'll
		// just open the first "nChannels" ports with offset.
		for (uint32_t i=0; i<m_nUserChannels[0]; i++) {
			result = 1;
			if (ports[ m_channelOffset[0] + i ])
				result = jack_connect(m_private->client, jack_port_name(m_private->ports[0][i]), ports[ m_channelOffset[0] + i ]);
			if (result) {
				free(ports);
				ATA_ERROR("error connecting output ports!");
				goto unlock;
			}
		}
		free(ports);
	}
	if (    m_mode == audio::orchestra::mode_input
	     || m_mode == audio::orchestra::mode_duplex) {
		result = 1;
		// Capture: the device's *output* ports connect to our input ports.
		ports = jack_get_ports(m_private->client, m_private->deviceName[1].c_str(), nullptr, JackPortIsOutput);
		if (ports == nullptr) {
			ATA_ERROR("error determining available JACK output ports!");
			goto unlock;
		}
		// Now make the port connections. See note above.
		for (uint32_t i=0; i<m_nUserChannels[1]; i++) {
			result = 1;
			if (ports[ m_channelOffset[1] + i ]) {
				result = jack_connect(m_private->client, ports[ m_channelOffset[1] + i ], jack_port_name(m_private->ports[1][i]));
			}
			if (result) {
				free(ports);
				ATA_ERROR("error connecting input ports!");
				goto unlock;
			}
		}
		free(ports);
	}
	// Reset the drain state machine and mark the stream running.
	m_private->drainCounter = 0;
	m_private->internalDrain = false;
	m_state = audio::orchestra::state_running;
unlock:
	if (result == 0) {
		return audio::orchestra::error_none;
	}
	return audio::orchestra::error_systemError;
}
|
||||
|
||||
/**
 * @brief Stop the stream. For output/duplex streams that are not already
 * draining, arm the drain counter and block until the process callback
 * signals completion, then deactivate the JACK client.
 * @return error_fail on invalid stream, error_warning if already stopped,
 *         error_none otherwise.
 */
enum audio::orchestra::error audio::orchestra::api::Jack::stopStream() {
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		return audio::orchestra::error_warning;
	}
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_private->drainCounter == 0) {
			// drainCounter == 2 tells callbackEvent() to write silence and
			// count up; once it exceeds 3 it notifies this condition.
			m_private->drainCounter = 2;
			std11::unique_lock<std11::mutex> lck(m_mutex);
			// NOTE(review): wait() has no predicate, so a spurious wakeup
			// (or a notify lost before this wait starts) would end the wait
			// early/late — confirm against the callback's notify path.
			m_private->condition.wait(lck);
		}
	}
	jack_deactivate(m_private->client);
	m_state = audio::orchestra::state_stopped;
	return audio::orchestra::error_none;
}
|
||||
|
||||
/**
 * @brief Stop the stream immediately, without draining pending output.
 * Setting drainCounter to 2 up front makes stopStream() skip its blocking
 * wait (its "drainCounter == 0" branch) and deactivate right away.
 * @return Result of stopStream(), or error_fail/error_warning as below.
 */
enum audio::orchestra::error audio::orchestra::api::Jack::abortStream() {
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		return audio::orchestra::error_warning;
	}
	m_private->drainCounter = 2;
	return stopStream();
}
|
||||
|
||||
// This function will be called by a spawned thread when the user
|
||||
// callback function signals that the stream should be stopped or
|
||||
// aborted. It is necessary to handle it this way because the
|
||||
// callbackEvent() function must return before the jack_deactivate()
|
||||
// function will return.
|
||||
static void jackStopStream(void* _userData) {
|
||||
etk::thread::setName("Jack_stopStream");
|
||||
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
|
||||
myClass->stopStream();
|
||||
}
|
||||
|
||||
/**
 * @brief Real-time process tick (called from jackCallbackHandler).
 *
 * Runs the user callback, moves audio between the user/device buffers and
 * the per-port JACK buffers, and drives the drain state machine used by
 * stopStream()/abortStream().
 *
 * Buffer layout: non-interleaved, one contiguous span of bufferBytes
 * (_nframes float samples) per channel, channel i at offset i*bufferBytes.
 *
 * @param _nframes Frame count for this cycle; must equal m_bufferSize.
 * @return true to keep the client running, false on a fatal condition.
 */
bool audio::orchestra::api::Jack::callbackEvent(uint64_t _nframes) {
	// Nothing to do while stopped/stopping; report success so JACK keeps us.
	if (    m_state == audio::orchestra::state_stopped
	     || m_state == audio::orchestra::state_stopping) {
		return true;
	}
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("the stream is closed ... this shouldn't happen!");
		return false;
	}
	if (m_bufferSize != _nframes) {
		ATA_ERROR("the JACK buffer size has changed ... cannot process!");
		return false;
	}
	// Check if we were draining the stream and signal is finished.
	if (m_private->drainCounter > 3) {
		m_state = audio::orchestra::state_stopping;
		if (m_private->internalDrain == true) {
			// Drain initiated from the callback itself: spawn a thread to
			// call stopStream() (cannot deactivate from inside the callback).
			new std11::thread(jackStopStream, this);
		} else {
			// Drain initiated by stopStream(): wake its blocking wait.
			m_private->condition.notify_one();
		}
		return true;
	}
	// Invoke user callback first, to get fresh output data.
	if (m_private->drainCounter == 0) {
		audio::Time streamTime = getStreamTime();
		std::vector<enum audio::orchestra::status> status;
		// Report (and clear) any xrun recorded by jackXrun() for the
		// directions this stream actually uses.
		if (m_mode != audio::orchestra::mode_input && m_private->xrun[0] == true) {
			status.push_back(audio::orchestra::status_underflow);
			m_private->xrun[0] = false;
		}
		if (m_mode != audio::orchestra::mode_output && m_private->xrun[1] == true) {
			status.push_back(audio::orchestra::status_overflow);
			m_private->xrun[1] = false;
		}
		// User callback: input buffer [1], output buffer [0].
		int32_t cbReturnValue = m_callback(&m_userBuffer[1][0],
		                                   streamTime,
		                                   &m_userBuffer[0][0],
		                                   streamTime,
		                                   m_bufferSize,
		                                   status);
		if (cbReturnValue == 2) {
			// Callback requested an immediate abort.
			m_state = audio::orchestra::state_stopping;
			m_private->drainCounter = 2;
			new std11::thread(jackStopStream, this);
			return true;
		}
		else if (cbReturnValue == 1) {
			// Callback requested a graceful stop after draining.
			m_private->drainCounter = 1;
			m_private->internalDrain = true;
		}
	}
	jack_default_audio_sample_t *jackbuffer;
	uint64_t bufferBytes = _nframes * sizeof(jack_default_audio_sample_t);
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_private->drainCounter > 1) { // write zeros to the output stream
			for (uint32_t i=0; i<m_nDeviceChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[0][i], (jack_nframes_t) _nframes);
				memset(jackbuffer, 0, bufferBytes);
			}
		} else if (m_doConvertBuffer[0]) {
			// Convert user format/layout into the device staging buffer first.
			convertBuffer(m_deviceBuffer, &m_userBuffer[0][0], m_convertInfo[0]);
			for (uint32_t i=0; i<m_nDeviceChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[0][i], (jack_nframes_t) _nframes);
				memcpy(jackbuffer, &m_deviceBuffer[i*bufferBytes], bufferBytes);
			}
		} else { // no buffer conversion
			for (uint32_t i=0; i<m_nUserChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[0][i], (jack_nframes_t) _nframes);
				memcpy(jackbuffer, &m_userBuffer[0][i*bufferBytes], bufferBytes);
			}
		}
		if (m_private->drainCounter) {
			// Draining: count the cycle and skip the capture side.
			m_private->drainCounter++;
			goto unlock;
		}
	}
	if (    m_mode == audio::orchestra::mode_input
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_doConvertBuffer[1]) {
			for (uint32_t i=0; i<m_nDeviceChannels[1]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[1][i], (jack_nframes_t) _nframes);
				memcpy(&m_deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes);
			}
			convertBuffer(&m_userBuffer[1][0], m_deviceBuffer, m_convertInfo[1]);
		} else {
			// no buffer conversion
			for (uint32_t i=0; i<m_nUserChannels[1]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[1][i], (jack_nframes_t) _nframes);
				memcpy(&m_userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes);
			}
		}
	}
unlock:
	audio::orchestra::Api::tickStreamTime();
	return true;
}
|
||||
|
||||
#endif
|
||||
|
58
audio/orchestra/api/Jack.h
Normal file
58
audio/orchestra/api/Jack.h
Normal file
@@ -0,0 +1,58 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#if !defined(__AUDIO_ORCHESTRA_API_JACK_H__) && defined(ORCHESTRA_BUILD_JACK)
|
||||
#define __AUDIO_ORCHESTRA_API_JACK_H__
|
||||
|
||||
#include <jack/jack.h>
|
||||
|
||||
namespace audio {
	namespace orchestra {
		namespace api {
			class JackPrivate;
			/**
			 * @brief JACK backend of the orchestra audio API
			 * (implementation of audio::orchestra::Api on top of libjack).
			 */
			class Jack: public audio::orchestra::Api {
				public:
					//! Factory used by the interface layer; caller owns the result.
					static audio::orchestra::Api* create();
				public:
					Jack();
					virtual ~Jack();
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_jack;
					}
					//! Number of distinct JACK clients currently publishing ports.
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					//! Graceful stop: drains pending output before deactivating.
					enum audio::orchestra::error stopStream();
					//! Immediate stop: skips the drain phase.
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					bool callbackEvent(uint64_t _nframes);
				private:
					// Static trampolines handed to libjack; _userData is `this`.
					static int32_t jackXrun(void* _userData);
					static void jackCloseStream(void* _userData);
					static void jackShutdown(void* _userData);
					static int32_t jackCallbackHandler(jack_nframes_t _nframes, void* _userData);
				private:
					std11::shared_ptr<JackPrivate> m_private; //!< backend-specific state (client, ports, drain machine)
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}
|
||||
|
||||
#endif
|
830
audio/orchestra/api/Oss.cpp
Normal file
830
audio/orchestra/api/Oss.cpp
Normal file
@@ -0,0 +1,830 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
|
||||
#if defined(ORCHESTRA_BUILD_OSS)
|
||||
#include <audio/orchestra/Interface.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
|
||||
#include <unistd.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
#include "soundcard.h"
|
||||
#include <errno.h>
|
||||
#include <math.h>
|
||||
|
||||
#undef __class__
|
||||
#define __class__ "api::Oss"
|
||||
|
||||
audio::orchestra::Api* audio::orchestra::api::Oss::create() {
	// Factory entry point used by the interface layer to instantiate
	// the OSS backend; caller takes ownership of the returned object.
	audio::orchestra::api::Oss* backend = new audio::orchestra::api::Oss();
	return backend;
}
|
||||
|
||||
static void *ossCallbackHandler(void* _userData);
|
||||
|
||||
|
||||
namespace audio {
	namespace orchestra {
		namespace api {
			/**
			 * @brief Internal state for the OSS backend.
			 * Index convention: [0] = playback side, [1] = capture side.
			 */
			class OssPrivate {
				public:
					int32_t id[2]; // device ids
					bool xrun[2]; //!< over/under-run flags per direction
					bool triggered; //!< whether the device trigger has been fired
					std11::condition_variable runnable; //!< wakes the worker thread
					std11::shared_ptr<std11::thread> thread; //!< background processing thread
					bool threadRunning; //!< worker-thread run flag
					OssPrivate():
					  triggered(false),
					  threadRunning(false) {
						id[0] = 0;
						id[1] = 0;
						xrun[0] = false;
						xrun[1] = false;
					}
			};
		}
	}
}
|
||||
|
||||
// Construct the backend with a fresh private-state object; no device is
// opened until a stream is probed.
audio::orchestra::api::Oss::Oss() :
  m_private(new audio::orchestra::api::OssPrivate()) {
	// Nothing to do here.
}
|
||||
|
||||
// Destructor: make sure the stream is released if the user forgot to
// close it explicitly.
audio::orchestra::api::Oss::~Oss() {
	if (m_state == audio::orchestra::state_closed) {
		return;
	}
	closeStream();
}
|
||||
|
||||
uint32_t audio::orchestra::api::Oss::getDeviceCount() {
|
||||
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
|
||||
if (mixerfd == -1) {
|
||||
ATA_ERROR("error opening '/dev/mixer'.");
|
||||
return 0;
|
||||
}
|
||||
oss_sysinfo sysinfo;
|
||||
if (ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo) == -1) {
|
||||
close(mixerfd);
|
||||
ATA_ERROR("error getting sysinfo, OSS version >= 4.0 is required.");
|
||||
return 0;
|
||||
}
|
||||
close(mixerfd);
|
||||
return sysinfo.numaudios;
|
||||
}
|
||||
|
||||
/**
 * @brief Probe one OSS device: channel capabilities, native formats, sample rates.
 * @param[in] _device Device index in [0, getDeviceCount()).
 * @return DeviceInfo with probed==true on success; probed==false on any failure.
 */
audio::orchestra::DeviceInfo audio::orchestra::api::Oss::getDeviceInfo(uint32_t _device) {
	// fix: the original declared 'rtaudio::DeviceInfo' here, which does not match
	// the declared return type of this function.
	audio::orchestra::DeviceInfo info;
	info.probed = false;
	int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
	if (mixerfd == -1) {
		ATA_ERROR("error opening '/dev/mixer'.");
		return info;
	}
	oss_sysinfo sysinfo;
	int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
	if (result == -1) {
		close(mixerfd);
		ATA_ERROR("error getting sysinfo, OSS version >= 4.0 is required.");
		return info;
	}
	unsigned nDevices = sysinfo.numaudios;
	if (nDevices == 0) {
		close(mixerfd);
		ATA_ERROR("no devices found!");
		return info;
	}
	if (_device >= nDevices) {
		close(mixerfd);
		ATA_ERROR("device ID is invalid!");
		return info;
	}
	oss_audioinfo ainfo;
	ainfo.dev = _device;
	result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
	close(mixerfd);
	if (result == -1) {
		ATA_ERROR("error getting device (" << ainfo.name << ") info.");
		error(audio::orchestra::error_warning);
		return info;
	}
	// Probe channels. fix: PCM_CAP_OUTPUT/INPUT/DUPLEX are the OSS capability
	// masks; the original text was damaged by a search/replace
	// ("PCM_CAP_audio::orchestra::mode_output" etc.).
	if (ainfo.caps & PCM_CAP_OUTPUT) {
		info.outputChannels = ainfo.max_channels;
	}
	if (ainfo.caps & PCM_CAP_INPUT) {
		info.inputChannels = ainfo.max_channels;
	}
	if (ainfo.caps & PCM_CAP_DUPLEX) {
		if (    info.outputChannels > 0
		     && info.inputChannels > 0) {
			info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
		}
	}
	// Probe data formats ... do for input
	uint64_t mask = ainfo.iformats;
	if (    mask & AFMT_S16_LE
	     || mask & AFMT_S16_BE) {
		info.nativeFormats.push_back(audio::format_int16);
	}
	if (mask & AFMT_S8) {
		info.nativeFormats.push_back(audio::format_int8);
	}
	if (    mask & AFMT_S32_LE
	     || mask & AFMT_S32_BE) {
		info.nativeFormats.push_back(audio::format_int32);
	}
	if (mask & AFMT_FLOAT) {
		info.nativeFormats.push_back(audio::format_float);
	}
	if (    mask & AFMT_S24_LE
	     || mask & AFMT_S24_BE) {
		info.nativeFormats.push_back(audio::format_int24);
	}
	// Check that we have at least one supported format.
	// fix: the original compared the vector itself against 0, which does not compile.
	if (info.nativeFormats.size() == 0) {
		ATA_ERROR("device (" << ainfo.name << ") data format not supported by RtAudio.");
		return info;
	}
	// Probe the supported sample rates: either the explicit rate list, or the
	// [min_rate, max_rate] span when the driver reports no discrete list.
	info.sampleRates.clear();
	if (ainfo.nrates) {
		for (uint32_t i=0; i<ainfo.nrates; i++) {
			for (uint32_t k=0; k<MAX_SAMPLE_RATES; k++) {
				if (ainfo.rates[i] == SAMPLE_RATES[k]) {
					info.sampleRates.push_back(SAMPLE_RATES[k]);
					break;
				}
			}
		}
	} else {
		// Check min and max rate values;
		for (uint32_t k=0; k<MAX_SAMPLE_RATES; k++) {
			if (    ainfo.min_rate <= (int) SAMPLE_RATES[k]
			     && ainfo.max_rate >= (int) SAMPLE_RATES[k]) {
				info.sampleRates.push_back(SAMPLE_RATES[k]);
			}
		}
	}
	if (info.sampleRates.size() == 0) {
		ATA_ERROR("no supported sample rates found for device (" << ainfo.name << ").");
	} else {
		info.probed = true;
		info.name = ainfo.name;
	}
	return info;
}
|
||||
|
||||
bool audio::orchestra::api::Oss::probeDeviceOpen(uint32_t _device,
|
||||
StreamMode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
rtaudio::format _format,
|
||||
uint32_t* _bufferSize,
|
||||
const audio::orchestra::StreamOptions& _options) {
|
||||
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
|
||||
if (mixerfd == -1) {
|
||||
ATA_ERROR("error opening '/dev/mixer'.");
|
||||
return false;
|
||||
}
|
||||
oss_sysinfo sysinfo;
|
||||
int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
|
||||
if (result == -1) {
|
||||
close(mixerfd);
|
||||
ATA_ERROR("error getting sysinfo, OSS version >= 4.0 is required.");
|
||||
return false;
|
||||
}
|
||||
unsigned nDevices = sysinfo.numaudios;
|
||||
if (nDevices == 0) {
|
||||
// This should not happen because a check is made before this function is called.
|
||||
close(mixerfd);
|
||||
ATA_ERROR("no devices found!");
|
||||
return false;
|
||||
}
|
||||
if (_device >= nDevices) {
|
||||
// This should not happen because a check is made before this function is called.
|
||||
close(mixerfd);
|
||||
ATA_ERROR("device ID is invalid!");
|
||||
return false;
|
||||
}
|
||||
oss_audioinfo ainfo;
|
||||
ainfo.dev = _device;
|
||||
result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
|
||||
close(mixerfd);
|
||||
if (result == -1) {
|
||||
ATA_ERROR("error getting device (" << ainfo.name << ") info.");
|
||||
return false;
|
||||
}
|
||||
// Check if device supports input or output
|
||||
if ( ( _mode == audio::orchestra::mode_output
|
||||
&& !(ainfo.caps & PCM_CAP_audio::orchestra::mode_output))
|
||||
|| ( _mode == audio::orchestra::mode_input
|
||||
&& !(ainfo.caps & PCM_CAP_audio::orchestra::mode_input))) {
|
||||
if (_mode == audio::orchestra::mode_output) {
|
||||
ATA_ERROR("device (" << ainfo.name << ") does not support output.");
|
||||
} else {
|
||||
ATA_ERROR("device (" << ainfo.name << ") does not support input.");
|
||||
}
|
||||
return false;
|
||||
}
|
||||
int32_t flags = 0;
|
||||
if (_mode == audio::orchestra::mode_output) {
|
||||
flags |= O_WRONLY;
|
||||
} else { // _mode == audio::orchestra::mode_input
|
||||
if ( m_mode == audio::orchestra::mode_output
|
||||
&& m_device[0] == _device) {
|
||||
// We just set the same device for playback ... close and reopen for duplex (OSS only).
|
||||
close(m_private->id[0]);
|
||||
m_private->id[0] = 0;
|
||||
if (!(ainfo.caps & PCM_CAP_audio::orchestra::mode_duplex)) {
|
||||
ATA_ERROR("device (" << ainfo.name << ") does not support duplex mode.");
|
||||
return false;
|
||||
}
|
||||
// Check that the number previously set channels is the same.
|
||||
if (m_nUserChannels[0] != _channels) {
|
||||
ATA_ERROR("input/output channels must be equal for OSS duplex device (" << ainfo.name << ").");
|
||||
return false;
|
||||
}
|
||||
flags |= O_RDWR;
|
||||
} else {
|
||||
flags |= O_RDONLY;
|
||||
}
|
||||
}
|
||||
// Set exclusive access if specified.
|
||||
if (_options.flags & RTAUDIO_HOG_DEVICE) {
|
||||
flags |= O_EXCL;
|
||||
}
|
||||
// Try to open the device.
|
||||
int32_t fd;
|
||||
fd = open(ainfo.devnode, flags, 0);
|
||||
if (fd == -1) {
|
||||
if (errno == EBUSY) {
|
||||
ATA_ERROR("device (" << ainfo.name << ") is busy.");
|
||||
} else {
|
||||
ATA_ERROR("error opening device (" << ainfo.name << ").");
|
||||
}
|
||||
return false;
|
||||
}
|
||||
// For duplex operation, specifically set this mode (this doesn't seem to work).
|
||||
/*
|
||||
if (flags | O_RDWR) {
|
||||
result = ioctl(fd, SNDCTL_DSP_SETaudio::orchestra::mode_duplex, nullptr);
|
||||
if (result == -1) {
|
||||
m_errorStream << "error setting duplex mode for device (" << ainfo.name << ").";
|
||||
m_errorText = m_errorStream.str();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
*/
|
||||
// Check the device channel support.
|
||||
m_nUserChannels[modeToIdTable(_mode)] = _channels;
|
||||
if (ainfo.max_channels < (int)(_channels + _firstChannel)) {
|
||||
close(fd);
|
||||
ATA_ERROR("the device (" << ainfo.name << ") does not support requested channel parameters.");
|
||||
return false;
|
||||
}
|
||||
// Set the number of channels.
|
||||
int32_t deviceChannels = _channels + _firstChannel;
|
||||
result = ioctl(fd, SNDCTL_DSP_CHANNELS, &deviceChannels);
|
||||
if ( result == -1
|
||||
|| deviceChannels < (int)(_channels + _firstChannel)) {
|
||||
close(fd);
|
||||
ATA_ERROR("error setting channel parameters on device (" << ainfo.name << ").");
|
||||
return false;
|
||||
}
|
||||
m_nDeviceChannels[modeToIdTable(_mode)] = deviceChannels;
|
||||
// Get the data format mask
|
||||
int32_t mask;
|
||||
result = ioctl(fd, SNDCTL_DSP_GETFMTS, &mask);
|
||||
if (result == -1) {
|
||||
close(fd);
|
||||
ATA_ERROR("error getting device (" << ainfo.name << ") data formats.");
|
||||
return false;
|
||||
}
|
||||
// Determine how to set the device format.
|
||||
m_userFormat = _format;
|
||||
int32_t deviceFormat = -1;
|
||||
m_doByteSwap[modeToIdTable(_mode)] = false;
|
||||
if (_format == RTAUDIO_SINT8) {
|
||||
if (mask & AFMT_S8) {
|
||||
deviceFormat = AFMT_S8;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
|
||||
}
|
||||
} else if (_format == RTAUDIO_SINT16) {
|
||||
if (mask & AFMT_S16_NE) {
|
||||
deviceFormat = AFMT_S16_NE;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
|
||||
} else if (mask & AFMT_S16_OE) {
|
||||
deviceFormat = AFMT_S16_OE;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
|
||||
m_doByteSwap[modeToIdTable(_mode)] = true;
|
||||
}
|
||||
} else if (_format == RTAUDIO_SINT24) {
|
||||
if (mask & AFMT_S24_NE) {
|
||||
deviceFormat = AFMT_S24_NE;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
|
||||
} else if (mask & AFMT_S24_OE) {
|
||||
deviceFormat = AFMT_S24_OE;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
|
||||
m_doByteSwap[modeToIdTable(_mode)] = true;
|
||||
}
|
||||
} else if (_format == RTAUDIO_SINT32) {
|
||||
if (mask & AFMT_S32_NE) {
|
||||
deviceFormat = AFMT_S32_NE;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
|
||||
} else if (mask & AFMT_S32_OE) {
|
||||
deviceFormat = AFMT_S32_OE;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
|
||||
m_doByteSwap[modeToIdTable(_mode)] = true;
|
||||
}
|
||||
}
|
||||
if (deviceFormat == -1) {
|
||||
// The user requested format is not natively supported by the device.
|
||||
if (mask & AFMT_S16_NE) {
|
||||
deviceFormat = AFMT_S16_NE;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
|
||||
} else if (mask & AFMT_S32_NE) {
|
||||
deviceFormat = AFMT_S32_NE;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
|
||||
} else if (mask & AFMT_S24_NE) {
|
||||
deviceFormat = AFMT_S24_NE;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
|
||||
} else if (mask & AFMT_S16_OE) {
|
||||
deviceFormat = AFMT_S16_OE;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
|
||||
m_doByteSwap[modeToIdTable(_mode)] = true;
|
||||
} else if (mask & AFMT_S32_OE) {
|
||||
deviceFormat = AFMT_S32_OE;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
|
||||
m_doByteSwap[modeToIdTable(_mode)] = true;
|
||||
} else if (mask & AFMT_S24_OE) {
|
||||
deviceFormat = AFMT_S24_OE;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
|
||||
m_doByteSwap[modeToIdTable(_mode)] = true;
|
||||
} else if (mask & AFMT_S8) {
|
||||
deviceFormat = AFMT_S8;
|
||||
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
|
||||
}
|
||||
}
|
||||
if (m_deviceFormat[modeToIdTable(_mode)] == 0) {
|
||||
// This really shouldn't happen ...
|
||||
close(fd);
|
||||
ATA_ERROR("device (" << ainfo.name << ") data format not supported by RtAudio.");
|
||||
return false;
|
||||
}
|
||||
// Set the data format.
|
||||
int32_t temp = deviceFormat;
|
||||
result = ioctl(fd, SNDCTL_DSP_SETFMT, &deviceFormat);
|
||||
if ( result == -1
|
||||
|| deviceFormat != temp) {
|
||||
close(fd);
|
||||
ATA_ERROR("error setting data format on device (" << ainfo.name << ").");
|
||||
return false;
|
||||
}
|
||||
// Attempt to set the buffer size. According to OSS, the minimum
|
||||
// number of buffers is two. The supposed minimum buffer size is 16
|
||||
// bytes, so that will be our lower bound. The argument to this
|
||||
// call is in the form 0xMMMMSSSS (hex), where the buffer size (in
|
||||
// bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
|
||||
// We'll check the actual value used near the end of the setup
|
||||
// procedure.
|
||||
int32_t ossBufferBytes = *_bufferSize * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]) * deviceChannels;
|
||||
if (ossBufferBytes < 16) {
|
||||
ossBufferBytes = 16;
|
||||
}
|
||||
int32_t buffers = 0;
|
||||
buffers = _options.numberOfBuffers;
|
||||
if (_options.flags.m_minimizeLatency == true) {
|
||||
buffers = 2;
|
||||
}
|
||||
if (buffers < 2) {
|
||||
buffers = 3;
|
||||
}
|
||||
temp = ((int) buffers << 16) + (int)(log10((double)ossBufferBytes) / log10(2.0));
|
||||
result = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &temp);
|
||||
if (result == -1) {
|
||||
close(fd);
|
||||
ATA_ERROR("error setting buffer size on device (" << ainfo.name << ").");
|
||||
return false;
|
||||
}
|
||||
m_nBuffers = buffers;
|
||||
// Save buffer size (in sample frames).
|
||||
*_bufferSize = ossBufferBytes / (audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]) * deviceChannels);
|
||||
m_bufferSize = *_bufferSize;
|
||||
// Set the sample rate.
|
||||
int32_t srate = _sampleRate;
|
||||
result = ioctl(fd, SNDCTL_DSP_SPEED, &srate);
|
||||
if (result == -1) {
|
||||
close(fd);
|
||||
ATA_ERROR("error setting sample rate (" << _sampleRate << ") on device (" << ainfo.name << ").");
|
||||
return false;
|
||||
}
|
||||
// Verify the sample rate setup worked.
|
||||
if (abs(srate - _sampleRate) > 100) {
|
||||
close(fd);
|
||||
ATA_ERROR("device (" << ainfo.name << ") does not support sample rate (" << _sampleRate << ").");
|
||||
return false;
|
||||
}
|
||||
m_sampleRate = _sampleRate;
|
||||
if ( _mode == audio::orchestra::mode_input
|
||||
&& m__mode == audio::orchestra::mode_output
|
||||
&& m_device[0] == _device) {
|
||||
// We're doing duplex setup here.
|
||||
m_deviceFormat[0] = m_deviceFormat[1];
|
||||
m_nDeviceChannels[0] = deviceChannels;
|
||||
}
|
||||
// Set interleaving parameters.
|
||||
m_deviceInterleaved[modeToIdTable(_mode)] = true;
|
||||
// Set flags for buffer conversion
|
||||
m_doConvertBuffer[modeToIdTable(_mode)] = false;
|
||||
if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
|
||||
m_doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||
}
|
||||
if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
|
||||
m_doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||
}
|
||||
if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
|
||||
&& m_nUserChannels[modeToIdTable(_mode)] > 1) {
|
||||
m_doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||
}
|
||||
m_private->id[modeToIdTable(_mode)] = fd;
|
||||
// Allocate necessary internal buffers.
|
||||
uint64_t bufferBytes;
|
||||
bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
|
||||
m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
|
||||
if (m_userBuffer[modeToIdTable(_mode)] == nullptr) {
|
||||
ATA_ERROR("error allocating user buffer memory.");
|
||||
goto error;
|
||||
}
|
||||
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
|
||||
bool makeBuffer = true;
|
||||
bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
|
||||
if (_mode == audio::orchestra::mode_input) {
|
||||
if ( m__mode == audio::orchestra::mode_output
|
||||
&& m_deviceBuffer) {
|
||||
uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
|
||||
if (bufferBytes <= bytesOut) {
|
||||
makeBuffer = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (makeBuffer) {
|
||||
bufferBytes *= *_bufferSize;
|
||||
if (m_deviceBuffer) {
|
||||
free(m_deviceBuffer);
|
||||
}
|
||||
m_deviceBuffer = (char *) calloc(bufferBytes, 1);
|
||||
if (m_deviceBuffer == nullptr) {
|
||||
ATA_ERROR("error allocating device buffer memory.");
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
}
|
||||
m_device[modeToIdTable(_mode)] = _device;
|
||||
m_state = audio::orchestra::state_stopped;
|
||||
// Setup the buffer conversion information structure.
|
||||
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
|
||||
setConvertInfo(_mode, _firstChannel);
|
||||
}
|
||||
// Setup thread if necessary.
|
||||
if (m_mode == audio::orchestra::mode_output && _mode == audio::orchestra::mode_input) {
|
||||
// We had already set up an output stream.
|
||||
m_mode = audio::orchestra::mode_duplex;
|
||||
if (m_device[0] == _device) {
|
||||
m_private->id[0] = fd;
|
||||
}
|
||||
} else {
|
||||
m_mode = _mode;
|
||||
// Setup callback thread.
|
||||
m_private->threadRunning = true;
|
||||
m_private->thread = new std11::thread(ossCallbackHandler, this);
|
||||
if (m_private->thread == nullptr) {
|
||||
m_private->threadRunning = false;
|
||||
ATA_ERROR("creating callback thread!");
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
error:
|
||||
if (m_private->id[0] != nullptr) {
|
||||
close(m_private->id[0]);
|
||||
m_private->id[0] = nullptr;
|
||||
}
|
||||
if (m_private->id[1] != nullptr) {
|
||||
close(m_private->id[1]);
|
||||
m_private->id[1] = nullptr;
|
||||
}
|
||||
for (int32_t i=0; i<2; i++) {
|
||||
if (m_userBuffer[i]) {
|
||||
free(m_userBuffer[i]);
|
||||
m_userBuffer[i] = 0;
|
||||
}
|
||||
}
|
||||
if (m_deviceBuffer) {
|
||||
free(m_deviceBuffer);
|
||||
m_deviceBuffer = 0;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Oss::closeStream() {
|
||||
if (m_state == audio::orchestra::state_closed) {
|
||||
ATA_ERROR("no open stream to close!");
|
||||
return audio::orchestra::error_warning;
|
||||
}
|
||||
m_private->threadRunning = false;
|
||||
m_mutex.lock();
|
||||
if (m_state == audio::orchestra::state_stopped) {
|
||||
m_private->runnable.notify_one();
|
||||
}
|
||||
m_mutex.unlock();
|
||||
m_private->thread->join();
|
||||
if (m_state == audio::orchestra::state_running) {
|
||||
if (m_mode == audio::orchestra::mode_output || m_mode == audio::orchestra::mode_duplex) {
|
||||
ioctl(m_private->id[0], SNDCTL_DSP_HALT, 0);
|
||||
} else {
|
||||
ioctl(m_private->id[1], SNDCTL_DSP_HALT, 0);
|
||||
}
|
||||
m_state = audio::orchestra::state_stopped;
|
||||
}
|
||||
if (m_private->id[0] != nullptr) {
|
||||
close(m_private->id[0]);
|
||||
m_private->id[0] = nullptr;
|
||||
}
|
||||
if (m_private->id[1] != nullptr) {
|
||||
close(m_private->id[1]);
|
||||
m_private->id[1] = nullptr;
|
||||
}
|
||||
for (int32_t i=0; i<2; i++) {
|
||||
if (m_userBuffer[i]) {
|
||||
free(m_userBuffer[i]);
|
||||
m_userBuffer[i] = 0;
|
||||
}
|
||||
}
|
||||
if (m_deviceBuffer) {
|
||||
free(m_deviceBuffer);
|
||||
m_deviceBuffer = 0;
|
||||
}
|
||||
m_mode = audio::orchestra::mode_unknow;
|
||||
m_state = audio::orchestra::state_closed;
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Oss::startStream() {
|
||||
// TODO : Check return ...
|
||||
audio::orchestra::Api::startStream();
|
||||
if (verifyStream() != audio::orchestra::error_none) {
|
||||
return audio::orchestra::error_fail;
|
||||
}
|
||||
if (m_state == audio::orchestra::state_running) {
|
||||
ATA_ERROR("the stream is already running!");
|
||||
return audio::orchestra::error_warning;
|
||||
}
|
||||
m_mutex.lock();
|
||||
m_state = audio::orchestra::state_running;
|
||||
// No need to do anything else here ... OSS automatically starts
|
||||
// when fed samples.
|
||||
m_mutex.unlock();
|
||||
m_private->runnable.notify_one();
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Oss::stopStream() {
|
||||
if (verifyStream() != audio::orchestra::error_none) {
|
||||
return audio::orchestra::error_fail;
|
||||
}
|
||||
if (m_state == audio::orchestra::state_stopped) {
|
||||
ATA_ERROR("the stream is already stopped!");
|
||||
return;
|
||||
}
|
||||
m_mutex.lock();
|
||||
// The state might change while waiting on a mutex.
|
||||
if (m_state == audio::orchestra::state_stopped) {
|
||||
m_mutex.unlock();
|
||||
return;
|
||||
}
|
||||
int32_t result = 0;
|
||||
if ( m_mode == audio::orchestra::mode_output
|
||||
|| m_mode == audio::orchestra::mode_duplex) {
|
||||
// Flush the output with zeros a few times.
|
||||
char *buffer;
|
||||
int32_t samples;
|
||||
audio::format format;
|
||||
if (m_doConvertBuffer[0]) {
|
||||
buffer = m_deviceBuffer;
|
||||
samples = m_bufferSize * m_nDeviceChannels[0];
|
||||
format = m_deviceFormat[0];
|
||||
} else {
|
||||
buffer = m_userBuffer[0];
|
||||
samples = m_bufferSize * m_nUserChannels[0];
|
||||
format = m_userFormat;
|
||||
}
|
||||
memset(buffer, 0, samples * audio::getFormatBytes(format));
|
||||
for (uint32_t i=0; i<m_nBuffers+1; i++) {
|
||||
result = write(m_private->id[0], buffer, samples * audio::getFormatBytes(format));
|
||||
if (result == -1) {
|
||||
ATA_ERROR("audio write error.");
|
||||
return audio::orchestra::error_warning;
|
||||
}
|
||||
}
|
||||
result = ioctl(m_private->id[0], SNDCTL_DSP_HALT, 0);
|
||||
if (result == -1) {
|
||||
ATA_ERROR("system error stopping callback procedure on device (" << m_device[0] << ").");
|
||||
goto unlock;
|
||||
}
|
||||
m_private->triggered = false;
|
||||
}
|
||||
if ( m_mode == audio::orchestra::mode_input
|
||||
|| ( m_mode == audio::orchestra::mode_duplex
|
||||
&& m_private->id[0] != m_private->id[1])) {
|
||||
result = ioctl(m_private->id[1], SNDCTL_DSP_HALT, 0);
|
||||
if (result == -1) {
|
||||
ATA_ERROR("system error stopping input callback procedure on device (" << m_device[0] << ").");
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
unlock:
|
||||
m_state = audio::orchestra::state_stopped;
|
||||
m_mutex.unlock();
|
||||
if (result != -1) {
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
return audio::orchestra::error_systemError;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Oss::abortStream() {
|
||||
if (verifyStream() != audio::orchestra::error_none) {
|
||||
return audio::orchestra::error_fail;
|
||||
}
|
||||
if (m_state == audio::orchestra::state_stopped) {
|
||||
ATA_ERROR("the stream is already stopped!");
|
||||
return audio::orchestra::error_warning;
|
||||
}
|
||||
m_mutex.lock();
|
||||
// The state might change while waiting on a mutex.
|
||||
if (m_state == audio::orchestra::state_stopped) {
|
||||
m_mutex.unlock();
|
||||
return;
|
||||
}
|
||||
int32_t result = 0;
|
||||
if (m_mode == audio::orchestra::mode_output || m_mode == audio::orchestra::mode_duplex) {
|
||||
result = ioctl(m_private->id[0], SNDCTL_DSP_HALT, 0);
|
||||
if (result == -1) {
|
||||
ATA_ERROR("system error stopping callback procedure on device (" << m_device[0] << ").");
|
||||
goto unlock;
|
||||
}
|
||||
m_private->triggered = false;
|
||||
}
|
||||
if (m_mode == audio::orchestra::mode_input || (m_mode == audio::orchestra::mode_duplex && m_private->id[0] != m_private->id[1])) {
|
||||
result = ioctl(m_private->id[1], SNDCTL_DSP_HALT, 0);
|
||||
if (result == -1) {
|
||||
ATA_ERROR("system error stopping input callback procedure on device (" << m_device[0] << ").");
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
unlock:
|
||||
m_state = audio::orchestra::state_stopped;
|
||||
m_mutex.unlock();
|
||||
if (result != -1) {
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
return audio::orchestra::error_systemError;
|
||||
}
|
||||
|
||||
void audio::orchestra::api::Oss::callbackEvent() {
|
||||
if (m_state == audio::orchestra::state_stopped) {
|
||||
std11::unique_lock<std11::mutex> lck(m_mutex);
|
||||
m_private->runnable.wait(lck);
|
||||
if (m_state != audio::orchestra::state_running) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (m_state == audio::orchestra::state_closed) {
|
||||
ATA_ERROR("the stream is closed ... this shouldn't happen!");
|
||||
return audio::orchestra::error_warning;
|
||||
}
|
||||
// Invoke user callback to get fresh output data.
|
||||
int32_t doStopStream = 0;
|
||||
audio::Time streamTime = getStreamTime();
|
||||
std::vector<enum audio::orchestra::status> status;
|
||||
if ( m_mode != audio::orchestra::mode_input
|
||||
&& m_private->xrun[0] == true) {
|
||||
status.push_back(audio::orchestra::status_underflow);
|
||||
m_private->xrun[0] = false;
|
||||
}
|
||||
if ( m_mode != audio::orchestra::mode_output
|
||||
&& m_private->xrun[1] == true) {
|
||||
status.push_back(audio::orchestra::status_overflow);
|
||||
m_private->xrun[1] = false;
|
||||
}
|
||||
doStopStream = m_callback(m_userBuffer[1],
|
||||
streamTime,
|
||||
m_userBuffer[0],
|
||||
streamTime,
|
||||
m_bufferSize,
|
||||
status);
|
||||
if (doStopStream == 2) {
|
||||
this->abortStream();
|
||||
return;
|
||||
}
|
||||
m_mutex.lock();
|
||||
// The state might change while waiting on a mutex.
|
||||
if (m_state == audio::orchestra::state_stopped) {
|
||||
goto unlock;
|
||||
}
|
||||
int32_t result;
|
||||
char *buffer;
|
||||
int32_t samples;
|
||||
audio::format format;
|
||||
if ( m_mode == audio::orchestra::mode_output
|
||||
|| m_mode == audio::orchestra::mode_duplex) {
|
||||
// Setup parameters and do buffer conversion if necessary.
|
||||
if (m_doConvertBuffer[0]) {
|
||||
buffer = m_deviceBuffer;
|
||||
convertBuffer(buffer, m_userBuffer[0], m_convertInfo[0]);
|
||||
samples = m_bufferSize * m_nDeviceChannels[0];
|
||||
format = m_deviceFormat[0];
|
||||
} else {
|
||||
buffer = m_userBuffer[0];
|
||||
samples = m_bufferSize * m_nUserChannels[0];
|
||||
format = m_userFormat;
|
||||
}
|
||||
// Do byte swapping if necessary.
|
||||
if (m_doByteSwap[0]) {
|
||||
byteSwapBuffer(buffer, samples, format);
|
||||
}
|
||||
if ( m_mode == audio::orchestra::mode_duplex
|
||||
&& m_private->triggered == false) {
|
||||
int32_t trig = 0;
|
||||
ioctl(m_private->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
|
||||
result = write(m_private->id[0], buffer, samples * audio::getFormatBytes(format));
|
||||
trig = PCM_ENABLE_audio::orchestra::mode_input|PCM_ENABLE_audio::orchestra::mode_output;
|
||||
ioctl(m_private->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
|
||||
m_private->triggered = true;
|
||||
} else {
|
||||
// Write samples to device.
|
||||
result = write(m_private->id[0], buffer, samples * audio::getFormatBytes(format));
|
||||
}
|
||||
if (result == -1) {
|
||||
// We'll assume this is an underrun, though there isn't a
|
||||
// specific means for determining that.
|
||||
m_private->xrun[0] = true;
|
||||
ATA_ERROR("audio write error.");
|
||||
//error(audio::orchestra::error_warning);
|
||||
// Continue on to input section.
|
||||
}
|
||||
}
|
||||
if ( m_mode == audio::orchestra::mode_input
|
||||
|| m_mode == audio::orchestra::mode_duplex) {
|
||||
// Setup parameters.
|
||||
if (m_doConvertBuffer[1]) {
|
||||
buffer = m_deviceBuffer;
|
||||
samples = m_bufferSize * m_nDeviceChannels[1];
|
||||
format = m_deviceFormat[1];
|
||||
} else {
|
||||
buffer = m_userBuffer[1];
|
||||
samples = m_bufferSize * m_nUserChannels[1];
|
||||
format = m_userFormat;
|
||||
}
|
||||
// Read samples from device.
|
||||
result = read(m_private->id[1], buffer, samples * audio::getFormatBytes(format));
|
||||
if (result == -1) {
|
||||
// We'll assume this is an overrun, though there isn't a
|
||||
// specific means for determining that.
|
||||
m_private->xrun[1] = true;
|
||||
ATA_ERROR("audio read error.");
|
||||
goto unlock;
|
||||
}
|
||||
// Do byte swapping if necessary.
|
||||
if (m_doByteSwap[1]) {
|
||||
byteSwapBuffer(buffer, samples, format);
|
||||
}
|
||||
// Do buffer conversion if necessary.
|
||||
if (m_doConvertBuffer[1]) {
|
||||
convertBuffer(m_userBuffer[1], m_deviceBuffer, m_convertInfo[1]);
|
||||
}
|
||||
}
|
||||
unlock:
|
||||
m_mutex.unlock();
|
||||
audio::orchestra::Api::tickStreamTime();
|
||||
if (doStopStream == 1) {
|
||||
this->stopStream();
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * @brief Thread body: pumps callbackEvent() until the backend clears threadRunning.
 * @param[in] _userData The api::Oss instance that spawned the thread.
 */
static void ossCallbackHandler(void* _userData) {
	// fix: the original cast the user data to api::Alsa* — copy/paste slip from
	// the ALSA backend; this thread is always created with an Oss instance.
	audio::orchestra::api::Oss* myClass = reinterpret_cast<audio::orchestra::api::Oss*>(_userData);
	// fix: 'm_name' is a member and is not in scope in this free function; use a
	// fixed thread name. NOTE(review): confirm whether a public name accessor
	// exists to restore the per-stream suffix.
	etk::thread::setName("OSS callback");
	// NOTE(review): m_private is declared private in Oss.h — this access compiles
	// only if the handler is granted friendship, as in the other backends; confirm.
	while (myClass->m_private->threadRunning == true) {
		myClass->callbackEvent();
	}
}
|
||||
|
||||
#endif
|
51
audio/orchestra/api/Oss.h
Normal file
51
audio/orchestra/api/Oss.h
Normal file
@@ -0,0 +1,51 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#if !defined(__AUDIO_ORCHESTRA_API_OSS_H__) && defined(ORCHESTRA_BUILD_OSS)
|
||||
#define __AUDIO_ORCHESTRA_API_OSS_H__
|
||||
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
namespace api {
|
||||
class OssPrivate;
|
||||
class Oss: public audio::orchestra::Api {
|
||||
public:
|
||||
static audio::orchestra::Api* create();
|
||||
public:
|
||||
Oss();
|
||||
virtual ~Oss();
|
||||
enum audio::orchestra::type getCurrentApi() {
|
||||
return audio::orchestra::type_oss;
|
||||
}
|
||||
uint32_t getDeviceCount();
|
||||
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||
enum audio::orchestra::error closeStream();
|
||||
enum audio::orchestra::error startStream();
|
||||
enum audio::orchestra::error stopStream();
|
||||
enum audio::orchestra::error abortStream();
|
||||
// This function is intended for internal use only. It must be
|
||||
// public because it is called by the internal callback handler,
|
||||
// which is not a member of RtAudio. External use of this function
|
||||
// will most likely produce highly undesireable results!
|
||||
void callbackEvent();
|
||||
private:
|
||||
std11::shared_ptr<OssPrivate> m_private;
|
||||
bool probeDeviceOpen(uint32_t _device,
|
||||
audio::orchestra::mode _mode,
|
||||
uint32_t _channels,
|
||||
uint32_t _firstChannel,
|
||||
uint32_t _sampleRate,
|
||||
audio::format _format,
|
||||
uint32_t *_bufferSize,
|
||||
const audio::orchestra::StreamOptions& _options);
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
423
audio/orchestra/api/Pulse.cpp
Normal file
423
audio/orchestra/api/Pulse.cpp
Normal file
@@ -0,0 +1,423 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
|
||||
#if defined(ORCHESTRA_BUILD_PULSE)
|
||||
|
||||
#include <unistd.h>
|
||||
#include <limits.h>
|
||||
#include <audio/orchestra/Interface.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
#include <pulse/error.h>
|
||||
#include <pulse/simple.h>
|
||||
#include <cstdio>
|
||||
#include <etk/thread/tools.h>
|
||||
|
||||
#undef __class__
|
||||
#define __class__ "api::Pulse"
|
||||
|
||||
// Factory used by the api registry to instantiate the PulseAudio backend.
audio::orchestra::Api* audio::orchestra::api::Pulse::create() {
	return new audio::orchestra::api::Pulse();
}
|
||||
|
||||
|
||||
// Sample rates advertised for the single PulseAudio virtual device.
// The list is zero-terminated (see the iteration in getDeviceInfo()).
static const uint32_t SUPPORTED_SAMPLERATES[] = {
	8000,
	16000,
	22050,
	32000,
	44100,
	48000,
	96000,
	0
};

// One row of the orchestra-format -> PulseAudio-format translation table.
struct rtaudio_pa_format_mapping_t {
	enum audio::format airtaudio_format;
	pa_sample_format_t pa_format;
};

// Translation table, terminated by the {format_unknow, PA_SAMPLE_INVALID} pair.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
	{audio::format_int16, PA_SAMPLE_S16LE},
	{audio::format_int32, PA_SAMPLE_S32LE},
	{audio::format_float, PA_SAMPLE_FLOAT32LE},
	{audio::format_unknow, PA_SAMPLE_INVALID}};
|
||||
|
||||
|
||||
namespace audio {
	namespace orchestra {
		namespace api {
			// Internal (pimpl) state of the PulseAudio backend.
			class PulsePrivate {
				public:
					pa_simple *s_play; // playback connection (0 until opened)
					pa_simple *s_rec; // capture connection (0 until opened)
					std11::shared_ptr<std11::thread> thread; // I/O thread running Pulse::callbackEvent()
					bool threadRunning; // loop condition for the I/O thread
					std11::condition_variable runnable_cv; // wakes a stopped stream
					bool runnable; // predicate for runnable_cv (guarded by the stream mutex)
					PulsePrivate() :
					  s_play(0),
					  s_rec(0),
					  threadRunning(false),
					  runnable(false) {
						
					}
			};
		}
	}
}
||||
// The pa_simple connections are created lazily when a stream is opened.
audio::orchestra::api::Pulse::Pulse() :
  m_private(new audio::orchestra::api::PulsePrivate()) {
	
}
|
||||
|
||||
// Close the stream (pa_simple connections and the I/O thread) if the caller
// did not do it explicitly.
audio::orchestra::api::Pulse::~Pulse() {
	if (m_state != audio::orchestra::state_closed) {
		closeStream();
	}
}
|
||||
|
||||
// PulseAudio is exposed as a single virtual device; the sound server does the
// actual per-device routing.
uint32_t audio::orchestra::api::Pulse::getDeviceCount() {
	return 1;
}
|
||||
|
||||
/**
 * @brief Describe the single PulseAudio virtual device.
 *
 * The capabilities are fixed: stereo in/out/duplex, the rates from
 * SUPPORTED_SAMPLERATES, and the three formats the simple API mapping supports.
 * @param[in] _device Ignored — there is only one device.
 * @return Fully populated DeviceInfo (probed is always true).
 */
audio::orchestra::DeviceInfo audio::orchestra::api::Pulse::getDeviceInfo(uint32_t _device) {
	audio::orchestra::DeviceInfo info;
	info.probed = true;
	info.name = "PulseAudio";
	info.isDefaultOutput = true;
	info.isDefaultInput = true;
	info.outputChannels = 2;
	info.inputChannels = 2;
	info.duplexChannels = 2;
	// Copy the zero-terminated static rate list.
	for (size_t iii=0; SUPPORTED_SAMPLERATES[iii] != 0; ++iii) {
		info.sampleRates.push_back(SUPPORTED_SAMPLERATES[iii]);
	}
	info.nativeFormats.push_back(audio::format_int16);
	info.nativeFormats.push_back(audio::format_int32);
	info.nativeFormats.push_back(audio::format_float);
	return info;
}
|
||||
|
||||
static void pulseaudio_callback(void* _userData) {
|
||||
audio::orchestra::api::Pulse* myClass = reinterpret_cast<audio::orchestra::api::Pulse*>(_userData);
|
||||
myClass->callbackEvent();
|
||||
}
|
||||
|
||||
void audio::orchestra::api::Pulse::callbackEvent() {
|
||||
etk::thread::setName("Pulse IO-" + m_name);
|
||||
while (m_private->threadRunning == true) {
|
||||
callbackEventOneCycle();
|
||||
}
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Pulse::closeStream() {
|
||||
m_private->threadRunning = false;
|
||||
m_mutex.lock();
|
||||
if (m_state == audio::orchestra::state_stopped) {
|
||||
m_private->runnable = true;
|
||||
m_private->runnable_cv.notify_one();;
|
||||
}
|
||||
m_mutex.unlock();
|
||||
m_private->thread->join();
|
||||
if (m_private->s_play) {
|
||||
pa_simple_flush(m_private->s_play, nullptr);
|
||||
pa_simple_free(m_private->s_play);
|
||||
}
|
||||
if (m_private->s_rec) {
|
||||
pa_simple_free(m_private->s_rec);
|
||||
}
|
||||
m_userBuffer[0].clear();
|
||||
m_userBuffer[1].clear();
|
||||
m_state = audio::orchestra::state_closed;
|
||||
m_mode = audio::orchestra::mode_unknow;
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
void audio::orchestra::api::Pulse::callbackEventOneCycle() {
|
||||
if (m_state == audio::orchestra::state_stopped) {
|
||||
std11::unique_lock<std11::mutex> lck(m_mutex);
|
||||
while (!m_private->runnable) {
|
||||
m_private->runnable_cv.wait(lck);
|
||||
}
|
||||
if (m_state != audio::orchestra::state_running) {
|
||||
m_mutex.unlock();
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (m_state == audio::orchestra::state_closed) {
|
||||
ATA_ERROR("the stream is closed ... this shouldn't happen!");
|
||||
return;
|
||||
}
|
||||
audio::Time streamTime = getStreamTime();
|
||||
std::vector<enum audio::orchestra::status> status;
|
||||
int32_t doStopStream = m_callback(&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0],
|
||||
streamTime,
|
||||
&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0],
|
||||
streamTime,
|
||||
m_bufferSize,
|
||||
status);
|
||||
if (doStopStream == 2) {
|
||||
abortStream();
|
||||
return;
|
||||
}
|
||||
m_mutex.lock();
|
||||
void *pulse_in = m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] ? m_deviceBuffer : &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0];
|
||||
void *pulse_out = m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] ? m_deviceBuffer : &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0];
|
||||
if (m_state != audio::orchestra::state_running) {
|
||||
goto unlock;
|
||||
}
|
||||
int32_t pa_error;
|
||||
size_t bytes;
|
||||
if ( m_mode == audio::orchestra::mode_output
|
||||
|| m_mode == audio::orchestra::mode_duplex) {
|
||||
if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]) {
|
||||
convertBuffer(m_deviceBuffer,
|
||||
&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0],
|
||||
m_convertInfo[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]);
|
||||
bytes = m_nDeviceChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] * m_bufferSize * audio::getFormatBytes(m_deviceFormat[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]);
|
||||
} else {
|
||||
bytes = m_nUserChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
|
||||
}
|
||||
if (pa_simple_write(m_private->s_play, pulse_out, bytes, &pa_error) < 0) {
|
||||
ATA_ERROR("audio write error, " << pa_strerror(pa_error) << ".");
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (m_mode == audio::orchestra::mode_input || m_mode == audio::orchestra::mode_duplex) {
|
||||
if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]) {
|
||||
bytes = m_nDeviceChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] * m_bufferSize * audio::getFormatBytes(m_deviceFormat[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]);
|
||||
} else {
|
||||
bytes = m_nUserChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
|
||||
}
|
||||
if (pa_simple_read(m_private->s_rec, pulse_in, bytes, &pa_error) < 0) {
|
||||
ATA_ERROR("audio read error, " << pa_strerror(pa_error) << ".");
|
||||
return;
|
||||
}
|
||||
if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]) {
|
||||
convertBuffer(&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0],
|
||||
m_deviceBuffer,
|
||||
m_convertInfo[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]);
|
||||
}
|
||||
}
|
||||
unlock:
|
||||
m_mutex.unlock();
|
||||
audio::orchestra::Api::tickStreamTime();
|
||||
if (doStopStream == 1) {
|
||||
stopStream();
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Pulse::startStream() {
|
||||
// TODO : Check return ...
|
||||
audio::orchestra::Api::startStream();
|
||||
if (m_state == audio::orchestra::state_closed) {
|
||||
ATA_ERROR("the stream is not open!");
|
||||
return audio::orchestra::error_invalidUse;
|
||||
}
|
||||
if (m_state == audio::orchestra::state_running) {
|
||||
ATA_ERROR("the stream is already running!");
|
||||
return audio::orchestra::error_warning;
|
||||
}
|
||||
m_mutex.lock();
|
||||
m_state = audio::orchestra::state_running;
|
||||
m_private->runnable = true;
|
||||
m_private->runnable_cv.notify_one();
|
||||
m_mutex.unlock();
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Pulse::stopStream() {
|
||||
if (m_state == audio::orchestra::state_closed) {
|
||||
ATA_ERROR("the stream is not open!");
|
||||
return audio::orchestra::error_invalidUse;
|
||||
}
|
||||
if (m_state == audio::orchestra::state_stopped) {
|
||||
ATA_ERROR("the stream is already stopped!");
|
||||
return audio::orchestra::error_warning;
|
||||
}
|
||||
m_state = audio::orchestra::state_stopped;
|
||||
m_mutex.lock();
|
||||
if (m_private->s_play) {
|
||||
int32_t pa_error;
|
||||
if (pa_simple_drain(m_private->s_play, &pa_error) < 0) {
|
||||
ATA_ERROR("error draining output device, " << pa_strerror(pa_error) << ".");
|
||||
m_mutex.unlock();
|
||||
return audio::orchestra::error_systemError;
|
||||
}
|
||||
}
|
||||
m_state = audio::orchestra::state_stopped;
|
||||
m_mutex.unlock();
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
enum audio::orchestra::error audio::orchestra::api::Pulse::abortStream() {
|
||||
if (m_state == audio::orchestra::state_closed) {
|
||||
ATA_ERROR("the stream is not open!");
|
||||
return audio::orchestra::error_invalidUse;
|
||||
}
|
||||
if (m_state == audio::orchestra::state_stopped) {
|
||||
ATA_ERROR("the stream is already stopped!");
|
||||
return audio::orchestra::error_warning;
|
||||
}
|
||||
m_state = audio::orchestra::state_stopped;
|
||||
m_mutex.lock();
|
||||
if (m_private && m_private->s_play) {
|
||||
int32_t pa_error;
|
||||
if (pa_simple_flush(m_private->s_play, &pa_error) < 0) {
|
||||
ATA_ERROR("error flushing output device, " << pa_strerror(pa_error) << ".");
|
||||
m_mutex.unlock();
|
||||
return audio::orchestra::error_systemError;
|
||||
}
|
||||
}
|
||||
m_state = audio::orchestra::state_stopped;
|
||||
m_mutex.unlock();
|
||||
return audio::orchestra::error_none;
|
||||
}
|
||||
|
||||
/**
 * @brief Validate the requested configuration, connect the pa_simple
 *        stream(s) and (once) start the I/O thread.
 * @param _device Must be 0: only the single virtual device exists.
 * @param _mode mode_input or mode_output (duplex is reached by two calls).
 * @param _channels 1 or 2.
 * @param _firstChannel Must be 0 (channel offsets are unsupported).
 * @param _sampleRate Must appear in SUPPORTED_SAMPLERATES.
 * @param _format Must appear in supported_sampleformats.
 * @param _bufferSize In/out requested buffer size in frames.
 * @param _options Unused by this backend.
 * @return true when the stream is ready (state_stopped), false on failure.
 */
bool audio::orchestra::api::Pulse::probeDeviceOpen(uint32_t _device,
                                                   audio::orchestra::mode _mode,
                                                   uint32_t _channels,
                                                   uint32_t _firstChannel,
                                                   uint32_t _sampleRate,
                                                   audio::format _format,
                                                   uint32_t *_bufferSize,
                                                   const audio::orchestra::StreamOptions& _options) {
	uint64_t bufferBytes = 0;
	pa_sample_spec ss;
	if (_device != 0) {
		return false;
	}
	if (_mode != audio::orchestra::mode_input && _mode != audio::orchestra::mode_output) {
		return false;
	}
	if (_channels != 1 && _channels != 2) {
		ATA_ERROR("unsupported number of channels.");
		return false;
	}
	ss.channels = _channels;
	if (_firstChannel != 0) {
		return false;
	}
	// Validate the requested rate against the zero-terminated rate table.
	bool sr_found = false;
	for (const uint32_t *sr = SUPPORTED_SAMPLERATES; *sr; ++sr) {
		if (_sampleRate == *sr) {
			sr_found = true;
			m_sampleRate = _sampleRate;
			ss.rate = _sampleRate;
			break;
		}
	}
	if (!sr_found) {
		ATA_ERROR("unsupported sample rate.");
		return false;
	}
	// Map the orchestra format onto PulseAudio (sentinel-terminated table).
	bool sf_found = false; // FIX: was 'bool sf_found = 0;'
	for (const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
	     sf->airtaudio_format && sf->pa_format != PA_SAMPLE_INVALID;
	     ++sf) {
		if (_format == sf->airtaudio_format) {
			sf_found = true;
			m_userFormat = sf->airtaudio_format;
			ss.format = sf->pa_format;
			break;
		}
	}
	if (!sf_found) {
		ATA_ERROR("unsupported sample format.");
		return false;
	}
	m_deviceInterleaved[modeToIdTable(_mode)] = true;
	m_nBuffers = 1;
	m_doByteSwap[modeToIdTable(_mode)] = false;
	m_doConvertBuffer[modeToIdTable(_mode)] = false;
	m_deviceFormat[modeToIdTable(_mode)] = m_userFormat;
	m_nUserChannels[modeToIdTable(_mode)] = _channels;
	m_nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel;
	m_channelOffset[modeToIdTable(_mode)] = 0;
	// Allocate necessary internal buffers.
	bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
	m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
	if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
		ATA_ERROR("error allocating user buffer memory.");
		goto error;
	}
	m_bufferSize = *_bufferSize;
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		bool makeBuffer = true;
		bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
		if (_mode == audio::orchestra::mode_input) {
			// In duplex, reuse the output-side device buffer when it is
			// already big enough.
			if (m_mode == audio::orchestra::mode_output && m_deviceBuffer) {
				uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
				if (bufferBytes <= bytesOut) {
					makeBuffer = false;
				}
			}
		}
		if (makeBuffer) {
			bufferBytes *= *_bufferSize;
			if (m_deviceBuffer != nullptr) {
				free(m_deviceBuffer);
			}
			m_deviceBuffer = (char *)calloc(bufferBytes, 1);
			if (m_deviceBuffer == nullptr) {
				ATA_ERROR("error allocating device buffer memory.");
				goto error;
			}
		}
	}
	m_device[modeToIdTable(_mode)] = _device;
	// Setup the buffer conversion information structure.
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		setConvertInfo(_mode, _firstChannel);
	}
	int32_t pa_error; // renamed from 'error' to avoid confusion with the label
	switch (_mode) {
		case audio::orchestra::mode_input:
			m_private->s_rec = pa_simple_new(nullptr, "orchestra", PA_STREAM_RECORD, nullptr, "Record", &ss, nullptr, nullptr, &pa_error);
			if (m_private->s_rec == nullptr) {
				ATA_ERROR("error connecting input to PulseAudio server.");
				goto error;
			}
			break;
		case audio::orchestra::mode_output:
			m_private->s_play = pa_simple_new(nullptr, "orchestra", PA_STREAM_PLAYBACK, nullptr, "Playback", &ss, nullptr, nullptr, &pa_error);
			if (m_private->s_play == nullptr) {
				ATA_ERROR("error connecting output to PulseAudio server.");
				goto error;
			}
			break;
		default:
			goto error;
	}
	if (m_mode == audio::orchestra::mode_unknow) {
		m_mode = _mode;
	} else if (m_mode == _mode) {
		// The same direction was already opened: refuse the double open.
		goto error;
	} else {
		m_mode = audio::orchestra::mode_duplex;
	}
	if (!m_private->threadRunning) {
		m_private->threadRunning = true;
		m_private->thread = std11::make_shared<std11::thread>(&pulseaudio_callback, this);
		if (m_private->thread == nullptr) {
			ATA_ERROR("error creating thread.");
			goto error;
		}
	}
	m_state = audio::orchestra::state_stopped;
	return true;
error:
	for (int32_t iii=0; iii<2; ++iii) {
		m_userBuffer[iii].clear();
	}
	if (m_deviceBuffer != nullptr) {
		free(m_deviceBuffer);
		m_deviceBuffer = nullptr;
	}
	return false;
}
|
||||
|
||||
#endif
|
54
audio/orchestra/api/Pulse.h
Normal file
54
audio/orchestra/api/Pulse.h
Normal file
@@ -0,0 +1,54 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#if !defined(__AUDIO_ORCHESTRA_API_PULSE_H__) && defined(ORCHESTRA_BUILD_PULSE)
#define __AUDIO_ORCHESTRA_API_PULSE_H__

namespace audio {
	namespace orchestra {
		namespace api {
			class PulsePrivate;
			/**
			 * @brief PulseAudio backend of the orchestra Api (pa_simple based,
			 *        single virtual stereo device).
			 */
			class Pulse: public audio::orchestra::Api {
				public:
					static audio::orchestra::Api* create();
				public:
					Pulse();
					virtual ~Pulse();
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_pulse;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					void callbackEventOneCycle();
					void callbackEvent();
				private:
					std11::shared_ptr<PulsePrivate> m_private;
					std::vector<audio::orchestra::DeviceInfo> m_devices;
					void saveDeviceInfo();
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}

#endif
|
6
audio/orchestra/base.cpp
Normal file
6
audio/orchestra/base.cpp
Normal file
@@ -0,0 +1,6 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
30
audio/orchestra/base.h
Normal file
30
audio/orchestra/base.h
Normal file
@@ -0,0 +1,30 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __AUDIO_ORCHESTRA_CB_H__
|
||||
#define __AUDIO_ORCHESTRA_CB_H__
|
||||
#include <etk/thread.h>
|
||||
#include <etk/condition_variable.h>
|
||||
#include <etk/mutex.h>
|
||||
#include <etk/chrono.h>
|
||||
#include <etk/functional.h>
|
||||
#include <etk/memory.h>
|
||||
#include <audio/channel.h>
|
||||
#include <audio/format.h>
|
||||
#include <audio/orchestra/error.h>
|
||||
#include <audio/orchestra/status.h>
|
||||
#include <audio/orchestra/Flags.h>
|
||||
|
||||
#include <audio/orchestra/CallbackInfo.h>
|
||||
#include <audio/orchestra/DeviceInfo.h>
|
||||
#include <audio/orchestra/StreamOptions.h>
|
||||
#include <audio/orchestra/StreamParameters.h>
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
13
audio/orchestra/debug.cpp
Normal file
13
audio/orchestra/debug.cpp
Normal file
@@ -0,0 +1,13 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#include <audio/orchestra/debug.h>
|
||||
|
||||
// Lazily register this module's log instance once; the function-local
// static keeps the returned id stable for the process lifetime.
int32_t audio::orchestra::getLogId() {
	static int32_t g_val = etk::log::registerInstance("audio-orchestra");
	return g_val;
}
|
@@ -1,29 +1,21 @@
|
||||
/**
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
*
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
*
|
||||
* @license BSD 3 clauses (see license file)
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __EAUDIOFX_DEBUG_H__
|
||||
#define __EAUDIOFX_DEBUG_H__
|
||||
#ifndef __AUDIO_ORCHESTRA_DEBUG_H__
|
||||
#define __AUDIO_ORCHESTRA_DEBUG_H__
|
||||
|
||||
#include <etk/log.h>
|
||||
|
||||
namespace airtaudio {
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
int32_t getLogId();
|
||||
};
|
||||
// TODO : Review this problem of multiple intanciation of "std::stringbuf sb"
|
||||
#define ATA_BASE(info,data) \
|
||||
do { \
|
||||
if (info <= etk::log::getLevel(airtaudio::getLogId())) { \
|
||||
std::stringbuf sb; \
|
||||
std::ostream tmpStream(&sb); \
|
||||
tmpStream << data; \
|
||||
etk::log::logStream(airtaudio::getLogId(), info, __LINE__, __class__, __func__, tmpStream); \
|
||||
} \
|
||||
} while(0)
|
||||
}
|
||||
}
|
||||
#define ATA_BASE(info,data) TK_LOG_BASE(audio::orchestra::getLogId(),info,data)
|
||||
|
||||
#define ATA_CRITICAL(data) ATA_BASE(1, data)
|
||||
#define ATA_ERROR(data) ATA_BASE(2, data)
|
9
audio/orchestra/error.cpp
Normal file
9
audio/orchestra/error.cpp
Normal file
@@ -0,0 +1,9 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#include <audio/orchestra/error.h>
|
||||
#include <audio/orchestra/debug.h>
|
26
audio/orchestra/error.h
Normal file
26
audio/orchestra/error.h
Normal file
@@ -0,0 +1,26 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __AUDIO_ORCHESTRA_ERROR_H__
|
||||
#define __AUDIO_ORCHESTRA_ERROR_H__
|
||||
|
||||
#include <etk/types.h>
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
enum error {
|
||||
error_none, //!< No error
|
||||
error_fail, //!< An error occure in the operation
|
||||
error_warning, //!< A non-critical error.
|
||||
error_inputNull, //!< null input or internal errror
|
||||
error_invalidUse, //!< The function was called incorrectly.
|
||||
error_systemError //!< A system error occured.
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
21
audio/orchestra/mode.cpp
Normal file
21
audio/orchestra/mode.cpp
Normal file
@@ -0,0 +1,21 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#include <audio/orchestra/mode.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
|
||||
/**
 * @brief Map a stream mode onto its buffer-table slot.
 * @return 1 for input; 0 for output, duplex and unknown.
 */
int32_t audio::orchestra::modeToIdTable(enum mode _mode) {
	if (_mode == mode_input) {
		return 1;
	}
	// mode_unknow, mode_duplex and mode_output all share slot 0.
	return 0;
}
|
26
audio/orchestra/mode.h
Normal file
26
audio/orchestra/mode.h
Normal file
@@ -0,0 +1,26 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __AUDIO_ORCHESTRA_MODE_H__
|
||||
#define __AUDIO_ORCHESTRA_MODE_H__
|
||||
|
||||
#include <etk/types.h>
|
||||
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
enum mode {
|
||||
mode_unknow,
|
||||
mode_output,
|
||||
mode_input,
|
||||
mode_duplex
|
||||
};
|
||||
int32_t modeToIdTable(enum mode _mode);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
6
audio/orchestra/state.cpp
Normal file
6
audio/orchestra/state.cpp
Normal file
@@ -0,0 +1,6 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
25
audio/orchestra/state.h
Normal file
25
audio/orchestra/state.h
Normal file
@@ -0,0 +1,25 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __AUDIO_ORCHESTRA_STATE_H__
|
||||
#define __AUDIO_ORCHESTRA_STATE_H__
|
||||
|
||||
#include <etk/types.h>
|
||||
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
enum state {
|
||||
state_closed,
|
||||
state_stopped,
|
||||
state_stopping,
|
||||
state_running
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
32
audio/orchestra/status.cpp
Normal file
32
audio/orchestra/status.cpp
Normal file
@@ -0,0 +1,32 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#include <audio/orchestra/status.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
static const char* listValue[] = {
|
||||
"ok",
|
||||
"overflow",
|
||||
"underflow"
|
||||
};
|
||||
|
||||
std::ostream& audio::orchestra::operator <<(std::ostream& _os, enum audio::orchestra::status _obj) {
|
||||
_os << listValue[_obj];
|
||||
return _os;
|
||||
}
|
||||
|
||||
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::status>& _obj) {
|
||||
_os << std::string("{");
|
||||
for (size_t iii=0; iii<_obj.size(); ++iii) {
|
||||
if (iii!=0) {
|
||||
_os << std::string(";");
|
||||
}
|
||||
_os << _obj[iii];
|
||||
}
|
||||
_os << std::string("}");
|
||||
return _os;
|
||||
}
|
||||
|
26
audio/orchestra/status.h
Normal file
26
audio/orchestra/status.h
Normal file
@@ -0,0 +1,26 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __AUDIO_ORCHESTRA_STATUS_H__
|
||||
#define __AUDIO_ORCHESTRA_STATUS_H__
|
||||
|
||||
#include <etk/types.h>
|
||||
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
enum status {
|
||||
status_ok, //!< nothing...
|
||||
status_overflow, //!< Internal buffer has more data than they can accept
|
||||
status_underflow //!< The internal buffer is empty
|
||||
};
|
||||
std::ostream& operator <<(std::ostream& _os, enum audio::orchestra::status _obj);
|
||||
std::ostream& operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::status>& _obj);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
73
audio/orchestra/type.cpp
Normal file
73
audio/orchestra/type.cpp
Normal file
@@ -0,0 +1,73 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#include <audio/orchestra/type.h>
|
||||
#include <audio/orchestra/debug.h>
|
||||
#include <iostream>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <climits>
|
||||
|
||||
#undef __class__
|
||||
#define __class__ "type"
|
||||
|
||||
static const char* listType[] = {
|
||||
"undefined",
|
||||
"alsa",
|
||||
"pulse",
|
||||
"oss",
|
||||
"jack",
|
||||
"coreOSX",
|
||||
"corIOS",
|
||||
"asio",
|
||||
"ds",
|
||||
"java",
|
||||
"dummy",
|
||||
"user1",
|
||||
"user2",
|
||||
"user3",
|
||||
"user4"
|
||||
};
|
||||
static int32_t listTypeSize = sizeof(listType)/sizeof(char*);
|
||||
|
||||
|
||||
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const enum audio::orchestra::type& _obj) {
|
||||
_os << listType[_obj];
|
||||
return _os;
|
||||
}
|
||||
|
||||
std::ostream& audio::orchestra::operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::type>& _obj) {
|
||||
_os << std::string("{");
|
||||
for (size_t iii=0; iii<_obj.size(); ++iii) {
|
||||
if (iii!=0) {
|
||||
_os << std::string(";");
|
||||
}
|
||||
_os << _obj[iii];
|
||||
}
|
||||
_os << std::string("}");
|
||||
return _os;
|
||||
}
|
||||
/*
|
||||
template <enum audio::format> std::string to_string(const enum audio::format& _variable) {
|
||||
return listType[_value];
|
||||
}
|
||||
*/
|
||||
std::string audio::orchestra::getTypeString(enum audio::orchestra::type _value) {
|
||||
return listType[_value];
|
||||
}
|
||||
|
||||
enum audio::orchestra::type audio::orchestra::getTypeFromString(const std::string& _value) {
|
||||
for (int32_t iii=0; iii<listTypeSize; ++iii) {
|
||||
if (_value == listType[iii]) {
|
||||
return static_cast<enum audio::orchestra::type>(iii);
|
||||
}
|
||||
}
|
||||
if (_value == "auto") {
|
||||
return audio::orchestra::type_undefined;
|
||||
}
|
||||
return audio::orchestra::type_undefined;
|
||||
}
|
44
audio/orchestra/type.h
Normal file
44
audio/orchestra/type.h
Normal file
@@ -0,0 +1,44 @@
|
||||
/** @file
|
||||
* @author Edouard DUPIN
|
||||
* @copyright 2011, Edouard DUPIN, all right reserved
|
||||
* @license APACHE v2.0 (see license file)
|
||||
* @fork from RTAudio
|
||||
*/
|
||||
|
||||
#ifndef __AUDIO_ORCHESTRA_TYPE_H__
|
||||
#define __AUDIO_ORCHESTRA_TYPE_H__
|
||||
|
||||
#include <etk/types.h>
|
||||
#include <etk/stdTools.h>
|
||||
|
||||
|
||||
namespace audio {
|
||||
namespace orchestra {
|
||||
/**
|
||||
* @brief Audio API specifier arguments.
|
||||
*/
|
||||
enum type {
|
||||
type_undefined, //!< Error API.
|
||||
type_alsa, //!< LINUX The Advanced Linux Sound Architecture.
|
||||
type_pulse, //!< LINUX The Linux PulseAudio.
|
||||
type_oss, //!< LINUX The Linux Open Sound System.
|
||||
type_jack, //!< UNIX The Jack Low-Latency Audio Server.
|
||||
type_coreOSX, //!< Macintosh OSX Core Audio.
|
||||
type_coreIOS, //!< Macintosh iOS Core Audio.
|
||||
type_asio, //!< WINDOWS The Steinberg Audio Stream I/O.
|
||||
type_ds, //!< WINDOWS The Microsoft Direct Sound.
|
||||
type_java, //!< ANDROID Interface.
|
||||
type_dummy, //!< Empty wrapper (non-functional).
|
||||
type_user1, //!< User interface 1.
|
||||
type_user2, //!< User interface 2.
|
||||
type_user3, //!< User interface 3.
|
||||
type_user4, //!< User interface 4.
|
||||
};
|
||||
std::ostream& operator <<(std::ostream& _os, const enum audio::orchestra::type& _obj);
|
||||
std::ostream& operator <<(std::ostream& _os, const std::vector<enum audio::orchestra::type>& _obj);
|
||||
std::string getTypeString(enum audio::orchestra::type _value);
|
||||
enum audio::orchestra::type getTypeFromString(const std::string& _value);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
91
catkin/CMakeLists.txt
Normal file
91
catkin/CMakeLists.txt
Normal file
@@ -0,0 +1,91 @@
|
||||
cmake_minimum_required(VERSION 2.8.3)
|
||||
project(audio_orchestra)
|
||||
|
||||
set(CMAKE_VERBOSE_MAKEFILE ON)
|
||||
|
||||
## Find catkin macros and libraries
|
||||
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
|
||||
## is used, also find other catkin packages
|
||||
find_package(catkin REQUIRED COMPONENTS
|
||||
etk
|
||||
audio
|
||||
)
|
||||
|
||||
find_package(ALSA REQUIRED)
|
||||
|
||||
###################################
|
||||
## catkin specific configuration ##
|
||||
###################################
|
||||
## The catkin_package macro generates cmake config files for your package
|
||||
## Declare things to be passed to dependent projects
|
||||
## INCLUDE_DIRS: uncomment this if you package contains header files
|
||||
## LIBRARIES: libraries you create in this project that dependent projects also need
|
||||
## CATKIN_DEPENDS: catkin_packages dependent projects also need
|
||||
## DEPENDS: system dependencies of this project that dependent projects also need
|
||||
catkin_package(
|
||||
INCLUDE_DIRS ../
|
||||
LIBRARIES ${PROJECT_NAME}
|
||||
CATKIN_DEPENDS etk audio
|
||||
DEPENDS system_lib
|
||||
)
|
||||
|
||||
###########
|
||||
## Build ##
|
||||
###########
|
||||
|
||||
## Specify additional locations of header files
|
||||
## Your package locations should be listed before other locations
|
||||
include_directories(
|
||||
..
|
||||
${catkin_INCLUDE_DIRS}
|
||||
)
|
||||
|
||||
## Declare a cpp library
|
||||
add_library(${PROJECT_NAME}
|
||||
../audio/orchestra/debug.cpp
|
||||
../audio/orchestra/status.cpp
|
||||
../audio/orchestra/type.cpp
|
||||
../audio/orchestra/mode.cpp
|
||||
../audio/orchestra/state.cpp
|
||||
../audio/orchestra/error.cpp
|
||||
../audio/orchestra/base.cpp
|
||||
../audio/orchestra/Interface.cpp
|
||||
../audio/orchestra/Flags.cpp
|
||||
../audio/orchestra/Api.cpp
|
||||
../audio/orchestra/DeviceInfo.cpp
|
||||
../audio/orchestra/StreamOptions.cpp
|
||||
../audio/orchestra/api/Dummy.cpp
|
||||
../audio/orchestra/api/Alsa.cpp
|
||||
../audio/orchestra/api/Jack.cpp
|
||||
../audio/orchestra/api/Pulse.cpp
|
||||
../audio/orchestra/api/Oss.cpp
|
||||
)
|
||||
|
||||
add_definitions(-D__LINUX_ALSA__)
|
||||
add_definitions(-D__DUMMY__)
|
||||
|
||||
## Add cmake target dependencies of the executable/library
|
||||
## as an example, message headers may need to be generated before nodes
|
||||
#add_dependencies(${PROJECT_NAME} test_perfo_core_generate_messages_cpp)
|
||||
|
||||
## Specify libraries to link a library or executable target against
|
||||
target_link_libraries(${PROJECT_NAME}
|
||||
${ALSA_LIBRARIES}
|
||||
${catkin_LIBRARIES}
|
||||
)
|
||||
|
||||
#############
|
||||
## Install ##
|
||||
#############
|
||||
|
||||
## Mark executables and/or libraries for installation
|
||||
install(TARGETS ${PROJECT_NAME}
|
||||
ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
|
||||
LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
|
||||
)
|
||||
|
||||
## Mark cpp header files for installation
|
||||
install(DIRECTORY ../audio/orchestra/
|
||||
DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
|
||||
FILES_MATCHING PATTERN "*.h"
|
||||
)
|
13
catkin/package.xml
Normal file
13
catkin/package.xml
Normal file
@@ -0,0 +1,13 @@
|
||||
<?xml version="1.0"?>
|
||||
<package>
|
||||
<name>audio_orchestra</name>
|
||||
<version>0.3.0</version>
|
||||
<description>Ewol RTAudio fork</description>
|
||||
<maintainer email="yui.heero@gmail.com">Edouard DUPIN</maintainer>
|
||||
<license>Apache-2.0</license>
|
||||
<build_depend>etk</build_depend>
|
||||
<build_depend>audio</build_depend>
|
||||
<buildtool_depend>catkin</buildtool_depend>
|
||||
<run_depend>etk</run_depend>
|
||||
<run_depend>audio</run_depend>
|
||||
</package>
|
@@ -1,90 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
import lutinModule as module
|
||||
import lutinTools as tools
|
||||
import lutinDebug as debug
|
||||
|
||||
def get_desc():
|
||||
return "airtaudio : Generic wrapper on all audio interface"
|
||||
|
||||
|
||||
def create(target):
|
||||
myModule = module.Module(__file__, 'airtaudio', 'LIBRARY')
|
||||
|
||||
myModule.add_src_file([
|
||||
'airtaudio/debug.cpp',
|
||||
'airtaudio/base.cpp',
|
||||
'airtaudio/Interface.cpp',
|
||||
'airtaudio/Api.cpp',
|
||||
'airtaudio/api/Dummy.cpp',
|
||||
])
|
||||
|
||||
myModule.add_export_flag_CC(['-D__AIRTAUDIO_API_DUMMY_H__'])
|
||||
if target.name=="Windows":
|
||||
myModule.add_src_file([
|
||||
'airtaudio/api/Asio.cpp',
|
||||
'airtaudio/api/Ds.cpp',
|
||||
])
|
||||
# ASIO API on Windows
|
||||
myModule.add_export_flag_CC(['__WINDOWS_ASIO__'])
|
||||
# Windows DirectSound API
|
||||
#myModule.add_export_flag_CC(['__WINDOWS_DS__'])
|
||||
myModule.add_module_depend(['etk'])
|
||||
elif target.name=="Linux":
|
||||
myModule.add_src_file([
|
||||
'airtaudio/api/Alsa.cpp',
|
||||
'airtaudio/api/Jack.cpp',
|
||||
'airtaudio/api/Pulse.cpp',
|
||||
'airtaudio/api/Oss.cpp'
|
||||
])
|
||||
# Linux Alsa API
|
||||
#myModule.add_export_flag_CC(['-D__LINUX_ALSA__'])
|
||||
#myModule.add_export_flag_LD("-lasound")
|
||||
# Linux Jack API
|
||||
#myModule.add_export_flag_CC(['-D__UNIX_JACK__'])
|
||||
#myModule.add_export_flag_LD("-ljack")
|
||||
# Linux PulseAudio API
|
||||
myModule.add_export_flag_CC(['-D__LINUX_PULSE__'])
|
||||
myModule.add_export_flag_LD("-lpulse-simple")
|
||||
myModule.add_export_flag_LD("-lpulse")
|
||||
#myModule.add_export_flag_CC(['-D__LINUX_OSS__'])
|
||||
# ...
|
||||
myModule.add_module_depend(['etk'])
|
||||
elif target.name=="MacOs":
|
||||
myModule.add_src_file([
|
||||
'airtaudio/api/Core.cpp',
|
||||
'airtaudio/api/Oss.cpp'
|
||||
])
|
||||
# MacOsX core
|
||||
myModule.add_export_flag_CC(['-D__MACOSX_CORE__'])
|
||||
myModule.add_export_flag_LD("-framework CoreAudio")
|
||||
myModule.add_module_depend(['etk'])
|
||||
elif target.name=="IOs":
|
||||
myModule.add_src_file('airtaudio/api/CoreIos.mm')
|
||||
# IOsX core
|
||||
myModule.add_export_flag_CC(['-D__IOS_CORE__'])
|
||||
myModule.add_export_flag_LD("-framework CoreAudio")
|
||||
myModule.add_export_flag_LD("-framework AudioToolbox")
|
||||
myModule.add_module_depend(['etk'])
|
||||
elif target.name=="Android":
|
||||
myModule.add_src_file('airtaudio/api/Android.cpp')
|
||||
# MacOsX core
|
||||
myModule.add_export_flag_CC(['-D__ANDROID_JAVA__'])
|
||||
myModule.add_module_depend(['ewol'])
|
||||
else:
|
||||
debug.warning("unknow target for AIRTAudio : " + target.name);
|
||||
|
||||
myModule.add_export_path(tools.get_current_path(__file__))
|
||||
|
||||
|
||||
|
||||
# add the currrent module at the
|
||||
return myModule
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
77
lutin_audio_orchestra.py
Normal file
77
lutin_audio_orchestra.py
Normal file
@@ -0,0 +1,77 @@
|
||||
#!/usr/bin/python
|
||||
import lutin.module as module
|
||||
import lutin.tools as tools
|
||||
import lutin.debug as debug
|
||||
|
||||
def get_desc():
|
||||
return "audio_orchestra : Generic wrapper on all audio interface"
|
||||
|
||||
|
||||
def create(target):
|
||||
myModule = module.Module(__file__, 'audio_orchestra', 'LIBRARY')
|
||||
|
||||
myModule.add_src_file([
|
||||
'audio/orchestra/debug.cpp',
|
||||
'audio/orchestra/status.cpp',
|
||||
'audio/orchestra/type.cpp',
|
||||
'audio/orchestra/mode.cpp',
|
||||
'audio/orchestra/state.cpp',
|
||||
'audio/orchestra/error.cpp',
|
||||
'audio/orchestra/base.cpp',
|
||||
'audio/orchestra/Interface.cpp',
|
||||
'audio/orchestra/Flags.cpp',
|
||||
'audio/orchestra/Api.cpp',
|
||||
'audio/orchestra/DeviceInfo.cpp',
|
||||
'audio/orchestra/StreamOptions.cpp',
|
||||
'audio/orchestra/api/Dummy.cpp'
|
||||
])
|
||||
myModule.add_module_depend(['audio', 'etk'])
|
||||
# add all the time the dummy interface
|
||||
myModule.add_export_flag('c++', ['-DORCHESTRA_BUILD_DUMMY'])
|
||||
# TODO : Add a FILE interface:
|
||||
|
||||
if target.name=="Windows":
|
||||
myModule.add_src_file([
|
||||
'audio/orchestra/api/Asio.cpp',
|
||||
'audio/orchestra/api/Ds.cpp',
|
||||
])
|
||||
# load optionnal API:
|
||||
myModule.add_optionnal_module_depend('asio', ["c++", "-DORCHESTRA_BUILD_ASIO"])
|
||||
myModule.add_optionnal_module_depend('ds', ["c++", "-DORCHESTRA_BUILD_DS"])
|
||||
myModule.add_optionnal_module_depend('wasapi', ["c++", "-DORCHESTRA_BUILD_WASAPI"])
|
||||
elif target.name=="Linux":
|
||||
myModule.add_src_file([
|
||||
'audio/orchestra/api/Alsa.cpp',
|
||||
'audio/orchestra/api/Jack.cpp',
|
||||
'audio/orchestra/api/Pulse.cpp',
|
||||
'audio/orchestra/api/Oss.cpp'
|
||||
])
|
||||
myModule.add_optionnal_module_depend('alsa', ["c++", "-DORCHESTRA_BUILD_ALSA"])
|
||||
myModule.add_optionnal_module_depend('jack', ["c++", "-DORCHESTRA_BUILD_JACK"])
|
||||
myModule.add_optionnal_module_depend('pulse', ["c++", "-DORCHESTRA_BUILD_PULSE"])
|
||||
myModule.add_optionnal_module_depend('oss', ["c++", "-DORCHESTRA_BUILD_OSS"])
|
||||
elif target.name=="MacOs":
|
||||
myModule.add_src_file([
|
||||
'audio/orchestra/api/Core.cpp',
|
||||
'audio/orchestra/api/Oss.cpp'
|
||||
])
|
||||
# MacOsX core
|
||||
myModule.add_optionnal_module_depend('CoreAudio', ["c++", "-DORCHESTRA_BUILD_MACOSX_CORE"])
|
||||
elif target.name=="IOs":
|
||||
myModule.add_src_file('audio/orchestra/api/CoreIos.mm')
|
||||
# IOsX core
|
||||
myModule.add_optionnal_module_depend('CoreAudio', ["c++", "-DORCHESTRA_BUILD_IOS_CORE"])
|
||||
elif target.name=="Android":
|
||||
myModule.add_src_file('audio/orchestra/api/Android.cpp')
|
||||
# specidic java interface for android:
|
||||
myModule.add_optionnal_module_depend('ewolAndroidAudio', ["c++", "-DORCHESTRA_BUILD_JAVA"])
|
||||
#myModule.add_module_depend(['ewol'])
|
||||
else:
|
||||
debug.warning("unknow target for audio_orchestra : " + target.name);
|
||||
|
||||
myModule.add_export_path(tools.get_current_path(__file__))
|
||||
|
||||
# add the currrent module at the
|
||||
return myModule
|
||||
|
||||
|
Reference in New Issue
Block a user