[DEV] rework many APIs
parent 7c6a495d86
commit 028279a74f
@@ -34,29 +34,73 @@ static const char* listType[] {
 	"user3",
 	"user4"
 };
+static int32_t listTypeSize = sizeof(listType)/sizeof(char*);

-std::ostream& operator <<(std::ostream& _os, const airtaudio::type& _obj){
+std::ostream& airtaudio::operator <<(std::ostream& _os, const enum airtaudio::type& _obj) {
 	_os << listType[_obj];
 	return _os;
 }

+std::ostream& airtaudio::operator <<(std::ostream& _os, const std::vector<enum airtaudio::type>& _obj) {
+	_os << std::string("{");
+	for (size_t iii=0; iii<_obj.size(); ++iii) {
+		if (iii!=0) {
+			_os << std::string(";");
+		}
+		_os << _obj[iii];
+	}
+	_os << std::string("}");
+	return _os;
+}

+std::string airtaudio::getTypeString(enum audio::format _value) {
+	return listType[_value];
+}

+enum airtaudio::type airtaudio::getTypeFromString(const std::string& _value) {
+	for (int32_t iii=0; iii<listTypeSize; ++iii) {
+		if (_value == listType[iii]) {
+			return static_cast<enum airtaudio::type>(iii);
+		}
+	}
+	return airtaudio::type_undefined;
+}

+int32_t airtaudio::modeToIdTable(enum mode _mode) {
+	switch (_mode) {
+		case mode_unknow:
+		case mode_duplex:
+		case mode_output:
+			return 0;
+		case mode_input:
+			return 1;
+	}
+	return 0;
+}

 // Static variable definitions.
-static const uint32_t MAX_SAMPLE_RATES = 14;
-static const uint32_t SAMPLE_RATES[] = {
-	4000,
-	5512,
-	8000,
-	9600,
-	11025,
-	16000,
-	22050,
-	32000,
-	44100,
-	48000,
-	88200,
-	96000,
-	176400,
-	192000
+const std::vector<uint32_t>& airtaudio::genericSampleRate() {
+	static std::vector<uint32_t> list;
+	if (list.size() == 0) {
+		list.push_back(4000);
+		list.push_back(5512);
+		list.push_back(8000);
+		list.push_back(9600);
+		list.push_back(11025);
+		list.push_back(16000);
+		list.push_back(22050);
+		list.push_back(32000);
+		list.push_back(44100);
+		list.push_back(48000);
+		list.push_back(64000);
+		list.push_back(88200);
+		list.push_back(96000);
+		list.push_back(128000);
+		list.push_back(176400);
+		list.push_back(192000);
+	}
+	return list;
 };

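Usage sketch of the helpers introduced above (illustrative only, not part of the commit; the literal "alsa" assumes a matching entry in listType, and the airtaudio headers plus <iostream> are assumed to be included):

	// Sketch: resolve an API identifier from text and walk the generic sample-rate list.
	void sketchTypeHelpers() {
		enum airtaudio::type api = airtaudio::getTypeFromString("alsa"); // type_undefined if the name is unknown
		std::cout << "api=" << api << std::endl;                         // uses the new operator<< overload
		for (uint32_t rate : airtaudio::genericSampleRate()) {
			std::cout << "rate=" << rate << std::endl;               // 4000 ... 192000
		}
	}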
@@ -72,32 +116,32 @@ airtaudio::Api::~Api() {

 }

-enum airtaudio::errorType airtaudio::Api::openStream(airtaudio::StreamParameters *oParams,
+enum airtaudio::error airtaudio::Api::openStream(airtaudio::StreamParameters *oParams,
                                                  airtaudio::StreamParameters *iParams,
                                                  enum audio::format format,
                                                  uint32_t sampleRate,
                                                  uint32_t *bufferFrames,
                                                  airtaudio::AirTAudioCallback callback,
                                                  airtaudio::StreamOptions *options) {
-	if (m_stream.state != airtaudio::api::STREAM_CLOSED) {
+	if (m_stream.state != airtaudio::state_closed) {
 		ATA_ERROR("a stream is already open!");
-		return airtaudio::errorInvalidUse;
+		return airtaudio::error_invalidUse;
 	}
 	if (oParams && oParams->nChannels < 1) {
 		ATA_ERROR("a non-nullptr output StreamParameters structure cannot have an nChannels value less than one.");
-		return airtaudio::errorInvalidUse;
+		return airtaudio::error_invalidUse;
 	}
 	if (iParams && iParams->nChannels < 1) {
 		ATA_ERROR("a non-nullptr input StreamParameters structure cannot have an nChannels value less than one.");
-		return airtaudio::errorInvalidUse;
+		return airtaudio::error_invalidUse;
 	}
 	if (oParams == nullptr && iParams == nullptr) {
 		ATA_ERROR("input and output StreamParameters structures are both nullptr!");
-		return airtaudio::errorInvalidUse;
+		return airtaudio::error_invalidUse;
 	}
 	if (audio::getFormatBytes(format) == 0) {
 		ATA_ERROR("'format' parameter value is undefined.");
-		return airtaudio::errorInvalidUse;
+		return airtaudio::error_invalidUse;
 	}
 	uint32_t nDevices = getDeviceCount();
 	uint32_t oChannels = 0;
@@ -105,7 +149,7 @@ enum airtaudio::errorType airtaudio::Api::openStream(airtaudio::StreamParameters
 		oChannels = oParams->nChannels;
 		if (oParams->deviceId >= nDevices) {
 			ATA_ERROR("output device parameter value is invalid.");
-			return airtaudio::errorInvalidUse;
+			return airtaudio::error_invalidUse;
 		}
 	}
 	uint32_t iChannels = 0;
@@ -113,14 +157,14 @@ enum airtaudio::errorType airtaudio::Api::openStream(airtaudio::StreamParameters
 		iChannels = iParams->nChannels;
 		if (iParams->deviceId >= nDevices) {
 			ATA_ERROR("input device parameter value is invalid.");
-			return airtaudio::errorInvalidUse;
+			return airtaudio::error_invalidUse;
 		}
 	}
 	clearStreamInfo();
 	bool result;
 	if (oChannels > 0) {
 		result = probeDeviceOpen(oParams->deviceId,
-		                         airtaudio::api::OUTPUT,
+		                         airtaudio::mode_output,
 		                         oChannels,
 		                         oParams->firstChannel,
 		                         sampleRate,
@@ -129,12 +173,12 @@ enum airtaudio::errorType airtaudio::Api::openStream(airtaudio::StreamParameters
 		                         options);
 		if (result == false) {
 			ATA_ERROR("system ERROR");
-			return airtaudio::errorSystemError;
+			return airtaudio::error_systemError;
 		}
 	}
 	if (iChannels > 0) {
 		result = probeDeviceOpen(iParams->deviceId,
-		                         airtaudio::api::INPUT,
+		                         airtaudio::mode_input,
 		                         iChannels,
 		                         iParams->firstChannel,
 		                         sampleRate,
@@ -146,15 +190,15 @@ enum airtaudio::errorType airtaudio::Api::openStream(airtaudio::StreamParameters
 				closeStream();
 			}
 			ATA_ERROR("system error");
-			return airtaudio::errorSystemError;
+			return airtaudio::error_systemError;
 		}
 	}
 	m_stream.callbackInfo.callback = callback;
 	if (options != nullptr) {
 		options->numberOfBuffers = m_stream.nBuffers;
 	}
-	m_stream.state = airtaudio::api::STREAM_STOPPED;
-	return airtaudio::errorNone;
+	m_stream.state = airtaudio::state_stopped;
+	return airtaudio::error_none;
 }

 uint32_t airtaudio::Api::getDefaultInputDevice() {
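Caller sketch after the errorType -> error rename (illustrative only; the device id, audio::format_int16 and myAudioCallback are placeholder assumptions, not defined by this diff):

	// Sketch: open a stereo output stream through airtaudio::Interface.
	airtaudio::Interface audioInterface;
	audioInterface.instanciate();                     // auto-selects the first usable back-end
	airtaudio::StreamParameters outParams;
	outParams.deviceId = 0;                           // placeholder device id
	outParams.nChannels = 2;
	uint32_t bufferFrames = 256;
	enum airtaudio::error err = audioInterface.openStream(&outParams, nullptr,
	                                                      audio::format_int16,  // assumed format enumerator
	                                                      48000, &bufferFrames,
	                                                      myAudioCallback,      // placeholder airtaudio::AirTAudioCallback
	                                                      nullptr);
	if (err != airtaudio::error_none) {
		// error_invalidUse, error_systemError, ... as returned by openStream() above
	}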
@@ -167,13 +211,13 @@ uint32_t airtaudio::Api::getDefaultOutputDevice() {
 	return 0;
 }

-enum airtaudio::errorType airtaudio::Api::closeStream() {
+enum airtaudio::error airtaudio::Api::closeStream() {
 	// MUST be implemented in subclasses!
-	return airtaudio::errorNone;
+	return airtaudio::error_none;
 }

 bool airtaudio::Api::probeDeviceOpen(uint32_t /*device*/,
-                                     airtaudio::api::StreamMode /*mode*/,
+                                     airtaudio::mode /*mode*/,
                                      uint32_t /*channels*/,
                                      uint32_t /*firstChannel*/,
                                      uint32_t /*sampleRate*/,
@@ -195,23 +239,23 @@ void airtaudio::Api::tickStreamTime() {
 }

 long airtaudio::Api::getStreamLatency() {
-	if (verifyStream() != airtaudio::errorNone) {
+	if (verifyStream() != airtaudio::error_none) {
 		return 0;
 	}
 	long totalLatency = 0;
-	if (    m_stream.mode == airtaudio::api::OUTPUT
-	     || m_stream.mode == airtaudio::api::DUPLEX) {
+	if (    m_stream.mode == airtaudio::mode_output
+	     || m_stream.mode == airtaudio::mode_duplex) {
 		totalLatency = m_stream.latency[0];
 	}
-	if (    m_stream.mode == airtaudio::api::INPUT
-	     || m_stream.mode == airtaudio::api::DUPLEX) {
+	if (    m_stream.mode == airtaudio::mode_input
+	     || m_stream.mode == airtaudio::mode_duplex) {
 		totalLatency += m_stream.latency[1];
 	}
 	return totalLatency;
 }

 double airtaudio::Api::getStreamTime() {
-	if (verifyStream() != airtaudio::errorNone) {
+	if (verifyStream() != airtaudio::error_none) {
 		return 0.0f;
 	}
 #if defined(HAVE_GETTIMEOFDAY)
@@ -219,7 +263,7 @@ double airtaudio::Api::getStreamTime() {
 	// adding in the elapsed time since the last tick.
 	struct timeval then;
 	struct timeval now;
-	if (m_stream.state != airtaudio::api::STREAM_RUNNING || m_stream.streamTime == 0.0) {
+	if (m_stream.state != airtaudio::state_running || m_stream.streamTime == 0.0) {
 		return m_stream.streamTime;
 	}
 	gettimeofday(&now, nullptr);
@@ -233,28 +277,27 @@ double airtaudio::Api::getStreamTime() {
 }

 uint32_t airtaudio::Api::getStreamSampleRate() {
-	if (verifyStream() != airtaudio::errorNone) {
+	if (verifyStream() != airtaudio::error_none) {
 		return 0;
 	}
 	return m_stream.sampleRate;
 }

-enum airtaudio::errorType airtaudio::Api::verifyStream() {
-	if (m_stream.state == airtaudio::api::STREAM_CLOSED) {
+enum airtaudio::error airtaudio::Api::verifyStream() {
+	if (m_stream.state == airtaudio::state_closed) {
 		ATA_ERROR("a stream is not open!");
-		return airtaudio::errorInvalidUse;
+		return airtaudio::error_invalidUse;
 	}
-	return airtaudio::errorNone;
+	return airtaudio::error_none;
 }

 void airtaudio::Api::clearStreamInfo() {
-	m_stream.mode = airtaudio::api::UNINITIALIZED;
-	m_stream.state = airtaudio::api::STREAM_CLOSED;
+	m_stream.mode = airtaudio::mode_unknow;
+	m_stream.state = airtaudio::state_closed;
 	m_stream.sampleRate = 0;
 	m_stream.bufferSize = 0;
 	m_stream.nBuffers = 0;
 	m_stream.userFormat = audio::format_unknow;
-	m_stream.userInterleaved = true;
 	m_stream.streamTime = 0.0;
 	m_stream.apiHandle = 0;
 	m_stream.deviceBuffer = 0;
@@ -281,91 +324,80 @@ void airtaudio::Api::clearStreamInfo() {
 	}
 }

-void airtaudio::Api::setConvertInfo(airtaudio::api::StreamMode _mode, uint32_t _firstChannel) {
-	if (_mode == airtaudio::api::INPUT) { // convert device to user buffer
-		m_stream.convertInfo[_mode].inJump = m_stream.nDeviceChannels[1];
-		m_stream.convertInfo[_mode].outJump = m_stream.nUserChannels[1];
-		m_stream.convertInfo[_mode].inFormat = m_stream.deviceFormat[1];
-		m_stream.convertInfo[_mode].outFormat = m_stream.userFormat;
+void airtaudio::Api::setConvertInfo(airtaudio::mode _mode, uint32_t _firstChannel) {
+	int32_t idTable = airtaudio::modeToIdTable(_mode);
+	if (_mode == airtaudio::mode_input) { // convert device to user buffer
+		m_stream.convertInfo[idTable].inJump = m_stream.nDeviceChannels[1];
+		m_stream.convertInfo[idTable].outJump = m_stream.nUserChannels[1];
+		m_stream.convertInfo[idTable].inFormat = m_stream.deviceFormat[1];
+		m_stream.convertInfo[idTable].outFormat = m_stream.userFormat;
 	} else { // convert user to device buffer
-		m_stream.convertInfo[_mode].inJump = m_stream.nUserChannels[0];
-		m_stream.convertInfo[_mode].outJump = m_stream.nDeviceChannels[0];
-		m_stream.convertInfo[_mode].inFormat = m_stream.userFormat;
-		m_stream.convertInfo[_mode].outFormat = m_stream.deviceFormat[0];
+		m_stream.convertInfo[idTable].inJump = m_stream.nUserChannels[0];
+		m_stream.convertInfo[idTable].outJump = m_stream.nDeviceChannels[0];
+		m_stream.convertInfo[idTable].inFormat = m_stream.userFormat;
+		m_stream.convertInfo[idTable].outFormat = m_stream.deviceFormat[0];
 	}
-	if (m_stream.convertInfo[_mode].inJump < m_stream.convertInfo[_mode].outJump) {
-		m_stream.convertInfo[_mode].channels = m_stream.convertInfo[_mode].inJump;
+	if (m_stream.convertInfo[idTable].inJump < m_stream.convertInfo[idTable].outJump) {
+		m_stream.convertInfo[idTable].channels = m_stream.convertInfo[idTable].inJump;
 	} else {
-		m_stream.convertInfo[_mode].channels = m_stream.convertInfo[_mode].outJump;
+		m_stream.convertInfo[idTable].channels = m_stream.convertInfo[idTable].outJump;
 	}
 	// Set up the interleave/deinterleave offsets.
-	if (m_stream.deviceInterleaved[_mode] != m_stream.userInterleaved) {
-		if (    (    _mode == airtaudio::api::OUTPUT
-		          && m_stream.deviceInterleaved[_mode])
-		     || (    _mode == airtaudio::api::INPUT
-		          && m_stream.userInterleaved)) {
-			for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
-				m_stream.convertInfo[_mode].inOffset.push_back(kkk * m_stream.bufferSize);
-				m_stream.convertInfo[_mode].outOffset.push_back(kkk);
-				m_stream.convertInfo[_mode].inJump = 1;
+	if (m_stream.deviceInterleaved[idTable] == false) {
+		if (_mode == airtaudio::mode_input) {
+			for (int32_t kkk=0; kkk<m_stream.convertInfo[idTable].channels; ++kkk) {
+				m_stream.convertInfo[idTable].inOffset.push_back(kkk * m_stream.bufferSize);
+				m_stream.convertInfo[idTable].outOffset.push_back(kkk);
+				m_stream.convertInfo[idTable].inJump = 1;
 			}
 		} else {
-			for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
-				m_stream.convertInfo[_mode].inOffset.push_back(kkk);
-				m_stream.convertInfo[_mode].outOffset.push_back(kkk * m_stream.bufferSize);
-				m_stream.convertInfo[_mode].outJump = 1;
+			for (int32_t kkk=0; kkk<m_stream.convertInfo[idTable].channels; ++kkk) {
+				m_stream.convertInfo[idTable].inOffset.push_back(kkk);
+				m_stream.convertInfo[idTable].outOffset.push_back(kkk * m_stream.bufferSize);
+				m_stream.convertInfo[idTable].outJump = 1;
 			}
 		}
 	} else { // no (de)interleaving
-		if (m_stream.userInterleaved) {
-			for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
-				m_stream.convertInfo[_mode].inOffset.push_back(kkk);
-				m_stream.convertInfo[_mode].outOffset.push_back(kkk);
-			}
-		} else {
-			for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
-				m_stream.convertInfo[_mode].inOffset.push_back(kkk * m_stream.bufferSize);
-				m_stream.convertInfo[_mode].outOffset.push_back(kkk * m_stream.bufferSize);
-				m_stream.convertInfo[_mode].inJump = 1;
-				m_stream.convertInfo[_mode].outJump = 1;
-			}
-		}
+		for (int32_t kkk=0; kkk<m_stream.convertInfo[idTable].channels; ++kkk) {
+			m_stream.convertInfo[idTable].inOffset.push_back(kkk);
+			m_stream.convertInfo[idTable].outOffset.push_back(kkk);
 		}
 	}

 	// Add channel offset.
 	if (_firstChannel > 0) {
-		if (m_stream.deviceInterleaved[_mode]) {
-			if (_mode == airtaudio::api::OUTPUT) {
-				for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
-					m_stream.convertInfo[_mode].outOffset[kkk] += _firstChannel;
+		if (m_stream.deviceInterleaved[idTable]) {
+			if (_mode == airtaudio::mode_output) {
+				for (int32_t kkk=0; kkk<m_stream.convertInfo[idTable].channels; ++kkk) {
+					m_stream.convertInfo[idTable].outOffset[kkk] += _firstChannel;
 				}
 			} else {
-				for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
-					m_stream.convertInfo[_mode].inOffset[kkk] += _firstChannel;
+				for (int32_t kkk=0; kkk<m_stream.convertInfo[idTable].channels; ++kkk) {
+					m_stream.convertInfo[idTable].inOffset[kkk] += _firstChannel;
 				}
 			}
 		} else {
-			if (_mode == airtaudio::api::OUTPUT) {
-				for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
-					m_stream.convertInfo[_mode].outOffset[kkk] += (_firstChannel * m_stream.bufferSize);
+			if (_mode == airtaudio::mode_output) {
+				for (int32_t kkk=0; kkk<m_stream.convertInfo[idTable].channels; ++kkk) {
+					m_stream.convertInfo[idTable].outOffset[kkk] += (_firstChannel * m_stream.bufferSize);
 				}
 			} else {
-				for (int32_t kkk=0; kkk<m_stream.convertInfo[_mode].channels; ++kkk) {
-					m_stream.convertInfo[_mode].inOffset[kkk] += (_firstChannel * m_stream.bufferSize);
+				for (int32_t kkk=0; kkk<m_stream.convertInfo[idTable].channels; ++kkk) {
+					m_stream.convertInfo[idTable].inOffset[kkk] += (_firstChannel * m_stream.bufferSize);
 				}
 			}
 		}
 	}
 }

-void airtaudio::Api::convertBuffer(char *_outBuffer, char *_inBuffer, airtaudio::api::ConvertInfo &_info) {
+void airtaudio::Api::convertBuffer(char *_outBuffer, char *_inBuffer, airtaudio::ConvertInfo &_info) {
 	// This function does format conversion, input/output channel compensation, and
 	// data interleaving/deinterleaving.  24-bit integers are assumed to occupy
 	// the lower three bytes of a 32-bit integer.

 	// Clear our device buffer when in/out duplex device channels are different
 	if (    _outBuffer == m_stream.deviceBuffer
-	     && m_stream.mode == airtaudio::api::DUPLEX
+	     && m_stream.mode == airtaudio::mode_duplex
 	     && m_stream.nDeviceChannels[0] < m_stream.nDeviceChannels[1]) {
 		memset(_outBuffer, 0, m_stream.bufferSize * _info.outJump * audio::getFormatBytes(_info.outFormat));
 	}
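Note that _mode is no longer used directly as an index into the per-direction tables; modeToIdTable() selects the playback/record slot instead. A small sketch of the mapping introduced earlier in this commit (illustrative only):

	// mode_output / mode_duplex / mode_unknow -> index 0 (playback slot)
	// mode_input                              -> index 1 (record slot)
	int32_t playbackId = airtaudio::modeToIdTable(airtaudio::mode_output); // == 0
	int32_t recordId = airtaudio::modeToIdTable(airtaudio::mode_input);    // == 1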
@@ -13,6 +13,7 @@
 #include <airtaudio/debug.h>

 namespace airtaudio {
+	const std::vector<uint32_t>& genericSampleRate();
 	/**
 	 * @brief Audio API specifier arguments.
 	 */
@@ -33,6 +34,11 @@ namespace airtaudio {
 		type_user3, //!< User interface 3.
 		type_user4, //!< User interface 4.
 	};
+	std::ostream& operator <<(std::ostream& _os, const enum airtaudio::type& _obj);
+	std::ostream& operator <<(std::ostream& _os, const std::vector<enum airtaudio::type>& _obj);
+	std::string getTypeString(enum audio::format _value);
+	enum airtaudio::type getTypeFromString(const std::string& _value);

 	enum state {
 		state_closed,
 		state_stopped,
@@ -45,6 +51,7 @@ namespace airtaudio {
 		mode_input,
 		mode_duplex
 	};
+	int32_t modeToIdTable(enum mode _mode);
 	// A protected structure used for buffer conversion.
 	class ConvertInfo {
 		public:
@@ -63,12 +70,11 @@ namespace airtaudio {
 		public:
 			uint32_t device[2]; // Playback and record, respectively.
 			void *apiHandle; // void pointer for API specific stream handle information
-			enum airtaudio::mode mode; // OUTPUT, INPUT, or DUPLEX.
+			enum airtaudio::mode mode; // airtaudio::mode_output, airtaudio::mode_input, or airtaudio::mode_duplex.
 			enum airtaudio::state state; // STOPPED, RUNNING, or CLOSED
 			char *userBuffer[2]; // Playback and record, respectively.
 			char *deviceBuffer;
 			bool doConvertBuffer[2]; // Playback and record, respectively.
-			bool userInterleaved;
 			bool deviceInterleaved[2]; // Playback and record, respectively.
 			bool doByteSwap[2]; // Playback and record, respectively.
 			uint32_t sampleRate;
@@ -106,17 +112,17 @@ namespace airtaudio {
 			virtual airtaudio::DeviceInfo getDeviceInfo(uint32_t _device) = 0;
 			virtual uint32_t getDefaultInputDevice();
 			virtual uint32_t getDefaultOutputDevice();
-			enum airtaudio::errorType openStream(airtaudio::StreamParameters *_outputParameters,
+			enum airtaudio::error openStream(airtaudio::StreamParameters *_outputParameters,
 			                                 airtaudio::StreamParameters *_inputParameters,
 			                                 audio::format _format,
 			                                 uint32_t _sampleRate,
 			                                 uint32_t *_bufferFrames,
 			                                 airtaudio::AirTAudioCallback _callback,
 			                                 airtaudio::StreamOptions *_options);
-			virtual enum airtaudio::errorType closeStream();
-			virtual enum airtaudio::errorType startStream() = 0;
-			virtual enum airtaudio::errorType stopStream() = 0;
-			virtual enum airtaudio::errorType abortStream() = 0;
+			virtual enum airtaudio::error closeStream();
+			virtual enum airtaudio::error startStream() = 0;
+			virtual enum airtaudio::error stopStream() = 0;
+			virtual enum airtaudio::error abortStream() = 0;
 			long getStreamLatency();
 			uint32_t getStreamSampleRate();
 			virtual double getStreamTime();
@@ -156,7 +162,7 @@ namespace airtaudio {
 			Protected common method that throws an RtError (type =
 			INVALID_USE) if a stream is not open.
 			*/
-			enum airtaudio::errorType verifyStream();
+			enum airtaudio::error verifyStream();
 			/**
 			 * @brief Protected method used to perform format, channel number, and/or interleaving
 			 * conversions between the user and device buffers.
@@ -14,8 +14,8 @@
 #undef __class__
 #define __class__ "Interface"

-std::vector<airtaudio::api::type> airtaudio::Interface::getCompiledApi() {
-	std::vector<airtaudio::api::type> apis;
+std::vector<enum airtaudio::type> airtaudio::Interface::getCompiledApi() {
+	std::vector<enum airtaudio::type> apis;
 	// The order here will control the order of RtAudio's API search in
 	// the constructor.
 	for (auto &it : m_apiAvaillable) {
@@ -26,7 +26,7 @@ std::vector<airtaudio::api::type> airtaudio::Interface::getCompiledApi() {



-void airtaudio::Interface::openRtApi(airtaudio::api::type _api) {
+void airtaudio::Interface::openRtApi(enum airtaudio::type _api) {
 	delete m_rtapi;
 	m_rtapi = nullptr;
 	for (auto &it :m_apiAvaillable) {
@@ -49,57 +49,57 @@ airtaudio::Interface::Interface() :
 	ATA_DEBUG("Add interface:");
 #if defined(__UNIX_JACK__)
 	ATA_DEBUG("	JACK");
-	addInterface(airtaudio::api::UNIX_JACK, airtaudio::api::Jack::Create);
+	addInterface(airtaudio::type_jack, airtaudio::api::Jack::Create);
 #endif
 #if defined(__LINUX_ALSA__)
 	ATA_DEBUG("	ALSA");
-	addInterface(airtaudio::api::LINUX_ALSA, airtaudio::api::Alsa::Create);
+	addInterface(airtaudio::type_alsa, airtaudio::api::Alsa::Create);
 #endif
 #if defined(__LINUX_PULSE__)
 	ATA_DEBUG("	PULSE");
-	addInterface(airtaudio::api::LINUX_PULSE, airtaudio::api::Pulse::Create);
+	addInterface(airtaudio::type_pulse, airtaudio::api::Pulse::Create);
 #endif
 #if defined(__LINUX_OSS__)
 	ATA_DEBUG("	OSS");
-	addInterface(airtaudio::api::LINUX_OSS, airtaudio::api::Oss::Create);
+	addInterface(airtaudio::type_oss, airtaudio::api::Oss::Create);
 #endif
 #if defined(__WINDOWS_ASIO__)
 	ATA_DEBUG("	ASIO");
-	addInterface(airtaudio::api::WINDOWS_ASIO, airtaudio::api::Asio::Create);
+	addInterface(airtaudio::type_asio, airtaudio::api::Asio::Create);
 #endif
 #if defined(__WINDOWS_DS__)
 	ATA_DEBUG("	DS");
-	addInterface(airtaudio::api::WINDOWS_DS, airtaudio::api::Ds::Create);
+	addInterface(airtaudio::type_ds, airtaudio::api::Ds::Create);
 #endif
 #if defined(__MACOSX_CORE__)
-	ATA_DEBUG("	MACOSX_CORE");
-	addInterface(airtaudio::api::MACOSX_CORE, airtaudio::api::Core::Create);
+	ATA_DEBUG("	CORE OSX");
+	addInterface(airtaudio::type_coreOSX, airtaudio::api::Core::Create);
 #endif
 #if defined(__IOS_CORE__)
-	ATA_DEBUG("	IOS_CORE");
-	addInterface(airtaudio::api::IOS_CORE, airtaudio::api::CoreIos::Create);
+	ATA_DEBUG("	CORE IOS");
+	addInterface(airtaudio::type_coreIOS, airtaudio::api::CoreIos::Create);
 #endif
 #if defined(__ANDROID_JAVA__)
 	ATA_DEBUG("	JAVA");
-	addInterface(airtaudio::api::ANDROID_JAVA, airtaudio::api::Android::Create);
+	addInterface(airtaudio::type_java, airtaudio::api::Android::Create);
 #endif
 #if defined(__AIRTAUDIO_DUMMY__)
 	ATA_DEBUG("	DUMMY");
-	addInterface(airtaudio::api::RTAUDIO_DUMMY, airtaudio::api::Dummy::Create);
+	addInterface(airtaudio::type_dummy, airtaudio::api::Dummy::Create);
 #endif
 }

-void airtaudio::Interface::addInterface(airtaudio::api::type _api, Api* (*_callbackCreate)()) {
-	m_apiAvaillable.push_back(std::pair<airtaudio::api::type, Api* (*)()>(_api, _callbackCreate));
+void airtaudio::Interface::addInterface(enum airtaudio::type _api, Api* (*_callbackCreate)()) {
+	m_apiAvaillable.push_back(std::pair<enum airtaudio::type, Api* (*)()>(_api, _callbackCreate));
 }

-enum airtaudio::errorType airtaudio::Interface::instanciate(airtaudio::api::type _api) {
+enum airtaudio::error airtaudio::Interface::instanciate(enum airtaudio::type _api) {
 	ATA_INFO("Instanciate API ...");
 	if (m_rtapi != nullptr) {
 		ATA_WARNING("Interface already started ...!");
-		return airtaudio::errorNone;
+		return airtaudio::error_none;
 	}
-	if (_api != airtaudio::api::UNSPECIFIED) {
+	if (_api != airtaudio::type_undefined) {
 		ATA_INFO("API specified : " << _api);
 		// Attempt to open the specified API.
 		openRtApi(_api);
@@ -107,17 +107,17 @@ enum airtaudio::errorType airtaudio::Interface::instanciate(airtaudio::api::type
 			if (m_rtapi->getDeviceCount() != 0) {
 				ATA_INFO("	==> api open");
 			}
-			return airtaudio::errorNone;
+			return airtaudio::error_none;
 		}
 		// No compiled support for specified API value. Issue a debug
 		// warning and continue as if no API was specified.
 		ATA_ERROR("RtAudio: no compiled support for specified API argument!");
-		return airtaudio::errorFail;
+		return airtaudio::error_fail;
 	}
 	ATA_INFO("Auto choice API :");
 	// Iterate through the compiled APIs and return as soon as we find
 	// one with at least one device or we reach the end of the list.
-	std::vector<airtaudio::api::type> apis = getCompiledApi();
+	std::vector<enum airtaudio::type> apis = getCompiledApi();
 	ATA_INFO("	find : " << apis.size() << " apis.");
 	for (auto &it : apis) {
 		ATA_INFO("try open ...");
@@ -132,10 +132,10 @@ enum airtaudio::errorType airtaudio::Interface::instanciate(airtaudio::api::type
 		}
 	}
 	if (m_rtapi != nullptr) {
-		return airtaudio::errorNone;
+		return airtaudio::error_none;
 	}
 	ATA_ERROR("RtAudio: no compiled API support found ... critical error!!");
-	return airtaudio::errorFail;
+	return airtaudio::error_fail;
 }

 airtaudio::Interface::~Interface() {
@@ -144,7 +144,7 @@ airtaudio::Interface::~Interface() {
 	m_rtapi = nullptr;
 }

-enum airtaudio::errorType airtaudio::Interface::openStream(
+enum airtaudio::error airtaudio::Interface::openStream(
 		airtaudio::StreamParameters* _outputParameters,
 		airtaudio::StreamParameters* _inputParameters,
 		audio::format _format,
@@ -153,7 +153,7 @@ enum airtaudio::errorType airtaudio::Interface::openStream(
 		airtaudio::AirTAudioCallback _callback,
 		airtaudio::StreamOptions* _options) {
 	if (m_rtapi == nullptr) {
-		return airtaudio::errorInputNull;
+		return airtaudio::error_inputNull;
 	}
 	return m_rtapi->openStream(_outputParameters,
 	                           _inputParameters,
@@ -77,7 +77,7 @@ namespace airtaudio {
 			/**
 			 * @brief Create an interface instance
 			 */
-			enum airtaudio::errorType instanciate(enum airtaudio::type _api = airtaudio::type_undefined);
+			enum airtaudio::error instanciate(enum airtaudio::type _api = airtaudio::type_undefined);
 			/**
 			 * @return the audio API specifier for the current instance of airtaudio.
 			 */
@@ -185,7 +185,7 @@ namespace airtaudio {
 			 * @param _errorCallback A client-defined function that will be invoked
 			 *                       when an error has occured.
 			 */
-			enum airtaudio::errorType openStream(airtaudio::StreamParameters *_outputParameters,
+			enum airtaudio::error openStream(airtaudio::StreamParameters *_outputParameters,
 			                                 airtaudio::StreamParameters *_inputParameters,
 			                                 enum audio::format _format,
 			                                 uint32_t _sampleRate,
@@ -199,9 +199,9 @@ namespace airtaudio {
 			 * If a stream is not open, this function issues a warning and
 			 * returns (no exception is thrown).
 			 */
-			enum airtaudio::errorType closeStream() {
+			enum airtaudio::error closeStream() {
 				if (m_rtapi == nullptr) {
-					return airtaudio::errorInputNull;
+					return airtaudio::error_inputNull;
 				}
 				return m_rtapi->closeStream();
 			}
@@ -213,9 +213,9 @@ namespace airtaudio {
 			 * stream is not open. A warning is issued if the stream is already
 			 * running.
 			 */
-			enum airtaudio::errorType startStream() {
+			enum airtaudio::error startStream() {
 				if (m_rtapi == nullptr) {
-					return airtaudio::errorInputNull;
+					return airtaudio::error_inputNull;
 				}
 				return m_rtapi->startStream();
 			}
@@ -227,9 +227,9 @@ namespace airtaudio {
 			 * stream is not open. A warning is issued if the stream is already
 			 * stopped.
 			 */
-			enum airtaudio::errorType stopStream() {
+			enum airtaudio::error stopStream() {
 				if (m_rtapi == nullptr) {
-					return airtaudio::errorInputNull;
+					return airtaudio::error_inputNull;
 				}
 				return m_rtapi->stopStream();
 			}
@@ -240,9 +240,9 @@ namespace airtaudio {
 			 * stream is not open. A warning is issued if the stream is already
 			 * stopped.
 			 */
-			enum airtaudio::errorType abortStream() {
+			enum airtaudio::error abortStream() {
 				if (m_rtapi == nullptr) {
-					return airtaudio::errorInputNull;
+					return airtaudio::error_inputNull;
 				}
 				return m_rtapi->abortStream();
 			}
@@ -10,76 +10,15 @@
 #define __AIRTAUDIO_STREAM_OPTION_H__

 namespace airtaudio {

-	/**
-	 * @brief The structure for specifying stream options.
-	 *
-	 * The following flags can be OR'ed together to allow a client to
-	 * make changes to the default stream behavior:
-	 *
-	 * - \e RTAUDIO_NONINTERLEAVED: Use non-interleaved buffers (default = interleaved).
-	 * - \e RTAUDIO_MINIMIZE_LATENCY: Attempt to set stream parameters for lowest possible latency.
-	 * - \e RTAUDIO_HOG_DEVICE: Attempt grab device for exclusive use.
-	 * - \e RTAUDIO_SCHEDULE_REALTIME: Attempt to select realtime scheduling for callback thread.
-	 * - \e RTAUDIO_ALSA_USE_DEFAULT: Use the "default" PCM device (ALSA only).
-	 *
-	 * By default, RtAudio streams pass and receive audio data from the
-	 * client in an interleaved format. By passing the
-	 * RTAUDIO_NONINTERLEAVED flag to the openStream() function, audio
-	 * data will instead be presented in non-interleaved buffers. In
-	 * this case, each buffer argument in the RtAudioCallback function
-	 * will point to a single array of data, with \c nFrames samples for
-	 * each channel concatenated back-to-back. For example, the first
-	 * sample of data for the second channel would be located at index \c
-	 * nFrames (assuming the \c buffer pointer was recast to the correct
-	 * data type for the stream).
-	 *
-	 * Certain audio APIs offer a number of parameters that influence the
-	 * I/O latency of a stream. By default, RtAudio will attempt to set
-	 * these parameters internally for robust (glitch-free) performance
-	 * (though some APIs, like Windows Direct Sound, make this difficult).
-	 * By passing the RTAUDIO_MINIMIZE_LATENCY flag to the openStream()
-	 * function, internal stream settings will be influenced in an attempt
-	 * to minimize stream latency, though possibly at the expense of stream
-	 * performance.
-	 *
-	 * If the RTAUDIO_HOG_DEVICE flag is set, RtAudio will attempt to
-	 * open the input and/or output stream device(s) for exclusive use.
-	 * Note that this is not possible with all supported audio APIs.
-	 *
-	 * If the RTAUDIO_SCHEDULE_REALTIME flag is set, RtAudio will attempt
-	 * to select realtime scheduling (round-robin) for the callback thread.
-	 * The \c priority parameter will only be used if the RTAUDIO_SCHEDULE_REALTIME
-	 * flag is set. It defines the thread's realtime priority.
-	 *
-	 * If the RTAUDIO_ALSA_USE_DEFAULT flag is set, RtAudio will attempt to
-	 * open the "default" PCM device when using the ALSA API. Note that this
-	 * will override any specified input or output device id.
-	 *
-	 * The \c numberOfBuffers parameter can be used to control stream
-	 * latency in the Windows DirectSound, Linux OSS, and Linux Alsa APIs
-	 * only. A value of two is usually the smallest allowed. Larger
-	 * numbers can potentially result in more robust stream performance,
-	 * though likely at the cost of stream latency. The value set by the
-	 * user is replaced during execution of the RtAudio::openStream()
-	 * function by the value actually used by the system.
-	 *
-	 * The \c streamName parameter can be used to set the client name
-	 * when using the Jack API. By default, the client name is set to
-	 * RtApiJack. However, if you wish to create multiple instances of
-	 * RtAudio with Jack, each instance must have a unique client name.
-	 */
 	class StreamOptions {
 		public:
-			airtaudio::streamFlags flags; //!< A bit-mask of stream flags (RTAUDIO_NONINTERLEAVED, RTAUDIO_MINIMIZE_LATENCY, RTAUDIO_HOG_DEVICE, RTAUDIO_ALSA_USE_DEFAULT).
+			airtaudio::Flags flags; //!< A bit-mask of stream flags
 			uint32_t numberOfBuffers; //!< Number of stream buffers.
 			std::string streamName; //!< A stream name (currently used only in Jack).
-			int32_t priority; //!< Scheduling priority of callback thread (only used with flag RTAUDIO_SCHEDULE_REALTIME).
 			// Default constructor.
 			StreamOptions() :
-			  flags(0),
-			  numberOfBuffers(0),
-			  priority(0) {}
+			  flags(),
+			  numberOfBuffers(0){}
 	};
 };

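StreamOptions now carries only the flags, the buffer count and the stream name; the RtAudio flag documentation and the priority member are gone. A construction sketch (illustrative only; airtaudio::Flags is assumed to be default-constructible, as the new constructor implies):

	airtaudio::StreamOptions options;        // flags default-constructed, numberOfBuffers == 0
	options.numberOfBuffers = 4;             // still consulted by the DS/OSS/ALSA back-ends
	options.streamName = "my-jack-client";   // Jack client name, as before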
@@ -35,6 +35,8 @@ struct AlsaHandle {
 	AlsaHandle() :
 	  synchronized(false),
 	  runnable(false) {
+		handles[0] = nullptr;
+		handles[1] = nullptr;
 		xrun[0] = false;
 		xrun[1] = false;
 	}
@@ -47,7 +49,7 @@ airtaudio::api::Alsa::Alsa() {
 }

 airtaudio::api::Alsa::~Alsa() {
-	if (m_stream.state != STREAM_CLOSED) {
+	if (m_stream.state != airtaudio::state_closed) {
 		closeStream();
 	}
 }
@@ -64,16 +66,16 @@ uint32_t airtaudio::api::Alsa::getDeviceCount() {
 		sprintf(name, "hw:%d", card);
 		result = snd_ctl_open(&handle, name, 0);
 		if (result < 0) {
-			ATA_ERROR("airtaudio::api::Alsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror(result) << ".");
-			// TODO : Return error airtaudio::errorWarning;
+			ATA_ERROR("control open, card = " << card << ", " << snd_strerror(result) << ".");
+			// TODO : Return error airtaudio::error_warning;
 			goto nextcard;
 		}
 		subdevice = -1;
 		while(1) {
 			result = snd_ctl_pcm_next_device(handle, &subdevice);
 			if (result < 0) {
-				ATA_ERROR("airtaudio::api::Alsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror(result) << ".");
-				// TODO : Return error airtaudio::errorWarning;
+				ATA_ERROR("control next device, card = " << card << ", " << snd_strerror(result) << ".");
+				// TODO : Return error airtaudio::error_warning;
 				break;
 			}
 			if (subdevice < 0) {
@@ -107,14 +109,14 @@ airtaudio::DeviceInfo airtaudio::api::Alsa::getDeviceInfo(uint32_t _device) {
 		sprintf(name, "hw:%d", card);
 		result = snd_ctl_open(&chandle, name, SND_CTL_NONBLOCK);
 		if (result < 0) {
-			ATA_WARNING("airtaudio::api::Alsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror(result) << ".");
+			ATA_WARNING("control open, card = " << card << ", " << snd_strerror(result) << ".");
 			goto nextcard;
 		}
 		subdevice = -1;
 		while(1) {
 			result = snd_ctl_pcm_next_device(chandle, &subdevice);
 			if (result < 0) {
-				ATA_WARNING("airtaudio::api::Alsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror(result) << ".");
+				ATA_WARNING("control next device, card = " << card << ", " << snd_strerror(result) << ".");
 				break;
 			}
 			if (subdevice < 0) {
@@ -139,26 +141,26 @@ airtaudio::DeviceInfo airtaudio::api::Alsa::getDeviceInfo(uint32_t _device) {
 		nDevices++;
 	}
 	if (nDevices == 0) {
-		ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: no devices found!");
-		// TODO : airtaudio::errorInvalidUse;
+		ATA_ERROR("no devices found!");
+		// TODO : airtaudio::error_invalidUse;
 		return info;
 	}
 	if (_device >= nDevices) {
-		ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: device ID is invalid!");
-		// TODO : airtaudio::errorInvalidUse;
+		ATA_ERROR("device ID is invalid!");
+		// TODO : airtaudio::error_invalidUse;
 		return info;
 	}

 foundDevice:
 	// If a stream is already open, we cannot probe the stream devices.
 	// Thus, use the saved results.
-	if (    m_stream.state != STREAM_CLOSED
+	if (    m_stream.state != airtaudio::state_closed
 	     && (    m_stream.device[0] == _device
 	          || m_stream.device[1] == _device)) {
 		snd_ctl_close(chandle);
 		if (_device >= m_devices.size()) {
-			ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: device ID was not present before stream was opened.");
-			// TODO : return airtaudio::errorWarning;
+			ATA_ERROR("device ID was not present before stream was opened.");
+			// TODO : return airtaudio::error_warning;
 			return info;
 		}
 		return m_devices[ _device ];
@@ -184,16 +186,16 @@ foundDevice:
 	}
 	result = snd_pcm_open(&phandle, name, stream, openMode | SND_PCM_NONBLOCK);
 	if (result < 0) {
-		ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << ".");
-		// TODO : Return airtaudio::errorWarning;
+		ATA_ERROR("snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << ".");
+		// TODO : Return airtaudio::error_warning;
 		goto captureProbe;
 	}
 	// The device is open ... fill the parameter structure.
 	result = snd_pcm_hw_params_any(phandle, params);
 	if (result < 0) {
 		snd_pcm_close(phandle);
-		ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << ".");
-		// TODO : Return airtaudio::errorWarning;
+		ATA_ERROR("snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << ".");
+		// TODO : Return airtaudio::error_warning;
 		goto captureProbe;
 	}
 	// Get output channel information.
@@ -201,8 +203,8 @@ foundDevice:
 	result = snd_pcm_hw_params_get_channels_max(params, &value);
 	if (result < 0) {
 		snd_pcm_close(phandle);
-		ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror(result) << ".");
-		// TODO : Return airtaudio::errorWarning;
+		ATA_ERROR("error getting device (" << name << ") output channels, " << snd_strerror(result) << ".");
+		// TODO : Return airtaudio::error_warning;
 		goto captureProbe;
 	}
 	info.outputChannels = value;
@@ -225,8 +227,8 @@ captureProbe:
 	}
 	result = snd_pcm_open(&phandle, name, stream, openMode | SND_PCM_NONBLOCK);
 	if (result < 0) {
-		ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << ".");
-		// TODO : Return airtaudio::errorWarning;
+		ATA_ERROR("snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << ".");
+		// TODO : Return airtaudio::error_warning;
 		if (info.outputChannels == 0) {
 			return info;
 		}
@@ -236,8 +238,8 @@ captureProbe:
 	result = snd_pcm_hw_params_any(phandle, params);
 	if (result < 0) {
 		snd_pcm_close(phandle);
-		ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << ".");
-		// TODO : Return airtaudio::errorWarning;
+		ATA_ERROR("snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << ".");
+		// TODO : Return airtaudio::error_warning;
 		if (info.outputChannels == 0) {
 			return info;
 		}
@@ -246,8 +248,8 @@ captureProbe:
 	result = snd_pcm_hw_params_get_channels_max(params, &value);
 	if (result < 0) {
 		snd_pcm_close(phandle);
-		ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror(result) << ".");
-		// TODO : Return airtaudio::errorWarning;
+		ATA_ERROR("error getting device (" << name << ") input channels, " << snd_strerror(result) << ".");
+		// TODO : Return airtaudio::error_warning;
 		if (info.outputChannels == 0) {
 			return info;
 		}
@@ -281,29 +283,29 @@ probeParameters:
 	snd_pcm_info_set_stream(pcminfo, stream);
 	result = snd_pcm_open(&phandle, name, stream, openMode | SND_PCM_NONBLOCK);
 	if (result < 0) {
-		ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << ".");
-		// TODO : Return airtaudio::errorWarning;
+		ATA_ERROR("snd_pcm_open error for device (" << name << "), " << snd_strerror(result) << ".");
+		// TODO : Return airtaudio::error_warning;
 		return info;
 	}
 	// The device is open ... fill the parameter structure.
 	result = snd_pcm_hw_params_any(phandle, params);
 	if (result < 0) {
 		snd_pcm_close(phandle);
-		ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << ".");
-		// TODO : Return airtaudio::errorWarning;
+		ATA_ERROR("snd_pcm_hw_params error for device (" << name << "), " << snd_strerror(result) << ".");
+		// TODO : Return airtaudio::error_warning;
 		return info;
 	}
 	// Test our discrete set of sample rate values.
 	info.sampleRates.clear();
-	for (uint32_t i=0; i<MAX_SAMPLE_RATES; i++) {
-		if (snd_pcm_hw_params_test_rate(phandle, params, SAMPLE_RATES[i], 0) == 0) {
-			info.sampleRates.push_back(SAMPLE_RATES[i]);
+	for (auto &it : airtaudio::genericSampleRate()) {
+		if (snd_pcm_hw_params_test_rate(phandle, params, it, 0) == 0) {
+			info.sampleRates.push_back(it);
 		}
 	}
 	if (info.sampleRates.size() == 0) {
 		snd_pcm_close(phandle);
-		ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: no supported sample rates found for device (" << name << ").");
-		// TODO : Return airtaudio::errorWarning;
+		ATA_ERROR("no supported sample rates found for device (" << name << ").");
+		// TODO : Return airtaudio::error_warning;
 		return info;
 	}
 	// Probe the supported data formats ... we don't care about endian-ness just yet
@ -335,8 +337,8 @@ probeParameters:
}
// Check that we have at least one supported format
if (info.nativeFormats.size() == 0) {
-ATA_ERROR("airtaudio::api::Alsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.");
+ATA_ERROR("pcm device (" << name << ") data format not supported by RtAudio.");
-// TODO : Return airtaudio::errorWarning;
+// TODO : Return airtaudio::error_warning;
return info;
}
// Get the device name
@ -362,7 +364,7 @@ void airtaudio::api::Alsa::saveDeviceInfo() {
}

bool airtaudio::api::Alsa::probeDeviceOpen(uint32_t _device,
-airtaudio::api::StreamMode _mode,
+airtaudio::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
@ -374,66 +376,64 @@ bool airtaudio::api::Alsa::probeDeviceOpen(uint32_t _device,
int32_t result, subdevice, card;
char name[64];
snd_ctl_t *chandle;
-if (_options && _options->flags & airtaudio::ALSA_USE_DEFAULT) {
-snprintf(name, sizeof(name), "%s", "default");
-} else {
-// Count cards and devices
-card = -1;
-snd_card_next(&card);
-while (card >= 0) {
-sprintf(name, "hw:%d", card);
-result = snd_ctl_open(&chandle, name, SND_CTL_NONBLOCK);
-if (result < 0) {
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror(result) << ".");
-return false;
-}
-subdevice = -1;
-while(1) {
-result = snd_ctl_pcm_next_device(chandle, &subdevice);
-if (result < 0) break;
-if (subdevice < 0) break;
-if (nDevices == _device) {
-sprintf(name, "hw:%d,%d", card, subdevice);
-snd_ctl_close(chandle);
-goto foundDevice;
-}
-nDevices++;
-}
-snd_ctl_close(chandle);
-snd_card_next(&card);
+// Count cards and devices
+card = -1;
+// NOTE : Find the device name : [BEGIN]
+snd_card_next(&card);
+while (card >= 0) {
+sprintf(name, "hw:%d", card);
+result = snd_ctl_open(&chandle, name, SND_CTL_NONBLOCK);
+if (result < 0) {
+ATA_ERROR("control open, card = " << card << ", " << snd_strerror(result) << ".");
+return false;
}
-result = snd_ctl_open(&chandle, "default", SND_CTL_NONBLOCK);
-if (result == 0) {
+subdevice = -1;
+while(1) {
+result = snd_ctl_pcm_next_device(chandle, &subdevice);
+if (result < 0) break;
+if (subdevice < 0) break;
if (nDevices == _device) {
-strcpy(name, "default");
+sprintf(name, "hw:%d,%d", card, subdevice);
+snd_ctl_close(chandle);
goto foundDevice;
}
nDevices++;
}
-if (nDevices == 0) {
-// This should not happen because a check is made before this function is called.
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: no devices found!");
-return false;
-}
-if (_device >= nDevices) {
-// This should not happen because a check is made before this function is called.
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: device ID is invalid!");
-return false;
-}
+snd_ctl_close(chandle);
+snd_card_next(&card);
}
+result = snd_ctl_open(&chandle, "default", SND_CTL_NONBLOCK);
+if (result == 0) {
+if (nDevices == _device) {
+strcpy(name, "default");
+goto foundDevice;
+}
+nDevices++;
+}
+if (nDevices == 0) {
+// This should not happen because a check is made before this function is called.
+ATA_ERROR("no devices found!");
+return false;
+}
+if (_device >= nDevices) {
+// This should not happen because a check is made before this function is called.
+ATA_ERROR("device ID is invalid!");
+return false;
+}
+// NOTE : Find the device name : [ END ]

foundDevice:
// The getDeviceInfo() function will not work for a device that is
// already open. Thus, we'll probe the system before opening a
// stream and save the results for use by getDeviceInfo().
-if ( _mode == OUTPUT
+if ( _mode == airtaudio::mode_output
-|| ( _mode == INPUT
+|| ( _mode == airtaudio::mode_input
-&& m_stream.mode != OUTPUT)) {
+&& m_stream.mode != airtaudio::mode_output)) {
// only do once
this->saveDeviceInfo();
}
snd_pcm_stream_t stream;
-if (_mode == OUTPUT) {
+if (_mode == airtaudio::mode_output) {
stream = SND_PCM_STREAM_PLAYBACK;
} else {
stream = SND_PCM_STREAM_CAPTURE;
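The reworked lookup above walks every ALSA card and PCM sub-device before falling back to the "default" control. A reduced sketch of that enumeration, with error handling trimmed; countAlsaPcmDevices is an illustrative name, not something this commit defines:

#include <alsa/asoundlib.h>
#include <cstdint>

// Count every PCM sub-device reachable through the "hw:<card>" controls,
// mirroring the snd_card_next()/snd_ctl_pcm_next_device() walk above.
int32_t countAlsaPcmDevices() {
    int32_t count = 0;
    int card = -1;
    snd_card_next(&card);
    while (card >= 0) {
        char name[64];
        sprintf(name, "hw:%d", card);
        snd_ctl_t* chandle = nullptr;
        if (snd_ctl_open(&chandle, name, SND_CTL_NONBLOCK) >= 0) {
            int subdevice = -1;
            while (true) {
                // subdevice becomes -1 when the card has no further PCM devices.
                if (snd_ctl_pcm_next_device(chandle, &subdevice) < 0 || subdevice < 0) {
                    break;
                }
                ++count;
            }
            snd_ctl_close(chandle);
        }
        snd_card_next(&card);
    }
    return count;
}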
@ -442,10 +442,10 @@ foundDevice:
int32_t openMode = SND_PCM_ASYNC;
result = snd_pcm_open(&phandle, name, stream, openMode);
if (result < 0) {
-if (_mode == OUTPUT) {
+if (_mode == airtaudio::mode_output) {
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.");
+ATA_ERROR("pcm device (" << name << ") won't open for output.");
} else {
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.");
+ATA_ERROR("pcm device (" << name << ") won't open for input.");
}
return false;
}
@ -455,33 +455,20 @@ foundDevice:
result = snd_pcm_hw_params_any(phandle, hw_params);
if (result < 0) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror(result) << ".");
+ATA_ERROR("error getting pcm device (" << name << ") parameters, " << snd_strerror(result) << ".");
return false;
}
-// Set access ... check user preference.
-if ( _options != nullptr
-&& _options->flags & airtaudio::NONINTERLEAVED) {
-m_stream.userInterleaved = false;
+// Open stream all time in interleave mode (by default): (open in non interleave if we have no choice
+result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED);
+if (result < 0) {
result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED);
-if (result < 0) {
-result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED);
-m_stream.deviceInterleaved[_mode] = true;
-} else {
-m_stream.deviceInterleaved[_mode] = false;
-}
+m_stream.deviceInterleaved[modeToIdTable(_mode)] = false;
} else {
-m_stream.userInterleaved = true;
-result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED);
-if (result < 0) {
-result = snd_pcm_hw_params_set_access(phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED);
-m_stream.deviceInterleaved[_mode] = false;
-} else {
-m_stream.deviceInterleaved[_mode] = true;
-}
+m_stream.deviceInterleaved[modeToIdTable(_mode)] = true;
}
if (result < 0) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror(result) << ".");
+ATA_ERROR("error setting pcm device (" << name << ") access, " << snd_strerror(result) << ".");
return false;
}
// Determine how to set the device format.
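The access setup no longer honours a user non-interleaved preference: it always requests interleaved access first and only falls back to non-interleaved when the driver refuses, remembering what the device accepted so the later buffer-conversion stage can compensate. A condensed sketch of that negotiation; the plain bool out-parameter stands in for m_stream.deviceInterleaved[modeToIdTable(_mode)]:

#include <alsa/asoundlib.h>

// Try interleaved first, fall back to non-interleaved, and record the result.
bool setupAccess(snd_pcm_t* _handle, snd_pcm_hw_params_t* _hwParams, bool& _deviceInterleaved) {
    int err = snd_pcm_hw_params_set_access(_handle, _hwParams, SND_PCM_ACCESS_RW_INTERLEAVED);
    if (err < 0) {
        err = snd_pcm_hw_params_set_access(_handle, _hwParams, SND_PCM_ACCESS_RW_NONINTERLEAVED);
        _deviceInterleaved = false;
    } else {
        _deviceInterleaved = true;
    }
    // false means the device accepted neither access mode.
    return err >= 0;
}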
@ -501,7 +488,7 @@ foundDevice:
deviceFormat = SND_PCM_FORMAT_FLOAT64;
}
if (snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
-m_stream.deviceFormat[_mode] = _format;
+m_stream.deviceFormat[modeToIdTable(_mode)] = _format;
} else {
// If we get here, no supported format was found.
snd_pcm_close(phandle);
@ -512,18 +499,18 @@ foundDevice:
result = snd_pcm_hw_params_set_format(phandle, hw_params, deviceFormat);
if (result < 0) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror(result) << ".");
+ATA_ERROR("error setting pcm device (" << name << ") data format, " << snd_strerror(result) << ".");
return false;
}
// Determine whether byte-swaping is necessary.
-m_stream.doByteSwap[_mode] = false;
+m_stream.doByteSwap[modeToIdTable(_mode)] = false;
if (deviceFormat != SND_PCM_FORMAT_S8) {
result = snd_pcm_format_cpu_endian(deviceFormat);
if (result == 0) {
-m_stream.doByteSwap[_mode] = true;
+m_stream.doByteSwap[modeToIdTable(_mode)] = true;
} else if (result < 0) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror(result) << ".");
+ATA_ERROR("error getting pcm device (" << name << ") endian-ness, " << snd_strerror(result) << ".");
return false;
}
}
@ -531,37 +518,37 @@ foundDevice:
result = snd_pcm_hw_params_set_rate_near(phandle, hw_params, (uint32_t*) &_sampleRate, 0);
if (result < 0) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror(result) << ".");
+ATA_ERROR("error setting sample rate on device (" << name << "), " << snd_strerror(result) << ".");
return false;
}
// Determine the number of channels for this device. We support a possible
// minimum device channel number > than the value requested by the user.
-m_stream.nUserChannels[_mode] = _channels;
+m_stream.nUserChannels[modeToIdTable(_mode)] = _channels;
uint32_t value;
result = snd_pcm_hw_params_get_channels_max(hw_params, &value);
uint32_t deviceChannels = value;
if ( result < 0
|| deviceChannels < _channels + _firstChannel) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror(result) << ".");
+ATA_ERROR("requested channel parameters not supported by device (" << name << "), " << snd_strerror(result) << ".");
return false;
}
result = snd_pcm_hw_params_get_channels_min(hw_params, &value);
if (result < 0) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror(result) << ".");
+ATA_ERROR("error getting minimum channels for device (" << name << "), " << snd_strerror(result) << ".");
return false;
}
deviceChannels = value;
if (deviceChannels < _channels + _firstChannel) {
deviceChannels = _channels + _firstChannel;
}
-m_stream.nDeviceChannels[_mode] = deviceChannels;
+m_stream.nDeviceChannels[modeToIdTable(_mode)] = deviceChannels;
// Set the device channels.
result = snd_pcm_hw_params_set_channels(phandle, hw_params, deviceChannels);
if (result < 0) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror(result) << ".");
+ATA_ERROR("error setting channels for device (" << name << "), " << snd_strerror(result) << ".");
return false;
}
// Set the buffer (or period) size.
@ -570,26 +557,38 @@ foundDevice:
result = snd_pcm_hw_params_set_period_size_near(phandle, hw_params, &periodSize, &dir);
if (result < 0) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror(result) << ".");
+ATA_ERROR("error setting period size for device (" << name << "), " << snd_strerror(result) << ".");
return false;
}
*_bufferSize = periodSize;
// Set the buffer number, which in ALSA is referred to as the "period".
uint32_t periods = 0;
-if (_options && _options->flags & airtaudio::MINIMIZE_LATENCY) periods = 2;
-if (_options && _options->numberOfBuffers > 0) periods = _options->numberOfBuffers;
-if (periods < 2) periods = 4; // a fairly safe default value
+if ( _options != nullptr
+&& _options->flags.m_minimizeLatency == true) {
+periods = 2;
+}
+/* TODO : Chouse the number of low level buffer ...
+if ( _options != nullptr
+&& _options->numberOfBuffers > 0) {
+periods = _options->numberOfBuffers;
+}
+*/
+if (periods < 2) {
+periods = 4; // a fairly safe default value
+}
result = snd_pcm_hw_params_set_periods_near(phandle, hw_params, &periods, &dir);
if (result < 0) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror(result) << ".");
+ATA_ERROR("error setting periods for device (" << name << "), " << snd_strerror(result) << ".");
return false;
}
// If attempting to setup a duplex stream, the bufferSize parameter
// MUST be the same in both directions!
-if (m_stream.mode == OUTPUT && _mode == INPUT && *_bufferSize != m_stream.bufferSize) {
+if ( m_stream.mode == airtaudio::mode_output
+&& _mode == airtaudio::mode_input
+&& *_bufferSize != m_stream.bufferSize) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").");
+ATA_ERROR("system error setting buffer size for duplex stream on device (" << name << ").");
return false;
}
m_stream.bufferSize = *_bufferSize;
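Period selection is now an explicit block: two periods when the (new) m_minimizeLatency option flag is set, otherwise at least four, before asking ALSA for the nearest supported count. A small sketch under those assumptions:

#include <alsa/asoundlib.h>
#include <cstdint>

// Pick a period count and let ALSA round it to the nearest supported value.
bool setupPeriods(snd_pcm_t* _handle, snd_pcm_hw_params_t* _hwParams, bool _minimizeLatency) {
    uint32_t periods = _minimizeLatency ? 2 : 0;
    if (periods < 2) {
        periods = 4; // a fairly safe default value
    }
    int dir = 0;
    return snd_pcm_hw_params_set_periods_near(_handle, _hwParams, &periods, &dir) >= 0;
}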
@ -597,7 +596,7 @@ foundDevice:
result = snd_pcm_hw_params(phandle, hw_params);
if (result < 0) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror(result) << ".");
+ATA_ERROR("error installing hardware configuration on device (" << name << "), " << snd_strerror(result) << ".");
return false;
}
// Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
@ -618,50 +617,49 @@ foundDevice:
result = snd_pcm_sw_params(phandle, sw_params);
if (result < 0) {
snd_pcm_close(phandle);
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror(result) << ".");
+ATA_ERROR("error installing software configuration on device (" << name << "), " << snd_strerror(result) << ".");
return false;
}
// Set flags for buffer conversion
-m_stream.doConvertBuffer[_mode] = false;
+m_stream.doConvertBuffer[modeToIdTable(_mode)] = false;
-if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
+if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) {
-m_stream.doConvertBuffer[_mode] = true;
+m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
-if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) {
+if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) {
-m_stream.doConvertBuffer[_mode] = true;
+m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
-if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
+if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false
-&& m_stream.nUserChannels[_mode] > 1) {
+&& m_stream.nUserChannels[modeToIdTable(_mode)] > 1) {
-m_stream.doConvertBuffer[_mode] = true;
+m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
// Allocate the ApiHandle if necessary and then save.
-AlsaHandle *apiInfo = 0;
+AlsaHandle *apiInfo = nullptr;
-if (m_stream.apiHandle == 0) {
+if (m_stream.apiHandle == nullptr) {
apiInfo = (AlsaHandle *) new AlsaHandle;
if (apiInfo == nullptr) {
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error allocating AlsaHandle memory.");
+ATA_ERROR("error allocating AlsaHandle memory.");
goto error;
}
m_stream.apiHandle = (void *) apiInfo;
-apiInfo->handles[0] = 0;
-apiInfo->handles[1] = 0;
} else {
apiInfo = (AlsaHandle *) m_stream.apiHandle;
}
-apiInfo->handles[_mode] = phandle;
+apiInfo->handles[modeToIdTable(_mode)] = phandle;
phandle = 0;
// Allocate necessary internal buffers.
uint64_t bufferBytes;
-bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
+bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
-m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
+m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
-if (m_stream.userBuffer[_mode] == nullptr) {
+if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error allocating user buffer memory.");
+ATA_ERROR("error allocating user buffer memory.");
goto error;
}
-if (m_stream.doConvertBuffer[_mode]) {
+if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
bool makeBuffer = true;
-bufferBytes = m_stream.nDeviceChannels[_mode] * audio::getFormatBytes(m_stream.deviceFormat[_mode]);
+bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
-if (_mode == INPUT) {
+if (_mode == airtaudio::mode_input) {
-if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
+if ( m_stream.mode == airtaudio::mode_output
+&& m_stream.deviceBuffer) {
uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
if (bufferBytes <= bytesOut) {
makeBuffer = false;
@ -676,31 +674,31 @@ foundDevice:
}
m_stream.deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_stream.deviceBuffer == nullptr) {
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: error allocating device buffer memory.");
+ATA_ERROR("error allocating device buffer memory.");
goto error;
}
}
}
m_stream.sampleRate = _sampleRate;
m_stream.nBuffers = periods;
-m_stream.device[_mode] = _device;
+m_stream.device[modeToIdTable(_mode)] = _device;
-m_stream.state = STREAM_STOPPED;
+m_stream.state = airtaudio::state_stopped;
// Setup the buffer conversion information structure.
-if (m_stream.doConvertBuffer[_mode]) {
+if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
setConvertInfo(_mode, _firstChannel);
}
// Setup thread if necessary.
-if ( m_stream.mode == OUTPUT
+if ( m_stream.mode == airtaudio::mode_output
-&& _mode == INPUT) {
+&& _mode == airtaudio::mode_input) {
// We had already set up an output stream.
-m_stream.mode = DUPLEX;
+m_stream.mode = airtaudio::mode_duplex;
// Link the streams if possible.
apiInfo->synchronized = false;
if (snd_pcm_link(apiInfo->handles[0], apiInfo->handles[1]) == 0) {
apiInfo->synchronized = true;
} else {
-ATA_ERROR("airtaudio::api::Alsa::probeDeviceOpen: unable to synchronize input and output devices.");
+ATA_ERROR("unable to synchronize input and output devices.");
-// TODO : airtaudio::errorWarning;
+// TODO : airtaudio::error_warning;
}
} else {
m_stream.mode = _mode;
@ -710,7 +708,7 @@ foundDevice:
m_stream.callbackInfo.thread = new std::thread(alsaCallbackHandler, &m_stream.callbackInfo);
if (m_stream.callbackInfo.thread == nullptr) {
m_stream.callbackInfo.isRunning = false;
-ATA_ERROR("airtaudio::api::Alsa::error creating callback thread!");
+ATA_ERROR("creating callback thread!");
goto error;
}
}
@ -740,19 +738,19 @@ error:
free(m_stream.deviceBuffer);
m_stream.deviceBuffer = 0;
}
-m_stream.state = STREAM_CLOSED;
+m_stream.state = airtaudio::state_closed;
return false;
}

-enum airtaudio::errorType airtaudio::api::Alsa::closeStream() {
+enum airtaudio::error airtaudio::api::Alsa::closeStream() {
-if (m_stream.state == STREAM_CLOSED) {
+if (m_stream.state == airtaudio::state_closed) {
-ATA_ERROR("airtaudio::api::Alsa::closeStream(): no open stream to close!");
+ATA_ERROR("no open stream to close!");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
}
AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle;
m_stream.callbackInfo.isRunning = false;
m_stream.mutex.lock();
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
apiInfo->runnable = true;
apiInfo->runnable_cv.notify_one();
}
@ -760,14 +758,14 @@ enum airtaudio::errorType airtaudio::api::Alsa::closeStream() {
if (m_stream.callbackInfo.thread != nullptr) {
m_stream.callbackInfo.thread->join();
}
-if (m_stream.state == STREAM_RUNNING) {
+if (m_stream.state == airtaudio::state_running) {
-m_stream.state = STREAM_STOPPED;
+m_stream.state = airtaudio::state_stopped;
-if ( m_stream.mode == OUTPUT
+if ( m_stream.mode == airtaudio::mode_output
-|| m_stream.mode == DUPLEX) {
+|| m_stream.mode == airtaudio::mode_duplex) {
snd_pcm_drop(apiInfo->handles[0]);
}
-if ( m_stream.mode == INPUT
+if ( m_stream.mode == airtaudio::mode_input
-|| m_stream.mode == DUPLEX) {
+|| m_stream.mode == airtaudio::mode_duplex) {
snd_pcm_drop(apiInfo->handles[1]);
}
}
@ -792,138 +790,151 @@ enum airtaudio::errorType airtaudio::api::Alsa::closeStream() {
free(m_stream.deviceBuffer);
m_stream.deviceBuffer = 0;
}
-m_stream.mode = UNINITIALIZED;
+m_stream.mode = airtaudio::mode_unknow;
-m_stream.state = STREAM_CLOSED;
+m_stream.state = airtaudio::state_closed;
-return airtaudio::errorNone;
+return airtaudio::error_none;
}

-enum airtaudio::errorType airtaudio::api::Alsa::startStream() {
+enum airtaudio::error airtaudio::api::Alsa::startStream() {
// This method calls snd_pcm_prepare if the device isn't already in that state.
-if (verifyStream() != airtaudio::errorNone) {
+if (verifyStream() != airtaudio::error_none) {
-return airtaudio::errorFail;
+return airtaudio::error_fail;
}
-if (m_stream.state == STREAM_RUNNING) {
+if (m_stream.state == airtaudio::state_running) {
-ATA_ERROR("airtaudio::api::Alsa::startStream(): the stream is already running!");
+ATA_ERROR("the stream is already running!");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
}
std::unique_lock<std::mutex> lck(m_stream.mutex);
int32_t result = 0;
snd_pcm_state_t state;
AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle;
snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
-if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
+if ( m_stream.mode == airtaudio::mode_output
+|| m_stream.mode == airtaudio::mode_duplex) {
+if (handle[0] == nullptr) {
+ATA_ERROR("send nullptr to alsa ...");
+if (handle[1] != nullptr) {
+ATA_ERROR("note : 1 is not null");
+}
+}
state = snd_pcm_state(handle[0]);
if (state != SND_PCM_STATE_PREPARED) {
result = snd_pcm_prepare(handle[0]);
if (result < 0) {
-ATA_ERROR("airtaudio::api::Alsa::startStream: error preparing output pcm device, " << snd_strerror(result) << ".");
+ATA_ERROR("error preparing output pcm device, " << snd_strerror(result) << ".");
goto unlock;
}
}
}
-if ( ( m_stream.mode == INPUT
+if ( ( m_stream.mode == airtaudio::mode_input
-|| m_stream.mode == DUPLEX)
+|| m_stream.mode == airtaudio::mode_duplex)
&& !apiInfo->synchronized) {
+if (handle[1] == nullptr) {
+ATA_ERROR("send nullptr to alsa ...");
+if (handle[0] != nullptr) {
+ATA_ERROR("note : 0 is not null");
+}
+}
state = snd_pcm_state(handle[1]);
if (state != SND_PCM_STATE_PREPARED) {
result = snd_pcm_prepare(handle[1]);
if (result < 0) {
-ATA_ERROR("airtaudio::api::Alsa::startStream: error preparing input pcm device, " << snd_strerror(result) << ".");
+ATA_ERROR("error preparing input pcm device, " << snd_strerror(result) << ".");
goto unlock;
}
}
}
-m_stream.state = STREAM_RUNNING;
+m_stream.state = airtaudio::state_running;
unlock:
apiInfo->runnable = true;
apiInfo->runnable_cv.notify_one();
if (result >= 0) {
-return airtaudio::errorNone;
+return airtaudio::error_none;
}
-return airtaudio::errorSystemError;
+return airtaudio::error_systemError;
}

-enum airtaudio::errorType airtaudio::api::Alsa::stopStream() {
+enum airtaudio::error airtaudio::api::Alsa::stopStream() {
-if (verifyStream() != airtaudio::errorNone) {
+if (verifyStream() != airtaudio::error_none) {
-return airtaudio::errorFail;
+return airtaudio::error_fail;
}
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
-ATA_ERROR("airtaudio::api::Alsa::stopStream(): the stream is already stopped!");
+ATA_ERROR("the stream is already stopped!");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
}
-m_stream.state = STREAM_STOPPED;
+m_stream.state = airtaudio::state_stopped;
std::unique_lock<std::mutex> lck(m_stream.mutex);
int32_t result = 0;
AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle;
snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
-if ( m_stream.mode == OUTPUT
+if ( m_stream.mode == airtaudio::mode_output
-|| m_stream.mode == DUPLEX) {
+|| m_stream.mode == airtaudio::mode_duplex) {
if (apiInfo->synchronized) {
result = snd_pcm_drop(handle[0]);
} else {
result = snd_pcm_drain(handle[0]);
}
if (result < 0) {
-ATA_ERROR("airtaudio::api::Alsa::stopStream: error draining output pcm device, " << snd_strerror(result) << ".");
+ATA_ERROR("error draining output pcm device, " << snd_strerror(result) << ".");
goto unlock;
}
}
-if ( ( m_stream.mode == INPUT
+if ( ( m_stream.mode == airtaudio::mode_input
-|| m_stream.mode == DUPLEX)
+|| m_stream.mode == airtaudio::mode_duplex)
&& !apiInfo->synchronized) {
result = snd_pcm_drop(handle[1]);
if (result < 0) {
-ATA_ERROR("airtaudio::api::Alsa::stopStream: error stopping input pcm device, " << snd_strerror(result) << ".");
+ATA_ERROR("error stopping input pcm device, " << snd_strerror(result) << ".");
goto unlock;
}
}
unlock:
if (result >= 0) {
-return airtaudio::errorNone;
+return airtaudio::error_none;
}
-return airtaudio::errorSystemError;
+return airtaudio::error_systemError;
}

-enum airtaudio::errorType airtaudio::api::Alsa::abortStream() {
+enum airtaudio::error airtaudio::api::Alsa::abortStream() {
-if (verifyStream() != airtaudio::errorNone) {
+if (verifyStream() != airtaudio::error_none) {
-return airtaudio::errorFail;
+return airtaudio::error_fail;
}
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
-ATA_ERROR("airtaudio::api::Alsa::abortStream(): the stream is already stopped!");
+ATA_ERROR("the stream is already stopped!");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
}
-m_stream.state = STREAM_STOPPED;
+m_stream.state = airtaudio::state_stopped;
std::unique_lock<std::mutex> lck(m_stream.mutex);
int32_t result = 0;
AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle;
snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
-if ( m_stream.mode == OUTPUT
+if ( m_stream.mode == airtaudio::mode_output
-|| m_stream.mode == DUPLEX) {
+|| m_stream.mode == airtaudio::mode_duplex) {
result = snd_pcm_drop(handle[0]);
if (result < 0) {
-ATA_ERROR("airtaudio::api::Alsa::abortStream: error aborting output pcm device, " << snd_strerror(result) << ".");
+ATA_ERROR("error aborting output pcm device, " << snd_strerror(result) << ".");
goto unlock;
}
}
-if ( ( m_stream.mode == INPUT
+if ( ( m_stream.mode == airtaudio::mode_input
-|| m_stream.mode == DUPLEX)
+|| m_stream.mode == airtaudio::mode_duplex)
&& !apiInfo->synchronized) {
result = snd_pcm_drop(handle[1]);
if (result < 0) {
-ATA_ERROR("airtaudio::api::Alsa::abortStream: error aborting input pcm device, " << snd_strerror(result) << ".");
+ATA_ERROR("error aborting input pcm device, " << snd_strerror(result) << ".");
goto unlock;
}
}
unlock:
if (result >= 0) {
-return airtaudio::errorNone;
+return airtaudio::error_none;
}
-return airtaudio::errorSystemError;
+return airtaudio::error_systemError;
}

void airtaudio::api::Alsa::callbackEvent() {
AlsaHandle *apiInfo = (AlsaHandle *) m_stream.apiHandle;
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
std::unique_lock<std::mutex> lck(m_stream.mutex);
// TODO : Set this back ....
/*
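All of the control-flow entry points above now report the renamed enum airtaudio::error values (error_none, error_warning, error_fail, error_systemError) instead of the old errorType spellings. A caller-side sketch of the new convention, assuming the reworked airtaudio headers are on the include path; the template parameter only avoids naming a concrete front-end class:

// Start a previously opened stream and map the enum result to a boolean,
// treating warnings as non-fatal.
template<typename AIRTAUDIO_INTERFACE>
bool startOrReport(AIRTAUDIO_INTERFACE& _interface) {
    enum airtaudio::error err = _interface.startStream();
    if (err == airtaudio::error_none || err == airtaudio::error_warning) {
        return true;
    }
    // error_fail / error_systemError : the stream did not start.
    return false;
}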
@ -931,23 +942,23 @@ void airtaudio::api::Alsa::callbackEvent() {
apiInfo->runnable_cv.wait(lck);
}
*/
-if (m_stream.state != STREAM_RUNNING) {
+if (m_stream.state != airtaudio::state_running) {
return;
}
}
-if (m_stream.state == STREAM_CLOSED) {
+if (m_stream.state == airtaudio::state_closed) {
-ATA_CRITICAL("airtaudio::api::Alsa::callbackEvent(): the stream is closed ... this shouldn't happen!");
+ATA_CRITICAL("the stream is closed ... this shouldn't happen!");
-return; // TODO : notify appl: airtaudio::errorWarning;
+return; // TODO : notify appl: airtaudio::error_warning;
}
int32_t doStopStream = 0;
double streamTime = getStreamTime();
-airtaudio::streamStatus status = 0;
+enum airtaudio::status status = airtaudio::status_ok;
-if (m_stream.mode != INPUT && apiInfo->xrun[0] == true) {
+if (m_stream.mode != airtaudio::mode_input && apiInfo->xrun[0] == true) {
-status |= airtaudio::OUTPUT_UNDERFLOW;
+status = airtaudio::status_underflow;
apiInfo->xrun[0] = false;
}
-if (m_stream.mode != OUTPUT && apiInfo->xrun[1] == true) {
+if (m_stream.mode != airtaudio::mode_output && apiInfo->xrun[1] == true) {
-status |= airtaudio::INPUT_OVERFLOW;
+status = airtaudio::status_overflow;
apiInfo->xrun[1] = false;
}
doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[0],
|
doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[0],
|
||||||
@ -961,7 +972,7 @@ void airtaudio::api::Alsa::callbackEvent() {
|
|||||||
}
|
}
|
||||||
std::unique_lock<std::mutex> lck(m_stream.mutex);
|
std::unique_lock<std::mutex> lck(m_stream.mutex);
|
||||||
// The state might change while waiting on a mutex.
|
// The state might change while waiting on a mutex.
|
||||||
if (m_stream.state == STREAM_STOPPED) {
|
if (m_stream.state == airtaudio::state_stopped) {
|
||||||
goto unlock;
|
goto unlock;
|
||||||
}
|
}
|
||||||
int32_t result;
|
int32_t result;
|
||||||
@ -971,8 +982,8 @@ void airtaudio::api::Alsa::callbackEvent() {
|
|||||||
snd_pcm_sframes_t frames;
|
snd_pcm_sframes_t frames;
|
||||||
audio::format format;
|
audio::format format;
|
||||||
handle = (snd_pcm_t **) apiInfo->handles;
|
handle = (snd_pcm_t **) apiInfo->handles;
|
||||||
if ( m_stream.mode == airtaudio::api::INPUT
|
if ( m_stream.mode == airtaudio::mode_input
|
||||||
|| m_stream.mode == airtaudio::api::DUPLEX) {
|
|| m_stream.mode == airtaudio::mode_duplex) {
|
||||||
// Setup parameters.
|
// Setup parameters.
|
||||||
if (m_stream.doConvertBuffer[1]) {
|
if (m_stream.doConvertBuffer[1]) {
|
||||||
buffer = m_stream.deviceBuffer;
|
buffer = m_stream.deviceBuffer;
|
||||||
@ -1001,15 +1012,15 @@ void airtaudio::api::Alsa::callbackEvent() {
|
|||||||
apiInfo->xrun[1] = true;
|
apiInfo->xrun[1] = true;
|
||||||
result = snd_pcm_prepare(handle[1]);
|
result = snd_pcm_prepare(handle[1]);
|
||||||
if (result < 0) {
|
if (result < 0) {
|
||||||
ATA_ERROR("airtaudio::api::Alsa::callbackEvent: error preparing device after overrun, " << snd_strerror(result) << ".");
|
ATA_ERROR("error preparing device after overrun, " << snd_strerror(result) << ".");
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
ATA_ERROR("airtaudio::api::Alsa::callbackEvent: error, current state is " << snd_pcm_state_name(state) << ", " << snd_strerror(result) << ".");
|
ATA_ERROR("error, current state is " << snd_pcm_state_name(state) << ", " << snd_strerror(result) << ".");
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
ATA_ERROR("airtaudio::api::Alsa::callbackEvent: audio read error, " << snd_strerror(result) << ".");
|
ATA_ERROR("audio read error, " << snd_strerror(result) << ".");
|
||||||
}
|
}
|
||||||
// TODO : Notify application ... airtaudio::errorWarning;
|
// TODO : Notify application ... airtaudio::error_warning;
|
||||||
goto tryOutput;
|
goto tryOutput;
|
||||||
}
|
}
|
||||||
// Do byte swapping if necessary.
|
// Do byte swapping if necessary.
|
||||||
@ -1028,8 +1039,8 @@ void airtaudio::api::Alsa::callbackEvent() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
tryOutput:
|
tryOutput:
|
||||||
if ( m_stream.mode == airtaudio::api::OUTPUT
|
if ( m_stream.mode == airtaudio::mode_output
|
||||||
|| m_stream.mode == airtaudio::api::DUPLEX) {
|
|| m_stream.mode == airtaudio::mode_duplex) {
|
||||||
// Setup parameters and do buffer conversion if necessary.
|
// Setup parameters and do buffer conversion if necessary.
|
||||||
if (m_stream.doConvertBuffer[0]) {
|
if (m_stream.doConvertBuffer[0]) {
|
||||||
buffer = m_stream.deviceBuffer;
|
buffer = m_stream.deviceBuffer;
|
||||||
@ -1064,15 +1075,15 @@ tryOutput:
|
|||||||
apiInfo->xrun[0] = true;
|
apiInfo->xrun[0] = true;
|
||||||
result = snd_pcm_prepare(handle[0]);
|
result = snd_pcm_prepare(handle[0]);
|
||||||
if (result < 0) {
|
if (result < 0) {
|
||||||
ATA_ERROR("airtaudio::api::Alsa::callbackEvent: error preparing device after underrun, " << snd_strerror(result) << ".");
|
ATA_ERROR("error preparing device after underrun, " << snd_strerror(result) << ".");
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
ATA_ERROR("airtaudio::api::Alsa::callbackEvent: error, current state is " << snd_pcm_state_name(state) << ", " << snd_strerror(result) << ".");
|
ATA_ERROR("error, current state is " << snd_pcm_state_name(state) << ", " << snd_strerror(result) << ".");
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
ATA_ERROR("airtaudio::api::Alsa::callbackEvent: audio write error, " << snd_strerror(result) << ".");
|
ATA_ERROR("audio write error, " << snd_strerror(result) << ".");
|
||||||
}
|
}
|
||||||
// TODO : Notuify application airtaudio::errorWarning;
|
// TODO : Notuify application airtaudio::error_warning;
|
||||||
goto unlock;
|
goto unlock;
|
||||||
}
|
}
|
||||||
// Check stream latency
|
// Check stream latency
|
||||||
|
@ -22,10 +22,10 @@ namespace airtaudio {
|
|||||||
}
|
}
|
||||||
uint32_t getDeviceCount();
|
uint32_t getDeviceCount();
|
||||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||||
enum airtaudio::errorType closeStream();
|
enum airtaudio::error closeStream();
|
||||||
enum airtaudio::errorType startStream();
|
enum airtaudio::error startStream();
|
||||||
enum airtaudio::errorType stopStream();
|
enum airtaudio::error stopStream();
|
||||||
enum airtaudio::errorType abortStream();
|
enum airtaudio::error abortStream();
|
||||||
// This function is intended for internal use only. It must be
|
// This function is intended for internal use only. It must be
|
||||||
// public because it is called by the internal callback handler,
|
// public because it is called by the internal callback handler,
|
||||||
// which is not a member of RtAudio. External use of this function
|
// which is not a member of RtAudio. External use of this function
|
||||||
|
@ -76,46 +76,46 @@ airtaudio::DeviceInfo airtaudio::api::Android::getDeviceInfo(uint32_t _device) {
|
|||||||
return m_devices[_device];
|
return m_devices[_device];
|
||||||
}
|
}
|
||||||
|
|
||||||
enum airtaudio::errorType airtaudio::api::Android::closeStream() {
|
enum airtaudio::error airtaudio::api::Android::closeStream() {
|
||||||
ATA_INFO("Clese Stream");
|
ATA_INFO("Clese Stream");
|
||||||
// Can not close the stream now...
|
// Can not close the stream now...
|
||||||
return airtaudio::errorNone;
|
return airtaudio::error_none;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum airtaudio::errorType airtaudio::api::Android::startStream() {
|
enum airtaudio::error airtaudio::api::Android::startStream() {
|
||||||
ATA_INFO("Start Stream");
|
ATA_INFO("Start Stream");
|
||||||
// Can not close the stream now...
|
// Can not close the stream now...
|
||||||
return airtaudio::errorNone;
|
return airtaudio::error_none;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum airtaudio::errorType airtaudio::api::Android::stopStream() {
|
enum airtaudio::error airtaudio::api::Android::stopStream() {
|
||||||
ATA_INFO("Stop stream");
|
ATA_INFO("Stop stream");
|
||||||
ewol::Context& tmpContext = ewol::getContext();
|
ewol::Context& tmpContext = ewol::getContext();
|
||||||
tmpContext.audioCloseDevice(0);
|
tmpContext.audioCloseDevice(0);
|
||||||
// Can not close the stream now...
|
// Can not close the stream now...
|
||||||
return airtaudio::errorNone;
|
return airtaudio::error_none;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum airtaudio::errorType airtaudio::api::Android::abortStream() {
|
enum airtaudio::error airtaudio::api::Android::abortStream() {
|
||||||
ATA_INFO("Abort Stream");
|
ATA_INFO("Abort Stream");
|
||||||
ewol::Context& tmpContext = ewol::getContext();
|
ewol::Context& tmpContext = ewol::getContext();
|
||||||
tmpContext.audioCloseDevice(0);
|
tmpContext.audioCloseDevice(0);
|
||||||
// Can not close the stream now...
|
// Can not close the stream now...
|
||||||
return airtaudio::errorNone;
|
return airtaudio::error_none;
|
||||||
}
|
}
|
||||||
|
|
||||||
void airtaudio::api::Android::callBackEvent(void* _data,
|
void airtaudio::api::Android::callBackEvent(void* _data,
|
||||||
int32_t _frameRate) {
|
int32_t _frameRate) {
|
||||||
int32_t doStopStream = 0;
|
int32_t doStopStream = 0;
|
||||||
double streamTime = getStreamTime();
|
double streamTime = getStreamTime();
|
||||||
airtaudio::streamStatus status = 0;
|
enum airtaudio::status status = airtaudio::status_ok;
|
||||||
if (m_stream.doConvertBuffer[OUTPUT] == true) {
|
if (m_stream.doConvertBuffer[airtaudio::mode_output] == true) {
|
||||||
doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[OUTPUT],
|
doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[airtaudio::mode_output],
|
||||||
nullptr,
|
nullptr,
|
||||||
_frameRate,
|
_frameRate,
|
||||||
streamTime,
|
streamTime,
|
||||||
status);
|
status);
|
||||||
convertBuffer((char*)_data, (char*)m_stream.userBuffer[OUTPUT], m_stream.convertInfo[OUTPUT]);
|
convertBuffer((char*)_data, (char*)m_stream.userBuffer[airtaudio::mode_output], m_stream.convertInfo[airtaudio::mode_output]);
|
||||||
} else {
|
} else {
|
||||||
doStopStream = m_stream.callbackInfo.callback(_data,
|
doStopStream = m_stream.callbackInfo.callback(_data,
|
||||||
nullptr,
|
nullptr,
|
||||||
@ -142,7 +142,7 @@ void airtaudio::api::Android::androidCallBackEvent(void* _data,
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device,
|
bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device,
|
||||||
airtaudio::api::StreamMode _mode,
|
airtaudio::mode _mode,
|
||||||
uint32_t _channels,
|
uint32_t _channels,
|
||||||
uint32_t _firstChannel,
|
uint32_t _firstChannel,
|
||||||
uint32_t _sampleRate,
|
uint32_t _sampleRate,
|
||||||
@ -150,12 +150,12 @@ bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device,
|
|||||||
uint32_t *_bufferSize,
|
uint32_t *_bufferSize,
|
||||||
airtaudio::StreamOptions *_options) {
|
airtaudio::StreamOptions *_options) {
|
||||||
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
|
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
|
||||||
if (_mode != OUTPUT) {
|
if (_mode != airtaudio::mode_output) {
|
||||||
ATA_ERROR("Can not start a device input or duplex for Android ...");
|
ATA_ERROR("Can not start a device input or duplex for Android ...");
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
m_stream.userFormat = _format;
|
m_stream.userFormat = _format;
|
||||||
m_stream.nUserChannels[_mode] = _channels;
|
m_stream.nUserChannels[modeToIdTable(_mode)] = _channels;
|
||||||
ewol::Context& tmpContext = ewol::getContext();
|
ewol::Context& tmpContext = ewol::getContext();
|
||||||
bool ret = false;
|
bool ret = false;
|
||||||
if (_format == SINT8) {
|
if (_format == SINT8) {
|
||||||
@ -165,36 +165,36 @@ bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device,
|
|||||||
}
|
}
|
||||||
m_stream.bufferSize = 256;
|
m_stream.bufferSize = 256;
|
||||||
m_stream.sampleRate = _sampleRate;
|
m_stream.sampleRate = _sampleRate;
|
||||||
m_stream.doByteSwap[_mode] = false; // for endienness ...
|
m_stream.doByteSwap[modeToIdTable(_mode)] = false; // for endienness ...
|
||||||
|
|
||||||
// TODO : For now, we write it in hard ==> to bu update later ...
|
// TODO : For now, we write it in hard ==> to bu update later ...
|
||||||
m_stream.deviceFormat[_mode] = SINT16;
|
m_stream.deviceFormat[modeToIdTable(_mode)] = SINT16;
|
||||||
m_stream.nDeviceChannels[_mode] = 2;
|
m_stream.nDeviceChannels[modeToIdTable(_mode)] = 2;
|
||||||
m_stream.deviceInterleaved[_mode] = true;
|
m_stream.deviceInterleaved[modeToIdTable(_mode)] = true;
|
||||||
|
|
||||||
m_stream.doConvertBuffer[_mode] = false;
|
m_stream.doConvertBuffer[modeToIdTable(_mode)] = false;
|
||||||
if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
|
if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) {
|
||||||
m_stream.doConvertBuffer[_mode] = true;
|
m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) {
|
if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) {
|
||||||
m_stream.doConvertBuffer[_mode] = true;
|
m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
|
if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false
|
||||||
&& m_stream.nUserChannels[_mode] > 1) {
|
&& m_stream.nUserChannels[modeToIdTable(_mode)] > 1) {
|
||||||
m_stream.doConvertBuffer[_mode] = true;
|
m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
if (m_stream.doConvertBuffer[_mode] == true) {
|
if (m_stream.doConvertBuffer[modeToIdTable(_mode)] == true) {
|
||||||
// Allocate necessary internal buffers.
|
// Allocate necessary internal buffers.
|
||||||
uint64_t bufferBytes = m_stream.nUserChannels[_mode] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
|
uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
|
||||||
m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
|
m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
|
||||||
if (m_stream.userBuffer[_mode] == nullptr) {
|
if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
|
||||||
ATA_ERROR("airtaudio::api::Android::probeDeviceOpen: error allocating user buffer memory.");
|
ATA_ERROR("airtaudio::api::Android::probeDeviceOpen: error allocating user buffer memory.");
|
||||||
}
|
}
|
||||||
setConvertInfo(_mode, _firstChannel);
|
setConvertInfo(_mode, _firstChannel);
|
||||||
}
|
}
|
||||||
ATA_INFO("device format : " << m_stream.deviceFormat[_mode] << " user format : " << m_stream.userFormat);
|
ATA_INFO("device format : " << m_stream.deviceFormat[modeToIdTable(_mode)] << " user format : " << m_stream.userFormat);
|
||||||
ATA_INFO("device channels : " << m_stream.nDeviceChannels[_mode] << " user channels : " << m_stream.nUserChannels[_mode]);
|
ATA_INFO("device channels : " << m_stream.nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_stream.nUserChannels[modeToIdTable(_mode)]);
|
||||||
ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[_mode]);
|
ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[modeToIdTable(_mode)]);
|
||||||
if (ret == false) {
|
if (ret == false) {
|
||||||
ATA_ERROR("Can not open device.");
|
ATA_ERROR("Can not open device.");
|
||||||
}
|
}
|
||||||
|
@ -15,15 +15,15 @@ namespace airtaudio {
|
|||||||
public:
|
public:
|
||||||
Android();
|
Android();
|
||||||
virtual ~Android();
|
virtual ~Android();
|
||||||
airtaudio::api::type getCurrentApi() {
|
enum airtaudio::type getCurrentApi() {
|
||||||
return airtaudio::api::ANDROID_JAVA;
|
return airtaudio::type_java;
|
||||||
}
|
}
|
||||||
uint32_t getDeviceCount();
|
uint32_t getDeviceCount();
|
||||||
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
|
||||||
enum airtaudio::errorType closeStream();
|
enum airtaudio::error closeStream();
|
||||||
enum airtaudio::errorType startStream();
|
enum airtaudio::error startStream();
|
||||||
enum airtaudio::errorType stopStream();
|
enum airtaudio::error stopStream();
|
||||||
enum airtaudio::errorType abortStream();
|
enum airtaudio::error abortStream();
|
||||||
// This function is intended for internal use only. It must be
|
// This function is intended for internal use only. It must be
|
||||||
// public because it is called by the internal callback handler,
|
// public because it is called by the internal callback handler,
|
||||||
// which is not a member of RtAudio. External use of this function
|
// which is not a member of RtAudio. External use of this function
|
||||||
@ -33,7 +33,7 @@ namespace airtaudio {
|
|||||||
std::vector<airtaudio::DeviceInfo> m_devices;
|
std::vector<airtaudio::DeviceInfo> m_devices;
|
||||||
void saveDeviceInfo();
|
void saveDeviceInfo();
|
||||||
bool probeDeviceOpen(uint32_t _device,
|
bool probeDeviceOpen(uint32_t _device,
|
||||||
airtaudio::api::StreamMode _mode,
|
airtaudio::mode _mode,
|
||||||
uint32_t _channels,
|
uint32_t _channels,
|
||||||
uint32_t _firstChannel,
|
uint32_t _firstChannel,
|
||||||
uint32_t _sampleRate,
|
uint32_t _sampleRate,
|
||||||
|
@ -83,7 +83,7 @@ airtaudio::api::Asio::Asio() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
airtaudio::api::Asio::~Asio() {
|
airtaudio::api::Asio::~Asio() {
|
||||||
if (m_stream.state != STREAM_CLOSED) {
|
if (m_stream.state != airtaudio::state_closed) {
|
||||||
closeStream();
|
closeStream();
|
||||||
}
|
}
|
||||||
if (m_coInitialized) {
|
if (m_coInitialized) {
|
||||||
@ -109,7 +109,7 @@ rtaudio::DeviceInfo airtaudio::api::Asio::getDeviceInfo(uint32_t _device) {
|
|||||||
 return info;
 }
 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
-if (m_stream.state != STREAM_CLOSED) {
+if (m_stream.state != airtaudio::state_closed) {
 if (_device >= m_devices.size()) {
 ATA_ERROR("device ID was not present before stream was opened.");
 return info;
@@ -213,7 +213,7 @@ void airtaudio::api::Asio::saveDeviceInfo() {
 }

 bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
-airtaudio::api::StreamMode _mode,
+airtaudio::mode _mode,
 uint32_t _channels,
 uint32_t _firstChannel,
 uint32_t _sampleRate,
@@ -221,8 +221,8 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
 uint32_t* _bufferSize,
 airtaudio::StreamOptions *_options) {
 // For ASIO, a duplex stream MUST use the same driver.
-if ( _mode == INPUT
+if ( _mode == airtaudio::mode_input
-&& m_stream.mode == OUTPUT
+&& m_stream.mode == airtaudio::mode_output
 && m_stream.device[0] != _device) {
 ATA_ERROR("an ASIO duplex stream must use the same device for input and output!");
 return false;

@@ -234,8 +234,8 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
 return false;
 }
 // Only load the driver once for duplex stream.
-if ( _mode != INPUT
+if ( _mode != airtaudio::mode_input
-|| m_stream.mode != OUTPUT) {
+|| m_stream.mode != airtaudio::mode_output) {
 // The getDeviceInfo() function will not work when a stream is open
 // because ASIO does not allow multiple devices to run at the same
 // time. Thus, we'll probe the system before opening a stream and
@@ -259,17 +259,17 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
 ATA_ERROR("error (" << getAsioErrorString(result) << ") getting channel count (" << driverName << ").");
 return false;
 }
-if ( ( _mode == OUTPUT
+if ( ( _mode == airtaudio::mode_output
 && (_channels+_firstChannel) > (uint32_t) outputChannels)
-|| ( _mode == INPUT
+|| ( _mode == airtaudio::mode_input
 && (_channels+_firstChannel) > (uint32_t) inputChannels)) {
 drivers.removeCurrentDriver();
 ATA_ERROR("driver (" << driverName << ") does not support requested channel count (" << _channels << ") + offset (" << _firstChannel << ").");
 return false;
 }
-m_stream.nDeviceChannels[_mode] = _channels;
+m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels;
-m_stream.nUserChannels[_mode] = _channels;
+m_stream.nUserChannels[modeToIdTable(_mode)] = _channels;
-m_stream.channelOffset[_mode] = _firstChannel;
+m_stream.channelOffset[modeToIdTable(_mode)] = _firstChannel;
 // Verify the sample rate is supported.
 result = ASIOCanSampleRate((ASIOSampleRate) _sampleRate);
 if (result != ASE_OK) {
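The check above rejects a request whose channel count plus first-channel offset would run past what the ASIO driver reports. A standalone sketch of that bounds test (illustrative only, not part of the commit):

#include <cstdint>
#include <iostream>

// A request of _channels starting at _firstChannel fits only if it ends within _deviceChannels.
static bool channelRequestFits(uint32_t _channels, uint32_t _firstChannel, uint32_t _deviceChannels) {
	return (_channels + _firstChannel) <= _deviceChannels;
}

int main() {
	std::cout << channelRequestFits(2, 0, 2) << std::endl; // 1: stereo at offset 0 fits a 2-channel device
	std::cout << channelRequestFits(2, 1, 2) << std::endl; // 0: offset 1 pushes the request past the last channel
	return 0;
}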
@ -297,7 +297,7 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
|
|||||||
// Determine the driver data type.
|
// Determine the driver data type.
|
||||||
ASIOChannelInfo channelInfo;
|
ASIOChannelInfo channelInfo;
|
||||||
channelInfo.channel = 0;
|
channelInfo.channel = 0;
|
||||||
if (_mode == OUTPUT) {
|
if (_mode == airtaudio::mode_output) {
|
||||||
channelInfo.isInput = false;
|
channelInfo.isInput = false;
|
||||||
} else {
|
} else {
|
||||||
channelInfo.isInput = true;
|
channelInfo.isInput = true;
|
||||||
@ -309,41 +309,41 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
// Assuming WINDOWS host is always little-endian.
|
// Assuming WINDOWS host is always little-endian.
|
||||||
m_stream.doByteSwap[_mode] = false;
|
m_stream.doByteSwap[modeToIdTable(_mode)] = false;
|
||||||
m_stream.userFormat = _format;
|
m_stream.userFormat = _format;
|
||||||
m_stream.deviceFormat[_mode] = 0;
|
m_stream.deviceFormat[modeToIdTable(_mode)] = 0;
|
||||||
if ( channelInfo.type == ASIOSTInt16MSB
|
if ( channelInfo.type == ASIOSTInt16MSB
|
||||||
|| channelInfo.type == ASIOSTInt16LSB) {
|
|| channelInfo.type == ASIOSTInt16LSB) {
|
||||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
|
m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
|
||||||
if (channelInfo.type == ASIOSTInt16MSB) {
|
if (channelInfo.type == ASIOSTInt16MSB) {
|
||||||
m_stream.doByteSwap[_mode] = true;
|
m_stream.doByteSwap[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
} else if ( channelInfo.type == ASIOSTInt32MSB
|
} else if ( channelInfo.type == ASIOSTInt32MSB
|
||||||
|| channelInfo.type == ASIOSTInt32LSB) {
|
|| channelInfo.type == ASIOSTInt32LSB) {
|
||||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
|
m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
|
||||||
if (channelInfo.type == ASIOSTInt32MSB) {
|
if (channelInfo.type == ASIOSTInt32MSB) {
|
||||||
m_stream.doByteSwap[_mode] = true;
|
m_stream.doByteSwap[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
} else if ( channelInfo.type == ASIOSTFloat32MSB
|
} else if ( channelInfo.type == ASIOSTFloat32MSB
|
||||||
|| channelInfo.type == ASIOSTFloat32LSB) {
|
|| channelInfo.type == ASIOSTFloat32LSB) {
|
||||||
m_stream.deviceFormat[_mode] = RTAUDIO_FLOAT32;
|
m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_FLOAT32;
|
||||||
if (channelInfo.type == ASIOSTFloat32MSB) {
|
if (channelInfo.type == ASIOSTFloat32MSB) {
|
||||||
m_stream.doByteSwap[_mode] = true;
|
m_stream.doByteSwap[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
} else if ( channelInfo.type == ASIOSTFloat64MSB
|
} else if ( channelInfo.type == ASIOSTFloat64MSB
|
||||||
|| channelInfo.type == ASIOSTFloat64LSB) {
|
|| channelInfo.type == ASIOSTFloat64LSB) {
|
||||||
m_stream.deviceFormat[_mode] = RTAUDIO_FLOAT64;
|
m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_FLOAT64;
|
||||||
if (channelInfo.type == ASIOSTFloat64MSB) {
|
if (channelInfo.type == ASIOSTFloat64MSB) {
|
||||||
m_stream.doByteSwap[_mode] = true;
|
m_stream.doByteSwap[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
} else if ( channelInfo.type == ASIOSTInt24MSB
|
} else if ( channelInfo.type == ASIOSTInt24MSB
|
||||||
|| channelInfo.type == ASIOSTInt24LSB) {
|
|| channelInfo.type == ASIOSTInt24LSB) {
|
||||||
m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
|
m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
|
||||||
if (channelInfo.type == ASIOSTInt24MSB) {
|
if (channelInfo.type == ASIOSTInt24MSB) {
|
||||||
m_stream.doByteSwap[_mode] = true;
|
m_stream.doByteSwap[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (m_stream.deviceFormat[_mode] == 0) {
|
if (m_stream.deviceFormat[modeToIdTable(_mode)] == 0) {
|
||||||
drivers.removeCurrentDriver();
|
drivers.removeCurrentDriver();
|
||||||
ATA_ERROR("driver (" << driverName << ") data format not supported by RtAudio.");
|
ATA_ERROR("driver (" << driverName << ") data format not supported by RtAudio.");
|
||||||
return false;
|
return false;
|
||||||
@ -393,8 +393,8 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
|
|||||||
// Set to an even multiple of granularity, rounding up.
|
// Set to an even multiple of granularity, rounding up.
|
||||||
*_bufferSize = (*_bufferSize + granularity-1) / granularity * granularity;
|
*_bufferSize = (*_bufferSize + granularity-1) / granularity * granularity;
|
||||||
}
|
}
|
||||||
if ( _mode == INPUT
|
if ( _mode == airtaudio::mode_input
|
||||||
&& m_stream.mode == OUTPUT
|
&& m_stream.mode == airtaudio::mode_output
|
||||||
&& m_stream.bufferSize != *_bufferSize) {
|
&& m_stream.bufferSize != *_bufferSize) {
|
||||||
drivers.removeCurrentDriver();
|
drivers.removeCurrentDriver();
|
||||||
ATA_ERROR("input/output buffersize discrepancy!");
|
ATA_ERROR("input/output buffersize discrepancy!");
|
||||||
@@ -402,14 +402,8 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
 }
 m_stream.bufferSize = *_bufferSize;
 m_stream.nBuffers = 2;
-if ( _options != nullptr
-&& _options->flags & RTAUDIO_NONINTERLEAVED) {
-m_stream.userInterleaved = false;
-} else {
-m_stream.userInterleaved = true;
-}
 // ASIO always uses non-interleaved buffers.
-m_stream.deviceInterleaved[_mode] = false;
+m_stream.deviceInterleaved[modeToIdTable(_mode)] = false;
 // Allocate, if necessary, our AsioHandle structure for the stream.
 AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
 if (handle == nullptr) {
@ -431,8 +425,8 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
|
|||||||
// and output separately, we'll have to dispose of previously
|
// and output separately, we'll have to dispose of previously
|
||||||
// created output buffers for a duplex stream.
|
// created output buffers for a duplex stream.
|
||||||
long inputLatency, outputLatency;
|
long inputLatency, outputLatency;
|
||||||
if ( _mode == INPUT
|
if ( _mode == airtaudio::mode_input
|
||||||
&& m_stream.mode == OUTPUT) {
|
&& m_stream.mode == airtaudio::mode_output) {
|
||||||
ASIODisposeBuffers();
|
ASIODisposeBuffers();
|
||||||
if (handle->bufferInfos == nullptr) {
|
if (handle->bufferInfos == nullptr) {
|
||||||
free(handle->bufferInfos);
|
free(handle->bufferInfos);
|
||||||
@ -471,27 +465,27 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
|
|||||||
}
|
}
|
||||||
buffersAllocated = true;
|
buffersAllocated = true;
|
||||||
// Set flags for buffer conversion.
|
// Set flags for buffer conversion.
|
||||||
m_stream.doConvertBuffer[_mode] = false;
|
m_stream.doConvertBuffer[modeToIdTable(_mode)] = false;
|
||||||
if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
|
if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) {
|
||||||
m_stream.doConvertBuffer[_mode] = true;
|
m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
|
if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false
|
||||||
&& m_stream.nUserChannels[_mode] > 1) {
|
&& m_stream.nUserChannels[modeToIdTable(_mode)] > 1) {
|
||||||
m_stream.doConvertBuffer[_mode] = true;
|
m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
// Allocate necessary internal buffers
|
// Allocate necessary internal buffers
|
||||||
uint64_t bufferBytes;
|
uint64_t bufferBytes;
|
||||||
bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat);
|
bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
|
||||||
m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
|
m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
|
||||||
if (m_stream.userBuffer[_mode] == nullptr) {
|
if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
|
||||||
ATA_ERROR("error allocating user buffer memory.");
|
ATA_ERROR("error allocating user buffer memory.");
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
if (m_stream.doConvertBuffer[_mode]) {
|
if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
|
||||||
bool makeBuffer = true;
|
bool makeBuffer = true;
|
||||||
bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]);
|
bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
|
||||||
if (_mode == INPUT) {
|
if (_mode == airtaudio::mode_input) {
|
||||||
if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
|
if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) {
|
||||||
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
|
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
|
||||||
if (bufferBytes <= bytesOut) {
|
if (bufferBytes <= bytesOut) {
|
||||||
makeBuffer = false;
|
makeBuffer = false;
|
||||||
@ -512,14 +506,14 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
m_stream.sampleRate = _sampleRate;
|
m_stream.sampleRate = _sampleRate;
|
||||||
m_stream.device[_mode] = _device;
|
m_stream.device[modeToIdTable(_mode)] = _device;
|
||||||
m_stream.state = STREAM_STOPPED;
|
m_stream.state = airtaudio::state_stopped;
|
||||||
asioCallbackInfo = &m_stream.callbackInfo;
|
asioCallbackInfo = &m_stream.callbackInfo;
|
||||||
m_stream.callbackInfo.object = (void*)this;
|
m_stream.callbackInfo.object = (void*)this;
|
||||||
if ( m_stream.mode == OUTPUT
|
if ( m_stream.mode == airtaudio::mode_output
|
||||||
&& _mode == INPUT) {
|
&& _mode == airtaudio::mode_input) {
|
||||||
// We had already set up an output stream.
|
// We had already set up an output stream.
|
||||||
m_stream.mode = DUPLEX;
|
m_stream.mode = airtaudio::mode_duplex;
|
||||||
} else {
|
} else {
|
||||||
m_stream.mode = _mode;
|
m_stream.mode = _mode;
|
||||||
}
|
}
|
||||||
@ -534,7 +528,7 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
|
|||||||
// Setup the buffer conversion information structure. We don't use
|
// Setup the buffer conversion information structure. We don't use
|
||||||
// buffers to do channel offsets, so we override that parameter
|
// buffers to do channel offsets, so we override that parameter
|
||||||
// here.
|
// here.
|
||||||
if (m_stream.doConvertBuffer[_mode]) {
|
if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
|
||||||
setConvertInfo(_mode, 0);
|
setConvertInfo(_mode, 0);
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
@@ -566,13 +560,13 @@ error:
 return false;
 }

-enum airtaudio::errorType airtaudio::api::Asio::closeStream() {
+enum airtaudio::error airtaudio::api::Asio::closeStream() {
-if (m_stream.state == STREAM_CLOSED) {
+if (m_stream.state == airtaudio::state_closed) {
 ATA_ERROR("no open stream to close!");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
 }
-if (m_stream.state == STREAM_RUNNING) {
+if (m_stream.state == airtaudio::state_running) {
-m_stream.state = STREAM_STOPPED;
+m_stream.state = airtaudio::state_stopped;
 ASIOStop();
 }
 ASIODisposeBuffers();
@@ -596,20 +590,20 @@ enum airtaudio::errorType airtaudio::api::Asio::closeStream() {
 free(m_stream.deviceBuffer);
 m_stream.deviceBuffer = 0;
 }
-m_stream.mode = UNINITIALIZED;
+m_stream.mode = airtaudio::mode_unknow;
-m_stream.state = STREAM_CLOSED;
+m_stream.state = airtaudio::state_closed;
-return airtaudio::errorNone;
+return airtaudio::error_none;
 }

 bool stopThreadCalled = false;

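closeStream(), startStream(), stopStream() and abortStream() all gate on the renamed stream states (state_closed, state_stopped, state_running, state_stopping). A standalone sketch of those guards, using an illustrative enum rather than the project's types:

#include <iostream>

// Illustrative stand-in for the airtaudio stream states named in this diff.
enum class State { closed, stopped, running, stopping };

static bool canClose(State _s) { return _s != State::closed;  } // closeStream() warns on an already-closed stream
static bool canStart(State _s) { return _s != State::running; } // startStream() warns if already running
static bool canStop(State _s)  { return _s != State::stopped; } // stopStream()/abortStream() warn if already stopped

int main() {
	std::cout << canClose(State::closed) << canStart(State::running) << canStop(State::stopped) << std::endl; // 000
	return 0;
}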
enum airtaudio::errorType airtaudio::api::Asio::startStream() {
|
enum airtaudio::error airtaudio::api::Asio::startStream() {
|
||||||
if (verifyStream() != airtaudio::errorNone) {
|
if (verifyStream() != airtaudio::error_none) {
|
||||||
return airtaudio::errorFail;
|
return airtaudio::error_fail;
|
||||||
}
|
}
|
||||||
if (m_stream.state == STREAM_RUNNING) {
|
if (m_stream.state == airtaudio::state_running) {
|
||||||
ATA_ERROR("the stream is already running!");
|
ATA_ERROR("the stream is already running!");
|
||||||
return airtaudio::errorWarning;
|
return airtaudio::error_warning;
|
||||||
}
|
}
|
||||||
AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
|
AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
|
||||||
ASIOError result = ASIOStart();
|
ASIOError result = ASIOStart();
|
||||||
@ -620,49 +614,49 @@ enum airtaudio::errorType airtaudio::api::Asio::startStream() {
|
|||||||
handle->drainCounter = 0;
|
handle->drainCounter = 0;
|
||||||
handle->internalDrain = false;
|
handle->internalDrain = false;
|
||||||
ResetEvent(handle->condition);
|
ResetEvent(handle->condition);
|
||||||
m_stream.state = STREAM_RUNNING;
|
m_stream.state = airtaudio::state_running;
|
||||||
asioXRun = false;
|
asioXRun = false;
|
||||||
unlock:
|
unlock:
|
||||||
stopThreadCalled = false;
|
stopThreadCalled = false;
|
||||||
if (result == ASE_OK) {
|
if (result == ASE_OK) {
|
||||||
return airtaudio::errorNone;
|
return airtaudio::error_none;
|
||||||
}
|
}
|
||||||
return airtaudio::errorSystemError;
|
return airtaudio::error_systemError;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum airtaudio::errorType airtaudio::api::Asio::stopStream() {
|
enum airtaudio::error airtaudio::api::Asio::stopStream() {
|
||||||
if (verifyStream() != airtaudio::errorNone) {
|
if (verifyStream() != airtaudio::error_none) {
|
||||||
return airtaudio::errorFail;
|
return airtaudio::error_fail;
|
||||||
}
|
}
|
||||||
if (m_stream.state == STREAM_STOPPED) {
|
if (m_stream.state == airtaudio::state_stopped) {
|
||||||
ATA_ERROR("the stream is already stopped!");
|
ATA_ERROR("the stream is already stopped!");
|
||||||
return airtaudio::errorWarning;
|
return airtaudio::error_warning;
|
||||||
}
|
}
|
||||||
AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
|
AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
|
||||||
if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
|
if (m_stream.mode == airtaudio::mode_output || m_stream.mode == airtaudio::mode_duplex) {
|
||||||
if (handle->drainCounter == 0) {
|
if (handle->drainCounter == 0) {
|
||||||
handle->drainCounter = 2;
|
handle->drainCounter = 2;
|
||||||
WaitForSingleObject(handle->condition, INFINITE); // block until signaled
|
WaitForSingleObject(handle->condition, INFINITE); // block until signaled
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
m_stream.state = STREAM_STOPPED;
|
m_stream.state = airtaudio::state_stopped;
|
||||||
ASIOError result = ASIOStop();
|
ASIOError result = ASIOStop();
|
||||||
if (result != ASE_OK) {
|
if (result != ASE_OK) {
|
||||||
ATA_ERROR("error (" << getAsioErrorString(result) << ") stopping device.");
|
ATA_ERROR("error (" << getAsioErrorString(result) << ") stopping device.");
|
||||||
}
|
}
|
||||||
if (result == ASE_OK) {
|
if (result == ASE_OK) {
|
||||||
return airtaudio::errorNone;
|
return airtaudio::error_none;
|
||||||
}
|
}
|
||||||
return airtaudio::errorSystemError;
|
return airtaudio::error_systemError;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum airtaudio::errorType airtaudio::api::Asio::abortStream() {
|
enum airtaudio::error airtaudio::api::Asio::abortStream() {
|
||||||
if (verifyStream() != airtaudio::errorNone) {
|
if (verifyStream() != airtaudio::error_none) {
|
||||||
return airtaudio::errorFail;
|
return airtaudio::error_fail;
|
||||||
}
|
}
|
||||||
if (m_stream.state == STREAM_STOPPED) {
|
if (m_stream.state == airtaudio::state_stopped) {
|
||||||
ATA_ERROR("the stream is already stopped!");
|
ATA_ERROR("the stream is already stopped!");
|
||||||
error(airtaudio::errorWarning);
|
error(airtaudio::error_warning);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -689,11 +683,11 @@ static unsigned __stdcall asioStopStream(void *_ptr) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
|
bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
|
||||||
if ( m_stream.state == STREAM_STOPPED
|
if ( m_stream.state == airtaudio::state_stopped
|
||||||
|| m_stream.state == STREAM_STOPPING) {
|
|| m_stream.state == airtaudio::state_stopping) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (m_stream.state == STREAM_CLOSED) {
|
if (m_stream.state == airtaudio::state_closed) {
|
||||||
ATA_ERROR("the stream is closed ... this shouldn't happen!");
|
ATA_ERROR("the stream is closed ... this shouldn't happen!");
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -701,7 +695,7 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
|
|||||||
AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
|
AsioHandle *handle = (AsioHandle *) m_stream.apiHandle;
|
||||||
// Check if we were draining the stream and signal if finished.
|
// Check if we were draining the stream and signal if finished.
|
||||||
if (handle->drainCounter > 3) {
|
if (handle->drainCounter > 3) {
|
||||||
m_stream.state = STREAM_STOPPING;
|
m_stream.state = airtaudio::state_stopping;
|
||||||
if (handle->internalDrain == false) {
|
if (handle->internalDrain == false) {
|
||||||
SetEvent(handle->condition);
|
SetEvent(handle->condition);
|
||||||
} else { // spawn a thread to stop the stream
|
} else { // spawn a thread to stop the stream
|
||||||
@@ -716,12 +710,12 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
 if (handle->drainCounter == 0) {
 double streamTime = getStreamTime();
 rtaudio::streamStatus status = 0;
-if (m_stream.mode != INPUT && asioXRun == true) {
+if (m_stream.mode != airtaudio::mode_input && asioXRun == true) {
-status |= RTAUDIO_OUTPUT_UNDERFLOW;
+status |= RTAUDIO_airtaudio::status_underflow;
 asioXRun = false;
 }
-if (m_stream.mode != OUTPUT && asioXRun == true) {
+if (m_stream.mode != airtaudio::mode_output && asioXRun == true) {
-status |= RTAUDIO_INPUT_OVERFLOW;
+status |= RTAUDIO_airtaudio::mode_input_OVERFLOW;
 asioXRun = false;
 }
 int32_t cbReturnValue = info->callback(m_stream.userBuffer[0],
@ -730,7 +724,7 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
|
|||||||
streamTime,
|
streamTime,
|
||||||
status);
|
status);
|
||||||
if (cbReturnValue == 2) {
|
if (cbReturnValue == 2) {
|
||||||
m_stream.state = STREAM_STOPPING;
|
m_stream.state = airtaudio::state_stopping;
|
||||||
handle->drainCounter = 2;
|
handle->drainCounter = 2;
|
||||||
unsigned threadId;
|
unsigned threadId;
|
||||||
m_stream.callbackInfo.thread = _beginthreadex(nullptr,
|
m_stream.callbackInfo.thread = _beginthreadex(nullptr,
|
||||||
@ -747,8 +741,8 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
|
|||||||
}
|
}
|
||||||
uint32_t nChannels, bufferBytes, i, j;
|
uint32_t nChannels, bufferBytes, i, j;
|
||||||
nChannels = m_stream.nDeviceChannels[0] + m_stream.nDeviceChannels[1];
|
nChannels = m_stream.nDeviceChannels[0] + m_stream.nDeviceChannels[1];
|
||||||
if ( m_stream.mode == OUTPUT
|
if ( m_stream.mode == airtaudio::mode_output
|
||||||
|| m_stream.mode == DUPLEX) {
|
|| m_stream.mode == airtaudio::mode_duplex) {
|
||||||
bufferBytes = m_stream.bufferSize * formatBytes(m_stream.deviceFormat[0]);
|
bufferBytes = m_stream.bufferSize * formatBytes(m_stream.deviceFormat[0]);
|
||||||
if (handle->drainCounter > 1) { // write zeros to the output stream
|
if (handle->drainCounter > 1) { // write zeros to the output stream
|
||||||
for (i=0, j=0; i<nChannels; i++) {
|
for (i=0, j=0; i<nChannels; i++) {
|
||||||
@ -789,8 +783,8 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
|
|||||||
goto unlock;
|
goto unlock;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if ( m_stream.mode == INPUT
|
if ( m_stream.mode == airtaudio::mode_input
|
||||||
|| m_stream.mode == DUPLEX) {
|
|| m_stream.mode == airtaudio::mode_duplex) {
|
||||||
bufferBytes = m_stream.bufferSize * formatBytes(m_stream.deviceFormat[1]);
|
bufferBytes = m_stream.bufferSize * formatBytes(m_stream.deviceFormat[1]);
|
||||||
if (m_stream.doConvertBuffer[1]) {
|
if (m_stream.doConvertBuffer[1]) {
|
||||||
// Always interleave ASIO input data.
|
// Always interleave ASIO input data.
|
||||||
@@ -840,8 +834,8 @@ static void sampleRateChanged(ASIOSampleRate _sRate) {
 // sample rate status of an AES/EBU or S/PDIF digital input at the
 // audio device.
 RtApi* object = (RtApi*)asioCallbackInfo->object;
-enum airtaudio::errorType ret = object->stopStream()
+enum airtaudio::error ret = object->stopStream()
-if (ret != airtaudio::errorNone) {
+if (ret != airtaudio::error_none) {
 ATA_ERROR("error stop stream!");
 } else {
 ATA_ERROR("driver reports sample rate changed to " << _sRate << " ... stream stopped!!!");
@@ -17,15 +17,15 @@ namespace airtaudio {
 public:
 Asio();
 virtual ~Asio();
-airtaudio::api::type getCurrentApi() {
+enum airtaudio::type getCurrentApi() {
 return airtaudio::WINDOWS_ASIO;
 }
 uint32_t getDeviceCount();
 airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
-enum airtaudio::errorType closeStream();
+enum airtaudio::error closeStream();
-enum airtaudio::errorType startStream();
+enum airtaudio::error startStream();
-enum airtaudio::errorType stopStream();
+enum airtaudio::error stopStream();
-enum airtaudio::errorType abortStream();
+enum airtaudio::error abortStream();
 long getStreamLatency();
 // This function is intended for internal use only. It must be
 // public because it is called by the internal callback handler,
@@ -37,7 +37,7 @@ namespace airtaudio {
 void saveDeviceInfo();
 bool m_coInitialized;
 bool probeDeviceOpen(uint32_t _device,
-airtaudio::api::StreamMode _mode,
+airtaudio::mode _mode,
 uint32_t _channels,
 uint32_t _firstChannel,
 uint32_t _sampleRate,
@@ -95,7 +95,7 @@ airtaudio::api::Core::~Core() {
 // The subclass destructor gets called before the base class
 // destructor, so close an existing stream before deallocating
 // apiDeviceId memory.
-if (m_stream.state != STREAM_CLOSED) {
+if (m_stream.state != airtaudio::state_closed) {
 closeStream();
 }
 }
@@ -434,7 +434,7 @@ static OSStatus rateListener(AudioObjectID _inDevice,
 }

 bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device,
-airtaudio::api::StreamMode _mode,
+airtaudio::mode _mode,
 uint32_t _channels,
 uint32_t _firstChannel,
 uint32_t _sampleRate,
@@ -473,7 +473,7 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device,
 AudioDeviceID id = deviceList[ _device ];
 // Setup for stream mode.
 bool isInput = false;
-if (_mode == INPUT) {
+if (_mode == airtaudio::mode_input) {
 isInput = true;
 property.mScope = kAudioDevicePropertyScopeInput;
 } else {
@@ -582,7 +582,7 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device,
 *_bufferSize = (uint64_t) bufferRange.mMaximum;
 }
 if ( _options != nullptr
-&& _options->flags & MINIMIZE_LATENCY) {
+&& _options->flags.m_minimizeLatency == true) {
 *_bufferSize = (uint64_t) bufferRange.mMinimum;
 }
 // Set the buffer size. For multiple streams, I'm assuming we only
@@ -598,8 +598,8 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device,
 // If attempting to setup a duplex stream, the bufferSize parameter
 // MUST be the same in both directions!
 *_bufferSize = theSize;
-if ( m_stream.mode == OUTPUT
+if ( m_stream.mode == airtaudio::mode_output
-&& _mode == INPUT
+&& _mode == airtaudio::mode_input
 && *_bufferSize != m_stream.bufferSize) {
 ATA_ERROR("system error setting buffer size for duplex stream on device (" << _device << ").");
 return false;
@ -772,44 +772,38 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device,
|
|||||||
// Byte-swapping: According to AudioHardware.h, the stream data will
|
// Byte-swapping: According to AudioHardware.h, the stream data will
|
||||||
// always be presented in native-endian format, so we should never
|
// always be presented in native-endian format, so we should never
|
||||||
// need to byte swap.
|
// need to byte swap.
|
||||||
m_stream.doByteSwap[_mode] = false;
|
m_stream.doByteSwap[modeToIdTable(_mode)] = false;
|
||||||
// From the CoreAudio documentation, PCM data must be supplied as
|
// From the CoreAudio documentation, PCM data must be supplied as
|
||||||
// 32-bit floats.
|
// 32-bit floats.
|
||||||
m_stream.userFormat = _format;
|
m_stream.userFormat = _format;
|
||||||
m_stream.deviceFormat[_mode] = FLOAT32;
|
m_stream.deviceFormat[modeToIdTable(_mode)] = FLOAT32;
|
||||||
if (streamCount == 1) {
|
if (streamCount == 1) {
|
||||||
m_stream.nDeviceChannels[_mode] = description.mChannelsPerFrame;
|
m_stream.nDeviceChannels[modeToIdTable(_mode)] = description.mChannelsPerFrame;
|
||||||
} else {
|
} else {
|
||||||
// multiple streams
|
// multiple streams
|
||||||
m_stream.nDeviceChannels[_mode] = _channels;
|
m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels;
|
||||||
}
|
}
|
||||||
m_stream.nUserChannels[_mode] = _channels;
|
m_stream.nUserChannels[modeToIdTable(_mode)] = _channels;
|
||||||
m_stream.channelOffset[_mode] = channelOffset; // offset within a CoreAudio stream
|
m_stream.channelOffset[modeToIdTable(_mode)] = channelOffset; // offset within a CoreAudio stream
|
||||||
if ( _options != nullptr
|
m_stream.deviceInterleaved[modeToIdTable(_mode)] = true;
|
||||||
&& _options->flags & NONINTERLEAVED) {
|
|
||||||
m_stream.userInterleaved = false;
|
|
||||||
} else {
|
|
||||||
m_stream.userInterleaved = true;
|
|
||||||
}
|
|
||||||
m_stream.deviceInterleaved[_mode] = true;
|
|
||||||
if (monoMode == true) {
|
if (monoMode == true) {
|
||||||
m_stream.deviceInterleaved[_mode] = false;
|
m_stream.deviceInterleaved[modeToIdTable(_mode)] = false;
|
||||||
}
|
}
|
||||||
// Set flags for buffer conversion.
|
// Set flags for buffer conversion.
|
||||||
m_stream.doConvertBuffer[_mode] = false;
|
m_stream.doConvertBuffer[modeToIdTable(_mode)] = false;
|
||||||
if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
|
if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) {
|
||||||
m_stream.doConvertBuffer[_mode] = true;
|
m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) {
|
if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) {
|
||||||
m_stream.doConvertBuffer[_mode] = true;
|
m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
if (streamCount == 1) {
|
if (streamCount == 1) {
|
||||||
if ( m_stream.nUserChannels[_mode] > 1
|
if ( m_stream.nUserChannels[modeToIdTable(_mode)] > 1
|
||||||
&& m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]) {
|
&& m_stream.deviceInterleaved[modeToIdTable(_mode)] == false) {
|
||||||
m_stream.doConvertBuffer[_mode] = true;
|
m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
} else if (monoMode && m_stream.userInterleaved) {
|
} else if (monoMode) {
|
||||||
m_stream.doConvertBuffer[_mode] = true;
|
m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
|
||||||
}
|
}
|
||||||
// Allocate our CoreHandle structure for the stream.
|
// Allocate our CoreHandle structure for the stream.
|
||||||
CoreHandle *handle = 0;
|
CoreHandle *handle = 0;
|
||||||
@ -823,28 +817,28 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device,
|
|||||||
} else {
|
} else {
|
||||||
handle = (CoreHandle *) m_stream.apiHandle;
|
handle = (CoreHandle *) m_stream.apiHandle;
|
||||||
}
|
}
|
||||||
handle->iStream[_mode] = firstStream;
|
handle->iStream[modeToIdTable(_mode)] = firstStream;
|
||||||
handle->nStreams[_mode] = streamCount;
|
handle->nStreams[modeToIdTable(_mode)] = streamCount;
|
||||||
handle->id[_mode] = id;
|
handle->id[modeToIdTable(_mode)] = id;
|
||||||
// Allocate necessary internal buffers.
|
// Allocate necessary internal buffers.
|
||||||
uint64_t bufferBytes;
|
uint64_t bufferBytes;
|
||||||
bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat);
|
bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
|
||||||
// m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
|
// m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
|
||||||
m_stream.userBuffer[_mode] = (char *) malloc(bufferBytes * sizeof(char));
|
m_stream.userBuffer[modeToIdTable(_mode)] = (char *) malloc(bufferBytes * sizeof(char));
|
||||||
memset(m_stream.userBuffer[_mode], 0, bufferBytes * sizeof(char));
|
memset(m_stream.userBuffer[modeToIdTable(_mode)], 0, bufferBytes * sizeof(char));
|
||||||
if (m_stream.userBuffer[_mode] == nullptr) {
|
if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
|
||||||
ATA_ERROR("error allocating user buffer memory.");
|
ATA_ERROR("error allocating user buffer memory.");
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
// If possible, we will make use of the CoreAudio stream buffers as
|
// If possible, we will make use of the CoreAudio stream buffers as
|
||||||
// "device buffers". However, we can't do this if using multiple
|
// "device buffers". However, we can't do this if using multiple
|
||||||
// streams.
|
// streams.
|
||||||
if ( m_stream.doConvertBuffer[_mode]
|
if ( m_stream.doConvertBuffer[modeToIdTable(_mode)]
|
||||||
&& handle->nStreams[_mode] > 1) {
|
&& handle->nStreams[modeToIdTable(_mode)] > 1) {
|
||||||
bool makeBuffer = true;
|
bool makeBuffer = true;
|
||||||
bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]);
|
bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
|
||||||
if (_mode == INPUT) {
|
if (_mode == airtaudio::mode_input) {
|
||||||
if ( m_stream.mode == OUTPUT
|
if ( m_stream.mode == airtaudio::mode_output
|
||||||
&& m_stream.deviceBuffer) {
|
&& m_stream.deviceBuffer) {
|
||||||
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
|
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
|
||||||
if (bufferBytes <= bytesOut) {
|
if (bufferBytes <= bytesOut) {
|
||||||
@ -866,25 +860,25 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
m_stream.sampleRate = _sampleRate;
|
m_stream.sampleRate = _sampleRate;
|
||||||
m_stream.device[_mode] = _device;
|
m_stream.device[modeToIdTable(_mode)] = _device;
|
||||||
m_stream.state = STREAM_STOPPED;
|
m_stream.state = airtaudio::state_stopped;
|
||||||
m_stream.callbackInfo.object = (void *) this;
|
m_stream.callbackInfo.object = (void *) this;
|
||||||
// Setup the buffer conversion information structure.
|
// Setup the buffer conversion information structure.
|
||||||
if (m_stream.doConvertBuffer[_mode]) {
|
if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
|
||||||
if (streamCount > 1) {
|
if (streamCount > 1) {
|
||||||
setConvertInfo(_mode, 0);
|
setConvertInfo(_mode, 0);
|
||||||
} else {
|
} else {
|
||||||
setConvertInfo(_mode, channelOffset);
|
setConvertInfo(_mode, channelOffset);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if ( _mode == INPUT
|
if ( _mode == airtaudio::mode_input
|
||||||
&& m_stream.mode == OUTPUT
|
&& m_stream.mode == airtaudio::mode_output
|
||||||
&& m_stream.device[0] == _device) {
|
&& m_stream.device[0] == _device) {
|
||||||
// Only one callback procedure per device.
|
// Only one callback procedure per device.
|
||||||
m_stream.mode = DUPLEX;
|
m_stream.mode = airtaudio::mode_duplex;
|
||||||
} else {
|
} else {
|
||||||
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
|
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
|
||||||
result = AudioDeviceCreateIOProcID(id, callbackHandler, (void *) &m_stream.callbackInfo, &handle->procId[_mode]);
|
result = AudioDeviceCreateIOProcID(id, callbackHandler, (void *) &m_stream.callbackInfo, &handle->procId[modeToIdTable(_mode)]);
|
||||||
#else
|
#else
|
||||||
// deprecated in favor of AudioDeviceCreateIOProcID()
|
// deprecated in favor of AudioDeviceCreateIOProcID()
|
||||||
result = AudioDeviceAddIOProc(id, callbackHandler, (void *) &m_stream.callbackInfo);
|
result = AudioDeviceAddIOProc(id, callbackHandler, (void *) &m_stream.callbackInfo);
|
||||||
@ -893,9 +887,9 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device,
|
|||||||
ATA_ERROR("system error setting callback for device (" << _device << ").");
|
ATA_ERROR("system error setting callback for device (" << _device << ").");
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
if ( m_stream.mode == OUTPUT
|
if ( m_stream.mode == airtaudio::mode_output
|
||||||
&& _mode == INPUT) {
|
&& _mode == airtaudio::mode_input) {
|
||||||
m_stream.mode = DUPLEX;
|
m_stream.mode = airtaudio::mode_duplex;
|
||||||
} else {
|
} else {
|
||||||
m_stream.mode = _mode;
|
m_stream.mode = _mode;
|
||||||
}
|
}
|
||||||
@ -919,19 +913,19 @@ error:
|
|||||||
free(m_stream.deviceBuffer);
|
free(m_stream.deviceBuffer);
|
||||||
m_stream.deviceBuffer = 0;
|
m_stream.deviceBuffer = 0;
|
||||||
}
|
}
|
||||||
m_stream.state = STREAM_CLOSED;
|
m_stream.state = airtaudio::state_closed;
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum airtaudio::errorType airtaudio::api::Core::closeStream() {
|
enum airtaudio::error airtaudio::api::Core::closeStream() {
|
||||||
if (m_stream.state == STREAM_CLOSED) {
|
if (m_stream.state == airtaudio::state_closed) {
|
||||||
ATA_ERROR("no open stream to close!");
|
ATA_ERROR("no open stream to close!");
|
||||||
return airtaudio::errorWarning;
|
return airtaudio::error_warning;
|
||||||
}
|
}
|
||||||
CoreHandle *handle = (CoreHandle *) m_stream.apiHandle;
|
CoreHandle *handle = (CoreHandle *) m_stream.apiHandle;
|
||||||
if ( m_stream.mode == OUTPUT
|
if ( m_stream.mode == airtaudio::mode_output
|
||||||
|| m_stream.mode == DUPLEX) {
|
|| m_stream.mode == airtaudio::mode_duplex) {
|
||||||
if (m_stream.state == STREAM_RUNNING) {
|
if (m_stream.state == airtaudio::state_running) {
|
||||||
AudioDeviceStop(handle->id[0], callbackHandler);
|
AudioDeviceStop(handle->id[0], callbackHandler);
|
||||||
}
|
}
|
||||||
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
|
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
|
||||||
@ -941,10 +935,10 @@ enum airtaudio::errorType airtaudio::api::Core::closeStream() {
|
|||||||
AudioDeviceRemoveIOProc(handle->id[0], callbackHandler);
|
AudioDeviceRemoveIOProc(handle->id[0], callbackHandler);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
if ( m_stream.mode == INPUT
|
if ( m_stream.mode == airtaudio::mode_input
|
||||||
|| ( m_stream.mode == DUPLEX
|
|| ( m_stream.mode == airtaudio::mode_duplex
|
||||||
&& m_stream.device[0] != m_stream.device[1])) {
|
&& m_stream.device[0] != m_stream.device[1])) {
|
||||||
if (m_stream.state == STREAM_RUNNING) {
|
if (m_stream.state == airtaudio::state_running) {
|
||||||
AudioDeviceStop(handle->id[1], callbackHandler);
|
AudioDeviceStop(handle->id[1], callbackHandler);
|
||||||
}
|
}
|
||||||
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
|
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
|
||||||
@ -966,31 +960,31 @@ enum airtaudio::errorType airtaudio::api::Core::closeStream() {
|
|||||||
}
|
}
|
||||||
delete handle;
|
delete handle;
|
||||||
m_stream.apiHandle = 0;
|
m_stream.apiHandle = 0;
|
||||||
m_stream.mode = UNINITIALIZED;
|
m_stream.mode = airtaudio::mode_unknow;
|
||||||
m_stream.state = STREAM_CLOSED;
|
m_stream.state = airtaudio::state_closed;
|
||||||
return airtaudio::errorNone;
|
return airtaudio::error_none;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum airtaudio::errorType airtaudio::api::Core::startStream() {
|
enum airtaudio::error airtaudio::api::Core::startStream() {
|
||||||
if (verifyStream() != airtaudio::errorNone) {
|
if (verifyStream() != airtaudio::error_none) {
|
||||||
return airtaudio::errorFail;
|
return airtaudio::error_fail;
|
||||||
}
|
}
|
||||||
if (m_stream.state == STREAM_RUNNING) {
|
if (m_stream.state == airtaudio::state_running) {
|
||||||
ATA_ERROR("the stream is already running!");
|
ATA_ERROR("the stream is already running!");
|
||||||
return airtaudio::errorWarning;
|
return airtaudio::error_warning;
|
||||||
}
|
}
|
||||||
OSStatus result = noErr;
|
OSStatus result = noErr;
|
||||||
CoreHandle *handle = (CoreHandle *) m_stream.apiHandle;
|
CoreHandle *handle = (CoreHandle *) m_stream.apiHandle;
|
||||||
if ( m_stream.mode == OUTPUT
|
if ( m_stream.mode == airtaudio::mode_output
|
||||||
|| m_stream.mode == DUPLEX) {
|
|| m_stream.mode == airtaudio::mode_duplex) {
|
||||||
result = AudioDeviceStart(handle->id[0], callbackHandler);
|
result = AudioDeviceStart(handle->id[0], callbackHandler);
|
||||||
if (result != noErr) {
|
if (result != noErr) {
|
||||||
ATA_ERROR("system error (" << getErrorCode(result) << ") starting callback procedure on device (" << m_stream.device[0] << ").");
|
ATA_ERROR("system error (" << getErrorCode(result) << ") starting callback procedure on device (" << m_stream.device[0] << ").");
|
||||||
goto unlock;
|
goto unlock;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if ( m_stream.mode == INPUT
|
if ( m_stream.mode == airtaudio::mode_input
|
||||||
|| ( m_stream.mode == DUPLEX
|
|| ( m_stream.mode == airtaudio::mode_duplex
|
||||||
&& m_stream.device[0] != m_stream.device[1])) {
|
&& m_stream.device[0] != m_stream.device[1])) {
|
||||||
result = AudioDeviceStart(handle->id[1], callbackHandler);
|
result = AudioDeviceStart(handle->id[1], callbackHandler);
|
||||||
if (result != noErr) {
|
if (result != noErr) {
|
||||||
@ -1000,26 +994,26 @@ enum airtaudio::errorType airtaudio::api::Core::startStream() {
|
|||||||
}
|
}
|
||||||
handle->drainCounter = 0;
|
handle->drainCounter = 0;
|
||||||
handle->internalDrain = false;
|
handle->internalDrain = false;
|
||||||
m_stream.state = STREAM_RUNNING;
|
m_stream.state = airtaudio::state_running;
|
||||||
unlock:
|
unlock:
|
||||||
if (result == noErr) {
|
if (result == noErr) {
|
||||||
return airtaudio::errorNone;
|
return airtaudio::error_none;
|
||||||
}
|
}
|
||||||
return airtaudio::errorSystemError;
|
return airtaudio::error_systemError;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum airtaudio::errorType airtaudio::api::Core::stopStream() {
|
enum airtaudio::error airtaudio::api::Core::stopStream() {
|
||||||
if (verifyStream() != airtaudio::errorNone) {
|
if (verifyStream() != airtaudio::error_none) {
|
||||||
return airtaudio::errorFail;
|
return airtaudio::error_fail;
|
||||||
}
|
}
|
||||||
if (m_stream.state == STREAM_STOPPED) {
|
if (m_stream.state == airtaudio::state_stopped) {
|
||||||
ATA_ERROR("the stream is already stopped!");
|
ATA_ERROR("the stream is already stopped!");
|
||||||
return airtaudio::errorWarning;
|
return airtaudio::error_warning;
|
||||||
}
|
}
|
||||||
OSStatus result = noErr;
|
OSStatus result = noErr;
|
||||||
CoreHandle *handle = (CoreHandle *) m_stream.apiHandle;
|
CoreHandle *handle = (CoreHandle *) m_stream.apiHandle;
|
||||||
if ( m_stream.mode == OUTPUT
|
if ( m_stream.mode == airtaudio::mode_output
|
||||||
|| m_stream.mode == DUPLEX) {
|
|| m_stream.mode == airtaudio::mode_duplex) {
|
||||||
if (handle->drainCounter == 0) {
|
if (handle->drainCounter == 0) {
|
||||||
std::unique_lock<std::mutex> lck(m_stream.mutex);
|
std::unique_lock<std::mutex> lck(m_stream.mutex);
|
||||||
handle->drainCounter = 2;
|
handle->drainCounter = 2;
|
||||||
@ -1031,8 +1025,8 @@ enum airtaudio::errorType airtaudio::api::Core::stopStream() {
|
|||||||
goto unlock;
|
goto unlock;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if ( m_stream.mode == INPUT
|
if ( m_stream.mode == airtaudio::mode_input
|
||||||
|| ( m_stream.mode == DUPLEX
|
|| ( m_stream.mode == airtaudio::mode_duplex
|
||||||
&& m_stream.device[0] != m_stream.device[1])) {
|
&& m_stream.device[0] != m_stream.device[1])) {
|
||||||
result = AudioDeviceStop(handle->id[1], callbackHandler);
|
result = AudioDeviceStop(handle->id[1], callbackHandler);
|
||||||
if (result != noErr) {
|
if (result != noErr) {
|
||||||
@ -1040,21 +1034,21 @@ enum airtaudio::errorType airtaudio::api::Core::stopStream() {
|
|||||||
goto unlock;
|
goto unlock;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
m_stream.state = STREAM_STOPPED;
|
m_stream.state = airtaudio::state_stopped;
|
||||||
unlock:
|
unlock:
|
||||||
if (result == noErr) {
|
if (result == noErr) {
|
||||||
return airtaudio::errorNone;
|
return airtaudio::error_none;
|
||||||
}
|
}
|
||||||
return airtaudio::errorSystemError;
|
return airtaudio::error_systemError;
|
||||||
}
|
}
|
||||||
|
|
||||||
enum airtaudio::errorType airtaudio::api::Core::abortStream() {
|
enum airtaudio::error airtaudio::api::Core::abortStream() {
|
||||||
if (verifyStream() != airtaudio::errorNone) {
|
if (verifyStream() != airtaudio::error_none) {
|
||||||
return airtaudio::errorFail;
|
return airtaudio::error_fail;
|
||||||
}
|
}
|
||||||
if (m_stream.state == STREAM_STOPPED) {
|
if (m_stream.state == airtaudio::state_stopped) {
|
||||||
ATA_ERROR("the stream is already stopped!");
|
ATA_ERROR("the stream is already stopped!");
|
||||||
return airtaudio::errorWarning;
|
return airtaudio::error_warning;
|
||||||
}
|
}
|
||||||
CoreHandle* handle = (CoreHandle*)m_stream.apiHandle;
|
CoreHandle* handle = (CoreHandle*)m_stream.apiHandle;
|
||||||
handle->drainCounter = 2;
|
handle->drainCounter = 2;
|
||||||
@ -1075,11 +1069,11 @@ static void coreStopStream(void *_ptr) {
|
|||||||
bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
|
bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
|
||||||
const AudioBufferList *_inBufferList,
|
const AudioBufferList *_inBufferList,
|
||||||
const AudioBufferList *_outBufferList) {
|
const AudioBufferList *_outBufferList) {
|
||||||
if ( m_stream.state == STREAM_STOPPED
|
if ( m_stream.state == airtaudio::state_stopped
|
||||||
|| m_stream.state == STREAM_STOPPING) {
|
|| m_stream.state == airtaudio::state_stopping) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (m_stream.state == STREAM_CLOSED) {
|
if (m_stream.state == airtaudio::state_closed) {
|
||||||
ATA_ERROR("the stream is closed ... this shouldn't happen!");
|
ATA_ERROR("the stream is closed ... this shouldn't happen!");
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -1087,7 +1081,7 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
|
|||||||
CoreHandle *handle = (CoreHandle *) m_stream.apiHandle;
|
CoreHandle *handle = (CoreHandle *) m_stream.apiHandle;
|
||||||
// Check if we were draining the stream and signal is finished.
|
// Check if we were draining the stream and signal is finished.
|
||||||
if (handle->drainCounter > 3) {
|
if (handle->drainCounter > 3) {
|
||||||
m_stream.state = STREAM_STOPPING;
|
m_stream.state = airtaudio::state_stopping;
|
||||||
if (handle->internalDrain == true) {
|
if (handle->internalDrain == true) {
|
||||||
new std::thread(coreStopStream, info);
|
new std::thread(coreStopStream, info);
|
||||||
} else {
|
} else {
|
||||||
@@ -1100,17 +1094,17 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
 // Invoke user callback to get fresh output data UNLESS we are
 // draining stream or duplex mode AND the input/output devices are
 // different AND this function is called for the input device.
-if (handle->drainCounter == 0 && (m_stream.mode != DUPLEX || _deviceId == outputDevice)) {
+if (handle->drainCounter == 0 && (m_stream.mode != airtaudio::mode_duplex || _deviceId == outputDevice)) {
 double streamTime = getStreamTime();
-airtaudio::streamStatus status = 0;
+enum airtaudio::status status = airtaudio::status_ok;
-if ( m_stream.mode != INPUT
+if ( m_stream.mode != airtaudio::mode_input
 && handle->xrun[0] == true) {
-status |= OUTPUT_UNDERFLOW;
+status |= airtaudio::status_underflow;
 handle->xrun[0] = false;
 }
-if ( m_stream.mode != OUTPUT
+if ( m_stream.mode != airtaudio::mode_output
 && handle->xrun[1] == true) {
-status |= INPUT_OVERFLOW;
+status |= airtaudio::mode_input_OVERFLOW;
 handle->xrun[1] = false;
 }
 int32_t cbReturnValue = info->callback(m_stream.userBuffer[0],
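The xrun bookkeeping above accumulates underflow/overflow indications into the callback status before invoking the user callback. A standalone sketch of that pattern with plain integer flags, since the diff itself does not show how bitwise operators are defined for airtaudio::status:

#include <cstdint>
#include <iostream>

// Illustrative flag values; the real enumerator names and values live in the airtaudio headers.
static const uint32_t STATUS_OK        = 0;
static const uint32_t STATUS_UNDERFLOW = 1u << 0; // the output side could not be fed in time
static const uint32_t STATUS_OVERFLOW  = 1u << 1; // input data was lost

int main() {
	uint32_t status = STATUS_OK;
	bool outputXRun = true;   // pretend the output device reported an xrun
	bool inputXRun  = false;
	if (outputXRun) {
		status |= STATUS_UNDERFLOW;
	}
	if (inputXRun) {
		status |= STATUS_OVERFLOW;
	}
	std::cout << "status bits: " << status << std::endl; // status bits: 1
	return 0;
}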
@ -1119,7 +1113,7 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
|
|||||||
streamTime,
|
streamTime,
|
||||||
status);
|
status);
|
||||||
if (cbReturnValue == 2) {
|
if (cbReturnValue == 2) {
|
||||||
m_stream.state = STREAM_STOPPING;
|
m_stream.state = airtaudio::state_stopping;
|
||||||
handle->drainCounter = 2;
|
handle->drainCounter = 2;
|
||||||
abortStream();
|
abortStream();
|
||||||
return true;
|
return true;
|
||||||
@ -1128,8 +1122,8 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
|
|||||||
handle->internalDrain = true;
|
handle->internalDrain = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if ( m_stream.mode == OUTPUT
|
if ( m_stream.mode == airtaudio::mode_output
|
||||||
|| ( m_stream.mode == DUPLEX
|
|| ( m_stream.mode == airtaudio::mode_duplex
|
||||||
&& _deviceId == outputDevice)) {
|
&& _deviceId == outputDevice)) {
|
||||||
if (handle->drainCounter > 1) {
|
if (handle->drainCounter > 1) {
|
||||||
// write zeros to the output stream
|
// write zeros to the output stream
|
||||||
@@ -1175,7 +1169,7 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
 // fill multiple multi-channel streams with interleaved data
 uint32_t streamChannels, channelsLeft, inJump, outJump, inOffset;
 float *out, *in;
-bool inInterleaved = (m_stream.userInterleaved) ? true : false;
+bool inInterleaved = true;
 uint32_t inChannels = m_stream.nUserChannels[0];
 if (m_stream.doConvertBuffer[0]) {
 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
@ -1229,8 +1223,8 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
|
|||||||
}
|
}
|
||||||
AudioDeviceID inputDevice;
|
AudioDeviceID inputDevice;
|
||||||
inputDevice = handle->id[1];
|
inputDevice = handle->id[1];
|
||||||
if ( m_stream.mode == INPUT
|
if ( m_stream.mode == airtaudio::mode_input
|
||||||
|| ( m_stream.mode == DUPLEX
|
|| ( m_stream.mode == airtaudio::mode_duplex
|
||||||
&& _deviceId == inputDevice)) {
|
&& _deviceId == inputDevice)) {
|
||||||
if (handle->nStreams[1] == 1) {
|
if (handle->nStreams[1] == 1) {
|
||||||
if (m_stream.doConvertBuffer[1]) {
|
if (m_stream.doConvertBuffer[1]) {
|
||||||
@@ -1260,7 +1254,7 @@ bool airtaudio::api::Core::callbackEvent(AudioDeviceID _deviceId,
 // read from multiple multi-channel streams
 uint32_t streamChannels, channelsLeft, inJump, outJump, outOffset;
 float *out, *in;
-bool outInterleaved = (m_stream.userInterleaved) ? true : false;
+bool outInterleaved = true;
 uint32_t outChannels = m_stream.nUserChannels[1];
 if (m_stream.doConvertBuffer[1]) {
 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
@@ -19,17 +19,17 @@ namespace airtaudio {
 public:
 Core();
 virtual ~Core();
-airtaudio::api::type getCurrentApi() {
+enum airtaudio::type getCurrentApi() {
-return airtaudio::api::MACOSX_CORE;
+return airtaudio::type_coreOSX;
 }
 uint32_t getDeviceCount();
 airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
 uint32_t getDefaultOutputDevice();
 uint32_t getDefaultInputDevice();
-enum airtaudio::errorType closeStream();
+enum airtaudio::error closeStream();
-enum airtaudio::errorType startStream();
+enum airtaudio::error startStream();
-enum airtaudio::errorType stopStream();
+enum airtaudio::error stopStream();
-enum airtaudio::errorType abortStream();
+enum airtaudio::error abortStream();
 long getStreamLatency();
 // This function is intended for internal use only. It must be
 // public because it is called by the internal callback handler,
@ -41,7 +41,7 @@ namespace airtaudio {
|
|||||||
|
|
||||||
private:
|
private:
|
||||||
bool probeDeviceOpen(uint32_t _device,
|
bool probeDeviceOpen(uint32_t _device,
|
||||||
airtaudio::api::StreamMode _mode,
|
airtaudio::mode _mode,
|
||||||
uint32_t _channels,
|
uint32_t _channels,
|
||||||
uint32_t _firstChannel,
|
uint32_t _firstChannel,
|
||||||
uint32_t _sampleRate,
|
uint32_t _sampleRate,
|
||||||
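With getCurrentApi() now returning the scoped enum airtaudio::type, callers compare against the new type_* names directly. A short usage sketch (the backend reference is a hypothetical stand-in for any airtaudio::Api implementation):

// Pick a CoreAudio-specific code path based on the reworked enum.
enum airtaudio::type currentType = backend.getCurrentApi();
if (currentType == airtaudio::type_coreOSX) {
	// OS X CoreAudio backend is in use ...
}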
@ -16,15 +16,15 @@ namespace airtaudio {
public:
CoreIos();
virtual ~CoreIos();
- airtaudio::api::type getCurrentApi() {
+ enum airtaudio::type getCurrentApi() {
- return airtaudio::api::IOS_CORE;
+ return airtaudio::type_coreIOS;
}
uint32_t getDeviceCount();
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
- enum airtaudio::errorType closeStream();
+ enum airtaudio::error closeStream();
- enum airtaudio::errorType startStream();
+ enum airtaudio::error startStream();
- enum airtaudio::errorType stopStream();
+ enum airtaudio::error stopStream();
- enum airtaudio::errorType abortStream();
+ enum airtaudio::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
@ -34,7 +34,7 @@ namespace airtaudio {
std::vector<airtaudio::DeviceInfo> m_devices;
void saveDeviceInfo();
bool probeDeviceOpen(uint32_t _device,
- airtaudio::api::StreamMode _mode,
+ airtaudio::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
@ -82,31 +82,31 @@ airtaudio::DeviceInfo airtaudio::api::CoreIos::getDeviceInfo(uint32_t _device) {
return m_devices[_device];
}

- enum airtaudio::errorType airtaudio::api::CoreIos::closeStream(void) {
+ enum airtaudio::error airtaudio::api::CoreIos::closeStream(void) {
ATA_INFO("Close Stream");
// Can not close the stream now...
- return airtaudio::errorNone;
+ return airtaudio::error_none;
}

- enum airtaudio::errorType airtaudio::api::CoreIos::startStream(void) {
+ enum airtaudio::error airtaudio::api::CoreIos::startStream(void) {
ATA_INFO("Start Stream");
OSStatus status = AudioOutputUnitStart(m_private->audioUnit);
// Can not close the stream now...
- return airtaudio::errorNone;
+ return airtaudio::error_none;
}

- enum airtaudio::errorType airtaudio::api::CoreIos::stopStream(void) {
+ enum airtaudio::error airtaudio::api::CoreIos::stopStream(void) {
ATA_INFO("Stop stream");
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
// Can not close the stream now...
- return airtaudio::errorNone;
+ return airtaudio::error_none;
}

- enum airtaudio::errorType airtaudio::api::CoreIos::abortStream(void) {
+ enum airtaudio::error airtaudio::api::CoreIos::abortStream(void) {
ATA_INFO("Abort Stream");
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
// Can not close the stream now...
- return airtaudio::errorNone;
+ return airtaudio::error_none;
}

void airtaudio::api::CoreIos::callBackEvent(void* _data,
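All the stream-control entry points now report the scoped airtaudio::error values, with error_none meaning success, so the calling pattern is the same for every backend. A small sketch (the audio object name is an assumption):

// Start the stream and report a failure through the usual logging macro.
if (audio.startStream() != airtaudio::error_none) {
	ATA_ERROR("unable to start the audio stream");
}
// ... later ...
if (audio.stopStream() != airtaudio::error_none) {
	ATA_ERROR("unable to stop the audio stream");
}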
@ -127,14 +127,14 @@ void airtaudio::api::CoreIos::callBackEvent(void* _data,
#endif
int32_t doStopStream = 0;
double streamTime = getStreamTime();
- airtaudio::streamStatus status = 0;
+ enum airtaudio::status status = airtaudio::status_ok;
- if (m_stream.doConvertBuffer[OUTPUT] == true) {
+ if (m_stream.doConvertBuffer[airtaudio::mode_output] == true) {
- doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[OUTPUT],
+ doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[airtaudio::mode_output],
nullptr,
_frameRate,
streamTime,
status);
- convertBuffer((char*)_data, (char*)m_stream.userBuffer[OUTPUT], m_stream.convertInfo[OUTPUT]);
+ convertBuffer((char*)_data, (char*)m_stream.userBuffer[airtaudio::mode_output], m_stream.convertInfo[airtaudio::mode_output]);
} else {
doStopStream = m_stream.callbackInfo.callback(_data,
nullptr,
@ -172,7 +172,7 @@ static OSStatus playbackCallback(void *_userData,


bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device,
- airtaudio::api::StreamMode _mode,
+ airtaudio::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
@ -180,7 +180,7 @@ bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device,
uint32_t *_bufferSize,
airtaudio::StreamOptions *_options) {
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
- if (_mode != OUTPUT) {
+ if (_mode != airtaudio::mode_output) {
ATA_ERROR("Can not start a device input or duplex for CoreIos ...");
return false;
}
@ -188,39 +188,39 @@ bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device,

// configure Airtaudio internal configuration:
m_stream.userFormat = _format;
- m_stream.nUserChannels[_mode] = _channels;
+ m_stream.nUserChannels[modeToIdTable(_mode)] = _channels;
m_stream.bufferSize = 8192;
m_stream.sampleRate = _sampleRate;
- m_stream.doByteSwap[_mode] = false; // for endienness ...
+ m_stream.doByteSwap[modeToIdTable(_mode)] = false; // for endienness ...

// TODO : For now, we write it in hard ==> to be update later ...
- m_stream.deviceFormat[_mode] = SINT16;
+ m_stream.deviceFormat[modeToIdTable(_mode)] = SINT16;
- m_stream.nDeviceChannels[_mode] = 2;
+ m_stream.nDeviceChannels[modeToIdTable(_mode)] = 2;
- m_stream.deviceInterleaved[_mode] = true;
+ m_stream.deviceInterleaved[modeToIdTable(_mode)] = true;

- m_stream.doConvertBuffer[_mode] = false;
+ m_stream.doConvertBuffer[modeToIdTable(_mode)] = false;
- if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
+ if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) {
- m_stream.doConvertBuffer[_mode] = true;
+ m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
- if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) {
+ if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) {
- m_stream.doConvertBuffer[_mode] = true;
+ m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
- if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
+ if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false
- && m_stream.nUserChannels[_mode] > 1) {
+ && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) {
- m_stream.doConvertBuffer[_mode] = true;
+ m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
- if (m_stream.doConvertBuffer[_mode] == true) {
+ if (m_stream.doConvertBuffer[modeToIdTable(_mode)] == true) {
// Allocate necessary internal buffers.
- uint64_t bufferBytes = m_stream.nUserChannels[_mode] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
+ uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
- m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
+ m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
- if (m_stream.userBuffer[_mode] == nullptr) {
+ if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("error allocating user buffer memory.");
}
setConvertInfo(_mode, _firstChannel);
}
- ATA_INFO("device format : " << m_stream.deviceFormat[_mode] << " user format : " << m_stream.userFormat);
+ ATA_INFO("device format : " << m_stream.deviceFormat[modeToIdTable(_mode)] << " user format : " << m_stream.userFormat);
- ATA_INFO("device channels : " << m_stream.nDeviceChannels[_mode] << " user channels : " << m_stream.nUserChannels[_mode]);
+ ATA_INFO("device channels : " << m_stream.nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_stream.nUserChannels[modeToIdTable(_mode)]);
- ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[_mode]);
+ ATA_INFO("do convert buffer : " << m_stream.doConvertBuffer[modeToIdTable(_mode)]);
if (ret == false) {
ATA_ERROR("Can not open device.");
}
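Stripped of the modeToIdTable() indexing, the conversion decision in the hunk above reduces to three layout comparisons between the user side and the device side. A condensed sketch with hypothetical local names:

// A conversion buffer is required when the user layout differs from the
// device layout in format, channel count, or interleaving.
bool doConvertBuffer = (userFormat != deviceFormat)
                    || (userChannels < deviceChannels)
                    || (deviceInterleaved == false && userChannels > 1);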
@ -126,7 +126,7 @@ airtaudio::api::Ds::~Ds() {
if (m_coInitialized) {
CoUninitialize(); // balanced call.
}
- if (m_stream.state != STREAM_CLOSED) {
+ if (m_stream.state != airtaudio::state_closed) {
closeStream();
}
}
@ -417,12 +417,12 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device,
ATA_ERROR("device ID is invalid!");
return false;
}
- if (_mode == OUTPUT) {
+ if (_mode == airtaudio::mode_output) {
if (dsDevices[ _device ].validId[0] == false) {
ATA_ERROR("device (" << _device << ") does not support output!");
return false;
}
- } else { // _mode == INPUT
+ } else { // _mode == airtaudio::mode_input
if (dsDevices[ _device ].validId[1] == false) {
ATA_ERROR("device (" << _device << ") does not support input!");
return false;
@ -445,9 +445,10 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device,
nBuffers = _options->numberOfBuffers;
}
if ( _options!= nullptr
- && _options->flags & RTAUDIO_MINIMIZE_LATENCY) {
+ && _options->flags.m_minimizeLatency == true) {
nBuffers = 2;
}
+ */
if (nBuffers < 2) {
nBuffers = 3;
}
@ -470,7 +471,7 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device,
DWORD dsPointerLeadTime = 0;
void *ohandle = 0, *bhandle = 0;
HRESULT result;
- if (_mode == OUTPUT) {
+ if (_mode == airtaudio::mode_output) {
LPDIRECTSOUND output;
result = DirectSoundCreate(dsDevices[ _device ].id[0], &output, nullptr);
if (FAILED(result)) {
@ -496,10 +497,10 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device,
&& !( _format == RTAUDIO_SINT8
&& outCaps.dwFlags & DSCAPS_PRIMARY8BIT)) {
waveFormat.wBitsPerSample = 16;
- m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
+ m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
} else {
waveFormat.wBitsPerSample = 8;
- m_stream.deviceFormat[_mode] = RTAUDIO_SINT8;
+ m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
}
m_stream.userFormat = _format;
// Update wave format structure and buffer information.
@ -600,7 +601,7 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device,
ohandle = (void *) output;
bhandle = (void *) buffer;
}
- if (_mode == INPUT) {
+ if (_mode == airtaudio::mode_input) {
LPDIRECTSOUNDCAPTURE input;
result = DirectSoundCaptureCreate(dsDevices[ _device ].id[1], &input, nullptr);
if (FAILED(result)) {
@ -627,20 +628,20 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device,
deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
if (format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats) {
waveFormat.wBitsPerSample = 8;
- m_stream.deviceFormat[_mode] = RTAUDIO_SINT8;
+ m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
} else { // assume 16-bit is supported
waveFormat.wBitsPerSample = 16;
- m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
+ m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
}
} else { // channel == 1
deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
if (format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats) {
waveFormat.wBitsPerSample = 8;
- m_stream.deviceFormat[_mode] = RTAUDIO_SINT8;
+ m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
}
else { // assume 16-bit is supported
waveFormat.wBitsPerSample = 16;
- m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
+ m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
}
}
m_stream.userFormat = _format;
@ -708,41 +709,35 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device,
}
// Set various stream parameters
DsHandle *handle = 0;
- m_stream.nDeviceChannels[_mode] = _channels + _firstChannel;
+ m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel;
- m_stream.nUserChannels[_mode] = _channels;
+ m_stream.nUserChannels[modeToIdTable(_mode)] = _channels;
m_stream.bufferSize = *_bufferSize;
- m_stream.channelOffset[_mode] = _firstChannel;
+ m_stream.channelOffset[modeToIdTable(_mode)] = _firstChannel;
- m_stream.deviceInterleaved[_mode] = true;
+ m_stream.deviceInterleaved[modeToIdTable(_mode)] = true;
- if ( _options != nullptr
- && _options->flags & RTAUDIO_NONINTERLEAVED) {
- m_stream.userInterleaved = false;
- } else {
- m_stream.userInterleaved = true;
- }
// Set flag for buffer conversion
- m_stream.doConvertBuffer[_mode] = false;
+ m_stream.doConvertBuffer[modeToIdTable(_mode)] = false;
- if (m_stream.nUserChannels[_mode] != m_stream.nDeviceChannels[_mode]) {
+ if (m_stream.nUserChannels[modeToIdTable(_mode)] != m_stream.nDeviceChannels[modeToIdTable(_mode)]) {
- m_stream.doConvertBuffer[_mode] = true;
+ m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
- if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
+ if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) {
- m_stream.doConvertBuffer[_mode] = true;
+ m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
- if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
+ if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false
- && m_stream.nUserChannels[_mode] > 1) {
+ && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) {
- m_stream.doConvertBuffer[_mode] = true;
+ m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
// Allocate necessary internal buffers
- long bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat);
+ long bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
- m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
+ m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
- if (m_stream.userBuffer[_mode] == nullptr) {
+ if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("error allocating user buffer memory.");
goto error;
}
- if (m_stream.doConvertBuffer[_mode]) {
+ if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
bool makeBuffer = true;
- bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]);
+ bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
- if (_mode == INPUT) {
+ if (_mode == airtaudio::mode_input) {
- if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
+ if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) {
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
if (bufferBytes <= (long) bytesOut) {
makeBuffer = false;
@ -777,23 +772,23 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device,
} else {
handle = (DsHandle *) m_stream.apiHandle;
}
- handle->id[_mode] = ohandle;
+ handle->id[modeToIdTable(_mode)] = ohandle;
- handle->buffer[_mode] = bhandle;
+ handle->buffer[modeToIdTable(_mode)] = bhandle;
- handle->dsBufferSize[_mode] = dsBufferSize;
+ handle->dsBufferSize[modeToIdTable(_mode)] = dsBufferSize;
- handle->dsPointerLeadTime[_mode] = dsPointerLeadTime;
+ handle->dsPointerLeadTime[modeToIdTable(_mode)] = dsPointerLeadTime;
- m_stream.device[_mode] = _device;
+ m_stream.device[modeToIdTable(_mode)] = _device;
- m_stream.state = STREAM_STOPPED;
+ m_stream.state = airtaudio::state_stopped;
- if ( m_stream.mode == OUTPUT
+ if ( m_stream.mode == airtaudio::mode_output
- && _mode == INPUT) {
+ && _mode == airtaudio::mode_input) {
// We had already set up an output stream.
- m_stream.mode = DUPLEX;
+ m_stream.mode = airtaudio::mode_duplex;
} else {
m_stream.mode = _mode;
}
m_stream.nBuffers = nBuffers;
m_stream.sampleRate = _sampleRate;
// Setup the buffer conversion information structure.
- if (m_stream.doConvertBuffer[_mode]) {
+ if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
setConvertInfo(_mode, _firstChannel);
}
// Setup the callback thread.
@ -847,14 +842,14 @@ error:
free(m_stream.deviceBuffer);
m_stream.deviceBuffer = 0;
}
- m_stream.state = STREAM_CLOSED;
+ m_stream.state = airtaudio::state_closed;
return false;
}

- enum airtaudio::errorType airtaudio::api::Ds::closeStream() {
+ enum airtaudio::error airtaudio::api::Ds::closeStream() {
- if (m_stream.state == STREAM_CLOSED) {
+ if (m_stream.state == airtaudio::state_closed) {
ATA_ERROR("no open stream to close!");
- return airtaudio::errorWarning;
+ return airtaudio::error_warning;
}
// Stop the callback thread.
m_stream.callbackInfo.isRunning = false;
@ -894,17 +889,17 @@ enum airtaudio::errorType airtaudio::api::Ds::closeStream() {
free(m_stream.deviceBuffer);
m_stream.deviceBuffer = 0;
}
- m_stream.mode = UNINITIALIZED;
+ m_stream.mode = airtaudio::mode_unknow;
- m_stream.state = STREAM_CLOSED;
+ m_stream.state = airtaudio::state_closed;
}

- enum airtaudio::errorType airtaudio::api::Ds::startStream() {
+ enum airtaudio::error airtaudio::api::Ds::startStream() {
- if (verifyStream() != airtaudio::errorNone) {
+ if (verifyStream() != airtaudio::error_none) {
- return airtaudio::errorFail;
+ return airtaudio::error_fail;
}
- if (m_stream.state == STREAM_RUNNING) {
+ if (m_stream.state == airtaudio::state_running) {
ATA_ERROR("the stream is already running!");
- return airtaudio::errorWarning;
+ return airtaudio::error_warning;
}
DsHandle *handle = (DsHandle *) m_stream.apiHandle;
// Increase scheduler frequency on lesser windows (a side-effect of
@ -913,13 +908,13 @@ enum airtaudio::errorType airtaudio::api::Ds::startStream() {
timeBeginPeriod(1);
m_buffersRolling = false;
m_duplexPrerollBytes = 0;
- if (m_stream.mode == DUPLEX) {
+ if (m_stream.mode == airtaudio::mode_duplex) {
- // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
+ // 0.5 seconds of silence in airtaudio::mode_duplex mode while the devices spin up and synchronize.
m_duplexPrerollBytes = (int) (0.5 * m_stream.sampleRate * formatBytes(m_stream.deviceFormat[1]) * m_stream.nDeviceChannels[1]);
}
HRESULT result = 0;
- if ( m_stream.mode == OUTPUT
+ if ( m_stream.mode == airtaudio::mode_output
- || m_stream.mode == DUPLEX) {
+ || m_stream.mode == airtaudio::mode_duplex) {
LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
result = buffer->Play(0, 0, DSBPLAY_LOOPING);
if (FAILED(result)) {
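The duplex pre-roll set up above is simply half a second of device-format input data. With assumed figures of 44100 Hz, 16-bit (2-byte) samples and 2 input channels, that is 0.5 * 44100 * 2 * 2 = 88200 bytes:

// Worked example of the pre-roll size with the assumed figures above.
int duplexPrerollBytes = static_cast<int>(0.5 * 44100 /* Hz */
                                              * 2 /* bytes per sample (SINT16) */
                                              * 2 /* input channels */);
// duplexPrerollBytes == 88200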
@ -927,8 +922,8 @@ enum airtaudio::errorType airtaudio::api::Ds::startStream() {
goto unlock;
}
}
- if ( m_stream.mode == INPUT
+ if ( m_stream.mode == airtaudio::mode_input
- || m_stream.mode == DUPLEX) {
+ || m_stream.mode == airtaudio::mode_duplex) {
LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
result = buffer->Start(DSCBSTART_LOOPING);
if (FAILED(result)) {
@ -939,33 +934,33 @@ enum airtaudio::errorType airtaudio::api::Ds::startStream() {
handle->drainCounter = 0;
handle->internalDrain = false;
ResetEvent(handle->condition);
- m_stream.state = STREAM_RUNNING;
+ m_stream.state = airtaudio::state_running;
unlock:
if (FAILED(result)) {
- return airtaudio::errorSystemError;
+ return airtaudio::error_systemError;
}
- return airtaudio::errorNone;
+ return airtaudio::error_none;
}

- enum airtaudio::errorType airtaudio::api::Ds::stopStream() {
+ enum airtaudio::error airtaudio::api::Ds::stopStream() {
- if (verifyStream() != airtaudio::errorNone) {
+ if (verifyStream() != airtaudio::error_none) {
- return airtaudio::errorFail;
+ return airtaudio::error_fail;
}
- if (m_stream.state == STREAM_STOPPED) {
+ if (m_stream.state == airtaudio::state_stopped) {
ATA_ERROR("the stream is already stopped!");
- return airtaudio::errorWarning;
+ return airtaudio::error_warning;
}
HRESULT result = 0;
LPVOID audioPtr;
DWORD dataLen;
DsHandle *handle = (DsHandle *) m_stream.apiHandle;
- if ( m_stream.mode == OUTPUT
+ if ( m_stream.mode == airtaudio::mode_output
- || m_stream.mode == DUPLEX) {
+ || m_stream.mode == airtaudio::mode_duplex) {
if (handle->drainCounter == 0) {
handle->drainCounter = 2;
WaitForSingleObject(handle->condition, INFINITE); // block until signaled
}
- m_stream.state = STREAM_STOPPED;
+ m_stream.state = airtaudio::state_stopped;
// Stop the buffer and clear memory
LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
result = buffer->Stop();
@ -991,12 +986,12 @@ enum airtaudio::errorType airtaudio::api::Ds::stopStream() {
// If we start playing again, we must begin at beginning of buffer.
handle->bufferPointer[0] = 0;
}
- if ( m_stream.mode == INPUT
+ if ( m_stream.mode == airtaudio::mode_input
- || m_stream.mode == DUPLEX) {
+ || m_stream.mode == airtaudio::mode_duplex) {
LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
audioPtr = nullptr;
dataLen = 0;
- m_stream.state = STREAM_STOPPED;
+ m_stream.state = airtaudio::state_stopped;
result = buffer->Stop();
if (FAILED(result)) {
ATA_ERROR("error (" << getErrorString(result) << ") stopping input buffer!");
@ -1023,18 +1018,18 @@ enum airtaudio::errorType airtaudio::api::Ds::stopStream() {
unlock:
timeEndPeriod(1); // revert to normal scheduler frequency on lesser windows.
if (FAILED(result)) {
- return airtaudio::errorSystemError;
+ return airtaudio::error_systemError;
}
- return airtaudio::errorNone;
+ return airtaudio::error_none;
}

- enum airtaudio::errorType airtaudio::api::Ds::abortStream() {
+ enum airtaudio::error airtaudio::api::Ds::abortStream() {
- if (verifyStream() != airtaudio::errorNone) {
+ if (verifyStream() != airtaudio::error_none) {
- return airtaudio::errorFail;
+ return airtaudio::error_fail;
}
- if (m_stream.state == STREAM_STOPPED) {
+ if (m_stream.state == airtaudio::state_stopped) {
ATA_ERROR("the stream is already stopped!");
- return airtaudio::errorWarning;
+ return airtaudio::error_warning;
}
DsHandle *handle = (DsHandle *) m_stream.apiHandle;
handle->drainCounter = 2;
@ -1042,11 +1037,11 @@ enum airtaudio::errorType airtaudio::api::Ds::abortStream() {
}

void airtaudio::api::Ds::callbackEvent() {
- if (m_stream.state == STREAM_STOPPED || m_stream.state == STREAM_STOPPING) {
+ if (m_stream.state == airtaudio::state_stopped || m_stream.state == airtaudio::state_stopping) {
Sleep(50); // sleep 50 milliseconds
return;
}
- if (m_stream.state == STREAM_CLOSED) {
+ if (m_stream.state == airtaudio::state_closed) {
ATA_ERROR("the stream is closed ... this shouldn't happen!");
return;
}
@ -1054,7 +1049,7 @@ void airtaudio::api::Ds::callbackEvent() {
DsHandle *handle = (DsHandle *) m_stream.apiHandle;
// Check if we were draining the stream and signal is finished.
if (handle->drainCounter > m_stream.nBuffers + 2) {
- m_stream.state = STREAM_STOPPING;
+ m_stream.state = airtaudio::state_stopping;
if (handle->internalDrain == false) {
SetEvent(handle->condition);
} else {
@ -1067,14 +1062,14 @@ void airtaudio::api::Ds::callbackEvent() {
if (handle->drainCounter == 0) {
double streamTime = getStreamTime();
rtaudio::streamStatus status = 0;
- if ( m_stream.mode != INPUT
+ if ( m_stream.mode != airtaudio::mode_input
&& handle->xrun[0] == true) {
- status |= RTAUDIO_OUTPUT_UNDERFLOW;
+ status |= RTAUDIO_airtaudio::status_underflow;
handle->xrun[0] = false;
}
- if ( m_stream.mode != OUTPUT
+ if ( m_stream.mode != airtaudio::mode_output
&& handle->xrun[1] == true) {
- status |= RTAUDIO_INPUT_OVERFLOW;
+ status |= RTAUDIO_airtaudio::mode_input_OVERFLOW;
handle->xrun[1] = false;
}
int32_t cbReturnValue = info->callback(m_stream.userBuffer[0],
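The xrun flags collected above end up in the status value handed to the user callback, so an application can react to overflow or underflow per processed block. A minimal callback sketch; the parameter list shown is an assumption pieced together from the calls in this commit, not the exact airtaudio prototype:

int32_t onAudioBlock(void* _outputBuffer, void* _inputBuffer,
                     uint32_t _nbFrames, double _streamTime,
                     enum airtaudio::status _status) {
	if (_status != airtaudio::status_ok) {
		// an input overflow or output underflow was flagged for this block
	}
	// ... produce _outputBuffer / consume _inputBuffer here ...
	return 0; // 0 keeps the stream running; the code above aborts when 2 is returned
}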
@ -1083,7 +1078,7 @@ void airtaudio::api::Ds::callbackEvent() {
streamTime,
status);
if (cbReturnValue == 2) {
- m_stream.state = STREAM_STOPPING;
+ m_stream.state = airtaudio::state_stopping;
handle->drainCounter = 2;
abortStream();
return;
@ -1103,7 +1098,7 @@ void airtaudio::api::Ds::callbackEvent() {
char *buffer;
long bufferBytes;
if (m_buffersRolling == false) {
- if (m_stream.mode == DUPLEX) {
+ if (m_stream.mode == airtaudio::mode_duplex) {
//assert(handle->dsBufferSize[0] == handle->dsBufferSize[1]);
// It takes a while for the devices to get rolling. As a result,
// there's no guarantee that the capture and write device pointers
@ -1153,7 +1148,7 @@ void airtaudio::api::Ds::callbackEvent() {
handle->bufferPointer[0] -= handle->dsBufferSize[0];
}
handle->bufferPointer[1] = safeReadPointer;
- } else if (m_stream.mode == OUTPUT) {
+ } else if (m_stream.mode == airtaudio::mode_output) {
// Set the proper nextWritePosition after initial startup.
LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
result = dsWriteBuffer->GetCurrentPosition(&currentWritePointer, &safeWritePointer);
@ -1168,8 +1163,8 @@ void airtaudio::api::Ds::callbackEvent() {
}
m_buffersRolling = true;
}
- if ( m_stream.mode == OUTPUT
+ if ( m_stream.mode == airtaudio::mode_output
- || m_stream.mode == DUPLEX) {
+ || m_stream.mode == airtaudio::mode_duplex) {
LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
if (handle->drainCounter > 1) { // write zeros to the output stream
bufferBytes = m_stream.bufferSize * m_stream.nUserChannels[0];
@ -1273,8 +1268,8 @@ void airtaudio::api::Ds::callbackEvent() {
goto unlock;
}
}
- if ( m_stream.mode == INPUT
+ if ( m_stream.mode == airtaudio::mode_input
- || m_stream.mode == DUPLEX) {
+ || m_stream.mode == airtaudio::mode_duplex) {
// Setup parameters.
if (m_stream.doConvertBuffer[1]) {
buffer = m_stream.deviceBuffer;
@ -1298,20 +1293,20 @@ void airtaudio::api::Ds::callbackEvent() {
safeReadPointer += dsBufferSize; // unwrap offset
}
DWORD endRead = nextReadPointer + bufferBytes;
- // Handling depends on whether we are INPUT or DUPLEX.
+ // Handling depends on whether we are airtaudio::mode_input or airtaudio::mode_duplex.
- // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
+ // If we're in airtaudio::mode_input mode then waiting is a good thing. If we're in airtaudio::mode_duplex mode,
// then a wait here will drag the write pointers into the forbidden zone.
//
- // In DUPLEX mode, rather than wait, we will back off the read pointer until
+ // In airtaudio::mode_duplex mode, rather than wait, we will back off the read pointer until
// it's in a safe position. This causes dropouts, but it seems to be the only
// practical way to sync up the read and write pointers reliably, given the
// the very complex relationship between phase and increment of the read and write
// pointers.
//
- // In order to minimize audible dropouts in DUPLEX mode, we will
+ // In order to minimize audible dropouts in airtaudio::mode_duplex mode, we will
// provide a pre-roll period of 0.5 seconds in which we return
// zeros from the read buffer while the pointers sync up.
- if (m_stream.mode == DUPLEX) {
+ if (m_stream.mode == airtaudio::mode_duplex) {
if (safeReadPointer < endRead) {
if (m_duplexPrerollBytes <= 0) {
// Pre-roll time over. Be more agressive.
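In code, the pre-roll described by those comments just means handing back silence until the byte budget computed in startStream() is used up. A fragment-level sketch with hypothetical names:

if (duplexPrerollBytes > 0) {
	// Still inside the 0.5 s pre-roll window: hand the callback silence
	// instead of device data and burn down the remaining budget.
	memset(buffer, 0, bufferBytes);
	duplexPrerollBytes -= bufferBytes;
}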
@ -1338,7 +1333,7 @@ void airtaudio::api::Ds::callbackEvent() {
}
endRead = nextReadPointer + bufferBytes;
}
- } else { // _mode == INPUT
+ } else { // _mode == airtaudio::mode_input
while ( safeReadPointer < endRead
&& m_stream.callbackInfo.isRunning) {
// See comments for playback.
@ -17,17 +17,17 @@ namespace airtaudio {
public:
Ds();
virtual ~Ds();
- airtaudio::api::type getCurrentApi() {
+ enum airtaudio::type getCurrentApi() {
- return airtaudio::api::WINDOWS_DS;
+ return airtaudio::type_ds;
}
uint32_t getDeviceCount();
uint32_t getDefaultOutputDevice();
uint32_t getDefaultInputDevice();
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
- enum airtaudio::errorType closeStream();
+ enum airtaudio::error closeStream();
- enum airtaudio::errorType startStream();
+ enum airtaudio::error startStream();
- enum airtaudio::errorType stopStream();
+ enum airtaudio::error stopStream();
- enum airtaudio::errorType abortStream();
+ enum airtaudio::error abortStream();
long getStreamLatency();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
@ -40,7 +40,7 @@ namespace airtaudio {
long m_duplexPrerollBytes;
std::vector<struct DsDevice> dsDevices;
bool probeDeviceOpen(uint32_t _device,
- airtaudio::api::StreamMode _mode,
+ airtaudio::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
@ -20,7 +20,7 @@ airtaudio::Api* airtaudio::api::Dummy::Create() {

airtaudio::api::Dummy::Dummy() {
m_errorText = "This class provides no functionality.";
- error(airtaudio::errorWarning);
+ error(airtaudio::error_warning);
}

uint32_t airtaudio::api::Dummy::getDeviceCount() {
@ -33,24 +33,24 @@ rtaudio::DeviceInfo airtaudio::api::Dummy::getDeviceInfo(uint32_t _device) {
return info;
}

- enum airtaudio::errorType airtaudio::api::Dummy::closeStream() {
+ enum airtaudio::error airtaudio::api::Dummy::closeStream() {
- return airtaudio::errorNone;
+ return airtaudio::error_none;
}

- enum airtaudio::errorType airtaudio::api::Dummy::startStream() {
+ enum airtaudio::error airtaudio::api::Dummy::startStream() {
- return airtaudio::errorNone;
+ return airtaudio::error_none;
}

- enum airtaudio::errorType airtaudio::api::Dummy::stopStream() {
+ enum airtaudio::error airtaudio::api::Dummy::stopStream() {
- return airtaudio::errorNone;
+ return airtaudio::error_none;
}

- enum airtaudio::errorType airtaudio::api::Dummy::abortStream() {
+ enum airtaudio::error airtaudio::api::Dummy::abortStream() {
- return airtaudio::errorNone;
+ return airtaudio::error_none;
}

bool airtaudio::api::Dummy::probeDeviceOpen(uint32_t _device,
- airtaudio::api::StreamMode _mode,
+ airtaudio::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
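Since the Dummy backend provides no functionality, each of those calls succeeds immediately. A tiny usage sketch, assuming the Dummy header is included:

// Nothing is actually opened or played; Dummy only reports success.
airtaudio::api::Dummy dummy;
if (dummy.startStream() == airtaudio::error_none) {
	// always taken with the Dummy backend
}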
@ -6,8 +6,8 @@
* @license like MIT (see license file)
*/

- #if !defined(__AIRTAUDIO_API_DUMMY_H__) && defined(__AIRTAUDIO_DUMMY__)
+ #if !defined(__AIRTAUDIO_DUMMY__) && defined(__DUMMY__)
- #define __AIRTAUDIO_API_DUMMY_H__
+ #define __AIRTAUDIO_DUMMY__

#include <airtaudio/Interface.h>

@ -18,18 +18,18 @@ namespace airtaudio {
static airtaudio::Api* Create();
public:
Dummy();
- airtaudio::api::type getCurrentApi() {
+ enum airtaudio::type getCurrentApi() {
- return airtaudio::api::RTAUDIO_DUMMY;
+ return airtaudio::type_dummy;
}
uint32_t getDeviceCount();
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
- enum airtaudio::errorType closeStream();
+ enum airtaudio::error closeStream();
- enum airtaudio::errorType startStream();
+ enum airtaudio::error startStream();
- enum airtaudio::errorType stopStream();
+ enum airtaudio::error stopStream();
- enum airtaudio::errorType abortStream();
+ enum airtaudio::error abortStream();
private:
bool probeDeviceOpen(uint32_t _device,
- airtaudio::api::StreamMode _mode,
+ airtaudio::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
@ -83,7 +83,7 @@ airtaudio::api::Jack::Jack() {
}

airtaudio::api::Jack::~Jack() {
- if (m_stream.state != STREAM_CLOSED) {
+ if (m_stream.state != airtaudio::state_closed) {
closeStream();
}
}
@ -128,7 +128,7 @@ airtaudio::DeviceInfo airtaudio::api::Jack::getDeviceInfo(uint32_t _device) {
jack_client_t *client = jack_client_open("RtApiJackInfo", options, status);
if (client == nullptr) {
ATA_ERROR("Jack server not found or connection error!");
- // TODO : airtaudio::errorWarning;
+ // TODO : airtaudio::error_warning;
return info;
}
const char **ports;
@ -157,7 +157,7 @@ airtaudio::DeviceInfo airtaudio::api::Jack::getDeviceInfo(uint32_t _device) {
if (_device >= nDevices) {
jack_client_close(client);
ATA_ERROR("device ID is invalid!");
- // TODO : airtaudio::errorInvalidUse;
+ // TODO : airtaudio::error_invalidUse;
return info;
}
// Get the current jack server sample rate.
@ -187,7 +187,7 @@ airtaudio::DeviceInfo airtaudio::api::Jack::getDeviceInfo(uint32_t _device) {
if (info.outputChannels == 0 && info.inputChannels == 0) {
jack_client_close(client);
ATA_ERROR("error determining Jack input/output channels!");
- // TODO : airtaudio::errorWarning;
+ // TODO : airtaudio::error_warning;
return info;
}
// If device opens for both playback and capture, we determine the channels.
@ -256,7 +256,7 @@ static int32_t jackXrun(void* _infoPointer) {
}

bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
- airtaudio::api::StreamMode _mode,
+ airtaudio::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
@ -266,9 +266,9 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
// Look for jack server and try to become a client (only do once per stream).
jack_client_t *client = 0;
- if ( _mode == OUTPUT
+ if ( _mode == airtaudio::mode_output
- || ( _mode == INPUT
+ || ( _mode == airtaudio::mode_input
- && m_stream.mode != OUTPUT)) {
+ && m_stream.mode != airtaudio::mode_output)) {
jack_options_t jackoptions = (jack_options_t) (JackNoStartServer); //JackNullOption;
jack_status_t *status = nullptr;
if (_options && !_options->streamName.empty()) {
@ -315,7 +315,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
// channels. Jack "input ports" equal RtAudio output channels.
uint32_t nChannels = 0;
uint64_t flag = JackPortIsInput;
- if (_mode == INPUT) flag = JackPortIsOutput;
+ if (_mode == airtaudio::mode_input) flag = JackPortIsOutput;
ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag);
if (ports) {
while (ports[ nChannels ]) {
@ -340,42 +340,37 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag);
if (ports[ _firstChannel ]) {
// Added by Ge Wang
- jack_latency_callback_mode_t cbmode = (_mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
+ jack_latency_callback_mode_t cbmode = (_mode == airtaudio::mode_input ? JackCaptureLatency : JackPlaybackLatency);
// the range (usually the min and max are equal)
jack_latency_range_t latrange; latrange.min = latrange.max = 0;
// get the latency range
jack_port_get_latency_range(jack_port_by_name(client, ports[_firstChannel]), cbmode, &latrange);
// be optimistic, use the min!
- m_stream.latency[_mode] = latrange.min;
+ m_stream.latency[modeToIdTable(_mode)] = latrange.min;
- //m_stream.latency[_mode] = jack_port_get_latency(jack_port_by_name(client, ports[ _firstChannel ]));
+ //m_stream.latency[modeToIdTable(_mode)] = jack_port_get_latency(jack_port_by_name(client, ports[ _firstChannel ]));
}
free(ports);
// The jack server always uses 32-bit floating-point data.
- m_stream.deviceFormat[_mode] = FLOAT32;
+ m_stream.deviceFormat[modeToIdTable(_mode)] = FLOAT32;
m_stream.userFormat = _format;
- if (_options && _options->flags & NONINTERLEAVED) {
- m_stream.userInterleaved = false;
- } else {
- m_stream.userInterleaved = true;
- }
// Jack always uses non-interleaved buffers.
- m_stream.deviceInterleaved[_mode] = false;
+ m_stream.deviceInterleaved[modeToIdTable(_mode)] = false;
// Jack always provides host byte-ordered data.
- m_stream.doByteSwap[_mode] = false;
+ m_stream.doByteSwap[modeToIdTable(_mode)] = false;
// Get the buffer size. The buffer size and number of buffers
// (periods) is set when the jack server is started.
m_stream.bufferSize = (int) jack_get_buffer_size(client);
*_bufferSize = m_stream.bufferSize;
- m_stream.nDeviceChannels[_mode] = _channels;
+ m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels;
- m_stream.nUserChannels[_mode] = _channels;
+ m_stream.nUserChannels[modeToIdTable(_mode)] = _channels;
// Set flags for buffer conversion.
- m_stream.doConvertBuffer[_mode] = false;
+ m_stream.doConvertBuffer[modeToIdTable(_mode)] = false;
- if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
+ if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) {
- m_stream.doConvertBuffer[_mode] = true;
+ m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
- if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
+ if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false
- && m_stream.nUserChannels[_mode] > 1) {
+ && m_stream.nUserChannels[modeToIdTable(_mode)] > 1) {
- m_stream.doConvertBuffer[_mode] = true;
+ m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
// Allocate our JackHandle structure for the stream.
if (handle == 0) {
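The latency probe above uses the standard JACK latency-range API. A standalone sketch of the same calls outside probeDeviceOpen(); client and port names are placeholders:

#include <jack/jack.h>

// Report the minimum playback latency (in frames) advertised for one port.
static jack_nframes_t portMinLatency(jack_client_t* _client, const char* _portName) {
	jack_port_t* port = jack_port_by_name(_client, _portName);
	if (port == nullptr) {
		return 0;
	}
	jack_latency_range_t range;
	range.min = range.max = 0;
	jack_port_get_latency_range(port, JackPlaybackLatency, &range);
	return range.min; // be optimistic, use the min (as the code above does)
}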
@ -387,22 +382,22 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
m_stream.apiHandle = (void *) handle;
handle->client = client;
}
-handle->deviceName[_mode] = deviceName;
+handle->deviceName[modeToIdTable(_mode)] = deviceName;
// Allocate necessary internal buffers.
uint64_t bufferBytes;
-bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat);
+bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
-m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
+m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
-if (m_stream.userBuffer[_mode] == nullptr) {
+if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("error allocating user buffer memory.");
goto error;
}
-if (m_stream.doConvertBuffer[_mode]) {
+if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
bool makeBuffer = true;
-if (_mode == OUTPUT) {
+if (_mode == airtaudio::mode_output) {
bufferBytes = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
-} else { // _mode == INPUT
+} else { // _mode == airtaudio::mode_input
bufferBytes = m_stream.nDeviceChannels[1] * formatBytes(m_stream.deviceFormat[1]);
-if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
+if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) {
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
if (bufferBytes < bytesOut) {
makeBuffer = false;
@ -420,19 +415,19 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
}
}
// Allocate memory for the Jack ports (channels) identifiers.
-handle->ports[_mode] = (jack_port_t **) malloc (sizeof (jack_port_t *) * _channels);
+handle->ports[modeToIdTable(_mode)] = (jack_port_t **) malloc (sizeof (jack_port_t *) * _channels);
-if (handle->ports[_mode] == nullptr) {
+if (handle->ports[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("error allocating port memory.");
goto error;
}
-m_stream.device[_mode] = _device;
+m_stream.device[modeToIdTable(_mode)] = _device;
-m_stream.channelOffset[_mode] = _firstChannel;
+m_stream.channelOffset[modeToIdTable(_mode)] = _firstChannel;
-m_stream.state = STREAM_STOPPED;
+m_stream.state = airtaudio::state_stopped;
m_stream.callbackInfo.object = (void *) this;
-if ( m_stream.mode == OUTPUT
+if ( m_stream.mode == airtaudio::mode_output
-&& _mode == INPUT) {
+&& _mode == airtaudio::mode_input) {
// We had already set up the stream for output.
-m_stream.mode = DUPLEX;
+m_stream.mode = airtaudio::mode_duplex;
} else {
m_stream.mode = _mode;
jack_set_process_callback(handle->client, jackCallbackHandler, (void *) &m_stream.callbackInfo);
@ -441,7 +436,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
}
// Register our ports.
char label[64];
-if (_mode == OUTPUT) {
+if (_mode == airtaudio::mode_output) {
for (uint32_t i=0; i<m_stream.nUserChannels[0]; i++) {
snprintf(label, 64, "outport %d", i);
handle->ports[0][i] = jack_port_register(handle->client,
@ -463,7 +458,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
// Setup the buffer conversion information structure. We don't use
// buffers to do channel offsets, so we override that parameter
// here.
-if (m_stream.doConvertBuffer[_mode]) {
+if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
setConvertInfo(_mode, 0);
}
return true;
@ -492,14 +487,14 @@ error:
return false;
}

-enum airtaudio::errorType airtaudio::api::Jack::closeStream() {
+enum airtaudio::error airtaudio::api::Jack::closeStream() {
-if (m_stream.state == STREAM_CLOSED) {
+if (m_stream.state == airtaudio::state_closed) {
ATA_ERROR("no open stream to close!");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
}
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
if (handle != nullptr) {
-if (m_stream.state == STREAM_RUNNING) {
+if (m_stream.state == airtaudio::state_running) {
jack_deactivate(handle->client);
}
jack_client_close(handle->client);
@ -524,18 +519,18 @@ enum airtaudio::errorType airtaudio::api::Jack::closeStream() {
free(m_stream.deviceBuffer);
m_stream.deviceBuffer = nullptr;
}
-m_stream.mode = UNINITIALIZED;
+m_stream.mode = airtaudio::mode_unknow;
-m_stream.state = STREAM_CLOSED;
+m_stream.state = airtaudio::state_closed;
-return airtaudio::errorNone;
+return airtaudio::error_none;
}

-enum airtaudio::errorType airtaudio::api::Jack::startStream() {
+enum airtaudio::error airtaudio::api::Jack::startStream() {
-if (verifyStream() != airtaudio::errorNone) {
+if (verifyStream() != airtaudio::error_none) {
-return airtaudio::errorFail;
+return airtaudio::error_fail;
}
-if (m_stream.state == STREAM_RUNNING) {
+if (m_stream.state == airtaudio::state_running) {
ATA_ERROR("the stream is already running!");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
}
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
int32_t result = jack_activate(handle->client);
@ -545,8 +540,8 @@ enum airtaudio::errorType airtaudio::api::Jack::startStream() {
}
const char **ports;
// Get the list of available ports.
-if ( m_stream.mode == OUTPUT
+if ( m_stream.mode == airtaudio::mode_output
-|| m_stream.mode == DUPLEX) {
+|| m_stream.mode == airtaudio::mode_duplex) {
result = 1;
ports = jack_get_ports(handle->client, handle->deviceName[0].c_str(), nullptr, JackPortIsInput);
if (ports == nullptr) {
@ -568,8 +563,8 @@ enum airtaudio::errorType airtaudio::api::Jack::startStream() {
}
free(ports);
}
-if ( m_stream.mode == INPUT
+if ( m_stream.mode == airtaudio::mode_input
-|| m_stream.mode == DUPLEX) {
+|| m_stream.mode == airtaudio::mode_duplex) {
result = 1;
ports = jack_get_ports(handle->client, handle->deviceName[1].c_str(), nullptr, JackPortIsOutput);
if (ports == nullptr) {
@ -592,25 +587,25 @@ enum airtaudio::errorType airtaudio::api::Jack::startStream() {
}
handle->drainCounter = 0;
handle->internalDrain = false;
-m_stream.state = STREAM_RUNNING;
+m_stream.state = airtaudio::state_running;
unlock:
if (result == 0) {
-return airtaudio::errorNone;
+return airtaudio::error_none;
}
-return airtaudio::errorSystemError;
+return airtaudio::error_systemError;
}

-enum airtaudio::errorType airtaudio::api::Jack::stopStream() {
+enum airtaudio::error airtaudio::api::Jack::stopStream() {
-if (verifyStream() != airtaudio::errorNone) {
+if (verifyStream() != airtaudio::error_none) {
-return airtaudio::errorFail;
+return airtaudio::error_fail;
}
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
ATA_ERROR("the stream is already stopped!");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
}
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
-if ( m_stream.mode == OUTPUT
+if ( m_stream.mode == airtaudio::mode_output
-|| m_stream.mode == DUPLEX) {
+|| m_stream.mode == airtaudio::mode_duplex) {
if (handle->drainCounter == 0) {
handle->drainCounter = 2;
std::unique_lock<std::mutex> lck(m_stream.mutex);
@ -618,17 +613,17 @@ enum airtaudio::errorType airtaudio::api::Jack::stopStream() {
}
}
jack_deactivate(handle->client);
-m_stream.state = STREAM_STOPPED;
+m_stream.state = airtaudio::state_stopped;
-return airtaudio::errorNone;
+return airtaudio::error_none;
}

-enum airtaudio::errorType airtaudio::api::Jack::abortStream() {
+enum airtaudio::error airtaudio::api::Jack::abortStream() {
-if (verifyStream() != airtaudio::errorNone) {
+if (verifyStream() != airtaudio::error_none) {
-return airtaudio::errorFail;
+return airtaudio::error_fail;
}
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
ATA_ERROR("the stream is already stopped!");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
}
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
handle->drainCounter = 2;
@ -647,11 +642,11 @@ static void jackStopStream(void *_ptr) {
}

bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) {
-if ( m_stream.state == STREAM_STOPPED
+if ( m_stream.state == airtaudio::state_stopped
-|| m_stream.state == STREAM_STOPPING) {
+|| m_stream.state == airtaudio::state_stopping) {
return true;
}
-if (m_stream.state == STREAM_CLOSED) {
+if (m_stream.state == airtaudio::state_closed) {
ATA_ERROR("RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!");
return false;
}
@ -663,7 +658,7 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) {
JackHandle *handle = (JackHandle *) m_stream.apiHandle;
// Check if we were draining the stream and signal is finished.
if (handle->drainCounter > 3) {
-m_stream.state = STREAM_STOPPING;
+m_stream.state = airtaudio::state_stopping;
if (handle->internalDrain == true) {
new std::thread(jackStopStream, info);
} else {
@ -674,13 +669,13 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) {
// Invoke user callback first, to get fresh output data.
if (handle->drainCounter == 0) {
double streamTime = getStreamTime();
-airtaudio::streamStatus status = 0;
+enum airtaudio::status status = airtaudio::status_ok;
-if (m_stream.mode != INPUT && handle->xrun[0] == true) {
+if (m_stream.mode != airtaudio::mode_input && handle->xrun[0] == true) {
-status |= OUTPUT_UNDERFLOW;
+status |= airtaudio::status_underflow;
handle->xrun[0] = false;
}
-if (m_stream.mode != OUTPUT && handle->xrun[1] == true) {
+if (m_stream.mode != airtaudio::mode_output && handle->xrun[1] == true) {
-status |= INPUT_OVERFLOW;
+status |= airtaudio::status_overflow;
handle->xrun[1] = false;
}
int32_t cbReturnValue = info->callback(m_stream.userBuffer[0],
@ -689,7 +684,7 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) {
streamTime,
status);
if (cbReturnValue == 2) {
-m_stream.state = STREAM_STOPPING;
+m_stream.state = airtaudio::state_stopping;
handle->drainCounter = 2;
new std::thread(jackStopStream, info);
return true;
@ -701,7 +696,7 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) {
}
jack_default_audio_sample_t *jackbuffer;
uint64_t bufferBytes = _nframes * sizeof(jack_default_audio_sample_t);
-if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
+if (m_stream.mode == airtaudio::mode_output || m_stream.mode == airtaudio::mode_duplex) {
if (handle->drainCounter > 1) { // write zeros to the output stream
for (uint32_t i=0; i<m_stream.nDeviceChannels[0]; i++) {
jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[0][i], (jack_nframes_t) _nframes);
@ -724,8 +719,8 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) {
goto unlock;
}
}
-if ( m_stream.mode == INPUT
+if ( m_stream.mode == airtaudio::mode_input
-|| m_stream.mode == DUPLEX) {
+|| m_stream.mode == airtaudio::mode_duplex) {
if (m_stream.doConvertBuffer[1]) {
for (uint32_t i=0; i<m_stream.nDeviceChannels[1]; i++) {
jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(handle->ports[1][i], (jack_nframes_t) _nframes);
@ -17,15 +17,15 @@ namespace airtaudio {
public:
Jack();
virtual ~Jack();
-airtaudio::api::type getCurrentApi() {
+enum airtaudio::type getCurrentApi() {
-return airtaudio::api::UNIX_JACK;
+return airtaudio::type_jack;
}
uint32_t getDeviceCount();
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
-enum airtaudio::errorType closeStream();
+enum airtaudio::error closeStream();
-enum airtaudio::errorType startStream();
+enum airtaudio::error startStream();
-enum airtaudio::errorType stopStream();
+enum airtaudio::error stopStream();
-enum airtaudio::errorType abortStream();
+enum airtaudio::error abortStream();
long getStreamLatency();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
@ -34,7 +34,7 @@ namespace airtaudio {
bool callbackEvent(uint64_t _nframes);
private:
bool probeDeviceOpen(uint32_t _device,
-airtaudio::api::StreamMode _mode,
+airtaudio::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
@ -49,7 +49,7 @@ airtaudio::api::Oss::Oss() {
}

airtaudio::api::Oss::~Oss() {
-if (m_stream.state != STREAM_CLOSED) {
+if (m_stream.state != airtaudio::state_closed) {
closeStream();
}
}
@ -102,20 +102,20 @@ airtaudio::DeviceInfo airtaudio::api::Oss::getDeviceInfo(uint32_t _device) {
close(mixerfd);
if (result == -1) {
ATA_ERROR("error getting device (" << ainfo.name << ") info.");
-error(airtaudio::errorWarning);
+error(airtaudio::error_warning);
return info;
}
// Probe channels
if (ainfo.caps & PCM_CAP_OUTPUT) {
info.outputChannels = ainfo.max_channels;
}
if (ainfo.caps & PCM_CAP_INPUT) {
info.inputChannels = ainfo.max_channels;
}
if (ainfo.caps & PCM_CAP_DUPLEX) {
if ( info.outputChannels > 0
&& info.inputChannels > 0
&& ainfo.caps & PCM_CAP_DUPLEX) {
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
}
}
@ -215,11 +215,11 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
return false;
}
// Check if device supports input or output
-if ( ( _mode == OUTPUT
+if ( ( _mode == airtaudio::mode_output
&& !(ainfo.caps & PCM_CAP_OUTPUT))
-|| ( _mode == INPUT
+|| ( _mode == airtaudio::mode_input
&& !(ainfo.caps & PCM_CAP_INPUT))) {
-if (_mode == OUTPUT) {
+if (_mode == airtaudio::mode_output) {
ATA_ERROR("device (" << ainfo.name << ") does not support output.");
} else {
ATA_ERROR("device (" << ainfo.name << ") does not support input.");
@ -228,15 +228,15 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
}
int32_t flags = 0;
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
-if (_mode == OUTPUT) {
+if (_mode == airtaudio::mode_output) {
flags |= O_WRONLY;
-} else { // _mode == INPUT
+} else { // _mode == airtaudio::mode_input
-if ( m_stream.mode == OUTPUT
+if ( m_stream.mode == airtaudio::mode_output
&& m_stream.device[0] == _device) {
// We just set the same device for playback ... close and reopen for duplex (OSS only).
close(handle->id[0]);
handle->id[0] = 0;
if (!(ainfo.caps & PCM_CAP_DUPLEX)) {
ATA_ERROR("device (" << ainfo.name << ") does not support duplex mode.");
return false;
}
@ -269,7 +269,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
// For duplex operation, specifically set this mode (this doesn't seem to work).
/*
if (flags | O_RDWR) {
result = ioctl(fd, SNDCTL_DSP_SETDUPLEX, nullptr);
if (result == -1) {
m_errorStream << "error setting duplex mode for device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
@ -278,7 +278,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
}
*/
// Check the device channel support.
-m_stream.nUserChannels[_mode] = _channels;
+m_stream.nUserChannels[modeToIdTable(_mode)] = _channels;
if (ainfo.max_channels < (int)(_channels + _firstChannel)) {
close(fd);
ATA_ERROR("the device (" << ainfo.name << ") does not support requested channel parameters.");
@ -293,7 +293,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
ATA_ERROR("error setting channel parameters on device (" << ainfo.name << ").");
return false;
}
-m_stream.nDeviceChannels[_mode] = deviceChannels;
+m_stream.nDeviceChannels[modeToIdTable(_mode)] = deviceChannels;
// Get the data format mask
int32_t mask;
result = ioctl(fd, SNDCTL_DSP_GETFMTS, &mask);
@ -305,69 +305,69 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
// Determine how to set the device format.
m_stream.userFormat = _format;
int32_t deviceFormat = -1;
-m_stream.doByteSwap[_mode] = false;
+m_stream.doByteSwap[modeToIdTable(_mode)] = false;
if (_format == RTAUDIO_SINT8) {
if (mask & AFMT_S8) {
deviceFormat = AFMT_S8;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT8;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
}
} else if (_format == RTAUDIO_SINT16) {
if (mask & AFMT_S16_NE) {
deviceFormat = AFMT_S16_NE;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
} else if (mask & AFMT_S16_OE) {
deviceFormat = AFMT_S16_OE;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
-m_stream.doByteSwap[_mode] = true;
+m_stream.doByteSwap[modeToIdTable(_mode)] = true;
}
} else if (_format == RTAUDIO_SINT24) {
if (mask & AFMT_S24_NE) {
deviceFormat = AFMT_S24_NE;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
} else if (mask & AFMT_S24_OE) {
deviceFormat = AFMT_S24_OE;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
-m_stream.doByteSwap[_mode] = true;
+m_stream.doByteSwap[modeToIdTable(_mode)] = true;
}
} else if (_format == RTAUDIO_SINT32) {
if (mask & AFMT_S32_NE) {
deviceFormat = AFMT_S32_NE;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
} else if (mask & AFMT_S32_OE) {
deviceFormat = AFMT_S32_OE;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
-m_stream.doByteSwap[_mode] = true;
+m_stream.doByteSwap[modeToIdTable(_mode)] = true;
}
}
if (deviceFormat == -1) {
// The user requested format is not natively supported by the device.
if (mask & AFMT_S16_NE) {
deviceFormat = AFMT_S16_NE;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
} else if (mask & AFMT_S32_NE) {
deviceFormat = AFMT_S32_NE;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
} else if (mask & AFMT_S24_NE) {
deviceFormat = AFMT_S24_NE;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
} else if (mask & AFMT_S16_OE) {
deviceFormat = AFMT_S16_OE;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT16;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
-m_stream.doByteSwap[_mode] = true;
+m_stream.doByteSwap[modeToIdTable(_mode)] = true;
} else if (mask & AFMT_S32_OE) {
deviceFormat = AFMT_S32_OE;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT32;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
-m_stream.doByteSwap[_mode] = true;
+m_stream.doByteSwap[modeToIdTable(_mode)] = true;
} else if (mask & AFMT_S24_OE) {
deviceFormat = AFMT_S24_OE;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT24;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
-m_stream.doByteSwap[_mode] = true;
+m_stream.doByteSwap[modeToIdTable(_mode)] = true;
} else if (mask & AFMT_S8) {
deviceFormat = AFMT_S8;
-m_stream.deviceFormat[_mode] = RTAUDIO_SINT8;
+m_stream.deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
}
}
-if (m_stream.deviceFormat[_mode] == 0) {
+if (m_stream.deviceFormat[modeToIdTable(_mode)] == 0) {
// This really shouldn't happen ...
close(fd);
ATA_ERROR("device (" << ainfo.name << ") data format not supported by RtAudio.");
@ -389,7 +389,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
// bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
// We'll check the actual value used near the end of the setup
// procedure.
-int32_t ossBufferBytes = *_bufferSize * formatBytes(m_stream.deviceFormat[_mode]) * deviceChannels;
+int32_t ossBufferBytes = *_bufferSize * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels;
if (ossBufferBytes < 16) {
ossBufferBytes = 16;
}
@ -398,7 +398,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
buffers = _options->numberOfBuffers;
}
if ( _options != nullptr
-&& _options->flags & RTAUDIO_MINIMIZE_LATENCY) {
+&& _options->flags.m_minimizeLatency == true) {
buffers = 2;
}
if (buffers < 2) {
@ -413,7 +413,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
}
m_stream.nBuffers = buffers;
// Save buffer size (in sample frames).
-*_bufferSize = ossBufferBytes / (formatBytes(m_stream.deviceFormat[_mode]) * deviceChannels);
+*_bufferSize = ossBufferBytes / (formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels);
m_stream.bufferSize = *_bufferSize;
// Set the sample rate.
int32_t srate = _sampleRate;
@ -430,30 +430,26 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
return false;
}
m_stream.sampleRate = _sampleRate;
-if ( _mode == INPUT
+if ( _mode == airtaudio::mode_input
-&& m_stream._mode == OUTPUT
+&& m_stream._mode == airtaudio::mode_output
&& m_stream.device[0] == _device) {
// We're doing duplex setup here.
m_stream.deviceFormat[0] = m_stream.deviceFormat[1];
m_stream.nDeviceChannels[0] = deviceChannels;
}
// Set interleaving parameters.
-m_stream.userInterleaved = true;
+m_stream.deviceInterleaved[modeToIdTable(_mode)] = true;
-m_stream.deviceInterleaved[_mode] = true;
-if (_options && _options->flags & RTAUDIO_NONINTERLEAVED) {
-m_stream.userInterleaved = false;
-}
// Set flags for buffer conversion
-m_stream.doConvertBuffer[_mode] = false;
+m_stream.doConvertBuffer[modeToIdTable(_mode)] = false;
-if (m_stream.userFormat != m_stream.deviceFormat[_mode]) {
+if (m_stream.userFormat != m_stream.deviceFormat[modeToIdTable(_mode)]) {
-m_stream.doConvertBuffer[_mode] = true;
+m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
-if (m_stream.nUserChannels[_mode] < m_stream.nDeviceChannels[_mode]) {
+if (m_stream.nUserChannels[modeToIdTable(_mode)] < m_stream.nDeviceChannels[modeToIdTable(_mode)]) {
-m_stream.doConvertBuffer[_mode] = true;
+m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
-if ( m_stream.userInterleaved != m_stream.deviceInterleaved[_mode]
+if ( m_stream.deviceInterleaved[modeToIdTable(_mode)] == false
-&& m_stream.nUserChannels[_mode] > 1) {
+&& m_stream.nUserChannels[modeToIdTable(_mode)] > 1) {
-m_stream.doConvertBuffer[_mode] = true;
+m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
}
// Allocate the stream handles if necessary and then save.
if (m_stream.apiHandle == 0) {
@ -466,20 +462,20 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
} else {
handle = (OssHandle *) m_stream.apiHandle;
}
-handle->id[_mode] = fd;
+handle->id[modeToIdTable(_mode)] = fd;
// Allocate necessary internal buffers.
uint64_t bufferBytes;
-bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat);
+bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
-m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
+m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
-if (m_stream.userBuffer[_mode] == nullptr) {
+if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("error allocating user buffer memory.");
goto error;
}
-if (m_stream.doConvertBuffer[_mode]) {
+if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
bool makeBuffer = true;
-bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]);
+bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
-if (_mode == INPUT) {
+if (_mode == airtaudio::mode_input) {
-if ( m_stream._mode == OUTPUT
+if ( m_stream._mode == airtaudio::mode_output
&& m_stream.deviceBuffer) {
uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
if (bufferBytes <= bytesOut) {
@ -499,16 +495,16 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
}
}
}
-m_stream.device[_mode] = _device;
+m_stream.device[modeToIdTable(_mode)] = _device;
-m_stream.state = STREAM_STOPPED;
+m_stream.state = airtaudio::state_stopped;
// Setup the buffer conversion information structure.
-if (m_stream.doConvertBuffer[_mode]) {
+if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
setConvertInfo(_mode, _firstChannel);
}
// Setup thread if necessary.
-if (m_stream.mode == OUTPUT && _mode == INPUT) {
+if (m_stream.mode == airtaudio::mode_output && _mode == airtaudio::mode_input) {
// We had already set up an output stream.
-m_stream.mode = DUPLEX;
+m_stream.mode = airtaudio::mode_duplex;
if (m_stream.device[0] == _device) {
handle->id[0] = fd;
}
@ -549,26 +545,26 @@ error:
return false;
}

-enum airtaudio::errorType airtaudio::api::Oss::closeStream() {
+enum airtaudio::error airtaudio::api::Oss::closeStream() {
-if (m_stream.state == STREAM_CLOSED) {
+if (m_stream.state == airtaudio::state_closed) {
ATA_ERROR("no open stream to close!");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
}
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
m_stream.callbackInfo.isRunning = false;
m_stream.mutex.lock();
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
handle->runnable.notify_one();
}
m_stream.mutex.unlock();
m_stream.callbackInfo.thread->join();
-if (m_stream.state == STREAM_RUNNING) {
+if (m_stream.state == airtaudio::state_running) {
-if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
+if (m_stream.mode == airtaudio::mode_output || m_stream.mode == airtaudio::mode_duplex) {
ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
} else {
ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
}
-m_stream.state = STREAM_STOPPED;
+m_stream.state = airtaudio::state_stopped;
}
if (handle) {
if (handle->id[0]) {
@ -590,21 +586,21 @@ enum airtaudio::errorType airtaudio::api::Oss::closeStream() {
free(m_stream.deviceBuffer);
m_stream.deviceBuffer = 0;
}
-m_stream.mode = UNINITIALIZED;
+m_stream.mode = airtaudio::mode_unknow;
-m_stream.state = STREAM_CLOSED;
+m_stream.state = airtaudio::state_closed;
-return airtaudio::errorNone;
+return airtaudio::error_none;
}

-enum airtaudio::errorType airtaudio::api::Oss::startStream() {
+enum airtaudio::error airtaudio::api::Oss::startStream() {
-if (verifyStream() != airtaudio::errorNone) {
+if (verifyStream() != airtaudio::error_none) {
-return airtaudio::errorFail;
+return airtaudio::error_fail;
}
-if (m_stream.state == STREAM_RUNNING) {
+if (m_stream.state == airtaudio::state_running) {
ATA_ERROR("the stream is already running!");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
}
m_stream.mutex.lock();
-m_stream.state = STREAM_RUNNING;
+m_stream.state = airtaudio::state_running;
// No need to do anything else here ... OSS automatically starts
// when fed samples.
m_stream.mutex.unlock();
@ -612,24 +608,24 @@ enum airtaudio::errorType airtaudio::api::Oss::startStream() {
handle->runnable.notify_one();
}

-enum airtaudio::errorType airtaudio::api::Oss::stopStream() {
+enum airtaudio::error airtaudio::api::Oss::stopStream() {
-if (verifyStream() != airtaudio::errorNone) {
+if (verifyStream() != airtaudio::error_none) {
-return airtaudio::errorFail;
+return airtaudio::error_fail;
}
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
ATA_ERROR("the stream is already stopped!");
-return;
+return airtaudio::error_warning;
}
m_stream.mutex.lock();
// The state might change while waiting on a mutex.
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
m_stream.mutex.unlock();
-return;
+return airtaudio::error_none;
}
int32_t result = 0;
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
-if ( m_stream.mode == OUTPUT
+if ( m_stream.mode == airtaudio::mode_output
-|| m_stream.mode == DUPLEX) {
+|| m_stream.mode == airtaudio::mode_duplex) {
// Flush the output with zeros a few times.
char *buffer;
int32_t samples;
@ -648,7 +644,7 @@ enum airtaudio::errorType airtaudio::api::Oss::stopStream() {
result = write(handle->id[0], buffer, samples * formatBytes(format));
if (result == -1) {
ATA_ERROR("audio write error.");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
}
}
result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
@ -658,8 +654,8 @@ enum airtaudio::errorType airtaudio::api::Oss::stopStream() {
}
handle->triggered = false;
}
-if ( m_stream.mode == INPUT
+if ( m_stream.mode == airtaudio::mode_input
-|| ( m_stream.mode == DUPLEX
+|| ( m_stream.mode == airtaudio::mode_duplex
&& handle->id[0] != handle->id[1])) {
result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
if (result == -1) {
@ -668,31 +664,31 @@ enum airtaudio::errorType airtaudio::api::Oss::stopStream() {
}
}
unlock:
-m_stream.state = STREAM_STOPPED;
+m_stream.state = airtaudio::state_stopped;
m_stream.mutex.unlock();
if (result != -1) {
-return airtaudio::errorNone;
+return airtaudio::error_none;
}
-return airtaudio::errorSystemError;
+return airtaudio::error_systemError;
}

-enum airtaudio::errorType airtaudio::api::Oss::abortStream() {
+enum airtaudio::error airtaudio::api::Oss::abortStream() {
-if (verifyStream() != airtaudio::errorNone) {
+if (verifyStream() != airtaudio::error_none) {
-return airtaudio::errorFail;
+return airtaudio::error_fail;
}
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
ATA_ERROR("the stream is already stopped!");
-return airtaudio::errorWarning;
+return airtaudio::error_warning;
}
m_stream.mutex.lock();
// The state might change while waiting on a mutex.
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
m_stream.mutex.unlock();
-return;
+return airtaudio::error_none;
}
int32_t result = 0;
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
-if (m_stream.mode == OUTPUT || m_stream.mode == DUPLEX) {
+if (m_stream.mode == airtaudio::mode_output || m_stream.mode == airtaudio::mode_duplex) {
result = ioctl(handle->id[0], SNDCTL_DSP_HALT, 0);
if (result == -1) {
ATA_ERROR("system error stopping callback procedure on device (" << m_stream.device[0] << ").");
@ -700,7 +696,7 @@ enum airtaudio::errorType airtaudio::api::Oss::abortStream() {
}
handle->triggered = false;
}
-if (m_stream.mode == INPUT || (m_stream.mode == DUPLEX && handle->id[0] != handle->id[1])) {
+if (m_stream.mode == airtaudio::mode_input || (m_stream.mode == airtaudio::mode_duplex && handle->id[0] != handle->id[1])) {
result = ioctl(handle->id[1], SNDCTL_DSP_HALT, 0);
if (result == -1) {
ATA_ERROR("system error stopping input callback procedure on device (" << m_stream.device[0] << ").");
@ -708,39 +704,39 @@ enum airtaudio::errorType airtaudio::api::Oss::abortStream() {
}
}
unlock:
-m_stream.state = STREAM_STOPPED;
+m_stream.state = airtaudio::state_stopped;
m_stream.mutex.unlock();
if (result != -1) {
-return airtaudio::errorNone;
+return airtaudio::error_none;
}
-return airtaudio::errorSystemError;
+return airtaudio::error_systemError;
}

void airtaudio::api::Oss::callbackEvent() {
OssHandle *handle = (OssHandle *) m_stream.apiHandle;
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
std::unique_lock<std::mutex> lck(m_stream.mutex);
handle->runnable.wait(lck);
-if (m_stream.state != STREAM_RUNNING) {
+if (m_stream.state != airtaudio::state_running) {
return;
}
}
-if (m_stream.state == STREAM_CLOSED) {
+if (m_stream.state == airtaudio::state_closed) {
ATA_ERROR("the stream is closed ... this shouldn't happen!");
-return airtaudio::errorWarning;
+return;
}
// Invoke user callback to get fresh output data.
int32_t doStopStream = 0;
double streamTime = getStreamTime();
-rtaudio::streamStatus status = 0;
+enum airtaudio::status status = airtaudio::status_ok;
-if ( m_stream.mode != INPUT
+if ( m_stream.mode != airtaudio::mode_input
&& handle->xrun[0] == true) {
-status |= RTAUDIO_OUTPUT_UNDERFLOW;
+status |= airtaudio::status_underflow;
handle->xrun[0] = false;
}
-if ( m_stream.mode != OUTPUT
+if ( m_stream.mode != airtaudio::mode_output
&& handle->xrun[1] == true) {
-status |= RTAUDIO_INPUT_OVERFLOW;
+status |= airtaudio::status_overflow;
handle->xrun[1] = false;
}
doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[0],
@ -754,15 +750,15 @@ void airtaudio::api::Oss::callbackEvent() {
}
m_stream.mutex.lock();
// The state might change while waiting on a mutex.
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
goto unlock;
}
int32_t result;
char *buffer;
int32_t samples;
audio::format format;
-if ( m_stream.mode == OUTPUT
+if ( m_stream.mode == airtaudio::mode_output
-|| m_stream.mode == DUPLEX) {
+|| m_stream.mode == airtaudio::mode_duplex) {
// Setup parameters and do buffer conversion if necessary.
if (m_stream.doConvertBuffer[0]) {
buffer = m_stream.deviceBuffer;
@ -778,12 +774,12 @@ void airtaudio::api::Oss::callbackEvent() {
if (m_stream.doByteSwap[0]) {
byteSwapBuffer(buffer, samples, format);
}
-if ( m_stream.mode == DUPLEX
+if ( m_stream.mode == airtaudio::mode_duplex
&& handle->triggered == false) {
int32_t trig = 0;
ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
result = write(handle->id[0], buffer, samples * formatBytes(format));
trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
handle->triggered = true;
} else {
@ -795,12 +791,12 @@ void airtaudio::api::Oss::callbackEvent() {
// specific means for determining that.
handle->xrun[0] = true;
ATA_ERROR("audio write error.");
-//error(airtaudio::errorWarning);
+//error(airtaudio::error_warning);
// Continue on to input section.
}
}
-if ( m_stream.mode == INPUT
+if ( m_stream.mode == airtaudio::mode_input
-|| m_stream.mode == DUPLEX) {
+|| m_stream.mode == airtaudio::mode_duplex) {
// Setup parameters.
if (m_stream.doConvertBuffer[1]) {
buffer = m_stream.deviceBuffer;
@ -17,15 +17,15 @@ namespace airtaudio {
public:
Oss();
virtual ~Oss();
-airtaudio::api::type getCurrentApi() {
+enum airtaudio::type getCurrentApi() {
-return airtaudio::api::LINUX_OSS;
+return airtaudio::type_oss;
}
uint32_t getDeviceCount();
airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
-enum airtaudio::errorType closeStream();
+enum airtaudio::error closeStream();
-enum airtaudio::errorType startStream();
+enum airtaudio::error startStream();
-enum airtaudio::errorType stopStream();
+enum airtaudio::error stopStream();
-enum airtaudio::errorType abortStream();
+enum airtaudio::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
@ -33,7 +33,7 @@ namespace airtaudio {
void callbackEvent();
private:
bool probeDeviceOpen(uint32_t _device,
-airtaudio::api::StreamMode _mode,
+airtaudio::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
@ -65,7 +65,7 @@ struct PulseAudioHandle {
};

airtaudio::api::Pulse::~Pulse() {
-if (m_stream.state != STREAM_CLOSED) {
+if (m_stream.state != airtaudio::state_closed) {
closeStream();
}
}
@ -101,12 +101,12 @@ static void pulseaudio_callback(void* _user) {
}
}

-enum airtaudio::errorType airtaudio::api::Pulse::closeStream() {
+enum airtaudio::error airtaudio::api::Pulse::closeStream() {
PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
m_stream.callbackInfo.isRunning = false;
if (pah) {
m_stream.mutex.lock();
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
pah->runnable = true;
pah->runnable_cv.notify_one();;
}
@ -130,31 +130,31 @@ enum airtaudio::errorType airtaudio::api::Pulse::closeStream() {
free(m_stream.userBuffer[1]);
m_stream.userBuffer[1] = nullptr;
}
-m_stream.state = STREAM_CLOSED;
+m_stream.state = airtaudio::state_closed;
-m_stream.mode = UNINITIALIZED;
+m_stream.mode = airtaudio::mode_unknow;
-return airtaudio::errorNone;
+return airtaudio::error_none;
}

void airtaudio::api::Pulse::callbackEvent() {
PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
-if (m_stream.state == STREAM_STOPPED) {
+if (m_stream.state == airtaudio::state_stopped) {
std::unique_lock<std::mutex> lck(m_stream.mutex);
while (!pah->runnable) {
pah->runnable_cv.wait(lck);
}
-if (m_stream.state != STREAM_RUNNING) {
+if (m_stream.state != airtaudio::state_running) {
m_stream.mutex.unlock();
return;
}
}
-if (m_stream.state == STREAM_CLOSED) {
+if (m_stream.state == airtaudio::state_closed) {
ATA_ERROR("the stream is closed ... this shouldn't happen!");
return;
}
double streamTime = getStreamTime();
-airtaudio::streamStatus status = 0;
+enum airtaudio::status status = airtaudio::status_ok;
-int32_t doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[OUTPUT],
+int32_t doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[airtaudio::mode_output],
-m_stream.userBuffer[INPUT],
+m_stream.userBuffer[airtaudio::mode_input],
m_stream.bufferSize,
streamTime,
status);
@ -163,42 +163,42 @@ void airtaudio::api::Pulse::callbackEvent() {
return;
}
m_stream.mutex.lock();
-void *pulse_in = m_stream.doConvertBuffer[INPUT] ? m_stream.deviceBuffer : m_stream.userBuffer[INPUT];
+void *pulse_in = m_stream.doConvertBuffer[airtaudio::mode_input] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::mode_input];
-void *pulse_out = m_stream.doConvertBuffer[OUTPUT] ? m_stream.deviceBuffer : m_stream.userBuffer[OUTPUT];
+void *pulse_out = m_stream.doConvertBuffer[airtaudio::mode_output] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::mode_output];
-if (m_stream.state != STREAM_RUNNING) {
+if (m_stream.state != airtaudio::state_running) {
goto unlock;
}
int32_t pa_error;
size_t bytes;
-if ( m_stream.mode == OUTPUT
+if ( m_stream.mode == airtaudio::mode_output
-|| m_stream.mode == DUPLEX) {
+|| m_stream.mode == airtaudio::mode_duplex) {
-if (m_stream.doConvertBuffer[OUTPUT]) {
+if (m_stream.doConvertBuffer[airtaudio::mode_output]) {
convertBuffer(m_stream.deviceBuffer,
-m_stream.userBuffer[OUTPUT],
+m_stream.userBuffer[airtaudio::mode_output],
-m_stream.convertInfo[OUTPUT]);
+m_stream.convertInfo[airtaudio::mode_output]);
-bytes = m_stream.nDeviceChannels[OUTPUT] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[OUTPUT]);
+bytes = m_stream.nDeviceChannels[airtaudio::mode_output] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[airtaudio::mode_output]);
} else {
-bytes = m_stream.nUserChannels[OUTPUT] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
+bytes = m_stream.nUserChannels[airtaudio::mode_output] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
}
if (pa_simple_write(pah->s_play, pulse_out, bytes, &pa_error) < 0) {
ATA_ERROR("audio write error, " << pa_strerror(pa_error) << ".");
return;
}
}
-if (m_stream.mode == INPUT || m_stream.mode == DUPLEX) {
+if (m_stream.mode == airtaudio::mode_input || m_stream.mode == airtaudio::mode_duplex) {
-if (m_stream.doConvertBuffer[INPUT]) {
+if (m_stream.doConvertBuffer[airtaudio::mode_input]) {
-bytes = m_stream.nDeviceChannels[INPUT] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[INPUT]);
+bytes = m_stream.nDeviceChannels[airtaudio::mode_input] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[airtaudio::mode_input]);
} else {
-bytes = m_stream.nUserChannels[INPUT] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
+bytes = m_stream.nUserChannels[airtaudio::mode_input] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
}
if (pa_simple_read(pah->s_rec, pulse_in, bytes, &pa_error) < 0) {
ATA_ERROR("audio read error, " << pa_strerror(pa_error) << ".");
return;
}
-if (m_stream.doConvertBuffer[INPUT]) {
+if (m_stream.doConvertBuffer[airtaudio::mode_input]) {
-convertBuffer(m_stream.userBuffer[INPUT],
+convertBuffer(m_stream.userBuffer[airtaudio::mode_input],
m_stream.deviceBuffer,
-m_stream.convertInfo[INPUT]);
+m_stream.convertInfo[airtaudio::mode_input]);
}
}
unlock:
@ -211,76 +211,76 @@ unlock:
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
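callbackEvent() now builds an enum airtaudio::status value (status_ok by default) and hands it to the user callback together with the output and input user buffers. For reference, a user callback matching that calling convention could be sketched as below; the passthrough behaviour, the two-channel SINT16 layout, and the assumption that returning 0 keeps the stream running (as in RtAudio) are illustrative, not taken from this commit:

#include <airtaudio/base.h>
#include <cstdint>
#include <cstring>

// Duplex passthrough: copy capture data to playback, or emit silence when there is no input.
static int32_t passthroughCallback(void* _outputBuffer,
                                   void* _inputBuffer,
                                   uint32_t _nFrames,
                                   double /*_streamTime*/,
                                   enum airtaudio::status _status) {
	if (_status != airtaudio::status_ok) {
		// status_overflow / status_underflow report an xrun on the device side.
	}
	std::size_t bytes = _nFrames * 2 * sizeof(int16_t); // assumes 2 interleaved SINT16 channels
	if (_outputBuffer != nullptr) {
		if (_inputBuffer != nullptr) {
			std::memcpy(_outputBuffer, _inputBuffer, bytes);
		} else {
			std::memset(_outputBuffer, 0, bytes);
		}
	}
	return 0; // assumed: 0 keeps the stream running, non-zero requests a stop
}
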
-enum airtaudio::errorType airtaudio::api::Pulse::startStream() {
+enum airtaudio::error airtaudio::api::Pulse::startStream() {
 	PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
-	if (m_stream.state == STREAM_CLOSED) {
+	if (m_stream.state == airtaudio::state_closed) {
 		ATA_ERROR("the stream is not open!");
-		return airtaudio::errorInvalidUse;
+		return airtaudio::error_invalidUse;
 	}
-	if (m_stream.state == STREAM_RUNNING) {
+	if (m_stream.state == airtaudio::state_running) {
 		ATA_ERROR("the stream is already running!");
-		return airtaudio::errorWarning;
+		return airtaudio::error_warning;
 	}
 	m_stream.mutex.lock();
-	m_stream.state = STREAM_RUNNING;
+	m_stream.state = airtaudio::state_running;
 	pah->runnable = true;
 	pah->runnable_cv.notify_one();
 	m_stream.mutex.unlock();
-	return airtaudio::errorNone;
+	return airtaudio::error_none;
 }

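startStream() itself only flips the state to state_running, sets pah->runnable and signals runnable_cv; the callback thread is the one that actually resumes, because callbackEvent() parks on that condition variable while the stream is stopped. The same handshake in isolation, as a self-contained sketch (RunGate is a made-up name, not a type of this library):

#include <condition_variable>
#include <mutex>

struct RunGate {
	std::mutex mutex;
	std::condition_variable cv;
	bool runnable = false;

	// What startStream() does: mark the stream runnable and wake the callback thread.
	void release() {
		{
			std::lock_guard<std::mutex> lock(mutex);
			runnable = true;
		}
		cv.notify_one();
	}

	// What callbackEvent() does while stopped: block until release() is called.
	void waitUntilRunnable() {
		std::unique_lock<std::mutex> lock(mutex);
		cv.wait(lock, [this] { return runnable; });
	}
};
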
-enum airtaudio::errorType airtaudio::api::Pulse::stopStream() {
+enum airtaudio::error airtaudio::api::Pulse::stopStream() {
 	PulseAudioHandle *pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
-	if (m_stream.state == STREAM_CLOSED) {
+	if (m_stream.state == airtaudio::state_closed) {
 		ATA_ERROR("the stream is not open!");
-		return airtaudio::errorInvalidUse;
+		return airtaudio::error_invalidUse;
 	}
-	if (m_stream.state == STREAM_STOPPED) {
+	if (m_stream.state == airtaudio::state_stopped) {
 		ATA_ERROR("the stream is already stopped!");
-		return airtaudio::errorWarning;
+		return airtaudio::error_warning;
 	}
-	m_stream.state = STREAM_STOPPED;
+	m_stream.state = airtaudio::state_stopped;
 	m_stream.mutex.lock();
 	if (pah && pah->s_play) {
 		int32_t pa_error;
 		if (pa_simple_drain(pah->s_play, &pa_error) < 0) {
 			ATA_ERROR("error draining output device, " << pa_strerror(pa_error) << ".");
 			m_stream.mutex.unlock();
-			return airtaudio::errorSystemError;
+			return airtaudio::error_systemError;
 		}
 	}
-	m_stream.state = STREAM_STOPPED;
+	m_stream.state = airtaudio::state_stopped;
 	m_stream.mutex.unlock();
-	return airtaudio::errorNone;
+	return airtaudio::error_none;
 }
 
-enum airtaudio::errorType airtaudio::api::Pulse::abortStream() {
+enum airtaudio::error airtaudio::api::Pulse::abortStream() {
 	PulseAudioHandle *pah = static_cast<PulseAudioHandle*>(m_stream.apiHandle);
-	if (m_stream.state == STREAM_CLOSED) {
+	if (m_stream.state == airtaudio::state_closed) {
 		ATA_ERROR("the stream is not open!");
-		return airtaudio::errorInvalidUse;
+		return airtaudio::error_invalidUse;
 	}
-	if (m_stream.state == STREAM_STOPPED) {
+	if (m_stream.state == airtaudio::state_stopped) {
 		ATA_ERROR("the stream is already stopped!");
-		return airtaudio::errorWarning;
+		return airtaudio::error_warning;
 	}
-	m_stream.state = STREAM_STOPPED;
+	m_stream.state = airtaudio::state_stopped;
 	m_stream.mutex.lock();
 	if (pah && pah->s_play) {
 		int32_t pa_error;
 		if (pa_simple_flush(pah->s_play, &pa_error) < 0) {
 			ATA_ERROR("error flushing output device, " << pa_strerror(pa_error) << ".");
 			m_stream.mutex.unlock();
-			return airtaudio::errorSystemError;
+			return airtaudio::error_systemError;
 		}
 	}
-	m_stream.state = STREAM_STOPPED;
+	m_stream.state = airtaudio::state_stopped;
 	m_stream.mutex.unlock();
-	return airtaudio::errorNone;
+	return airtaudio::error_none;
 }

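The only functional difference between the two functions above is how the playback stream is wound down: stopStream() drains it (pa_simple_drain() blocks until everything already written has been played), whereas abortStream() flushes it (pa_simple_flush() discards whatever is still queued). The contrast in isolation, using only libpulse-simple calls that already appear above; a connected s_play handle is assumed:

#include <pulse/simple.h>
#include <pulse/error.h>
#include <cstdio>

// Finish playback either gracefully (drain) or immediately (flush).
static int finishPlayback(pa_simple* _s_play, bool _graceful) {
	int error = 0;
	int ret = _graceful ? pa_simple_drain(_s_play, &error)  // stopStream() path: wait for the queue to empty
	                    : pa_simple_flush(_s_play, &error); // abortStream() path: drop queued samples
	if (ret < 0) {
		std::fprintf(stderr, "pulse error: %s\n", pa_strerror(error));
	}
	return ret;
}
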
 bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
-                                            airtaudio::api::StreamMode _mode,
+                                            airtaudio::mode _mode,
                                             uint32_t _channels,
                                             uint32_t _firstChannel,
                                             uint32_t _sampleRate,
@@ -293,7 +293,7 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
 	if (_device != 0) {
 		return false;
 	}
-	if (_mode != INPUT && _mode != OUTPUT) {
+	if (_mode != airtaudio::mode_input && _mode != airtaudio::mode_output) {
 		return false;
 	}
 	if (_channels != 1 && _channels != 2) {
@@ -332,33 +332,27 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
 		ATA_ERROR("unsupported sample format.");
 		return false;
 	}
-	// Set interleaving parameters.
-	if (_options && _options->flags & NONINTERLEAVED) {
-		m_stream.userInterleaved = false;
-	} else {
-		m_stream.userInterleaved = true;
-	}
-	m_stream.deviceInterleaved[_mode] = true;
+	m_stream.deviceInterleaved[modeToIdTable(_mode)] = true;
 	m_stream.nBuffers = 1;
-	m_stream.doByteSwap[_mode] = false;
-	m_stream.doConvertBuffer[_mode] = _channels > 1 && !m_stream.userInterleaved;
-	m_stream.deviceFormat[_mode] = m_stream.userFormat;
-	m_stream.nUserChannels[_mode] = _channels;
-	m_stream.nDeviceChannels[_mode] = _channels + _firstChannel;
-	m_stream.channelOffset[_mode] = 0;
+	m_stream.doByteSwap[modeToIdTable(_mode)] = false;
+	m_stream.doConvertBuffer[modeToIdTable(_mode)] = false;
+	m_stream.deviceFormat[modeToIdTable(_mode)] = m_stream.userFormat;
+	m_stream.nUserChannels[modeToIdTable(_mode)] = _channels;
+	m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel;
+	m_stream.channelOffset[modeToIdTable(_mode)] = 0;
 	// Allocate necessary internal buffers.
-	bufferBytes = m_stream.nUserChannels[_mode] * *_bufferSize * formatBytes(m_stream.userFormat);
-	m_stream.userBuffer[_mode] = (char *) calloc(bufferBytes, 1);
-	if (m_stream.userBuffer[_mode] == nullptr) {
+	bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+	m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
+	if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 		ATA_ERROR("error allocating user buffer memory.");
 		goto error;
 	}
 	m_stream.bufferSize = *_bufferSize;
-	if (m_stream.doConvertBuffer[_mode]) {
+	if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
 		bool makeBuffer = true;
-		bufferBytes = m_stream.nDeviceChannels[_mode] * formatBytes(m_stream.deviceFormat[_mode]);
-		if (_mode == INPUT) {
-			if (m_stream.mode == OUTPUT && m_stream.deviceBuffer) {
+		bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
+		if (_mode == airtaudio::mode_input) {
+			if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) {
 				uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
 				if (bufferBytes <= bytesOut) makeBuffer = false;
 			}
@@ -373,9 +367,9 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
 			}
 		}
 	}
-	m_stream.device[_mode] = _device;
+	m_stream.device[modeToIdTable(_mode)] = _device;
 	// Setup the buffer conversion information structure.
-	if (m_stream.doConvertBuffer[_mode]) {
+	if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
 		setConvertInfo(_mode, _firstChannel);
 	}
 	if (!m_stream.apiHandle) {
@@ -389,14 +383,14 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
 	pah = static_cast<PulseAudioHandle *>(m_stream.apiHandle);
 	int32_t error;
 	switch (_mode) {
-		case INPUT:
+		case airtaudio::mode_input:
 			pah->s_rec = pa_simple_new(nullptr, "airtAudio", PA_STREAM_RECORD, nullptr, "Record", &ss, nullptr, nullptr, &error);
 			if (!pah->s_rec) {
 				ATA_ERROR("error connecting input to PulseAudio server.");
 				goto error;
 			}
 			break;
-		case OUTPUT:
+		case airtaudio::mode_output:
 			pah->s_play = pa_simple_new(nullptr, "airtAudio", PA_STREAM_PLAYBACK, nullptr, "Playback", &ss, nullptr, nullptr, &error);
 			if (!pah->s_play) {
 				ATA_ERROR("error connecting output to PulseAudio server.");
@@ -406,12 +400,12 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
 		default:
 			goto error;
 	}
-	if (m_stream.mode == UNINITIALIZED) {
+	if (m_stream.mode == airtaudio::mode_unknow) {
 		m_stream.mode = _mode;
 	} else if (m_stream.mode == _mode) {
 		goto error;
 	}else {
-		m_stream.mode = DUPLEX;
+		m_stream.mode = airtaudio::mode_duplex;
 	}
 	if (!m_stream.callbackInfo.isRunning) {
 		m_stream.callbackInfo.object = this;
@@ -422,7 +416,7 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
 			goto error;
 		}
 	}
-	m_stream.state = STREAM_STOPPED;
+	m_stream.state = airtaudio::state_stopped;
 	return true;
 error:
 	if (pah && m_stream.callbackInfo.isRunning) {
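Throughout probeDeviceOpen() the per-direction stream arrays (userBuffer, deviceFormat, nUserChannels, nDeviceChannels, ...) are now indexed through modeToIdTable(_mode) rather than with the raw enum value, so the playback/duplex direction and the capture direction each get their own slot. A tiny illustration of that indexing, assuming modeToIdTable() and the mode enumerators are exposed by the airtaudio headers; the channels array is hypothetical:

#include <airtaudio/base.h>
#include <cstdint>

static void exampleChannelSetup() {
	uint32_t channels[2] = {0, 0};
	// One slot for the playback side, one for the capture side.
	channels[airtaudio::modeToIdTable(airtaudio::mode_output)] = 2; // stereo playback
	channels[airtaudio::modeToIdTable(airtaudio::mode_input)] = 1;  // mono capture
	(void)channels;
}
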
@@ -16,15 +16,15 @@ namespace airtaudio {
 			static airtaudio::Api* Create();
 		public:
 			virtual ~Pulse();
-			airtaudio::api::type getCurrentApi() {
-				return airtaudio::api::LINUX_PULSE;
+			enum airtaudio::type getCurrentApi() {
+				return airtaudio::type_pulse;
 			}
 			uint32_t getDeviceCount();
 			airtaudio::DeviceInfo getDeviceInfo(uint32_t _device);
-			enum airtaudio::errorType closeStream();
-			enum airtaudio::errorType startStream();
-			enum airtaudio::errorType stopStream();
-			enum airtaudio::errorType abortStream();
+			enum airtaudio::error closeStream();
+			enum airtaudio::error startStream();
+			enum airtaudio::error stopStream();
+			enum airtaudio::error abortStream();
 			// This function is intended for internal use only. It must be
 			// public because it is called by the internal callback handler,
 			// which is not a member of RtAudio. External use of this function
@@ -34,7 +34,7 @@ namespace airtaudio {
 			std::vector<airtaudio::DeviceInfo> m_devices;
 			void saveDeviceInfo();
 			bool probeDeviceOpen(uint32_t _device,
-			                     airtaudio::api::StreamMode _mode,
+			                     airtaudio::mode _mode,
 			                     uint32_t _channels,
 			                     uint32_t _firstChannel,
 			                     uint32_t _sampleRate,

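With the header above, getCurrentApi() returns the flat enum airtaudio::type, so backend detection compares against airtaudio::type_pulse rather than the old airtaudio::api::LINUX_PULSE. A one-liner sketch, assuming getCurrentApi() is also declared on the airtaudio::Api base class and that Api.h is its header:

#include <airtaudio/Api.h> // assumed header for the airtaudio::Api base class

// Hypothetical helper: true when the given backend is the PulseAudio one.
static bool isPulseBackend(airtaudio::Api& _api) {
	return _api.getCurrentApi() == airtaudio::type_pulse;
}
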
@@ -5,101 +5,3 @@
  *
  * @license like MIT (see license file)
  */
-#if 0
-#include <airtaudio/base.h>
-
-std::ostream& airtaudio::operator <<(std::ostream& _os, enum errorType _obj) {
-	switch(_obj) {
-		case errorNone:
-			_os << "errorNone";
-			break;
-		case errorFail:
-			_os << "errorFail";
-			break;
-		case errorWarning:
-			_os << "errorWarning";
-			break;
-		case errorInputNull:
-			_os << "errorInputNull";
-			break;
-		case errorInvalidUse:
-			_os << "errorInvalidUse";
-			break;
-		case errorSystemError:
-			_os << "errorSystemError";
-			break;
-		default:
-			_os << "UNKNOW...";
-			break;
-	}
-	return _os;
-}
-
-std::ostream& airtaudio::operator <<(std::ostream& _os, const audio::format& _obj) {
-	switch(_obj) {
-		case SINT8:
-			_os << "SINT8";
-			break;
-		case SINT16:
-			_os << "SINT16";
-			break;
-		case SINT24:
-			_os << "SINT24";
-			break;
-		case SINT32:
-			_os << "SINT32";
-			break;
-		case FLOAT32:
-			_os << "FLOAT32";
-			break;
-		case FLOAT64:
-			_os << "FLOAT64";
-			break;
-		default:
-			_os << "UNKNOW...";
-			break;
-	}
-	return _os;
-}
-
-std::ostream& airtaudio::operator <<(std::ostream& _os, const airtaudio::streamFlags& _obj) {
-	switch(_obj) {
-		case NONINTERLEAVED:
-			_os << "NONINTERLEAVED";
-			break;
-		case MINIMIZE_LATENCY:
-			_os << "MINIMIZE_LATENCY";
-			break;
-		case HOG_DEVICE:
-			_os << "HOG_DEVICE";
-			break;
-		case SCHEDULE_REALTIME:
-			_os << "SCHEDULE_REALTIME";
-			break;
-		case ALSA_USE_DEFAULT:
-			_os << "ALSA_USE_DEFAULT";
-			break;
-		default:
-			_os << "UNKNOW...";
-			break;
-	}
-	return _os;
-}
-
-std::ostream& airtaudio::operator <<(std::ostream& _os, const airtaudio::streamStatus& _obj) {
-	switch(_obj) {
-		case INPUT_OVERFLOW:
-			_os << "INPUT_OVERFLOW";
-			break;
-		case OUTPUT_UNDERFLOW:
-			_os << "OUTPUT_UNDERFLOW";
-			break;
-		default:
-			_os << "UNKNOW...";
-			break;
-	}
-	return _os;
-}
-
-
-#endif

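The block deleted above (already fenced off with #if 0) held the old debug stream operators written against the pre-rename names (errorType, audio::format, streamFlags, streamStatus). Purely for orientation, an equivalent operator for the renamed enum airtaudio::error could look like the sketch below; it is not part of this commit:

#include <airtaudio/base.h>
#include <ostream>

std::ostream& operator<<(std::ostream& _os, enum airtaudio::error _obj) {
	switch (_obj) {
		case airtaudio::error_none:        _os << "error_none";        break;
		case airtaudio::error_fail:        _os << "error_fail";        break;
		case airtaudio::error_warning:     _os << "error_warning";     break;
		case airtaudio::error_inputNull:   _os << "error_inputNull";   break;
		case airtaudio::error_invalidUse:  _os << "error_invalidUse";  break;
		case airtaudio::error_systemError: _os << "error_systemError"; break;
		default:                           _os << "unknown";           break;
	}
	return _os;
}
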
airtaudio/base.h (102 changed lines)
@@ -27,87 +27,29 @@
 //#include <etk/Stream.h>
 
 namespace airtaudio {
-	//! Defined RtError types.
-	enum errorType {
-		errorNone, //!< No error
-		errorFail, //!< An error occure in the operation
-		errorWarning, //!< A non-critical error.
-		errorInputNull, //!< null input or internal errror
-		errorInvalidUse, //!< The function was called incorrectly.
-		errorSystemError //!< A system error occured.
+	//! Defined error types.
+	enum error {
+		error_none, //!< No error
+		error_fail, //!< An error occure in the operation
+		error_warning, //!< A non-critical error.
+		error_inputNull, //!< null input or internal errror
+		error_invalidUse, //!< The function was called incorrectly.
+		error_systemError //!< A system error occured.
 	};
 
-	/**
-	 * @typedef typedef uint64_t streamFlags;
-	 * @brief RtAudio stream option flags.
-	 *
-	 * The following flags can be OR'ed together to allow a client to
-	 * make changes to the default stream behavior:
-	 *
-	 * - \e NONINTERLEAVED: Use non-interleaved buffers (default = interleaved).
-	 * - \e MINIMIZE_LATENCY: Attempt to set stream parameters for lowest possible latency.
-	 * - \e HOG_DEVICE: Attempt grab device for exclusive use.
-	 * - \e ALSA_USE_DEFAULT: Use the "default" PCM device (ALSA only).
-	 *
-	 * By default, RtAudio streams pass and receive audio data from the
-	 * client in an interleaved format. By passing the
-	 * RTAUDIO_NONINTERLEAVED flag to the openStream() function, audio
-	 * data will instead be presented in non-interleaved buffers. In
-	 * this case, each buffer argument in the RtAudioCallback function
-	 * will point to a single array of data, with \c nFrames samples for
-	 * each channel concatenated back-to-back. For example, the first
-	 * sample of data for the second channel would be located at index \c
-	 * nFrames (assuming the \c buffer pointer was recast to the correct
-	 * data type for the stream).
-	 *
-	 * Certain audio APIs offer a number of parameters that influence the
-	 * I/O latency of a stream. By default, RtAudio will attempt to set
-	 * these parameters internally for robust (glitch-free) performance
-	 * (though some APIs, like Windows Direct Sound, make this difficult).
-	 * By passing the RTAUDIO_MINIMIZE_LATENCY flag to the openStream()
-	 * function, internal stream settings will be influenced in an attempt
-	 * to minimize stream latency, though possibly at the expense of stream
-	 * performance.
-	 *
-	 * If the RTAUDIO_HOG_DEVICE flag is set, RtAudio will attempt to
-	 * open the input and/or output stream device(s) for exclusive use.
-	 * Note that this is not possible with all supported audio APIs.
-	 *
-	 * If the RTAUDIO_SCHEDULE_REALTIME flag is set, RtAudio will attempt
-	 * to select realtime scheduling (round-robin) for the callback thread.
-	 *
-	 * If the RTAUDIO_ALSA_USE_DEFAULT flag is set, RtAudio will attempt to
-	 * open the "default" PCM device when using the ALSA API. Note that this
-	 * will override any specified input or output device id.
-	 */
-	typedef uint32_t streamFlags;
-	static const streamFlags NONINTERLEAVED = 0x1; // Use non-interleaved buffers (default = interleaved).
-	static const streamFlags MINIMIZE_LATENCY = 0x2; // Attempt to set stream parameters for lowest possible latency.
-	static const streamFlags HOG_DEVICE = 0x4; // Attempt grab device and prevent use by others.
-	static const streamFlags SCHEDULE_REALTIME = 0x8; // Try to select realtime scheduling for callback thread.
-	static const streamFlags ALSA_USE_DEFAULT = 0x10; // Use the "default" PCM device (ALSA only).
-
-	/**
-	 * @brief Debug operator To display the curent element in a Human redeable information
-	 */
-	//std::ostream& operator <<(std::ostream& _os, const airtaudio::streamFlags& _obj);
-
-	/**
-	 * @typedef typedef uint64_t rtaudio::streamStatus;
-	 * @brief RtAudio stream status (over- or underflow) flags.
-	 *
-	 * Notification of a stream over- or underflow is indicated by a
-	 * non-zero stream \c status argument in the RtAudioCallback function.
-	 * The stream status can be one of the following two options,
-	 * depending on whether the stream is open for output and/or input:
-	 *
-	 * - \e RTAUDIO_INPUT_OVERFLOW: Input data was discarded because of an overflow condition at the driver.
-	 * - \e RTAUDIO_OUTPUT_UNDERFLOW: The output buffer ran low, likely producing a break in the output sound.
-	 */
-	typedef uint32_t streamStatus;
-	static const streamStatus INPUT_OVERFLOW = 0x1; // Input data was discarded because of an overflow condition at the driver.
-	static const streamStatus OUTPUT_UNDERFLOW = 0x2; // The output buffer ran low, likely causing a gap in the output sound.
+	class Flags {
+		public:
+			bool m_minimizeLatency; // Simple example ==> TODO ...
+			Flags() :
+			  m_minimizeLatency(false) {
+				// nothing to do ...
+			}
+	};
+	enum status {
+		status_ok, //!< nothing...
+		status_overflow, //!< Internal buffer has more data than they can accept
+		status_underflow //!< The internal buffer is empty
+	};
 
 	/**
 	 * @brief RtAudio callback function prototype.
 	 *
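In place of the old streamFlags bit-mask and its documentation block, the header now carries a small airtaudio::Flags value object (currently just a minimize-latency hint) plus the enum airtaudio::status delivered to the callback. Configuring the flags might look like the sketch below; how the object is actually handed to openStream() is not visible in this hunk, so that part is deliberately left out:

#include <airtaudio/base.h>

// Hypothetical helper: request the lowest-latency configuration the backend can offer.
static airtaudio::Flags lowLatencyFlags() {
	airtaudio::Flags flags;          // defaults to m_minimizeLatency == false
	flags.m_minimizeLatency = true;
	return flags;
}
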
@@ -149,7 +91,7 @@ namespace airtaudio {
 	                                 void* _inputBuffer,
 	                                 uint32_t _nFrames,
 	                                 double _streamTime,
-	                                 airtaudio::streamStatus _status)> AirTAudioCallback;
+	                                 airtaudio::status _status)> AirTAudioCallback;
 }
 
 #include <airtaudio/DeviceInfo.h>

@@ -17,14 +17,15 @@ def create(target):
 		'airtaudio/Api.cpp',
 		'airtaudio/api/Dummy.cpp',
 		])
-	myModule.add_module_depend(['audio'])
+	myModule.add_module_depend(['audio', 'etk'])
 
+	myModule.add_export_flag_CC(['-D__DUMMY__'])
 
-	myModule.add_export_flag_CC(['-D__AIRTAUDIO_API_DUMMY_H__'])
 	if target.name=="Windows":
 		myModule.add_src_file([
 			'airtaudio/api/Asio.cpp',
 			'airtaudio/api/Ds.cpp',
 			])
 		# ASIO API on Windows
 		myModule.add_export_flag_CC(['__WINDOWS_ASIO__'])
 		# Windows DirectSound API
@@ -32,52 +33,37 @@ def create(target):
 		myModule.add_module_depend(['etk'])
 	elif target.name=="Linux":
 		myModule.add_src_file([
 			'airtaudio/api/Alsa.cpp',
 			'airtaudio/api/Jack.cpp',
 			'airtaudio/api/Pulse.cpp',
 			'airtaudio/api/Oss.cpp'
 			])
-		# Linux Alsa API
-		# TODO : myModule.add_optionnal_module_depend('alsa', "__LINUX_ALSA__")
-		myModule.add_export_flag_CC(['-D__LINUX_ALSA__'])
-		myModule.add_export_flag_LD("-lasound")
-		# Linux Jack API
-		# TODO : myModule.add_optionnal_module_depend('jack', "__UNIX_JACK__")
-		#myModule.add_export_flag_CC(['-D__UNIX_JACK__'])
-		#myModule.add_export_flag_LD("-ljack")
-		# Linux PulseAudio API
-		# TODO : myModule.add_optionnal_module_depend('pulse', "__LINUX_PULSE__")
-		#myModule.add_export_flag_CC(['-D__LINUX_PULSE__'])
-		#myModule.add_export_flag_LD("-lpulse-simple")
-		#myModule.add_export_flag_LD("-lpulse")
-		# TODO : myModule.add_optionnal_module_depend('oss', "__LINUX_OSS__")
-		#myModule.add_export_flag_CC(['-D__LINUX_OSS__'])
-		# ...
-		myModule.add_module_depend(['etk'])
+		myModule.add_optionnal_module_depend('alsa', "__LINUX_ALSA__")
+		myModule.add_optionnal_module_depend('jack', "__UNIX_JACK__")
+		myModule.add_optionnal_module_depend('pulse', "__LINUX_PULSE__")
+		myModule.add_optionnal_module_depend('oss', "__LINUX_OSS__")
 	elif target.name=="MacOs":
 		myModule.add_src_file([
 			'airtaudio/api/Core.cpp',
 			'airtaudio/api/Oss.cpp'
 			])
 		# MacOsX core
-		# TODO : myModule.add_optionnal_module_depend('CoreAudio', "__MACOSX_CORE__")
-		myModule.add_export_flag_CC(['-D__MACOSX_CORE__'])
-		myModule.add_export_flag_LD("-framework CoreAudio")
-		myModule.add_module_depend(['etk'])
+		myModule.add_optionnal_module_depend('CoreAudio', "__MACOSX_CORE__")
+		#myModule.add_export_flag_CC(['-D__MACOSX_CORE__'])
+		#myModule.add_export_flag_LD("-framework CoreAudio")
 	elif target.name=="IOs":
 		myModule.add_src_file('airtaudio/api/CoreIos.mm')
 		# IOsX core
-		# TODO : myModule.add_optionnal_module_depend('CoreAudio', "__IOS_CORE__")
-		myModule.add_export_flag_CC(['-D__IOS_CORE__'])
-		myModule.add_export_flag_LD("-framework CoreAudio")
-		myModule.add_export_flag_LD("-framework AudioToolbox")
-		myModule.add_module_depend(['etk'])
+		myModule.add_optionnal_module_depend('CoreAudio', "__IOS_CORE__")
+		#myModule.add_export_flag_CC(['-D__IOS_CORE__'])
+		#myModule.add_export_flag_LD("-framework CoreAudio")
+		#myModule.add_export_flag_LD("-framework AudioToolbox")
 	elif target.name=="Android":
 		myModule.add_src_file('airtaudio/api/Android.cpp')
 		# MacOsX core
-		# TODO : myModule.add_optionnal_module_depend('ewolAndroidAudio', "__ANDROID_JAVA__")
-		myModule.add_export_flag_CC(['-D__ANDROID_JAVA__'])
-		myModule.add_module_depend(['ewol'])
+		myModule.add_optionnal_module_depend('ewolAndroidAudio', "__ANDROID_JAVA__")
+		#myModule.add_export_flag_CC(['-D__ANDROID_JAVA__'])
+		#myModule.add_module_depend(['ewol'])
 	else:
 		debug.warning("unknow target for AIRTAudio : " + target.name);