[DEV] correct jack and pulse output

Edouard DUPIN 2015-02-08 15:09:39 +01:00
parent 028279a74f
commit f4471f25e8
9 changed files with 71 additions and 72 deletions
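Across the nine touched files the change is the same mechanical fix: every formatBytes(...) call becomes audio::getFormatBytes(...), the Pulse callback indexes its buffers through airtaudio::modeToIdTable() instead of the raw airtaudio::mode_* enum values, and the Jack xrun handling assigns the proper airtaudio::status_* value. As a rough, hypothetical sketch of the buffer-size arithmetic those call sites share (the stand-in names below are not part of this diff or of the airtaudio sources):

    // Hypothetical sketch only: getFormatBytesSketch() stands in for audio::getFormatBytes(),
    // the helper this commit now calls wherever a buffer is sized or filled.
    #include <stdint.h>

    enum class FormatSketch { int16, int32, float32 }; // stand-in for enum audio::format

    static uint32_t getFormatBytesSketch(FormatSketch _format) {
        // Assumed behaviour: number of bytes per sample for the given format.
        switch (_format) {
            case FormatSketch::int16:   return 2;
            case FormatSketch::int32:   return 4;
            case FormatSketch::float32: return 4;
        }
        return 0;
    }

    static uint64_t userBufferBytesSketch(uint32_t _nUserChannels,
                                          uint32_t _bufferSize,
                                          FormatSketch _userFormat) {
        // Same arithmetic the hunks below use when sizing m_stream.userBuffer[...]:
        // channels * frames * bytes per sample.
        return uint64_t(_nUserChannels) * _bufferSize * getFormatBytesSketch(_userFormat);
    }

In the real sources the allocation then reads m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);, exactly as shown in the hunks below.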

View File

@@ -185,7 +185,7 @@ bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device,
 }
 if (m_stream.doConvertBuffer[modeToIdTable(_mode)] == true) {
 // Allocate necessary internal buffers.
-uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
+uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.userFormat);
 m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 ATA_ERROR("airtaudio::api::Android::probeDeviceOpen: error allocating user buffer memory.");

View File

@@ -475,7 +475,7 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
 }
 // Allocate necessary internal buffers
 uint64_t bufferBytes;
-bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
 m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 ATA_ERROR("error allocating user buffer memory.");
@@ -483,10 +483,10 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
 }
 if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
 bool makeBuffer = true;
-bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
+bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
 if (_mode == airtaudio::mode_input) {
 if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) {
-uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 if (bufferBytes <= bytesOut) {
 makeBuffer = false;
 }
@@ -743,7 +743,7 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
 nChannels = m_stream.nDeviceChannels[0] + m_stream.nDeviceChannels[1];
 if ( m_stream.mode == airtaudio::mode_output
 || m_stream.mode == airtaudio::mode_duplex) {
-bufferBytes = m_stream.bufferSize * formatBytes(m_stream.deviceFormat[0]);
+bufferBytes = m_stream.bufferSize * audio::getFormatBytes(m_stream.deviceFormat[0]);
 if (handle->drainCounter > 1) { // write zeros to the output stream
 for (i=0, j=0; i<nChannels; i++) {
 if (handle->bufferInfos[i].isInput != ASIOTrue) {
@@ -785,7 +785,7 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
 }
 if ( m_stream.mode == airtaudio::mode_input
 || m_stream.mode == airtaudio::mode_duplex) {
-bufferBytes = m_stream.bufferSize * formatBytes(m_stream.deviceFormat[1]);
+bufferBytes = m_stream.bufferSize * audio::getFormatBytes(m_stream.deviceFormat[1]);
 if (m_stream.doConvertBuffer[1]) {
 // Always interleave ASIO input data.
 for (i=0, j=0; i<nChannels; i++) {

View File

@@ -822,7 +822,7 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device,
 handle->id[modeToIdTable(_mode)] = id;
 // Allocate necessary internal buffers.
 uint64_t bufferBytes;
-bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
 // m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 m_stream.userBuffer[modeToIdTable(_mode)] = (char *) malloc(bufferBytes * sizeof(char));
 memset(m_stream.userBuffer[modeToIdTable(_mode)], 0, bufferBytes * sizeof(char));
@@ -836,11 +836,11 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device,
 if ( m_stream.doConvertBuffer[modeToIdTable(_mode)]
 && handle->nStreams[modeToIdTable(_mode)] > 1) {
 bool makeBuffer = true;
-bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
+bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
 if (_mode == airtaudio::mode_input) {
 if ( m_stream.mode == airtaudio::mode_output
 && m_stream.deviceBuffer) {
-uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 if (bufferBytes <= bytesOut) {
 makeBuffer = false;
 }

View File

@@ -211,7 +211,7 @@ bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device,
 }
 if (m_stream.doConvertBuffer[modeToIdTable(_mode)] == true) {
 // Allocate necessary internal buffers.
-uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
+uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.userFormat);
 m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 ATA_ERROR("error allocating user buffer memory.");

View File

@@ -727,7 +727,7 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device,
 m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
 }
 // Allocate necessary internal buffers
-long bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+long bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
 m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 ATA_ERROR("error allocating user buffer memory.");
@@ -735,10 +735,10 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device,
 }
 if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
 bool makeBuffer = true;
-bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
+bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
 if (_mode == airtaudio::mode_input) {
 if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) {
-uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 if (bufferBytes <= (long) bytesOut) {
 makeBuffer = false;
 }
@@ -910,7 +910,7 @@ enum airtaudio::error airtaudio::api::Ds::startStream() {
 m_duplexPrerollBytes = 0;
 if (m_stream.mode == airtaudio::mode_duplex) {
 // 0.5 seconds of silence in airtaudio::mode_duplex mode while the devices spin up and synchronize.
-m_duplexPrerollBytes = (int) (0.5 * m_stream.sampleRate * formatBytes(m_stream.deviceFormat[1]) * m_stream.nDeviceChannels[1]);
+m_duplexPrerollBytes = (int) (0.5 * m_stream.sampleRate * audio::getFormatBytes(m_stream.deviceFormat[1]) * m_stream.nDeviceChannels[1]);
 }
 HRESULT result = 0;
 if ( m_stream.mode == airtaudio::mode_output
@@ -1168,7 +1168,7 @@ void airtaudio::api::Ds::callbackEvent() {
 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
 if (handle->drainCounter > 1) { // write zeros to the output stream
 bufferBytes = m_stream.bufferSize * m_stream.nUserChannels[0];
-bufferBytes *= formatBytes(m_stream.userFormat);
+bufferBytes *= audio::getFormatBytes(m_stream.userFormat);
 memset(m_stream.userBuffer[0], 0, bufferBytes);
 }
 // Setup parameters and do buffer conversion if necessary.
@@ -1176,11 +1176,11 @@ void airtaudio::api::Ds::callbackEvent() {
 buffer = m_stream.deviceBuffer;
 convertBuffer(buffer, m_stream.userBuffer[0], m_stream.convertInfo[0]);
 bufferBytes = m_stream.bufferSize * m_stream.nDeviceChannels[0];
-bufferBytes *= formatBytes(m_stream.deviceFormat[0]);
+bufferBytes *= audio::getFormatBytes(m_stream.deviceFormat[0]);
 } else {
 buffer = m_stream.userBuffer[0];
 bufferBytes = m_stream.bufferSize * m_stream.nUserChannels[0];
-bufferBytes *= formatBytes(m_stream.userFormat);
+bufferBytes *= audio::getFormatBytes(m_stream.userFormat);
 }
 // No byte swapping necessary in DirectSound implementation.
 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
@@ -1221,7 +1221,7 @@ void airtaudio::api::Ds::callbackEvent() {
 // beyond the end of our next write region. We use the
 // Sleep() function to suspend operation until that happens.
 double millis = (endWrite - leadPointer) * 1000.0;
-millis /= (formatBytes(m_stream.deviceFormat[0]) * m_stream.nDeviceChannels[0] * m_stream.sampleRate);
+millis /= (audio::getFormatBytes(m_stream.deviceFormat[0]) * m_stream.nDeviceChannels[0] * m_stream.sampleRate);
 if (millis < 1.0) {
 millis = 1.0;
 }
@@ -1274,11 +1274,11 @@ void airtaudio::api::Ds::callbackEvent() {
 if (m_stream.doConvertBuffer[1]) {
 buffer = m_stream.deviceBuffer;
 bufferBytes = m_stream.bufferSize * m_stream.nDeviceChannels[1];
-bufferBytes *= formatBytes(m_stream.deviceFormat[1]);
+bufferBytes *= audio::getFormatBytes(m_stream.deviceFormat[1]);
 } else {
 buffer = m_stream.userBuffer[1];
 bufferBytes = m_stream.bufferSize * m_stream.nUserChannels[1];
-bufferBytes *= formatBytes(m_stream.userFormat);
+bufferBytes *= audio::getFormatBytes(m_stream.userFormat);
 }
 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
 long nextReadPointer = handle->bufferPointer[1];
@@ -1338,7 +1338,7 @@ void airtaudio::api::Ds::callbackEvent() {
 && m_stream.callbackInfo.isRunning) {
 // See comments for playback.
 double millis = (endRead - safeReadPointer) * 1000.0;
-millis /= (formatBytes(m_stream.deviceFormat[1]) * m_stream.nDeviceChannels[1] * m_stream.sampleRate);
+millis /= (audio::getFormatBytes(m_stream.deviceFormat[1]) * m_stream.nDeviceChannels[1] * m_stream.sampleRate);
 if (millis < 1.0) {
 millis = 1.0;
 }

View File

@@ -6,7 +6,7 @@
 * @license like MIT (see license file)
 */
-#if defined(__AIRTAUDIO_DUMMY__)
+#if defined(__DUMMY__)
 #include <airtaudio/api/Dummy.h>
 #include <airtaudio/debug.h>
@@ -19,18 +19,16 @@ airtaudio::Api* airtaudio::api::Dummy::Create() {
 airtaudio::api::Dummy::Dummy() {
-m_errorText = "This class provides no functionality.";
-error(airtaudio::error_warning);
+ATA_WARNING("This class provides no functionality.");
 }
 uint32_t airtaudio::api::Dummy::getDeviceCount() {
 return 0;
 }
-rtaudio::DeviceInfo airtaudio::api::Dummy::getDeviceInfo(uint32_t _device) {
+airtaudio::DeviceInfo airtaudio::api::Dummy::getDeviceInfo(uint32_t _device) {
 (void)_device;
-rtaudio::DeviceInfo info;
-return info;
+return airtaudio::DeviceInfo();
 }
 enum airtaudio::error airtaudio::api::Dummy::closeStream() {

View File

@@ -6,7 +6,7 @@
 * @license like MIT (see license file)
 */
+// must run before :
 #if defined(__UNIX_JACK__)
 #include <unistd.h>
 #include <limits.h>
@@ -39,7 +39,8 @@ airtaudio::Api* airtaudio::api::Jack::Create() {
 // and can be specified when the JACK server is started. In
 // particular,
 //
-// .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
+// jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
+// jackd -r -d alsa -r 48000
 //
 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
 // frames, and number of buffers = 4. Once the server is running, it
@@ -351,7 +352,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
 }
 free(ports);
 // The jack server always uses 32-bit floating-point data.
-m_stream.deviceFormat[modeToIdTable(_mode)] = FLOAT32;
+m_stream.deviceFormat[modeToIdTable(_mode)] = audio::format_float;
 m_stream.userFormat = _format;
 // Jack always uses non-interleaved buffers.
 m_stream.deviceInterleaved[modeToIdTable(_mode)] = false;
@@ -385,7 +386,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
 handle->deviceName[modeToIdTable(_mode)] = deviceName;
 // Allocate necessary internal buffers.
 uint64_t bufferBytes;
-bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
 m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 ATA_ERROR("error allocating user buffer memory.");
@@ -394,11 +395,11 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
 if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
 bool makeBuffer = true;
 if (_mode == airtaudio::mode_output) {
-bufferBytes = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+bufferBytes = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 } else { // _mode == airtaudio::mode_input
-bufferBytes = m_stream.nDeviceChannels[1] * formatBytes(m_stream.deviceFormat[1]);
+bufferBytes = m_stream.nDeviceChannels[1] * audio::getFormatBytes(m_stream.deviceFormat[1]);
 if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) {
-uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 if (bufferBytes < bytesOut) {
 makeBuffer = false;
 }
@@ -671,11 +672,11 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) {
 double streamTime = getStreamTime();
 enum airtaudio::status status = airtaudio::status_ok;
 if (m_stream.mode != airtaudio::mode_input && handle->xrun[0] == true) {
-status |= airtaudio::status_underflow;
+status = airtaudio::status_underflow;
 handle->xrun[0] = false;
 }
 if (m_stream.mode != airtaudio::mode_output && handle->xrun[1] == true) {
-status |= airtaudio::mode_input_OVERFLOW;
+status = airtaudio::status_overflow;
 handle->xrun[1] = false;
 }
 int32_t cbReturnValue = info->callback(m_stream.userBuffer[0],

View File

@@ -389,7 +389,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
 // We'll check the actual value used near the end of the setup
 // procedure.
-int32_t ossBufferBytes = *_bufferSize * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels;
+int32_t ossBufferBytes = *_bufferSize * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels;
 if (ossBufferBytes < 16) {
 ossBufferBytes = 16;
 }
@@ -413,7 +413,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
 }
 m_stream.nBuffers = buffers;
 // Save buffer size (in sample frames).
-*_bufferSize = ossBufferBytes / (formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels);
+*_bufferSize = ossBufferBytes / (audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels);
 m_stream.bufferSize = *_bufferSize;
 // Set the sample rate.
 int32_t srate = _sampleRate;
@@ -465,7 +465,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
 handle->id[modeToIdTable(_mode)] = fd;
 // Allocate necessary internal buffers.
 uint64_t bufferBytes;
-bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
 m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 ATA_ERROR("error allocating user buffer memory.");
@@ -473,11 +473,11 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
 }
 if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
 bool makeBuffer = true;
-bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
+bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
 if (_mode == airtaudio::mode_input) {
 if ( m_stream._mode == airtaudio::mode_output
 && m_stream.deviceBuffer) {
-uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 if (bufferBytes <= bytesOut) {
 makeBuffer = false;
 }
@@ -639,9 +639,9 @@ enum airtaudio::error airtaudio::api::Oss::stopStream() {
 samples = m_stream.bufferSize * m_stream.nUserChannels[0];
 format = m_stream.userFormat;
 }
-memset(buffer, 0, samples * formatBytes(format));
+memset(buffer, 0, samples * audio::getFormatBytes(format));
 for (uint32_t i=0; i<m_stream.nBuffers+1; i++) {
-result = write(handle->id[0], buffer, samples * formatBytes(format));
+result = write(handle->id[0], buffer, samples * audio::getFormatBytes(format));
 if (result == -1) {
 ATA_ERROR("audio write error.");
 return airtaudio::error_warning;
@@ -778,13 +778,13 @@ void airtaudio::api::Oss::callbackEvent() {
 && handle->triggered == false) {
 int32_t trig = 0;
 ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
-result = write(handle->id[0], buffer, samples * formatBytes(format));
+result = write(handle->id[0], buffer, samples * audio::getFormatBytes(format));
 trig = PCM_ENABLE_airtaudio::mode_input|PCM_ENABLE_airtaudio::mode_output;
 ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
 handle->triggered = true;
 } else {
 // Write samples to device.
-result = write(handle->id[0], buffer, samples * formatBytes(format));
+result = write(handle->id[0], buffer, samples * audio::getFormatBytes(format));
 }
 if (result == -1) {
 // We'll assume this is an underrun, though there isn't a
@@ -808,7 +808,7 @@ void airtaudio::api::Oss::callbackEvent() {
 format = m_stream.userFormat;
 }
 // Read samples from device.
-result = read(handle->id[1], buffer, samples * formatBytes(format));
+result = read(handle->id[1], buffer, samples * audio::getFormatBytes(format));
 if (result == -1) {
 // We'll assume this is an overrun, though there isn't a
 // specific means for determining that.

View File

@@ -40,7 +40,7 @@ static const uint32_t SUPPORTED_SAMPLERATES[] = {
 };
 struct rtaudio_pa_format_mapping_t {
-audio::format airtaudio_format;
+enum audio::format airtaudio_format;
 pa_sample_format_t pa_format;
 };
@@ -48,7 +48,7 @@ static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
 {audio::format_int16, PA_SAMPLE_S16LE},
 {audio::format_int32, PA_SAMPLE_S32LE},
 {audio::format_float, PA_SAMPLE_FLOAT32LE},
-{0, PA_SAMPLE_INVALID}};
+{audio::format_unknow, PA_SAMPLE_INVALID}};
 struct PulseAudioHandle {
 pa_simple *s_play;
@@ -153,8 +153,8 @@ void airtaudio::api::Pulse::callbackEvent() {
 }
 double streamTime = getStreamTime();
 enum airtaudio::status status = airtaudio::status_ok;
-int32_t doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[airtaudio::mode_output],
-m_stream.userBuffer[airtaudio::mode_input],
+int32_t doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)],
+m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)],
 m_stream.bufferSize,
 streamTime,
 status);
@@ -163,8 +163,8 @@ void airtaudio::api::Pulse::callbackEvent() {
 return;
 }
 m_stream.mutex.lock();
-void *pulse_in = m_stream.doConvertBuffer[airtaudio::mode_input] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::mode_input];
-void *pulse_out = m_stream.doConvertBuffer[airtaudio::mode_output] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::mode_output];
+void *pulse_in = m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)];
+void *pulse_out = m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)];
 if (m_stream.state != airtaudio::state_running) {
 goto unlock;
 }
@@ -172,13 +172,13 @@ void airtaudio::api::Pulse::callbackEvent() {
 size_t bytes;
 if ( m_stream.mode == airtaudio::mode_output
 || m_stream.mode == airtaudio::mode_duplex) {
-if (m_stream.doConvertBuffer[airtaudio::mode_output]) {
+if (m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)]) {
 convertBuffer(m_stream.deviceBuffer,
-m_stream.userBuffer[airtaudio::mode_output],
-m_stream.convertInfo[airtaudio::mode_output]);
-bytes = m_stream.nDeviceChannels[airtaudio::mode_output] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[airtaudio::mode_output]);
+m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)],
+m_stream.convertInfo[airtaudio::modeToIdTable(airtaudio::mode_output)]);
+bytes = m_stream.nDeviceChannels[airtaudio::modeToIdTable(airtaudio::mode_output)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.deviceFormat[airtaudio::modeToIdTable(airtaudio::mode_output)]);
 } else {
-bytes = m_stream.nUserChannels[airtaudio::mode_output] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
+bytes = m_stream.nUserChannels[airtaudio::modeToIdTable(airtaudio::mode_output)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.userFormat);
 }
 if (pa_simple_write(pah->s_play, pulse_out, bytes, &pa_error) < 0) {
 ATA_ERROR("audio write error, " << pa_strerror(pa_error) << ".");
@@ -186,19 +186,19 @@ void airtaudio::api::Pulse::callbackEvent() {
 }
 }
 if (m_stream.mode == airtaudio::mode_input || m_stream.mode == airtaudio::mode_duplex) {
-if (m_stream.doConvertBuffer[airtaudio::mode_input]) {
-bytes = m_stream.nDeviceChannels[airtaudio::mode_input] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[airtaudio::mode_input]);
+if (m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)]) {
+bytes = m_stream.nDeviceChannels[airtaudio::modeToIdTable(airtaudio::mode_input)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.deviceFormat[airtaudio::modeToIdTable(airtaudio::mode_input)]);
 } else {
-bytes = m_stream.nUserChannels[airtaudio::mode_input] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
+bytes = m_stream.nUserChannels[airtaudio::modeToIdTable(airtaudio::mode_input)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.userFormat);
 }
 if (pa_simple_read(pah->s_rec, pulse_in, bytes, &pa_error) < 0) {
 ATA_ERROR("audio read error, " << pa_strerror(pa_error) << ".");
 return;
 }
-if (m_stream.doConvertBuffer[airtaudio::mode_input]) {
-convertBuffer(m_stream.userBuffer[airtaudio::mode_input],
+if (m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)]) {
+convertBuffer(m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)],
 m_stream.deviceBuffer,
-m_stream.convertInfo[airtaudio::mode_input]);
+m_stream.convertInfo[airtaudio::modeToIdTable(airtaudio::mode_input)]);
 }
 }
 unlock:
@@ -341,7 +341,7 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
 m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel;
 m_stream.channelOffset[modeToIdTable(_mode)] = 0;
 // Allocate necessary internal buffers.
-bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
 m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 ATA_ERROR("error allocating user buffer memory.");
@@ -350,10 +350,10 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
 m_stream.bufferSize = *_bufferSize;
 if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
 bool makeBuffer = true;
-bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
+bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
 if (_mode == airtaudio::mode_input) {
 if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) {
-uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 if (bufferBytes <= bytesOut) makeBuffer = false;
 }
 }
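One note on the Pulse hunks above: the definition of airtaudio::modeToIdTable() is not part of this diff, but judging from how the other backends use index 0 for playback buffers and index 1 for capture buffers, it presumably maps a mode value to one of those two slots, along these lines (a hypothetical stand-in, not the project's code):

    // Hypothetical stand-in for airtaudio::modeToIdTable(); illustrates the assumed mapping
    // and why indexing userBuffer[] with the raw airtaudio::mode_* enum values was wrong.
    #include <stdint.h>

    enum class ModeSketch { output, input, duplex };

    static int32_t modeToIdTableSketch(ModeSketch _mode) {
        switch (_mode) {
            case ModeSketch::output: return 0; // playback slot: userBuffer[0], deviceFormat[0], ...
            case ModeSketch::input:  return 1; // capture slot:  userBuffer[1], deviceFormat[1], ...
            default:                 return 0; // duplex call sites address each slot explicitly
        }
    }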