[DEV] correct jack and pulse output
parent 028279a74f
commit f4471f25e8
@@ -185,7 +185,7 @@ bool airtaudio::api::Android::probeDeviceOpen(uint32_t _device,
 	}
 	if (m_stream.doConvertBuffer[modeToIdTable(_mode)] == true) {
 		// Allocate necessary internal buffers.
-		uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
+		uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.userFormat);
 		m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 		if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 			ATA_ERROR("airtaudio::api::Android::probeDeviceOpen: error allocating user buffer memory.");
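The same one-line change repeats throughout this commit: the free function formatBytes() is replaced by the helper audio::getFormatBytes() in the buffer-size arithmetic. A minimal sketch of that arithmetic, using a hypothetical stand-in for the helper (the real audio::getFormatBytes() implementation is not part of this diff):

	#include <cstdint>

	// Hypothetical sample formats, standing in for enum audio::format (not taken from this diff).
	enum class SampleFormat { int8, int16, int32, float32 };

	// Stand-in for audio::getFormatBytes(): size in bytes of one sample of the given format.
	static uint32_t bytesPerSample(SampleFormat f) {
		switch (f) {
			case SampleFormat::int8:    return 1;
			case SampleFormat::int16:   return 2;
			case SampleFormat::int32:   return 4;
			case SampleFormat::float32: return 4;
		}
		return 0;
	}

	// The recurring computation: bufferBytes = channels * frames * bytes-per-sample.
	static uint64_t bufferBytesFor(uint32_t channels, uint32_t frames, SampleFormat f) {
		return static_cast<uint64_t>(channels) * frames * bytesPerSample(f);
	}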
@@ -475,7 +475,7 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
 	}
 	// Allocate necessary internal buffers
 	uint64_t bufferBytes;
-	bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+	bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
 	m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 	if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 		ATA_ERROR("error allocating user buffer memory.");
@@ -483,10 +483,10 @@ bool airtaudio::api::Asio::probeDeviceOpen(uint32_t _device,
 	}
 	if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
 		bool makeBuffer = true;
-		bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
+		bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
 		if (_mode == airtaudio::mode_input) {
 			if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) {
-				uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+				uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 				if (bufferBytes <= bytesOut) {
 					makeBuffer = false;
 				}
@@ -743,7 +743,7 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
 	nChannels = m_stream.nDeviceChannels[0] + m_stream.nDeviceChannels[1];
 	if (    m_stream.mode == airtaudio::mode_output
 	     || m_stream.mode == airtaudio::mode_duplex) {
-		bufferBytes = m_stream.bufferSize * formatBytes(m_stream.deviceFormat[0]);
+		bufferBytes = m_stream.bufferSize * audio::getFormatBytes(m_stream.deviceFormat[0]);
 		if (handle->drainCounter > 1) { // write zeros to the output stream
 			for (i=0, j=0; i<nChannels; i++) {
 				if (handle->bufferInfos[i].isInput != ASIOTrue) {
@@ -785,7 +785,7 @@ bool airtaudio::api::Asio::callbackEvent(long bufferIndex) {
 	}
 	if (    m_stream.mode == airtaudio::mode_input
 	     || m_stream.mode == airtaudio::mode_duplex) {
-		bufferBytes = m_stream.bufferSize * formatBytes(m_stream.deviceFormat[1]);
+		bufferBytes = m_stream.bufferSize * audio::getFormatBytes(m_stream.deviceFormat[1]);
 		if (m_stream.doConvertBuffer[1]) {
 			// Always interleave ASIO input data.
 			for (i=0, j=0; i<nChannels; i++) {
@@ -822,7 +822,7 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device,
 	handle->id[modeToIdTable(_mode)] = id;
 	// Allocate necessary internal buffers.
 	uint64_t bufferBytes;
-	bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+	bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
 	//m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 	m_stream.userBuffer[modeToIdTable(_mode)] = (char *) malloc(bufferBytes * sizeof(char));
 	memset(m_stream.userBuffer[modeToIdTable(_mode)], 0, bufferBytes * sizeof(char));
@@ -836,11 +836,11 @@ bool airtaudio::api::Core::probeDeviceOpen(uint32_t _device,
 	if (    m_stream.doConvertBuffer[modeToIdTable(_mode)]
 	     && handle->nStreams[modeToIdTable(_mode)] > 1) {
 		bool makeBuffer = true;
-		bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
+		bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
 		if (_mode == airtaudio::mode_input) {
 			if (    m_stream.mode == airtaudio::mode_output
 			     && m_stream.deviceBuffer) {
-				uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+				uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 				if (bufferBytes <= bytesOut) {
 					makeBuffer = false;
 				}
@@ -211,7 +211,7 @@ bool airtaudio::api::CoreIos::probeDeviceOpen(uint32_t _device,
 	}
 	if (m_stream.doConvertBuffer[modeToIdTable(_mode)] == true) {
 		// Allocate necessary internal buffers.
-		uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
+		uint64_t bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.userFormat);
 		m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 		if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 			ATA_ERROR("error allocating user buffer memory.");
@@ -727,7 +727,7 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device,
 		m_stream.doConvertBuffer[modeToIdTable(_mode)] = true;
 	}
 	// Allocate necessary internal buffers
-	long bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+	long bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
 	m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 	if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 		ATA_ERROR("error allocating user buffer memory.");
@@ -735,10 +735,10 @@ bool airtaudio::api::Ds::probeDeviceOpen(uint32_t _device,
 	}
 	if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
 		bool makeBuffer = true;
-		bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
+		bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
 		if (_mode == airtaudio::mode_input) {
 			if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) {
-				uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+				uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 				if (bufferBytes <= (long) bytesOut) {
 					makeBuffer = false;
 				}
@@ -910,7 +910,7 @@ enum airtaudio::error airtaudio::api::Ds::startStream() {
 	m_duplexPrerollBytes = 0;
 	if (m_stream.mode == airtaudio::mode_duplex) {
 		// 0.5 seconds of silence in airtaudio::mode_duplex mode while the devices spin up and synchronize.
-		m_duplexPrerollBytes = (int) (0.5 * m_stream.sampleRate * formatBytes(m_stream.deviceFormat[1]) * m_stream.nDeviceChannels[1]);
+		m_duplexPrerollBytes = (int) (0.5 * m_stream.sampleRate * audio::getFormatBytes(m_stream.deviceFormat[1]) * m_stream.nDeviceChannels[1]);
 	}
 	HRESULT result = 0;
 	if (    m_stream.mode == airtaudio::mode_output
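A quick worked number for the preroll above (values are illustrative, not taken from the diff): a 44100 Hz duplex stream capturing 2 channels of 16-bit samples prerolls 0.5 * 44100 * 2 * 2 = 88200 bytes of silence.

	// Illustrative only: 44100 Hz, 16-bit samples (2 bytes each), 2 capture channels.
	int duplexPrerollBytes = static_cast<int>(0.5 * 44100 * 2 * 2); // == 88200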
@@ -1168,7 +1168,7 @@ void airtaudio::api::Ds::callbackEvent() {
 	LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
 	if (handle->drainCounter > 1) { // write zeros to the output stream
 		bufferBytes = m_stream.bufferSize * m_stream.nUserChannels[0];
-		bufferBytes *= formatBytes(m_stream.userFormat);
+		bufferBytes *= audio::getFormatBytes(m_stream.userFormat);
 		memset(m_stream.userBuffer[0], 0, bufferBytes);
 	}
 	// Setup parameters and do buffer conversion if necessary.
@@ -1176,11 +1176,11 @@ void airtaudio::api::Ds::callbackEvent() {
 		buffer = m_stream.deviceBuffer;
 		convertBuffer(buffer, m_stream.userBuffer[0], m_stream.convertInfo[0]);
 		bufferBytes = m_stream.bufferSize * m_stream.nDeviceChannels[0];
-		bufferBytes *= formatBytes(m_stream.deviceFormat[0]);
+		bufferBytes *= audio::getFormatBytes(m_stream.deviceFormat[0]);
 	} else {
 		buffer = m_stream.userBuffer[0];
 		bufferBytes = m_stream.bufferSize * m_stream.nUserChannels[0];
-		bufferBytes *= formatBytes(m_stream.userFormat);
+		bufferBytes *= audio::getFormatBytes(m_stream.userFormat);
 	}
 	// No byte swapping necessary in DirectSound implementation.
 	// Ahhh ... windoze. 16-bit data is signed but 8-bit data is
@@ -1221,7 +1221,7 @@ void airtaudio::api::Ds::callbackEvent() {
 		// beyond the end of our next write region. We use the
 		// Sleep() function to suspend operation until that happens.
 		double millis = (endWrite - leadPointer) * 1000.0;
-		millis /= (formatBytes(m_stream.deviceFormat[0]) * m_stream.nDeviceChannels[0] * m_stream.sampleRate);
+		millis /= (audio::getFormatBytes(m_stream.deviceFormat[0]) * m_stream.nDeviceChannels[0] * m_stream.sampleRate);
 		if (millis < 1.0) {
 			millis = 1.0;
 		}
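As a sanity check on that conversion (illustrative figures only): an 8820-byte gap between the lead pointer and the end of the next write region, with 16-bit stereo output at 44100 Hz, gives 8820 * 1000 / (2 * 2 * 44100) = 50 ms of sleep.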
@@ -1274,11 +1274,11 @@ void airtaudio::api::Ds::callbackEvent() {
 	if (m_stream.doConvertBuffer[1]) {
 		buffer = m_stream.deviceBuffer;
 		bufferBytes = m_stream.bufferSize * m_stream.nDeviceChannels[1];
-		bufferBytes *= formatBytes(m_stream.deviceFormat[1]);
+		bufferBytes *= audio::getFormatBytes(m_stream.deviceFormat[1]);
 	} else {
 		buffer = m_stream.userBuffer[1];
 		bufferBytes = m_stream.bufferSize * m_stream.nUserChannels[1];
-		bufferBytes *= formatBytes(m_stream.userFormat);
+		bufferBytes *= audio::getFormatBytes(m_stream.userFormat);
 	}
 	LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
 	long nextReadPointer = handle->bufferPointer[1];
@@ -1338,7 +1338,7 @@ void airtaudio::api::Ds::callbackEvent() {
 	       && m_stream.callbackInfo.isRunning) {
 		// See comments for playback.
 		double millis = (endRead - safeReadPointer) * 1000.0;
-		millis /= (formatBytes(m_stream.deviceFormat[1]) * m_stream.nDeviceChannels[1] * m_stream.sampleRate);
+		millis /= (audio::getFormatBytes(m_stream.deviceFormat[1]) * m_stream.nDeviceChannels[1] * m_stream.sampleRate);
 		if (millis < 1.0) {
 			millis = 1.0;
 		}
@@ -6,7 +6,7 @@
  * @license like MIT (see license file)
  */

-#if defined(__AIRTAUDIO_DUMMY__)
+#if defined(__DUMMY__)
#include <airtaudio/api/Dummy.h>
#include <airtaudio/debug.h>
@@ -19,18 +19,16 @@ airtaudio::Api* airtaudio::api::Dummy::Create() {

 airtaudio::api::Dummy::Dummy() {
-	m_errorText = "This class provides no functionality.";
-	error(airtaudio::error_warning);
+	ATA_WARNING("This class provides no functionality.");
 }

 uint32_t airtaudio::api::Dummy::getDeviceCount() {
 	return 0;
 }

-rtaudio::DeviceInfo airtaudio::api::Dummy::getDeviceInfo(uint32_t _device) {
+airtaudio::DeviceInfo airtaudio::api::Dummy::getDeviceInfo(uint32_t _device) {
 	(void)_device;
-	rtaudio::DeviceInfo info;
-	return info;
+	return airtaudio::DeviceInfo();
 }

 enum airtaudio::error airtaudio::api::Dummy::closeStream() {
@@ -6,7 +6,7 @@
  * @license like MIT (see license file)
  */

-
+// must run before :
#if defined(__UNIX_JACK__)
#include <unistd.h>
#include <limits.h>
@@ -29,26 +29,27 @@ airtaudio::Api* airtaudio::api::Jack::Create() {
 // well as allowing them to share audio between themselves.
 //
 // When using JACK with RtAudio, "devices" refer to JACK clients that
-// have ports connected to the server.  The JACK server is typically
+// have ports connected to the server. The JACK server is typically
 // started in a terminal as follows:
 //
 // .jackd -d alsa -d hw:0
 //
-// or through an interface program such as qjackctl.  Many of the
+// or through an interface program such as qjackctl. Many of the
 // parameters normally set for a stream are fixed by the JACK server
-// and can be specified when the JACK server is started.  In
+// and can be specified when the JACK server is started. In
 // particular,
 //
-// .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
+// jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
+// jackd -r -d alsa -r 48000
 //
 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
-// frames, and number of buffers = 4.  Once the server is running, it
-// is not possible to override these values.  If the values are not
+// frames, and number of buffers = 4. Once the server is running, it
+// is not possible to override these values. If the values are not
 // specified in the command-line, the JACK server uses default values.
 //
 // The JACK server does not have to be running when an instance of
 // RtApiJack is created, though the function getDeviceCount() will
-// report 0 devices found until JACK has been started.  When no
+// report 0 devices found until JACK has been started. When no
 // devices are available (i.e., the JACK server is not running), a
 // stream cannot be opened.

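A sketch of the "0 devices until jackd is running" behaviour described in that comment, using the public JACK client API directly; the client name and error handling are illustrative, not code from airtaudio:

	#include <cstdio>
	#include <jack/jack.h>

	// Returns the number of registered audio output ports, or 0 when no JACK
	// server is reachable -- mirroring the "0 devices found" behaviour noted above.
	static int countJackOutputPorts() {
		jack_status_t status;
		// JackNoStartServer: do not spawn a server, only attach to a running one.
		jack_client_t* client = jack_client_open("probe-client", JackNoStartServer, &status);
		if (client == nullptr) {
			std::printf("JACK server not running (status=0x%x)\n", status);
			return 0;
		}
		int count = 0;
		const char** ports = jack_get_ports(client, nullptr, JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput);
		if (ports != nullptr) {
			while (ports[count] != nullptr) {
				++count;
			}
			jack_free(ports);
		}
		jack_client_close(client);
		return count;
	}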
@@ -351,7 +352,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
 	}
 	free(ports);
 	// The jack server always uses 32-bit floating-point data.
-	m_stream.deviceFormat[modeToIdTable(_mode)] = FLOAT32;
+	m_stream.deviceFormat[modeToIdTable(_mode)] = audio::format_float;
 	m_stream.userFormat = _format;
 	// Jack always uses non-interleaved buffers.
 	m_stream.deviceInterleaved[modeToIdTable(_mode)] = false;
@@ -385,7 +386,7 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
 	handle->deviceName[modeToIdTable(_mode)] = deviceName;
 	// Allocate necessary internal buffers.
 	uint64_t bufferBytes;
-	bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+	bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
 	m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 	if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 		ATA_ERROR("error allocating user buffer memory.");
@@ -394,11 +395,11 @@ bool airtaudio::api::Jack::probeDeviceOpen(uint32_t _device,
 	if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
 		bool makeBuffer = true;
 		if (_mode == airtaudio::mode_output) {
-			bufferBytes = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+			bufferBytes = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 		} else { // _mode == airtaudio::mode_input
-			bufferBytes = m_stream.nDeviceChannels[1] * formatBytes(m_stream.deviceFormat[1]);
+			bufferBytes = m_stream.nDeviceChannels[1] * audio::getFormatBytes(m_stream.deviceFormat[1]);
 			if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) {
-				uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+				uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 				if (bufferBytes < bytesOut) {
 					makeBuffer = false;
 				}
@@ -671,11 +672,11 @@ bool airtaudio::api::Jack::callbackEvent(uint64_t _nframes) {
 	double streamTime = getStreamTime();
 	enum airtaudio::status status = airtaudio::status_ok;
 	if (m_stream.mode != airtaudio::mode_input && handle->xrun[0] == true) {
-		status |= airtaudio::status_underflow;
+		status = airtaudio::status_underflow;
 		handle->xrun[0] = false;
 	}
 	if (m_stream.mode != airtaudio::mode_output && handle->xrun[1] == true) {
-		status |= airtaudio::mode_input_OVERFLOW;
+		status = airtaudio::status_overflow;
 		handle->xrun[1] = false;
 	}
 	int32_t cbReturnValue = info->callback(m_stream.userBuffer[0],
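The switch from |= to plain assignment here is consistent with airtaudio::status being a plain enumeration rather than a bitmask: OR-ing two enumerators yields an integer that matches no enumerator. A minimal illustration of the pitfall, with stand-in names rather than the real airtaudio definitions:

	// Stand-in for the real enum; names and values are illustrative.
	enum class StreamStatus { ok = 0, underflow = 1, overflow = 2 };

	static StreamStatus reportXrun(bool outputXrun, bool inputXrun) {
		StreamStatus status = StreamStatus::ok;
		// OR-ing enumerators would need a custom operator| and could produce a
		// value (e.g. 3) that matches no enumerator, so the corrected code assigns.
		if (outputXrun) {
			status = StreamStatus::underflow;
		}
		if (inputXrun) {
			status = StreamStatus::overflow;
		}
		return status;
	}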
@@ -389,7 +389,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
 	// bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
 	// We'll check the actual value used near the end of the setup
 	// procedure.
-	int32_t ossBufferBytes = *_bufferSize * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels;
+	int32_t ossBufferBytes = *_bufferSize * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels;
 	if (ossBufferBytes < 16) {
 		ossBufferBytes = 16;
 	}
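For context on the SSSS/MMMM wording above: the classic OSS SNDCTL_DSP_SETFRAGMENT ioctl packs its argument so that the low 16 bits hold log2 of the fragment size in bytes and the high 16 bits carry the fragment-count request. A hedged sketch of that packing (simplified; the actual probe code in this file may differ):

	#include <cstdint>
	#include <sys/ioctl.h>
	#include <sys/soundcard.h>

	// Pack an OSS fragment request: low 16 bits = log2(fragment bytes),
	// high 16 bits = requested fragment count. Illustrative helper only.
	static int requestFragments(int fd, int32_t ossBufferBytes, int32_t nFragments) {
		int sizeSelector = 4; // 2^4 = 16 bytes is the usual minimum fragment size
		while ((1 << sizeSelector) < ossBufferBytes) {
			++sizeSelector;
		}
		int arg = (nFragments << 16) | sizeSelector;
		return ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &arg); // returns -1 on failure
	}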
@@ -413,7 +413,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
 	}
 	m_stream.nBuffers = buffers;
 	// Save buffer size (in sample frames).
-	*_bufferSize = ossBufferBytes / (formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels);
+	*_bufferSize = ossBufferBytes / (audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]) * deviceChannels);
 	m_stream.bufferSize = *_bufferSize;
 	// Set the sample rate.
 	int32_t srate = _sampleRate;
@@ -465,7 +465,7 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
 	handle->id[modeToIdTable(_mode)] = fd;
 	// Allocate necessary internal buffers.
 	uint64_t bufferBytes;
-	bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+	bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
 	m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 	if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 		ATA_ERROR("error allocating user buffer memory.");
@@ -473,11 +473,11 @@ bool airtaudio::api::Oss::probeDeviceOpen(uint32_t _device,
 	}
 	if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
 		bool makeBuffer = true;
-		bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
+		bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
 		if (_mode == airtaudio::mode_input) {
 			if (    m_stream._mode == airtaudio::mode_output
 			     && m_stream.deviceBuffer) {
-				uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+				uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 				if (bufferBytes <= bytesOut) {
 					makeBuffer = false;
 				}
@@ -639,9 +639,9 @@ enum airtaudio::error airtaudio::api::Oss::stopStream() {
 		samples = m_stream.bufferSize * m_stream.nUserChannels[0];
 		format = m_stream.userFormat;
 	}
-	memset(buffer, 0, samples * formatBytes(format));
+	memset(buffer, 0, samples * audio::getFormatBytes(format));
 	for (uint32_t i=0; i<m_stream.nBuffers+1; i++) {
-		result = write(handle->id[0], buffer, samples * formatBytes(format));
+		result = write(handle->id[0], buffer, samples * audio::getFormatBytes(format));
 		if (result == -1) {
 			ATA_ERROR("audio write error.");
 			return airtaudio::error_warning;
@@ -778,13 +778,13 @@ void airtaudio::api::Oss::callbackEvent() {
 	     && handle->triggered == false) {
 		int32_t trig = 0;
 		ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
-		result = write(handle->id[0], buffer, samples * formatBytes(format));
+		result = write(handle->id[0], buffer, samples * audio::getFormatBytes(format));
 		trig = PCM_ENABLE_airtaudio::mode_input|PCM_ENABLE_airtaudio::mode_output;
 		ioctl(handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
 		handle->triggered = true;
 	} else {
 		// Write samples to device.
-		result = write(handle->id[0], buffer, samples * formatBytes(format));
+		result = write(handle->id[0], buffer, samples * audio::getFormatBytes(format));
 	}
 	if (result == -1) {
 		// We'll assume this is an underrun, though there isn't a
@@ -808,7 +808,7 @@ void airtaudio::api::Oss::callbackEvent() {
 		format = m_stream.userFormat;
 	}
 	// Read samples from device.
-	result = read(handle->id[1], buffer, samples * formatBytes(format));
+	result = read(handle->id[1], buffer, samples * audio::getFormatBytes(format));
 	if (result == -1) {
 		// We'll assume this is an overrun, though there isn't a
 		// specific means for determining that.
@@ -40,7 +40,7 @@ static const uint32_t SUPPORTED_SAMPLERATES[] = {
 };

 struct rtaudio_pa_format_mapping_t {
-	audio::format airtaudio_format;
+	enum audio::format airtaudio_format;
 	pa_sample_format_t pa_format;
 };

@@ -48,7 +48,7 @@ static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
 	{audio::format_int16, PA_SAMPLE_S16LE},
 	{audio::format_int32, PA_SAMPLE_S32LE},
 	{audio::format_float, PA_SAMPLE_FLOAT32LE},
-	{0, PA_SAMPLE_INVALID}};
+	{audio::format_unknow, PA_SAMPLE_INVALID}};

struct PulseAudioHandle {
 	pa_simple *s_play;
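A sketch of how such a sentinel-terminated mapping table is typically consumed when building a pa_sample_spec for pa_simple_new(); the lookup helper, table values, and stream names below are illustrative, not the code from this file:

	#include <cstdint>
	#include <pulse/sample.h>
	#include <pulse/simple.h>

	// Illustrative mapping table, terminated by PA_SAMPLE_INVALID like the one above.
	struct FormatMapping {
		int engineFormat;            // stand-in for enum audio::format
		pa_sample_format_t paFormat;
	};
	static const FormatMapping kFormats[] = {
		{1, PA_SAMPLE_S16LE},
		{2, PA_SAMPLE_S32LE},
		{3, PA_SAMPLE_FLOAT32LE},
		{0, PA_SAMPLE_INVALID},      // sentinel
	};

	// Walk the table until the sentinel; report PA_SAMPLE_INVALID when unsupported.
	static pa_sample_format_t toPulseFormat(int engineFormat) {
		for (const FormatMapping* m = kFormats; m->paFormat != PA_SAMPLE_INVALID; ++m) {
			if (m->engineFormat == engineFormat) {
				return m->paFormat;
			}
		}
		return PA_SAMPLE_INVALID;
	}

	static pa_simple* openPlayback(int engineFormat, uint32_t rate, uint8_t channels, int* error) {
		pa_sample_spec ss;
		ss.format = toPulseFormat(engineFormat);
		ss.rate = rate;
		ss.channels = channels;
		return pa_simple_new(nullptr, "sketch", PA_STREAM_PLAYBACK, nullptr,
		                     "playback", &ss, nullptr, nullptr, error);
	}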
@@ -153,8 +153,8 @@ void airtaudio::api::Pulse::callbackEvent() {
 	}
 	double streamTime = getStreamTime();
 	enum airtaudio::status status = airtaudio::status_ok;
-	int32_t doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[airtaudio::mode_output],
-	                                                      m_stream.userBuffer[airtaudio::mode_input],
+	int32_t doStopStream = m_stream.callbackInfo.callback(m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)],
+	                                                      m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)],
 	                                                      m_stream.bufferSize,
 	                                                      streamTime,
 	                                                      status);
@@ -163,8 +163,8 @@ void airtaudio::api::Pulse::callbackEvent() {
 		return;
 	}
 	m_stream.mutex.lock();
-	void *pulse_in = m_stream.doConvertBuffer[airtaudio::mode_input] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::mode_input];
-	void *pulse_out = m_stream.doConvertBuffer[airtaudio::mode_output] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::mode_output];
+	void *pulse_in = m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)];
+	void *pulse_out = m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)] ? m_stream.deviceBuffer : m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)];
 	if (m_stream.state != airtaudio::state_running) {
 		goto unlock;
 	}
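The common thread of the Pulse fixes is that the mode enumerators are no longer used directly as array indices; modeToIdTable() maps them to the 0/1 slots the stream buffers actually use. Its implementation is not part of this diff, so the following is only a plausible sketch of such a mapping:

	// Hypothetical reconstruction, for illustration only.
	enum Mode { mode_unknow, mode_output, mode_input, mode_duplex };

	// Map a stream mode to the buffer/array slot it owns: output -> 0, input -> 1.
	static int modeToIdTable(enum Mode mode) {
		switch (mode) {
			case mode_output:
				return 0;
			case mode_input:
				return 1;
			default:
				return 0; // duplex/unknown handled elsewhere in the real code
		}
	}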
@@ -172,13 +172,13 @@ void airtaudio::api::Pulse::callbackEvent() {
 	size_t bytes;
 	if (    m_stream.mode == airtaudio::mode_output
 	     || m_stream.mode == airtaudio::mode_duplex) {
-		if (m_stream.doConvertBuffer[airtaudio::mode_output]) {
+		if (m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)]) {
 			convertBuffer(m_stream.deviceBuffer,
-			              m_stream.userBuffer[airtaudio::mode_output],
-			              m_stream.convertInfo[airtaudio::mode_output]);
-			bytes = m_stream.nDeviceChannels[airtaudio::mode_output] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[airtaudio::mode_output]);
+			              m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_output)],
+			              m_stream.convertInfo[airtaudio::modeToIdTable(airtaudio::mode_output)]);
+			bytes = m_stream.nDeviceChannels[airtaudio::modeToIdTable(airtaudio::mode_output)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.deviceFormat[airtaudio::modeToIdTable(airtaudio::mode_output)]);
 		} else {
-			bytes = m_stream.nUserChannels[airtaudio::mode_output] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
+			bytes = m_stream.nUserChannels[airtaudio::modeToIdTable(airtaudio::mode_output)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.userFormat);
 		}
 		if (pa_simple_write(pah->s_play, pulse_out, bytes, &pa_error) < 0) {
 			ATA_ERROR("audio write error, " << pa_strerror(pa_error) << ".");
@@ -186,19 +186,19 @@ void airtaudio::api::Pulse::callbackEvent() {
 		}
 	}
 	if (m_stream.mode == airtaudio::mode_input || m_stream.mode == airtaudio::mode_duplex) {
-		if (m_stream.doConvertBuffer[airtaudio::mode_input]) {
-			bytes = m_stream.nDeviceChannels[airtaudio::mode_input] * m_stream.bufferSize * formatBytes(m_stream.deviceFormat[airtaudio::mode_input]);
+		if (m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)]) {
+			bytes = m_stream.nDeviceChannels[airtaudio::modeToIdTable(airtaudio::mode_input)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.deviceFormat[airtaudio::modeToIdTable(airtaudio::mode_input)]);
 		} else {
-			bytes = m_stream.nUserChannels[airtaudio::mode_input] * m_stream.bufferSize * formatBytes(m_stream.userFormat);
+			bytes = m_stream.nUserChannels[airtaudio::modeToIdTable(airtaudio::mode_input)] * m_stream.bufferSize * audio::getFormatBytes(m_stream.userFormat);
 		}
 		if (pa_simple_read(pah->s_rec, pulse_in, bytes, &pa_error) < 0) {
 			ATA_ERROR("audio read error, " << pa_strerror(pa_error) << ".");
 			return;
 		}
-		if (m_stream.doConvertBuffer[airtaudio::mode_input]) {
-			convertBuffer(m_stream.userBuffer[airtaudio::mode_input],
+		if (m_stream.doConvertBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)]) {
+			convertBuffer(m_stream.userBuffer[airtaudio::modeToIdTable(airtaudio::mode_input)],
 			              m_stream.deviceBuffer,
-			              m_stream.convertInfo[airtaudio::mode_input]);
+			              m_stream.convertInfo[airtaudio::modeToIdTable(airtaudio::mode_input)]);
 		}
 	}
unlock:
@@ -341,7 +341,7 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
 	m_stream.nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel;
 	m_stream.channelOffset[modeToIdTable(_mode)] = 0;
 	// Allocate necessary internal buffers.
-	bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * formatBytes(m_stream.userFormat);
+	bufferBytes = m_stream.nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_stream.userFormat);
 	m_stream.userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
 	if (m_stream.userBuffer[modeToIdTable(_mode)] == nullptr) {
 		ATA_ERROR("error allocating user buffer memory.");
@@ -350,10 +350,10 @@ bool airtaudio::api::Pulse::probeDeviceOpen(uint32_t _device,
 	m_stream.bufferSize = *_bufferSize;
 	if (m_stream.doConvertBuffer[modeToIdTable(_mode)]) {
 		bool makeBuffer = true;
-		bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * formatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
+		bufferBytes = m_stream.nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_stream.deviceFormat[modeToIdTable(_mode)]);
 		if (_mode == airtaudio::mode_input) {
 			if (m_stream.mode == airtaudio::mode_output && m_stream.deviceBuffer) {
-				uint64_t bytesOut = m_stream.nDeviceChannels[0] * formatBytes(m_stream.deviceFormat[0]);
+				uint64_t bytesOut = m_stream.nDeviceChannels[0] * audio::getFormatBytes(m_stream.deviceFormat[0]);
 				if (bufferBytes <= bytesOut) makeBuffer = false;
 			}
 		}