[DEV] rework Airtaudio in audioo::orchestra

This commit is contained in:
2015-04-10 22:06:17 +02:00
parent d4c53a53bf
commit f5c3affccb
72 changed files with 2805 additions and 2732 deletions

1389
audio/orchestra/api/Alsa.cpp Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,77 @@
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */
// Declaration of the ALSA backend; compiled only when ORCHESTRA_BUILD_ALSA is set.
#if !defined(__AUDIO_ORCHESTRA_API_ALSA_H__) && defined(ORCHESTRA_BUILD_ALSA)
#define __AUDIO_ORCHESTRA_API_ALSA_H__
namespace audio {
	namespace orchestra {
		namespace api {
			class AlsaPrivate; // opaque backend-private state (pimpl)
			/**
			 * @brief ALSA implementation of the audio::orchestra::Api backend.
			 */
			class Alsa: public audio::orchestra::Api {
				public:
					// Factory function used by the backend registry.
					static audio::orchestra::Api* Create();
				public:
					Alsa();
					virtual ~Alsa();
					// Identify this backend implementation.
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_alsa;
					}
					uint32_t getDeviceCount();
				private:
					/**
					 * @brief Fill @p _info with the description of the named ALSA device.
					 * @param _deviceName ALSA device name.
					 * @param _info Output device description.
					 * @param _cardId Alsa card ID (-1 if unknown).
					 * @param _subdevice alsa subdevice ID (-1 if unknown).
					 * @param _localDeviceId local ID of device fined (-1 if unknown).
					 * @return true on success.
					 */
					bool getNamedDeviceInfoLocal(const std::string& _deviceName,
					                             audio::orchestra::DeviceInfo& _info,
					                             int32_t _cardId=-1,
					                             int32_t _subdevice=-1,
					                             int32_t _localDeviceId=-1);
				public:
					// Public wrapper: probe by name with default card/subdevice ids.
					bool getNamedDeviceInfo(const std::string& _deviceName, audio::orchestra::DeviceInfo& _info) {
						return getNamedDeviceInfoLocal(_deviceName, _info);
					}
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesirable results!
					void callbackEvent();
					void callbackEventOneCycle();
				private:
					// Static trampoline handed to the ALSA thread; _userData is the Alsa instance.
					static void alsaCallbackEvent(void* _userData);
				private:
					std11::shared_ptr<AlsaPrivate> m_private; // backend-private state
					std::vector<audio::orchestra::DeviceInfo> m_devices; // cached device descriptions
					void saveDeviceInfo();
					bool probeDeviceOpen(uint32_t _device,
					                     enum audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     enum audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
					virtual bool probeDeviceOpenName(const std::string& _deviceName,
					                                 audio::orchestra::mode _mode,
					                                 uint32_t _channels,
					                                 uint32_t _firstChannel,
					                                 uint32_t _sampleRate,
					                                 audio::format _format,
					                                 uint32_t *_bufferSize,
					                                 const audio::orchestra::StreamOptions& _options);
					virtual std11::chrono::system_clock::time_point getStreamTime();
				public:
					bool isMasterOf(audio::orchestra::Api* _api);
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,210 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifdef ORCHESTRA_BUILD_JAVA
#include <ewol/context/Context.h>
#include <unistd.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <limits.h>
#undef __class__
#define __class__ "api::Android"
// Factory entry point: build a new Android backend instance.
audio::orchestra::Api* audio::orchestra::api::Android::Create() {
	ATA_INFO("Create Android device ... ");
	audio::orchestra::api::Android* instance = new audio::orchestra::api::Android();
	return instance;
}
/**
 * @brief Constructor: enumerate the (static) Android audio devices.
 *
 * Each device is described by the ewol context as a ':'-separated property
 * string: name ':' direction(in/out/other) ':' rates(comma list) ':' channels ':' formats.
 */
audio::orchestra::api::Android::Android() {
	ATA_INFO("new Android");
	// On android, we set a static device ...
	ATA_INFO("get context");
	ewol::Context& tmpContext = ewol::getContext();
	ATA_INFO("done p=" << (int64_t)&tmpContext);
	int32_t deviceCount = tmpContext.audioGetDeviceCount();
	ATA_ERROR("Get count devices : " << deviceCount);
	for (int32_t iii=0; iii<deviceCount; ++iii) {
		std::string property = tmpContext.audioGetDeviceProperty(iii);
		ATA_ERROR("Get devices property : " << property);
		std::vector<std::string> listProperty = etk::split(property, ':');
		// FIX: fields [0..4] are dereferenced below; skip malformed entries
		// instead of indexing out of bounds.
		if (listProperty.size() < 5) {
			ATA_ERROR("Invalid device property (expected 5 ':'-separated fields) : " << property);
			continue;
		}
		audio::orchestra::DeviceInfo tmp;
		tmp.name = listProperty[0];
		std::vector<std::string> listFreq = etk::split(listProperty[2], ',');
		for(size_t fff=0; fff<listFreq.size(); ++fff) {
			tmp.sampleRates.push_back(etk::string_to_int32_t(listFreq[fff]));
		}
		tmp.outputChannels = 0;
		tmp.inputChannels = 0;
		tmp.duplexChannels = 0;
		if (listProperty[1] == "out") {
			tmp.isDefaultOutput = true;
			tmp.isDefaultInput = false;
			tmp.outputChannels = etk::string_to_int32_t(listProperty[3]);
		} else if (listProperty[1] == "in") {
			tmp.isDefaultOutput = false;
			tmp.isDefaultInput = true;
			tmp.inputChannels = etk::string_to_int32_t(listProperty[3]);
		} else {
			/* duplex */
			tmp.isDefaultOutput = true;
			tmp.isDefaultInput = true;
			tmp.duplexChannels = etk::string_to_int32_t(listProperty[3]);
		}
		tmp.nativeFormats = audio::getListFormatFromString(listProperty[4]);
		m_devices.push_back(tmp);
	}
	ATA_INFO("Create Android interface (end)");
}
// Destructor: nothing to release here — the ewol/Java context owns the real device.
audio::orchestra::api::Android::~Android() {
	ATA_INFO("Destroy Android interface");
}
// Number of devices enumerated at construction time.
uint32_t audio::orchestra::api::Android::getDeviceCount() {
	//ATA_INFO("Get device count:"<< m_devices.size());
	return static_cast<uint32_t>(m_devices.size());
}
// Return the cached description of device _device.
// NOTE(review): _device is not range-checked against m_devices.size();
// an out-of-range id is undefined behavior — confirm callers validate it.
audio::orchestra::DeviceInfo audio::orchestra::api::Android::getDeviceInfo(uint32_t _device) {
	//ATA_INFO("Get device info ...");
	return m_devices[_device];
}
// Close the stream. The Android backend cannot actually close the device
// here (it is owned by the Java layer), so this is a logged no-op.
enum audio::orchestra::error audio::orchestra::api::Android::closeStream() {
	// FIX: log message typo ("Clese Stream" -> "Close Stream").
	ATA_INFO("Close Stream");
	// Can not close the stream now...
	return audio::orchestra::error_none;
}
// Start the stream: delegate the bookkeeping to the generic implementation.
enum audio::orchestra::error audio::orchestra::api::Android::startStream() {
	ATA_INFO("Start Stream");
	// Forward to the base class; its status is currently ignored.
	// TODO : Check return ...
	(void)audio::orchestra::Api::startStream();
	// Can not close the stream now...
	return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::Android::stopStream() {
ATA_INFO("Stop stream");
ewol::Context& tmpContext = ewol::getContext();
tmpContext.audioCloseDevice(0);
// Can not close the stream now...
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::Android::abortStream() {
ATA_INFO("Abort Stream");
ewol::Context& tmpContext = ewol::getContext();
tmpContext.audioCloseDevice(0);
// Can not close the stream now...
return audio::orchestra::error_none;
}
// Run one user-callback cycle and hand the produced samples to the device.
// @param _data Device buffer provided by the Java layer.
// @param _frameRate Number of frames to process in this cycle.
void audio::orchestra::api::Android::callBackEvent(void* _data,
                                                   int32_t _frameRate) {
	int32_t doStopStream = 0;
	std11::chrono::system_clock::time_point streamTime = getStreamTime();
	std::vector<enum audio::orchestra::status> status; // no over/underflow reported on this backend
	if (m_doConvertBuffer[audio::orchestra::mode_output] == true) {
		// Render into the intermediate user buffer, then convert into the
		// device's native layout/format.
		doStopStream = m_callback(nullptr,
		                          std11::chrono::system_clock::time_point(),
		                          m_userBuffer[audio::orchestra::mode_output],
		                          streamTime,
		                          _frameRate,
		                          status);
		convertBuffer((char*)_data, (char*)m_userBuffer[audio::orchestra::mode_output], m_convertInfo[audio::orchestra::mode_output]);
	} else {
		// Render straight into the device buffer.
		// NOTE(review): the positional order of (_data, streamTime) here differs
		// from the branch above, where the buffer/time pair occupies the third
		// and fourth arguments — confirm against the m_callback signature that
		// this asymmetry is intentional.
		doStopStream = m_callback(_data,
		                          streamTime,
		                          nullptr,
		                          std11::chrono::system_clock::time_point(),
		                          _frameRate,
		                          status);
	}
	if (doStopStream == 2) {
		// Callback requested an immediate abort.
		abortStream();
		return;
	}
	audio::orchestra::Api::tickStreamTime();
}
void audio::orchestra::api::Android::androidCallBackEvent(void* _data,
int32_t _frameRate,
void* _userData) {
if (_userData == nullptr) {
ATA_INFO("callback event ... nullptr pointer");
return;
}
audio::orchestra::api::Android* myClass = static_cast<audio::orchestra::api::Android*>(_userData);
myClass->callBackEvent(_data, _frameRate/2);
}
bool audio::orchestra::api::Android::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
if (_mode != audio::orchestra::mode_output) {
ATA_ERROR("Can not start a device input or duplex for Android ...");
return false;
}
m_userFormat = _format;
m_nUserChannels[modeToIdTable(_mode)] = _channels;
ewol::Context& tmpContext = ewol::getContext();
bool ret = false;
if (_format == SINT8) {
ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 0, androidCallBackEvent, this);
} else {
ret = tmpContext.audioOpenDevice(_device, _sampleRate, _channels, 1, androidCallBackEvent, this);
}
m_bufferSize = 256;
m_sampleRate = _sampleRate;
m_doByteSwap[modeToIdTable(_mode)] = false; // for endienness ...
// TODO : For now, we write it in hard ==> to bu update later ...
m_deviceFormat[modeToIdTable(_mode)] = SINT16;
m_nDeviceChannels[modeToIdTable(_mode)] = 2;
m_deviceInterleaved[modeToIdTable(_mode)] = true;
m_doConvertBuffer[modeToIdTable(_mode)] = false;
if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
&& m_nUserChannels[modeToIdTable(_mode)] > 1) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if (m_doConvertBuffer[modeToIdTable(_mode)] == true) {
// Allocate necessary internal buffers.
uint64_t bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
if (m_userBuffer[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("audio::orchestra::api::Android::probeDeviceOpen: error allocating user buffer memory.");
}
setConvertInfo(_mode, _firstChannel);
}
ATA_INFO("device format : " << m_deviceFormat[modeToIdTable(_mode)] << " user format : " << m_userFormat);
ATA_INFO("device channels : " << m_nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_nUserChannels[modeToIdTable(_mode)]);
ATA_INFO("do convert buffer : " << m_doConvertBuffer[modeToIdTable(_mode)]);
if (ret == false) {
ATA_ERROR("Can not open device.");
}
return ret;
}
#endif

View File

@@ -0,0 +1,56 @@
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */
// Declaration of the Android backend (bridged through the ewol/Java layer);
// compiled only when ORCHESTRA_BUILD_JAVA is set.
#if !defined(__AUDIO_ORCHESTRA_API_ANDROID_H__) && defined(ORCHESTRA_BUILD_JAVA)
#define __AUDIO_ORCHESTRA_API_ANDROID_H__
namespace audio {
	namespace orchestra {
		namespace api {
			/**
			 * @brief Android implementation of the audio::orchestra::Api backend.
			 */
			class Android: public audio::orchestra::Api {
				public:
					// Factory function used by the backend registry.
					static audio::orchestra::Api* Create();
				public:
					Android();
					virtual ~Android();
					// Identify this backend implementation (reported as the "java" type).
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_java;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesirable results!
					void callbackEvent();
				private:
					std::vector<audio::orchestra::DeviceInfo> m_devices; // cached device descriptions (filled in constructor)
					void saveDeviceInfo();
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
				private:
					// Run one user-callback cycle (invoked by androidCallBackEvent).
					void callBackEvent(void* _data,
					                   int32_t _frameRate);
					// Static trampoline registered with the Java layer; _userData is the Android instance.
					static void androidCallBackEvent(void* _data,
					                                 int32_t _frameRate,
					                                 void* _userData);
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,923 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if defined(ORCHESTRA_BUILD_ASIO)
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
// Factory entry point: build a new ASIO backend instance.
audio::orchestra::Api* audio::orchestra::api::Asio::Create() {
	audio::orchestra::api::Asio* instance = new audio::orchestra::api::Asio();
	return instance;
}
// The ASIO API is designed around a callback scheme, so this
// implementation is similar to that used for OS-X CoreAudio and Linux
// Jack. The primary constraint with ASIO is that it only allows
// access to a single driver at a time. Thus, it is not possible to
// have more than one simultaneous RtAudio stream.
//
// This implementation also requires a number of external ASIO files
// and a few global variables. The ASIO callback scheme does not
// allow for the passing of user data, so we must create a global
// pointer to our callbackInfo structure.
//
// On unix systems, we make use of a pthread condition variable.
// Since there is no equivalent in Windows, I hacked something based
// on information found in
// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
#include "asiosys.h"
#include "asio.h"
#include "iasiothiscallresolver.h"
#include "asiodrivers.h"
#include <cmath>
#undef __class__
#define __class__ "api::Asio"
// File-scope state shared with the ASIO driver callbacks. The ASIO callback
// scheme does not allow passing user data, so these must be globals (see the
// explanatory comment above).
static AsioDrivers drivers;
static ASIOCallbacks asioCallbacks;
static ASIODriverInfo driverInfo;
static CallbackInfo *asioCallbackInfo;
static bool asioXRun;
namespace audio {
namespace orchestra {
namespace api {
class AsioPrivate {
public:
int32_t drainCounter; // Tracks callback counts when draining
bool internalDrain; // Indicates if stop is initiated from callback or not.
ASIOBufferInfo *bufferInfos;
HANDLE condition;
AsioPrivate() :
drainCounter(0),
internalDrain(false),
bufferInfos(0) {
}
};
}
}
}
// Function declarations (definitions at end of section)
static const char* getAsioErrorString(ASIOError _result);
static void sampleRateChanged(ASIOSampleRate _sRate);
static long asioMessages(long _selector, long _value, void* _message, double* _opt);
// Constructor: initialize COM (single-threaded apartment required by ASIO)
// and prepare the driver manager.
audio::orchestra::api::Asio::Asio() :
  m_private(new audio::orchestra::api::AsioPrivate()) {
	// ASIO cannot run on a multi-threaded appartment. You can call
	// CoInitialize beforehand, but it must be for appartment threading
	// (in which case, CoInitilialize will return S_FALSE here).
	m_coInitialized = false;
	HRESULT hr = CoInitialize(nullptr);
	if (FAILED(hr)) {
		ATA_ERROR("requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)");
	} else {
		// FIX: only record success — the original set m_coInitialized = true
		// unconditionally, which made the destructor call an unbalanced
		// CoUninitialize() after a FAILED CoInitialize (e.g. RPC_E_CHANGED_MODE).
		m_coInitialized = true;
	}
	drivers.removeCurrentDriver();
	driverInfo.asioVersion = 2;
	// See note in DirectSound implementation about GetDesktopWindow().
	driverInfo.sysRef = GetForegroundWindow();
}
// Destructor: tear down any stream still open, then balance CoInitialize.
audio::orchestra::api::Asio::~Asio() {
	if (m_state != audio::orchestra::state_closed) {
		closeStream();
	}
	if (m_coInitialized == true) {
		CoUninitialize();
	}
}
// Number of installed ASIO drivers, as reported by the driver manager.
uint32_t audio::orchestra::api::Asio::getDeviceCount() {
	return static_cast<uint32_t>(drivers.asioGetNumDev());
}
rtaudio::DeviceInfo audio::orchestra::api::Asio::getDeviceInfo(uint32_t _device) {
rtaudio::DeviceInfo info;
info.probed = false;
// Get device ID
uint32_t nDevices = getDeviceCount();
if (nDevices == 0) {
ATA_ERROR("no devices found!");
return info;
}
if (_device >= nDevices) {
ATA_ERROR("device ID is invalid!");
return info;
}
// If a stream is already open, we cannot probe other devices. Thus, use the saved results.
if (m_state != audio::orchestra::state_closed) {
if (_device >= m_devices.size()) {
ATA_ERROR("device ID was not present before stream was opened.");
return info;
}
return m_devices[ _device ];
}
char driverName[32];
ASIOError result = drivers.asioGetDriverName((int) _device, driverName, 32);
if (result != ASE_OK) {
ATA_ERROR("unable to get driver name (" << getAsioErrorString(result) << ").");
return info;
}
info.name = driverName;
if (!drivers.loadDriver(driverName)) {
ATA_ERROR("unable to load driver (" << driverName << ").");
return info;
}
result = ASIOInit(&driverInfo);
if (result != ASE_OK) {
ATA_ERROR("error (" << getAsioErrorString(result) << ") initializing driver (" << driverName << ").");
return info;
}
// Determine the device channel information.
long inputChannels, outputChannels;
result = ASIOGetChannels(&inputChannels, &outputChannels);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("error (" << getAsioErrorString(result) << ") getting channel count (" << driverName << ").");
return info;
}
info.outputChannels = outputChannels;
info.inputChannels = inputChannels;
if (info.outputChannels > 0 && info.inputChannels > 0) {
info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
}
// Determine the supported sample rates.
info.sampleRates.clear();
for (uint32_t i=0; i<MAX_SAMPLE_RATES; i++) {
result = ASIOCanSampleRate((ASIOSampleRate) SAMPLE_RATES[i]);
if (result == ASE_OK) {
info.sampleRates.push_back(SAMPLE_RATES[i]);
}
}
// Determine supported data types ... just check first channel and assume rest are the same.
ASIOChannelInfo channelInfo;
channelInfo.channel = 0;
channelInfo.isInput = true;
if (info.inputChannels <= 0) {
channelInfo.isInput = false;
}
result = ASIOGetChannelInfo(&channelInfo);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("error (" << getAsioErrorString(result) << ") getting driver channel info (" << driverName << ").");
return info;
}
info.nativeFormats.clear();
if ( channelInfo.type == ASIOSTInt16MSB
|| channelInfo.type == ASIOSTInt16LSB) {
info.nativeFormats.push_back(audio::format_int16);
} else if ( channelInfo.type == ASIOSTInt32MSB
|| channelInfo.type == ASIOSTInt32LSB) {
info.nativeFormats.push_back(audio::format_int32);
} else if ( channelInfo.type == ASIOSTFloat32MSB
|| channelInfo.type == ASIOSTFloat32LSB) {
info.nativeFormats.push_back(audio::format_float);
} else if ( channelInfo.type == ASIOSTFloat64MSB
|| channelInfo.type == ASIOSTFloat64LSB) {
info.nativeFormats.push_back(audio::format_double);
} else if ( channelInfo.type == ASIOSTInt24MSB
|| channelInfo.type == ASIOSTInt24LSB) {
info.nativeFormats.push_back(audio::format_int24);
}
if (info.outputChannels > 0){
if (getDefaultOutputDevice() == _device) {
info.isDefaultOutput = true;
}
}
if (info.inputChannels > 0) {
if (getDefaultInputDevice() == _device) {
info.isDefaultInput = true;
}
}
info.probed = true;
drivers.removeCurrentDriver();
return info;
}
static void bufferSwitch(long _index, ASIOBool _processNow) {
RtApiAsio* object = (RtApiAsio*)asioCallbackInfo->object;
object->callbackEvent(_index);
}
void audio::orchestra::api::Asio::saveDeviceInfo() {
m_devices.clear();
uint32_t nDevices = getDeviceCount();
m_devices.resize(nDevices);
for (uint32_t i=0; i<nDevices; i++) {
m_devices[i] = getDeviceInfo(i);
}
}
bool audio::orchestra::api::Asio::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t* _bufferSize,
const audio::orchestra::StreamOptions& _options) {
// For ASIO, a duplex stream MUST use the same driver.
if ( _mode == audio::orchestra::mode_input
&& m_mode == audio::orchestra::mode_output
&& m_device[0] != _device) {
ATA_ERROR("an ASIO duplex stream must use the same device for input and output!");
return false;
}
char driverName[32];
ASIOError result = drivers.asioGetDriverName((int) _device, driverName, 32);
if (result != ASE_OK) {
ATA_ERROR("unable to get driver name (" << getAsioErrorString(result) << ").");
return false;
}
// Only load the driver once for duplex stream.
if ( _mode != audio::orchestra::mode_input
|| m_mode != audio::orchestra::mode_output) {
// The getDeviceInfo() function will not work when a stream is open
// because ASIO does not allow multiple devices to run at the same
// time. Thus, we'll probe the system before opening a stream and
// save the results for use by getDeviceInfo().
this->saveDeviceInfo();
if (!drivers.loadDriver(driverName)) {
ATA_ERROR("unable to load driver (" << driverName << ").");
return false;
}
result = ASIOInit(&driverInfo);
if (result != ASE_OK) {
ATA_ERROR("error (" << getAsioErrorString(result) << ") initializing driver (" << driverName << ").");
return false;
}
}
// Check the device channel count.
long inputChannels, outputChannels;
result = ASIOGetChannels(&inputChannels, &outputChannels);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("error (" << getAsioErrorString(result) << ") getting channel count (" << driverName << ").");
return false;
}
if ( ( _mode == audio::orchestra::mode_output
&& (_channels+_firstChannel) > (uint32_t) outputChannels)
|| ( _mode == audio::orchestra::mode_input
&& (_channels+_firstChannel) > (uint32_t) inputChannels)) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") does not support requested channel count (" << _channels << ") + offset (" << _firstChannel << ").");
return false;
}
m_nDeviceChannels[modeToIdTable(_mode)] = _channels;
m_nUserChannels[modeToIdTable(_mode)] = _channels;
m_channelOffset[modeToIdTable(_mode)] = _firstChannel;
// Verify the sample rate is supported.
result = ASIOCanSampleRate((ASIOSampleRate) _sampleRate);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") does not support requested sample rate (" << _sampleRate << ").");
return false;
}
// Get the current sample rate
ASIOSampleRate currentRate;
result = ASIOGetSampleRate(&currentRate);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") error getting sample rate.");
return false;
}
// Set the sample rate only if necessary
if (currentRate != _sampleRate) {
result = ASIOSetSampleRate((ASIOSampleRate) _sampleRate);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") error setting sample rate (" << _sampleRate << ").");
return false;
}
}
// Determine the driver data type.
ASIOChannelInfo channelInfo;
channelInfo.channel = 0;
if (_mode == audio::orchestra::mode_output) {
channelInfo.isInput = false;
} else {
channelInfo.isInput = true;
}
result = ASIOGetChannelInfo(&channelInfo);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting data format.");
return false;
}
// Assuming WINDOWS host is always little-endian.
m_doByteSwap[modeToIdTable(_mode)] = false;
m_userFormat = _format;
m_deviceFormat[modeToIdTable(_mode)] = 0;
if ( channelInfo.type == ASIOSTInt16MSB
|| channelInfo.type == ASIOSTInt16LSB) {
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
if (channelInfo.type == ASIOSTInt16MSB) {
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if ( channelInfo.type == ASIOSTInt32MSB
|| channelInfo.type == ASIOSTInt32LSB) {
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
if (channelInfo.type == ASIOSTInt32MSB) {
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if ( channelInfo.type == ASIOSTFloat32MSB
|| channelInfo.type == ASIOSTFloat32LSB) {
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_FLOAT32;
if (channelInfo.type == ASIOSTFloat32MSB) {
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if ( channelInfo.type == ASIOSTFloat64MSB
|| channelInfo.type == ASIOSTFloat64LSB) {
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_FLOAT64;
if (channelInfo.type == ASIOSTFloat64MSB) {
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if ( channelInfo.type == ASIOSTInt24MSB
|| channelInfo.type == ASIOSTInt24LSB) {
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
if (channelInfo.type == ASIOSTInt24MSB) {
m_doByteSwap[modeToIdTable(_mode)] = true;
}
}
if (m_deviceFormat[modeToIdTable(_mode)] == 0) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") data format not supported by RtAudio.");
return false;
}
// Set the buffer size. For a duplex stream, this will end up
// setting the buffer size based on the input constraints, which
// should be ok.
long minSize, maxSize, preferSize, granularity;
result = ASIOGetBufferSize(&minSize, &maxSize, &preferSize, &granularity);
if (result != ASE_OK) {
drivers.removeCurrentDriver();
ATA_ERROR("driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting buffer size.");
return false;
}
if (*_bufferSize < (uint32_t) minSize) {
*_bufferSize = (uint32_t) minSize;
} else if (*_bufferSize > (uint32_t) maxSize) {
*_bufferSize = (uint32_t) maxSize;
} else if (granularity == -1) {
// Make sure bufferSize is a power of two.
int32_t log2_of_min_size = 0;
int32_t log2_of_max_size = 0;
for (uint32_t i = 0; i < sizeof(long) * 8; i++) {
if (minSize & ((long)1 << i)) {
log2_of_min_size = i;
}
if (maxSize & ((long)1 << i)) {
log2_of_max_size = i;
}
}
long min_delta = std::abs((long)*_bufferSize - ((long)1 << log2_of_min_size));
int32_t min_delta_num = log2_of_min_size;
for (int32_t i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
long current_delta = std::abs((long)*_bufferSize - ((long)1 << i));
if (current_delta < min_delta) {
min_delta = current_delta;
min_delta_num = i;
}
}
*_bufferSize = ((uint32_t)1 << min_delta_num);
if (*_bufferSize < (uint32_t) {
minSize) *_bufferSize = (uint32_t) minSize;
} else if (*_bufferSize > (uint32_t) maxSize) {
*_bufferSize = (uint32_t) maxSize;
}
} else if (granularity != 0) {
// Set to an even multiple of granularity, rounding up.
*_bufferSize = (*_bufferSize + granularity-1) / granularity * granularity;
}
if ( _mode == audio::orchestra::mode_input
&& m_mode == audio::orchestra::mode_output
&& m_bufferSize != *_bufferSize) {
drivers.removeCurrentDriver();
ATA_ERROR("input/output buffersize discrepancy!");
return false;
}
m_bufferSize = *_bufferSize;
m_nBuffers = 2;
// ASIO always uses non-interleaved buffers.
m_deviceInterleaved[modeToIdTable(_mode)] = false;
m_private->bufferInfos = 0;
// Create a manual-reset event.
m_private->condition = CreateEvent(nullptr, // no security
TRUE, // manual-reset
FALSE, // non-signaled initially
nullptr); // unnamed
// Create the ASIO internal buffers. Since RtAudio sets up input
// and output separately, we'll have to dispose of previously
// created output buffers for a duplex stream.
long inputLatency, outputLatency;
if ( _mode == audio::orchestra::mode_input
&& m_mode == audio::orchestra::mode_output) {
ASIODisposeBuffers();
if (m_private->bufferInfos == nullptr) {
free(m_private->bufferInfos);
m_private->bufferInfos = nullptr;
}
}
// Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
bool buffersAllocated = false;
uint32_t i, nChannels = m_nDeviceChannels[0] + m_nDeviceChannels[1];
m_private->bufferInfos = (ASIOBufferInfo *) malloc(nChannels * sizeof(ASIOBufferInfo));
if (m_private->bufferInfos == nullptr) {
ATA_ERROR("error allocating bufferInfo memory for driver (" << driverName << ").");
goto error;
}
ASIOBufferInfo *infos;
infos = m_private->bufferInfos;
for (i=0; i<m_nDeviceChannels[0]; i++, infos++) {
infos->isInput = ASIOFalse;
infos->channelNum = i + m_channelOffset[0];
infos->buffers[0] = infos->buffers[1] = 0;
}
for (i=0; i<m_nDeviceChannels[1]; i++, infos++) {
infos->isInput = ASIOTrue;
infos->channelNum = i + m_channelOffset[1];
infos->buffers[0] = infos->buffers[1] = 0;
}
// Set up the ASIO callback structure and create the ASIO data buffers.
asioCallbacks.bufferSwitch = &bufferSwitch;
asioCallbacks.sampleRateDidChange = &sampleRateChanged;
asioCallbacks.asioMessage = &asioMessages;
asioCallbacks.bufferSwitchTimeInfo = nullptr;
result = ASIOCreateBuffers(m_private->bufferInfos, nChannels, m_bufferSize, &asioCallbacks);
if (result != ASE_OK) {
ATA_ERROR("driver (" << driverName << ") error (" << getAsioErrorString(result) << ") creating buffers.");
goto error;
}
buffersAllocated = true;
// Set flags for buffer conversion.
m_doConvertBuffer[modeToIdTable(_mode)] = false;
if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
&& m_nUserChannels[modeToIdTable(_mode)] > 1) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
// Allocate necessary internal buffers
uint64_t bufferBytes;
bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
if (m_userBuffer[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("error allocating user buffer memory.");
goto error;
}
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
bool makeBuffer = true;
bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
if (_mode == audio::orchestra::mode_input) {
if (m_mode == audio::orchestra::mode_output && m_deviceBuffer) {
uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
if (bufferBytes <= bytesOut) {
makeBuffer = false;
}
}
}
if (makeBuffer) {
bufferBytes *= *_bufferSize;
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = nullptr;
}
m_deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_deviceBuffer == nullptr) {
ATA_ERROR("error allocating device buffer memory.");
goto error;
}
}
}
m_sampleRate = _sampleRate;
m_device[modeToIdTable(_mode)] = _device;
m_state = audio::orchestra::state_stopped;
if ( _mode == audio::orchestra::mode_output
&& _mode == audio::orchestra::mode_input) {
// We had already set up an output stream.
m_mode = audio::orchestra::mode_duplex;
} else {
m_mode = _mode;
}
// Determine device latencies
result = ASIOGetLatencies(&inputLatency, &outputLatency);
if (result != ASE_OK) {
ATA_ERROR("driver (" << driverName << ") error (" << getAsioErrorString(result) << ") getting latency.");
} else {
m_latency[0] = outputLatency;
m_latency[1] = inputLatency;
}
// Setup the buffer conversion information structure. We don't use
// buffers to do channel offsets, so we override that parameter
// here.
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
setConvertInfo(_mode, 0);
}
return true;
error:
if (buffersAllocated) {
ASIODisposeBuffers();
}
drivers.removeCurrentDriver();
CloseHandle(m_private->condition);
if (m_private->bufferInfos != nullptr) {
free(m_private->bufferInfos);
m_private->bufferInfos = nullptr;
}
for (int32_t i=0; i<2; i++) {
if (m_userBuffer[i]) {
free(m_userBuffer[i]);
m_userBuffer[i] = 0;
}
}
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = 0;
}
return false;
}
// Close the current stream: stop if running, dispose the ASIO buffers, unload
// the driver, and release every buffer/handle owned by the stream.
enum audio::orchestra::error audio::orchestra::api::Asio::closeStream() {
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("no open stream to close!");
		return audio::orchestra::error_warning;
	}
	if (m_state == audio::orchestra::state_running) {
		m_state = audio::orchestra::state_stopped;
		ASIOStop();
	}
	ASIODisposeBuffers();
	drivers.removeCurrentDriver();
	CloseHandle(m_private->condition);
	if (m_private->bufferInfos) {
		free(m_private->bufferInfos);
		// FIX: null the pointer after free — the original left it dangling,
		// risking a double free on a later probe/close cycle.
		m_private->bufferInfos = nullptr;
	}
	for (int32_t i=0; i<2; i++) {
		if (m_userBuffer[i]) {
			free(m_userBuffer[i]);
			m_userBuffer[i] = 0;
		}
	}
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = 0;
	}
	m_mode = audio::orchestra::mode_unknow;
	m_state = audio::orchestra::state_closed;
	return audio::orchestra::error_none;
}
// Reset by startStream(); presumably set by the callback-initiated stop path
// (not visible in this chunk) — TODO confirm where it is set to true.
bool stopThreadCalled = false;
enum audio::orchestra::error audio::orchestra::api::Asio::startStream() {
// TODO : Check return ...
audio::orchestra::Api::startStream();
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_running) {
ATA_ERROR("the stream is already running!");
return audio::orchestra::error_warning;
}
ASIOError result = ASIOStart();
if (result != ASE_OK) {
ATA_ERROR("error (" << getAsioErrorString(result) << ") starting device.");
goto unlock;
}
m_private->drainCounter = 0;
m_private->internalDrain = false;
ResetEvent(m_private->condition);
m_state = audio::orchestra::state_running;
asioXRun = false;
unlock:
stopThreadCalled = false;
if (result == ASE_OK) {
return audio::orchestra::error_none;
}
return audio::orchestra::error_systemError;
}
enum audio::orchestra::error audio::orchestra::api::Asio::stopStream() {
if (verifyStream() != audio::orchestra::error_none) {
return audio::orchestra::error_fail;
}
if (m_state == audio::orchestra::state_stopped) {
ATA_ERROR("the stream is already stopped!");
return audio::orchestra::error_warning;
}
if (m_mode == audio::orchestra::mode_output || m_mode == audio::orchestra::mode_duplex) {
if (m_private->drainCounter == 0) {
m_private->drainCounter = 2;
WaitForSingleObject(m_private->condition, INFINITE); // block until signaled
}
}
m_state = audio::orchestra::state_stopped;
ASIOError result = ASIOStop();
if (result != ASE_OK) {
ATA_ERROR("error (" << getAsioErrorString(result) << ") stopping device.");
}
if (result == ASE_OK) {
return audio::orchestra::error_none;
}
return audio::orchestra::error_systemError;
}
enum audio::orchestra::error audio::orchestra::api::Asio::abortStream() {
	// Abort is implemented as a plain stop (see note below).
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		// Fixed: the function returns an enum; the previous
		// 'error(...); return;' form did not compile.
		return audio::orchestra::error_warning;
	}
	// The following lines were commented-out because some behavior was
	// noted where the device buffers need to be zeroed to avoid
	// continuing sound, even when the device buffers are completely
	// disposed. So now, calling abort is the same as calling stop.
	// handle->drainCounter = 2;
	return stopStream();
}
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted. It is necessary to handle it this way because the
// callbackEvent() function must return before the ASIOStop()
// function will return.
static unsigned __stdcall asioStopStream(void *_ptr) {
	CallbackInfo* info = (CallbackInfo*)_ptr;
	// Fixed: cast to the renamed class (the legacy 'RtApiAsio' type no
	// longer exists after the audio::orchestra rework).
	audio::orchestra::api::Asio* object = (audio::orchestra::api::Asio*)info->object;
	object->stopStream();
	_endthreadex(0);
	return 0;
}
bool audio::orchestra::api::Asio::callbackEvent(long bufferIndex) {
if ( m_state == audio::orchestra::state_stopped
|| m_state == audio::orchestra::state_stopping) {
return true;
}
if (m_state == audio::orchestra::state_closed) {
ATA_ERROR("the stream is closed ... this shouldn't happen!");
return false;
}
CallbackInfo *info = (CallbackInfo *) &m_callbackInfo;
// Check if we were draining the stream and signal if finished.
if (m_private->drainCounter > 3) {
m_state = audio::orchestra::state_stopping;
if (m_private->internalDrain == false) {
SetEvent(m_private->condition);
} else { // spawn a thread to stop the stream
unsigned threadId;
m_callbackInfo.thread = _beginthreadex(nullptr,
0,
&asioStopStream,
&m_callbackInfo,
0,
&threadId);
}
return true;
}
// Invoke user callback to get fresh output data UNLESS we are
// draining stream.
if (m_private->drainCounter == 0) {
std11::chrono::system_clock::time_point streamTime = getStreamTime();
std::vector<enum audio::orchestra::status status;
if (m_mode != audio::orchestra::mode_input && asioXRun == true) {
status.push_back(audio::orchestra::status_underflow);
asioXRun = false;
}
if (m_mode != audio::orchestra::mode_output && asioXRun == true) {
status.push_back(audio::orchestra::status_underflow;
asioXRun = false;
}
int32_t cbReturnValue = info->callback(m_userBuffer[1],
streamTime,
m_userBuffer[0],
streamTime,
m_bufferSize,
status);
if (cbReturnValue == 2) {
m_state = audio::orchestra::state_stopping;
m_private->drainCounter = 2;
unsigned threadId;
m_callbackInfo.thread = _beginthreadex(nullptr,
0,
&asioStopStream,
&m_callbackInfo,
0,
&threadId);
return true;
} else if (cbReturnValue == 1) {
m_private->drainCounter = 1;
m_private->internalDrain = true;
}
}
uint32_t nChannels, bufferBytes, i, j;
nChannels = m_nDeviceChannels[0] + m_nDeviceChannels[1];
if ( m_mode == audio::orchestra::mode_output
|| m_mode == audio::orchestra::mode_duplex) {
bufferBytes = m_bufferSize * audio::getFormatBytes(m_deviceFormat[0]);
if (m_private->drainCounter > 1) { // write zeros to the output stream
for (i=0, j=0; i<nChannels; i++) {
if (m_private->bufferInfos[i].isInput != ASIOTrue) {
memset(m_private->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes);
}
}
} else if (m_doConvertBuffer[0]) {
convertBuffer(m_deviceBuffer, m_userBuffer[0], m_convertInfo[0]);
if (m_doByteSwap[0]) {
byteSwapBuffer(m_deviceBuffer,
m_bufferSize * m_nDeviceChannels[0],
m_deviceFormat[0]);
}
for (i=0, j=0; i<nChannels; i++) {
if (m_private->bufferInfos[i].isInput != ASIOTrue) {
memcpy(m_private->bufferInfos[i].buffers[bufferIndex],
&m_deviceBuffer[j++*bufferBytes],
bufferBytes);
}
}
} else {
if (m_doByteSwap[0]) {
byteSwapBuffer(m_userBuffer[0],
m_bufferSize * m_nUserChannels[0],
m_userFormat);
}
for (i=0, j=0; i<nChannels; i++) {
if (m_private->bufferInfos[i].isInput != ASIOTrue) {
memcpy(m_private->bufferInfos[i].buffers[bufferIndex],
&m_userBuffer[0][bufferBytes*j++],
bufferBytes);
}
}
}
if (m_private->drainCounter) {
m_private->drainCounter++;
goto unlock;
}
}
if ( m_mode == audio::orchestra::mode_input
|| m_mode == audio::orchestra::mode_duplex) {
bufferBytes = m_bufferSize * audio::getFormatBytes(m_deviceFormat[1]);
if (m_doConvertBuffer[1]) {
// Always interleave ASIO input data.
for (i=0, j=0; i<nChannels; i++) {
if (m_private->bufferInfos[i].isInput == ASIOTrue) {
memcpy(&m_deviceBuffer[j++*bufferBytes],
m_private->bufferInfos[i].buffers[bufferIndex],
bufferBytes);
}
}
if (m_doByteSwap[1]) {
byteSwapBuffer(m_deviceBuffer,
m_bufferSize * m_nDeviceChannels[1],
m_deviceFormat[1]);
}
convertBuffer(m_userBuffer[1],
m_deviceBuffer,
m_convertInfo[1]);
} else {
for (i=0, j=0; i<nChannels; i++) {
if (m_private->bufferInfos[i].isInput == ASIOTrue) {
memcpy(&m_userBuffer[1][bufferBytes*j++],
m_private->bufferInfos[i].buffers[bufferIndex],
bufferBytes);
}
}
if (m_doByteSwap[1]) {
byteSwapBuffer(m_userBuffer[1],
m_bufferSize * m_nUserChannels[1],
m_userFormat);
}
}
}
unlock:
// The following call was suggested by Malte Clasen. While the API
// documentation indicates it should not be required, some device
// drivers apparently do not function correctly without it.
ASIOOutputReady();
audio::orchestra::Api::tickStreamTime();
return true;
}
static void sampleRateChanged(ASIOSampleRate _sRate) {
	// The ASIO documentation says that this usually only happens during
	// external sync. Audio processing is not stopped by the driver,
	// actual sample rate might not have even changed, maybe only the
	// sample rate status of an AES/EBU or S/PDIF digital input at the
	// audio device.
	// Fixed: cast to the renamed base class (legacy 'RtApi' is gone) and
	// restored the missing ';' after stopStream().
	audio::orchestra::Api* object = (audio::orchestra::Api*)asioCallbackInfo->object;
	enum audio::orchestra::error ret = object->stopStream();
	if (ret != audio::orchestra::error_none) {
		ATA_ERROR("error stop stream!");
	} else {
		ATA_ERROR("driver reports sample rate changed to " << _sRate << " ... stream stopped!!!");
	}
}
static long asioMessages(long _selector, long _value, void* _message, double* _opt) {
	// ASIO host message handler: answers driver queries and requests.
	long ret = 0;
	switch (_selector) {
		case kAsioSelectorSupported: {
			// Advertise which of the selectors below this host implements.
			bool supported =    _value == kAsioResetRequest
			                 || _value == kAsioEngineVersion
			                 || _value == kAsioResyncRequest
			                 || _value == kAsioLatenciesChanged
			                 // The following three were added for ASIO 2.0, you don't
			                 // necessarily have to support them.
			                 || _value == kAsioSupportsTimeInfo
			                 || _value == kAsioSupportsTimeCode
			                 || _value == kAsioSupportsInputMonitor;
			if (supported == true) {
				ret = 1L;
			}
			break;
		}
		case kAsioResetRequest:
			// Defer the task and perform the reset of the driver during the
			// next "safe" situation. You cannot reset the driver right now,
			// as this code is called from the driver. Reset the driver is
			// done by completely destruct is. I.e. ASIOStop(),
			// ASIODisposeBuffers(), Destruction Afterwards you initialize the
			// driver again.
			ATA_ERROR("driver reset requested!!!");
			ret = 1L;
			break;
		case kAsioResyncRequest:
			// This informs the application that the driver encountered some
			// non-fatal data loss. It is used for synchronization purposes
			// of different media. Added mainly to work around the Win16Mutex
			// problems in Windows 95/98 with the Windows Multimedia system,
			// which could lose data because the Mutex was held too long by
			// another thread. However a driver can issue it in other
			// situations, too.
			// ATA_ERROR("driver resync requested!!!");
			asioXRun = true;
			ret = 1L;
			break;
		case kAsioLatenciesChanged:
			// This will inform the host application that the drivers were
			// latencies changed. Beware, it this does not mean that the
			// buffer sizes have changed! You might need to update internal
			// delay data.
			ATA_ERROR("driver latency may have changed!!!");
			ret = 1L;
			break;
		case kAsioEngineVersion:
			// Return the supported ASIO version of the host application. If
			// a host application does not implement this selector, ASIO 1.0
			// is assumed by the driver.
			ret = 2L;
			break;
		case kAsioSupportsTimeInfo:
			// Informs the driver whether the
			// asioCallbacks.bufferSwitchTimeInfo() callback is supported.
			// For compatibility with ASIO 1.0 drivers the host application
			// should always support the "old" bufferSwitch method, too.
			ret = 0;
			break;
		case kAsioSupportsTimeCode:
			// Informs the driver whether application is interested in time
			// code info. If an application does not need to know about time
			// code, the driver has less work to do.
			ret = 0;
			break;
	}
	return ret;
}
static const char* getAsioErrorString(ASIOError _result) {
	// Map an ASIO error code onto a human-readable description.
	struct Messages {
		ASIOError value;
		const char*message;
	};
	static const Messages m[] = {
		{ ASE_NotPresent, "Hardware input or output is not present or available." },
		{ ASE_HWMalfunction, "Hardware is malfunctioning." },
		{ ASE_InvalidParameter, "Invalid input parameter." },
		{ ASE_InvalidMode, "Invalid mode." },
		{ ASE_SPNotAdvancing, "Sample position not advancing." },
		{ ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
		{ ASE_NoMemory, "Not enough memory to complete the request." }
	};
	for (uint32_t i = 0; i < sizeof(m)/sizeof(m[0]); ++i) {
		// Fixed: the comparison referenced 'result', but the parameter is
		// named '_result'.
		if (m[i].value == _result) {
			return m[i].message;
		}
	}
	return "Unknown error.";
}
#endif

View File

@@ -0,0 +1,54 @@
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */
#if !defined(__AUDIO_ORCHESTRA_API_ASIO_H__) && defined(ORCHESTRA_BUILD_ASIO)
#define __AUDIO_ORCHESTRA_API_ASIO_H__

namespace audio {
	namespace orchestra {
		namespace api {
			// Opaque backend state (fixed: was declared with ':' instead of ';').
			class AsioPrivate;
			/**
			 * @brief Steinberg ASIO (Windows) backend of the orchestra API.
			 */
			class Asio: public audio::orchestra::Api {
				public:
					static audio::orchestra::Api* Create();
				public:
					Asio();
					virtual ~Asio();
					enum audio::orchestra::type getCurrentApi() {
						// Fixed: aligned on the 'type_*' enumeration used by every
						// other backend (was the legacy 'WINDOWS_ASIO' identifier).
						return audio::orchestra::type_asio;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					bool callbackEvent(long _bufferIndex);
				private:
					std::shared_ptr<AsioPrivate> m_private;
					std::vector<audio::orchestra::DeviceInfo> m_devices;
					void saveDeviceInfo();
					bool m_coInitialized;
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}
#endif

1292
audio/orchestra/api/Core.cpp Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,69 @@
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */
#if !defined(__AUDIO_ORCHESTRA_API_CORE_H__) && defined(ORCHESTRA_BUILD_MACOSX_CORE)
#define __AUDIO_ORCHESTRA_API_CORE_H__

#include <CoreAudio/AudioHardware.h>

namespace audio {
	namespace orchestra {
		namespace api {
			// Opaque backend state, defined in Core.cpp.
			class CorePrivate;
			/**
			 * @brief MacOS-X CoreAudio backend of the orchestra API.
			 */
			class Core: public audio::orchestra::Api {
				public:
					static audio::orchestra::Api* Create();
				public:
					Core();
					virtual ~Core();
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_coreOSX;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					uint32_t getDefaultOutputDevice();
					uint32_t getDefaultInputDevice();
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					// Member-side handler invoked from the static CoreAudio callback.
					bool callbackEvent(AudioDeviceID _deviceId,
					                   const AudioBufferList *_inBufferList,
					                   const std11::chrono::system_clock::time_point& _inTime,
					                   const AudioBufferList *_outBufferList,
					                   const std11::chrono::system_clock::time_point& _outTime);
					// Trampoline registered with CoreAudio ('_infoPointer' carries the instance).
					static OSStatus callbackEvent(AudioDeviceID _inDevice,
					                              const AudioTimeStamp* _inNow,
					                              const AudioBufferList* _inInputData,
					                              const AudioTimeStamp* _inInputTime,
					                              AudioBufferList* _outOutputData,
					                              const AudioTimeStamp* _inOutputTime,
					                              void* _infoPointer);
					static void coreStopStream(void *_userData);
				private:
					std::shared_ptr<CorePrivate> m_private;
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
					static const char* getErrorCode(OSStatus _code);
					// Listener registered for device overload (xrun) notifications.
					static OSStatus xrunListener(AudioObjectID _inDevice,
					                             uint32_t _nAddresses,
					                             const AudioObjectPropertyAddress _properties[],
					                             void* _userData);
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,57 @@
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */
#if !defined(__AUDIO_ORCHESTRA_API_CORE_IOS_H__) && defined(ORCHESTRA_BUILD_IOS_CORE)
#define __AUDIO_ORCHESTRA_API_CORE_IOS_H__

namespace audio {
	namespace orchestra {
		namespace api {
			// Opaque backend state, defined in CoreIos.mm.
			class CoreIosPrivate;
			/**
			 * @brief iOS (RemoteIO AudioUnit) backend of the orchestra API.
			 */
			class CoreIos: public audio::orchestra::Api {
				public:
					static audio::orchestra::Api* Create();
				public:
					CoreIos();
					virtual ~CoreIos();
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_coreIOS;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					void callbackEvent();
				private:
					std::vector<audio::orchestra::DeviceInfo> m_devices;
					void saveDeviceInfo();
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
				public:
					// Invoked from the RemoteIO render callback with the raw output buffer.
					// NOTE(review): naming differs from callbackEvent() above — confirm intended.
					void callBackEvent(void* _data,
					                   int32_t _frameRate);
				public:
					std11::shared_ptr<CoreIosPrivate> m_private;
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,312 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#ifdef ORCHESTRA_BUILD_IOS_CORE
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#include <unistd.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <limits.h>
#undef __class__
#define __class__ "api::CoreIos"
audio::orchestra::Api* audio::orchestra::api::CoreIos::Create(void) {
	// Factory entry point used by the backend registry.
	ATA_INFO("Create CoreIos device ... ");
	audio::orchestra::api::CoreIos* instance = new audio::orchestra::api::CoreIos();
	return instance;
}
#define kOutputBus 0
#define kInputBus 1
namespace audio {
	namespace orchestra {
		namespace api {
			// Private backend state: holds the RemoteIO AudioUnit instance
			// created in probeDeviceOpen() and released in the destructor.
			class CoreIosPrivate {
				public:
					AudioComponentInstance audioUnit;
			};
		}
	}
}
audio::orchestra::api::CoreIos::CoreIos(void) :
  m_private(new audio::orchestra::api::CoreIosPrivate()) {
	// iOS exposes no device enumeration: publish one fixed stereo output
	// and one fixed stereo input, both 48 kHz / int16.
	ATA_INFO("new CoreIos");
	// Fixed: removed the unused local 'deviceCount' and demoted the
	// informational trace from ERROR to INFO level.
	ATA_INFO("Get count devices : " << 2);
	audio::orchestra::DeviceInfo tmp;
	// Add default output format :
	tmp.name = "out";
	tmp.sampleRates.push_back(48000);
	tmp.outputChannels = 2;
	tmp.inputChannels = 0;
	tmp.duplexChannels = 0;
	tmp.isDefaultOutput = true;
	tmp.isDefaultInput = false;
	tmp.nativeFormats.push_back(audio::format_int16);
	m_devices.push_back(tmp);
	// add default input format:
	tmp.name = "in";
	tmp.sampleRates.push_back(48000);
	tmp.outputChannels = 0;
	tmp.inputChannels = 2;
	tmp.duplexChannels = 0;
	tmp.isDefaultOutput = false;
	tmp.isDefaultInput = true;
	tmp.nativeFormats.push_back(audio::format_int16);
	m_devices.push_back(tmp);
	ATA_INFO("Create CoreIOs interface (end)");
}
audio::orchestra::api::CoreIos::~CoreIos(void) {
	ATA_INFO("Destroy CoreIOs interface");
	// Release the RemoteIO AudioUnit created in probeDeviceOpen().
	// NOTE(review): if probeDeviceOpen() was never called, audioUnit is
	// uninitialized here — confirm AudioUnitUninitialize tolerates that.
	AudioUnitUninitialize(m_private->audioUnit);
}
uint32_t audio::orchestra::api::CoreIos::getDeviceCount(void) {
	// Fixed list built in the constructor (one output + one input).
	//ATA_INFO("Get device count:"<< m_devices.size());
	uint32_t nbDevice = m_devices.size();
	return nbDevice;
}
audio::orchestra::DeviceInfo audio::orchestra::api::CoreIos::getDeviceInfo(uint32_t _device) {
	//ATA_INFO("Get device info ...");
	// Fixed: guard against an out-of-range identifier instead of indexing
	// past the end of the vector (undefined behavior).
	if (_device >= m_devices.size()) {
		ATA_ERROR("request device info out of range: " << _device << " >= " << m_devices.size());
		return audio::orchestra::DeviceInfo();
	}
	return m_devices[_device];
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::closeStream(void) {
	ATA_INFO("Close Stream");
	// Can not close the stream now...
	// The AudioUnit is only released in the destructor.
	return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::startStream(void) {
ATA_INFO("Start Stream");
// TODO : Check return ...
audio::orchestra::Api::startStream();
OSStatus status = AudioOutputUnitStart(m_private->audioUnit);
// Can not close the stream now...
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::stopStream(void) {
ATA_INFO("Stop stream");
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
// Can not close the stream now...
return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::CoreIos::abortStream(void) {
ATA_INFO("Abort Stream");
OSStatus status = AudioOutputUnitStop(m_private->audioUnit);
// Can not close the stream now...
return audio::orchestra::error_none;
}
void audio::orchestra::api::CoreIos::callBackEvent(void* _data,
                                                   int32_t _nbChunk) {
	// Fill '_data' with '_nbChunk' stereo int16 frames by invoking the
	// user callback (converting formats when required).
	#if 0
		// Debug helper: synthesize a sine wave instead of calling the user
		// callback. Fixed: the loop referenced the old parameter name
		// '_frameRate'; it now matches the current parameter '_nbChunk'.
		static double value=0;
		int16_t* vals = (int16_t*)_data;
		for (int32_t iii=0; iii<_nbChunk; ++iii) {
			*vals++ = (int16_t)(sin(value) * 32760.0);
			*vals++ = (int16_t)(sin(value) * 32760.0);
			value += 0.09;
			if (value >= M_PI*2.0) {
				value -= M_PI*2.0;
			}
		}
		return;
	#endif
	int32_t doStopStream = 0;
	std11::chrono::system_clock::time_point streamTime = getStreamTime();
	std::vector<enum audio::orchestra::status> status;
	if (m_doConvertBuffer[modeToIdTable(audio::orchestra::mode_output)] == true) {
		// Render into the user buffer, then convert into the device buffer.
		doStopStream = m_callback(nullptr,
		                          streamTime,
		                          &m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0],
		                          streamTime,
		                          _nbChunk,
		                          status);
		convertBuffer((char*)_data, &m_userBuffer[modeToIdTable(audio::orchestra::mode_output)][0], m_convertInfo[modeToIdTable(audio::orchestra::mode_output)]);
	} else {
		// Formats match: render directly into the device buffer.
		doStopStream = m_callback(_data,
		                          streamTime,
		                          nullptr,
		                          streamTime,
		                          _nbChunk,
		                          status);
	}
	if (doStopStream == 2) {
		// User callback requested an immediate abort.
		abortStream();
		return;
	}
	audio::orchestra::Api::tickStreamTime();
}
// RemoteIO render callback: forwards every requested AudioBuffer to the
// CoreIos instance stored in '_userData'.
static OSStatus playbackCallback(void *_userData,
                                 AudioUnitRenderActionFlags* _ioActionFlags,
                                 const AudioTimeStamp* _inTimeStamp,
                                 uint32_t _inBusNumber,
                                 uint32_t _inNumberFrames,
                                 AudioBufferList* _ioData) {
	if (_userData == nullptr) {
		ATA_ERROR("callback event ... nullptr pointer");
		return -1;
	}
	audio::orchestra::api::CoreIos* myClass = static_cast<audio::orchestra::api::CoreIos*>(_userData);
	// get all requested buffer :
	// NOTE(review): 'iii' is signed while mNumberBuffers is unsigned —
	// harmless here, but a compiler warning candidate.
	for (int32_t iii=0; iii < _ioData->mNumberBuffers; iii++) {
		AudioBuffer buffer = _ioData->mBuffers[iii];
		// Frame count: total bytes / 2 channels / bytes-per-sample (int16).
		int32_t numberFrame = buffer.mDataByteSize/2/*stereo*/ /sizeof(int16_t);
		ATA_VERBOSE("request data size: " << numberFrame << " busNumber=" << _inBusNumber);
		myClass->callBackEvent(buffer.mData, numberFrame);
	}
	return noErr;
}
// Configure the stream bookkeeping and create/initialize the RemoteIO
// AudioUnit. Only output mode is supported on this backend.
// @param _device Device id (ignored: iOS exposes fixed devices).
// @param _mode Must be audio::orchestra::mode_output.
// @param _channels Requested user channel count.
// @param _firstChannel Offset of the first channel in the device layout.
// @param _sampleRate Requested sample rate (hardware is set to 48 kHz).
// @param _format Requested user sample format.
// @param _bufferSize In/out buffer size (ignored: forced to 8192 below).
// @param _options Extra stream options (unused here).
// @return true if the AudioUnit chain was set up, false on unsupported mode.
bool audio::orchestra::api::CoreIos::probeDeviceOpen(uint32_t _device,
                                                     audio::orchestra::mode _mode,
                                                     uint32_t _channels,
                                                     uint32_t _firstChannel,
                                                     uint32_t _sampleRate,
                                                     audio::format _format,
                                                     uint32_t *_bufferSize,
                                                     const audio::orchestra::StreamOptions& _options) {
	ATA_INFO("Probe : device=" << _device << " channels=" << _channels << " firstChannel=" << _firstChannel << " sampleRate=" << _sampleRate);
	if (_mode != audio::orchestra::mode_output) {
		ATA_ERROR("Can not start a device input or duplex for CoreIos ...");
		return false;
	}
	bool ret = true;
	// configure Airtaudio internal configuration:
	m_userFormat = _format;
	m_nUserChannels[modeToIdTable(_mode)] = _channels;
	m_bufferSize = 8192;
	m_sampleRate = _sampleRate;
	m_doByteSwap[modeToIdTable(_mode)] = false; // for endienness ...
	// TODO : For now, we write it in hard ==> to be update later ...
	m_deviceFormat[modeToIdTable(_mode)] = audio::format_int16;
	m_nDeviceChannels[modeToIdTable(_mode)] = 2;
	m_deviceInterleaved[modeToIdTable(_mode)] = true;
	// A conversion step is needed when format, channel count or
	// interleaving differ between the user and device sides.
	m_doConvertBuffer[modeToIdTable(_mode)] = false;
	if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (    m_deviceInterleaved[modeToIdTable(_mode)] == false
	     && m_nUserChannels[modeToIdTable(_mode)] > 1) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (m_doConvertBuffer[modeToIdTable(_mode)] == true) {
		// Allocate necessary internal buffers.
		uint64_t bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
		m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
		if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
			ATA_ERROR("error allocating user buffer memory.");
		}
		setConvertInfo(_mode, _firstChannel);
	}
	ATA_INFO("device format : " << m_deviceFormat[modeToIdTable(_mode)] << " user format : " << m_userFormat);
	ATA_INFO("device channels : " << m_nDeviceChannels[modeToIdTable(_mode)] << " user channels : " << m_nUserChannels[modeToIdTable(_mode)]);
	ATA_INFO("do convert buffer : " << m_doConvertBuffer[modeToIdTable(_mode)]);
	if (ret == false) {
		ATA_ERROR("Can not open device.");
	}
	// Configure IOs interface:
	OSStatus status;
	// Describe audio component
	AudioComponentDescription desc;
	desc.componentType = kAudioUnitType_Output;
	desc.componentSubType = kAudioUnitSubType_RemoteIO;
	desc.componentFlags = 0;
	desc.componentFlagsMask = 0;
	desc.componentManufacturer = kAudioUnitManufacturer_Apple;
	// Get component
	AudioComponent inputComponent = AudioComponentFindNext(nullptr, &desc);
	// Get audio units
	status = AudioComponentInstanceNew(inputComponent, &m_private->audioUnit);
	if (status != 0) {
		ATA_ERROR("can not create an audio intance...");
	}
	uint32_t flag = 1;
	// Enable IO for playback
	status = AudioUnitSetProperty(m_private->audioUnit,
	                              kAudioOutputUnitProperty_EnableIO,
	                              kAudioUnitScope_Output,
	                              kOutputBus,
	                              &flag,
	                              sizeof(flag));
	if (status != 0) {
		ATA_ERROR("can not request audio autorisation...");
	}
	// Describe format
	AudioStreamBasicDescription audioFormat;
	audioFormat.mSampleRate = 48000.00;
	audioFormat.mFormatID = kAudioFormatLinearPCM;
	audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	audioFormat.mFramesPerPacket = 1; //
	audioFormat.mChannelsPerFrame = 2; // stereo
	audioFormat.mBitsPerChannel = sizeof(short) * 8;
	audioFormat.mBytesPerPacket = sizeof(short) * audioFormat.mChannelsPerFrame;
	audioFormat.mBytesPerFrame = sizeof(short) * audioFormat.mChannelsPerFrame;
	audioFormat.mReserved = 0;
	// Apply format
	status = AudioUnitSetProperty(m_private->audioUnit,
	                              kAudioUnitProperty_StreamFormat,
	                              kAudioUnitScope_Input,
	                              kOutputBus,
	                              &audioFormat,
	                              sizeof(audioFormat));
	if (status != 0) {
		ATA_ERROR("can not set stream properties...");
	}
	// Set output callback
	AURenderCallbackStruct callbackStruct;
	callbackStruct.inputProc = &playbackCallback;
	callbackStruct.inputProcRefCon = this;
	status = AudioUnitSetProperty(m_private->audioUnit,
	                              kAudioUnitProperty_SetRenderCallback,
	                              kAudioUnitScope_Global,
	                              kOutputBus,
	                              &callbackStruct,
	                              sizeof(callbackStruct));
	if (status != 0) {
		ATA_ERROR("can not set Callback...");
	}
	// Initialise
	status = AudioUnitInitialize(m_private->audioUnit);
	if (status != 0) {
		ATA_ERROR("can not initialize...");
	}
	return ret;
}
#endif

1464
audio/orchestra/api/Ds.cpp Normal file

File diff suppressed because it is too large Load Diff

58
audio/orchestra/api/Ds.h Normal file
View File

@@ -0,0 +1,58 @@
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */
#if !defined(__AUDIO_ORCHESTRA_API_DS_H__) && defined(ORCHESTRA_BUILD_DS)
#define __AUDIO_ORCHESTRA_API_DS_H__

namespace audio {
	namespace orchestra {
		namespace api {
			// Opaque backend state, defined in Ds.cpp.
			class DsPrivate;
			/**
			 * @brief Windows DirectSound backend of the orchestra API.
			 */
			class Ds: public audio::orchestra::Api {
				public:
					static audio::orchestra::Api* Create();
				public:
					Ds();
					virtual ~Ds();
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_ds;
					}
					uint32_t getDeviceCount();
					uint32_t getDefaultOutputDevice();
					uint32_t getDefaultInputDevice();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
					long getStreamLatency();
					// This function is intended for internal use only. It must be
					// public because it is called by the internal callback handler,
					// which is not a member of RtAudio. External use of this function
					// will most likely produce highly undesireable results!
					void callbackEvent();
				private:
					// Thread trampoline handed to the OS ('_userData' carries the instance).
					static void dsCallbackEvent(void *_userData);
					std11::shared_ptr<DsPrivate> m_private;
					bool m_coInitialized; // true when COM was initialized by this instance
					bool m_buffersRolling;
					long m_duplexPrerollBytes;
					bool probeDeviceOpen(uint32_t _device,
					                     enum audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     enum audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,63 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if defined(__DUMMY__)
#include <audio/orchestra/api/Dummy.h>
#include <audio/orchestra/debug.h>
#undef __class__
#define __class__ "api::Dummy"
audio::orchestra::Api* audio::orchestra::api::Dummy::Create() {
	// Factory entry point used by the backend registry.
	audio::orchestra::api::Dummy* instance = new audio::orchestra::api::Dummy();
	return instance;
}
audio::orchestra::api::Dummy::Dummy() {
	// Placeholder backend: it satisfies the interface but drives no device.
	ATA_WARNING("This class provides no functionality.");
}
uint32_t audio::orchestra::api::Dummy::getDeviceCount() {
	// The dummy backend never exposes any device.
	uint32_t nbDevice = 0;
	return nbDevice;
}
audio::orchestra::DeviceInfo audio::orchestra::api::Dummy::getDeviceInfo(uint32_t _device) {
	(void)_device; // no device to describe in the dummy backend
	audio::orchestra::DeviceInfo emptyInfo;
	return emptyInfo;
}
enum audio::orchestra::error audio::orchestra::api::Dummy::closeStream() {
	// Nothing to tear down in the dummy backend.
	return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::Dummy::startStream() {
	// Only update the base-class stream bookkeeping; no device to start.
	// TODO : Check return ...
	audio::orchestra::Api::startStream();
	return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::Dummy::stopStream() {
	// Nothing to stop in the dummy backend.
	return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::Dummy::abortStream() {
	// Nothing to abort in the dummy backend.
	return audio::orchestra::error_none;
}
// The dummy backend can never open a device: always reports failure.
// All parameters are accepted for interface compatibility and ignored.
bool audio::orchestra::api::Dummy::probeDeviceOpen(uint32_t _device,
                                                   audio::orchestra::mode _mode,
                                                   uint32_t _channels,
                                                   uint32_t _firstChannel,
                                                   uint32_t _sampleRate,
                                                   audio::format _format,
                                                   uint32_t *_bufferSize,
                                                   const audio::orchestra::StreamOptions& _options) {
	return false;
}
#endif

View File

@@ -0,0 +1,45 @@
/** @file
 * @author Edouard DUPIN
 * @copyright 2011, Edouard DUPIN, all right reserved
 * @license APACHE v2.0 (see license file)
 * @fork from RTAudio
 */
#if !defined(__AUDIO_ORCHESTRA_DUMMY__) && defined(ORCHESTRA_BUILD_DUMMY)
#define __AUDIO_ORCHESTRA_DUMMY__

#include <audio/orchestra/Interface.h>

namespace audio {
	namespace orchestra {
		namespace api {
			/**
			 * @brief Placeholder backend: implements the API surface but
			 * exposes no device and always fails to open a stream.
			 */
			class Dummy: public audio::orchestra::Api {
				public:
					static audio::orchestra::Api* Create();
				public:
					Dummy();
					enum audio::orchestra::type getCurrentApi() {
						return audio::orchestra::type_dummy;
					}
					uint32_t getDeviceCount();
					audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
					enum audio::orchestra::error closeStream();
					enum audio::orchestra::error startStream();
					enum audio::orchestra::error stopStream();
					enum audio::orchestra::error abortStream();
				private:
					bool probeDeviceOpen(uint32_t _device,
					                     audio::orchestra::mode _mode,
					                     uint32_t _channels,
					                     uint32_t _firstChannel,
					                     uint32_t _sampleRate,
					                     audio::format _format,
					                     uint32_t *_bufferSize,
					                     const audio::orchestra::StreamOptions& _options);
			};
		}
	}
}
#endif

View File

@@ -0,0 +1,734 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
// must run before :
#if defined(ORCHESTRA_BUILD_JACK)
#include <unistd.h>
#include <limits.h>
#include <iostream>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <string.h>
#include <etk/thread/tools.h>
#undef __class__
#define __class__ "api::Jack"
audio::orchestra::Api* audio::orchestra::api::Jack::Create() {
	// Factory entry point used by the backend registry.
	audio::orchestra::api::Jack* instance = new audio::orchestra::api::Jack();
	return instance;
}
// JACK is a low-latency audio server, originally written for the
// GNU/Linux operating system and now also ported to OS-X. It can
// connect a number of different applications to an audio device, as
// well as allowing them to share audio between themselves.
//
// When using JACK with RtAudio, "devices" refer to JACK clients that
// have ports connected to the server. The JACK server is typically
// started in a terminal as follows:
//
// .jackd -d alsa -d hw:0
//
// or through an interface program such as qjackctl. Many of the
// parameters normally set for a stream are fixed by the JACK server
// and can be specified when the JACK server is started. In
// particular,
//
// jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
// jackd -r -d alsa -r 48000
//
// specifies a sample rate of 44100 Hz, a buffer size of 512 sample
// frames, and number of buffers = 4. Once the server is running, it
// is not possible to override these values. If the values are not
// specified in the command-line, the JACK server uses default values.
//
// The JACK server does not have to be running when an instance of
// audio::orchestra::Jack is created, though the function getDeviceCount() will
// report 0 devices found until JACK has been started. When no
// devices are available (i.e., the JACK server is not running), a
// stream cannot be opened.
#include <jack/jack.h>
#include <unistd.h>
#include <cstdio>
namespace audio {
	namespace orchestra {
		namespace api {
			// Private state of the JACK backend (kept out of the header).
			class JackPrivate {
				public:
					jack_client_t *client; // connection to the JACK server
					jack_port_t **ports[2]; // per-direction port arrays (0=playback, 1=capture — TODO confirm index convention)
					std::string deviceName[2];
					bool xrun[2]; // xrun flag per direction
					std11::condition_variable condition;
					int32_t drainCounter; // Tracks callback counts when draining
					bool internalDrain; // Indicates if stop is initiated from callback or not.
					JackPrivate() :
					  client(0),
					  drainCounter(0),
					  internalDrain(false) {
						ports[0] = 0;
						ports[1] = 0;
						xrun[0] = false;
						xrun[1] = false;
					}
			};
		}
	}
}
audio::orchestra::api::Jack::Jack() :
  m_private(new audio::orchestra::api::JackPrivate()) {
	// Nothing to do here.
	// The JACK client itself is only opened when a stream is probed/opened.
}
audio::orchestra::api::Jack::~Jack() {
	// Make sure an open stream is torn down before the backend goes away.
	if (m_state == audio::orchestra::state_closed) {
		return;
	}
	closeStream();
}
// Count JACK "devices" by connecting as a temporary client and grouping
// all visible ports by their client-name prefix (text before ':').
// Returns 0 when the JACK server is not running.
uint32_t audio::orchestra::api::Jack::getDeviceCount() {
	// See if we can become a jack client.
	jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption;
	jack_status_t *status = nullptr;
	jack_client_t *client = jack_client_open("orchestraJackCount", options, status);
	if (client == nullptr) {
		return 0;
	}
	const char **ports;
	std::string port, previousPort;
	uint32_t nChannels = 0, nDevices = 0;
	ports = jack_get_ports(client, nullptr, nullptr, 0);
	if (ports) {
		// Parse the port names up to the first colon (:).
		// Ports of the same client are contiguous, so comparing each prefix
		// against the previous one is enough to count distinct clients.
		size_t iColon = 0;
		do {
			port = (char *) ports[ nChannels ];
			iColon = port.find(":");
			if (iColon != std::string::npos) {
				port = port.substr(0, iColon + 1);
				if (port != previousPort) {
					nDevices++;
					previousPort = port;
				}
			}
		} while (ports[++nChannels]);
		// jack_get_ports() result must be freed by the caller.
		free(ports);
	}
	jack_client_close(client);
	return nDevices;
}
/**
 * @brief Build a DeviceInfo for one JACK "device" (a distinct client
 * name prefix in the JACK port list).
 * @param _device Device index, in port-enumeration order.
 * @return DeviceInfo with probed==true on success; on any failure a
 * partially-filled DeviceInfo with probed==false is returned.
 */
audio::orchestra::DeviceInfo audio::orchestra::api::Jack::getDeviceInfo(uint32_t _device) {
	audio::orchestra::DeviceInfo info;
	info.probed = false;
	// Connect with a throw-away client; never auto-start the server.
	jack_options_t options = (jack_options_t) (JackNoStartServer); //JackNullOption
	jack_status_t *status = nullptr;
	jack_client_t *client = jack_client_open("orchestraJackInfo", options, status);
	if (client == nullptr) {
		ATA_ERROR("Jack server not found or connection error!");
		// TODO : audio::orchestra::error_warning;
		return info;
	}
	const char **ports;
	std::string port, previousPort;
	uint32_t nPorts = 0, nDevices = 0;
	ports = jack_get_ports(client, nullptr, nullptr, 0);
	if (ports) {
		// Parse the port names up to the first colon (:).
		size_t iColon = 0;
		do {
			port = (char *) ports[ nPorts ];
			iColon = port.find(":");
			if (iColon != std::string::npos) {
				port = port.substr(0, iColon);
				if (port != previousPort) {
					// Each new prefix is one device; remember the requested one.
					if (nDevices == _device) {
						info.name = port;
					}
					nDevices++;
					previousPort = port;
				}
			}
		} while (ports[++nPorts]);
		free(ports);
	}
	if (_device >= nDevices) {
		jack_client_close(client);
		ATA_ERROR("device ID is invalid!");
		// TODO : audio::orchestra::error_invalidUse;
		return info;
	}
	// Get the current jack server sample rate.
	info.sampleRates.clear();
	info.sampleRates.push_back(jack_get_sample_rate(client));
	// Count the available ports containing the client name as device
	// channels. Jack "input ports" equal RtAudio output channels.
	uint32_t nChannels = 0;
	ports = jack_get_ports(client, info.name.c_str(), nullptr, JackPortIsInput);
	if (ports) {
		while (ports[ nChannels ]) {
			nChannels++;
		}
		free(ports);
		info.outputChannels = nChannels;
	}
	// Jack "output ports" equal RtAudio input channels.
	nChannels = 0;
	ports = jack_get_ports(client, info.name.c_str(), nullptr, JackPortIsOutput);
	if (ports) {
		while (ports[ nChannels ]) {
			nChannels++;
		}
		free(ports);
		info.inputChannels = nChannels;
	}
	if (info.outputChannels == 0 && info.inputChannels == 0) {
		jack_client_close(client);
		ATA_ERROR("error determining Jack input/output channels!");
		// TODO : audio::orchestra::error_warning;
		return info;
	}
	// If device opens for both playback and capture, we determine the channels.
	if (info.outputChannels > 0 && info.inputChannels > 0) {
		info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
	}
	// Jack always uses 32-bit floats.
	info.nativeFormats.push_back(audio::format_float);
	// Jack doesn't provide default devices so we'll use the first available one.
	if (    _device == 0
	     && info.outputChannels > 0) {
		info.isDefaultOutput = true;
	}
	if (    _device == 0
	     && info.inputChannels > 0) {
		info.isDefaultInput = true;
	}
	jack_client_close(client);
	info.probed = true;
	return info;
}
/**
 * @brief Trampoline registered with jack_set_process_callback().
 *
 * Forwards the process cycle to the Jack instance stored in _userData.
 * @return 0 to keep processing, non-zero to ask JACK to stop calling us.
 */
int32_t audio::orchestra::api::Jack::jackCallbackHandler(jack_nframes_t _nframes, void* _userData) {
	ATA_VERBOSE("Jack callback: [BEGIN] " << uint64_t(_userData));
	audio::orchestra::api::Jack* self = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
	bool keepGoing = self->callbackEvent((uint64_t)_nframes);
	if (!keepGoing) {
		ATA_VERBOSE("Jack callback: [END] 1");
		return 1;
	}
	ATA_VERBOSE("Jack callback: [END] 0");
	return 0;
}
// This function will be called by a spawned thread when the Jack
// server signals that it is shutting down. It is necessary to handle
// it this way because the jackShutdown() function must return before
// the jack_deactivate() function (in closeStream()) will return.
void audio::orchestra::api::Jack::jackCloseStream(void* _userData) {
	etk::thread::setName("Jack_closeStream");
	// _userData is the Jack instance that registered the shutdown hook.
	audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
	myClass->closeStream();
}
// Hook registered with jack_on_shutdown(): invoked by JACK when the
// server deactivates or shuts down this client.
void audio::orchestra::api::Jack::jackShutdown(void* _userData) {
	audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
	// Check current stream state. If stopped, then we'll assume this
	// was called as a result of a call to audio::orchestra::api::Jack::stopStream (the
	// deactivation of a client handle causes this function to be called).
	// If not, we'll assume the Jack server is shutting down or some
	// other problem occurred and we should close the stream.
	if (myClass->isStreamRunning() == false) {
		return;
	}
	// NOTE(review): the heap-allocated thread object is never joined,
	// detached, or deleted -- the handle leaks. Confirm whether std11::thread
	// tolerates destruction-by-leak here or should be detached explicitly.
	new std11::thread(&audio::orchestra::api::Jack::jackCloseStream, _userData);
	ATA_ERROR("The Jack server is shutting down this client ... stream stopped and closed!!");
}
int32_t audio::orchestra::api::Jack::jackXrun(void* _userData) {
audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
if (myClass->m_private->ports[0]) {
myClass->m_private->xrun[0] = true;
}
if (myClass->m_private->ports[1]) {
myClass->m_private->xrun[1] = true;
}
return 0;
}
/**
 * @brief Probe the requested JACK device and configure one direction
 * (output or input) of the stream.
 *
 * Called once per direction; on the input pass of a duplex setup the
 * JACK client created by the earlier output pass is reused.
 * @param _device Index of the device (a distinct "client:" port prefix).
 * @param _mode Direction to configure (mode_output or mode_input).
 * @param _channels Number of channels requested by the user.
 * @param _firstChannel Channel offset inside the device's port list.
 * @param _sampleRate Requested rate; must equal the JACK server rate.
 * @param _format User data format (JACK itself is always 32-bit float).
 * @param _bufferSize In/out: set to the JACK server period size.
 * @param _options Stream options (streamName is used as the client name).
 * @return true on success, false otherwise.
 */
bool audio::orchestra::api::Jack::probeDeviceOpen(uint32_t _device,
                                                  audio::orchestra::mode _mode,
                                                  uint32_t _channels,
                                                  uint32_t _firstChannel,
                                                  uint32_t _sampleRate,
                                                  audio::format _format,
                                                  uint32_t* _bufferSize,
                                                  const audio::orchestra::StreamOptions& _options) {
	// Look for jack server and try to become a client (only do once per stream).
	jack_client_t *client = 0;
	if (    _mode == audio::orchestra::mode_output
	     || (    _mode == audio::orchestra::mode_input
	          && m_mode != audio::orchestra::mode_output)) {
		jack_options_t jackoptions = (jack_options_t) (JackNoStartServer); //JackNullOption;
		jack_status_t *status = nullptr;
		if (!_options.streamName.empty()) {
			client = jack_client_open(_options.streamName.c_str(), jackoptions, status);
		} else {
			client = jack_client_open("orchestraJack", jackoptions, status);
		}
		if (client == 0) {
			ATA_ERROR("Jack server not found or connection error!");
			return false;
		}
	} else {
		// The handle must have been created on an earlier pass.
		client = m_private->client;
	}
	const char **ports;
	std::string port, previousPort, deviceName;
	uint32_t nPorts = 0, nDevices = 0;
	ports = jack_get_ports(client, nullptr, nullptr, 0);
	if (ports) {
		// Parse the port names up to the first colon (:).
		size_t iColon = 0;
		do {
			port = (char *) ports[ nPorts ];
			iColon = port.find(":");
			if (iColon != std::string::npos) {
				port = port.substr(0, iColon);
				if (port != previousPort) {
					if (nDevices == _device) {
						deviceName = port;
					}
					nDevices++;
					previousPort = port;
				}
			}
		} while (ports[++nPorts]);
		free(ports);
	}
	if (_device >= nDevices) {
		ATA_ERROR("device ID is invalid!");
		return false;
	}
	// Count the available ports containing the client name as device
	// channels. Jack "input ports" equal RtAudio output channels.
	uint32_t nChannels = 0;
	uint64_t flag = JackPortIsInput;
	if (_mode == audio::orchestra::mode_input) flag = JackPortIsOutput;
	ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag);
	if (ports) {
		while (ports[ nChannels ]) {
			nChannels++;
		}
		free(ports);
	}
	// Compare the jack ports for specified client to the requested number of channels.
	if (nChannels < (_channels + _firstChannel)) {
		ATA_ERROR("requested number of channels (" << _channels << ") + offset (" << _firstChannel << ") not found for specified device (" << _device << ":" << deviceName << ").");
		return false;
	}
	// Check the jack server sample rate.
	uint32_t jackRate = jack_get_sample_rate(client);
	if (_sampleRate != jackRate) {
		jack_client_close(client);
		ATA_ERROR("the requested sample rate (" << _sampleRate << ") is different than the JACK server rate (" << jackRate << ").");
		return false;
	}
	m_sampleRate = jackRate;
	// Get the latency of the JACK port.
	// BUGFIX: jack_get_ports() can return nullptr (e.g. the device's
	// ports vanished between calls); the previous code dereferenced the
	// result unconditionally.
	ports = jack_get_ports(client, deviceName.c_str(), nullptr, flag);
	if (ports != nullptr) {
		if (ports[ _firstChannel ]) {
			// Added by Ge Wang
			jack_latency_callback_mode_t cbmode = (_mode == audio::orchestra::mode_input ? JackCaptureLatency : JackPlaybackLatency);
			// the range (usually the min and max are equal)
			jack_latency_range_t latrange; latrange.min = latrange.max = 0;
			// get the latency range
			jack_port_get_latency_range(jack_port_by_name(client, ports[_firstChannel]), cbmode, &latrange);
			// be optimistic, use the min!
			m_latency[modeToIdTable(_mode)] = latrange.min;
			//m_latency[modeToIdTable(_mode)] = jack_port_get_latency(jack_port_by_name(client, ports[ _firstChannel ]));
		}
		free(ports);
	}
	// The jack server always uses 32-bit floating-point data.
	m_deviceFormat[modeToIdTable(_mode)] = audio::format_float;
	m_userFormat = _format;
	// Jack always uses non-interleaved buffers.
	m_deviceInterleaved[modeToIdTable(_mode)] = false;
	// Jack always provides host byte-ordered data.
	m_doByteSwap[modeToIdTable(_mode)] = false;
	// Get the buffer size. The buffer size and number of buffers
	// (periods) is set when the jack server is started.
	m_bufferSize = (int) jack_get_buffer_size(client);
	*_bufferSize = m_bufferSize;
	m_nDeviceChannels[modeToIdTable(_mode)] = _channels;
	m_nUserChannels[modeToIdTable(_mode)] = _channels;
	// Set flags for buffer conversion.
	m_doConvertBuffer[modeToIdTable(_mode)] = false;
	if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
		ATA_CRITICAL("Can not update format ==> use RIVER lib for this ...");
	}
	if (    m_deviceInterleaved[modeToIdTable(_mode)] == false
	     && m_nUserChannels[modeToIdTable(_mode)] > 1) {
		ATA_ERROR("Reorder channel for the interleaving properties ...");
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	// Allocate our JackHandle structure for the stream.
	m_private->client = client;
	m_private->deviceName[modeToIdTable(_mode)] = deviceName;
	// Allocate necessary internal buffers.
	uint64_t bufferBytes;
	bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
	ATA_VERBOSE("allocate : nbChannel=" << m_nUserChannels[modeToIdTable(_mode)] << " bufferSize=" << *_bufferSize << " format=" << m_deviceFormat[modeToIdTable(_mode)] << "=" << audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]));
	m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
	if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
		ATA_ERROR("error allocating user buffer memory.");
		goto error;
	}
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		bool makeBuffer = true;
		if (_mode == audio::orchestra::mode_output) {
			bufferBytes = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
		} else { // _mode == audio::orchestra::mode_input
			bufferBytes = m_nDeviceChannels[1] * audio::getFormatBytes(m_deviceFormat[1]);
			if (m_mode == audio::orchestra::mode_output && m_deviceBuffer) {
				// A device buffer already exists from the output pass;
				// keep it if it is large enough for the input side too.
				uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
				if (bufferBytes < bytesOut) {
					makeBuffer = false;
				}
			}
		}
		if (makeBuffer) {
			bufferBytes *= *_bufferSize;
			if (m_deviceBuffer) free(m_deviceBuffer);
			m_deviceBuffer = (char *) calloc(bufferBytes, 1);
			if (m_deviceBuffer == nullptr) {
				ATA_ERROR("error allocating device buffer memory.");
				goto error;
			}
		}
	}
	// Allocate memory for the Jack ports (channels) identifiers.
	m_private->ports[modeToIdTable(_mode)] = (jack_port_t **) malloc (sizeof (jack_port_t *) * _channels);
	if (m_private->ports[modeToIdTable(_mode)] == nullptr) {
		ATA_ERROR("error allocating port memory.");
		goto error;
	}
	m_device[modeToIdTable(_mode)] = _device;
	m_channelOffset[modeToIdTable(_mode)] = _firstChannel;
	m_state = audio::orchestra::state_stopped;
	if (    m_mode == audio::orchestra::mode_output
	     && _mode == audio::orchestra::mode_input) {
		// We had already set up the stream for output.
		m_mode = audio::orchestra::mode_duplex;
	} else {
		m_mode = _mode;
		// Install the static trampolines only on the first pass.
		jack_set_process_callback(m_private->client, &audio::orchestra::api::Jack::jackCallbackHandler, this);
		jack_set_xrun_callback(m_private->client, &audio::orchestra::api::Jack::jackXrun, this);
		jack_on_shutdown(m_private->client, &audio::orchestra::api::Jack::jackShutdown, this);
	}
	// Register our ports.
	{
		char label[64];
		if (_mode == audio::orchestra::mode_output) {
			for (uint32_t i=0; i<m_nUserChannels[0]; i++) {
				snprintf(label, 64, "outport %d", i);
				m_private->ports[0][i] = jack_port_register(m_private->client,
				                                            (const char *)label,
				                                            JACK_DEFAULT_AUDIO_TYPE,
				                                            JackPortIsOutput,
				                                            0);
			}
		} else {
			for (uint32_t i=0; i<m_nUserChannels[1]; i++) {
				snprintf(label, 64, "inport %d", i);
				m_private->ports[1][i] = jack_port_register(m_private->client,
				                                            (const char *)label,
				                                            JACK_DEFAULT_AUDIO_TYPE,
				                                            JackPortIsInput,
				                                            0);
			}
		}
	}
	// Setup the buffer conversion information structure. We don't use
	// buffers to do channel offsets, so we override that parameter
	// here.
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		setConvertInfo(_mode, 0);
	}
	return true;
error:
	// Common cleanup: release the client and every buffer allocated above.
	jack_client_close(m_private->client);
	if (m_private->ports[0] != nullptr) {
		free(m_private->ports[0]);
		m_private->ports[0] = nullptr;
	}
	if (m_private->ports[1] != nullptr) {
		free(m_private->ports[1]);
		m_private->ports[1] = nullptr;
	}
	for (int32_t iii=0; iii<2; ++iii) {
		m_userBuffer[iii].clear();
	}
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = nullptr;
	}
	return false;
}
/**
 * @brief Close the JACK stream: deactivate and close the client, free
 * the port arrays and conversion buffers, and mark the stream closed.
 * @return error_warning if no stream was open, error_none otherwise.
 */
enum audio::orchestra::error audio::orchestra::api::Jack::closeStream() {
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("no open stream to close!");
		return audio::orchestra::error_warning;
	}
	// BUGFIX: all m_private accesses now live inside the nullptr guard;
	// the previous code checked m_private for the client teardown but
	// dereferenced it unconditionally to free the port arrays.
	if (m_private != nullptr) {
		if (m_state == audio::orchestra::state_running) {
			jack_deactivate(m_private->client);
		}
		jack_client_close(m_private->client);
		if (m_private->ports[0] != nullptr) {
			free(m_private->ports[0]);
			m_private->ports[0] = nullptr;
		}
		if (m_private->ports[1] != nullptr) {
			free(m_private->ports[1]);
			m_private->ports[1] = nullptr;
		}
	}
	for (int32_t i=0; i<2; i++) {
		m_userBuffer[i].clear();
	}
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = nullptr;
	}
	m_mode = audio::orchestra::mode_unknow;
	m_state = audio::orchestra::state_closed;
	return audio::orchestra::error_none;
}
/**
 * @brief Activate the JACK client and connect our registered ports to
 * the device's physical ports, then mark the stream running.
 * @return error_none on success, error_fail when no valid stream,
 * error_warning if already running, error_systemError otherwise.
 */
enum audio::orchestra::error audio::orchestra::api::Jack::startStream() {
	// TODO : Check return ...
	audio::orchestra::Api::startStream();
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_running) {
		ATA_ERROR("the stream is already running!");
		return audio::orchestra::error_warning;
	}
	int32_t result = jack_activate(m_private->client);
	if (result) {
		ATA_ERROR("unable to activate JACK client!");
		goto unlock;
	}
	const char **ports;
	// Get the list of available ports.
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		// result stays 1 (failure) until a connection succeeds below.
		result = 1;
		ports = jack_get_ports(m_private->client, m_private->deviceName[0].c_str(), nullptr, JackPortIsInput);
		if (ports == nullptr) {
			ATA_ERROR("error determining available JACK input ports!");
			goto unlock;
		}
		// Now make the port connections. Since RtAudio wasn't designed to
		// allow the user to select particular channels of a device, we'll
		// just open the first "nChannels" ports with offset.
		for (uint32_t i=0; i<m_nUserChannels[0]; i++) {
			result = 1;
			if (ports[ m_channelOffset[0] + i ])
				result = jack_connect(m_private->client, jack_port_name(m_private->ports[0][i]), ports[ m_channelOffset[0] + i ]);
			if (result) {
				free(ports);
				ATA_ERROR("error connecting output ports!");
				goto unlock;
			}
		}
		free(ports);
	}
	if (    m_mode == audio::orchestra::mode_input
	     || m_mode == audio::orchestra::mode_duplex) {
		result = 1;
		ports = jack_get_ports(m_private->client, m_private->deviceName[1].c_str(), nullptr, JackPortIsOutput);
		if (ports == nullptr) {
			ATA_ERROR("error determining available JACK output ports!");
			goto unlock;
		}
		// Now make the port connections. See note above.
		for (uint32_t i=0; i<m_nUserChannels[1]; i++) {
			result = 1;
			if (ports[ m_channelOffset[1] + i ]) {
				result = jack_connect(m_private->client, ports[ m_channelOffset[1] + i ], jack_port_name(m_private->ports[1][i]));
			}
			if (result) {
				free(ports);
				ATA_ERROR("error connecting input ports!");
				goto unlock;
			}
		}
		free(ports);
	}
	// Reset the drain machinery for this run.
	m_private->drainCounter = 0;
	m_private->internalDrain = false;
	m_state = audio::orchestra::state_running;
unlock:
	if (result == 0) {
		return audio::orchestra::error_none;
	}
	return audio::orchestra::error_systemError;
}
/**
 * @brief Stop the stream, letting pending output drain first.
 * @return error_none on success, error_fail when no valid stream,
 * error_warning if already stopped.
 */
enum audio::orchestra::error audio::orchestra::api::Jack::stopStream() {
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		return audio::orchestra::error_warning;
	}
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_private->drainCounter == 0) {
			// Ask the process callback to play silence for a couple of
			// cycles, then wake us via m_private->condition.
			m_private->drainCounter = 2;
			std11::unique_lock<std11::mutex> lck(m_mutex);
			// NOTE(review): wait() has no predicate, so a spurious wakeup
			// would deactivate before the drain finishes -- confirm whether
			// that is acceptable here.
			m_private->condition.wait(lck);
		}
	}
	jack_deactivate(m_private->client);
	m_state = audio::orchestra::state_stopped;
	return audio::orchestra::error_none;
}
/**
 * @brief Abort the stream without waiting for an output drain.
 *
 * Forces the drain counter straight to the "play silence" phase so the
 * subsequent stopStream() call does not block on the drain condition.
 * @return Result of stopStream(), error_fail when no valid stream,
 * error_warning if already stopped.
 */
enum audio::orchestra::error audio::orchestra::api::Jack::abortStream() {
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state != audio::orchestra::state_stopped) {
		m_private->drainCounter = 2;
		return stopStream();
	}
	ATA_ERROR("the stream is already stopped!");
	return audio::orchestra::error_warning;
}
// This function will be called by a spawned thread when the user
// callback function signals that the stream should be stopped or
// aborted. It is necessary to handle it this way because the
// callbackEvent() function must return before the jack_deactivate()
// function will return.
static void jackStopStream(void* _userData) {
	etk::thread::setName("Jack_stopStream");
	// _userData is the Jack instance whose callback requested the stop.
	audio::orchestra::api::Jack* myClass = reinterpret_cast<audio::orchestra::api::Jack*>(_userData);
	myClass->stopStream();
}
/**
 * @brief Per-cycle JACK processing: run the user callback, then move
 * audio between the user buffers and the JACK port buffers.
 * @param _nframes Frames in this JACK cycle; must equal the buffer size
 * captured in probeDeviceOpen().
 * @return false on unrecoverable error (closed stream or a buffer-size
 * change), true otherwise.
 */
bool audio::orchestra::api::Jack::callbackEvent(uint64_t _nframes) {
	if (    m_state == audio::orchestra::state_stopped
	     || m_state == audio::orchestra::state_stopping) {
		return true;
	}
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("the stream is closed ... this shouldn't happen!");
		return false;
	}
	if (m_bufferSize != _nframes) {
		ATA_ERROR("the JACK buffer size has changed ... cannot process!");
		return false;
	}
	// Check if we were draining the stream and signal is finished.
	if (m_private->drainCounter > 3) {
		m_state = audio::orchestra::state_stopping;
		if (m_private->internalDrain == true) {
			// Stop was requested from inside the callback: deactivate from
			// a separate thread (calling jack_deactivate() here would block).
			new std11::thread(jackStopStream, this);
		} else {
			// Stop was requested from stopStream(): wake the waiting thread.
			m_private->condition.notify_one();
		}
		return true;
	}
	// Invoke user callback first, to get fresh output data.
	if (m_private->drainCounter == 0) {
		std11::chrono::time_point<std11::chrono::system_clock> streamTime = getStreamTime();
		std::vector<enum audio::orchestra::status> status;
		// Report (and clear) any xruns flagged by jackXrun() since last cycle.
		if (m_mode != audio::orchestra::mode_input && m_private->xrun[0] == true) {
			status.push_back(audio::orchestra::status_underflow);
			m_private->xrun[0] = false;
		}
		if (m_mode != audio::orchestra::mode_output && m_private->xrun[1] == true) {
			status.push_back(audio::orchestra::status_overflow);
			m_private->xrun[1] = false;
		}
		int32_t cbReturnValue = m_callback(&m_userBuffer[1][0],
		                                   streamTime,
		                                   &m_userBuffer[0][0],
		                                   streamTime,
		                                   m_bufferSize,
		                                   status);
		if (cbReturnValue == 2) {
			// Callback requested an immediate abort.
			m_state = audio::orchestra::state_stopping;
			m_private->drainCounter = 2;
			new std11::thread(jackStopStream, this);
			return true;
		}
		else if (cbReturnValue == 1) {
			// Callback requested a graceful stop: drain the output first.
			m_private->drainCounter = 1;
			m_private->internalDrain = true;
		}
	}
	jack_default_audio_sample_t *jackbuffer;
	uint64_t bufferBytes = _nframes * sizeof(jack_default_audio_sample_t);
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_private->drainCounter > 1) { // write zeros to the output stream
			for (uint32_t i=0; i<m_nDeviceChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[0][i], (jack_nframes_t) _nframes);
				memset(jackbuffer, 0, bufferBytes);
			}
		} else if (m_doConvertBuffer[0]) {
			convertBuffer(m_deviceBuffer, &m_userBuffer[0][0], m_convertInfo[0]);
			for (uint32_t i=0; i<m_nDeviceChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[0][i], (jack_nframes_t) _nframes);
				memcpy(jackbuffer, &m_deviceBuffer[i*bufferBytes], bufferBytes);
			}
		} else { // no buffer conversion
			for (uint32_t i=0; i<m_nUserChannels[0]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[0][i], (jack_nframes_t) _nframes);
				memcpy(jackbuffer, &m_userBuffer[0][i*bufferBytes], bufferBytes);
			}
		}
		if (m_private->drainCounter) {
			// While draining, skip the input side and just count cycles.
			m_private->drainCounter++;
			goto unlock;
		}
	}
	if (    m_mode == audio::orchestra::mode_input
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_doConvertBuffer[1]) {
			for (uint32_t i=0; i<m_nDeviceChannels[1]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[1][i], (jack_nframes_t) _nframes);
				memcpy(&m_deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes);
			}
			convertBuffer(&m_userBuffer[1][0], m_deviceBuffer, m_convertInfo[1]);
		} else {
			// no buffer conversion
			for (uint32_t i=0; i<m_nUserChannels[1]; i++) {
				jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer(m_private->ports[1][i], (jack_nframes_t) _nframes);
				memcpy(&m_userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes);
			}
		}
	}
unlock:
	audio::orchestra::Api::tickStreamTime();
	return true;
}
#endif

View File

@@ -0,0 +1,58 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_JACK_H__) && defined(ORCHESTRA_BUILD_JACK)
#define __AUDIO_ORCHESTRA_API_JACK_H__
#include <jack/jack.h>
namespace audio {
namespace orchestra {
namespace api {
class JackPrivate;
/**
 * @brief JACK (Jack Audio Connection Kit) backend implementation of
 * the audio::orchestra::Api interface.
 */
class Jack: public audio::orchestra::Api {
	public:
		// Factory entry point used by the backend registry.
		static audio::orchestra::Api* Create();
	public:
		Jack();
		virtual ~Jack();
		// Identify this backend to the generic API layer.
		enum audio::orchestra::type getCurrentApi() {
			return audio::orchestra::type_jack;
		}
		// Number of distinct JACK "client:" port prefixes currently visible.
		uint32_t getDeviceCount();
		audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
		enum audio::orchestra::error closeStream();
		enum audio::orchestra::error startStream();
		enum audio::orchestra::error stopStream();
		enum audio::orchestra::error abortStream();
		long getStreamLatency();
		// This function is intended for internal use only. It must be
		// public because it is called by the internal callback handler,
		// which is not a member of RtAudio. External use of this function
		// will most likely produce highly undesirable results!
		bool callbackEvent(uint64_t _nframes);
	private:
		// Static trampolines registered with the JACK C API.
		static int32_t jackXrun(void* _userData);
		static void jackCloseStream(void* _userData);
		static void jackShutdown(void* _userData);
		static int32_t jackCallbackHandler(jack_nframes_t _nframes, void* _userData);
	private:
		std11::shared_ptr<JackPrivate> m_private; // per-stream JACK state
		bool probeDeviceOpen(uint32_t _device,
		                     audio::orchestra::mode _mode,
		                     uint32_t _channels,
		                     uint32_t _firstChannel,
		                     uint32_t _sampleRate,
		                     audio::format _format,
		                     uint32_t *_bufferSize,
		                     const audio::orchestra::StreamOptions& _options);
};
}
}
}
#endif

830
audio/orchestra/api/Oss.cpp Normal file
View File

@@ -0,0 +1,830 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if defined(ORCHESTRA_BUILD_OSS)
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <fcntl.h>
#include "soundcard.h"
#include <errno.h>
#include <math.h>
#undef __class__
#define __class__ "api::Oss"
// Factory entry point used by the backend registry; caller owns the result.
audio::orchestra::Api* audio::orchestra::api::Oss::Create() {
	return new audio::orchestra::api::Oss();
}
static void *ossCallbackHandler(void* _userData);
namespace audio {
namespace orchestra {
namespace api {
// Internal OSS stream state: device file descriptors plus the worker
// thread that pumps the blocking device I/O.
class OssPrivate {
	public:
		int32_t id[2]; // device ids
		bool xrun[2]; // over/underflow flags, reported to the user callback
		bool triggered; // set once duplex devices have been started together
		std11::condition_variable runnable; // wakes the callback thread when the stream starts
		std11::shared_ptr<std11::thread> thread; // callback pump thread
		bool threadRunning; // loop condition for the callback thread
		OssPrivate():
		  triggered(false),
		  threadRunning(false) {
			id[0] = 0;
			id[1] = 0;
			xrun[0] = false;
			xrun[1] = false;
		}
};
}
}
}
// Construct the OSS backend: only allocates the private state; devices
// are opened later in probeDeviceOpen().
audio::orchestra::api::Oss::Oss() :
  m_private(new audio::orchestra::api::OssPrivate()) {
	// Nothing to do here.
}
// Destructor: make sure any open stream is shut down so the device
// descriptors and the callback thread are released.
audio::orchestra::api::Oss::~Oss() {
	if (m_state != audio::orchestra::state_closed) {
		closeStream();
	}
}
/**
 * @brief Count the audio devices known to the OSS v4 mixer.
 * @return sysinfo.numaudios, or 0 when /dev/mixer cannot be opened or
 * queried (OSS >= 4.0 is required for SNDCTL_SYSINFO).
 */
uint32_t audio::orchestra::api::Oss::getDeviceCount() {
	// The OSS v4 mixer device exposes the global system information.
	int32_t mixerFd = open("/dev/mixer", O_RDWR, 0);
	if (mixerFd == -1) {
		ATA_ERROR("error opening '/dev/mixer'.");
		return 0;
	}
	uint32_t deviceCount = 0;
	oss_sysinfo sysinfo;
	if (ioctl(mixerFd, SNDCTL_SYSINFO, &sysinfo) != -1) {
		deviceCount = sysinfo.numaudios;
	} else {
		ATA_ERROR("error getting sysinfo, OSS version >= 4.0 is required.");
	}
	close(mixerFd);
	return deviceCount;
}
/**
 * @brief Probe one OSS audio device and fill in its capabilities.
 * @param _device OSS device index (0 .. getDeviceCount()-1).
 * @return Filled DeviceInfo; info.probed stays false on any failure.
 */
audio::orchestra::DeviceInfo audio::orchestra::api::Oss::getDeviceInfo(uint32_t _device) {
	// BUGFIX: was 'rtaudio::DeviceInfo' (leftover from the RtAudio fork);
	// it must match the declared return type to compile.
	audio::orchestra::DeviceInfo info;
	info.probed = false;
	int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
	if (mixerfd == -1) {
		ATA_ERROR("error opening '/dev/mixer'.");
		return info;
	}
	oss_sysinfo sysinfo;
	int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
	if (result == -1) {
		close(mixerfd);
		ATA_ERROR("error getting sysinfo, OSS version >= 4.0 is required.");
		return info;
	}
	unsigned nDevices = sysinfo.numaudios;
	if (nDevices == 0) {
		close(mixerfd);
		ATA_ERROR("no devices found!");
		return info;
	}
	if (_device >= nDevices) {
		close(mixerfd);
		ATA_ERROR("device ID is invalid!");
		return info;
	}
	oss_audioinfo ainfo;
	ainfo.dev = _device;
	result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
	close(mixerfd);
	if (result == -1) {
		ATA_ERROR("error getting device (" << ainfo.name << ") info.");
		// TODO : audio::orchestra::error_warning;
		return info;
	}
	// Probe channels. BUGFIX: the mechanical 'mode_output' rename had
	// corrupted the OSS capability macros (PCM_CAP_OUTPUT/INPUT/DUPLEX).
	if (ainfo.caps & PCM_CAP_OUTPUT) {
		info.outputChannels = ainfo.max_channels;
	}
	if (ainfo.caps & PCM_CAP_INPUT) {
		info.inputChannels = ainfo.max_channels;
	}
	if (ainfo.caps & PCM_CAP_DUPLEX) {
		if (    info.outputChannels > 0
		     && info.inputChannels > 0) {
			info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
		}
	}
	// Probe data formats ... do for input
	uint64_t mask = ainfo.iformats;
	if (    mask & AFMT_S16_LE
	     || mask & AFMT_S16_BE) {
		info.nativeFormats.push_back(audio::format_int16);
	}
	if (mask & AFMT_S8) {
		info.nativeFormats.push_back(audio::format_int8);
	}
	if (    mask & AFMT_S32_LE
	     || mask & AFMT_S32_BE) {
		info.nativeFormats.push_back(audio::format_int32);
	}
	if (mask & AFMT_FLOAT) {
		info.nativeFormats.push_back(audio::format_float);
	}
	if (    mask & AFMT_S24_LE
	     || mask & AFMT_S24_BE) {
		info.nativeFormats.push_back(audio::format_int24);
	}
	// Check that we have at least one supported format. BUGFIX: was
	// 'info.nativeFormats == 0', which does not compile for a vector.
	if (info.nativeFormats.empty()) {
		ATA_ERROR("device (" << ainfo.name << ") data format not supported by RtAudio.");
		return info;
	}
	// Probe the supported sample rates.
	info.sampleRates.clear();
	if (ainfo.nrates) {
		// The driver reports an explicit rate list: keep those we know.
		for (uint32_t i=0; i<ainfo.nrates; i++) {
			for (uint32_t k=0; k<MAX_SAMPLE_RATES; k++) {
				if (ainfo.rates[i] == SAMPLE_RATES[k]) {
					info.sampleRates.push_back(SAMPLE_RATES[k]);
					break;
				}
			}
		}
	} else {
		// Check min and max rate values;
		for (uint32_t k=0; k<MAX_SAMPLE_RATES; k++) {
			if (    ainfo.min_rate <= (int) SAMPLE_RATES[k]
			     && ainfo.max_rate >= (int) SAMPLE_RATES[k]) {
				info.sampleRates.push_back(SAMPLE_RATES[k]);
			}
		}
	}
	if (info.sampleRates.size() == 0) {
		ATA_ERROR("no supported sample rates found for device (" << ainfo.name << ").");
	} else {
		info.probed = true;
		info.name = ainfo.name;
	}
	return info;
}
bool audio::orchestra::api::Oss::probeDeviceOpen(uint32_t _device,
StreamMode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
rtaudio::format _format,
uint32_t* _bufferSize,
const audio::orchestra::StreamOptions& _options) {
int32_t mixerfd = open("/dev/mixer", O_RDWR, 0);
if (mixerfd == -1) {
ATA_ERROR("error opening '/dev/mixer'.");
return false;
}
oss_sysinfo sysinfo;
int32_t result = ioctl(mixerfd, SNDCTL_SYSINFO, &sysinfo);
if (result == -1) {
close(mixerfd);
ATA_ERROR("error getting sysinfo, OSS version >= 4.0 is required.");
return false;
}
unsigned nDevices = sysinfo.numaudios;
if (nDevices == 0) {
// This should not happen because a check is made before this function is called.
close(mixerfd);
ATA_ERROR("no devices found!");
return false;
}
if (_device >= nDevices) {
// This should not happen because a check is made before this function is called.
close(mixerfd);
ATA_ERROR("device ID is invalid!");
return false;
}
oss_audioinfo ainfo;
ainfo.dev = _device;
result = ioctl(mixerfd, SNDCTL_AUDIOINFO, &ainfo);
close(mixerfd);
if (result == -1) {
ATA_ERROR("error getting device (" << ainfo.name << ") info.");
return false;
}
// Check if device supports input or output
if ( ( _mode == audio::orchestra::mode_output
&& !(ainfo.caps & PCM_CAP_audio::orchestra::mode_output))
|| ( _mode == audio::orchestra::mode_input
&& !(ainfo.caps & PCM_CAP_audio::orchestra::mode_input))) {
if (_mode == audio::orchestra::mode_output) {
ATA_ERROR("device (" << ainfo.name << ") does not support output.");
} else {
ATA_ERROR("device (" << ainfo.name << ") does not support input.");
}
return false;
}
int32_t flags = 0;
if (_mode == audio::orchestra::mode_output) {
flags |= O_WRONLY;
} else { // _mode == audio::orchestra::mode_input
if ( m_mode == audio::orchestra::mode_output
&& m_device[0] == _device) {
// We just set the same device for playback ... close and reopen for duplex (OSS only).
close(m_private->id[0]);
m_private->id[0] = 0;
if (!(ainfo.caps & PCM_CAP_audio::orchestra::mode_duplex)) {
ATA_ERROR("device (" << ainfo.name << ") does not support duplex mode.");
return false;
}
// Check that the number previously set channels is the same.
if (m_nUserChannels[0] != _channels) {
ATA_ERROR("input/output channels must be equal for OSS duplex device (" << ainfo.name << ").");
return false;
}
flags |= O_RDWR;
} else {
flags |= O_RDONLY;
}
}
// Set exclusive access if specified.
if (_options.flags & RTAUDIO_HOG_DEVICE) {
flags |= O_EXCL;
}
// Try to open the device.
int32_t fd;
fd = open(ainfo.devnode, flags, 0);
if (fd == -1) {
if (errno == EBUSY) {
ATA_ERROR("device (" << ainfo.name << ") is busy.");
} else {
ATA_ERROR("error opening device (" << ainfo.name << ").");
}
return false;
}
// For duplex operation, specifically set this mode (this doesn't seem to work).
/*
if (flags | O_RDWR) {
result = ioctl(fd, SNDCTL_DSP_SETaudio::orchestra::mode_duplex, nullptr);
if (result == -1) {
m_errorStream << "error setting duplex mode for device (" << ainfo.name << ").";
m_errorText = m_errorStream.str();
return false;
}
}
*/
// Check the device channel support.
m_nUserChannels[modeToIdTable(_mode)] = _channels;
if (ainfo.max_channels < (int)(_channels + _firstChannel)) {
close(fd);
ATA_ERROR("the device (" << ainfo.name << ") does not support requested channel parameters.");
return false;
}
// Set the number of channels.
int32_t deviceChannels = _channels + _firstChannel;
result = ioctl(fd, SNDCTL_DSP_CHANNELS, &deviceChannels);
if ( result == -1
|| deviceChannels < (int)(_channels + _firstChannel)) {
close(fd);
ATA_ERROR("error setting channel parameters on device (" << ainfo.name << ").");
return false;
}
m_nDeviceChannels[modeToIdTable(_mode)] = deviceChannels;
// Get the data format mask
int32_t mask;
result = ioctl(fd, SNDCTL_DSP_GETFMTS, &mask);
if (result == -1) {
close(fd);
ATA_ERROR("error getting device (" << ainfo.name << ") data formats.");
return false;
}
// Determine how to set the device format.
m_userFormat = _format;
int32_t deviceFormat = -1;
m_doByteSwap[modeToIdTable(_mode)] = false;
if (_format == RTAUDIO_SINT8) {
if (mask & AFMT_S8) {
deviceFormat = AFMT_S8;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
}
} else if (_format == RTAUDIO_SINT16) {
if (mask & AFMT_S16_NE) {
deviceFormat = AFMT_S16_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
} else if (mask & AFMT_S16_OE) {
deviceFormat = AFMT_S16_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if (_format == RTAUDIO_SINT24) {
if (mask & AFMT_S24_NE) {
deviceFormat = AFMT_S24_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
} else if (mask & AFMT_S24_OE) {
deviceFormat = AFMT_S24_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
m_doByteSwap[modeToIdTable(_mode)] = true;
}
} else if (_format == RTAUDIO_SINT32) {
if (mask & AFMT_S32_NE) {
deviceFormat = AFMT_S32_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
} else if (mask & AFMT_S32_OE) {
deviceFormat = AFMT_S32_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
m_doByteSwap[modeToIdTable(_mode)] = true;
}
}
if (deviceFormat == -1) {
// The user requested format is not natively supported by the device.
if (mask & AFMT_S16_NE) {
deviceFormat = AFMT_S16_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
} else if (mask & AFMT_S32_NE) {
deviceFormat = AFMT_S32_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
} else if (mask & AFMT_S24_NE) {
deviceFormat = AFMT_S24_NE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
} else if (mask & AFMT_S16_OE) {
deviceFormat = AFMT_S16_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT16;
m_doByteSwap[modeToIdTable(_mode)] = true;
} else if (mask & AFMT_S32_OE) {
deviceFormat = AFMT_S32_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT32;
m_doByteSwap[modeToIdTable(_mode)] = true;
} else if (mask & AFMT_S24_OE) {
deviceFormat = AFMT_S24_OE;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT24;
m_doByteSwap[modeToIdTable(_mode)] = true;
} else if (mask & AFMT_S8) {
deviceFormat = AFMT_S8;
m_deviceFormat[modeToIdTable(_mode)] = RTAUDIO_SINT8;
}
}
if (m_deviceFormat[modeToIdTable(_mode)] == 0) {
// This really shouldn't happen ...
close(fd);
ATA_ERROR("device (" << ainfo.name << ") data format not supported by RtAudio.");
return false;
}
// Set the data format.
int32_t temp = deviceFormat;
result = ioctl(fd, SNDCTL_DSP_SETFMT, &deviceFormat);
if ( result == -1
|| deviceFormat != temp) {
close(fd);
ATA_ERROR("error setting data format on device (" << ainfo.name << ").");
return false;
}
// Attempt to set the buffer size. According to OSS, the minimum
// number of buffers is two. The supposed minimum buffer size is 16
// bytes, so that will be our lower bound. The argument to this
// call is in the form 0xMMMMSSSS (hex), where the buffer size (in
// bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
// We'll check the actual value used near the end of the setup
// procedure.
int32_t ossBufferBytes = *_bufferSize * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]) * deviceChannels;
if (ossBufferBytes < 16) {
ossBufferBytes = 16;
}
int32_t buffers = 0;
buffers = _options.numberOfBuffers;
if (_options.flags.m_minimizeLatency == true) {
buffers = 2;
}
if (buffers < 2) {
buffers = 3;
}
temp = ((int) buffers << 16) + (int)(log10((double)ossBufferBytes) / log10(2.0));
result = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &temp);
if (result == -1) {
close(fd);
ATA_ERROR("error setting buffer size on device (" << ainfo.name << ").");
return false;
}
m_nBuffers = buffers;
// Save buffer size (in sample frames).
*_bufferSize = ossBufferBytes / (audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]) * deviceChannels);
m_bufferSize = *_bufferSize;
// Set the sample rate.
int32_t srate = _sampleRate;
result = ioctl(fd, SNDCTL_DSP_SPEED, &srate);
if (result == -1) {
close(fd);
ATA_ERROR("error setting sample rate (" << _sampleRate << ") on device (" << ainfo.name << ").");
return false;
}
// Verify the sample rate setup worked.
if (abs(srate - _sampleRate) > 100) {
close(fd);
ATA_ERROR("device (" << ainfo.name << ") does not support sample rate (" << _sampleRate << ").");
return false;
}
m_sampleRate = _sampleRate;
if ( _mode == audio::orchestra::mode_input
&& m__mode == audio::orchestra::mode_output
&& m_device[0] == _device) {
// We're doing duplex setup here.
m_deviceFormat[0] = m_deviceFormat[1];
m_nDeviceChannels[0] = deviceChannels;
}
// Set interleaving parameters.
m_deviceInterleaved[modeToIdTable(_mode)] = true;
// Set flags for buffer conversion
m_doConvertBuffer[modeToIdTable(_mode)] = false;
if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
if ( m_deviceInterleaved[modeToIdTable(_mode)] == false
&& m_nUserChannels[modeToIdTable(_mode)] > 1) {
m_doConvertBuffer[modeToIdTable(_mode)] = true;
}
m_private->id[modeToIdTable(_mode)] = fd;
// Allocate necessary internal buffers.
uint64_t bufferBytes;
bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
if (m_userBuffer[modeToIdTable(_mode)] == nullptr) {
ATA_ERROR("error allocating user buffer memory.");
goto error;
}
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
bool makeBuffer = true;
bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
if (_mode == audio::orchestra::mode_input) {
if ( m__mode == audio::orchestra::mode_output
&& m_deviceBuffer) {
uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
if (bufferBytes <= bytesOut) {
makeBuffer = false;
}
}
}
if (makeBuffer) {
bufferBytes *= *_bufferSize;
if (m_deviceBuffer) {
free(m_deviceBuffer);
}
m_deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_deviceBuffer == nullptr) {
ATA_ERROR("error allocating device buffer memory.");
goto error;
}
}
}
m_device[modeToIdTable(_mode)] = _device;
m_state = audio::orchestra::state_stopped;
// Setup the buffer conversion information structure.
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
setConvertInfo(_mode, _firstChannel);
}
// Setup thread if necessary.
if (m_mode == audio::orchestra::mode_output && _mode == audio::orchestra::mode_input) {
// We had already set up an output stream.
m_mode = audio::orchestra::mode_duplex;
if (m_device[0] == _device) {
m_private->id[0] = fd;
}
} else {
m_mode = _mode;
// Setup callback thread.
m_private->threadRunning = true;
m_private->thread = new std11::thread(ossCallbackHandler, this);
if (m_private->thread == nullptr) {
m_private->threadRunning = false;
ATA_ERROR("creating callback thread!");
goto error;
}
}
return true;
error:
if (m_private->id[0] != nullptr) {
close(m_private->id[0]);
m_private->id[0] = nullptr;
}
if (m_private->id[1] != nullptr) {
close(m_private->id[1]);
m_private->id[1] = nullptr;
}
for (int32_t i=0; i<2; i++) {
if (m_userBuffer[i]) {
free(m_userBuffer[i]);
m_userBuffer[i] = 0;
}
}
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = 0;
}
return false;
}
enum audio::orchestra::error audio::orchestra::api::Oss::closeStream() {
	// Tear down the OSS stream: stop the callback thread, halt the DSP
	// device(s), close the file descriptors and release every buffer.
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("no open stream to close!");
		return audio::orchestra::error_warning;
	}
	m_private->threadRunning = false;
	m_mutex.lock();
	if (m_state == audio::orchestra::state_stopped) {
		// Wake the callback thread so it can observe threadRunning == false.
		m_private->runnable.notify_one();
	}
	m_mutex.unlock();
	m_private->thread->join();
	if (m_state == audio::orchestra::state_running) {
		// Halt the device that is currently streaming before closing it.
		if (m_mode == audio::orchestra::mode_output || m_mode == audio::orchestra::mode_duplex) {
			ioctl(m_private->id[0], SNDCTL_DSP_HALT, 0);
		} else {
			ioctl(m_private->id[1], SNDCTL_DSP_HALT, 0);
		}
		m_state = audio::orchestra::state_stopped;
	}
	// id[] holds plain OSS file descriptors (ints): 0 is the "not open"
	// sentinel, as in upstream RtAudio. Comparing an int against nullptr
	// (as the previous code did) is ill-formed.
	// NOTE(review): assumes OssPrivate zero-initializes id[] — confirm.
	if (m_private->id[0] != 0) {
		close(m_private->id[0]);
		m_private->id[0] = 0;
	}
	if (m_private->id[1] != 0) {
		close(m_private->id[1]);
		m_private->id[1] = 0;
	}
	for (int32_t iii=0; iii<2; ++iii) {
		if (m_userBuffer[iii] != nullptr) {
			free(m_userBuffer[iii]);
			m_userBuffer[iii] = nullptr;
		}
	}
	if (m_deviceBuffer != nullptr) {
		free(m_deviceBuffer);
		m_deviceBuffer = nullptr;
	}
	m_mode = audio::orchestra::mode_unknow;
	m_state = audio::orchestra::state_closed;
	return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::Oss::startStream() {
	// Mark the stream running and wake the callback thread.
	// TODO : Check return ...
	audio::orchestra::Api::startStream();
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_running) {
		ATA_ERROR("the stream is already running!");
		return audio::orchestra::error_warning;
	}
	m_mutex.lock();
	m_state = audio::orchestra::state_running;
	// No need to do anything else here ... OSS automatically starts
	// when fed samples.
	m_mutex.unlock();
	m_private->runnable.notify_one();
	// Fix: the original fell off the end of this enum-returning function
	// (undefined behavior).
	return audio::orchestra::error_none;
}
enum audio::orchestra::error audio::orchestra::api::Oss::stopStream() {
	// Drain the output with silence, then halt the DSP device(s).
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		// Fix: a bare `return;` does not compile in an enum-returning function.
		return audio::orchestra::error_warning;
	}
	m_mutex.lock();
	// The state might change while waiting on a mutex.
	if (m_state == audio::orchestra::state_stopped) {
		m_mutex.unlock();
		return audio::orchestra::error_none;
	}
	int32_t result = 0;
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		// Flush the output with zeros a few times so the DSP does not cut
		// the last queued samples short.
		char *buffer;
		int32_t samples;
		audio::format format;
		if (m_doConvertBuffer[0]) {
			buffer = m_deviceBuffer;
			samples = m_bufferSize * m_nDeviceChannels[0];
			format = m_deviceFormat[0];
		} else {
			buffer = m_userBuffer[0];
			samples = m_bufferSize * m_nUserChannels[0];
			format = m_userFormat;
		}
		memset(buffer, 0, samples * audio::getFormatBytes(format));
		for (uint32_t iii=0; iii<m_nBuffers+1; iii++) {
			result = write(m_private->id[0], buffer, samples * audio::getFormatBytes(format));
			if (result == -1) {
				ATA_ERROR("audio write error.");
				// Fix: release the lock before returning (the original
				// returned here with m_mutex still held).
				m_mutex.unlock();
				return audio::orchestra::error_warning;
			}
		}
		result = ioctl(m_private->id[0], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			ATA_ERROR("system error stopping callback procedure on device (" << m_device[0] << ").");
			goto unlock;
		}
		m_private->triggered = false;
	}
	if (    m_mode == audio::orchestra::mode_input
	     || (    m_mode == audio::orchestra::mode_duplex
	          && m_private->id[0] != m_private->id[1])) {
		result = ioctl(m_private->id[1], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			ATA_ERROR("system error stopping input callback procedure on device (" << m_device[0] << ").");
			goto unlock;
		}
	}
unlock:
	m_state = audio::orchestra::state_stopped;
	m_mutex.unlock();
	if (result != -1) {
		return audio::orchestra::error_none;
	}
	return audio::orchestra::error_systemError;
}
enum audio::orchestra::error audio::orchestra::api::Oss::abortStream() {
	// Halt the DSP device(s) immediately, without draining pending samples.
	if (verifyStream() != audio::orchestra::error_none) {
		return audio::orchestra::error_fail;
	}
	if (m_state == audio::orchestra::state_stopped) {
		ATA_ERROR("the stream is already stopped!");
		return audio::orchestra::error_warning;
	}
	m_mutex.lock();
	// The state might change while waiting on a mutex.
	if (m_state == audio::orchestra::state_stopped) {
		m_mutex.unlock();
		// Fix: a bare `return;` does not compile in an enum-returning function.
		return audio::orchestra::error_none;
	}
	int32_t result = 0;
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		result = ioctl(m_private->id[0], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			ATA_ERROR("system error stopping callback procedure on device (" << m_device[0] << ").");
			goto unlock;
		}
		m_private->triggered = false;
	}
	if (    m_mode == audio::orchestra::mode_input
	     || (    m_mode == audio::orchestra::mode_duplex
	          && m_private->id[0] != m_private->id[1])) {
		result = ioctl(m_private->id[1], SNDCTL_DSP_HALT, 0);
		if (result == -1) {
			ATA_ERROR("system error stopping input callback procedure on device (" << m_device[0] << ").");
			goto unlock;
		}
	}
unlock:
	m_state = audio::orchestra::state_stopped;
	m_mutex.unlock();
	if (result != -1) {
		return audio::orchestra::error_none;
	}
	return audio::orchestra::error_systemError;
}
void audio::orchestra::api::Oss::callbackEvent() {
	// One cycle of the OSS callback: wait while stopped, run the user
	// callback, then write/read one buffer to/from the device.
	if (m_state == audio::orchestra::state_stopped) {
		std11::unique_lock<std11::mutex> lck(m_mutex);
		m_private->runnable.wait(lck);
		if (m_state != audio::orchestra::state_running) {
			return;
		}
	}
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("the stream is closed ... this shouldn't happen!");
		// Fix: this is a void function; the original returned an enum here,
		// which does not compile.
		return;
	}
	// Invoke user callback to get fresh output data.
	int32_t doStopStream = 0;
	std11::chrono::system_clock::time_point streamTime = getStreamTime();
	std::vector<enum audio::orchestra::status> status;
	if (    m_mode != audio::orchestra::mode_input
	     && m_private->xrun[0] == true) {
		status.push_back(audio::orchestra::status_underflow);
		m_private->xrun[0] = false;
	}
	if (    m_mode != audio::orchestra::mode_output
	     && m_private->xrun[1] == true) {
		status.push_back(audio::orchestra::status_overflow);
		m_private->xrun[1] = false;
	}
	doStopStream = m_callback(m_userBuffer[1],
	                          streamTime,
	                          m_userBuffer[0],
	                          streamTime,
	                          m_bufferSize,
	                          status);
	if (doStopStream == 2) {
		this->abortStream();
		return;
	}
	m_mutex.lock();
	// The state might change while waiting on a mutex.
	if (m_state == audio::orchestra::state_stopped) {
		goto unlock;
	}
	int32_t result;
	char *buffer;
	int32_t samples;
	audio::format format;
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		// Setup parameters and do buffer conversion if necessary.
		if (m_doConvertBuffer[0]) {
			buffer = m_deviceBuffer;
			convertBuffer(buffer, m_userBuffer[0], m_convertInfo[0]);
			samples = m_bufferSize * m_nDeviceChannels[0];
			format = m_deviceFormat[0];
		} else {
			buffer = m_userBuffer[0];
			samples = m_bufferSize * m_nUserChannels[0];
			format = m_userFormat;
		}
		// Do byte swapping if necessary.
		if (m_doByteSwap[0]) {
			byteSwapBuffer(buffer, samples, format);
		}
		if (    m_mode == audio::orchestra::mode_duplex
		     && m_private->triggered == false) {
			// Prime the duplex device: disable the triggers, queue one
			// buffer, then enable input and output simultaneously.
			int32_t trig = 0;
			ioctl(m_private->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
			result = write(m_private->id[0], buffer, samples * audio::getFormatBytes(format));
			// Fix: the macro names had been mangled by a search-and-replace
			// ("PCM_ENABLE_audio::orchestra::mode_input"); these are the
			// real OSS trigger macros from <sys/soundcard.h>.
			trig = PCM_ENABLE_INPUT | PCM_ENABLE_OUTPUT;
			ioctl(m_private->id[0], SNDCTL_DSP_SETTRIGGER, &trig);
			m_private->triggered = true;
		} else {
			// Write samples to device.
			result = write(m_private->id[0], buffer, samples * audio::getFormatBytes(format));
		}
		if (result == -1) {
			// We'll assume this is an underrun, though there isn't a
			// specific means for determining that.
			m_private->xrun[0] = true;
			ATA_ERROR("audio write error.");
			// Continue on to input section.
		}
	}
	if (    m_mode == audio::orchestra::mode_input
	     || m_mode == audio::orchestra::mode_duplex) {
		// Setup parameters.
		if (m_doConvertBuffer[1]) {
			buffer = m_deviceBuffer;
			samples = m_bufferSize * m_nDeviceChannels[1];
			format = m_deviceFormat[1];
		} else {
			buffer = m_userBuffer[1];
			samples = m_bufferSize * m_nUserChannels[1];
			format = m_userFormat;
		}
		// Read samples from device.
		result = read(m_private->id[1], buffer, samples * audio::getFormatBytes(format));
		if (result == -1) {
			// We'll assume this is an overrun, though there isn't a
			// specific means for determining that.
			m_private->xrun[1] = true;
			ATA_ERROR("audio read error.");
			goto unlock;
		}
		// Do byte swapping if necessary.
		if (m_doByteSwap[1]) {
			byteSwapBuffer(buffer, samples, format);
		}
		// Do buffer conversion if necessary.
		if (m_doConvertBuffer[1]) {
			convertBuffer(m_userBuffer[1], m_deviceBuffer, m_convertInfo[1]);
		}
	}
unlock:
	m_mutex.unlock();
	audio::orchestra::Api::tickStreamTime();
	if (doStopStream == 1) {
		this->stopStream();
	}
}
// Thread entry point for the OSS callback loop: spins on callbackEvent()
// until the stream asks the thread to stop.
static void ossCallbackHandler(void* _userData) {
	// Fix: the original declared an Alsa* while casting to Oss* (type
	// mismatch), and referenced the member `m_name`, which is not in scope
	// inside a static free function.
	audio::orchestra::api::Oss* myClass = reinterpret_cast<audio::orchestra::api::Oss*>(_userData);
	etk::thread::setName("OSS callback");
	while (myClass->m_private->threadRunning == true) {
		myClass->callbackEvent();
	}
}
#endif

51
audio/orchestra/api/Oss.h Normal file
View File

@@ -0,0 +1,51 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_OSS_H__) && defined(ORCHESTRA_BUILD_OSS)
#define __AUDIO_ORCHESTRA_API_OSS_H__
namespace audio {
namespace orchestra {
namespace api {
// Forward declaration of the private implementation holding the OSS state
// (device file descriptors, callback thread, ...).
class OssPrivate;
// Open Sound System (OSS) back-end of the orchestra API.
class Oss: public audio::orchestra::Api {
public:
// Factory used by the back-end registry.
static audio::orchestra::Api* Create();
public:
Oss();
virtual ~Oss();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::type_oss;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
// Stream life-cycle entry points (see audio::orchestra::Api for the contract).
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
void callbackEvent();
private:
std11::shared_ptr<OssPrivate> m_private;
// Opens the OSS device node for one direction and configures
// channels/format/rate/fragments; returns false on failure.
bool probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
};
}
}
}
#endif

View File

@@ -0,0 +1,424 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if defined(ORCHESTRA_BUILD_PULSE)
#include <unistd.h>
#include <limits.h>
#include <audio/orchestra/Interface.h>
#include <audio/orchestra/debug.h>
#include <pulse/error.h>
#include <pulse/simple.h>
#include <cstdio>
#include <etk/thread/tools.h>
#undef __class__
#define __class__ "api::Pulse"
// Factory entry point used by the back-end registry; the caller owns the
// returned object.
audio::orchestra::Api* audio::orchestra::api::Pulse::Create() {
return new audio::orchestra::api::Pulse();
}
// Sample rates the PulseAudio back-end advertises. The list is
// zero-terminated so it can be walked with a pointer loop.
static const uint32_t SUPPORTED_SAMPLERATES[] = {
8000,
16000,
22050,
32000,
44100,
48000,
96000,
0
};
// Maps orchestra sample formats onto PulseAudio sample formats.
struct rtaudio_pa_format_mapping_t {
enum audio::format airtaudio_format;
pa_sample_format_t pa_format;
};
// Terminated by the {format_unknow, PA_SAMPLE_INVALID} sentinel entry.
static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
{audio::format_int16, PA_SAMPLE_S16LE},
{audio::format_int32, PA_SAMPLE_S32LE},
{audio::format_float, PA_SAMPLE_FLOAT32LE},
{audio::format_unknow, PA_SAMPLE_INVALID}};
namespace audio {
	namespace orchestra {
		namespace api {
			// Internal PulseAudio state, kept out of the public header (pimpl).
			class PulsePrivate {
				public:
					pa_simple* s_play; //!< playback connection to the pulse server (nullptr while closed)
					pa_simple* s_rec; //!< record connection to the pulse server (nullptr while closed)
					std11::shared_ptr<std11::thread> thread; //!< callback thread (created lazily in probeDeviceOpen)
					bool threadRunning; //!< the callback thread keeps looping while true
					std11::condition_variable runnable_cv; //!< wakes the callback thread on start/close
					bool runnable; //!< predicate for runnable_cv (guards against spurious wake-ups)
					PulsePrivate() :
					  // Fix: initialize the pointers with nullptr instead of the
					  // raw literal 0, consistent with the rest of this file.
					  s_play(nullptr),
					  s_rec(nullptr),
					  threadRunning(false),
					  runnable(false) {
					}
			};
		}
	}
}
// Constructor: only allocates the private state; the pulse connections and
// the callback thread are created lazily when a stream is opened.
audio::orchestra::api::Pulse::Pulse() :
m_private(new audio::orchestra::api::PulsePrivate()) {
}
// Destructor: make sure an open stream is shut down before the private
// state is released.
audio::orchestra::api::Pulse::~Pulse() {
if (m_state != audio::orchestra::state_closed) {
closeStream();
}
}
// PulseAudio is exposed as exactly one virtual device (the server mixes).
uint32_t audio::orchestra::api::Pulse::getDeviceCount() {
return 1;
}
// Describes the single virtual PulseAudio device: a stereo duplex endpoint
// supporting the fixed sample-rate list and the three mapped formats.
// @param _device device index (ignored: there is only one device).
// @return the filled-in device description.
audio::orchestra::DeviceInfo audio::orchestra::api::Pulse::getDeviceInfo(uint32_t _device) {
	audio::orchestra::DeviceInfo info;
	info.name = "PulseAudio";
	info.probed = true;
	info.isDefaultInput = true;
	info.isDefaultOutput = true;
	info.inputChannels = 2;
	info.outputChannels = 2;
	info.duplexChannels = 2;
	// Walk the zero-terminated rate table by index.
	for (size_t iii=0; SUPPORTED_SAMPLERATES[iii] != 0; ++iii) {
		info.sampleRates.push_back(SUPPORTED_SAMPLERATES[iii]);
	}
	info.nativeFormats.push_back(audio::format_int16);
	info.nativeFormats.push_back(audio::format_int32);
	info.nativeFormats.push_back(audio::format_float);
	return info;
}
// Thread entry point: forwards to the member callback loop.
static void pulseaudio_callback(void* _userData) {
audio::orchestra::api::Pulse* myClass = reinterpret_cast<audio::orchestra::api::Pulse*>(_userData);
myClass->callbackEvent();
}
// Callback thread body: names the thread, then runs one audio cycle per
// iteration until closeStream() clears threadRunning.
void audio::orchestra::api::Pulse::callbackEvent() {
etk::thread::setName("Pulse IO-" + m_name);
while (m_private->threadRunning == true) {
callbackEventOneCycle();
}
}
enum audio::orchestra::error audio::orchestra::api::Pulse::closeStream() {
	// Stop the callback thread, then free the pulse connections and the
	// user buffers.
	m_private->threadRunning = false;
	m_mutex.lock();
	if (m_state == audio::orchestra::state_stopped) {
		// Wake the callback thread blocked on runnable_cv so it can exit.
		m_private->runnable = true;
		m_private->runnable_cv.notify_one();
	}
	m_mutex.unlock();
	// Guard against a stream that never got far enough to start the thread.
	if (m_private->thread != nullptr) {
		m_private->thread->join();
	}
	if (m_private->s_play) {
		// Push every queued sample to the server before freeing the stream.
		pa_simple_flush(m_private->s_play, nullptr);
		pa_simple_free(m_private->s_play);
		m_private->s_play = nullptr;
	}
	if (m_private->s_rec) {
		pa_simple_free(m_private->s_rec);
		m_private->s_rec = nullptr;
	}
	m_userBuffer[0].clear();
	m_userBuffer[1].clear();
	m_state = audio::orchestra::state_closed;
	m_mode = audio::orchestra::mode_unknow;
	return audio::orchestra::error_none;
}
void audio::orchestra::api::Pulse::callbackEventOneCycle() {
	// One full audio cycle: wait while stopped, invoke the user callback,
	// then push/pull one buffer to/from the PulseAudio server.
	if (m_state == audio::orchestra::state_stopped) {
		std11::unique_lock<std11::mutex> lck(m_mutex);
		while (!m_private->runnable) {
			m_private->runnable_cv.wait(lck);
		}
		if (m_state != audio::orchestra::state_running) {
			// Fix: the unique_lock releases the mutex on destruction; the
			// original also called m_mutex.unlock() here, which would have
			// unlocked the mutex twice (undefined behavior).
			return;
		}
	}
	if (m_state == audio::orchestra::state_closed) {
		ATA_ERROR("the stream is closed ... this shouldn't happen!");
		return;
	}
	std11::chrono::system_clock::time_point streamTime = getStreamTime();
	std::vector<enum audio::orchestra::status> status;
	// Run the user callback: input buffer in, output buffer out.
	int32_t doStopStream = m_callback(&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0],
	                                  streamTime,
	                                  &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0],
	                                  streamTime,
	                                  m_bufferSize,
	                                  status);
	if (doStopStream == 2) {
		abortStream();
		return;
	}
	m_mutex.lock();
	void *pulse_in = m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] ? m_deviceBuffer : &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0];
	void *pulse_out = m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] ? m_deviceBuffer : &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0];
	// The state might have changed while waiting on the mutex.
	if (m_state != audio::orchestra::state_running) {
		goto unlock;
	}
	int32_t pa_error;
	size_t bytes;
	if (    m_mode == audio::orchestra::mode_output
	     || m_mode == audio::orchestra::mode_duplex) {
		if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]) {
			convertBuffer(m_deviceBuffer,
			              &m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)][0],
			              m_convertInfo[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]);
			bytes = m_nDeviceChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] * m_bufferSize * audio::getFormatBytes(m_deviceFormat[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)]);
		} else {
			bytes = m_nUserChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_output)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
		}
		if (pa_simple_write(m_private->s_play, pulse_out, bytes, &pa_error) < 0) {
			ATA_ERROR("audio write error, " << pa_strerror(pa_error) << ".");
			// Fix: release the lock before returning (the original returned
			// here with m_mutex still held, deadlocking the next cycle).
			m_mutex.unlock();
			return;
		}
	}
	if (m_mode == audio::orchestra::mode_input || m_mode == audio::orchestra::mode_duplex) {
		if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]) {
			bytes = m_nDeviceChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] * m_bufferSize * audio::getFormatBytes(m_deviceFormat[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]);
		} else {
			bytes = m_nUserChannels[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)] * m_bufferSize * audio::getFormatBytes(m_userFormat);
		}
		if (pa_simple_read(m_private->s_rec, pulse_in, bytes, &pa_error) < 0) {
			ATA_ERROR("audio read error, " << pa_strerror(pa_error) << ".");
			// Fix: release the lock before returning (same leak as above).
			m_mutex.unlock();
			return;
		}
		if (m_doConvertBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]) {
			convertBuffer(&m_userBuffer[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)][0],
			              m_deviceBuffer,
			              m_convertInfo[audio::orchestra::modeToIdTable(audio::orchestra::mode_input)]);
		}
	}
unlock:
	m_mutex.unlock();
	audio::orchestra::Api::tickStreamTime();
	if (doStopStream == 1) {
		stopStream();
		return;
	}
	return;
}
// Marks the stream running and wakes the callback thread waiting on
// runnable_cv. Returns error_invalidUse if no stream is open,
// error_warning if already running, error_none otherwise.
enum audio::orchestra::error audio::orchestra::api::Pulse::startStream() {
// TODO : Check return ...
audio::orchestra::Api::startStream();
if (m_state == audio::orchestra::state_closed) {
ATA_ERROR("the stream is not open!");
return audio::orchestra::error_invalidUse;
}
if (m_state == audio::orchestra::state_running) {
ATA_ERROR("the stream is already running!");
return audio::orchestra::error_warning;
}
m_mutex.lock();
// state and the runnable predicate are updated under the same mutex the
// callback thread waits on, so the wake-up cannot be missed.
m_state = audio::orchestra::state_running;
m_private->runnable = true;
m_private->runnable_cv.notify_one();
m_mutex.unlock();
return audio::orchestra::error_none;
}
// Stops the stream after draining pending playback samples to the server.
// Returns error_invalidUse / error_warning on misuse, error_systemError if
// the drain fails, error_none otherwise.
enum audio::orchestra::error audio::orchestra::api::Pulse::stopStream() {
if (m_state == audio::orchestra::state_closed) {
ATA_ERROR("the stream is not open!");
return audio::orchestra::error_invalidUse;
}
if (m_state == audio::orchestra::state_stopped) {
ATA_ERROR("the stream is already stopped!");
return audio::orchestra::error_warning;
}
// Flip the state before taking the lock so the callback thread stops
// pushing audio as soon as possible.
// NOTE(review): the state is assigned again below under the lock —
// presumably intentional, but the duplication is worth confirming.
m_state = audio::orchestra::state_stopped;
m_mutex.lock();
if (m_private->s_play) {
int32_t pa_error;
// Block until every queued sample has actually been played.
if (pa_simple_drain(m_private->s_play, &pa_error) < 0) {
ATA_ERROR("error draining output device, " << pa_strerror(pa_error) << ".");
m_mutex.unlock();
return audio::orchestra::error_systemError;
}
}
m_state = audio::orchestra::state_stopped;
m_mutex.unlock();
return audio::orchestra::error_none;
}
// Stops the stream immediately, discarding (flushing) any queued playback
// samples instead of draining them like stopStream().
enum audio::orchestra::error audio::orchestra::api::Pulse::abortStream() {
if (m_state == audio::orchestra::state_closed) {
ATA_ERROR("the stream is not open!");
return audio::orchestra::error_invalidUse;
}
if (m_state == audio::orchestra::state_stopped) {
ATA_ERROR("the stream is already stopped!");
return audio::orchestra::error_warning;
}
// Flip the state before taking the lock so the callback thread stops
// pushing audio as soon as possible (re-asserted under the lock below).
m_state = audio::orchestra::state_stopped;
m_mutex.lock();
if (m_private && m_private->s_play) {
int32_t pa_error;
// Drop whatever is still queued on the server side.
if (pa_simple_flush(m_private->s_play, &pa_error) < 0) {
ATA_ERROR("error flushing output device, " << pa_strerror(pa_error) << ".");
m_mutex.unlock();
return audio::orchestra::error_systemError;
}
}
m_state = audio::orchestra::state_stopped;
m_mutex.unlock();
return audio::orchestra::error_none;
}
// Validates the requested parameters against what the pulse-simple API
// supports, sets up the stream bookkeeping, opens the pa_simple connection
// for the requested direction, and (on first open) starts the callback
// thread. Returns false on any failure, after releasing the buffers.
bool audio::orchestra::api::Pulse::probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options) {
uint64_t bufferBytes = 0;
pa_sample_spec ss;
// Only one (virtual) device exists for this back-end.
if (_device != 0) {
return false;
}
if (_mode != audio::orchestra::mode_input && _mode != audio::orchestra::mode_output) {
return false;
}
if (_channels != 1 && _channels != 2) {
ATA_ERROR("unsupported number of channels.");
return false;
}
ss.channels = _channels;
// Channel offsets are not supported by the simple API.
if (_firstChannel != 0) {
return false;
}
// The requested rate must be one of the advertised values (zero-terminated list).
bool sr_found = false;
for (const uint32_t *sr = SUPPORTED_SAMPLERATES; *sr; ++sr) {
if (_sampleRate == *sr) {
sr_found = true;
m_sampleRate = _sampleRate;
ss.rate = _sampleRate;
break;
}
}
if (!sr_found) {
ATA_ERROR("unsupported sample rate.");
return false;
}
// Map the orchestra format onto a pulse sample format (sentinel-terminated table).
bool sf_found = 0;
for (const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
sf->airtaudio_format && sf->pa_format != PA_SAMPLE_INVALID;
++sf) {
if (_format == sf->airtaudio_format) {
sf_found = true;
m_userFormat = sf->airtaudio_format;
ss.format = sf->pa_format;
break;
}
}
if (!sf_found) {
ATA_ERROR("unsupported sample format.");
return false;
}
// The device format equals the user format, so no conversion or byte
// swapping is ever needed for this back-end.
m_deviceInterleaved[modeToIdTable(_mode)] = true;
m_nBuffers = 1;
m_doByteSwap[modeToIdTable(_mode)] = false;
m_doConvertBuffer[modeToIdTable(_mode)] = false;
m_deviceFormat[modeToIdTable(_mode)] = m_userFormat;
m_nUserChannels[modeToIdTable(_mode)] = _channels;
m_nDeviceChannels[modeToIdTable(_mode)] = _channels + _firstChannel;
m_channelOffset[modeToIdTable(_mode)] = 0;
// Allocate necessary internal buffers.
bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
ATA_ERROR("error allocating user buffer memory.");
goto error;
}
m_bufferSize = *_bufferSize;
// NOTE(review): m_doConvertBuffer was set to false just above, so this
// whole conversion-buffer branch is currently dead code — confirm intent.
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
bool makeBuffer = true;
bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
if (_mode == audio::orchestra::mode_input) {
if (m_mode == audio::orchestra::mode_output && m_deviceBuffer) {
uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
if (bufferBytes <= bytesOut) makeBuffer = false;
}
}
if (makeBuffer) {
bufferBytes *= *_bufferSize;
if (m_deviceBuffer) free(m_deviceBuffer);
m_deviceBuffer = (char *) calloc(bufferBytes, 1);
if (m_deviceBuffer == nullptr) {
ATA_ERROR("error allocating device buffer memory.");
goto error;
}
}
}
m_device[modeToIdTable(_mode)] = _device;
// Setup the buffer conversion information structure.
if (m_doConvertBuffer[modeToIdTable(_mode)]) {
setConvertInfo(_mode, _firstChannel);
}
// Open the pulse-simple connection for the requested direction.
int32_t error;
switch (_mode) {
case audio::orchestra::mode_input:
m_private->s_rec = pa_simple_new(nullptr, "orchestra", PA_STREAM_RECORD, nullptr, "Record", &ss, nullptr, nullptr, &error);
if (!m_private->s_rec) {
ATA_ERROR("error connecting input to PulseAudio server.");
goto error;
}
break;
case audio::orchestra::mode_output:
m_private->s_play = pa_simple_new(nullptr, "orchestra", PA_STREAM_PLAYBACK, nullptr, "Playback", &ss, nullptr, nullptr, &error);
if (!m_private->s_play) {
ATA_ERROR("error connecting output to PulseAudio server.");
goto error;
}
break;
default:
goto error;
}
// Opening the same direction twice is an error; opening the other
// direction upgrades the stream to duplex.
if (m_mode == audio::orchestra::mode_unknow) {
m_mode = _mode;
} else if (m_mode == _mode) {
goto error;
}else {
m_mode = audio::orchestra::mode_duplex;
}
// Start the callback thread on the first successful open only.
if (!m_private->threadRunning) {
m_private->threadRunning = true;
std11::shared_ptr<std11::thread> tmpThread(new std11::thread(&pulseaudio_callback, this));
m_private->thread = std::move(tmpThread);
if (m_private->thread == nullptr) {
ATA_ERROR("error creating thread.");
goto error;
}
}
m_state = audio::orchestra::state_stopped;
return true;
error:
// Failure path: release every buffer allocated above.
for (int32_t i=0; i<2; i++) {
m_userBuffer[i].clear();
}
if (m_deviceBuffer) {
free(m_deviceBuffer);
m_deviceBuffer = 0;
}
return false;
}
#endif

View File

@@ -0,0 +1,54 @@
/** @file
* @author Edouard DUPIN
* @copyright 2011, Edouard DUPIN, all right reserved
* @license APACHE v2.0 (see license file)
* @fork from RTAudio
*/
#if !defined(__AUDIO_ORCHESTRA_API_PULSE_H__) && defined(ORCHESTRA_BUILD_PULSE)
#define __AUDIO_ORCHESTRA_API_PULSE_H__
namespace audio {
namespace orchestra {
namespace api {
// Forward declaration of the private implementation (pimpl) holding the
// PulseAudio connection state.
class PulsePrivate;
// PulseAudio back-end of the orchestra API, built on the pulse "simple"
// synchronous API; it exposes a single virtual stereo duplex device.
class Pulse: public audio::orchestra::Api {
public:
// Factory used by the back-end registry.
static audio::orchestra::Api* Create();
public:
Pulse();
virtual ~Pulse();
enum audio::orchestra::type getCurrentApi() {
return audio::orchestra::type_pulse;
}
uint32_t getDeviceCount();
audio::orchestra::DeviceInfo getDeviceInfo(uint32_t _device);
enum audio::orchestra::error closeStream();
enum audio::orchestra::error startStream();
enum audio::orchestra::error stopStream();
enum audio::orchestra::error abortStream();
// This function is intended for internal use only. It must be
// public because it is called by the internal callback handler,
// which is not a member of RtAudio. External use of this function
// will most likely produce highly undesireable results!
// callbackEventOneCycle() runs one read/write cycle; callbackEvent()
// is the thread body looping on it while the stream is open.
void callbackEventOneCycle();
void callbackEvent();
private:
std11::shared_ptr<PulsePrivate> m_private;
// NOTE(review): appears unused by the visible implementation — confirm.
std::vector<audio::orchestra::DeviceInfo> m_devices;
void saveDeviceInfo();
// Validates parameters, opens the pa_simple connection for one direction
// and starts the callback thread on first open; false on failure.
bool probeDeviceOpen(uint32_t _device,
audio::orchestra::mode _mode,
uint32_t _channels,
uint32_t _firstChannel,
uint32_t _sampleRate,
audio::format _format,
uint32_t *_bufferSize,
const audio::orchestra::StreamOptions& _options);
};
}
}
}
#endif